Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net

BPF alignment tests got a conflict because the registers
are output as Rn_w instead of just Rn in net-next, and
in net a fixup for a testcase prohibits logical operations
on pointers before using them.

Also, we should not attempt to patch BPF call args if JIT always on is
enabled.  Instead, if we fail to JIT the subprogs we should pass
an error back up and fail immediately.

Signed-off-by: David S. Miller <davem@davemloft.net>
diff --git a/Documentation/bpf/bpf_devel_QA.txt b/Documentation/bpf/bpf_devel_QA.txt
new file mode 100644
index 0000000..cefef85
--- /dev/null
+++ b/Documentation/bpf/bpf_devel_QA.txt
@@ -0,0 +1,519 @@
+This document provides information for the BPF subsystem about various
+workflows related to reporting bugs, submitting patches, and queueing
+patches for stable kernels.
+
+For general information about submitting patches, please refer to
+Documentation/process/. This document only describes additional specifics
+related to BPF.
+
+Reporting bugs:
+---------------
+
+Q: How do I report bugs for BPF kernel code?
+
+A: Since all BPF kernel development as well as bpftool and iproute2 BPF
+   loader development happens through the netdev kernel mailing list,
+   please report any found issues around BPF to the following mailing
+   list:
+
+     netdev@vger.kernel.org
+
+   This may also include issues related to XDP, BPF tracing, etc.
+
+   Given netdev has a high volume of traffic, please also add the BPF
+   maintainers to Cc (from kernel MAINTAINERS file):
+
+     Alexei Starovoitov <ast@kernel.org>
+     Daniel Borkmann <daniel@iogearbox.net>
+
+   In case a buggy commit has already been identified, make sure to keep
+   the actual commit authors in Cc as well for the report. They can
+   typically be identified through the kernel's git tree.
+
+   Please do *not* report BPF issues to bugzilla.kernel.org since it
+   is a guarantee that the reported issue will be overlooked.
+
+Submitting patches:
+-------------------
+
+Q: To which mailing list do I need to submit my BPF patches?
+
+A: Please submit your BPF patches to the netdev kernel mailing list:
+
+     netdev@vger.kernel.org
+
+   Historically, BPF came out of networking and has always been maintained
+   by the kernel networking community. Although these days BPF touches
+   many other subsystems as well, the patches are still routed mainly
+   through the networking community.
+
+   In case your patch has changes in various different subsystems (e.g.
+   tracing, security, etc), make sure to Cc the related kernel mailing
+   lists and maintainers from there as well, so they are able to review
+   the changes and provide their Acked-by's to the patches.
+
+Q: Where can I find patches currently under discussion for BPF subsystem?
+
+A: All patches that are Cc'ed to netdev are queued for review under the
+   netdev patchwork project:
+
+     http://patchwork.ozlabs.org/project/netdev/list/
+
+   Those patches which target BPF are assigned to a 'bpf' delegate for
+   further processing from BPF maintainers. The current queue with
+   patches under review can be found at:
+
+     https://patchwork.ozlabs.org/project/netdev/list/?delegate=77147
+
+   Once the patches have been reviewed by the BPF community as a whole
+   and approved by the BPF maintainers, their status in patchwork will be
+   changed to 'Accepted' and the submitter will be notified by mail. This
+   means that the patches look good from a BPF perspective and have been
+   applied to one of the two BPF kernel trees.
+
+   In case feedback from the community requires a respin of the patches,
+   their status in patchwork will be set to 'Changes Requested' and they
+   will be purged from the current review queue. The same applies to
+   patches that get rejected or are not applicable to the BPF trees (but
+   were assigned to the 'bpf' delegate).
+
+Q: How do the changes make their way into Linux?
+
+A: There are two BPF kernel trees (git repositories). Once patches have
+   been accepted by the BPF maintainers, they will be applied to one
+   of the two BPF trees:
+
+     https://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf.git/
+     https://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf-next.git/
+
+   The bpf tree itself is for fixes only, whereas bpf-next is for features,
+   cleanups or other kinds of improvements ("next-like" content). This is
+   analogous to net and net-next trees for networking. Both bpf and
+   bpf-next will only have a master branch, which keeps it simple to
+   determine against which branch patches should be rebased.
+
+   Accumulated BPF patches in the bpf tree will regularly get pulled
+   into the net kernel tree. Likewise, accumulated BPF patches accepted
+   into the bpf-next tree will make their way into net-next tree. net and
+   net-next are both run by David S. Miller. From there, they will go
+   into the kernel mainline tree run by Linus Torvalds. To read up on the
+   process of net and net-next being merged into the mainline tree, see
+   the netdev FAQ under:
+
+     Documentation/networking/netdev-FAQ.txt
+
+   Occasionally, to prevent merge conflicts, we might send pull requests
+   to other trees (e.g. tracing) with a small subset of the patches, but
+   net and net-next are always the main trees targeted for integration.
+
+   The pull requests will contain a high-level summary of the accumulated
+   patches and can be searched on netdev kernel mailing list through the
+   following subject lines (yyyy-mm-dd is the date of the pull request):
+
+     pull-request: bpf yyyy-mm-dd
+     pull-request: bpf-next yyyy-mm-dd
+
+Q: How do I indicate which tree (bpf vs. bpf-next) my patch should be
+   applied to?
+
+A: The process is the very same as described in the netdev FAQ, so
+   please read up on it. The subject line must indicate whether the
+   patch is a fix or rather "next-like" content in order to let the
+   maintainers know whether it is targeted at bpf or bpf-next.
+
+   For fixes eventually landing in bpf -> net tree, the subject must
+   look like:
+
+     git format-patch --subject-prefix='PATCH bpf' start..finish
+
+   For features/improvements/etc that should eventually land in
+   bpf-next -> net-next, the subject must look like:
+
+     git format-patch --subject-prefix='PATCH bpf-next' start..finish
+
+   If you are unsure whether the patch or patch series should go into
+   bpf or net directly, or into bpf-next or net-next directly, it is not
+   a problem if the subject line names net or net-next as the target.
+   It is eventually up to the maintainers to do the delegation of
+   the patches.
+
+   If it is clear that patches should go into bpf or bpf-next tree,
+   please make sure to rebase the patches against those trees in
+   order to reduce potential conflicts.
+
+   In case the patch or patch series has to be reworked and sent out
+   again in a second or later revision, it is also required to add a
+   version number (v2, v3, ...) into the subject prefix:
+
+     git format-patch --subject-prefix='PATCH net-next v2' start..finish
+
+   When changes have been requested to the patch series, always send the
+   whole patch series again with the feedback incorporated (never send
+   individual diffs on top of the old series).
+
+Q: What does it mean when a patch gets applied to bpf or bpf-next tree?
+
+A: It means that the patch looks good for mainline inclusion from
+   a BPF point of view.
+
+   Be aware that this is not a final verdict that the patch will
+   automatically get accepted into net or net-next trees eventually:
+
+   On the netdev kernel mailing list reviews can come in at any point
+   in time. If discussions around a patch conclude that they cannot
+   get included as-is, we will either apply a follow-up fix or drop
+   them from the trees entirely. Therefore, we also reserve the right to
+   rebase the trees when deemed necessary. After all, the purpose of the
+   trees is to i) accumulate and stage BPF patches for integration into
+   trees like net and net-next, and ii) run an extensive BPF test suite
+   and workloads on the patches before they make their way any further.
+
+   Once the BPF pull request has been accepted by David S. Miller,
+   the patches end up in net or net-next tree, respectively, and
+   make their way from there further into mainline. Again, see the
+   netdev FAQ for additional information e.g. on how often they are
+   merged to mainline.
+
+Q: How long do I need to wait for feedback on my BPF patches?
+
+A: We try to keep the latency low. The usual time to feedback will
+   be around 2 or 3 business days. It may vary depending on the
+   complexity of changes and current patch load.
+
+Q: How often do you send pull requests to major kernel trees like
+   net or net-next?
+
+A: Pull requests will be sent out rather often in order to not
+   accumulate too many patches in bpf or bpf-next.
+
+   As a rule of thumb, expect pull requests for each tree regularly
+   at the end of the week. In some cases pull requests could also come
+   in the middle of the week, depending on the current patch load or
+   urgency.
+
+Q: Are patches applied to bpf-next when the merge window is open?
+
+A: For the time when the merge window is open, bpf-next will not be
+   processed. This is roughly analogous to net-next patch processing,
+   so feel free to read up on the netdev FAQ about further details.
+
+   During those two weeks of merge window, we might ask you to resend
+   your patch series once bpf-next is open again. Once Linus has released
+   a v*-rc1 after the merge window, we continue processing bpf-next.
+
+   For non-subscribers to kernel mailing lists, there is also a status
+   page run by David S. Miller on net-next that provides guidance:
+
+     http://vger.kernel.org/~davem/net-next.html
+
+Q: I made a BPF verifier change, do I need to add test cases for
+   BPF kernel selftests?
+
+A: If the patch has changes to the behavior of the verifier, then yes,
+   it is absolutely necessary to add test cases to the BPF kernel
+   selftests suite. If they are not present and we think they are
+   needed, then we might ask for them before accepting any changes.
+
+   In particular, test_verifier.c tracks a high number of BPF test
+   cases, including a lot of corner cases that the LLVM BPF back end may
+   generate out of the restricted C code. Adding test cases is therefore
+   absolutely crucial to make sure future changes do not accidentally
+   affect prior use-cases. In other words, treat those test cases as a
+   contract: verifier behavior that is not tracked in test_verifier.c
+   could potentially be subject to change.
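+
+   As a sketch (kernel config prerequisites aside), the selftests can be
+   built and run from the kernel source tree like this:
+
+     $ make -C tools/testing/selftests/bpf
+     $ cd tools/testing/selftests/bpf && sudo ./test_verifier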
+
+Q: When should I add code to samples/bpf/ and when to BPF kernel
+   selftests?
+
+A: In general, we prefer additions to BPF kernel selftests rather than
+   samples/bpf/. The rationale is very simple: kernel selftests are
+   regularly run by various bots to test for kernel regressions.
+
+   The more test cases we add to BPF selftests, the better the coverage
+   and the less likely it is that those could accidentally break. This is
+   not to say that BPF kernel selftests cannot also demonstrate how a
+   specific feature can be used.
+
+   That said, samples/bpf/ may be a good place for people to get started,
+   so it might be advisable that simple demos of features could go into
+   samples/bpf/, but advanced functional and corner-case testing rather
+   into kernel selftests.
+
+   If your sample looks like a test case, then go for BPF kernel selftests
+   instead!
+
+Q: When should I add code to the bpftool?
+
+A: The main purpose of bpftool (under tools/bpf/bpftool/) is to provide
+   a central user space tool for debugging and introspection of BPF programs
+   and maps that are active in the kernel. If UAPI changes related to BPF
+   allow dumping additional information about programs or maps, then
+   bpftool should be extended as well to support dumping them.
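+
+   For instance, currently loaded programs and maps can be listed with
+   the following commands (subcommand availability depends on the
+   bpftool version):
+
+     # bpftool prog show
+     # bpftool map show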
+
+Q: When should I add code to iproute2's BPF loader?
+
+A: For UAPI changes related to the XDP or tc layer (e.g. cls_bpf), the
+   convention is that those control-path related changes are added to
+   iproute2's BPF loader as well from user space side. This is not only
+   useful to have UAPI changes properly designed to be usable, but also
+   to make those changes available to a wider user base of major
+   downstream distributions.
+
+Q: Do you accept patches as well for iproute2's BPF loader?
+
+A: Patches for iproute2's BPF loader have to be sent to:
+
+     netdev@vger.kernel.org
+
+   While those patches are not processed by the BPF kernel maintainers,
+   please keep the maintainers in Cc as well, so the changes can be
+   reviewed.
+
+   The official git repository for iproute2 is run by Stephen Hemminger
+   and can be found at:
+
+     https://git.kernel.org/pub/scm/linux/kernel/git/shemminger/iproute2.git/
+
+   The patches need to have a subject prefix of '[PATCH iproute2 master]'
+   or '[PATCH iproute2 net-next]'. 'master' or 'net-next' describes the
+   target branch the patch should be applied to. Meaning, if the kernel
+   changes went into the net-next kernel tree, then the related iproute2
+   changes need to go into the iproute2 net-next branch, otherwise they
+   can be targeted at the master branch. The iproute2 net-next branch will get
+   merged into the master branch after the current iproute2 version from
+   master has been released.
+
+   Like BPF, the patches end up in patchwork under the netdev project and
+   are delegated to 'shemminger' for further processing:
+
+     http://patchwork.ozlabs.org/project/netdev/list/?delegate=389
+
+Q: What is the minimum requirement before I submit my BPF patches?
+
+A: When submitting patches, always take the time and properly test your
+   patches *prior* to submission. Never rush them! If maintainers find
+   that your patches have not been properly tested, it is a good way to
+   get them grumpy. Testing patch submissions is a hard requirement!
+
+   Note, fixes that go to bpf tree *must* have a Fixes: tag included. The
+   same applies to fixes that target bpf-next, where the affected commit
+   is in net-next (or in some cases bpf-next). The Fixes: tag is crucial
+   in order to identify follow-up commits and tremendously helps for people
+   having to do backporting, so it is a must have!
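+
+   The tag format follows the usual kernel convention; the commit id in
+   this sketch is purely illustrative:
+
+     Fixes: 1234567890ab ("bpf: one-line summary of the offending commit")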
+
+   We also don't accept patches with an empty commit message. Take your
+   time and properly write up a high quality commit message, it is
+   essential!
+
+   Think about it this way: other developers looking at your code a month
+   from now need to understand *why* a certain change has been done that
+   way, and whether there were flaws in the analysis or assumptions
+   that the original author made. Thus providing a proper rationale and
+   describing the use-case for the changes is a must.
+
+   Patch submissions with >1 patch must have a cover letter which includes
+   a high level description of the series. This high level summary will
+   then be placed into the merge commit by the BPF maintainers such that
+   it is also accessible from the git log for future reference.
+
+Q: What do I need to consider when adding a new instruction or feature
+   that would require BPF JIT and/or LLVM integration as well?
+
+A: We try hard to keep all BPF JITs up to date such that the same user
+   experience can be guaranteed when running BPF programs on different
+   architectures without having the program punt to the less efficient
+   interpreter in case the in-kernel BPF JIT is enabled.
+
+   If you are unable to implement or test the required JIT changes for
+   certain architectures, please work together with the related BPF JIT
+   developers in order to get the feature implemented in a timely manner.
+   Please refer to the git log (arch/*/net/) to locate the necessary
+   people for helping out.
+
+   Also always make sure to add BPF test cases (e.g. test_bpf.c and
+   test_verifier.c) for new instructions, so that they can receive
+   broad test coverage and help with run-time testing of the various
+   BPF JITs.
+
+   In case of new BPF instructions, once the changes have been accepted
+   into the Linux kernel, please implement support into LLVM's BPF back
+   end. See LLVM section below for further information.
+
+Stable submission:
+------------------
+
+Q: I need a specific BPF commit in stable kernels. What should I do?
+
+A: In case you need a specific fix in stable kernels, first check whether
+   the commit has already been applied in the related linux-*.y branches:
+
+     https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux-stable.git/
+
+   If that is not the case, then drop an email to the BPF maintainers with
+   the netdev kernel mailing list in Cc and ask for the fix to be queued up:
+
+     netdev@vger.kernel.org
+
+   The process in general is the same as on netdev itself, see also the
+   netdev FAQ document.
+
+Q: Do you also backport to kernels not currently maintained as stable?
+
+A: No. If you need a specific BPF commit in kernels that are currently not
+   maintained by the stable maintainers, then you are on your own.
+
+   The current stable and longterm stable kernels are all listed here:
+
+     https://www.kernel.org/
+
+Q: The BPF patch I am about to submit needs to go to stable as well. What
+   should I do?
+
+A: The same rules apply as with netdev patch submissions in general, see
+   netdev FAQ under:
+
+     Documentation/networking/netdev-FAQ.txt
+
+   Never add "Cc: stable@vger.kernel.org" to the patch description, but
+   ask the BPF maintainers to queue the patches instead. This can be done
+   with a note, for example, under the "---" part of the patch which does
+   not go into the git log. Alternatively, this can be done as a simple
+   request by mail instead.
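+
+   A minimal sketch of such a note (the name is hypothetical; text below
+   the "---" separator does not end up in the git log):
+
+     Signed-off-by: Jane Developer <jane@example.com>
+     ---
+     Hi Alexei, Daniel, please also queue this one up for stable, thanks!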
+
+Q: Where do I find currently queued BPF patches that will be submitted
+   to stable?
+
+A: Once patches that fix critical bugs have been applied to the bpf tree,
+   they are queued up for stable submission under:
+
+     http://patchwork.ozlabs.org/bundle/bpf/stable/?state=*
+
+   They will be on hold there at minimum until the related commit has made
+   its way into the mainline kernel tree.
+
+   After having been under broader exposure, the queued patches will be
+   submitted by the BPF maintainers to the stable maintainers.
+
+Testing patches:
+----------------
+
+Q: Which BPF kernel selftests version should I run my kernel against?
+
+A: If you run a kernel xyz, then always run the BPF kernel selftests from
+   that kernel xyz as well. Do not expect that the BPF selftests from the
+   latest mainline tree will pass all the time.
+
+   In particular, test_bpf.c and test_verifier.c have a large number of
+   test cases and are constantly updated with new BPF test sequences, or
+   existing ones are adapted to verifier changes, e.g. due to the verifier
+   becoming smarter and being able to better track certain things.
+
+LLVM:
+-----
+
+Q: Where do I find LLVM with BPF support?
+
+A: The BPF back end for LLVM has been upstream in LLVM since version 3.7.1.
+
+   All major distributions these days ship LLVM with BPF back end enabled,
+   so for the majority of use-cases it is not required to compile LLVM by
+   hand anymore, just install the distribution provided package.
+
+   LLVM's static compiler lists the supported targets through 'llc --version';
+   make sure the BPF targets are listed. Example:
+
+     $ llc --version
+     LLVM (http://llvm.org/):
+       LLVM version 6.0.0svn
+       Optimized build.
+       Default target: x86_64-unknown-linux-gnu
+       Host CPU: skylake
+
+       Registered Targets:
+         bpf    - BPF (host endian)
+         bpfeb  - BPF (big endian)
+         bpfel  - BPF (little endian)
+         x86    - 32-bit X86: Pentium-Pro and above
+         x86-64 - 64-bit X86: EM64T and AMD64
+
+   Developers who want to utilize the latest features added to LLVM's
+   BPF back end are advised to run the latest LLVM releases. Support
+   for new BPF kernel features such as additions to the BPF instruction
+   set is often developed for the kernel and LLVM together.
+
+   All LLVM releases can be found at: http://releases.llvm.org/
+
+Q: Got it, so how do I build LLVM manually anyway?
+
+A: You need cmake and gcc-c++ as build requisites for LLVM. Once you have
+   that set up, proceed with building the latest LLVM and clang version
+   from the git repositories:
+
+     $ git clone http://llvm.org/git/llvm.git
+     $ cd llvm/tools
+     $ git clone --depth 1 http://llvm.org/git/clang.git
+     $ cd ..; mkdir build; cd build
+     $ cmake .. -DLLVM_TARGETS_TO_BUILD="BPF;X86" \
+                -DBUILD_SHARED_LIBS=OFF           \
+                -DCMAKE_BUILD_TYPE=Release        \
+                -DLLVM_BUILD_RUNTIME=OFF
+     $ make -j $(getconf _NPROCESSORS_ONLN)
+
+   The built binaries can then be found in the build/bin/ directory, to
+   which you can point your PATH variable.
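+
+   As a quick sanity check of the freshly built toolchain (prog.c here
+   is a placeholder for any small restricted C file), compile for the
+   BPF target:
+
+     $ export PATH="$PWD/bin:$PATH"
+     $ clang -O2 -target bpf -c prog.c -o prog.o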
+
+Q: Should I notify BPF kernel maintainers about issues in LLVM's BPF code
+   generation back end or about LLVM generated code that the verifier
+   refuses to accept?
+
+A: Yes, please do! LLVM's BPF back end is a key piece of the whole BPF
+   infrastructure and it ties deeply into verification of programs from the
+   kernel side. Therefore, any issues on either side need to be investigated
+   and fixed whenever necessary.
+
+   Therefore, please make sure to bring them up at netdev kernel mailing
+   list and Cc BPF maintainers for LLVM and kernel bits:
+
+     Yonghong Song <yhs@fb.com>
+     Alexei Starovoitov <ast@kernel.org>
+     Daniel Borkmann <daniel@iogearbox.net>
+
+   LLVM also has an issue tracker where BPF related bugs can be found:
+
+     https://bugs.llvm.org/buglist.cgi?quicksearch=bpf
+
+   However, it is better to reach out through the mailing lists with the
+   maintainers in Cc.
+
+Q: I have added a new BPF instruction to the kernel, how can I integrate
+   it into LLVM?
+
+A: LLVM has a -mcpu selector for the BPF back end in order to allow the
+   selection of BPF instruction set extensions. By default the 'generic'
+   processor target is used, which is the base instruction set (v1) of BPF.
+
+   LLVM has an option to select -mcpu=probe, where it will probe the host
+   kernel for supported BPF instruction set extensions and select the
+   optimal set automatically.
+
+   For cross-compilation, a specific version can be selected manually
+   as well:
+
+     $ llc -march bpf -mcpu=help
+     Available CPUs for this target:
+
+       generic - Select the generic processor.
+       probe   - Select the probe processor.
+       v1      - Select the v1 processor.
+       v2      - Select the v2 processor.
+     [...]
+
+   BPF instructions newly added to the Linux kernel need to follow the same
+   scheme: bump the instruction set version and implement probing for the
+   extensions, such that -mcpu=probe users can benefit from the optimization
+   transparently when upgrading their kernels.
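+
+   As an illustration (prog.c again being a placeholder), a specific
+   instruction set version can be requested at compile time like this:
+
+     $ clang -O2 -target bpf -emit-llvm -c prog.c -o - | \
+       llc -march=bpf -mcpu=v2 -filetype=obj -o prog.o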
+
+   If you are unable to implement support for the newly added BPF
+   instruction, please reach out to BPF developers for help.
+
+   By the way, the BPF kernel selftests run with -mcpu=probe for better
+   test coverage.
+
+Happy BPF hacking!
diff --git a/Documentation/devicetree/bindings/net/brcm,bcm7445-switch-v4.0.txt b/Documentation/devicetree/bindings/net/brcm,bcm7445-switch-v4.0.txt
index 9a734d8..b7336b9 100644
--- a/Documentation/devicetree/bindings/net/brcm,bcm7445-switch-v4.0.txt
+++ b/Documentation/devicetree/bindings/net/brcm,bcm7445-switch-v4.0.txt
@@ -2,7 +2,10 @@
 
 Required properties:
 
-- compatible: should be "brcm,bcm7445-switch-v4.0" or "brcm,bcm7278-switch-v4.0"
+- compatible: should be one of
+	"brcm,bcm7445-switch-v4.0"
+	"brcm,bcm7278-switch-v4.0"
+	"brcm,bcm7278-switch-v4.8"
 - reg: addresses and length of the register sets for the device, must be 6
   pairs of register addresses and lengths
 - interrupts: interrupts for the devices, must be two interrupts
diff --git a/Documentation/devicetree/bindings/net/can/fsl-flexcan.txt b/Documentation/devicetree/bindings/net/can/fsl-flexcan.txt
index 56d6cc3..bfc0c43 100644
--- a/Documentation/devicetree/bindings/net/can/fsl-flexcan.txt
+++ b/Documentation/devicetree/bindings/net/can/fsl-flexcan.txt
@@ -18,6 +18,12 @@
 
 - xceiver-supply: Regulator that powers the CAN transceiver
 
+- big-endian: This means the registers of the FlexCAN controller are big
+              endian. This is an optional property: if it is not present
+              in the device tree node, the controller is assumed to be
+              little endian; if it is present, the controller is assumed
+              to be big endian.
+
 Example:
 
 	can@1c000 {
diff --git a/Documentation/devicetree/bindings/net/fsl-fec.txt b/Documentation/devicetree/bindings/net/fsl-fec.txt
index f0dc944..2d41fb9 100644
--- a/Documentation/devicetree/bindings/net/fsl-fec.txt
+++ b/Documentation/devicetree/bindings/net/fsl-fec.txt
@@ -59,7 +59,7 @@
 	reg = <0x83fec000 0x4000>;
 	interrupts = <87>;
 	phy-mode = "mii";
-	phy-reset-gpios = <&gpio2 14 0>; /* GPIO2_14 */
+	phy-reset-gpios = <&gpio2 14 GPIO_ACTIVE_LOW>; /* GPIO2_14 */
 	local-mac-address = [00 04 9F 01 1B B9];
 	phy-supply = <&reg_fec_supply>;
 };
@@ -71,7 +71,7 @@
 	reg = <0x83fec000 0x4000>;
 	interrupts = <87>;
 	phy-mode = "mii";
-	phy-reset-gpios = <&gpio2 14 0>; /* GPIO2_14 */
+	phy-reset-gpios = <&gpio2 14 GPIO_ACTIVE_LOW>; /* GPIO2_14 */
 	local-mac-address = [00 04 9F 01 1B B9];
 	phy-supply = <&reg_fec_supply>;
 	phy-handle = <&ethphy>;
diff --git a/Documentation/devicetree/bindings/net/ieee802154/adf7242.txt b/Documentation/devicetree/bindings/net/ieee802154/adf7242.txt
index dea5124..d24172c 100644
--- a/Documentation/devicetree/bindings/net/ieee802154/adf7242.txt
+++ b/Documentation/devicetree/bindings/net/ieee802154/adf7242.txt
@@ -1,7 +1,7 @@
 * ADF7242 IEEE 802.15.4 *
 
 Required properties:
-  - compatible:		should be "adi,adf7242"
+  - compatible:		should be "adi,adf7242", "adi,adf7241"
   - spi-max-frequency:	maximal bus speed (12.5 MHz)
   - reg:		the chipselect index
   - interrupts:		the interrupt generated by the device via pin IRQ1.
diff --git a/Documentation/devicetree/bindings/net/mediatek-net.txt b/Documentation/devicetree/bindings/net/mediatek-net.txt
index 214eaa9..53c13ee 100644
--- a/Documentation/devicetree/bindings/net/mediatek-net.txt
+++ b/Documentation/devicetree/bindings/net/mediatek-net.txt
@@ -28,7 +28,7 @@
 - mediatek,sgmiisys: phandle to the syscon node that handles the SGMII setup
 	which is required for those SoCs equipped with SGMII such as MT7622 SoC.
 - mediatek,pctl: phandle to the syscon node that handles the ports slew rate
-	and driver current
+	and driver current: only for MT2701 and MT7623 SoC
 
 Optional properties:
 - interrupt-parent: Should be the phandle for the interrupt controller
diff --git a/Documentation/devicetree/bindings/net/phy.txt b/Documentation/devicetree/bindings/net/phy.txt
index 77d0b2a..d2169a5 100644
--- a/Documentation/devicetree/bindings/net/phy.txt
+++ b/Documentation/devicetree/bindings/net/phy.txt
@@ -53,6 +53,14 @@
   to ensure the integrated PHY is used. The absence of this property indicates
   the muxers should be configured so that the external PHY is used.
 
+- reset-gpios: The GPIO phandle and specifier for the PHY reset signal.
+
+- reset-assert-us: Delay after the reset was asserted in microseconds.
+  If this property is missing the delay will be skipped.
+
+- reset-deassert-us: Delay after the reset was deasserted in microseconds.
+  If this property is missing the delay will be skipped.
+
 Example:
 
 ethernet-phy@0 {
@@ -60,4 +68,8 @@
 	interrupt-parent = <&PIC>;
 	interrupts = <35 IRQ_TYPE_EDGE_RISING>;
 	reg = <0>;
+
+	reset-gpios = <&gpio1 4 GPIO_ACTIVE_LOW>;
+	reset-assert-us = <1000>;
+	reset-deassert-us = <2000>;
 };
diff --git a/Documentation/devicetree/bindings/net/sff,sfp.txt b/Documentation/devicetree/bindings/net/sff,sfp.txt
index 60e970c..f1c441b 100644
--- a/Documentation/devicetree/bindings/net/sff,sfp.txt
+++ b/Documentation/devicetree/bindings/net/sff,sfp.txt
@@ -3,7 +3,9 @@
 
 Required properties:
 
-- compatible : must be "sff,sfp"
+- compatible : must be one of
+  "sff,sfp" for SFP modules
+  "sff,sff" for soldered down SFF modules
 
 Optional Properties:
 
@@ -11,7 +13,8 @@
   interface
 
 - mod-def0-gpios : GPIO phandle and a specifier of the MOD-DEF0 (AKA Mod_ABS)
-  module presence input gpio signal, active (module absent) high
+  module presence input gpio signal, active (module absent) high. Must
+  not be present for SFF modules
 
 - los-gpios : GPIO phandle and a specifier of the Receiver Loss of Signal
   Indication input gpio signal, active (signal lost) high
@@ -24,10 +27,11 @@
 
 - rate-select0-gpios : GPIO phandle and a specifier of the Rx Signaling Rate
   Select (AKA RS0) output gpio signal, low: low Rx rate, high: high Rx rate
+  Must not be present for SFF modules
 
 - rate-select1-gpios : GPIO phandle and a specifier of the Tx Signaling Rate
   Select (AKA RS1) output gpio signal (SFP+ only), low: low Tx rate, high:
-  high Tx rate
+  high Tx rate. Must not be present for SFF modules
 
 Example #1: Direct serdes to SFP connection
 
diff --git a/Documentation/devicetree/bindings/net/socionext,uniphier-ave4.txt b/Documentation/devicetree/bindings/net/socionext,uniphier-ave4.txt
new file mode 100644
index 0000000..270ea4e
--- /dev/null
+++ b/Documentation/devicetree/bindings/net/socionext,uniphier-ave4.txt
@@ -0,0 +1,48 @@
+* Socionext AVE ethernet controller
+
+This describes the devicetree bindings for AVE ethernet controller
+implemented on Socionext UniPhier SoCs.
+
+Required properties:
+ - compatible: Should be
+	- "socionext,uniphier-pro4-ave4" : for Pro4 SoC
+	- "socionext,uniphier-pxs2-ave4" : for PXs2 SoC
+	- "socionext,uniphier-ld11-ave4" : for LD11 SoC
+	- "socionext,uniphier-ld20-ave4" : for LD20 SoC
+ - reg: Address where registers are mapped and size of region.
+ - interrupts: Should contain the MAC interrupt.
+ - phy-mode: See ethernet.txt in the same directory. Allowed values are
+	"rgmii", "rmii", or "mii", according to the PHY.
+ - phy-handle: Should point to the external phy device.
+	See ethernet.txt file in the same directory.
+ - clocks: A phandle to the clock for the MAC.
+
+Optional properties:
+ - resets: A phandle to the reset control for the MAC.
+ - local-mac-address: See ethernet.txt in the same directory.
+
+Required subnode:
+ - mdio: A container for child nodes representing phy nodes.
+         See phy.txt in the same directory.
+
+Example:
+
+	ether: ethernet@65000000 {
+		compatible = "socionext,uniphier-ld20-ave4";
+		reg = <0x65000000 0x8500>;
+		interrupts = <0 66 4>;
+		phy-mode = "rgmii";
+		phy-handle = <&ethphy>;
+		clocks = <&sys_clk 6>;
+		resets = <&sys_rst 6>;
+		local-mac-address = [00 00 00 00 00 00];
+
+		mdio {
+			#address-cells = <1>;
+			#size-cells = <0>;
+
+			ethphy: ethphy@1 {
+				reg = <1>;
+			};
+		};
+	};
diff --git a/Documentation/devicetree/bindings/net/socionext-netsec.txt b/Documentation/devicetree/bindings/net/socionext-netsec.txt
new file mode 100644
index 0000000..0cff94f
--- /dev/null
+++ b/Documentation/devicetree/bindings/net/socionext-netsec.txt
@@ -0,0 +1,53 @@
+* Socionext NetSec Ethernet Controller IP
+
+Required properties:
+- compatible: Should be "socionext,synquacer-netsec"
+- reg: Address and length of the control register area, followed by the
+       address and length of the EEPROM holding the MAC address and
+       microengine firmware
+- interrupts: Should contain ethernet controller interrupt
+- clocks: phandle to the PHY reference clock
+- clock-names: Should be "phy_ref_clk"
+- phy-mode: See ethernet.txt file in the same directory
+- phy-handle: See ethernet.txt in the same directory.
+
+- mdio device tree subnode: When the NetSec has a phy connected to its local
+		mdio, there must be a device tree subnode with the following
+		required properties:
+
+	- #address-cells: Must be <1>.
+	- #size-cells: Must be <0>.
+
+	For each phy on the mdio bus, there must be a node with the following
+	fields:
+	- compatible: Refer to phy.txt
+	- reg: phy id used to communicate to phy.
+
+Optional properties: (See ethernet.txt file in the same directory)
+- dma-coherent: Boolean property, must only be present if memory
+	accesses performed by the device are cache coherent.
+- local-mac-address: See ethernet.txt in the same directory.
+- mac-address: See ethernet.txt in the same directory.
+- max-speed: See ethernet.txt in the same directory.
+- max-frame-size: See ethernet.txt in the same directory.
+
+Example:
+	eth0: ethernet@522d0000 {
+		compatible = "socionext,synquacer-netsec";
+		reg = <0 0x522d0000 0x0 0x10000>, <0 0x10000000 0x0 0x10000>;
+		interrupts = <GIC_SPI 176 IRQ_TYPE_LEVEL_HIGH>;
+		clocks = <&clk_netsec>;
+		clock-names = "phy_ref_clk";
+		phy-mode = "rgmii";
+		max-speed = <1000>;
+		max-frame-size = <9000>;
+		phy-handle = <&phy1>;
+
+		mdio {
+			#address-cells = <1>;
+			#size-cells = <0>;
+			phy1: ethernet-phy@1 {
+				compatible = "ethernet-phy-ieee802.3-c22";
+				reg = <1>;
+			};
+		};
+	};
diff --git a/Documentation/devicetree/bindings/net/ti,wilink-st.txt b/Documentation/devicetree/bindings/net/ti-bluetooth.txt
similarity index 64%
rename from Documentation/devicetree/bindings/net/ti,wilink-st.txt
rename to Documentation/devicetree/bindings/net/ti-bluetooth.txt
index 1649c1f..6d03ff8 100644
--- a/Documentation/devicetree/bindings/net/ti,wilink-st.txt
+++ b/Documentation/devicetree/bindings/net/ti-bluetooth.txt
@@ -1,10 +1,18 @@
-TI WiLink 7/8 (wl12xx/wl18xx) Shared Transport BT/FM/GPS devices
+Texas Instruments Bluetooth Chips
+---------------------------------
+
+This documents the binding structure and common properties for serial
+attached TI Bluetooth devices. The following chips are included in this
+binding:
+
+* TI CC256x Bluetooth devices
+* TI WiLink 7/8 (wl12xx/wl18xx) Shared Transport BT/FM/GPS devices
 
 TI WiLink devices have a UART interface for providing Bluetooth, FM radio,
 and GPS over what's called "shared transport". The shared transport is
 standard BT HCI protocol with additional channels for the other functions.
 
-These devices also have a separate WiFi interface as described in
+TI WiLink devices also have a separate WiFi interface as described in
 wireless/ti,wlcore.txt.
 
 This binding follows the UART slave device binding in
@@ -12,6 +20,7 @@
 
 Required properties:
  - compatible: should be one of the following:
+    "ti,cc2560"
     "ti,wl1271-st"
     "ti,wl1273-st"
     "ti,wl1281-st"
@@ -32,6 +41,9 @@
    See ../clocks/clock-bindings.txt for details.
  - clock-names : Must include the following entry:
    "ext_clock" (External clock provided to the TI combo chip).
+ - nvmem-cells: phandle to nvmem data cell that contains a 6 byte BD address
+   with the most significant byte first (big-endian).
+ - nvmem-cell-names: "bd-address" (required when nvmem-cells is specified)
 
 Example:
 
@@ -43,5 +55,7 @@
 		enable-gpios = <&gpio1 7 GPIO_ACTIVE_HIGH>;
 		clocks = <&clk32k_wl18xx>;
 		clock-names = "ext_clock";
+		nvmem-cells = <&bd_address>;
+		nvmem-cell-names = "bd-address";
 	};
 };
diff --git a/Documentation/devicetree/bindings/net/wireless/mediatek,mt76.txt b/Documentation/devicetree/bindings/net/wireless/mediatek,mt76.txt
new file mode 100644
index 0000000..0c17a0e
--- /dev/null
+++ b/Documentation/devicetree/bindings/net/wireless/mediatek,mt76.txt
@@ -0,0 +1,32 @@
+* MediaTek mt76xx devices
+
+This node provides properties for configuring the MediaTek mt76xx wireless
+device. The node is expected to be specified as a child node of the PCI
+controller to which the wireless chip is connected.
+
+Optional properties:
+
+- mac-address: See ethernet.txt in the parent directory
+- local-mac-address: See ethernet.txt in the parent directory
+- ieee80211-freq-limit: See ieee80211.txt
+- mediatek,mtd-eeprom: Specify a MTD partition + offset containing EEPROM data
+
+Optional nodes:
+- led: Properties for a connected LED
+  Optional properties:
+    - led-sources: See Documentation/devicetree/bindings/leds/common.txt
+
+&pcie {
+	pcie0 {
+		wifi@0,0 {
+			compatible = "mediatek,mt76";
+			reg = <0x0000 0 0 0 0>;
+			ieee80211-freq-limit = <5000000 6000000>;
+			mediatek,mtd-eeprom = <&factory 0x8000>;
+
+			led {
+				led-sources = <2>;
+			};
+		};
+	};
+};
diff --git a/Documentation/devicetree/bindings/net/wireless/qcom,ath10k.txt b/Documentation/devicetree/bindings/net/wireless/qcom,ath10k.txt
index 74d7f0a..3d2a031 100644
--- a/Documentation/devicetree/bindings/net/wireless/qcom,ath10k.txt
+++ b/Documentation/devicetree/bindings/net/wireless/qcom,ath10k.txt
@@ -41,6 +41,9 @@
 - qcom,msi_addr: MSI interrupt address.
 - qcom,msi_base: Base value to add before writing MSI data into
 		MSI address register.
+- qcom,ath10k-calibration-variant: string to search for in the board-2.bin
+				   variant list with the same bus and device
+				   specific ids
 - qcom,ath10k-calibration-data : calibration data + board specific data
 				 as an array, the length can vary between
 				 hw versions.
diff --git a/Documentation/networking/00-INDEX b/Documentation/networking/00-INDEX
index 7a79b35..f5d642c 100644
--- a/Documentation/networking/00-INDEX
+++ b/Documentation/networking/00-INDEX
@@ -228,6 +228,8 @@
 	- general info on X.25 development.
 x25-iface.txt
 	- description of the X.25 Packet Layer to LAPB device interface.
+xfrm_device.txt
+	- description of XFRM offload API
 xfrm_proc.txt
 	- description of the statistics package for XFRM.
 xfrm_sync.txt
diff --git a/Documentation/networking/batman-adv.rst b/Documentation/networking/batman-adv.rst
index a342b2c..245fb6c 100644
--- a/Documentation/networking/batman-adv.rst
+++ b/Documentation/networking/batman-adv.rst
@@ -1,3 +1,5 @@
+.. SPDX-License-Identifier: GPL-2.0
+
 ==========
 batman-adv
 ==========
diff --git a/Documentation/networking/dsa/dsa.txt b/Documentation/networking/dsa/dsa.txt
index b8b4075..25170ad 100644
--- a/Documentation/networking/dsa/dsa.txt
+++ b/Documentation/networking/dsa/dsa.txt
@@ -385,11 +385,6 @@
   avoid relying on what a previous software agent such as a bootloader/firmware
   may have previously configured.
 
-- set_addr: Some switches require the programming of the management interface's
-  Ethernet MAC address, switch drivers can also disable ageing of MAC addresses
-  on the management interface and "hardcode"/"force" this MAC address for the
-  CPU/management interface as an optimization
-
 PHY devices and link management
 -------------------------------
 
diff --git a/Documentation/networking/ieee802154.txt b/Documentation/networking/ieee802154.txt
index 057e9fd..e74d8e1 100644
--- a/Documentation/networking/ieee802154.txt
+++ b/Documentation/networking/ieee802154.txt
@@ -97,6 +97,46 @@
  - void ieee802154_unregister_hw(struct ieee802154_hw *hw):
    freeing registered PHY
 
+ - void ieee802154_rx_irqsafe(struct ieee802154_hw *hw, struct sk_buff *skb,
+                              u8 lqi):
+   telling the 802.15.4 module there is a new received frame in the skb
+   with the RF Link Quality Indicator (LQI) from the hardware device
+
+ - void ieee802154_xmit_complete(struct ieee802154_hw *hw, struct sk_buff *skb,
+                                 bool ifs_handling):
+   telling the 802.15.4 module the frame in the skb has been or is going
+   to be transmitted through the hardware device
+
+The device driver must implement at least the following callbacks in the
+IEEE 802.15.4 operations structure:
+struct ieee802154_ops {
+	...
+	int	(*start)(struct ieee802154_hw *hw);
+	void	(*stop)(struct ieee802154_hw *hw);
+	...
+	int	(*xmit_async)(struct ieee802154_hw *hw, struct sk_buff *skb);
+	int	(*ed)(struct ieee802154_hw *hw, u8 *level);
+	int	(*set_channel)(struct ieee802154_hw *hw, u8 page, u8 channel);
+	...
+};
+
+ - int start(struct ieee802154_hw *hw):
+   handler that the 802.15.4 module calls for the hardware device
+   initialization.
+
+ - void stop(struct ieee802154_hw *hw):
+   handler that the 802.15.4 module calls for the hardware device cleanup.
+
+ - int xmit_async(struct ieee802154_hw *hw, struct sk_buff *skb):
+   handler that the 802.15.4 module calls for each frame in the skb going
+   to be transmitted through the hardware device.
+
+ - int ed(struct ieee802154_hw *hw, u8 *level):
+   handler that the 802.15.4 module calls for Energy Detection from the
+   hardware device.
+
+ - int set_channel(struct ieee802154_hw *hw, u8 page, u8 channel):
+   handler that the 802.15.4 module calls to set the radio for listening
+   on a specific channel of the hardware device.
+
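+A minimal registration sequence for such a driver might look like this
+(foo_priv and foo_ops are hypothetical names; error handling elided):
+
+	struct ieee802154_hw *hw;
+	int ret;
+
+	hw = ieee802154_alloc_hw(sizeof(struct foo_priv), &foo_ops);
+	/* ... set hw->parent, supported channels, address filters ... */
+	ret = ieee802154_register_hw(hw);
+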
 Moreover IEEE 802.15.4 device operations structure should be filled.
 
 Fake drivers
diff --git a/Documentation/networking/ip-sysctl.txt b/Documentation/networking/ip-sysctl.txt
index 46c7e10..3f2c40d 100644
--- a/Documentation/networking/ip-sysctl.txt
+++ b/Documentation/networking/ip-sysctl.txt
@@ -606,6 +606,7 @@
 	This time period will grow exponentially when more blackhole issues
 	get detected right after Fastopen is re-enabled and will reset to
 	initial value when the blackhole issue goes away.
+	A value of 0 disables the blackhole detection.
 	By default, it is set to 1hr.
 
 tcp_syn_retries - INTEGER
diff --git a/Documentation/networking/kapi.rst b/Documentation/networking/kapi.rst
index 580289f..f03ae64 100644
--- a/Documentation/networking/kapi.rst
+++ b/Documentation/networking/kapi.rst
@@ -145,3 +145,27 @@
 
 .. kernel-doc:: drivers/net/phy/mdio_bus.c
    :internal:
+
+PHYLINK
+-------
+
+  PHYLINK interfaces traditional network drivers with PHYLIB, fixed-links,
+  and SFF modules (eg, hot-pluggable SFP) that may contain PHYs.  PHYLINK
+  provides management of the link state and link modes.
+
+.. kernel-doc:: include/linux/phylink.h
+   :internal:
+
+.. kernel-doc:: drivers/net/phy/phylink.c
+
+SFP support
+-----------
+
+.. kernel-doc:: drivers/net/phy/sfp-bus.c
+   :internal:
+
+.. kernel-doc:: include/linux/sfp.h
+   :internal:
+
+.. kernel-doc:: drivers/net/phy/sfp-bus.c
+   :export:
diff --git a/Documentation/networking/netdev-features.txt b/Documentation/networking/netdev-features.txt
index 7413eb0..c77f9d5 100644
--- a/Documentation/networking/netdev-features.txt
+++ b/Documentation/networking/netdev-features.txt
@@ -163,3 +163,12 @@
 frames (such as bad FCS, etc).  This can be helpful when sniffing a link with
 bad packets on it.  Some NICs may receive more packets if also put into normal
 PROMISC mode.
+
+*  rx-gro-hw
+
+This requests that the NIC enables Hardware GRO (generic receive offload).
+Hardware GRO is basically the exact reverse of TSO, and is generally
+stricter than Hardware LRO.  A packet stream merged by Hardware GRO must
+be re-segmentable by GSO or TSO back to the exact original packet stream.
+Hardware GRO is dependent on RXCSUM since every packet successfully merged
+by hardware must also have the checksum verified by hardware.
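+
+As a usage sketch (eth0 being a placeholder interface name), the feature
+can be toggled through ethtool:
+
+  # ethtool -K eth0 rx-gro-hw on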
diff --git a/Documentation/networking/xfrm_device.txt b/Documentation/networking/xfrm_device.txt
new file mode 100644
index 0000000..2d9d588c
--- /dev/null
+++ b/Documentation/networking/xfrm_device.txt
@@ -0,0 +1,132 @@
+
+===============================================
+XFRM device - offloading the IPsec computations
+===============================================
+Shannon Nelson <shannon.nelson@oracle.com>
+
+
+Overview
+========
+
+IPsec is a useful feature for securing network traffic, but the
+computational cost is high: a 10Gbps link can easily be brought down
+to under 1Gbps, depending on the traffic and link configuration.
+Luckily, there are NICs that offer a hardware based IPsec offload which
+can radically increase throughput and decrease CPU utilization.  The XFRM
+Device interface allows NIC drivers to offer to the stack access to the
+hardware offload.
+
+Userland access to the offload is typically through a system such as
+libreswan or KAME/raccoon, but the iproute2 'ip xfrm' command set can
+be handy when experimenting.  An example command might look something
+like this:
+
+  ip x s add proto esp dst 14.0.0.70 src 14.0.0.52 spi 0x07 mode transport \
+     reqid 0x07 replay-window 32 \
+     aead 'rfc4106(gcm(aes))' 0x44434241343332312423222114131211f4f3f2f1 128 \
+     sel src 14.0.0.52/24 dst 14.0.0.70/24 proto tcp \
+     offload dev eth4 dir in
+
+Yes, that's ugly, but that's what shell scripts and/or libreswan are for.
+
+
+
+Callbacks to implement
+======================
+
+/* from include/linux/netdevice.h */
+struct xfrmdev_ops {
+	int	(*xdo_dev_state_add) (struct xfrm_state *x);
+	void	(*xdo_dev_state_delete) (struct xfrm_state *x);
+	void	(*xdo_dev_state_free) (struct xfrm_state *x);
+	bool	(*xdo_dev_offload_ok) (struct sk_buff *skb,
+				       struct xfrm_state *x);
+};
+
+The NIC driver offering IPsec offload will need to implement these
+callbacks to make the offload available to the network stack's
+XFRM subsystem.  Additionally, the feature bits NETIF_F_HW_ESP and
+NETIF_F_HW_ESP_TX_CSUM will signal the availability of the offload.
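+
+An example of wiring these callbacks up (the foo_* names here are
+hypothetical, not taken from a real driver):
+
+	static const struct xfrmdev_ops foo_xfrmdev_ops = {
+		.xdo_dev_state_add	= foo_ipsec_add_sa,
+		.xdo_dev_state_delete	= foo_ipsec_del_sa,
+		.xdo_dev_state_free	= foo_ipsec_free_sa,
+		.xdo_dev_offload_ok	= foo_ipsec_offload_ok,
+	};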
+
+
+
+Flow
+====
+
+At probe time and before the call to register_netdev(), the driver should
+set up local data structures and XFRM callbacks, and set the feature bits.
+The XFRM code's listener will finish the setup on NETDEV_REGISTER.
+
+		adapter->netdev->xfrmdev_ops = &ixgbe_xfrmdev_ops;
+		adapter->netdev->features |= NETIF_F_HW_ESP;
+		adapter->netdev->hw_enc_features |= NETIF_F_HW_ESP;
+
+When new SAs are set up with a request for the "offload" feature, the
+driver's xdo_dev_state_add() will be given the new SA to be offloaded
+and an indication of whether it is for Rx or Tx.  The driver should
+	- verify the algorithm is supported for offloads
+	- store the SA information (key, salt, target-ip, protocol, etc)
+	- enable the HW offload of the SA
+
+The driver can also set an offload_handle in the SA, an opaque void pointer
+that can be used to convey context into the fast-path offload requests.
+
+		xs->xso.offload_handle = context;
+
+
+When the network stack is preparing an IPsec packet for an SA that has
+been set up for offload, it first calls into xdo_dev_offload_ok() with
+the skb and the intended offload state to ask the driver if the offload
+will be serviceable.  This can check the packet information to be sure
+the offload can be supported (e.g. IPv4 or IPv6, no IPv4 options, etc)
+and return true or false to signify its support.
+
+When ready to send, the driver needs to inspect the Tx packet for the
+offload information, including the opaque context, and set up the packet
+send accordingly.
+
+		xs = xfrm_input_state(skb);
+		context = xs->xso.offload_handle;
+		set up HW for send
+
+The stack has already inserted the appropriate IPsec headers in the
+packet data; the offload just needs to do the encryption and fix up the
+header values.
+
+
+When a packet is received and the HW has indicated that it offloaded a
+decryption, the driver needs to add a reference to the decoded SA into
+the packet's skb.  At this point the data should be decrypted but the
+IPsec headers are still in the packet data; they are removed later up
+the stack in xfrm_input().
+
+	find and hold the SA that was used for the Rx skb
+		get spi, protocol, and destination IP from packet headers
+		xs = find xs from (spi, protocol, dest_IP)
+		xfrm_state_hold(xs);
+
+	store the state information into the skb
+		skb->sp = secpath_dup(skb->sp);
+		skb->sp->xvec[skb->sp->len++] = xs;
+		skb->sp->olen++;
+
+	indicate the success and/or error status of the offload
+		xo = xfrm_offload(skb);
+		xo->flags = CRYPTO_DONE;
+		xo->status = crypto_status;
+
+	hand the packet to napi_gro_receive() as usual
+
+
+When the SA is removed by the user, the driver's xdo_dev_state_delete()
+is asked to disable the offload.  Later, xdo_dev_state_free() is called
+from a garbage collection routine after all reference counts to the state
+have been removed and any remaining resources can be cleared for the
+offload state.  How these are used by the driver will depend on specific
+hardware needs.
+
+As a netdev is set to DOWN the XFRM stack's netdev listener will call
+xdo_dev_state_delete() and xdo_dev_state_free() on any remaining offloaded
+states.
+
+
diff --git a/Documentation/networking/xfrm_proc.txt b/Documentation/networking/xfrm_proc.txt
index d0d8baf..2eae619 100644
--- a/Documentation/networking/xfrm_proc.txt
+++ b/Documentation/networking/xfrm_proc.txt
@@ -5,13 +5,15 @@
 
 Transformation Statistics
 -------------------------
-xfrm_proc is a statistics shown factor dropped by transformation
-for developer.
-It is a counter designed from current transformation source code
-and defined like linux private MIB.
 
-Inbound statistics
-~~~~~~~~~~~~~~~~~~
+The xfrm_proc code is a set of statistics showing numbers of packets
+dropped by the transformation code and why.  These counters are defined
+as part of the linux private MIB.  These counters can be viewed in
+/proc/net/xfrm_stat.
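+
+For example (layout approximated, counter values illustrative):
+
+  # cat /proc/net/xfrm_stat
+  XfrmInError                 0
+  XfrmInBufferError           0
+  [...]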
+
+
+Inbound errors
+~~~~~~~~~~~~~~
 XfrmInError:
 	All errors which is not matched others
 XfrmInBufferError:
@@ -46,6 +48,10 @@
 	Policy discards
 XfrmInPolError:
 	Policy error
+XfrmAcquireError:
+	State hasn't been fully acquired before use
+XfrmFwdHdrError:
+	Forward routing of a packet is not allowed
 
 Outbound errors
 ~~~~~~~~~~~~~~~
@@ -72,3 +78,5 @@
 	Policy is dead
 XfrmOutPolError:
 	Policy error
+XfrmOutStateInvalid:
+	State is invalid, perhaps expired
diff --git a/Documentation/sysctl/net.txt b/Documentation/sysctl/net.txt
index b67044a..35c62f5 100644
--- a/Documentation/sysctl/net.txt
+++ b/Documentation/sysctl/net.txt
@@ -95,7 +95,9 @@
 --------------
 
 The maximum number of packets that kernel can handle on a NAPI interrupt,
-it's a Per-CPU variable.
+it's a Per-CPU variable. For drivers that support LRO or GRO_HW, a hardware
+aggregated packet is counted as one packet in this context.
+
 Default: 64
 
 dev_weight_rx_bias
diff --git a/MAINTAINERS b/MAINTAINERS
index d76af75..e22ca0a 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -2564,6 +2564,7 @@
 F:	Documentation/ABI/testing/sysfs-class-net-batman-adv
 F:	Documentation/ABI/testing/sysfs-class-net-mesh
 F:	Documentation/networking/batman-adv.rst
+F:	include/uapi/linux/batadv_packet.h
 F:	include/uapi/linux/batman_adv.h
 F:	net/batman-adv/
 
@@ -2687,7 +2688,6 @@
 
 BLUETOOTH DRIVERS
 M:	Marcel Holtmann <marcel@holtmann.org>
-M:	Gustavo Padovan <gustavo@padovan.org>
 M:	Johan Hedberg <johan.hedberg@gmail.com>
 L:	linux-bluetooth@vger.kernel.org
 W:	http://www.bluez.org/
@@ -2698,7 +2698,6 @@
 
 BLUETOOTH SUBSYSTEM
 M:	Marcel Holtmann <marcel@holtmann.org>
-M:	Gustavo Padovan <gustavo@padovan.org>
 M:	Johan Hedberg <johan.hedberg@gmail.com>
 L:	linux-bluetooth@vger.kernel.org
 W:	http://www.bluez.org/
@@ -2723,12 +2722,16 @@
 M:	Daniel Borkmann <daniel@iogearbox.net>
 L:	netdev@vger.kernel.org
 L:	linux-kernel@vger.kernel.org
+T:	git git://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf.git
+T:	git git://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf-next.git
 S:	Supported
 F:	arch/x86/net/bpf_jit*
 F:	Documentation/networking/filter.txt
 F:	Documentation/bpf/
 F:	include/linux/bpf*
 F:	include/linux/filter.h
+F:	include/trace/events/bpf.h
+F:	include/trace/events/xdp.h
 F:	include/uapi/linux/bpf*
 F:	include/uapi/linux/filter.h
 F:	kernel/bpf/
@@ -4939,6 +4942,11 @@
 F:	lib/dynamic_debug.c
 F:	include/linux/dynamic_debug.h
 
+DYNAMIC INTERRUPT MODERATION
+M:	Tal Gilboa <talgi@mellanox.com>
+S:	Maintained
+F:	include/linux/net_dim.h
+
 DZ DECSTATION DZ11 SERIAL DRIVER
 M:	"Maciej W. Rozycki" <macro@linux-mips.org>
 S:	Maintained
@@ -8721,6 +8729,13 @@
 S:	Maintained
 F:	drivers/net/ethernet/mediatek/
 
+MEDIATEK SWITCH DRIVER
+M:	Sean Wang <sean.wang@mediatek.com>
+L:	netdev@vger.kernel.org
+S:	Maintained
+F:	drivers/net/dsa/mt7530.*
+F:	net/dsa/tag_mtk.c
+
 MEDIATEK JPEG DRIVER
 M:	Rick Chang <rick.chang@mediatek.com>
 M:	Bin Liu <bin.liu@mediatek.com>
@@ -9597,6 +9612,11 @@
 L:	linux-wireless@vger.kernel.org
 Q:	http://patchwork.kernel.org/project/linux-wireless/list/
 
+NETDEVSIM
+M:	Jakub Kicinski <jakub.kicinski@netronome.com>
+S:	Maintained
+F:	drivers/net/netdevsim/*
+
 NETXEN (1/10) GbE SUPPORT
 M:	Manish Chopra <manish.chopra@cavium.com>
 M:	Rahul Verma <rahul.verma@cavium.com>
@@ -12627,6 +12647,13 @@
 F:	include/linux/raid/
 F:	include/uapi/linux/raid/
 
+SOCIONEXT (SNI) NETSEC NETWORK DRIVER
+M:	Jassi Brar <jaswinder.singh@linaro.org>
+L:	netdev@vger.kernel.org
+S:	Maintained
+F:	drivers/net/ethernet/socionext/netsec.c
+F:	Documentation/devicetree/bindings/net/socionext-netsec.txt
+
 SONIC NETWORK DRIVER
 M:	Thomas Bogendoerfer <tsbogend@alpha.franken.de>
 L:	netdev@vger.kernel.org
diff --git a/arch/Kconfig b/arch/Kconfig
index 400b9e1b..d3f4aaf 100644
--- a/arch/Kconfig
+++ b/arch/Kconfig
@@ -196,6 +196,9 @@
 config HAVE_KPROBES_ON_FTRACE
 	bool
 
+config HAVE_KPROBE_OVERRIDE
+	bool
+
 config HAVE_NMI
 	bool
 
diff --git a/arch/arm/boot/dts/imx25.dtsi b/arch/arm/boot/dts/imx25.dtsi
index 09ce8b8..fcaff1c 100644
--- a/arch/arm/boot/dts/imx25.dtsi
+++ b/arch/arm/boot/dts/imx25.dtsi
@@ -122,7 +122,7 @@
 			};
 
 			can1: can@43f88000 {
-				compatible = "fsl,imx25-flexcan", "fsl,p1010-flexcan";
+				compatible = "fsl,imx25-flexcan";
 				reg = <0x43f88000 0x4000>;
 				interrupts = <43>;
 				clocks = <&clks 75>, <&clks 75>;
@@ -131,7 +131,7 @@
 			};
 
 			can2: can@43f8c000 {
-				compatible = "fsl,imx25-flexcan", "fsl,p1010-flexcan";
+				compatible = "fsl,imx25-flexcan";
 				reg = <0x43f8c000 0x4000>;
 				interrupts = <44>;
 				clocks = <&clks 76>, <&clks 76>;
diff --git a/arch/arm/boot/dts/imx28.dtsi b/arch/arm/boot/dts/imx28.dtsi
index 2f4ebe0..e52e05c 100644
--- a/arch/arm/boot/dts/imx28.dtsi
+++ b/arch/arm/boot/dts/imx28.dtsi
@@ -1038,7 +1038,7 @@
 			};
 
 			can0: can@80032000 {
-				compatible = "fsl,imx28-flexcan", "fsl,p1010-flexcan";
+				compatible = "fsl,imx28-flexcan";
 				reg = <0x80032000 0x2000>;
 				interrupts = <8>;
 				clocks = <&clks 58>, <&clks 58>;
@@ -1047,7 +1047,7 @@
 			};
 
 			can1: can@80034000 {
-				compatible = "fsl,imx28-flexcan", "fsl,p1010-flexcan";
+				compatible = "fsl,imx28-flexcan";
 				reg = <0x80034000 0x2000>;
 				interrupts = <9>;
 				clocks = <&clks 59>, <&clks 59>;
diff --git a/arch/arm/boot/dts/imx35.dtsi b/arch/arm/boot/dts/imx35.dtsi
index 6d5e6a6..1f0e220 100644
--- a/arch/arm/boot/dts/imx35.dtsi
+++ b/arch/arm/boot/dts/imx35.dtsi
@@ -303,7 +303,7 @@
 			};
 
 			can1: can@53fe4000 {
-				compatible = "fsl,imx35-flexcan", "fsl,p1010-flexcan";
+				compatible = "fsl,imx35-flexcan";
 				reg = <0x53fe4000 0x1000>;
 				clocks = <&clks 33>, <&clks 33>;
 				clock-names = "ipg", "per";
@@ -312,7 +312,7 @@
 			};
 
 			can2: can@53fe8000 {
-				compatible = "fsl,imx35-flexcan", "fsl,p1010-flexcan";
+				compatible = "fsl,imx35-flexcan";
 				reg = <0x53fe8000 0x1000>;
 				clocks = <&clks 34>, <&clks 34>;
 				clock-names = "ipg", "per";
diff --git a/arch/arm/boot/dts/imx53.dtsi b/arch/arm/boot/dts/imx53.dtsi
index 84f17f7..85071ff 100644
--- a/arch/arm/boot/dts/imx53.dtsi
+++ b/arch/arm/boot/dts/imx53.dtsi
@@ -536,7 +536,7 @@
 			};
 
 			can1: can@53fc8000 {
-				compatible = "fsl,imx53-flexcan", "fsl,p1010-flexcan";
+				compatible = "fsl,imx53-flexcan";
 				reg = <0x53fc8000 0x4000>;
 				interrupts = <82>;
 				clocks = <&clks IMX5_CLK_CAN1_IPG_GATE>,
@@ -546,7 +546,7 @@
 			};
 
 			can2: can@53fcc000 {
-				compatible = "fsl,imx53-flexcan", "fsl,p1010-flexcan";
+				compatible = "fsl,imx53-flexcan";
 				reg = <0x53fcc000 0x4000>;
 				interrupts = <83>;
 				clocks = <&clks IMX5_CLK_CAN2_IPG_GATE>,
diff --git a/arch/arm/boot/dts/ls1021a-qds.dts b/arch/arm/boot/dts/ls1021a-qds.dts
index 67b4de0..7bb402d 100644
--- a/arch/arm/boot/dts/ls1021a-qds.dts
+++ b/arch/arm/boot/dts/ls1021a-qds.dts
@@ -331,3 +331,19 @@
 &uart1 {
 	status = "okay";
 };
+
+&can0 {
+	status = "okay";
+};
+
+&can1 {
+	status = "okay";
+};
+
+&can2 {
+	status = "disabled";
+};
+
+&can3 {
+	status = "disabled";
+};
diff --git a/arch/arm/boot/dts/ls1021a-twr.dts b/arch/arm/boot/dts/ls1021a-twr.dts
index 44715c8..860b898 100644
--- a/arch/arm/boot/dts/ls1021a-twr.dts
+++ b/arch/arm/boot/dts/ls1021a-twr.dts
@@ -243,3 +243,19 @@
 &uart1 {
 	status = "okay";
 };
+
+&can0 {
+	status = "okay";
+};
+
+&can1 {
+	status = "okay";
+};
+
+&can2 {
+	status = "disabled";
+};
+
+&can3 {
+	status = "disabled";
+};
diff --git a/arch/arm/boot/dts/ls1021a.dtsi b/arch/arm/boot/dts/ls1021a.dtsi
index 9319e1f..7789031 100644
--- a/arch/arm/boot/dts/ls1021a.dtsi
+++ b/arch/arm/boot/dts/ls1021a.dtsi
@@ -730,5 +730,41 @@
 					<0000 0 0 3 &gic GIC_SPI 191 IRQ_TYPE_LEVEL_HIGH>,
 					<0000 0 0 4 &gic GIC_SPI 193 IRQ_TYPE_LEVEL_HIGH>;
 		};
+
+		can0: can@2a70000 {
+			compatible = "fsl,ls1021ar2-flexcan";
+			reg = <0x0 0x2a70000 0x0 0x1000>;
+			interrupts = <GIC_SPI 126 IRQ_TYPE_LEVEL_HIGH>;
+			clocks = <&clockgen 4 1>, <&clockgen 4 1>;
+			clock-names = "ipg", "per";
+			big-endian;
+		};
+
+		can1: can@2a80000 {
+			compatible = "fsl,ls1021ar2-flexcan";
+			reg = <0x0 0x2a80000 0x0 0x1000>;
+			interrupts = <GIC_SPI 127 IRQ_TYPE_LEVEL_HIGH>;
+			clocks = <&clockgen 4 1>, <&clockgen 4 1>;
+			clock-names = "ipg", "per";
+			big-endian;
+		};
+
+		can2: can@2a90000 {
+			compatible = "fsl,ls1021ar2-flexcan";
+			reg = <0x0 0x2a90000 0x0 0x1000>;
+			interrupts = <GIC_SPI 128 IRQ_TYPE_LEVEL_HIGH>;
+			clocks = <&clockgen 4 1>, <&clockgen 4 1>;
+			clock-names = "ipg", "per";
+			big-endian;
+		};
+
+		can3: can@2aa0000 {
+			compatible = "fsl,ls1021ar2-flexcan";
+			reg = <0x0 0x2aa0000 0x0 0x1000>;
+			interrupts = <GIC_SPI 129 IRQ_TYPE_LEVEL_HIGH>;
+			clocks = <&clockgen 4 1>, <&clockgen 4 1>;
+			clock-names = "ipg", "per";
+			big-endian;
+		};
 	};
 };
diff --git a/arch/arm/net/bpf_jit_32.c b/arch/arm/net/bpf_jit_32.c
index c199990..4425189 100644
--- a/arch/arm/net/bpf_jit_32.c
+++ b/arch/arm/net/bpf_jit_32.c
@@ -1824,7 +1824,7 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
 	/* If BPF JIT was not enabled then we must fall back to
 	 * the interpreter.
 	 */
-	if (!bpf_jit_enable)
+	if (!prog->jit_requested)
 		return orig_prog;
 
 	/* If constant blinding was enabled and we failed during blinding
diff --git a/arch/arm64/net/bpf_jit_comp.c b/arch/arm64/net/bpf_jit_comp.c
index ba38d40..acaa935e 100644
--- a/arch/arm64/net/bpf_jit_comp.c
+++ b/arch/arm64/net/bpf_jit_comp.c
@@ -99,6 +99,20 @@ static inline void emit_a64_mov_i64(const int reg, const u64 val,
 	}
 }
 
+static inline void emit_addr_mov_i64(const int reg, const u64 val,
+				     struct jit_ctx *ctx)
+{
+	u64 tmp = val;
+	int shift = 0;
+
+	emit(A64_MOVZ(1, reg, tmp & 0xffff, shift), ctx);
+	for (; shift < 48;) {
+		tmp >>= 16;
+		shift += 16;
+		emit(A64_MOVK(1, reg, tmp & 0xffff, shift), ctx);
+	}
+}
+
 static inline void emit_a64_mov_i(const int is64, const int reg,
 				  const s32 val, struct jit_ctx *ctx)
 {
@@ -603,7 +617,10 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx)
 		const u8 r0 = bpf2a64[BPF_REG_0];
 		const u64 func = (u64)__bpf_call_base + imm;
 
-		emit_a64_mov_i64(tmp, func, ctx);
+		if (ctx->prog->is_func)
+			emit_addr_mov_i64(tmp, func, ctx);
+		else
+			emit_a64_mov_i64(tmp, func, ctx);
 		emit(A64_BLR(tmp), ctx);
 		emit(A64_MOV(1, r0, A64_R(0)), ctx);
 		break;
@@ -835,16 +852,24 @@ static inline void bpf_flush_icache(void *start, void *end)
 	flush_icache_range((unsigned long)start, (unsigned long)end);
 }
 
+struct arm64_jit_data {
+	struct bpf_binary_header *header;
+	u8 *image;
+	struct jit_ctx ctx;
+};
+
 struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
 {
 	struct bpf_prog *tmp, *orig_prog = prog;
 	struct bpf_binary_header *header;
+	struct arm64_jit_data *jit_data;
 	bool tmp_blinded = false;
+	bool extra_pass = false;
 	struct jit_ctx ctx;
 	int image_size;
 	u8 *image_ptr;
 
-	if (!bpf_jit_enable)
+	if (!prog->jit_requested)
 		return orig_prog;
 
 	tmp = bpf_jit_blind_constants(prog);
@@ -858,13 +883,30 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
 		prog = tmp;
 	}
 
+	jit_data = prog->aux->jit_data;
+	if (!jit_data) {
+		jit_data = kzalloc(sizeof(*jit_data), GFP_KERNEL);
+		if (!jit_data) {
+			prog = orig_prog;
+			goto out;
+		}
+		prog->aux->jit_data = jit_data;
+	}
+	if (jit_data->ctx.offset) {
+		ctx = jit_data->ctx;
+		image_ptr = jit_data->image;
+		header = jit_data->header;
+		extra_pass = true;
+		image_size = sizeof(u32) * ctx.idx;
+		goto skip_init_ctx;
+	}
 	memset(&ctx, 0, sizeof(ctx));
 	ctx.prog = prog;
 
 	ctx.offset = kcalloc(prog->len, sizeof(int), GFP_KERNEL);
 	if (ctx.offset == NULL) {
 		prog = orig_prog;
-		goto out;
+		goto out_off;
 	}
 
 	/* 1. Initial fake pass to compute ctx->idx. */
@@ -895,6 +937,7 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
 	/* 2. Now, the actual pass. */
 
 	ctx.image = (__le32 *)image_ptr;
+skip_init_ctx:
 	ctx.idx = 0;
 
 	build_prologue(&ctx);
@@ -920,13 +963,31 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
 
 	bpf_flush_icache(header, ctx.image + ctx.idx);
 
-	bpf_jit_binary_lock_ro(header);
+	if (!prog->is_func || extra_pass) {
+		if (extra_pass && ctx.idx != jit_data->ctx.idx) {
+			pr_err_once("multi-func JIT bug %d != %d\n",
+				    ctx.idx, jit_data->ctx.idx);
+			bpf_jit_binary_free(header);
+			prog->bpf_func = NULL;
+			prog->jited = 0;
+			goto out_off;
+		}
+		bpf_jit_binary_lock_ro(header);
+	} else {
+		jit_data->ctx = ctx;
+		jit_data->image = image_ptr;
+		jit_data->header = header;
+	}
 	prog->bpf_func = (void *)ctx.image;
 	prog->jited = 1;
 	prog->jited_len = image_size;
 
+	if (!prog->is_func || extra_pass) {
 out_off:
-	kfree(ctx.offset);
+		kfree(ctx.offset);
+		kfree(jit_data);
+		prog->aux->jit_data = NULL;
+	}
 out:
 	if (tmp_blinded)
 		bpf_jit_prog_release_other(prog, prog == orig_prog ?
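
The jit_data/extra_pass changes above (mirrored later in this merge for sparc64 and x86) implement the two-call contract the description refers to: the first bpf_int_jit_compile() invocation emits the image but leaves it writable and stashes the context in prog->aux->jit_data, and a second invocation patches the now-known subprog call addresses into an image whose instruction count must not change. A minimal userspace model of that contract; the names (jit_state, compile_pass) are illustrative, not kernel API:

#include <stdio.h>
#include <stdlib.h>
#include <stdbool.h>

struct jit_state {		/* plays the role of arm64_jit_data */
	int  ninsns;		/* image size fixed by the first pass */
	bool have_image;
};

static void compile_pass(struct jit_state *st)
{
	if (!st->have_image) {
		/* Pass 1: emit with placeholder call targets, then stash
		 * the context instead of locking the image read-only. */
		st->ninsns = 42;
		st->have_image = true;
		printf("pass 1: emitted %d insns, image left writable\n",
		       st->ninsns);
		return;
	}
	/* Extra pass: re-emit with real subprog addresses; the size
	 * must match or the image is corrupt (the "multi-func JIT bug"
	 * check above). */
	int ninsns = 42;
	if (ninsns != st->ninsns) {
		fprintf(stderr, "multi-func JIT bug %d != %d\n",
			ninsns, st->ninsns);
		exit(1);
	}
	printf("extra pass: call targets patched, image locked read-only\n");
}

int main(void)
{
	struct jit_state st = { 0 };

	compile_pass(&st);	/* first bpf_int_jit_compile() call */
	compile_pass(&st);	/* fixup pass once subprog addrs are known */
	return 0;
}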
diff --git a/arch/mips/bcm63xx/dev-enet.c b/arch/mips/bcm63xx/dev-enet.c
index e828477..07b4c65 100644
--- a/arch/mips/bcm63xx/dev-enet.c
+++ b/arch/mips/bcm63xx/dev-enet.c
@@ -265,6 +265,14 @@ int __init bcm63xx_enet_register(int unit,
 		dpd->dma_chan_width = ENETDMA_CHAN_WIDTH;
 	}
 
+	if (unit == 0) {
+		dpd->rx_chan = 0;
+		dpd->tx_chan = 1;
+	} else {
+		dpd->rx_chan = 2;
+		dpd->tx_chan = 3;
+	}
+
 	ret = platform_device_register(pdev);
 	if (ret)
 		return ret;
diff --git a/arch/mips/include/asm/mach-bcm63xx/bcm63xx_dev_enet.h b/arch/mips/include/asm/mach-bcm63xx/bcm63xx_dev_enet.h
index c0bd474..da39e4d 100644
--- a/arch/mips/include/asm/mach-bcm63xx/bcm63xx_dev_enet.h
+++ b/arch/mips/include/asm/mach-bcm63xx/bcm63xx_dev_enet.h
@@ -55,6 +55,10 @@ struct bcm63xx_enet_platform_data {
 
 	/* DMA descriptor shift */
 	unsigned int dma_desc_shift;
+
+	/* dma channel ids */
+	int rx_chan;
+	int tx_chan;
 };
 
 /*
diff --git a/arch/mips/net/ebpf_jit.c b/arch/mips/net/ebpf_jit.c
index 962b025..97069a1 100644
--- a/arch/mips/net/ebpf_jit.c
+++ b/arch/mips/net/ebpf_jit.c
@@ -1869,7 +1869,7 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
 	unsigned int image_size;
 	u8 *image_ptr;
 
-	if (!bpf_jit_enable || !cpu_has_mips64r2)
+	if (!prog->jit_requested || !cpu_has_mips64r2)
 		return prog;
 
 	tmp = bpf_jit_blind_constants(prog);
diff --git a/arch/powerpc/boot/dts/fsl/p1010si-post.dtsi b/arch/powerpc/boot/dts/fsl/p1010si-post.dtsi
index af12ead..1b4aafc 100644
--- a/arch/powerpc/boot/dts/fsl/p1010si-post.dtsi
+++ b/arch/powerpc/boot/dts/fsl/p1010si-post.dtsi
@@ -137,12 +137,14 @@
 		compatible = "fsl,p1010-flexcan";
 		reg = <0x1c000 0x1000>;
 		interrupts = <48 0x2 0 0>;
+		big-endian;
 	};
 
 	can1: can@1d000 {
 		compatible = "fsl,p1010-flexcan";
 		reg = <0x1d000 0x1000>;
 		interrupts = <61 0x2 0 0>;
+		big-endian;
 	};
 
 	L2: l2-cache-controller@20000 {
diff --git a/arch/powerpc/net/bpf_jit_comp64.c b/arch/powerpc/net/bpf_jit_comp64.c
index d183b48..6771c63 100644
--- a/arch/powerpc/net/bpf_jit_comp64.c
+++ b/arch/powerpc/net/bpf_jit_comp64.c
@@ -995,7 +995,7 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *fp)
 	struct bpf_prog *tmp_fp;
 	bool bpf_blinded = false;
 
-	if (!bpf_jit_enable)
+	if (!fp->jit_requested)
 		return org_fp;
 
 	tmp_fp = bpf_jit_blind_constants(org_fp);
diff --git a/arch/s390/include/asm/diag.h b/arch/s390/include/asm/diag.h
index 6db7856..cdbaad5 100644
--- a/arch/s390/include/asm/diag.h
+++ b/arch/s390/include/asm/diag.h
@@ -229,13 +229,55 @@ struct diag204_x_phys_block {
 } __packed;
 
 enum diag26c_sc {
+	DIAG26C_PORT_VNIC    = 0x00000024,
 	DIAG26C_MAC_SERVICES = 0x00000030
 };
 
 enum diag26c_version {
-	DIAG26C_VERSION2 = 0x00000002	/* z/VM 5.4.0 */
+	DIAG26C_VERSION2	 = 0x00000002,	/* z/VM 5.4.0 */
+	DIAG26C_VERSION6_VM65918 = 0x00020006	/* z/VM 6.4.0 + VM65918 */
 };
 
+#define DIAG26C_VNIC_INFO	0x0002
+struct diag26c_vnic_req {
+	u32	resp_buf_len;
+	u32	resp_version;
+	u16	req_format;
+	u16	vlan_id;
+	u64	sys_name;
+	u8	res[2];
+	u16	devno;
+} __packed __aligned(8);
+
+#define VNIC_INFO_PROT_L3	1
+#define VNIC_INFO_PROT_L2	2
+/* Note: this is the bare minimum, use it for uninitialized VNICs only. */
+struct diag26c_vnic_resp {
+	u32	version;
+	u32	entry_cnt;
+	/* VNIC info: */
+	u32	next_entry;
+	u64	owner;
+	u16	devno;
+	u8	status;
+	u8	type;
+	u64	lan_owner;
+	u64	lan_name;
+	u64	port_name;
+	u8	port_type;
+	u8	ext_status:6;
+	u8	protocol:2;
+	u16	base_devno;
+	u32	port_num;
+	u32	ifindex;
+	u32	maxinfo;
+	u32	dev_count;
+	/* 3x device info: */
+	u8	dev_info1[28];
+	u8	dev_info2[28];
+	u8	dev_info3[28];
+} __packed __aligned(8);
+
 #define DIAG26C_GET_MAC	0x0000
 struct diag26c_mac_req {
 	u32	resp_buf_len;
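
The new DIAG26C_VNIC_INFO request/response pair follows the same pattern as the existing DIAG26C_GET_MAC block just above. A hedged sketch (s390 kernel context) of how a caller might fill the request; the field values below are assumptions read off the definitions in this header, not taken from an in-tree user:

#include <linux/types.h>
#include <linux/string.h>
#include <asm/diag.h>

static void vnic_req_init(struct diag26c_vnic_req *req, u16 devno)
{
	memset(req, 0, sizeof(*req));
	req->resp_buf_len = sizeof(struct diag26c_vnic_resp);
	req->resp_version = DIAG26C_VERSION6_VM65918;
	req->req_format   = DIAG26C_VNIC_INFO;
	req->devno        = devno;	/* hypothetical device number */
}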
diff --git a/arch/s390/net/bpf_jit_comp.c b/arch/s390/net/bpf_jit_comp.c
index 9557d8b..1dfadbd 100644
--- a/arch/s390/net/bpf_jit_comp.c
+++ b/arch/s390/net/bpf_jit_comp.c
@@ -1299,7 +1299,7 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *fp)
 	struct bpf_jit jit;
 	int pass;
 
-	if (!bpf_jit_enable)
+	if (!fp->jit_requested)
 		return orig_fp;
 
 	tmp = bpf_jit_blind_constants(fp);
diff --git a/arch/sh/boards/board-espt.c b/arch/sh/boards/board-espt.c
index 7291e2f..4d6be53 100644
--- a/arch/sh/boards/board-espt.c
+++ b/arch/sh/boards/board-espt.c
@@ -79,7 +79,6 @@ static struct resource sh_eth_resources[] = {
 
 static struct sh_eth_plat_data sh7763_eth_pdata = {
 	.phy = 0,
-	.edmac_endian = EDMAC_LITTLE_ENDIAN,
 	.phy_interface = PHY_INTERFACE_MODE_MII,
 };
 
diff --git a/arch/sh/boards/board-sh7757lcr.c b/arch/sh/boards/board-sh7757lcr.c
index 0104c81..1bde08d 100644
--- a/arch/sh/boards/board-sh7757lcr.c
+++ b/arch/sh/boards/board-sh7757lcr.c
@@ -76,7 +76,6 @@ static struct resource sh_eth0_resources[] = {
 
 static struct sh_eth_plat_data sh7757_eth0_pdata = {
 	.phy = 1,
-	.edmac_endian = EDMAC_LITTLE_ENDIAN,
 	.set_mdio_gate = sh7757_eth_set_mdio_gate,
 };
 
@@ -104,7 +103,6 @@ static struct resource sh_eth1_resources[] = {
 
 static struct sh_eth_plat_data sh7757_eth1_pdata = {
 	.phy = 1,
-	.edmac_endian = EDMAC_LITTLE_ENDIAN,
 	.set_mdio_gate = sh7757_eth_set_mdio_gate,
 };
 
@@ -148,7 +146,6 @@ static struct resource sh_eth_giga0_resources[] = {
 
 static struct sh_eth_plat_data sh7757_eth_giga0_pdata = {
 	.phy = 18,
-	.edmac_endian = EDMAC_LITTLE_ENDIAN,
 	.set_mdio_gate = sh7757_eth_giga_set_mdio_gate,
 	.phy_interface = PHY_INTERFACE_MODE_RGMII_ID,
 };
@@ -182,7 +179,6 @@ static struct resource sh_eth_giga1_resources[] = {
 
 static struct sh_eth_plat_data sh7757_eth_giga1_pdata = {
 	.phy = 19,
-	.edmac_endian = EDMAC_LITTLE_ENDIAN,
 	.set_mdio_gate = sh7757_eth_giga_set_mdio_gate,
 	.phy_interface = PHY_INTERFACE_MODE_RGMII_ID,
 };
diff --git a/arch/sh/boards/mach-ecovec24/setup.c b/arch/sh/boards/mach-ecovec24/setup.c
index 1faf6cb..6f929ab 100644
--- a/arch/sh/boards/mach-ecovec24/setup.c
+++ b/arch/sh/boards/mach-ecovec24/setup.c
@@ -159,7 +159,6 @@ static struct resource sh_eth_resources[] = {
 
 static struct sh_eth_plat_data sh_eth_plat = {
 	.phy = 0x1f, /* SMSC LAN8700 */
-	.edmac_endian = EDMAC_LITTLE_ENDIAN,
 	.phy_interface = PHY_INTERFACE_MODE_MII,
 	.ether_link_active_low = 1
 };
diff --git a/arch/sh/boards/mach-se/7724/setup.c b/arch/sh/boards/mach-se/7724/setup.c
index f1fecd3..2559525 100644
--- a/arch/sh/boards/mach-se/7724/setup.c
+++ b/arch/sh/boards/mach-se/7724/setup.c
@@ -374,7 +374,6 @@ static struct resource sh_eth_resources[] = {
 
 static struct sh_eth_plat_data sh_eth_plat = {
 	.phy = 0x1f, /* SMSC LAN8187 */
-	.edmac_endian = EDMAC_LITTLE_ENDIAN,
 	.phy_interface = PHY_INTERFACE_MODE_MII,
 };
 
diff --git a/arch/sh/boards/mach-sh7763rdp/setup.c b/arch/sh/boards/mach-sh7763rdp/setup.c
index 2c8fb04..6e62686 100644
--- a/arch/sh/boards/mach-sh7763rdp/setup.c
+++ b/arch/sh/boards/mach-sh7763rdp/setup.c
@@ -87,7 +87,6 @@ static struct resource sh_eth_resources[] = {
 
 static struct sh_eth_plat_data sh7763_eth_pdata = {
 	.phy = 1,
-	.edmac_endian = EDMAC_LITTLE_ENDIAN,
 	.phy_interface = PHY_INTERFACE_MODE_MII,
 };
 
diff --git a/arch/sh/kernel/cpu/sh2/setup-sh7619.c b/arch/sh/kernel/cpu/sh2/setup-sh7619.c
index 95796ad..d08db08 100644
--- a/arch/sh/kernel/cpu/sh2/setup-sh7619.c
+++ b/arch/sh/kernel/cpu/sh2/setup-sh7619.c
@@ -122,7 +122,6 @@ static struct platform_device scif2_device = {
 
 static struct sh_eth_plat_data eth_platform_data = {
 	.phy		= 1,
-	.edmac_endian	= EDMAC_LITTLE_ENDIAN,
 	.phy_interface	= PHY_INTERFACE_MODE_MII,
 };
 
diff --git a/arch/sparc/net/bpf_jit_comp_64.c b/arch/sparc/net/bpf_jit_comp_64.c
index ff5f9cb..635fdef 100644
--- a/arch/sparc/net/bpf_jit_comp_64.c
+++ b/arch/sparc/net/bpf_jit_comp_64.c
@@ -1509,17 +1509,25 @@ static void jit_fill_hole(void *area, unsigned int size)
 		*ptr++ = 0x91d02005; /* ta 5 */
 }
 
+struct sparc64_jit_data {
+	struct bpf_binary_header *header;
+	u8 *image;
+	struct jit_ctx ctx;
+};
+
 struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
 {
 	struct bpf_prog *tmp, *orig_prog = prog;
+	struct sparc64_jit_data *jit_data;
 	struct bpf_binary_header *header;
 	bool tmp_blinded = false;
+	bool extra_pass = false;
 	struct jit_ctx ctx;
 	u32 image_size;
 	u8 *image_ptr;
 	int pass;
 
-	if (!bpf_jit_enable)
+	if (!prog->jit_requested)
 		return orig_prog;
 
 	tmp = bpf_jit_blind_constants(prog);
@@ -1533,13 +1541,31 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
 		prog = tmp;
 	}
 
+	jit_data = prog->aux->jit_data;
+	if (!jit_data) {
+		jit_data = kzalloc(sizeof(*jit_data), GFP_KERNEL);
+		if (!jit_data) {
+			prog = orig_prog;
+			goto out;
+		}
+		prog->aux->jit_data = jit_data;
+	}
+	if (jit_data->ctx.offset) {
+		ctx = jit_data->ctx;
+		image_ptr = jit_data->image;
+		header = jit_data->header;
+		extra_pass = true;
+		image_size = sizeof(u32) * ctx.idx;
+		goto skip_init_ctx;
+	}
+
 	memset(&ctx, 0, sizeof(ctx));
 	ctx.prog = prog;
 
 	ctx.offset = kcalloc(prog->len, sizeof(unsigned int), GFP_KERNEL);
 	if (ctx.offset == NULL) {
 		prog = orig_prog;
-		goto out;
+		goto out_off;
 	}
 
 	/* Fake pass to detect features used, and get an accurate assessment
@@ -1562,7 +1588,7 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
 	}
 
 	ctx.image = (u32 *)image_ptr;
-
+skip_init_ctx:
 	for (pass = 1; pass < 3; pass++) {
 		ctx.idx = 0;
 
@@ -1593,14 +1619,24 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
 
 	bpf_flush_icache(header, (u8 *)header + (header->pages * PAGE_SIZE));
 
-	bpf_jit_binary_lock_ro(header);
+	if (!prog->is_func || extra_pass) {
+		bpf_jit_binary_lock_ro(header);
+	} else {
+		jit_data->ctx = ctx;
+		jit_data->image = image_ptr;
+		jit_data->header = header;
+	}
 
 	prog->bpf_func = (void *)ctx.image;
 	prog->jited = 1;
 	prog->jited_len = image_size;
 
+	if (!prog->is_func || extra_pass) {
 out_off:
-	kfree(ctx.offset);
+		kfree(ctx.offset);
+		kfree(jit_data);
+		prog->aux->jit_data = NULL;
+	}
 out:
 	if (tmp_blinded)
 		bpf_jit_prog_release_other(prog, prog == orig_prog ?
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index d4fc98c..45dc623 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -154,6 +154,7 @@
 	select HAVE_KERNEL_XZ
 	select HAVE_KPROBES
 	select HAVE_KPROBES_ON_FTRACE
+	select HAVE_KPROBE_OVERRIDE
 	select HAVE_KRETPROBES
 	select HAVE_KVM
 	select HAVE_LIVEPATCH			if X86_64
diff --git a/arch/x86/include/asm/kprobes.h b/arch/x86/include/asm/kprobes.h
index 9f2e310..36abb23 100644
--- a/arch/x86/include/asm/kprobes.h
+++ b/arch/x86/include/asm/kprobes.h
@@ -67,6 +67,10 @@ extern const int kretprobe_blacklist_size;
 void arch_remove_kprobe(struct kprobe *p);
 asmlinkage void kretprobe_trampoline(void);
 
+#ifdef CONFIG_KPROBES_ON_FTRACE
+extern void arch_ftrace_kprobe_override_function(struct pt_regs *regs);
+#endif
+
 /* Architecture specific copy of original instruction*/
 struct arch_specific_insn {
 	/* copy of the original instruction */
diff --git a/arch/x86/include/asm/ptrace.h b/arch/x86/include/asm/ptrace.h
index 14131dd..6de1fd3 100644
--- a/arch/x86/include/asm/ptrace.h
+++ b/arch/x86/include/asm/ptrace.h
@@ -109,6 +109,11 @@ static inline unsigned long regs_return_value(struct pt_regs *regs)
 	return regs->ax;
 }
 
+static inline void regs_set_return_value(struct pt_regs *regs, unsigned long rc)
+{
+	regs->ax = rc;
+}
+
 /*
  * user_mode(regs) determines whether a register set came from user
  * mode.  On x86_32, this is true if V8086 mode was enabled OR if the
diff --git a/arch/x86/kernel/kprobes/ftrace.c b/arch/x86/kernel/kprobes/ftrace.c
index 8dc0161..1ea748d 100644
--- a/arch/x86/kernel/kprobes/ftrace.c
+++ b/arch/x86/kernel/kprobes/ftrace.c
@@ -97,3 +97,17 @@ int arch_prepare_kprobe_ftrace(struct kprobe *p)
 	p->ainsn.boostable = false;
 	return 0;
 }
+
+asmlinkage void override_func(void);
+asm(
+	".type override_func, @function\n"
+	"override_func:\n"
+	"	ret\n"
+	".size override_func, .-override_func\n"
+);
+
+void arch_ftrace_kprobe_override_function(struct pt_regs *regs)
+{
+	regs->ip = (unsigned long)&override_func;
+}
+NOKPROBE_SYMBOL(arch_ftrace_kprobe_override_function);
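
Together with the new regs_set_return_value() helper above, this gives an ftrace-based kprobe a way to make the probed function appear to return immediately with a chosen error code: the handler sets regs->ax and then points regs->ip at the ret-only stub. A minimal module-style sketch, assuming CONFIG_KPROBES_ON_FTRACE and a probe sitting on the ftrace location at function entry; the target symbol and error code are hypothetical, and overriding an arbitrary function this way is generally unsafe:

#include <linux/module.h>
#include <linux/kprobes.h>
#include <linux/ptrace.h>
#include <linux/errno.h>

static int force_einval(struct kprobe *p, struct pt_regs *regs)
{
	/* Pretend the probed function already ran and failed. */
	regs_set_return_value(regs, -EINVAL);
	/* Redirect the saved ip to the ret-only stub; returning
	 * non-zero tells the kprobe core we changed regs->ip. */
	arch_ftrace_kprobe_override_function(regs);
	return 1;
}

static struct kprobe kp = {
	.symbol_name = "open_ctree",	/* hypothetical target */
	.pre_handler = force_einval,
};

static int __init override_init(void)
{
	return register_kprobe(&kp);
}

static void __exit override_exit(void)
{
	unregister_kprobe(&kp);
}

module_init(override_init);
module_exit(override_exit);
MODULE_LICENSE("GPL");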
diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c
index 0554e8a..87f214f 100644
--- a/arch/x86/net/bpf_jit_comp.c
+++ b/arch/x86/net/bpf_jit_comp.c
@@ -1109,19 +1109,29 @@ xadd:			if (is_imm8(insn->off))
 	return proglen;
 }
 
+struct x64_jit_data {
+	struct bpf_binary_header *header;
+	int *addrs;
+	u8 *image;
+	int proglen;
+	struct jit_context ctx;
+};
+
 struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
 {
 	struct bpf_binary_header *header = NULL;
 	struct bpf_prog *tmp, *orig_prog = prog;
+	struct x64_jit_data *jit_data;
 	int proglen, oldproglen = 0;
 	struct jit_context ctx = {};
 	bool tmp_blinded = false;
+	bool extra_pass = false;
 	u8 *image = NULL;
 	int *addrs;
 	int pass;
 	int i;
 
-	if (!bpf_jit_enable)
+	if (!prog->jit_requested)
 		return orig_prog;
 
 	tmp = bpf_jit_blind_constants(prog);
@@ -1135,10 +1145,28 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
 		prog = tmp;
 	}
 
+	jit_data = prog->aux->jit_data;
+	if (!jit_data) {
+		jit_data = kzalloc(sizeof(*jit_data), GFP_KERNEL);
+		if (!jit_data) {
+			prog = orig_prog;
+			goto out;
+		}
+		prog->aux->jit_data = jit_data;
+	}
+	addrs = jit_data->addrs;
+	if (addrs) {
+		ctx = jit_data->ctx;
+		oldproglen = jit_data->proglen;
+		image = jit_data->image;
+		header = jit_data->header;
+		extra_pass = true;
+		goto skip_init_addrs;
+	}
 	addrs = kmalloc(prog->len * sizeof(*addrs), GFP_KERNEL);
 	if (!addrs) {
 		prog = orig_prog;
-		goto out;
+		goto out_addrs;
 	}
 
 	/* Before first pass, make a rough estimation of addrs[]
@@ -1149,6 +1177,7 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
 		addrs[i] = proglen;
 	}
 	ctx.cleanup_addr = proglen;
+skip_init_addrs:
 
 	/* JITed image shrinks with every pass and the loop iterates
 	 * until the image stops shrinking. Very large bpf programs
@@ -1189,7 +1218,15 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
 
 	if (image) {
 		bpf_flush_icache(header, image + proglen);
-		bpf_jit_binary_lock_ro(header);
+		if (!prog->is_func || extra_pass) {
+			bpf_jit_binary_lock_ro(header);
+		} else {
+			jit_data->addrs = addrs;
+			jit_data->ctx = ctx;
+			jit_data->proglen = proglen;
+			jit_data->image = image;
+			jit_data->header = header;
+		}
 		prog->bpf_func = (void *)image;
 		prog->jited = 1;
 		prog->jited_len = proglen;
@@ -1197,8 +1234,12 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
 		prog = orig_prog;
 	}
 
+	if (!prog->is_func || extra_pass) {
 out_addrs:
-	kfree(addrs);
+		kfree(addrs);
+		kfree(jit_data);
+		prog->aux->jit_data = NULL;
+	}
 out:
 	if (tmp_blinded)
 		bpf_jit_prog_release_other(prog, prog == orig_prog ?
diff --git a/drivers/atm/eni.c b/drivers/atm/eni.c
index ce47eb1..6470e3c 100644
--- a/drivers/atm/eni.c
+++ b/drivers/atm/eni.c
@@ -473,7 +473,7 @@ static int do_rx_dma(struct atm_vcc *vcc,struct sk_buff *skb,
 		ENI_PRV_POS(skb) = eni_vcc->descr+size+1;
 		skb_queue_tail(&eni_dev->rx_queue,skb);
 		eni_vcc->last = skb;
-rx_enqueued++;
+		rx_enqueued++;
 	}
 	eni_vcc->descr = here;
 	eni_out(dma_wr,MID_DMA_WR_RX);
@@ -715,7 +715,7 @@ static void get_service(struct atm_dev *dev)
 			else eni_dev->slow = vcc;
 			eni_dev->last_slow = vcc;
 		}
-putting++;
+		putting++;
 		ENI_VCC(vcc)->servicing++;
 	}
 }
@@ -744,7 +744,7 @@ static void dequeue_rx(struct atm_dev *dev)
 		}
 		EVENT("dequeued (size=%ld,pos=0x%lx)\n",ENI_PRV_SIZE(skb),
 		    ENI_PRV_POS(skb));
-rx_dequeued++;
+		rx_dequeued++;
 		vcc = ATM_SKB(skb)->vcc;
 		eni_vcc = ENI_VCC(vcc);
 		first = 0;
@@ -1174,7 +1174,7 @@ DPRINTK("doing direct send\n"); /* @@@ well, this doesn't work anyway */
 	DPRINTK("dma_wr set to %d, tx_pos is now %ld\n",dma_wr,tx->tx_pos);
 	eni_out(dma_wr,MID_DMA_WR_TX);
 	skb_queue_tail(&eni_dev->tx_queue,skb);
-queued++;
+	queued++;
 	return enq_ok;
 }
 
@@ -1195,7 +1195,7 @@ static void poll_tx(struct atm_dev *dev)
 				if (res == enq_ok) continue;
 				DPRINTK("re-queuing TX PDU\n");
 				skb_queue_head(&tx->backlog,skb);
-requeued++;
+				requeued++;
 				if (res == enq_jam) return;
 				break;
 			}
@@ -1232,7 +1232,7 @@ static void dequeue_tx(struct atm_dev *dev)
 		else dev_kfree_skb_irq(skb);
 		atomic_inc(&vcc->stats->tx);
 		wake_up(&eni_dev->tx_wait);
-dma_complete++;
+		dma_complete++;
 	}
 }
 
@@ -1555,7 +1555,7 @@ static void eni_tasklet(unsigned long data)
 	}
 	if (events & MID_TX_COMPLETE) {
 		EVENT("INT: TX COMPLETE\n",0,0);
-tx_complete++;
+		tx_complete++;
 		wake_up(&eni_dev->tx_wait);
 		/* poll_rx ? */
 	}
@@ -2069,14 +2069,14 @@ static int eni_send(struct atm_vcc *vcc,struct sk_buff *skb)
 		}
 		*(u32 *) skb->data = htonl(*(u32 *) skb->data);
 	}
-submitted++;
+	submitted++;
 	ATM_SKB(skb)->vcc = vcc;
 	tasklet_disable(&ENI_DEV(vcc->dev)->task);
 	res = do_tx(skb);
 	tasklet_enable(&ENI_DEV(vcc->dev)->task);
 	if (res == enq_ok) return 0;
 	skb_queue_tail(&ENI_VCC(vcc)->tx->backlog,skb);
-backlogged++;
+	backlogged++;
 	tasklet_schedule(&ENI_DEV(vcc->dev)->task);
 	return 0;
 }
diff --git a/drivers/bcma/driver_pcie2.c b/drivers/bcma/driver_pcie2.c
index b1a6e32..cf889fc 100644
--- a/drivers/bcma/driver_pcie2.c
+++ b/drivers/bcma/driver_pcie2.c
@@ -83,7 +83,8 @@ static void bcma_core_pcie2_hw_ltr_war(struct bcma_drv_pcie2 *pcie2)
 		bcma_core_pcie2_set_ltr_vals(pcie2);
 
 		/* TODO:
-		si_core_wrapperreg(pcie2, 3, 0x60, 0x8080, 0); */
+		 * si_core_wrapperreg(pcie2, 3, 0x60, 0x8080, 0);
+		 */
 
 		/* enable the LTR */
 		devstsctr2 |= PCIE2_CAP_DEVSTSCTRL2_LTRENAB;
diff --git a/drivers/bluetooth/Kconfig b/drivers/bluetooth/Kconfig
index 60e1c7d..07e55cd 100644
--- a/drivers/bluetooth/Kconfig
+++ b/drivers/bluetooth/Kconfig
@@ -31,6 +31,16 @@
 	  Say Y here to compile support for Bluetooth USB devices into the
 	  kernel or say M to compile it as module (btusb).
 
+config BT_HCIBTUSB_AUTOSUSPEND
+	bool "Enable USB autosuspend for Bluetooth USB devices by default"
+	depends on BT_HCIBTUSB
+	help
+	  Say Y here to enable USB autosuspend for Bluetooth USB devices by
+	  default.
+
+	  This can be overridden by passing btusb.enable_autosuspend=[y|n]
+	  on the kernel command line.
+
 config BT_HCIBTUSB_BCM
 	bool "Broadcom protocol support"
 	depends on BT_HCIBTUSB
@@ -67,6 +77,7 @@
 config BT_HCIUART
 	tristate "HCI UART driver"
 	depends on SERIAL_DEV_BUS || !SERIAL_DEV_BUS
+	depends on NVMEM || !NVMEM
 	depends on TTY
 	help
 	  Bluetooth HCI UART driver.
@@ -97,6 +108,7 @@
 	tristate "UART Nokia H4+ protocol support"
 	depends on BT_HCIUART
 	depends on BT_HCIUART_SERDEV
+	depends on GPIOLIB
 	depends on PM
 	select BT_HCIUART_H4
 	select BT_BCM
@@ -158,6 +170,7 @@
 config BT_HCIUART_INTEL
 	bool "Intel protocol support"
 	depends on BT_HCIUART
+	depends on GPIOLIB
 	select BT_HCIUART_H4
 	select BT_INTEL
 	help
@@ -171,6 +184,7 @@
 	depends on BT_HCIUART
 	depends on BT_HCIUART_SERDEV
 	depends on (!ACPI || SERIAL_DEV_CTRL_TTYPORT)
+	depends on GPIOLIB
 	select BT_HCIUART_H4
 	select BT_BCM
 	help
diff --git a/drivers/bluetooth/bluecard_cs.c b/drivers/bluetooth/bluecard_cs.c
index d513ef4..82437a6 100644
--- a/drivers/bluetooth/bluecard_cs.c
+++ b/drivers/bluetooth/bluecard_cs.c
@@ -302,9 +302,7 @@ static void bluecard_write_wakeup(struct bluecard_info *info)
 			}
 
 			/* Wait until the command reaches the baseband */
-			prepare_to_wait(&wq, &wait, TASK_INTERRUPTIBLE);
-			schedule_timeout(HZ/10);
-			finish_wait(&wq, &wait);
+			mdelay(100);
 
 			/* Set baud on baseband */
 			info->ctrl_reg &= ~0x03;
@@ -316,9 +314,7 @@ static void bluecard_write_wakeup(struct bluecard_info *info)
 			outb(info->ctrl_reg, iobase + REG_CONTROL);
 
 			/* Wait before the next HCI packet can be send */
-			prepare_to_wait(&wq, &wait, TASK_INTERRUPTIBLE);
-			schedule_timeout(HZ);
-			finish_wait(&wq, &wait);
+			mdelay(1000);
 		}
 
 		if (len == skb->len) {
diff --git a/drivers/bluetooth/bpa10x.c b/drivers/bluetooth/bpa10x.c
index 7971bfb..801ea4c 100644
--- a/drivers/bluetooth/bpa10x.c
+++ b/drivers/bluetooth/bpa10x.c
@@ -259,7 +259,7 @@ static int bpa10x_flush(struct hci_dev *hdev)
 
 static int bpa10x_setup(struct hci_dev *hdev)
 {
-	const u8 req[] = { 0x07 };
+	static const u8 req[] = { 0x07 };
 	struct sk_buff *skb;
 
 	BT_DBG("%s", hdev->name);
diff --git a/drivers/bluetooth/btbcm.h b/drivers/bluetooth/btbcm.h
index d9e6b41..cfe6ad4 100644
--- a/drivers/bluetooth/btbcm.h
+++ b/drivers/bluetooth/btbcm.h
@@ -44,8 +44,8 @@ struct bcm_set_sleep_mode {
 	__u8 tristate_control;
 	__u8 usb_auto_sleep;
 	__u8 usb_resume_timeout;
-	__u8 pulsed_host_wake;
 	__u8 break_to_host;
+	__u8 pulsed_host_wake;
 } __packed;
 
 struct bcm_set_pcm_int_params {
diff --git a/drivers/bluetooth/btintel.c b/drivers/bluetooth/btintel.c
index 4459555..07f00e4 100644
--- a/drivers/bluetooth/btintel.c
+++ b/drivers/bluetooth/btintel.c
@@ -75,7 +75,7 @@ EXPORT_SYMBOL_GPL(btintel_check_bdaddr);
 
 int btintel_enter_mfg(struct hci_dev *hdev)
 {
-	const u8 param[] = { 0x01, 0x00 };
+	static const u8 param[] = { 0x01, 0x00 };
 	struct sk_buff *skb;
 
 	skb = __hci_cmd_sync(hdev, 0xfc11, 2, param, HCI_CMD_TIMEOUT);
diff --git a/drivers/bluetooth/btqcomsmd.c b/drivers/bluetooth/btqcomsmd.c
index 663bed6..2c9a5fc 100644
--- a/drivers/bluetooth/btqcomsmd.c
+++ b/drivers/bluetooth/btqcomsmd.c
@@ -88,7 +88,8 @@ static int btqcomsmd_send(struct hci_dev *hdev, struct sk_buff *skb)
 		break;
 	}
 
-	kfree_skb(skb);
+	if (!ret)
+		kfree_skb(skb);
 
 	return ret;
 }
diff --git a/drivers/bluetooth/btsdio.c b/drivers/bluetooth/btsdio.c
index c8e945d..20142bc7 100644
--- a/drivers/bluetooth/btsdio.c
+++ b/drivers/bluetooth/btsdio.c
@@ -31,6 +31,7 @@
 #include <linux/errno.h>
 #include <linux/skbuff.h>
 
+#include <linux/mmc/host.h>
 #include <linux/mmc/sdio_ids.h>
 #include <linux/mmc/sdio_func.h>
 
@@ -292,6 +293,14 @@ static int btsdio_probe(struct sdio_func *func,
 		tuple = tuple->next;
 	}
 
+	/* BCM43341 devices soldered onto the PCB (non-removable) use a
+	 * UART connection for Bluetooth, so ignore the BT SDIO interface.
+	 */
+	if (func->vendor == SDIO_VENDOR_ID_BROADCOM &&
+	    func->device == SDIO_DEVICE_ID_BROADCOM_43341 &&
+	    !mmc_card_is_removable(func->card->host))
+		return -ENODEV;
+
 	data = devm_kzalloc(&func->dev, sizeof(*data), GFP_KERNEL);
 	if (!data)
 		return -ENOMEM;
diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
index f7120c9..29977eb 100644
--- a/drivers/bluetooth/btusb.c
+++ b/drivers/bluetooth/btusb.c
@@ -23,6 +23,7 @@
 
 #include <linux/module.h>
 #include <linux/usb.h>
+#include <linux/usb/quirks.h>
 #include <linux/firmware.h>
 #include <linux/of_device.h>
 #include <linux/of_irq.h>
@@ -40,6 +41,7 @@
 
 static bool disable_scofix;
 static bool force_scofix;
+static bool enable_autosuspend = IS_ENABLED(CONFIG_BT_HCIBTUSB_AUTOSUSPEND);
 
 static bool reset = true;
 
@@ -263,6 +265,7 @@ static const struct usb_device_id blacklist_table[] = {
 	/* QCA ROME chipset */
 	{ USB_DEVICE(0x0cf3, 0xe007), .driver_info = BTUSB_QCA_ROME },
 	{ USB_DEVICE(0x0cf3, 0xe009), .driver_info = BTUSB_QCA_ROME },
+	{ USB_DEVICE(0x0cf3, 0xe010), .driver_info = BTUSB_QCA_ROME },
 	{ USB_DEVICE(0x0cf3, 0xe300), .driver_info = BTUSB_QCA_ROME },
 	{ USB_DEVICE(0x0cf3, 0xe301), .driver_info = BTUSB_QCA_ROME },
 	{ USB_DEVICE(0x0cf3, 0xe360), .driver_info = BTUSB_QCA_ROME },
@@ -270,6 +273,7 @@ static const struct usb_device_id blacklist_table[] = {
 	{ USB_DEVICE(0x0489, 0xe09f), .driver_info = BTUSB_QCA_ROME },
 	{ USB_DEVICE(0x0489, 0xe0a2), .driver_info = BTUSB_QCA_ROME },
 	{ USB_DEVICE(0x04ca, 0x3011), .driver_info = BTUSB_QCA_ROME },
+	{ USB_DEVICE(0x04ca, 0x3015), .driver_info = BTUSB_QCA_ROME },
 	{ USB_DEVICE(0x04ca, 0x3016), .driver_info = BTUSB_QCA_ROME },
 
 	/* Broadcom BCM2035 */
@@ -387,9 +391,8 @@ static const struct usb_device_id blacklist_table[] = {
 #define BTUSB_FIRMWARE_LOADED	7
 #define BTUSB_FIRMWARE_FAILED	8
 #define BTUSB_BOOTING		9
-#define BTUSB_RESET_RESUME	10
-#define BTUSB_DIAG_RUNNING	11
-#define BTUSB_OOB_WAKE_ENABLED	12
+#define BTUSB_DIAG_RUNNING	10
+#define BTUSB_OOB_WAKE_ENABLED	11
 
 struct btusb_data {
 	struct hci_dev       *hdev;
@@ -3120,9 +3123,9 @@ static int btusb_probe(struct usb_interface *intf,
 
 		/* QCA Rome devices lose their updated firmware over suspend,
 		 * but the USB hub doesn't notice any status change.
-		 * Explicitly request a device reset on resume.
+		 * Explicitly request a device reset on resume.
 		 */
-		set_bit(BTUSB_RESET_RESUME, &data->flags);
+		interface_to_usbdev(intf)->quirks |= USB_QUIRK_RESET_RESUME;
 	}
 
 #ifdef CONFIG_BT_HCIBTUSB_RTL
@@ -3133,7 +3136,7 @@ static int btusb_probe(struct usb_interface *intf,
 		 * but the USB hub doesn't notice any status change.
 		 * Explicitly request a device reset on resume.
 		 */
-		set_bit(BTUSB_RESET_RESUME, &data->flags);
+		interface_to_usbdev(intf)->quirks |= USB_QUIRK_RESET_RESUME;
 	}
 #endif
 
@@ -3213,6 +3216,9 @@ static int btusb_probe(struct usb_interface *intf,
 	}
 #endif
 
+	if (enable_autosuspend)
+		usb_enable_autosuspend(data->udev);
+
 	err = hci_register_dev(hdev);
 	if (err < 0)
 		goto out_free_dev;
@@ -3299,14 +3305,6 @@ static int btusb_suspend(struct usb_interface *intf, pm_message_t message)
 		enable_irq(data->oob_wake_irq);
 	}
 
-	/* Optionally request a device reset on resume, but only when
-	 * wakeups are disabled. If wakeups are enabled we assume the
-	 * device will stay powered up throughout suspend.
-	 */
-	if (test_bit(BTUSB_RESET_RESUME, &data->flags) &&
-	    !device_may_wakeup(&data->udev->dev))
-		data->udev->reset_resume = 1;
-
 	return 0;
 }
 
@@ -3425,6 +3423,9 @@ MODULE_PARM_DESC(disable_scofix, "Disable fixup of wrong SCO buffer size");
 module_param(force_scofix, bool, 0644);
 MODULE_PARM_DESC(force_scofix, "Force fixup of wrong SCO buffers size");
 
+module_param(enable_autosuspend, bool, 0644);
+MODULE_PARM_DESC(enable_autosuspend, "Enable USB autosuspend by default");
+
 module_param(reset, bool, 0644);
 MODULE_PARM_DESC(reset, "Send HCI reset command on initialization");
 
diff --git a/drivers/bluetooth/hci_bcm.c b/drivers/bluetooth/hci_bcm.c
index 707c2d1..64800cd 100644
--- a/drivers/bluetooth/hci_bcm.c
+++ b/drivers/bluetooth/hci_bcm.c
@@ -29,6 +29,7 @@
 #include <linux/acpi.h>
 #include <linux/of.h>
 #include <linux/property.h>
+#include <linux/platform_data/x86/apple.h>
 #include <linux/platform_device.h>
 #include <linux/clk.h>
 #include <linux/gpio/consumer.h>
@@ -52,7 +53,37 @@
 
 #define BCM_AUTOSUSPEND_DELAY	5000 /* default autosleep delay */
 
-/* device driver resources */
+/**
+ * struct bcm_device - device driver resources
+ * @serdev_hu: HCI UART controller struct
+ * @list: bcm_device_list node
+ * @dev: physical UART slave
+ * @name: device name logged by bt_dev_*() functions
+ * @device_wakeup: BT_WAKE pin,
+ *	assert = Bluetooth device must wake up or remain awake,
+ *	deassert = Bluetooth device may sleep when sleep criteria are met
+ * @shutdown: BT_REG_ON pin,
+ *	power up or power down Bluetooth device internal regulators
+ * @set_device_wakeup: callback to toggle BT_WAKE pin
+ *	either by accessing @device_wakeup or by calling @btlp
+ * @set_shutdown: callback to toggle BT_REG_ON pin
+ *	either by accessing @shutdown or by calling @btpu/@btpd
+ * @btlp: Apple ACPI method to toggle BT_WAKE pin ("Bluetooth Low Power")
+ * @btpu: Apple ACPI method to drive BT_REG_ON pin high ("Bluetooth Power Up")
+ * @btpd: Apple ACPI method to drive BT_REG_ON pin low ("Bluetooth Power Down")
+ * @clk: clock used by Bluetooth device
+ * @clk_enabled: whether @clk is prepared and enabled
+ * @init_speed: default baudrate of Bluetooth device;
+ *	the host UART is initially set to this baudrate so that
+ *	it can configure the Bluetooth device for @oper_speed
+ * @oper_speed: preferred baudrate of Bluetooth device;
+ *	set to 0 if @init_speed is already the preferred baudrate
+ * @irq: interrupt triggered by HOST_WAKE_BT pin
+ * @irq_active_low: whether @irq is active low
+ * @hu: pointer to HCI UART controller struct,
+ *	used to disable flow control during runtime suspend and system sleep
+ * @is_suspended: whether flow control is currently disabled
+ */
 struct bcm_device {
 	/* Must be the first member, hci_serdev.c expects this. */
 	struct hci_uart		serdev_hu;
@@ -63,6 +94,11 @@ struct bcm_device {
 	const char		*name;
 	struct gpio_desc	*device_wakeup;
 	struct gpio_desc	*shutdown;
+	int			(*set_device_wakeup)(struct bcm_device *, bool);
+	int			(*set_shutdown)(struct bcm_device *, bool);
+#ifdef CONFIG_ACPI
+	acpi_handle		btlp, btpu, btpd;
+#endif
 
 	struct clk		*clk;
 	bool			clk_enabled;
@@ -74,7 +110,7 @@ struct bcm_device {
 
 #ifdef CONFIG_PM
 	struct hci_uart		*hu;
-	bool			is_suspended; /* suspend/resume flag */
+	bool			is_suspended;
 #endif
 };
 
@@ -170,11 +206,21 @@ static bool bcm_device_exists(struct bcm_device *device)
 
 static int bcm_gpio_set_power(struct bcm_device *dev, bool powered)
 {
-	if (powered && !IS_ERR(dev->clk) && !dev->clk_enabled)
-		clk_prepare_enable(dev->clk);
+	int err;
 
-	gpiod_set_value(dev->shutdown, powered);
-	gpiod_set_value(dev->device_wakeup, powered);
+	if (powered && !IS_ERR(dev->clk) && !dev->clk_enabled) {
+		err = clk_prepare_enable(dev->clk);
+		if (err)
+			return err;
+	}
+
+	err = dev->set_shutdown(dev, powered);
+	if (err)
+		goto err_clk_disable;
+
+	err = dev->set_device_wakeup(dev, powered);
+	if (err)
+		goto err_revert_shutdown;
 
 	if (!powered && !IS_ERR(dev->clk) && dev->clk_enabled)
 		clk_disable_unprepare(dev->clk);
@@ -182,6 +228,13 @@ static int bcm_gpio_set_power(struct bcm_device *dev, bool powered)
 	dev->clk_enabled = powered;
 
 	return 0;
+
+err_revert_shutdown:
+	dev->set_shutdown(dev, !powered);
+err_clk_disable:
+	if (powered && !IS_ERR(dev->clk) && !dev->clk_enabled)
+		clk_disable_unprepare(dev->clk);
+	return err;
 }
 
 #ifdef CONFIG_PM
@@ -191,9 +244,7 @@ static irqreturn_t bcm_host_wake(int irq, void *data)
 
 	bt_dev_dbg(bdev, "Host wake IRQ");
 
-	pm_runtime_get(bdev->dev);
-	pm_runtime_mark_last_busy(bdev->dev);
-	pm_runtime_put_autosuspend(bdev->dev);
+	pm_request_resume(bdev->dev);
 
 	return IRQ_HANDLED;
 }
@@ -218,8 +269,10 @@ static int bcm_request_irq(struct bcm_data *bcm)
 			       bdev->irq_active_low ? IRQF_TRIGGER_FALLING :
 						      IRQF_TRIGGER_RISING,
 			       "host_wake", bdev);
-	if (err)
+	if (err) {
+		bdev->irq = err;
 		goto unlock;
+	}
 
 	device_init_wakeup(bdev->dev, true);
 
@@ -247,8 +300,8 @@ static const struct bcm_set_sleep_mode default_sleep_params = {
 	/* Irrelevant USB flags */
 	.usb_auto_sleep = 0,
 	.usb_resume_timeout = 0,
+	.break_to_host = 0,
 	.pulsed_host_wake = 0,
-	.break_to_host = 0
 };
 
 static int bcm_setup_sleep(struct hci_uart *hu)
@@ -304,6 +357,7 @@ static int bcm_open(struct hci_uart *hu)
 {
 	struct bcm_data *bcm;
 	struct list_head *p;
+	int err;
 
 	bt_dev_dbg(hu->hdev, "hu %p", hu);
 
@@ -318,7 +372,10 @@ static int bcm_open(struct hci_uart *hu)
 	mutex_lock(&bcm_device_lock);
 
 	if (hu->serdev) {
-		serdev_device_open(hu->serdev);
+		err = serdev_device_open(hu->serdev);
+		if (err)
+			goto err_free;
+
 		bcm->dev = serdev_device_get_drvdata(hu->serdev);
 		goto out;
 	}
@@ -346,17 +403,30 @@ static int bcm_open(struct hci_uart *hu)
 	if (bcm->dev) {
 		hu->init_speed = bcm->dev->init_speed;
 		hu->oper_speed = bcm->dev->oper_speed;
-		bcm_gpio_set_power(bcm->dev, true);
+		err = bcm_gpio_set_power(bcm->dev, true);
+		if (err)
+			goto err_unset_hu;
 	}
 
 	mutex_unlock(&bcm_device_lock);
 	return 0;
+
+err_unset_hu:
+#ifdef CONFIG_PM
+	bcm->dev->hu = NULL;
+#endif
+err_free:
+	mutex_unlock(&bcm_device_lock);
+	hu->priv = NULL;
+	kfree(bcm);
+	return err;
 }
 
 static int bcm_close(struct hci_uart *hu)
 {
 	struct bcm_data *bcm = hu->priv;
 	struct bcm_device *bdev = NULL;
+	int err;
 
 	bt_dev_dbg(hu->hdev, "hu %p", hu);
 
@@ -374,16 +444,17 @@ static int bcm_close(struct hci_uart *hu)
 	}
 
 	if (bdev) {
-		bcm_gpio_set_power(bdev, false);
-#ifdef CONFIG_PM
-		pm_runtime_disable(bdev->dev);
-		pm_runtime_set_suspended(bdev->dev);
-
-		if (device_can_wakeup(bdev->dev)) {
+		if (IS_ENABLED(CONFIG_PM) && bdev->irq > 0) {
 			devm_free_irq(bdev->dev, bdev->irq, bdev);
 			device_init_wakeup(bdev->dev, false);
+			pm_runtime_disable(bdev->dev);
 		}
-#endif
+
+		err = bcm_gpio_set_power(bdev, false);
+		if (err)
+			bt_dev_err(hu->hdev, "Failed to power down");
+		else
+			pm_runtime_set_suspended(bdev->dev);
 	}
 	mutex_unlock(&bcm_device_lock);
 
@@ -512,11 +583,8 @@ static int bcm_recv(struct hci_uart *hu, const void *data, int count)
 	} else if (!bcm->rx_skb) {
 		/* Delay auto-suspend when receiving completed packet */
 		mutex_lock(&bcm_device_lock);
-		if (bcm->dev && bcm_device_exists(bcm->dev)) {
-			pm_runtime_get(bcm->dev->dev);
-			pm_runtime_mark_last_busy(bcm->dev->dev);
-			pm_runtime_put_autosuspend(bcm->dev->dev);
-		}
+		if (bcm->dev && bcm_device_exists(bcm->dev))
+			pm_request_resume(bcm->dev->dev);
 		mutex_unlock(&bcm_device_lock);
 	}
 
@@ -566,6 +634,7 @@ static struct sk_buff *bcm_dequeue(struct hci_uart *hu)
 static int bcm_suspend_device(struct device *dev)
 {
 	struct bcm_device *bdev = dev_get_drvdata(dev);
+	int err;
 
 	bt_dev_dbg(bdev, "");
 
@@ -577,27 +646,37 @@ static int bcm_suspend_device(struct device *dev)
 	}
 
 	/* Suspend the device */
-	if (bdev->device_wakeup) {
-		gpiod_set_value(bdev->device_wakeup, false);
-		bt_dev_dbg(bdev, "suspend, delaying 15 ms");
-		mdelay(15);
+	err = bdev->set_device_wakeup(bdev, false);
+	if (err) {
+		if (bdev->is_suspended && bdev->hu) {
+			bdev->is_suspended = false;
+			hci_uart_set_flow_control(bdev->hu, false);
+		}
+		return -EBUSY;
 	}
 
+	bt_dev_dbg(bdev, "suspend, delaying 15 ms");
+	msleep(15);
+
 	return 0;
 }
 
 static int bcm_resume_device(struct device *dev)
 {
 	struct bcm_device *bdev = dev_get_drvdata(dev);
+	int err;
 
 	bt_dev_dbg(bdev, "");
 
-	if (bdev->device_wakeup) {
-		gpiod_set_value(bdev->device_wakeup, true);
-		bt_dev_dbg(bdev, "resume, delaying 15 ms");
-		mdelay(15);
+	err = bdev->set_device_wakeup(bdev, true);
+	if (err) {
+		dev_err(dev, "Failed to power up\n");
+		return err;
 	}
 
+	bt_dev_dbg(bdev, "resume, delaying 15 ms");
+	msleep(15);
+
 	/* When this executes, the device has woken up already */
 	if (bdev->is_suspended && bdev->hu) {
 		bdev->is_suspended = false;
@@ -632,7 +711,7 @@ static int bcm_suspend(struct device *dev)
 	if (pm_runtime_active(dev))
 		bcm_suspend_device(dev);
 
-	if (device_may_wakeup(dev)) {
+	if (device_may_wakeup(dev) && bdev->irq > 0) {
 		error = enable_irq_wake(bdev->irq);
 		if (!error)
 			bt_dev_dbg(bdev, "BCM irq: enabled");
@@ -648,6 +727,7 @@ static int bcm_suspend(struct device *dev)
 static int bcm_resume(struct device *dev)
 {
 	struct bcm_device *bdev = dev_get_drvdata(dev);
+	int err = 0;
 
 	bt_dev_dbg(bdev, "resume: is_suspended %d", bdev->is_suspended);
 
@@ -662,19 +742,21 @@ static int bcm_resume(struct device *dev)
 	if (!bdev->hu)
 		goto unlock;
 
-	if (device_may_wakeup(dev)) {
+	if (device_may_wakeup(dev) && bdev->irq > 0) {
 		disable_irq_wake(bdev->irq);
 		bt_dev_dbg(bdev, "BCM irq: disabled");
 	}
 
-	bcm_resume_device(dev);
+	err = bcm_resume_device(dev);
 
 unlock:
 	mutex_unlock(&bcm_device_lock);
 
-	pm_runtime_disable(dev);
-	pm_runtime_set_active(dev);
-	pm_runtime_enable(dev);
+	if (!err) {
+		pm_runtime_disable(dev);
+		pm_runtime_set_active(dev);
+		pm_runtime_enable(dev);
+	}
 
 	return 0;
 }
@@ -771,25 +853,84 @@ static int bcm_resource(struct acpi_resource *ares, void *data)
 
 	return 0;
 }
+
+static int bcm_apple_set_device_wakeup(struct bcm_device *dev, bool awake)
+{
+	if (ACPI_FAILURE(acpi_execute_simple_method(dev->btlp, NULL, !awake)))
+		return -EIO;
+
+	return 0;
+}
+
+static int bcm_apple_set_shutdown(struct bcm_device *dev, bool powered)
+{
+	if (ACPI_FAILURE(acpi_evaluate_object(powered ? dev->btpu : dev->btpd,
+					      NULL, NULL, NULL)))
+		return -EIO;
+
+	return 0;
+}
+
+static int bcm_apple_get_resources(struct bcm_device *dev)
+{
+	struct acpi_device *adev = ACPI_COMPANION(dev->dev);
+	const union acpi_object *obj;
+
+	if (!adev ||
+	    ACPI_FAILURE(acpi_get_handle(adev->handle, "BTLP", &dev->btlp)) ||
+	    ACPI_FAILURE(acpi_get_handle(adev->handle, "BTPU", &dev->btpu)) ||
+	    ACPI_FAILURE(acpi_get_handle(adev->handle, "BTPD", &dev->btpd)))
+		return -ENODEV;
+
+	if (!acpi_dev_get_property(adev, "baud", ACPI_TYPE_BUFFER, &obj) &&
+	    obj->buffer.length == 8)
+		dev->init_speed = *(u64 *)obj->buffer.pointer;
+
+	dev->set_device_wakeup = bcm_apple_set_device_wakeup;
+	dev->set_shutdown = bcm_apple_set_shutdown;
+
+	return 0;
+}
+#else
+static inline int bcm_apple_get_resources(struct bcm_device *dev)
+{
+	return -EOPNOTSUPP;
+}
 #endif /* CONFIG_ACPI */
 
+static int bcm_gpio_set_device_wakeup(struct bcm_device *dev, bool awake)
+{
+	gpiod_set_value(dev->device_wakeup, awake);
+	return 0;
+}
+
+static int bcm_gpio_set_shutdown(struct bcm_device *dev, bool powered)
+{
+	gpiod_set_value(dev->shutdown, powered);
+	return 0;
+}
+
 static int bcm_get_resources(struct bcm_device *dev)
 {
 	dev->name = dev_name(dev->dev);
 
+	if (x86_apple_machine && !bcm_apple_get_resources(dev))
+		return 0;
+
 	dev->clk = devm_clk_get(dev->dev, NULL);
 
-	dev->device_wakeup = devm_gpiod_get_optional(dev->dev,
-						     "device-wakeup",
-						     GPIOD_OUT_LOW);
+	dev->device_wakeup = devm_gpiod_get(dev->dev, "device-wakeup",
+					    GPIOD_OUT_LOW);
 	if (IS_ERR(dev->device_wakeup))
 		return PTR_ERR(dev->device_wakeup);
 
-	dev->shutdown = devm_gpiod_get_optional(dev->dev, "shutdown",
-						GPIOD_OUT_LOW);
+	dev->shutdown = devm_gpiod_get(dev->dev, "shutdown", GPIOD_OUT_LOW);
 	if (IS_ERR(dev->shutdown))
 		return PTR_ERR(dev->shutdown);
 
+	dev->set_device_wakeup = bcm_gpio_set_device_wakeup;
+	dev->set_shutdown = bcm_gpio_set_shutdown;
+
 	/* IRQ can be declared in ACPI table as Interrupt or GpioInt */
 	if (dev->irq <= 0) {
 		struct gpio_desc *gpio;
@@ -802,7 +943,7 @@ static int bcm_get_resources(struct bcm_device *dev)
 		dev->irq = gpiod_to_irq(gpio);
 	}
 
-	dev_info(dev->dev, "BCM irq: %d\n", dev->irq);
+	dev_dbg(dev->dev, "BCM irq: %d\n", dev->irq);
 	return 0;
 }
 
@@ -892,7 +1033,9 @@ static int bcm_probe(struct platform_device *pdev)
 	list_add_tail(&dev->list, &bcm_device_list);
 	mutex_unlock(&bcm_device_lock);
 
-	bcm_gpio_set_power(dev, false);
+	ret = bcm_gpio_set_power(dev, false);
+	if (ret)
+		dev_err(&pdev->dev, "Failed to power down\n");
 
 	return 0;
 }
@@ -939,6 +1082,7 @@ static const struct acpi_device_id bcm_acpi_match[] = {
 	{ "BCM2E65", (kernel_ulong_t)&acpi_bcm_int_last_gpios },
 	{ "BCM2E67", (kernel_ulong_t)&acpi_bcm_int_last_gpios },
 	{ "BCM2E71", (kernel_ulong_t)&acpi_bcm_int_last_gpios },
+	{ "BCM2E72", (kernel_ulong_t)&acpi_bcm_int_last_gpios },
 	{ "BCM2E7B", (kernel_ulong_t)&acpi_bcm_int_last_gpios },
 	{ "BCM2E7C", (kernel_ulong_t)&acpi_bcm_int_last_gpios },
 	{ "BCM2E7E", (kernel_ulong_t)&acpi_bcm_int_first_gpios },
@@ -993,7 +1137,9 @@ static int bcm_serdev_probe(struct serdev_device *serdev)
 	if (err)
 		return err;
 
-	bcm_gpio_set_power(bcmdev, false);
+	err = bcm_gpio_set_power(bcmdev, false);
+	if (err)
+		dev_err(&serdev->dev, "Failed to power down\n");
 
 	return hci_uart_register_device(&bcmdev->serdev_hu, &bcm_proto);
 }
diff --git a/drivers/bluetooth/hci_ll.c b/drivers/bluetooth/hci_ll.c
index e2c078d..1b4417a 100644
--- a/drivers/bluetooth/hci_ll.c
+++ b/drivers/bluetooth/hci_ll.c
@@ -53,9 +53,14 @@
 #include <net/bluetooth/bluetooth.h>
 #include <net/bluetooth/hci_core.h>
 #include <linux/gpio/consumer.h>
+#include <linux/nvmem-consumer.h>
 
 #include "hci_uart.h"
 
+/* Vendor-specific HCI commands */
+#define HCI_VS_WRITE_BD_ADDR			0xfc06
+#define HCI_VS_UPDATE_UART_HCI_BAUDRATE		0xff36
+
 /* HCILL commands */
 #define HCILL_GO_TO_SLEEP_IND	0x30
 #define HCILL_GO_TO_SLEEP_ACK	0x31
@@ -86,6 +91,7 @@ struct ll_device {
 	struct serdev_device *serdev;
 	struct gpio_desc *enable_gpio;
 	struct clk *ext_clk;
+	bdaddr_t bdaddr;
 };
 
 struct ll_struct {
@@ -620,7 +626,7 @@ static int download_firmware(struct ll_device *lldev)
 		case ACTION_SEND_COMMAND:	/* action send */
 			bt_dev_dbg(lldev->hu.hdev, "S");
 			cmd = (struct hci_command *)action_ptr;
-			if (cmd->opcode == 0xff36) {
+			if (cmd->opcode == HCI_VS_UPDATE_UART_HCI_BAUDRATE) {
 				/* ignore remote change
 				 * baud rate HCI VS command
 				 */
@@ -628,11 +634,11 @@ static int download_firmware(struct ll_device *lldev)
 				break;
 			}
 			if (cmd->prefix != 1)
-				bt_dev_dbg(lldev->hu.hdev, "command type %d\n", cmd->prefix);
+				bt_dev_dbg(lldev->hu.hdev, "command type %d", cmd->prefix);
 
 			skb = __hci_cmd_sync(lldev->hu.hdev, cmd->opcode, cmd->plen, &cmd->speed, HCI_INIT_TIMEOUT);
 			if (IS_ERR(skb)) {
-				bt_dev_err(lldev->hu.hdev, "send command failed\n");
+				bt_dev_err(lldev->hu.hdev, "send command failed");
 				err = PTR_ERR(skb);
 				goto out_rel_fw;
 			}
@@ -659,6 +665,24 @@ static int download_firmware(struct ll_device *lldev)
 	return err;
 }
 
+static int ll_set_bdaddr(struct hci_dev *hdev, const bdaddr_t *bdaddr)
+{
+	bdaddr_t bdaddr_swapped;
+	struct sk_buff *skb;
+
+	/* HCI_VS_WRITE_BD_ADDR (at least on a CC2560A chip) expects the BD
+	 * address to be MSB first, but bdaddr_t has the convention of being
+	 * LSB first.
+	 */
+	baswap(&bdaddr_swapped, bdaddr);
+	skb = __hci_cmd_sync(hdev, HCI_VS_WRITE_BD_ADDR, sizeof(bdaddr_t),
+			     &bdaddr_swapped, HCI_INIT_TIMEOUT);
+	if (!IS_ERR(skb))
+		kfree_skb(skb);
+
+	return PTR_ERR_OR_ZERO(skb);
+}
+
 static int ll_setup(struct hci_uart *hu)
 {
 	int err, retry = 3;
@@ -671,14 +695,20 @@ static int ll_setup(struct hci_uart *hu)
 
 	lldev = serdev_device_get_drvdata(serdev);
 
+	hu->hdev->set_bdaddr = ll_set_bdaddr;
+
 	serdev_device_set_flow_control(serdev, true);
 
 	do {
-		/* Configure BT_EN to HIGH state */
+		/* Reset the Bluetooth device */
 		gpiod_set_value_cansleep(lldev->enable_gpio, 0);
 		msleep(5);
 		gpiod_set_value_cansleep(lldev->enable_gpio, 1);
-		msleep(100);
+		err = serdev_device_wait_for_cts(serdev, true, 200);
+		if (err) {
+			bt_dev_err(hu->hdev, "Failed to get CTS");
+			return err;
+		}
 
 		err = download_firmware(lldev);
 		if (!err)
@@ -691,6 +721,18 @@ static int ll_setup(struct hci_uart *hu)
 	if (err)
 		return err;
 
+	/* Set BD address if one was specified at probe */
+	if (!bacmp(&lldev->bdaddr, BDADDR_NONE)) {
+		/* This means that there was an error getting the BD address
+		 * during probe, so mark the device as having a bad address.
+		 */
+		set_bit(HCI_QUIRK_INVALID_BDADDR, &hu->hdev->quirks);
+	} else if (bacmp(&lldev->bdaddr, BDADDR_ANY)) {
+		err = ll_set_bdaddr(hu->hdev, &lldev->bdaddr);
+		if (err)
+			set_bit(HCI_QUIRK_INVALID_BDADDR, &hu->hdev->quirks);
+	}
+
 	/* Operational speed if any */
 	if (hu->oper_speed)
 		speed = hu->oper_speed;
@@ -700,7 +742,12 @@ static int ll_setup(struct hci_uart *hu)
 		speed = 0;
 
 	if (speed) {
-		struct sk_buff *skb = __hci_cmd_sync(hu->hdev, 0xff36, sizeof(speed), &speed, HCI_INIT_TIMEOUT);
+		__le32 speed_le = cpu_to_le32(speed);
+		struct sk_buff *skb;
+
+		skb = __hci_cmd_sync(hu->hdev, HCI_VS_UPDATE_UART_HCI_BAUDRATE,
+				     sizeof(speed_le), &speed_le,
+				     HCI_INIT_TIMEOUT);
 		if (!IS_ERR(skb)) {
 			kfree_skb(skb);
 			serdev_device_set_baudrate(serdev, speed);
@@ -716,6 +763,7 @@ static int hci_ti_probe(struct serdev_device *serdev)
 {
 	struct hci_uart *hu;
 	struct ll_device *lldev;
+	struct nvmem_cell *bdaddr_cell;
 	u32 max_speed = 3000000;
 
 	lldev = devm_kzalloc(&serdev->dev, sizeof(struct ll_device), GFP_KERNEL);
@@ -737,6 +785,52 @@ static int hci_ti_probe(struct serdev_device *serdev)
 	of_property_read_u32(serdev->dev.of_node, "max-speed", &max_speed);
 	hci_uart_set_speeds(hu, 115200, max_speed);
 
+	/* optional BD address from nvram */
+	bdaddr_cell = nvmem_cell_get(&serdev->dev, "bd-address");
+	if (IS_ERR(bdaddr_cell)) {
+		int err = PTR_ERR(bdaddr_cell);
+
+		if (err == -EPROBE_DEFER)
+			return err;
+
+		/* ENOENT means there is no matching nvmem cell and ENOSYS
+		 * means that nvmem is not enabled in the kernel configuration.
+		 */
+		if (err != -ENOENT && err != -ENOSYS) {
+			/* If there was some other error, give userspace a
+			 * chance to fix the problem instead of failing to load
+			 * the driver. Using BDADDR_NONE as a flag that is
+			 * tested later in the setup function.
+			 */
+			dev_warn(&serdev->dev,
+				 "Failed to get \"bd-address\" nvmem cell (%d)\n",
+				 err);
+			bacpy(&lldev->bdaddr, BDADDR_NONE);
+		}
+	} else {
+		bdaddr_t *bdaddr;
+		size_t len;
+
+		bdaddr = nvmem_cell_read(bdaddr_cell, &len);
+		nvmem_cell_put(bdaddr_cell);
+		if (IS_ERR(bdaddr)) {
+			dev_err(&serdev->dev, "Failed to read nvmem bd-address\n");
+			return PTR_ERR(bdaddr);
+		}
+		if (len != sizeof(bdaddr_t)) {
+			dev_err(&serdev->dev, "Invalid nvmem bd-address length\n");
+			kfree(bdaddr);
+			return -EINVAL;
+		}
+
+		/* As per the device tree bindings, the value from nvmem is
+		 * expected to be MSB first, but in the kernel it is expected
+		 * that bdaddr_t is LSB first.
+		 */
+		baswap(&lldev->bdaddr, bdaddr);
+		kfree(bdaddr);
+	}
+
 	return hci_uart_register_device(hu, &llp);
 }
 
@@ -748,6 +842,7 @@ static void hci_ti_remove(struct serdev_device *serdev)
 }
 
 static const struct of_device_id hci_ti_of_match[] = {
+	{ .compatible = "ti,cc2560" },
 	{ .compatible = "ti,wl1271-st" },
 	{ .compatible = "ti,wl1273-st" },
 	{ .compatible = "ti,wl1281-st" },
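
The byte-order handling above is easy to get backwards: bdaddr_t is stored LSB first inside the kernel, while both the nvmem cell (per the device tree binding) and HCI_VS_WRITE_BD_ADDR expect MSB first, hence the two baswap() calls. A standalone mock of the swap with illustrative bytes:

#include <stdio.h>
#include <stdint.h>

typedef struct { uint8_t b[6]; } bdaddr_t;

/* Same semantics as the kernel's baswap(): reverse the 6 bytes. */
static void baswap(bdaddr_t *dst, const bdaddr_t *src)
{
	for (int i = 0; i < 6; i++)
		dst->b[i] = src->b[5 - i];
}

int main(void)
{
	/* 00:11:22:33:44:55 as it sits in nvmem (MSB first) */
	bdaddr_t nv = { { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 } };
	bdaddr_t kernel_order;

	baswap(&kernel_order, &nv);
	for (int i = 0; i < 6; i++)
		printf("%02x%s", kernel_order.b[i], i < 5 ? ":" : "\n");
	/* prints 55:44:33:22:11:00 -- LSB first, as bdaddr_t expects */
	return 0;
}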
diff --git a/drivers/bluetooth/hci_qca.c b/drivers/bluetooth/hci_qca.c
index bbd7db7..05ec530 100644
--- a/drivers/bluetooth/hci_qca.c
+++ b/drivers/bluetooth/hci_qca.c
@@ -932,6 +932,9 @@ static int qca_setup(struct hci_uart *hu)
 	if (!ret) {
 		set_bit(STATE_IN_BAND_SLEEP_ENABLED, &qca->flags);
 		qca_debugfs_init(hdev);
+	} else if (ret == -ENOENT) {
+		/* No patch/nvm-config found, run with original fw/config */
+		ret = 0;
 	}
 
 	/* Setup bdaddr */
diff --git a/drivers/bluetooth/hci_serdev.c b/drivers/bluetooth/hci_serdev.c
index 71664b2..e0e6461 100644
--- a/drivers/bluetooth/hci_serdev.c
+++ b/drivers/bluetooth/hci_serdev.c
@@ -303,6 +303,7 @@ int hci_uart_register_device(struct hci_uart *hu,
 	hci_set_drvdata(hdev, hu);
 
 	INIT_WORK(&hu->write_work, hci_uart_write_work);
+	percpu_init_rwsem(&hu->proto_lock);
 
 	/* Only when vendor specific setup callback is provided, consider
 	 * the manufacturer information valid. This avoids filling in the
diff --git a/drivers/hv/ring_buffer.c b/drivers/hv/ring_buffer.c
index 12eb8ca..50e0714 100644
--- a/drivers/hv/ring_buffer.c
+++ b/drivers/hv/ring_buffer.c
@@ -140,6 +140,29 @@ static u32 hv_copyto_ringbuffer(
 	return start_write_offset;
 }
 
+/*
+ * hv_get_ringbuffer_availbytes()
+ *
+ * Get the number of bytes available to read from and to write to
+ * the specified ring buffer.
+ */
+static void
+hv_get_ringbuffer_availbytes(const struct hv_ring_buffer_info *rbi,
+			     u32 *read, u32 *write)
+{
+	u32 read_loc, write_loc, dsize;
+
+	/* Capture the read/write indices before they can change */
+	read_loc = READ_ONCE(rbi->ring_buffer->read_index);
+	write_loc = READ_ONCE(rbi->ring_buffer->write_index);
+	dsize = rbi->ring_datasize;
+
+	*write = write_loc >= read_loc ? dsize - (write_loc - read_loc) :
+		read_loc - write_loc;
+	*read = dsize - *write;
+}
+
 /* Get various debug metrics for the specified ring buffer. */
 void hv_ringbuffer_get_debuginfo(const struct hv_ring_buffer_info *ring_info,
 				 struct hv_ring_buffer_debug_info *debug_info)
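
The arithmetic in hv_get_ringbuffer_availbytes() splits the data area into a writable region and a readable region around the two indices. A self-contained numeric check of that formula (the values are illustrative):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t dsize = 4096, read_loc = 100, write_loc = 300;
	uint32_t write_avail, read_avail;

	/* Same expression as hv_get_ringbuffer_availbytes() above. */
	write_avail = write_loc >= read_loc ?
		      dsize - (write_loc - read_loc) :
		      read_loc - write_loc;
	read_avail = dsize - write_avail;

	/* prints: write avail 3896, read avail 200 */
	printf("write avail %u, read avail %u\n", write_avail, read_avail);
	return 0;
}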
diff --git a/drivers/infiniband/hw/qedr/main.c b/drivers/infiniband/hw/qedr/main.c
index 50812b3..a9c3378 100644
--- a/drivers/infiniband/hw/qedr/main.c
+++ b/drivers/infiniband/hw/qedr/main.c
@@ -264,7 +264,7 @@ static int qedr_register_device(struct qedr_dev *dev)
 static int qedr_alloc_mem_sb(struct qedr_dev *dev,
 			     struct qed_sb_info *sb_info, u16 sb_id)
 {
-	struct status_block *sb_virt;
+	struct status_block_e4 *sb_virt;
 	dma_addr_t sb_phys;
 	int rc;
 
diff --git a/drivers/infiniband/hw/qedr/qedr_hsi_rdma.h b/drivers/infiniband/hw/qedr/qedr_hsi_rdma.h
index b7587f1..78b4900 100644
--- a/drivers/infiniband/hw/qedr/qedr_hsi_rdma.h
+++ b/drivers/infiniband/hw/qedr/qedr_hsi_rdma.h
@@ -164,6 +164,13 @@ struct rdma_srq_sge {
 	__le32 l_key;
 };
 
+/* Rdma doorbell data for flags update */
+struct rdma_pwm_flags_data {
+	__le16 icid; /* internal CID */
+	u8 agg_flags; /* aggregative flags */
+	u8 reserved;
+};
+
 /* Rdma doorbell data for SQ and RQ */
 struct rdma_pwm_val16_data {
 	__le16 icid;
@@ -180,12 +187,16 @@ struct rdma_pwm_val32_data {
 	__le16 icid;
 	u8 agg_flags;
 	u8 params;
-#define RDMA_PWM_VAL32_DATA_AGG_CMD_MASK    0x3
-#define RDMA_PWM_VAL32_DATA_AGG_CMD_SHIFT   0
-#define RDMA_PWM_VAL32_DATA_BYPASS_EN_MASK  0x1
-#define RDMA_PWM_VAL32_DATA_BYPASS_EN_SHIFT 2
-#define RDMA_PWM_VAL32_DATA_RESERVED_MASK   0x1F
-#define RDMA_PWM_VAL32_DATA_RESERVED_SHIFT  3
+#define RDMA_PWM_VAL32_DATA_AGG_CMD_MASK		0x3
+#define RDMA_PWM_VAL32_DATA_AGG_CMD_SHIFT		0
+#define RDMA_PWM_VAL32_DATA_BYPASS_EN_MASK		0x1
+#define RDMA_PWM_VAL32_DATA_BYPASS_EN_SHIFT		2
+#define RDMA_PWM_VAL32_DATA_CONN_TYPE_IS_IWARP_MASK	0x1
+#define RDMA_PWM_VAL32_DATA_CONN_TYPE_IS_IWARP_SHIFT	3
+#define RDMA_PWM_VAL32_DATA_SET_16B_VAL_MASK		0x1
+#define RDMA_PWM_VAL32_DATA_SET_16B_VAL_SHIFT		4
+#define RDMA_PWM_VAL32_DATA_RESERVED_MASK		0x7
+#define RDMA_PWM_VAL32_DATA_RESERVED_SHIFT		5
 	__le32 value;
 };
 
@@ -478,23 +489,25 @@ struct rdma_sq_fmr_wqe {
 	__le16 dif_app_tag_mask;
 	__le16 dif_runt_crc_value;
 	__le16 dif_flags;
-#define RDMA_SQ_FMR_WQE_DIF_IO_DIRECTION_FLG_MASK    0x1
-#define RDMA_SQ_FMR_WQE_DIF_IO_DIRECTION_FLG_SHIFT   0
-#define RDMA_SQ_FMR_WQE_DIF_BLOCK_SIZE_MASK          0x1
-#define RDMA_SQ_FMR_WQE_DIF_BLOCK_SIZE_SHIFT         1
-#define RDMA_SQ_FMR_WQE_DIF_RUNT_VALID_FLG_MASK      0x1
-#define RDMA_SQ_FMR_WQE_DIF_RUNT_VALID_FLG_SHIFT     2
-#define RDMA_SQ_FMR_WQE_DIF_VALIDATE_CRC_GUARD_MASK  0x1
-#define RDMA_SQ_FMR_WQE_DIF_VALIDATE_CRC_GUARD_SHIFT 3
-#define RDMA_SQ_FMR_WQE_DIF_VALIDATE_REF_TAG_MASK    0x1
-#define RDMA_SQ_FMR_WQE_DIF_VALIDATE_REF_TAG_SHIFT   4
-#define RDMA_SQ_FMR_WQE_DIF_VALIDATE_APP_TAG_MASK    0x1
-#define RDMA_SQ_FMR_WQE_DIF_VALIDATE_APP_TAG_SHIFT   5
-#define RDMA_SQ_FMR_WQE_DIF_CRC_SEED_MASK            0x1
-#define RDMA_SQ_FMR_WQE_DIF_CRC_SEED_SHIFT           6
-#define RDMA_SQ_FMR_WQE_RESERVED4_MASK               0x1FF
-#define RDMA_SQ_FMR_WQE_RESERVED4_SHIFT              7
-	__le32 Reserved5;
+#define RDMA_SQ_FMR_WQE_DIF_IO_DIRECTION_FLG_MASK	0x1
+#define RDMA_SQ_FMR_WQE_DIF_IO_DIRECTION_FLG_SHIFT	0
+#define RDMA_SQ_FMR_WQE_DIF_BLOCK_SIZE_MASK		0x1
+#define RDMA_SQ_FMR_WQE_DIF_BLOCK_SIZE_SHIFT		1
+#define RDMA_SQ_FMR_WQE_DIF_RUNT_VALID_FLG_MASK		0x1
+#define RDMA_SQ_FMR_WQE_DIF_RUNT_VALID_FLG_SHIFT	2
+#define RDMA_SQ_FMR_WQE_DIF_VALIDATE_CRC_GUARD_MASK	0x1
+#define RDMA_SQ_FMR_WQE_DIF_VALIDATE_CRC_GUARD_SHIFT	3
+#define RDMA_SQ_FMR_WQE_DIF_VALIDATE_REF_TAG_MASK	0x1
+#define RDMA_SQ_FMR_WQE_DIF_VALIDATE_REF_TAG_SHIFT	4
+#define RDMA_SQ_FMR_WQE_DIF_VALIDATE_APP_TAG_MASK	0x1
+#define RDMA_SQ_FMR_WQE_DIF_VALIDATE_APP_TAG_SHIFT	5
+#define RDMA_SQ_FMR_WQE_DIF_CRC_SEED_MASK		0x1
+#define RDMA_SQ_FMR_WQE_DIF_CRC_SEED_SHIFT		6
+#define RDMA_SQ_FMR_WQE_DIF_RX_REF_TAG_CONST_MASK	0x1
+#define RDMA_SQ_FMR_WQE_DIF_RX_REF_TAG_CONST_SHIFT	7
+#define RDMA_SQ_FMR_WQE_RESERVED4_MASK			0xFF
+#define RDMA_SQ_FMR_WQE_RESERVED4_SHIFT			8
+	__le32 reserved5;
 };
 
 /* First element (16 bytes) of fmr wqe */
@@ -558,23 +571,25 @@ struct rdma_sq_fmr_wqe_3rd {
 	__le16 dif_app_tag_mask;
 	__le16 dif_runt_crc_value;
 	__le16 dif_flags;
-#define RDMA_SQ_FMR_WQE_3RD_DIF_IO_DIRECTION_FLG_MASK    0x1
-#define RDMA_SQ_FMR_WQE_3RD_DIF_IO_DIRECTION_FLG_SHIFT   0
-#define RDMA_SQ_FMR_WQE_3RD_DIF_BLOCK_SIZE_MASK          0x1
-#define RDMA_SQ_FMR_WQE_3RD_DIF_BLOCK_SIZE_SHIFT         1
-#define RDMA_SQ_FMR_WQE_3RD_DIF_RUNT_VALID_FLG_MASK      0x1
-#define RDMA_SQ_FMR_WQE_3RD_DIF_RUNT_VALID_FLG_SHIFT     2
-#define RDMA_SQ_FMR_WQE_3RD_DIF_VALIDATE_CRC_GUARD_MASK  0x1
-#define RDMA_SQ_FMR_WQE_3RD_DIF_VALIDATE_CRC_GUARD_SHIFT 3
-#define RDMA_SQ_FMR_WQE_3RD_DIF_VALIDATE_REF_TAG_MASK    0x1
-#define RDMA_SQ_FMR_WQE_3RD_DIF_VALIDATE_REF_TAG_SHIFT   4
-#define RDMA_SQ_FMR_WQE_3RD_DIF_VALIDATE_APP_TAG_MASK    0x1
-#define RDMA_SQ_FMR_WQE_3RD_DIF_VALIDATE_APP_TAG_SHIFT   5
-#define RDMA_SQ_FMR_WQE_3RD_DIF_CRC_SEED_MASK            0x1
-#define RDMA_SQ_FMR_WQE_3RD_DIF_CRC_SEED_SHIFT           6
-#define RDMA_SQ_FMR_WQE_3RD_RESERVED4_MASK               0x1FF
-#define RDMA_SQ_FMR_WQE_3RD_RESERVED4_SHIFT              7
-	__le32 Reserved5;
+#define RDMA_SQ_FMR_WQE_3RD_DIF_IO_DIRECTION_FLG_MASK		0x1
+#define RDMA_SQ_FMR_WQE_3RD_DIF_IO_DIRECTION_FLG_SHIFT		0
+#define RDMA_SQ_FMR_WQE_3RD_DIF_BLOCK_SIZE_MASK			0x1
+#define RDMA_SQ_FMR_WQE_3RD_DIF_BLOCK_SIZE_SHIFT		1
+#define RDMA_SQ_FMR_WQE_3RD_DIF_RUNT_VALID_FLG_MASK		0x1
+#define RDMA_SQ_FMR_WQE_3RD_DIF_RUNT_VALID_FLG_SHIFT		2
+#define RDMA_SQ_FMR_WQE_3RD_DIF_VALIDATE_CRC_GUARD_MASK		0x1
+#define RDMA_SQ_FMR_WQE_3RD_DIF_VALIDATE_CRC_GUARD_SHIFT	3
+#define RDMA_SQ_FMR_WQE_3RD_DIF_VALIDATE_REF_TAG_MASK		0x1
+#define RDMA_SQ_FMR_WQE_3RD_DIF_VALIDATE_REF_TAG_SHIFT		4
+#define RDMA_SQ_FMR_WQE_3RD_DIF_VALIDATE_APP_TAG_MASK		0x1
+#define RDMA_SQ_FMR_WQE_3RD_DIF_VALIDATE_APP_TAG_SHIFT		5
+#define RDMA_SQ_FMR_WQE_3RD_DIF_CRC_SEED_MASK			0x1
+#define RDMA_SQ_FMR_WQE_3RD_DIF_CRC_SEED_SHIFT			6
+#define RDMA_SQ_FMR_WQE_3RD_DIF_RX_REF_TAG_CONST_MASK		0x1
+#define RDMA_SQ_FMR_WQE_3RD_DIF_RX_REF_TAG_CONST_SHIFT		7
+#define RDMA_SQ_FMR_WQE_3RD_RESERVED4_MASK			0xFF
+#define RDMA_SQ_FMR_WQE_3RD_RESERVED4_SHIFT			8
+	__le32 reserved5;
 };
 
 struct rdma_sq_local_inv_wqe {
@@ -606,20 +621,22 @@ struct rdma_sq_rdma_wqe {
 	__le32 xrc_srq;
 	u8 req_type;
 	u8 flags;
-#define RDMA_SQ_RDMA_WQE_COMP_FLG_MASK                  0x1
-#define RDMA_SQ_RDMA_WQE_COMP_FLG_SHIFT                 0
-#define RDMA_SQ_RDMA_WQE_RD_FENCE_FLG_MASK              0x1
-#define RDMA_SQ_RDMA_WQE_RD_FENCE_FLG_SHIFT             1
-#define RDMA_SQ_RDMA_WQE_INV_FENCE_FLG_MASK             0x1
-#define RDMA_SQ_RDMA_WQE_INV_FENCE_FLG_SHIFT            2
-#define RDMA_SQ_RDMA_WQE_SE_FLG_MASK                    0x1
-#define RDMA_SQ_RDMA_WQE_SE_FLG_SHIFT                   3
-#define RDMA_SQ_RDMA_WQE_INLINE_FLG_MASK                0x1
-#define RDMA_SQ_RDMA_WQE_INLINE_FLG_SHIFT               4
-#define RDMA_SQ_RDMA_WQE_DIF_ON_HOST_FLG_MASK           0x1
-#define RDMA_SQ_RDMA_WQE_DIF_ON_HOST_FLG_SHIFT          5
-#define RDMA_SQ_RDMA_WQE_RESERVED0_MASK                 0x3
-#define RDMA_SQ_RDMA_WQE_RESERVED0_SHIFT                6
+#define RDMA_SQ_RDMA_WQE_COMP_FLG_MASK		0x1
+#define RDMA_SQ_RDMA_WQE_COMP_FLG_SHIFT		0
+#define RDMA_SQ_RDMA_WQE_RD_FENCE_FLG_MASK	0x1
+#define RDMA_SQ_RDMA_WQE_RD_FENCE_FLG_SHIFT	1
+#define RDMA_SQ_RDMA_WQE_INV_FENCE_FLG_MASK	0x1
+#define RDMA_SQ_RDMA_WQE_INV_FENCE_FLG_SHIFT	2
+#define RDMA_SQ_RDMA_WQE_SE_FLG_MASK		0x1
+#define RDMA_SQ_RDMA_WQE_SE_FLG_SHIFT		3
+#define RDMA_SQ_RDMA_WQE_INLINE_FLG_MASK	0x1
+#define RDMA_SQ_RDMA_WQE_INLINE_FLG_SHIFT	4
+#define RDMA_SQ_RDMA_WQE_DIF_ON_HOST_FLG_MASK	0x1
+#define RDMA_SQ_RDMA_WQE_DIF_ON_HOST_FLG_SHIFT	5
+#define RDMA_SQ_RDMA_WQE_READ_INV_FLG_MASK	0x1
+#define RDMA_SQ_RDMA_WQE_READ_INV_FLG_SHIFT	6
+#define RDMA_SQ_RDMA_WQE_RESERVED0_MASK		0x1
+#define RDMA_SQ_RDMA_WQE_RESERVED0_SHIFT	7
 	u8 wqe_size;
 	u8 prev_wqe_size;
 	struct regpair remote_va;
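
For reference, MASK/SHIFT pairs like the ones above are normally consumed
through the qed-style GET_FIELD()/SET_FIELD() helpers (see
include/linux/qed/common_hsi.h). A minimal sketch, using the new
read-with-invalidate flag as an example:

	u8 flags = 0;

	/* sets bit 7..shift 6 region: (1 & _MASK) << _SHIFT */
	SET_FIELD(flags, RDMA_SQ_RDMA_WQE_READ_INV_FLG, 1);

	/* and reads it back: (flags >> _SHIFT) & _MASK */
	if (GET_FIELD(flags, RDMA_SQ_RDMA_WQE_READ_INV_FLG))
		pr_debug("read-with-invalidate requested\n");
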
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index 0936da5..944ec3c 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -497,4 +497,15 @@
 
 source "drivers/net/hyperv/Kconfig"
 
+config NETDEVSIM
+	tristate "Simulated networking device"
+	depends on DEBUG_FS
+	help
+	  This driver is a developer testing tool and software model that can
+	  be used to test various control path networking APIs, especially
+	  HW-offload related.
+
+	  To compile this driver as a module, choose M here: the module
+	  will be called netdevsim.
+
 endif # NETDEVICES
diff --git a/drivers/net/Makefile b/drivers/net/Makefile
index 766f62d..04c3b74 100644
--- a/drivers/net/Makefile
+++ b/drivers/net/Makefile
@@ -78,3 +78,4 @@
 
 thunderbolt-net-y += thunderbolt.o
 obj-$(CONFIG_THUNDERBOLT_NET) += thunderbolt-net.o
+obj-$(CONFIG_NETDEVSIM) += netdevsim/
diff --git a/drivers/net/bonding/bond_options.c b/drivers/net/bonding/bond_options.c
index 8a9b085..58c705f 100644
--- a/drivers/net/bonding/bond_options.c
+++ b/drivers/net/bonding/bond_options.c
@@ -1431,13 +1431,9 @@ static int bond_option_ad_actor_system_set(struct bonding *bond,
 {
 	u8 macaddr[ETH_ALEN];
 	u8 *mac;
-	int i;
 
 	if (newval->string) {
-		i = sscanf(newval->string, "%hhx:%hhx:%hhx:%hhx:%hhx:%hhx",
-			   &macaddr[0], &macaddr[1], &macaddr[2],
-			   &macaddr[3], &macaddr[4], &macaddr[5]);
-		if (i != ETH_ALEN)
+		if (!mac_pton(newval->string, macaddr))
 			goto err;
 		mac = macaddr;
 	} else {
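
mac_pton() (lib/net_utils.c) validates the full xx:xx:xx:xx:xx:xx format
and fills all ETH_ALEN bytes in one call, returning false on malformed
input, which is what makes the open-coded sscanf() above redundant. A
minimal usage sketch (example_set_mac is a hypothetical caller):

	static int example_set_mac(const char *str, u8 *out)
	{
		/* false on bad length, bad separators or bad hex digits */
		if (!mac_pton(str, out))
			return -EINVAL;
		return 0;
	}
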
diff --git a/drivers/net/can/c_can/c_can_pci.c b/drivers/net/can/c_can/c_can_pci.c
index d065c0e..406b484 100644
--- a/drivers/net/can/c_can/c_can_pci.c
+++ b/drivers/net/can/c_can/c_can_pci.c
@@ -251,14 +251,14 @@ static void c_can_pci_remove(struct pci_dev *pdev)
 	pci_disable_device(pdev);
 }
 
-static struct c_can_pci_data c_can_sta2x11= {
+static const struct c_can_pci_data c_can_sta2x11 = {
 	.type = BOSCH_C_CAN,
 	.reg_align = C_CAN_REG_ALIGN_32,
 	.freq = 52000000, /* 52 MHz */
 	.bar = 0,
 };
 
-static struct c_can_pci_data c_can_pch = {
+static const struct c_can_pci_data c_can_pch = {
 	.type = BOSCH_C_CAN,
 	.reg_align = C_CAN_REG_32,
 	.freq = 50000000, /* 50 MHz */
diff --git a/drivers/net/can/flexcan.c b/drivers/net/can/flexcan.c
index 760d2c0..634c51e 100644
--- a/drivers/net/can/flexcan.c
+++ b/drivers/net/can/flexcan.c
@@ -190,6 +190,7 @@
  *   MX53  FlexCAN2  03.00.00.00    yes        no        no       no        no
  *   MX6s  FlexCAN3  10.00.12.00    yes       yes        no       no       yes
  *   VF610 FlexCAN3  ?               no       yes        no      yes       yes?
+ * LS1021A FlexCAN2  03.00.04.00     no       yes        no       no       yes
  *
  * Some SOCs do not have the RX_WARN & TX_WARN interrupt line connected.
  */
@@ -279,6 +280,10 @@ struct flexcan_priv {
 	struct clk *clk_per;
 	const struct flexcan_devtype_data *devtype_data;
 	struct regulator *reg_xceiver;
+
+	/* Read and Write APIs */
+	u32 (*read)(void __iomem *addr);
+	void (*write)(u32 val, void __iomem *addr);
 };
 
 static const struct flexcan_devtype_data fsl_p1010_devtype_data = {
@@ -301,6 +306,12 @@ static const struct flexcan_devtype_data fsl_vf610_devtype_data = {
 		FLEXCAN_QUIRK_BROKEN_PERR_STATE,
 };
 
+static const struct flexcan_devtype_data fsl_ls1021a_r2_devtype_data = {
+	.quirks = FLEXCAN_QUIRK_DISABLE_RXFG | FLEXCAN_QUIRK_ENABLE_EACEN_RRS |
+		FLEXCAN_QUIRK_DISABLE_MECR | FLEXCAN_QUIRK_BROKEN_PERR_STATE |
+		FLEXCAN_QUIRK_USE_OFF_TIMESTAMP,
+};
+
 static const struct can_bittiming_const flexcan_bittiming_const = {
 	.name = DRV_NAME,
 	.tseg1_min = 4,
@@ -313,39 +324,45 @@ static const struct can_bittiming_const flexcan_bittiming_const = {
 	.brp_inc = 1,
 };
 
-/* Abstract off the read/write for arm versus ppc. This
- * assumes that PPC uses big-endian registers and everything
- * else uses little-endian registers, independent of CPU
- * endianness.
+/* The FlexCAN module is essentially modelled as a little-endian IP in
+ * most SoCs, i.e. the registers as well as the message buffer areas are
+ * implemented in a little-endian fashion.
+ *
+ * However, there are some SoCs (e.g. LS1021A) which implement the FlexCAN
+ * module in a big-endian fashion (i.e. the registers as well as the
+ * message buffer areas are implemented in a big-endian way).
+ *
+ * In addition, the FlexCAN module can be found on SoCs having ARM or
+ * PPC cores. So, we need to abstract off the register read/write
+ * functions, ensuring that these cater to all the combinations of module
+ * endianness and underlying CPU endianness.
  */
-#if defined(CONFIG_PPC)
-static inline u32 flexcan_read(void __iomem *addr)
+static inline u32 flexcan_read_be(void __iomem *addr)
 {
-	return in_be32(addr);
+	return ioread32be(addr);
 }
 
-static inline void flexcan_write(u32 val, void __iomem *addr)
+static inline void flexcan_write_be(u32 val, void __iomem *addr)
 {
-	out_be32(addr, val);
-}
-#else
-static inline u32 flexcan_read(void __iomem *addr)
-{
-	return readl(addr);
+	iowrite32be(val, addr);
 }
 
-static inline void flexcan_write(u32 val, void __iomem *addr)
+static inline u32 flexcan_read_le(void __iomem *addr)
 {
-	writel(val, addr);
+	return ioread32(addr);
 }
-#endif
+
+static inline void flexcan_write_le(u32 val, void __iomem *addr)
+{
+	iowrite32(val, addr);
+}
 
 static inline void flexcan_error_irq_enable(const struct flexcan_priv *priv)
 {
 	struct flexcan_regs __iomem *regs = priv->regs;
 	u32 reg_ctrl = (priv->reg_ctrl_default | FLEXCAN_CTRL_ERR_MSK);
 
-	flexcan_write(reg_ctrl, &regs->ctrl);
+	priv->write(reg_ctrl, &regs->ctrl);
 }
 
 static inline void flexcan_error_irq_disable(const struct flexcan_priv *priv)
@@ -353,7 +370,7 @@ static inline void flexcan_error_irq_disable(const struct flexcan_priv *priv)
 	struct flexcan_regs __iomem *regs = priv->regs;
 	u32 reg_ctrl = (priv->reg_ctrl_default & ~FLEXCAN_CTRL_ERR_MSK);
 
-	flexcan_write(reg_ctrl, &regs->ctrl);
+	priv->write(reg_ctrl, &regs->ctrl);
 }
 
 static inline int flexcan_transceiver_enable(const struct flexcan_priv *priv)
@@ -378,14 +395,14 @@ static int flexcan_chip_enable(struct flexcan_priv *priv)
 	unsigned int timeout = FLEXCAN_TIMEOUT_US / 10;
 	u32 reg;
 
-	reg = flexcan_read(&regs->mcr);
+	reg = priv->read(&regs->mcr);
 	reg &= ~FLEXCAN_MCR_MDIS;
-	flexcan_write(reg, &regs->mcr);
+	priv->write(reg, &regs->mcr);
 
-	while (timeout-- && (flexcan_read(&regs->mcr) & FLEXCAN_MCR_LPM_ACK))
+	while (timeout-- && (priv->read(&regs->mcr) & FLEXCAN_MCR_LPM_ACK))
 		udelay(10);
 
-	if (flexcan_read(&regs->mcr) & FLEXCAN_MCR_LPM_ACK)
+	if (priv->read(&regs->mcr) & FLEXCAN_MCR_LPM_ACK)
 		return -ETIMEDOUT;
 
 	return 0;
@@ -397,14 +414,14 @@ static int flexcan_chip_disable(struct flexcan_priv *priv)
 	unsigned int timeout = FLEXCAN_TIMEOUT_US / 10;
 	u32 reg;
 
-	reg = flexcan_read(&regs->mcr);
+	reg = priv->read(&regs->mcr);
 	reg |= FLEXCAN_MCR_MDIS;
-	flexcan_write(reg, &regs->mcr);
+	priv->write(reg, &regs->mcr);
 
-	while (timeout-- && !(flexcan_read(&regs->mcr) & FLEXCAN_MCR_LPM_ACK))
+	while (timeout-- && !(priv->read(&regs->mcr) & FLEXCAN_MCR_LPM_ACK))
 		udelay(10);
 
-	if (!(flexcan_read(&regs->mcr) & FLEXCAN_MCR_LPM_ACK))
+	if (!(priv->read(&regs->mcr) & FLEXCAN_MCR_LPM_ACK))
 		return -ETIMEDOUT;
 
 	return 0;
@@ -416,14 +433,14 @@ static int flexcan_chip_freeze(struct flexcan_priv *priv)
 	unsigned int timeout = 1000 * 1000 * 10 / priv->can.bittiming.bitrate;
 	u32 reg;
 
-	reg = flexcan_read(&regs->mcr);
+	reg = priv->read(&regs->mcr);
 	reg |= FLEXCAN_MCR_HALT;
-	flexcan_write(reg, &regs->mcr);
+	priv->write(reg, &regs->mcr);
 
-	while (timeout-- && !(flexcan_read(&regs->mcr) & FLEXCAN_MCR_FRZ_ACK))
+	while (timeout-- && !(priv->read(&regs->mcr) & FLEXCAN_MCR_FRZ_ACK))
 		udelay(100);
 
-	if (!(flexcan_read(&regs->mcr) & FLEXCAN_MCR_FRZ_ACK))
+	if (!(priv->read(&regs->mcr) & FLEXCAN_MCR_FRZ_ACK))
 		return -ETIMEDOUT;
 
 	return 0;
@@ -435,14 +452,14 @@ static int flexcan_chip_unfreeze(struct flexcan_priv *priv)
 	unsigned int timeout = FLEXCAN_TIMEOUT_US / 10;
 	u32 reg;
 
-	reg = flexcan_read(&regs->mcr);
+	reg = priv->read(&regs->mcr);
 	reg &= ~FLEXCAN_MCR_HALT;
-	flexcan_write(reg, &regs->mcr);
+	priv->write(reg, &regs->mcr);
 
-	while (timeout-- && (flexcan_read(&regs->mcr) & FLEXCAN_MCR_FRZ_ACK))
+	while (timeout-- && (priv->read(&regs->mcr) & FLEXCAN_MCR_FRZ_ACK))
 		udelay(10);
 
-	if (flexcan_read(&regs->mcr) & FLEXCAN_MCR_FRZ_ACK)
+	if (priv->read(&regs->mcr) & FLEXCAN_MCR_FRZ_ACK)
 		return -ETIMEDOUT;
 
 	return 0;
@@ -453,11 +470,11 @@ static int flexcan_chip_softreset(struct flexcan_priv *priv)
 	struct flexcan_regs __iomem *regs = priv->regs;
 	unsigned int timeout = FLEXCAN_TIMEOUT_US / 10;
 
-	flexcan_write(FLEXCAN_MCR_SOFTRST, &regs->mcr);
-	while (timeout-- && (flexcan_read(&regs->mcr) & FLEXCAN_MCR_SOFTRST))
+	priv->write(FLEXCAN_MCR_SOFTRST, &regs->mcr);
+	while (timeout-- && (priv->read(&regs->mcr) & FLEXCAN_MCR_SOFTRST))
 		udelay(10);
 
-	if (flexcan_read(&regs->mcr) & FLEXCAN_MCR_SOFTRST)
+	if (priv->read(&regs->mcr) & FLEXCAN_MCR_SOFTRST)
 		return -ETIMEDOUT;
 
 	return 0;
@@ -468,7 +485,7 @@ static int __flexcan_get_berr_counter(const struct net_device *dev,
 {
 	const struct flexcan_priv *priv = netdev_priv(dev);
 	struct flexcan_regs __iomem *regs = priv->regs;
-	u32 reg = flexcan_read(&regs->ecr);
+	u32 reg = priv->read(&regs->ecr);
 
 	bec->txerr = (reg >> 0) & 0xff;
 	bec->rxerr = (reg >> 8) & 0xff;
@@ -524,24 +541,24 @@ static int flexcan_start_xmit(struct sk_buff *skb, struct net_device *dev)
 
 	if (cf->can_dlc > 0) {
 		data = be32_to_cpup((__be32 *)&cf->data[0]);
-		flexcan_write(data, &priv->tx_mb->data[0]);
+		priv->write(data, &priv->tx_mb->data[0]);
 	}
 	if (cf->can_dlc > 4) {
 		data = be32_to_cpup((__be32 *)&cf->data[4]);
-		flexcan_write(data, &priv->tx_mb->data[1]);
+		priv->write(data, &priv->tx_mb->data[1]);
 	}
 
 	can_put_echo_skb(skb, dev, 0);
 
-	flexcan_write(can_id, &priv->tx_mb->can_id);
-	flexcan_write(ctrl, &priv->tx_mb->can_ctrl);
+	priv->write(can_id, &priv->tx_mb->can_id);
+	priv->write(ctrl, &priv->tx_mb->can_ctrl);
 
 	/* Errata ERR005829 step8:
 	 * Write twice INACTIVE(0x8) code to first MB.
 	 */
-	flexcan_write(FLEXCAN_MB_CODE_TX_INACTIVE,
+	priv->write(FLEXCAN_MB_CODE_TX_INACTIVE,
 		      &priv->tx_mb_reserved->can_ctrl);
-	flexcan_write(FLEXCAN_MB_CODE_TX_INACTIVE,
+	priv->write(FLEXCAN_MB_CODE_TX_INACTIVE,
 		      &priv->tx_mb_reserved->can_ctrl);
 
 	return NETDEV_TX_OK;
@@ -660,7 +677,7 @@ static unsigned int flexcan_mailbox_read(struct can_rx_offload *offload,
 		u32 code;
 
 		do {
-			reg_ctrl = flexcan_read(&mb->can_ctrl);
+			reg_ctrl = priv->read(&mb->can_ctrl);
 		} while (reg_ctrl & FLEXCAN_MB_CODE_RX_BUSY_BIT);
 
 		/* is this MB empty? */
@@ -675,17 +692,17 @@ static unsigned int flexcan_mailbox_read(struct can_rx_offload *offload,
 			offload->dev->stats.rx_errors++;
 		}
 	} else {
-		reg_iflag1 = flexcan_read(&regs->iflag1);
+		reg_iflag1 = priv->read(&regs->iflag1);
 		if (!(reg_iflag1 & FLEXCAN_IFLAG_RX_FIFO_AVAILABLE))
 			return 0;
 
-		reg_ctrl = flexcan_read(&mb->can_ctrl);
+		reg_ctrl = priv->read(&mb->can_ctrl);
 	}
 
 	/* increase timestamp to full 32 bit */
 	*timestamp = reg_ctrl << 16;
 
-	reg_id = flexcan_read(&mb->can_id);
+	reg_id = priv->read(&mb->can_id);
 	if (reg_ctrl & FLEXCAN_MB_CNT_IDE)
 		cf->can_id = ((reg_id >> 0) & CAN_EFF_MASK) | CAN_EFF_FLAG;
 	else
@@ -695,19 +712,19 @@ static unsigned int flexcan_mailbox_read(struct can_rx_offload *offload,
 		cf->can_id |= CAN_RTR_FLAG;
 	cf->can_dlc = get_can_dlc((reg_ctrl >> 16) & 0xf);
 
-	*(__be32 *)(cf->data + 0) = cpu_to_be32(flexcan_read(&mb->data[0]));
-	*(__be32 *)(cf->data + 4) = cpu_to_be32(flexcan_read(&mb->data[1]));
+	*(__be32 *)(cf->data + 0) = cpu_to_be32(priv->read(&mb->data[0]));
+	*(__be32 *)(cf->data + 4) = cpu_to_be32(priv->read(&mb->data[1]));
 
 	/* mark as read */
 	if (priv->devtype_data->quirks & FLEXCAN_QUIRK_USE_OFF_TIMESTAMP) {
 		/* Clear IRQ */
 		if (n < 32)
-			flexcan_write(BIT(n), &regs->iflag1);
+			priv->write(BIT(n), &regs->iflag1);
 		else
-			flexcan_write(BIT(n - 32), &regs->iflag2);
+			priv->write(BIT(n - 32), &regs->iflag2);
 	} else {
-		flexcan_write(FLEXCAN_IFLAG_RX_FIFO_AVAILABLE, &regs->iflag1);
-		flexcan_read(&regs->timer);
+		priv->write(FLEXCAN_IFLAG_RX_FIFO_AVAILABLE, &regs->iflag1);
+		priv->read(&regs->timer);
 	}
 
 	return 1;
@@ -719,8 +736,8 @@ static inline u64 flexcan_read_reg_iflag_rx(struct flexcan_priv *priv)
 	struct flexcan_regs __iomem *regs = priv->regs;
 	u32 iflag1, iflag2;
 
-	iflag2 = flexcan_read(&regs->iflag2) & priv->reg_imask2_default;
-	iflag1 = flexcan_read(&regs->iflag1) & priv->reg_imask1_default &
+	iflag2 = priv->read(&regs->iflag2) & priv->reg_imask2_default;
+	iflag1 = priv->read(&regs->iflag1) & priv->reg_imask1_default &
 		~FLEXCAN_IFLAG_MB(priv->tx_mb_idx);
 
 	return (u64)iflag2 << 32 | iflag1;
@@ -736,7 +753,7 @@ static irqreturn_t flexcan_irq(int irq, void *dev_id)
 	u32 reg_iflag1, reg_esr;
 	enum can_state last_state = priv->can.state;
 
-	reg_iflag1 = flexcan_read(&regs->iflag1);
+	reg_iflag1 = priv->read(&regs->iflag1);
 
 	/* reception interrupt */
 	if (priv->devtype_data->quirks & FLEXCAN_QUIRK_USE_OFF_TIMESTAMP) {
@@ -759,7 +776,8 @@ static irqreturn_t flexcan_irq(int irq, void *dev_id)
 		/* FIFO overflow interrupt */
 		if (reg_iflag1 & FLEXCAN_IFLAG_RX_FIFO_OVERFLOW) {
 			handled = IRQ_HANDLED;
-			flexcan_write(FLEXCAN_IFLAG_RX_FIFO_OVERFLOW, &regs->iflag1);
+			priv->write(FLEXCAN_IFLAG_RX_FIFO_OVERFLOW,
+				    &regs->iflag1);
 			dev->stats.rx_over_errors++;
 			dev->stats.rx_errors++;
 		}
@@ -773,18 +791,18 @@ static irqreturn_t flexcan_irq(int irq, void *dev_id)
 		can_led_event(dev, CAN_LED_EVENT_TX);
 
 		/* after sending a RTR frame MB is in RX mode */
-		flexcan_write(FLEXCAN_MB_CODE_TX_INACTIVE,
-			      &priv->tx_mb->can_ctrl);
-		flexcan_write(FLEXCAN_IFLAG_MB(priv->tx_mb_idx), &regs->iflag1);
+		priv->write(FLEXCAN_MB_CODE_TX_INACTIVE,
+			    &priv->tx_mb->can_ctrl);
+		priv->write(FLEXCAN_IFLAG_MB(priv->tx_mb_idx), &regs->iflag1);
 		netif_wake_queue(dev);
 	}
 
-	reg_esr = flexcan_read(&regs->esr);
+	reg_esr = priv->read(&regs->esr);
 
 	/* ACK all bus error and state change IRQ sources */
 	if (reg_esr & FLEXCAN_ESR_ALL_INT) {
 		handled = IRQ_HANDLED;
-		flexcan_write(reg_esr & FLEXCAN_ESR_ALL_INT, &regs->esr);
+		priv->write(reg_esr & FLEXCAN_ESR_ALL_INT, &regs->esr);
 	}
 
 	/* state change interrupt or broken error state quirk fix is enabled */
@@ -846,7 +864,7 @@ static void flexcan_set_bittiming(struct net_device *dev)
 	struct flexcan_regs __iomem *regs = priv->regs;
 	u32 reg;
 
-	reg = flexcan_read(&regs->ctrl);
+	reg = priv->read(&regs->ctrl);
 	reg &= ~(FLEXCAN_CTRL_PRESDIV(0xff) |
 		 FLEXCAN_CTRL_RJW(0x3) |
 		 FLEXCAN_CTRL_PSEG1(0x7) |
@@ -870,11 +888,11 @@ static void flexcan_set_bittiming(struct net_device *dev)
 		reg |= FLEXCAN_CTRL_SMP;
 
 	netdev_dbg(dev, "writing ctrl=0x%08x\n", reg);
-	flexcan_write(reg, &regs->ctrl);
+	priv->write(reg, &regs->ctrl);
 
 	/* print chip status */
 	netdev_dbg(dev, "%s: mcr=0x%08x ctrl=0x%08x\n", __func__,
-		   flexcan_read(&regs->mcr), flexcan_read(&regs->ctrl));
+		   priv->read(&regs->mcr), priv->read(&regs->ctrl));
 }
 
 /* flexcan_chip_start
@@ -913,7 +931,7 @@ static int flexcan_chip_start(struct net_device *dev)
 	 * choose format C
 	 * set max mailbox number
 	 */
-	reg_mcr = flexcan_read(&regs->mcr);
+	reg_mcr = priv->read(&regs->mcr);
 	reg_mcr &= ~FLEXCAN_MCR_MAXMB(0xff);
 	reg_mcr |= FLEXCAN_MCR_FRZ | FLEXCAN_MCR_HALT | FLEXCAN_MCR_SUPV |
 		FLEXCAN_MCR_WRN_EN | FLEXCAN_MCR_SRX_DIS | FLEXCAN_MCR_IRMQ |
@@ -927,7 +945,7 @@ static int flexcan_chip_start(struct net_device *dev)
 			FLEXCAN_MCR_MAXMB(priv->tx_mb_idx);
 	}
 	netdev_dbg(dev, "%s: writing mcr=0x%08x", __func__, reg_mcr);
-	flexcan_write(reg_mcr, &regs->mcr);
+	priv->write(reg_mcr, &regs->mcr);
 
 	/* CTRL
 	 *
@@ -940,7 +958,7 @@ static int flexcan_chip_start(struct net_device *dev)
 	 * enable bus off interrupt
 	 * (== FLEXCAN_CTRL_ERR_STATE)
 	 */
-	reg_ctrl = flexcan_read(&regs->ctrl);
+	reg_ctrl = priv->read(&regs->ctrl);
 	reg_ctrl &= ~FLEXCAN_CTRL_TSYN;
 	reg_ctrl |= FLEXCAN_CTRL_BOFF_REC | FLEXCAN_CTRL_LBUF |
 		FLEXCAN_CTRL_ERR_STATE;
@@ -960,45 +978,45 @@ static int flexcan_chip_start(struct net_device *dev)
 	/* leave interrupts disabled for now */
 	reg_ctrl &= ~FLEXCAN_CTRL_ERR_ALL;
 	netdev_dbg(dev, "%s: writing ctrl=0x%08x", __func__, reg_ctrl);
-	flexcan_write(reg_ctrl, &regs->ctrl);
+	priv->write(reg_ctrl, &regs->ctrl);
 
 	if ((priv->devtype_data->quirks & FLEXCAN_QUIRK_ENABLE_EACEN_RRS)) {
-		reg_ctrl2 = flexcan_read(&regs->ctrl2);
+		reg_ctrl2 = priv->read(&regs->ctrl2);
 		reg_ctrl2 |= FLEXCAN_CTRL2_EACEN | FLEXCAN_CTRL2_RRS;
-		flexcan_write(reg_ctrl2, &regs->ctrl2);
+		priv->write(reg_ctrl2, &regs->ctrl2);
 	}
 
 	/* clear and invalidate all mailboxes first */
 	for (i = priv->tx_mb_idx; i < ARRAY_SIZE(regs->mb); i++) {
-		flexcan_write(FLEXCAN_MB_CODE_RX_INACTIVE,
-			      &regs->mb[i].can_ctrl);
+		priv->write(FLEXCAN_MB_CODE_RX_INACTIVE,
+			    &regs->mb[i].can_ctrl);
 	}
 
 	if (priv->devtype_data->quirks & FLEXCAN_QUIRK_USE_OFF_TIMESTAMP) {
 		for (i = priv->offload.mb_first; i <= priv->offload.mb_last; i++)
-			flexcan_write(FLEXCAN_MB_CODE_RX_EMPTY,
-				      &regs->mb[i].can_ctrl);
+			priv->write(FLEXCAN_MB_CODE_RX_EMPTY,
+				    &regs->mb[i].can_ctrl);
 	}
 
 	/* Errata ERR005829: mark first TX mailbox as INACTIVE */
-	flexcan_write(FLEXCAN_MB_CODE_TX_INACTIVE,
-		      &priv->tx_mb_reserved->can_ctrl);
+	priv->write(FLEXCAN_MB_CODE_TX_INACTIVE,
+		    &priv->tx_mb_reserved->can_ctrl);
 
 	/* mark TX mailbox as INACTIVE */
-	flexcan_write(FLEXCAN_MB_CODE_TX_INACTIVE,
-		      &priv->tx_mb->can_ctrl);
+	priv->write(FLEXCAN_MB_CODE_TX_INACTIVE,
+		    &priv->tx_mb->can_ctrl);
 
 	/* acceptance mask/acceptance code (accept everything) */
-	flexcan_write(0x0, &regs->rxgmask);
-	flexcan_write(0x0, &regs->rx14mask);
-	flexcan_write(0x0, &regs->rx15mask);
+	priv->write(0x0, &regs->rxgmask);
+	priv->write(0x0, &regs->rx14mask);
+	priv->write(0x0, &regs->rx15mask);
 
 	if (priv->devtype_data->quirks & FLEXCAN_QUIRK_DISABLE_RXFG)
-		flexcan_write(0x0, &regs->rxfgmask);
+		priv->write(0x0, &regs->rxfgmask);
 
 	/* clear acceptance filters */
 	for (i = 0; i < ARRAY_SIZE(regs->mb); i++)
-		flexcan_write(0, &regs->rximr[i]);
+		priv->write(0, &regs->rximr[i]);
 
 	/* On Vybrid, disable memory error detection interrupts
 	 * and freeze mode.
@@ -1011,16 +1029,16 @@ static int flexcan_chip_start(struct net_device *dev)
 		 * and Correction of Memory Errors" to write to
 		 * MECR register
 		 */
-		reg_ctrl2 = flexcan_read(&regs->ctrl2);
+		reg_ctrl2 = priv->read(&regs->ctrl2);
 		reg_ctrl2 |= FLEXCAN_CTRL2_ECRWRE;
-		flexcan_write(reg_ctrl2, &regs->ctrl2);
+		priv->write(reg_ctrl2, &regs->ctrl2);
 
-		reg_mecr = flexcan_read(&regs->mecr);
+		reg_mecr = priv->read(&regs->mecr);
 		reg_mecr &= ~FLEXCAN_MECR_ECRWRDIS;
-		flexcan_write(reg_mecr, &regs->mecr);
+		priv->write(reg_mecr, &regs->mecr);
 		reg_mecr &= ~(FLEXCAN_MECR_NCEFAFRZ | FLEXCAN_MECR_HANCEI_MSK |
 			      FLEXCAN_MECR_FANCEI_MSK);
-		flexcan_write(reg_mecr, &regs->mecr);
+		priv->write(reg_mecr, &regs->mecr);
 	}
 
 	err = flexcan_transceiver_enable(priv);
@@ -1036,14 +1054,14 @@ static int flexcan_chip_start(struct net_device *dev)
 
 	/* enable interrupts atomically */
 	disable_irq(dev->irq);
-	flexcan_write(priv->reg_ctrl_default, &regs->ctrl);
-	flexcan_write(priv->reg_imask1_default, &regs->imask1);
-	flexcan_write(priv->reg_imask2_default, &regs->imask2);
+	priv->write(priv->reg_ctrl_default, &regs->ctrl);
+	priv->write(priv->reg_imask1_default, &regs->imask1);
+	priv->write(priv->reg_imask2_default, &regs->imask2);
 	enable_irq(dev->irq);
 
 	/* print chip status */
 	netdev_dbg(dev, "%s: reading mcr=0x%08x ctrl=0x%08x\n", __func__,
-		   flexcan_read(&regs->mcr), flexcan_read(&regs->ctrl));
+		   priv->read(&regs->mcr), priv->read(&regs->ctrl));
 
 	return 0;
 
@@ -1068,10 +1086,10 @@ static void flexcan_chip_stop(struct net_device *dev)
 	flexcan_chip_disable(priv);
 
 	/* Disable all interrupts */
-	flexcan_write(0, &regs->imask2);
-	flexcan_write(0, &regs->imask1);
-	flexcan_write(priv->reg_ctrl_default & ~FLEXCAN_CTRL_ERR_ALL,
-		      &regs->ctrl);
+	priv->write(0, &regs->imask2);
+	priv->write(0, &regs->imask1);
+	priv->write(priv->reg_ctrl_default & ~FLEXCAN_CTRL_ERR_ALL,
+		    &regs->ctrl);
 
 	flexcan_transceiver_disable(priv);
 	priv->can.state = CAN_STATE_STOPPED;
@@ -1186,26 +1204,26 @@ static int register_flexcandev(struct net_device *dev)
 	err = flexcan_chip_disable(priv);
 	if (err)
 		goto out_disable_per;
-	reg = flexcan_read(&regs->ctrl);
+	reg = priv->read(&regs->ctrl);
 	reg |= FLEXCAN_CTRL_CLK_SRC;
-	flexcan_write(reg, &regs->ctrl);
+	priv->write(reg, &regs->ctrl);
 
 	err = flexcan_chip_enable(priv);
 	if (err)
 		goto out_chip_disable;
 
 	/* set freeze, halt and activate FIFO, restrict register access */
-	reg = flexcan_read(&regs->mcr);
+	reg = priv->read(&regs->mcr);
 	reg |= FLEXCAN_MCR_FRZ | FLEXCAN_MCR_HALT |
 		FLEXCAN_MCR_FEN | FLEXCAN_MCR_SUPV;
-	flexcan_write(reg, &regs->mcr);
+	priv->write(reg, &regs->mcr);
 
 	/* Currently we only support newer versions of this core
 	 * featuring a RX hardware FIFO (although this driver doesn't
 	 * make use of it on some cores). Older cores, found on some
 	 * Coldfire derivatives are not tested.
 	 */
-	reg = flexcan_read(&regs->mcr);
+	reg = priv->read(&regs->mcr);
 	if (!(reg & FLEXCAN_MCR_FEN)) {
 		netdev_err(dev, "Could not enable RX FIFO, unsupported core\n");
 		err = -ENODEV;
@@ -1233,8 +1251,12 @@ static void unregister_flexcandev(struct net_device *dev)
 static const struct of_device_id flexcan_of_match[] = {
 	{ .compatible = "fsl,imx6q-flexcan", .data = &fsl_imx6q_devtype_data, },
 	{ .compatible = "fsl,imx28-flexcan", .data = &fsl_imx28_devtype_data, },
+	{ .compatible = "fsl,imx53-flexcan", .data = &fsl_p1010_devtype_data, },
+	{ .compatible = "fsl,imx35-flexcan", .data = &fsl_p1010_devtype_data, },
+	{ .compatible = "fsl,imx25-flexcan", .data = &fsl_p1010_devtype_data, },
 	{ .compatible = "fsl,p1010-flexcan", .data = &fsl_p1010_devtype_data, },
 	{ .compatible = "fsl,vf610-flexcan", .data = &fsl_vf610_devtype_data, },
+	{ .compatible = "fsl,ls1021ar2-flexcan", .data = &fsl_ls1021a_r2_devtype_data, },
 	{ /* sentinel */ },
 };
 MODULE_DEVICE_TABLE(of, flexcan_of_match);
@@ -1314,6 +1336,21 @@ static int flexcan_probe(struct platform_device *pdev)
 	dev->flags |= IFF_ECHO;
 
 	priv = netdev_priv(dev);
+
+	if (of_property_read_bool(pdev->dev.of_node, "big-endian")) {
+		priv->read = flexcan_read_be;
+		priv->write = flexcan_write_be;
+	} else {
+		if (of_device_is_compatible(pdev->dev.of_node,
+					    "fsl,p1010-flexcan")) {
+			priv->read = flexcan_read_be;
+			priv->write = flexcan_write_be;
+		} else {
+			priv->read = flexcan_read_le;
+			priv->write = flexcan_write_le;
+		}
+	}
+
 	priv->can.clock.freq = clock_freq;
 	priv->can.bittiming_const = &flexcan_bittiming_const;
 	priv->can.do_set_mode = flexcan_set_mode;
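
Unlike the PPC-only in_be32()/out_be32(), the ioread32()/iowrite32()
families (and their be variants) exist on all architectures, which is
what lets the endianness choice move from compile time to probe time. A
minimal sketch of the resulting call pattern, mirroring
flexcan_chip_disable() with the names introduced by this patch:

	static int flexcan_chip_disable_sketch(struct flexcan_priv *priv)
	{
		struct flexcan_regs __iomem *regs = priv->regs;
		u32 reg;

		/* probe already picked the BE or LE wrappers, so the
		 * same sequence works on LS1021A and i.MX alike
		 */
		reg = priv->read(&regs->mcr);
		priv->write(reg | FLEXCAN_MCR_MDIS, &regs->mcr);
		return 0;
	}
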
diff --git a/drivers/net/can/usb/peak_usb/pcan_usb.c b/drivers/net/can/usb/peak_usb/pcan_usb.c
index 25a9b79..f530a80 100644
--- a/drivers/net/can/usb/peak_usb/pcan_usb.c
+++ b/drivers/net/can/usb/peak_usb/pcan_usb.c
@@ -408,7 +408,6 @@ static int pcan_usb_decode_error(struct pcan_usb_msg_context *mc, u8 n,
 {
 	struct sk_buff *skb;
 	struct can_frame *cf;
-	struct timeval tv;
 	enum can_state new_state;
 
 	/* ignore this error until 1st ts received */
@@ -525,8 +524,8 @@ static int pcan_usb_decode_error(struct pcan_usb_msg_context *mc, u8 n,
 	if (status_len & PCAN_USB_STATUSLEN_TIMESTAMP) {
 		struct skb_shared_hwtstamps *hwts = skb_hwtstamps(skb);
 
-		peak_usb_get_ts_tv(&mc->pdev->time_ref, mc->ts16, &tv);
-		hwts->hwtstamp = timeval_to_ktime(tv);
+		peak_usb_get_ts_time(&mc->pdev->time_ref, mc->ts16,
+				     &hwts->hwtstamp);
 	}
 
 	mc->netdev->stats.rx_packets++;
@@ -610,7 +609,6 @@ static int pcan_usb_decode_data(struct pcan_usb_msg_context *mc, u8 status_len)
 	u8 rec_len = status_len & PCAN_USB_STATUSLEN_DLC;
 	struct sk_buff *skb;
 	struct can_frame *cf;
-	struct timeval tv;
 	struct skb_shared_hwtstamps *hwts;
 
 	skb = alloc_can_skb(mc->netdev, &cf);
@@ -658,9 +656,8 @@ static int pcan_usb_decode_data(struct pcan_usb_msg_context *mc, u8 status_len)
 	}
 
 	/* convert timestamp into kernel time */
-	peak_usb_get_ts_tv(&mc->pdev->time_ref, mc->ts16, &tv);
 	hwts = skb_hwtstamps(skb);
-	hwts->hwtstamp = timeval_to_ktime(tv);
+	peak_usb_get_ts_time(&mc->pdev->time_ref, mc->ts16, &hwts->hwtstamp);
 
 	/* update statistics */
 	mc->netdev->stats.rx_packets++;
diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_core.c b/drivers/net/can/usb/peak_usb/pcan_usb_core.c
index 1ca76e0..8f699ee 100644
--- a/drivers/net/can/usb/peak_usb/pcan_usb_core.c
+++ b/drivers/net/can/usb/peak_usb/pcan_usb_core.c
@@ -80,21 +80,6 @@ void peak_usb_init_time_ref(struct peak_time_ref *time_ref,
 	}
 }
 
-static void peak_usb_add_us(struct timeval *tv, u32 delta_us)
-{
-	/* number of s. to add to final time */
-	u32 delta_s = delta_us / 1000000;
-
-	delta_us -= delta_s * 1000000;
-
-	tv->tv_usec += delta_us;
-	if (tv->tv_usec >= 1000000) {
-		tv->tv_usec -= 1000000;
-		delta_s++;
-	}
-	tv->tv_sec += delta_s;
-}
-
 /*
  * sometimes, another now may be more recent than the current one...
  */
@@ -103,7 +88,7 @@ void peak_usb_update_ts_now(struct peak_time_ref *time_ref, u32 ts_now)
 	time_ref->ts_dev_2 = ts_now;
 
 	/* should wait at least two passes before computing */
-	if (time_ref->tv_host.tv_sec > 0) {
+	if (ktime_to_ns(time_ref->tv_host) > 0) {
 		u32 delta_ts = time_ref->ts_dev_2 - time_ref->ts_dev_1;
 
 		if (time_ref->ts_dev_2 < time_ref->ts_dev_1)
@@ -118,26 +103,26 @@ void peak_usb_update_ts_now(struct peak_time_ref *time_ref, u32 ts_now)
  */
 void peak_usb_set_ts_now(struct peak_time_ref *time_ref, u32 ts_now)
 {
-	if (time_ref->tv_host_0.tv_sec == 0) {
+	if (ktime_to_ns(time_ref->tv_host_0) == 0) {
 		/* use monotonic clock to correctly compute further deltas */
-		time_ref->tv_host_0 = ktime_to_timeval(ktime_get());
-		time_ref->tv_host.tv_sec = 0;
+		time_ref->tv_host_0 = ktime_get();
+		time_ref->tv_host = ktime_set(0, 0);
 	} else {
 		/*
-		 * delta_us should not be >= 2^32 => delta_s should be < 4294
+		 * delta_us should not be >= 2^32 => delta should be < 4294s
 		 * handle 32-bit wrapping here: if the count of seconds reaches 4200,
 		 * reset counters and change time base
 		 */
-		if (time_ref->tv_host.tv_sec != 0) {
-			u32 delta_s = time_ref->tv_host.tv_sec
-						- time_ref->tv_host_0.tv_sec;
-			if (delta_s > 4200) {
+		if (ktime_to_ns(time_ref->tv_host)) {
+			ktime_t delta = ktime_sub(time_ref->tv_host,
+						  time_ref->tv_host_0);
+			if (ktime_to_ns(delta) > (4200ull * NSEC_PER_SEC)) {
 				time_ref->tv_host_0 = time_ref->tv_host;
 				time_ref->ts_total = 0;
 			}
 		}
 
-		time_ref->tv_host = ktime_to_timeval(ktime_get());
+		time_ref->tv_host = ktime_get();
 		time_ref->tick_count++;
 	}
 
@@ -146,13 +131,12 @@ void peak_usb_set_ts_now(struct peak_time_ref *time_ref, u32 ts_now)
 }
 
 /*
- * compute timeval according to current ts and time_ref data
+ * compute time according to current ts and time_ref data
  */
-void peak_usb_get_ts_tv(struct peak_time_ref *time_ref, u32 ts,
-			struct timeval *tv)
+void peak_usb_get_ts_time(struct peak_time_ref *time_ref, u32 ts, ktime_t *time)
 {
-	/* protect from getting timeval before setting now */
-	if (time_ref->tv_host.tv_sec > 0) {
+	/* protect from getting time before setting now */
+	if (ktime_to_ns(time_ref->tv_host)) {
 		u64 delta_us;
 
 		delta_us = ts - time_ref->ts_dev_2;
@@ -164,10 +148,9 @@ void peak_usb_get_ts_tv(struct peak_time_ref *time_ref, u32 ts,
 		delta_us *= time_ref->adapter->us_per_ts_scale;
 		delta_us >>= time_ref->adapter->us_per_ts_shift;
 
-		*tv = time_ref->tv_host_0;
-		peak_usb_add_us(tv, (u32)delta_us);
+		*time = ktime_add_us(time_ref->tv_host_0, delta_us);
 	} else {
-		*tv = ktime_to_timeval(ktime_get());
+		*time = ktime_get();
 	}
 }
 
@@ -178,10 +161,8 @@ int peak_usb_netif_rx(struct sk_buff *skb,
 		      struct peak_time_ref *time_ref, u32 ts_low, u32 ts_high)
 {
 	struct skb_shared_hwtstamps *hwts = skb_hwtstamps(skb);
-	struct timeval tv;
 
-	peak_usb_get_ts_tv(time_ref, ts_low, &tv);
-	hwts->hwtstamp = timeval_to_ktime(tv);
+	peak_usb_get_ts_time(time_ref, ts_low, &hwts->hwtstamp);
 
 	return netif_rx(skb);
 }
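
The ktime_t conversion above removes the hand-rolled microsecond carry
that peak_usb_add_us() needed, since the ktime helpers normalize for us.
A minimal sketch of the arithmetic the new code relies on:

	ktime_t base, stamp;

	base = ktime_get();			/* monotonic host time */
	stamp = ktime_add_us(base, 1500);	/* device delta in us */

	/* device deltas are < 2^32 us, i.e. < 4294s, hence the rebase
	 * against 4200s that peak_usb_set_ts_now() performs
	 */
	if (ktime_to_ns(ktime_sub(stamp, base)) > (4200ull * NSEC_PER_SEC))
		base = stamp;
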
diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_core.h b/drivers/net/can/usb/peak_usb/pcan_usb_core.h
index c01316ca..29f03dc 100644
--- a/drivers/net/can/usb/peak_usb/pcan_usb_core.h
+++ b/drivers/net/can/usb/peak_usb/pcan_usb_core.h
@@ -96,7 +96,7 @@ extern const struct peak_usb_adapter pcan_usb_pro_fd;
 extern const struct peak_usb_adapter pcan_usb_x6;
 
 struct peak_time_ref {
-	struct timeval tv_host_0, tv_host;
+	ktime_t tv_host_0, tv_host;
 	u32 ts_dev_1, ts_dev_2;
 	u64 ts_total;
 	u32 tick_count;
@@ -151,8 +151,7 @@ void peak_usb_init_time_ref(struct peak_time_ref *time_ref,
 			    const struct peak_usb_adapter *adapter);
 void peak_usb_update_ts_now(struct peak_time_ref *time_ref, u32 ts_now);
 void peak_usb_set_ts_now(struct peak_time_ref *time_ref, u32 ts_now);
-void peak_usb_get_ts_tv(struct peak_time_ref *time_ref, u32 ts,
-			struct timeval *tv);
+void peak_usb_get_ts_time(struct peak_time_ref *time_ref, u32 ts, ktime_t *tv);
 int peak_usb_netif_rx(struct sk_buff *skb,
 		      struct peak_time_ref *time_ref, u32 ts_low, u32 ts_high);
 void peak_usb_async_complete(struct urb *urb);
diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_pro.c b/drivers/net/can/usb/peak_usb/pcan_usb_pro.c
index bbdd605..0105fbf 100644
--- a/drivers/net/can/usb/peak_usb/pcan_usb_pro.c
+++ b/drivers/net/can/usb/peak_usb/pcan_usb_pro.c
@@ -531,7 +531,6 @@ static int pcan_usb_pro_handle_canmsg(struct pcan_usb_pro_interface *usb_if,
 	struct net_device *netdev = dev->netdev;
 	struct can_frame *can_frame;
 	struct sk_buff *skb;
-	struct timeval tv;
 	struct skb_shared_hwtstamps *hwts;
 
 	skb = alloc_can_skb(netdev, &can_frame);
@@ -549,9 +548,9 @@ static int pcan_usb_pro_handle_canmsg(struct pcan_usb_pro_interface *usb_if,
 	else
 		memcpy(can_frame->data, rx->data, can_frame->can_dlc);
 
-	peak_usb_get_ts_tv(&usb_if->time_ref, le32_to_cpu(rx->ts32), &tv);
 	hwts = skb_hwtstamps(skb);
-	hwts->hwtstamp = timeval_to_ktime(tv);
+	peak_usb_get_ts_time(&usb_if->time_ref, le32_to_cpu(rx->ts32),
+			     &hwts->hwtstamp);
 
 	netdev->stats.rx_packets++;
 	netdev->stats.rx_bytes += can_frame->can_dlc;
@@ -571,7 +570,6 @@ static int pcan_usb_pro_handle_error(struct pcan_usb_pro_interface *usb_if,
 	enum can_state new_state = CAN_STATE_ERROR_ACTIVE;
 	u8 err_mask = 0;
 	struct sk_buff *skb;
-	struct timeval tv;
 	struct skb_shared_hwtstamps *hwts;
 
 	/* nothing should be sent while in BUS_OFF state */
@@ -667,9 +665,8 @@ static int pcan_usb_pro_handle_error(struct pcan_usb_pro_interface *usb_if,
 
 	dev->can.state = new_state;
 
-	peak_usb_get_ts_tv(&usb_if->time_ref, le32_to_cpu(er->ts32), &tv);
 	hwts = skb_hwtstamps(skb);
-	hwts->hwtstamp = timeval_to_ktime(tv);
+	peak_usb_get_ts_time(&usb_if->time_ref, le32_to_cpu(er->ts32), &hwts->hwtstamp);
 	netdev->stats.rx_packets++;
 	netdev->stats.rx_bytes += can_frame->can_dlc;
 	netif_rx(skb);
diff --git a/drivers/net/can/vxcan.c b/drivers/net/can/vxcan.c
index b4c4a2c..ed68288 100644
--- a/drivers/net/can/vxcan.c
+++ b/drivers/net/can/vxcan.c
@@ -227,10 +227,8 @@ static int vxcan_newlink(struct net *net, struct net_device *dev,
 	netif_carrier_off(peer);
 
 	err = rtnl_configure_link(peer, ifmp);
-	if (err < 0) {
-		unregister_netdevice(peer);
-		return err;
-	}
+	if (err < 0)
+		goto unregister_network_device;
 
 	/* register first device */
 	if (tb[IFLA_IFNAME])
@@ -239,10 +237,8 @@ static int vxcan_newlink(struct net *net, struct net_device *dev,
 		snprintf(dev->name, IFNAMSIZ, DRV_NAME "%%d");
 
 	err = register_netdevice(dev);
-	if (err < 0) {
-		unregister_netdevice(peer);
-		return err;
-	}
+	if (err < 0)
+		goto unregister_network_device;
 
 	netif_carrier_off(dev);
 
@@ -254,6 +250,10 @@ static int vxcan_newlink(struct net *net, struct net_device *dev,
 	rcu_assign_pointer(priv->peer, dev);
 
 	return 0;
+
+unregister_network_device:
+	unregister_netdevice(peer);
+	return err;
 }
 
 static void vxcan_dellink(struct net_device *dev, struct list_head *head)
diff --git a/drivers/net/dsa/Kconfig b/drivers/net/dsa/Kconfig
index 83a9bc8..2b81b97 100644
--- a/drivers/net/dsa/Kconfig
+++ b/drivers/net/dsa/Kconfig
@@ -33,7 +33,7 @@
 
 config NET_DSA_MV88E6060
 	tristate "Marvell 88E6060 ethernet switch chip support"
-	depends on NET_DSA
+	depends on NET_DSA && NET_DSA_LEGACY
 	select NET_DSA_TAG_TRAILER
 	---help---
 	  This enables support for the Marvell 88E6060 ethernet switch
diff --git a/drivers/net/dsa/b53/b53_common.c b/drivers/net/dsa/b53/b53_common.c
index 4498ab8..db830a1 100644
--- a/drivers/net/dsa/b53/b53_common.c
+++ b/drivers/net/dsa/b53/b53_common.c
@@ -1029,8 +1029,7 @@ int b53_vlan_filtering(struct dsa_switch *ds, int port, bool vlan_filtering)
 EXPORT_SYMBOL(b53_vlan_filtering);
 
 int b53_vlan_prepare(struct dsa_switch *ds, int port,
-		     const struct switchdev_obj_port_vlan *vlan,
-		     struct switchdev_trans *trans)
+		     const struct switchdev_obj_port_vlan *vlan)
 {
 	struct b53_device *dev = ds->priv;
 
@@ -1047,8 +1046,7 @@ int b53_vlan_prepare(struct dsa_switch *ds, int port,
 EXPORT_SYMBOL(b53_vlan_prepare);
 
 void b53_vlan_add(struct dsa_switch *ds, int port,
-		  const struct switchdev_obj_port_vlan *vlan,
-		  struct switchdev_trans *trans)
+		  const struct switchdev_obj_port_vlan *vlan)
 {
 	struct b53_device *dev = ds->priv;
 	bool untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
@@ -1495,8 +1493,7 @@ static bool b53_can_enable_brcm_tags(struct dsa_switch *ds, int port)
 	return false;
 }
 
-static enum dsa_tag_protocol b53_get_tag_protocol(struct dsa_switch *ds,
-						  int port)
+enum dsa_tag_protocol b53_get_tag_protocol(struct dsa_switch *ds, int port)
 {
 	struct b53_device *dev = ds->priv;
 
@@ -1517,6 +1514,7 @@ static enum dsa_tag_protocol b53_get_tag_protocol(struct dsa_switch *ds,
 
 	return DSA_TAG_PROTO_BRCM;
 }
+EXPORT_SYMBOL(b53_get_tag_protocol);
 
 int b53_mirror_add(struct dsa_switch *ds, int port,
 		   struct dsa_mall_mirror_tc_entry *mirror, bool ingress)
diff --git a/drivers/net/dsa/b53/b53_priv.h b/drivers/net/dsa/b53/b53_priv.h
index daaaa1e..d954cf3 100644
--- a/drivers/net/dsa/b53/b53_priv.h
+++ b/drivers/net/dsa/b53/b53_priv.h
@@ -295,11 +295,9 @@ void b53_br_set_stp_state(struct dsa_switch *ds, int port, u8 state);
 void b53_br_fast_age(struct dsa_switch *ds, int port);
 int b53_vlan_filtering(struct dsa_switch *ds, int port, bool vlan_filtering);
 int b53_vlan_prepare(struct dsa_switch *ds, int port,
-		     const struct switchdev_obj_port_vlan *vlan,
-		     struct switchdev_trans *trans);
+		     const struct switchdev_obj_port_vlan *vlan);
 void b53_vlan_add(struct dsa_switch *ds, int port,
-		  const struct switchdev_obj_port_vlan *vlan,
-		  struct switchdev_trans *trans);
+		  const struct switchdev_obj_port_vlan *vlan);
 int b53_vlan_del(struct dsa_switch *ds, int port,
 		 const struct switchdev_obj_port_vlan *vlan);
 int b53_fdb_add(struct dsa_switch *ds, int port,
@@ -310,6 +308,7 @@ int b53_fdb_dump(struct dsa_switch *ds, int port,
 		 dsa_fdb_dump_cb_t *cb, void *data);
 int b53_mirror_add(struct dsa_switch *ds, int port,
 		   struct dsa_mall_mirror_tc_entry *mirror, bool ingress);
+enum dsa_tag_protocol b53_get_tag_protocol(struct dsa_switch *ds, int port);
 void b53_mirror_del(struct dsa_switch *ds, int port,
 		    struct dsa_mall_mirror_tc_entry *mirror);
 int b53_enable_port(struct dsa_switch *ds, int port, struct phy_device *phy);
diff --git a/drivers/net/dsa/bcm_sf2.c b/drivers/net/dsa/bcm_sf2.c
index b62d472..0378ede 100644
--- a/drivers/net/dsa/bcm_sf2.c
+++ b/drivers/net/dsa/bcm_sf2.c
@@ -34,12 +34,6 @@
 #include "b53/b53_priv.h"
 #include "b53/b53_regs.h"
 
-static enum dsa_tag_protocol bcm_sf2_sw_get_tag_protocol(struct dsa_switch *ds,
-							 int port)
-{
-	return DSA_TAG_PROTO_BRCM;
-}
-
 static void bcm_sf2_imp_setup(struct dsa_switch *ds, int port)
 {
 	struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
@@ -860,7 +854,7 @@ static const struct b53_io_ops bcm_sf2_io_ops = {
 };
 
 static const struct dsa_switch_ops bcm_sf2_ops = {
-	.get_tag_protocol	= bcm_sf2_sw_get_tag_protocol,
+	.get_tag_protocol	= b53_get_tag_protocol,
 	.setup			= bcm_sf2_sw_setup,
 	.get_strings		= b53_get_strings,
 	.get_ethtool_stats	= b53_get_ethtool_stats,
@@ -954,6 +948,9 @@ static const struct of_device_id bcm_sf2_of_match[] = {
 	{ .compatible = "brcm,bcm7278-switch-v4.0",
 	  .data = &bcm_sf2_7278_data
 	},
+	{ .compatible = "brcm,bcm7278-switch-v4.8",
+	  .data = &bcm_sf2_7278_data
+	},
 	{ /* sentinel */ },
 };
 MODULE_DEVICE_TABLE(of, bcm_sf2_of_match);
diff --git a/drivers/net/dsa/dsa_loop.c b/drivers/net/dsa/dsa_loop.c
index bb71d3d..7aa84ee 100644
--- a/drivers/net/dsa/dsa_loop.c
+++ b/drivers/net/dsa/dsa_loop.c
@@ -174,9 +174,9 @@ static int dsa_loop_port_vlan_filtering(struct dsa_switch *ds, int port,
 	return 0;
 }
 
-static int dsa_loop_port_vlan_prepare(struct dsa_switch *ds, int port,
-				      const struct switchdev_obj_port_vlan *vlan,
-				      struct switchdev_trans *trans)
+static int
+dsa_loop_port_vlan_prepare(struct dsa_switch *ds, int port,
+			   const struct switchdev_obj_port_vlan *vlan)
 {
 	struct dsa_loop_priv *ps = ds->priv;
 	struct mii_bus *bus = ps->bus;
@@ -193,8 +193,7 @@ static int dsa_loop_port_vlan_prepare(struct dsa_switch *ds, int port,
 }
 
 static void dsa_loop_port_vlan_add(struct dsa_switch *ds, int port,
-				   const struct switchdev_obj_port_vlan *vlan,
-				   struct switchdev_trans *trans)
+				   const struct switchdev_obj_port_vlan *vlan)
 {
 	bool untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
 	bool pvid = vlan->flags & BRIDGE_VLAN_INFO_PVID;
diff --git a/drivers/net/dsa/lan9303-core.c b/drivers/net/dsa/lan9303-core.c
index b24566b..2dead7f 100644
--- a/drivers/net/dsa/lan9303-core.c
+++ b/drivers/net/dsa/lan9303-core.c
@@ -249,6 +249,28 @@ static int lan9303_read(struct regmap *regmap, unsigned int offset, u32 *reg)
 	return -EIO;
 }
 
+static int lan9303_read_wait(struct lan9303 *chip, int offset, u32 mask)
+{
+	int i;
+
+	for (i = 0; i < 25; i++) {
+		u32 reg;
+		int ret;
+
+		ret = lan9303_read(chip->regmap, offset, &reg);
+		if (ret) {
+			dev_err(chip->dev, "%s failed to read offset %d: %d\n",
+				__func__, offset, ret);
+			return ret;
+		}
+		if (!(reg & mask))
+			return 0;
+		usleep_range(1000, 2000);
+	}
+
+	return -ETIMEDOUT;
+}
+
 static int lan9303_virt_phy_reg_read(struct lan9303 *chip, int regnum)
 {
 	int ret;
@@ -274,22 +296,8 @@ static int lan9303_virt_phy_reg_write(struct lan9303 *chip, int regnum, u16 val)
 
 static int lan9303_indirect_phy_wait_for_completion(struct lan9303 *chip)
 {
-	int ret, i;
-	u32 reg;
-
-	for (i = 0; i < 25; i++) {
-		ret = lan9303_read(chip->regmap, LAN9303_PMI_ACCESS, &reg);
-		if (ret) {
-			dev_err(chip->dev,
-				"Failed to read pmi access status: %d\n", ret);
-			return ret;
-		}
-		if (!(reg & LAN9303_PMI_ACCESS_MII_BUSY))
-			return 0;
-		usleep_range(1000, 2000);
-	}
-
-	return -EIO;
+	return lan9303_read_wait(chip, LAN9303_PMI_ACCESS,
+				 LAN9303_PMI_ACCESS_MII_BUSY);
 }
 
 static int lan9303_indirect_phy_read(struct lan9303 *chip, int addr, int regnum)
@@ -366,22 +374,8 @@ EXPORT_SYMBOL_GPL(lan9303_indirect_phy_ops);
 
 static int lan9303_switch_wait_for_completion(struct lan9303 *chip)
 {
-	int ret, i;
-	u32 reg;
-
-	for (i = 0; i < 25; i++) {
-		ret = lan9303_read(chip->regmap, LAN9303_SWITCH_CSR_CMD, &reg);
-		if (ret) {
-			dev_err(chip->dev,
-				"Failed to read csr command status: %d\n", ret);
-			return ret;
-		}
-		if (!(reg & LAN9303_SWITCH_CSR_CMD_BUSY))
-			return 0;
-		usleep_range(1000, 2000);
-	}
-
-	return -EIO;
+	return lan9303_read_wait(chip, LAN9303_SWITCH_CSR_CMD,
+				 LAN9303_SWITCH_CSR_CMD_BUSY);
 }
 
 static int lan9303_write_switch_reg(struct lan9303 *chip, u16 regnum, u32 val)
@@ -485,7 +479,8 @@ static int lan9303_detect_phy_setup(struct lan9303 *chip)
 {
 	int reg;
 
-	/* depending on the 'phy_addr_sel_strap' setting, the three phys are
+	/* Calculate chip->phy_addr_base:
+	 * Depending on the 'phy_addr_sel_strap' setting, the three phys are
 	 * using IDs 0-1-2 or IDs 1-2-3. We cannot read back the
 	 * 'phy_addr_sel_strap' setting directly, so we need a test to
 	 * determine which configuration is active:
@@ -500,13 +495,10 @@ static int lan9303_detect_phy_setup(struct lan9303 *chip)
 		return reg;
 	}
 
-	if ((reg != 0) && (reg != 0xffff))
-		chip->phy_addr_sel_strap = 1;
-	else
-		chip->phy_addr_sel_strap = 0;
+	chip->phy_addr_base = reg != 0 && reg != 0xffff;
 
 	dev_dbg(chip->dev, "Phy setup '%s' detected\n",
-		chip->phy_addr_sel_strap ? "1-2-3" : "0-1-2");
+		chip->phy_addr_base ? "1-2-3" : "0-1-2");
 
 	return 0;
 }
@@ -546,20 +538,19 @@ lan9303_alr_cache_find_mac(struct lan9303 *chip, const u8 *mac_addr)
 	return NULL;
 }
 
-/* Wait a while until mask & reg == value. Otherwise return timeout. */
-static int lan9303_csr_reg_wait(struct lan9303 *chip, int regno,
-				int mask, char value)
+static int lan9303_csr_reg_wait(struct lan9303 *chip, int regno, u32 mask)
 {
 	int i;
 
-	for (i = 0; i < 0x1000; i++) {
+	for (i = 0; i < 25; i++) {
 		u32 reg;
 
 		lan9303_read_switch_reg(chip, regno, &reg);
-		if ((reg & mask) == value)
+		if (!(reg & mask))
 			return 0;
 		usleep_range(1000, 2000);
 	}
+
 	return -ETIMEDOUT;
 }
 
@@ -569,8 +560,7 @@ static int lan9303_alr_make_entry_raw(struct lan9303 *chip, u32 dat0, u32 dat1)
 	lan9303_write_switch_reg(chip, LAN9303_SWE_ALR_WR_DAT_1, dat1);
 	lan9303_write_switch_reg(chip, LAN9303_SWE_ALR_CMD,
 				 LAN9303_ALR_CMD_MAKE_ENTRY);
-	lan9303_csr_reg_wait(chip, LAN9303_SWE_ALR_CMD_STS, ALR_STS_MAKE_PEND,
-			     0);
+	lan9303_csr_reg_wait(chip, LAN9303_SWE_ALR_CMD_STS, ALR_STS_MAKE_PEND);
 	lan9303_write_switch_reg(chip, LAN9303_SWE_ALR_CMD, 0);
 
 	return 0;
@@ -583,6 +573,7 @@ static void lan9303_alr_loop(struct lan9303 *chip, alr_loop_cb_t *cb, void *ctx)
 {
 	int i;
 
+	mutex_lock(&chip->alr_mutex);
 	lan9303_write_switch_reg(chip, LAN9303_SWE_ALR_CMD,
 				 LAN9303_ALR_CMD_GET_FIRST);
 	lan9303_write_switch_reg(chip, LAN9303_SWE_ALR_CMD, 0);
@@ -606,6 +597,7 @@ static void lan9303_alr_loop(struct lan9303 *chip, alr_loop_cb_t *cb, void *ctx)
 					 LAN9303_ALR_CMD_GET_NEXT);
 		lan9303_write_switch_reg(chip, LAN9303_SWE_ALR_CMD, 0);
 	}
+	mutex_unlock(&chip->alr_mutex);
 }
 
 static void alr_reg_to_mac(u32 dat0, u32 dat1, u8 mac[6])
@@ -694,16 +686,20 @@ static int lan9303_alr_add_port(struct lan9303 *chip, const u8 *mac, int port,
 {
 	struct lan9303_alr_cache_entry *entr;
 
+	mutex_lock(&chip->alr_mutex);
 	entr = lan9303_alr_cache_find_mac(chip, mac);
 	if (!entr) { /* New entry */
 		entr = lan9303_alr_cache_find_free(chip);
-		if (!entr)
+		if (!entr) {
+			mutex_unlock(&chip->alr_mutex);
 			return -ENOSPC;
+		}
 		ether_addr_copy(entr->mac_addr, mac);
 	}
 	entr->port_map |= BIT(port);
 	entr->stp_override = stp_override;
 	lan9303_alr_set_entry(chip, mac, entr->port_map, stp_override);
+	mutex_unlock(&chip->alr_mutex);
 
 	return 0;
 }
@@ -713,15 +709,18 @@ static int lan9303_alr_del_port(struct lan9303 *chip, const u8 *mac, int port)
 {
 	struct lan9303_alr_cache_entry *entr;
 
+	mutex_lock(&chip->alr_mutex);
 	entr = lan9303_alr_cache_find_mac(chip, mac);
 	if (!entr)
-		return 0;  /* no static entry found */
+		goto out;  /* no static entry found */
 
 	entr->port_map &= ~BIT(port);
 	if (entr->port_map == 0) /* zero means it's free again */
 		eth_zero_addr(entr->mac_addr);
 	lan9303_alr_set_entry(chip, mac, entr->port_map, entr->stp_override);
 
+out:
+	mutex_unlock(&chip->alr_mutex);
 	return 0;
 }
 
@@ -866,7 +865,7 @@ static int lan9303_check_device(struct lan9303 *chip)
 	if ((reg >> 16) != LAN9303_CHIP_ID) {
 		dev_err(chip->dev, "expecting LAN9303 chip, but found: %X\n",
 			reg >> 16);
-		return ret;
+		return -ENODEV;
 	}
 
 	/* The default state of the LAN9303 device is to forward packets between
@@ -1018,7 +1017,7 @@ static int lan9303_get_sset_count(struct dsa_switch *ds)
 static int lan9303_phy_read(struct dsa_switch *ds, int phy, int regnum)
 {
 	struct lan9303 *chip = ds->priv;
-	int phy_base = chip->phy_addr_sel_strap;
+	int phy_base = chip->phy_addr_base;
 
 	if (phy == phy_base)
 		return lan9303_virt_phy_reg_read(chip, regnum);
@@ -1032,7 +1031,7 @@ static int lan9303_phy_write(struct dsa_switch *ds, int phy, int regnum,
 			     u16 val)
 {
 	struct lan9303 *chip = ds->priv;
-	int phy_base = chip->phy_addr_sel_strap;
+	int phy_base = chip->phy_addr_base;
 
 	if (phy == phy_base)
 		return lan9303_virt_phy_reg_write(chip, regnum, val);
@@ -1069,7 +1068,7 @@ static void lan9303_adjust_link(struct dsa_switch *ds, int port,
 
 	res =  lan9303_phy_write(ds, port, MII_BMCR, ctl);
 
-	if (port == chip->phy_addr_sel_strap) {
+	if (port == chip->phy_addr_base) {
 		/* Virtual Phy: Remove Turbo 200Mbit mode */
 		lan9303_read(chip->regmap, LAN9303_VIRT_SPECIAL_CTRL, &ctl);
 
@@ -1093,8 +1092,7 @@ static void lan9303_port_disable(struct dsa_switch *ds, int port,
 	struct lan9303 *chip = ds->priv;
 
 	lan9303_disable_processing_port(chip, port);
-	lan9303_phy_write(ds, chip->phy_addr_sel_strap + port,
-			  MII_BMCR, BMCR_PDOWN);
+	lan9303_phy_write(ds, chip->phy_addr_base + port, MII_BMCR, BMCR_PDOWN);
 }
 
 static int lan9303_port_bridge_join(struct dsa_switch *ds, int port,
@@ -1217,8 +1215,7 @@ static int lan9303_port_fdb_dump(struct dsa_switch *ds, int port,
 }
 
 static int lan9303_port_mdb_prepare(struct dsa_switch *ds, int port,
-				    const struct switchdev_obj_port_mdb *mdb,
-				    struct switchdev_trans *trans)
+				    const struct switchdev_obj_port_mdb *mdb)
 {
 	struct lan9303 *chip = ds->priv;
 
@@ -1235,8 +1232,7 @@ static int lan9303_port_mdb_prepare(struct dsa_switch *ds, int port,
 }
 
 static void lan9303_port_mdb_add(struct dsa_switch *ds, int port,
-				 const struct switchdev_obj_port_mdb *mdb,
-				 struct switchdev_trans *trans)
+				 const struct switchdev_obj_port_mdb *mdb)
 {
 	struct lan9303 *chip = ds->priv;
 
@@ -1284,13 +1280,16 @@ static const struct dsa_switch_ops lan9303_switch_ops = {
 
 static int lan9303_register_switch(struct lan9303 *chip)
 {
+	int base;
+
 	chip->ds = dsa_switch_alloc(chip->dev, LAN9303_NUM_PORTS);
 	if (!chip->ds)
 		return -ENOMEM;
 
 	chip->ds->priv = chip;
 	chip->ds->ops = &lan9303_switch_ops;
-	chip->ds->phys_mii_mask = chip->phy_addr_sel_strap ? 0xe : 0x7;
+	base = chip->phy_addr_base;
+	chip->ds->phys_mii_mask = GENMASK(LAN9303_NUM_PORTS - 1 + base, base);
 
 	return dsa_register_switch(chip->ds);
 }
@@ -1325,6 +1324,7 @@ int lan9303_probe(struct lan9303 *chip, struct device_node *np)
 	int ret;
 
 	mutex_init(&chip->indirect_mutex);
+	mutex_init(&chip->alr_mutex);
 
 	lan9303_probe_reset_gpio(chip, np);
 
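
The phys_mii_mask change above is a semantic no-op made generic: with
LAN9303_NUM_PORTS == 3, GENMASK(hi, lo) reproduces exactly the two
literals it replaces:

	/* base == 0 (strap 0-1-2): GENMASK(2, 0) == 0b0111 == 0x7
	 * base == 1 (strap 1-2-3): GENMASK(3, 1) == 0b1110 == 0xe
	 */
	chip->ds->phys_mii_mask = GENMASK(LAN9303_NUM_PORTS - 1 + base, base);
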
diff --git a/drivers/net/dsa/microchip/ksz_common.c b/drivers/net/dsa/microchip/ksz_common.c
index b5be93a..663b0d5 100644
--- a/drivers/net/dsa/microchip/ksz_common.c
+++ b/drivers/net/dsa/microchip/ksz_common.c
@@ -559,8 +559,7 @@ static int ksz_port_vlan_filtering(struct dsa_switch *ds, int port, bool flag)
 }
 
 static int ksz_port_vlan_prepare(struct dsa_switch *ds, int port,
-				 const struct switchdev_obj_port_vlan *vlan,
-				 struct switchdev_trans *trans)
+				 const struct switchdev_obj_port_vlan *vlan)
 {
 	/* nothing needed */
 
@@ -568,8 +567,7 @@ static int ksz_port_vlan_prepare(struct dsa_switch *ds, int port,
 }
 
 static void ksz_port_vlan_add(struct dsa_switch *ds, int port,
-			      const struct switchdev_obj_port_vlan *vlan,
-			      struct switchdev_trans *trans)
+			      const struct switchdev_obj_port_vlan *vlan)
 {
 	struct ksz_device *dev = ds->priv;
 	u32 vlan_table[3];
@@ -858,16 +856,14 @@ static int ksz_port_fdb_dump(struct dsa_switch *ds, int port,
 }
 
 static int ksz_port_mdb_prepare(struct dsa_switch *ds, int port,
-				const struct switchdev_obj_port_mdb *mdb,
-				struct switchdev_trans *trans)
+				const struct switchdev_obj_port_mdb *mdb)
 {
 	/* nothing to do */
 	return 0;
 }
 
 static void ksz_port_mdb_add(struct dsa_switch *ds, int port,
-			     const struct switchdev_obj_port_mdb *mdb,
-			     struct switchdev_trans *trans)
+			     const struct switchdev_obj_port_mdb *mdb)
 {
 	struct ksz_device *dev = ds->priv;
 	u32 static_table[4];
diff --git a/drivers/net/dsa/mt7530.c b/drivers/net/dsa/mt7530.c
index 2820d69..8a0bb00 100644
--- a/drivers/net/dsa/mt7530.c
+++ b/drivers/net/dsa/mt7530.c
@@ -805,6 +805,69 @@ mt7530_port_bridge_join(struct dsa_switch *ds, int port,
 }
 
 static void
+mt7530_port_set_vlan_unaware(struct dsa_switch *ds, int port)
+{
+	struct mt7530_priv *priv = ds->priv;
+	bool all_user_ports_removed = true;
+	int i;
+
+	/* When a port is removed from the bridge, the port is set back to
+	 * its default state as at initial boot, which is a VLAN-unaware
+	 * port.
+	 */
+	mt7530_rmw(priv, MT7530_PCR_P(port), PCR_PORT_VLAN_MASK,
+		   MT7530_PORT_MATRIX_MODE);
+	mt7530_rmw(priv, MT7530_PVC_P(port), VLAN_ATTR_MASK,
+		   VLAN_ATTR(MT7530_VLAN_TRANSPARENT));
+
+	priv->ports[port].vlan_filtering = false;
+
+	for (i = 0; i < MT7530_NUM_PORTS; i++) {
+		if (dsa_is_user_port(ds, i) &&
+		    priv->ports[i].vlan_filtering) {
+			all_user_ports_removed = false;
+			break;
+		}
+	}
+
+	/* The CPU port is treated the same way once all user ports belonging
+	 * to it have left VLAN filtering mode.
+	 */
+	if (all_user_ports_removed) {
+		mt7530_write(priv, MT7530_PCR_P(MT7530_CPU_PORT),
+			     PCR_MATRIX(dsa_user_ports(priv->ds)));
+		mt7530_write(priv, MT7530_PVC_P(MT7530_CPU_PORT),
+			     PORT_SPEC_TAG);
+	}
+}
+
+static void
+mt7530_port_set_vlan_aware(struct dsa_switch *ds, int port)
+{
+	struct mt7530_priv *priv = ds->priv;
+
+	/* The real fabric path is decided by the port membership in the
+	 * VLAN table entry. Setting up PCR_MATRIX with ALL_MEMBERS here
+	 * means a potential VLAN can consist of any subset of all ports.
+	 */
+	mt7530_rmw(priv, MT7530_PCR_P(port),
+		   PCR_MATRIX_MASK, PCR_MATRIX(MT7530_ALL_MEMBERS));
+
+	/* Trapping the port into security mode allows packet forwarding
+	 * through a VLAN table lookup.
+	 */
+	mt7530_rmw(priv, MT7530_PCR_P(port), PCR_PORT_VLAN_MASK,
+		   MT7530_PORT_SECURITY_MODE);
+
+	/* Set the port as a user port, which is able to recognize the VID
+	 * from incoming packets before fetching the entry in the VLAN table.
+	 */
+	mt7530_rmw(priv, MT7530_PVC_P(port), VLAN_ATTR_MASK,
+		   VLAN_ATTR(MT7530_VLAN_USER));
+}
+
+static void
 mt7530_port_bridge_leave(struct dsa_switch *ds, int port,
 			 struct net_device *bridge)
 {
@@ -817,8 +880,11 @@ mt7530_port_bridge_leave(struct dsa_switch *ds, int port,
 		/* Remove this port from the port matrix of the other ports
 		 * in the same bridge. If the port is disabled, port matrix
 		 * is kept and not being setup until the port becomes enabled.
+		 * Also, the other port's port matrix cannot be broken while
+		 * the other port is still a VLAN-aware port.
 		 */
-		if (dsa_is_user_port(ds, i) && i != port) {
+		if (!priv->ports[i].vlan_filtering &&
+		    dsa_is_user_port(ds, i) && i != port) {
 			if (dsa_to_port(ds, i)->bridge_dev != bridge)
 				continue;
 			if (priv->ports[i].enable)
@@ -836,6 +902,8 @@ mt7530_port_bridge_leave(struct dsa_switch *ds, int port,
 			   PCR_MATRIX(BIT(MT7530_CPU_PORT)));
 	priv->ports[port].pm = PCR_MATRIX(BIT(MT7530_CPU_PORT));
 
+	mt7530_port_set_vlan_unaware(ds, port);
+
 	mutex_unlock(&priv->reg_mutex);
 }
 
@@ -906,6 +974,220 @@ mt7530_port_fdb_dump(struct dsa_switch *ds, int port,
 	return 0;
 }
 
+static int
+mt7530_vlan_cmd(struct mt7530_priv *priv, enum mt7530_vlan_cmd cmd, u16 vid)
+{
+	struct mt7530_dummy_poll p;
+	u32 val;
+	int ret;
+
+	val = VTCR_BUSY | VTCR_FUNC(cmd) | vid;
+	mt7530_write(priv, MT7530_VTCR, val);
+
+	INIT_MT7530_DUMMY_POLL(&p, priv, MT7530_VTCR);
+	ret = readx_poll_timeout(_mt7530_read, &p, val,
+				 !(val & VTCR_BUSY), 20, 20000);
+	if (ret < 0) {
+		dev_err(priv->dev, "poll timeout\n");
+		return ret;
+	}
+
+	val = mt7530_read(priv, MT7530_VTCR);
+	if (val & VTCR_INVALID) {
+		dev_err(priv->dev, "read VTCR invalid\n");
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int
+mt7530_port_vlan_filtering(struct dsa_switch *ds, int port,
+			   bool vlan_filtering)
+{
+	struct mt7530_priv *priv = ds->priv;
+
+	priv->ports[port].vlan_filtering = vlan_filtering;
+
+	if (vlan_filtering) {
+		/* The port is kept as a VLAN-unaware port when the bridge is
+		 * set up with vlan_filtering not set. Otherwise, the port
+		 * and the corresponding CPU port need to be set up as
+		 * VLAN-aware ports.
+		 */
+		mt7530_port_set_vlan_aware(ds, port);
+		mt7530_port_set_vlan_aware(ds, MT7530_CPU_PORT);
+	}
+
+	return 0;
+}
+
+static int
+mt7530_port_vlan_prepare(struct dsa_switch *ds, int port,
+			 const struct switchdev_obj_port_vlan *vlan)
+{
+	/* nothing needed */
+
+	return 0;
+}
+
+static void
+mt7530_hw_vlan_add(struct mt7530_priv *priv,
+		   struct mt7530_hw_vlan_entry *entry)
+{
+	u8 new_members;
+	u32 val;
+
+	new_members = entry->old_members | BIT(entry->port) |
+		      BIT(MT7530_CPU_PORT);
+
+	/* Validate the entry with independent learning and per-VLAN egress
+	 * tagging, and join the port to the entry's member set.
+	 */
+	val = IVL_MAC | VTAG_EN | PORT_MEM(new_members) | VLAN_VALID;
+	mt7530_write(priv, MT7530_VAWD1, val);
+
+	/* Decide whether packets leaving the port within this VLAN are sent
+	 * tagged or untagged.
+	 */
+	val = entry->untagged ? MT7530_VLAN_EGRESS_UNTAG :
+				MT7530_VLAN_EGRESS_TAG;
+	mt7530_rmw(priv, MT7530_VAWD2,
+		   ETAG_CTRL_P_MASK(entry->port),
+		   ETAG_CTRL_P(entry->port, val));
+
+	/* The CPU port is always treated as a tagged port so it can serve
+	 * more than one VLAN, and is given the stack egress mode so that the
+	 * VLAN tag is appended after the hardware special tag used as the
+	 * DSA tag.
+	 */
+	mt7530_rmw(priv, MT7530_VAWD2,
+		   ETAG_CTRL_P_MASK(MT7530_CPU_PORT),
+		   ETAG_CTRL_P(MT7530_CPU_PORT,
+			       MT7530_VLAN_EGRESS_STACK));
+}
+
+static void
+mt7530_hw_vlan_del(struct mt7530_priv *priv,
+		   struct mt7530_hw_vlan_entry *entry)
+{
+	u8 new_members;
+	u32 val;
+
+	new_members = entry->old_members & ~BIT(entry->port);
+
+	val = mt7530_read(priv, MT7530_VAWD1);
+	if (!(val & VLAN_VALID)) {
+		dev_err(priv->dev,
+			"Cannot delete VLAN: entry is invalid\n");
+		return;
+	}
+
+	/* If any member other than the CPU port is still alive in the VLAN,
+	 * the entry is kept valid. Otherwise, the entry is disabled.
+	 */
+	if (new_members && new_members != BIT(MT7530_CPU_PORT)) {
+		val = IVL_MAC | VTAG_EN | PORT_MEM(new_members) |
+		      VLAN_VALID;
+		mt7530_write(priv, MT7530_VAWD1, val);
+	} else {
+		mt7530_write(priv, MT7530_VAWD1, 0);
+		mt7530_write(priv, MT7530_VAWD2, 0);
+	}
+}
+
+static void
+mt7530_hw_vlan_update(struct mt7530_priv *priv, u16 vid,
+		      struct mt7530_hw_vlan_entry *entry,
+		      mt7530_vlan_op vlan_op)
+{
+	u32 val;
+
+	/* Fetch entry */
+	mt7530_vlan_cmd(priv, MT7530_VTCR_RD_VID, vid);
+
+	val = mt7530_read(priv, MT7530_VAWD1);
+
+	entry->old_members = (val >> PORT_MEM_SHFT) & PORT_MEM_MASK;
+
+	/* Manipulate entry */
+	vlan_op(priv, entry);
+
+	/* Flush result to hardware */
+	mt7530_vlan_cmd(priv, MT7530_VTCR_WR_VID, vid);
+}
+
+static void
+mt7530_port_vlan_add(struct dsa_switch *ds, int port,
+		     const struct switchdev_obj_port_vlan *vlan)
+{
+	bool untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
+	bool pvid = vlan->flags & BRIDGE_VLAN_INFO_PVID;
+	struct mt7530_hw_vlan_entry new_entry;
+	struct mt7530_priv *priv = ds->priv;
+	u16 vid;
+
+	/* The port stays VLAN-unaware if the bridge is set up without
+	 * vlan_filtering being set.
+	 */
+	if (!priv->ports[port].vlan_filtering)
+		return;
+
+	mutex_lock(&priv->reg_mutex);
+
+	for (vid = vlan->vid_begin; vid <= vlan->vid_end; ++vid) {
+		mt7530_hw_vlan_entry_init(&new_entry, port, untagged);
+		mt7530_hw_vlan_update(priv, vid, &new_entry,
+				      mt7530_hw_vlan_add);
+	}
+
+	if (pvid) {
+		mt7530_rmw(priv, MT7530_PPBV1_P(port), G0_PORT_VID_MASK,
+			   G0_PORT_VID(vlan->vid_end));
+		priv->ports[port].pvid = vlan->vid_end;
+	}
+
+	mutex_unlock(&priv->reg_mutex);
+}
+
+static int
+mt7530_port_vlan_del(struct dsa_switch *ds, int port,
+		     const struct switchdev_obj_port_vlan *vlan)
+{
+	struct mt7530_hw_vlan_entry target_entry;
+	struct mt7530_priv *priv = ds->priv;
+	u16 vid, pvid;
+
+	/* The port stays VLAN-unaware if the bridge is set up without
+	 * vlan_filtering being set.
+	 */
+	if (!priv->ports[port].vlan_filtering)
+		return 0;
+
+	mutex_lock(&priv->reg_mutex);
+
+	pvid = priv->ports[port].pvid;
+	for (vid = vlan->vid_begin; vid <= vlan->vid_end; ++vid) {
+		mt7530_hw_vlan_entry_init(&target_entry, port, 0);
+		mt7530_hw_vlan_update(priv, vid, &target_entry,
+				      mt7530_hw_vlan_del);
+
+		/* The PVID is restored to the default whenever the port is
+		 * removed from its PVID VLAN.
+		 */
+		if (pvid == vid)
+			pvid = G0_PORT_VID_DEF;
+	}
+
+	mt7530_rmw(priv, MT7530_PPBV1_P(port), G0_PORT_VID_MASK, pvid);
+	priv->ports[port].pvid = pvid;
+
+	mutex_unlock(&priv->reg_mutex);
+
+	return 0;
+}
+
 static enum dsa_tag_protocol
 mtk_get_tag_protocol(struct dsa_switch *ds, int port)
 {
@@ -1035,6 +1317,10 @@ static const struct dsa_switch_ops mt7530_switch_ops = {
 	.port_fdb_add		= mt7530_port_fdb_add,
 	.port_fdb_del		= mt7530_port_fdb_del,
 	.port_fdb_dump		= mt7530_port_fdb_dump,
+	.port_vlan_filtering	= mt7530_port_vlan_filtering,
+	.port_vlan_prepare	= mt7530_port_vlan_prepare,
+	.port_vlan_add		= mt7530_port_vlan_add,
+	.port_vlan_del		= mt7530_port_vlan_del,
 };
 
 static int
diff --git a/drivers/net/dsa/mt7530.h b/drivers/net/dsa/mt7530.h
index 74db982..d9b407a 100644
--- a/drivers/net/dsa/mt7530.h
+++ b/drivers/net/dsa/mt7530.h
@@ -17,6 +17,7 @@
 #define MT7530_NUM_PORTS		7
 #define MT7530_CPU_PORT			6
 #define MT7530_NUM_FDB_RECORDS		2048
+#define MT7530_ALL_MEMBERS		0xff
 
 #define	NUM_TRGMII_CTRL			5
 
@@ -88,21 +89,42 @@ enum mt7530_fdb_cmd {
 /* Register for vlan table control */
 #define MT7530_VTCR			0x90
 #define  VTCR_BUSY			BIT(31)
-#define  VTCR_FUNC			(((x) & 0xf) << 12)
-#define  VTCR_FUNC_RD_VID		0x1
-#define  VTCR_FUNC_WR_VID		0x2
-#define  VTCR_FUNC_INV_VID		0x3
-#define  VTCR_FUNC_VAL_VID		0x4
+#define  VTCR_INVALID			BIT(16)
+#define  VTCR_FUNC(x)			(((x) & 0xf) << 12)
 #define  VTCR_VID			((x) & 0xfff)
 
+enum mt7530_vlan_cmd {
+	/* Read/Write the specified VID entry from/to the VAWD registers,
+	 * addressed by VID.
+	 */
+	MT7530_VTCR_RD_VID = 0,
+	MT7530_VTCR_WR_VID = 1,
+};
+
 /* Register for setup vlan and acl write data */
 #define MT7530_VAWD1			0x94
 #define  PORT_STAG			BIT(31)
+/* Independent VLAN Learning */
 #define  IVL_MAC			BIT(30)
+/* Per VLAN Egress Tag Control */
+#define  VTAG_EN			BIT(28)
+/* VLAN Member Control */
 #define  PORT_MEM(x)			(((x) & 0xff) << 16)
-#define  VALID				BIT(1)
+/* VLAN Entry Valid */
+#define  VLAN_VALID			BIT(0)
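+/* Shift/mask for reading the member bitmap back out of VAWD1 */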
+#define  PORT_MEM_SHFT			16
+#define  PORT_MEM_MASK			0xff
 
 #define MT7530_VAWD2			0x98
+/* Egress Tag Control */
+#define  ETAG_CTRL_P(p, x)		(((x) & 0x3) << ((p) << 1))
+#define  ETAG_CTRL_P_MASK(p)		ETAG_CTRL_P(p, 3)
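+/* Each port owns a 2-bit field, e.g. ETAG_CTRL_P_MASK(6) covers bits 13:12 */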
+
+enum mt7530_vlan_egress_attr {
+	MT7530_VLAN_EGRESS_UNTAG = 0,
+	MT7530_VLAN_EGRESS_TAG = 2,
+	MT7530_VLAN_EGRESS_STACK = 3,
+};
 
 /* Register for port STP state control */
 #define MT7530_SSP_P(x)			(0x2000 + ((x) * 0x100))
@@ -120,11 +142,23 @@ enum mt7530_stp_state {
 /* Register for port control */
 #define MT7530_PCR_P(x)			(0x2004 + ((x) * 0x100))
 #define  PORT_VLAN(x)			((x) & 0x3)
+
+enum mt7530_port_mode {
+	/* Port Matrix Mode: Frames are forwarded by the PCR_MATRIX members. */
+	MT7530_PORT_MATRIX_MODE = PORT_VLAN(0),
+
+	/* Security Mode: Discard any frame that violates ingress membership
+	 * or whose VID is missing from the VLAN table.
+	 */
+	MT7530_PORT_SECURITY_MODE = PORT_VLAN(3),
+};
+
 #define  PCR_MATRIX(x)			(((x) & 0xff) << 16)
 #define  PORT_PRI(x)			(((x) & 0x7) << 24)
 #define  EG_TAG(x)			(((x) & 0x3) << 28)
 #define  PCR_MATRIX_MASK		PCR_MATRIX(0xff)
 #define  PCR_MATRIX_CLR			PCR_MATRIX(0)
+#define  PCR_PORT_VLAN_MASK		PORT_VLAN(3)
 
 /* Register for port security control */
 #define MT7530_PSC_P(x)			(0x200c + ((x) * 0x100))
@@ -134,10 +168,20 @@ enum mt7530_stp_state {
 #define MT7530_PVC_P(x)			(0x2010 + ((x) * 0x100))
 #define  PORT_SPEC_TAG			BIT(5)
 #define  VLAN_ATTR(x)			(((x) & 0x3) << 6)
+#define  VLAN_ATTR_MASK			VLAN_ATTR(3)
+
+enum mt7530_vlan_port_attr {
+	MT7530_VLAN_USER = 0,
+	MT7530_VLAN_TRANSPARENT = 3,
+};
+
 #define  STAG_VPID			(((x) & 0xffff) << 16)
 
 /* Register for port port-and-protocol based vlan 1 control */
 #define MT7530_PPBV1_P(x)		(0x2014 + ((x) * 0x100))
+#define  G0_PORT_VID(x)			(((x) & 0xfff) << 0)
+#define  G0_PORT_VID_MASK		G0_PORT_VID(0xfff)
+#define  G0_PORT_VID_DEF		G0_PORT_VID(1)
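+/* VID 1 is the default PVID, restored when a port leaves its PVID VLAN */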
 
 /* Register for port MAC control register */
 #define MT7530_PMCR_P(x)		(0x3000 + ((x) * 0x100))
@@ -345,9 +389,20 @@ struct mt7530_fdb {
 	bool noarp;
 };
 
+/* struct mt7530_port -	This is the main data structure for holding the state
+ *			of the port.
+ * @enable:	Whether the port is enabled.
+ * @pm:		The matrix recording all connections with the port.
+ * @pvid:	The VLAN treated as the PVID at ingress.  Any untagged frames
+ *		will be assigned to this VLAN.
+ * @vlan_filtering: Flag indicating whether the port can recognize
+ *		    VLAN-tagged frames.
+ */
 struct mt7530_port {
 	bool enable;
 	u32 pm;
+	u16 pvid;
+	bool vlan_filtering;
 };
 
 /* struct mt7530_priv -	This is the main data structure for holding the state
@@ -382,6 +437,22 @@ struct mt7530_priv {
 	struct mutex reg_mutex;
 };
 
+struct mt7530_hw_vlan_entry {
+	int port;
+	u8  old_members;
+	bool untagged;
+};
+
+static inline void mt7530_hw_vlan_entry_init(struct mt7530_hw_vlan_entry *e,
+					     int port, bool untagged)
+{
+	e->port = port;
+	e->untagged = untagged;
+}
+
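+/* Callback used by mt7530_hw_vlan_update() to manipulate the fetched VLAN
+ * table entry, e.g. mt7530_hw_vlan_add() or mt7530_hw_vlan_del().
+ */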
+typedef void (*mt7530_vlan_op)(struct mt7530_priv *,
+			       struct mt7530_hw_vlan_entry *);
+
 struct mt7530_hw_stats {
 	const char	*string;
 	u16		reg;
diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c
index 66d33e9..fc512c9 100644
--- a/drivers/net/dsa/mv88e6xxx/chip.c
+++ b/drivers/net/dsa/mv88e6xxx/chip.c
@@ -1185,8 +1185,7 @@ static int mv88e6xxx_port_vlan_filtering(struct dsa_switch *ds, int port,
 
 static int
 mv88e6xxx_port_vlan_prepare(struct dsa_switch *ds, int port,
-			    const struct switchdev_obj_port_vlan *vlan,
-			    struct switchdev_trans *trans)
+			    const struct switchdev_obj_port_vlan *vlan)
 {
 	struct mv88e6xxx_chip *chip = ds->priv;
 	int err;
@@ -1295,8 +1294,7 @@ static int _mv88e6xxx_port_vlan_add(struct mv88e6xxx_chip *chip, int port,
 }
 
 static void mv88e6xxx_port_vlan_add(struct dsa_switch *ds, int port,
-				    const struct switchdev_obj_port_vlan *vlan,
-				    struct switchdev_trans *trans)
+				    const struct switchdev_obj_port_vlan *vlan)
 {
 	struct mv88e6xxx_chip *chip = ds->priv;
 	bool untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
@@ -1725,9 +1723,11 @@ static int mv88e6xxx_setup_message_port(struct mv88e6xxx_chip *chip, int port)
 
 static int mv88e6xxx_setup_egress_floods(struct mv88e6xxx_chip *chip, int port)
 {
-	bool flood = port == dsa_upstream_port(chip->ds);
+	struct dsa_switch *ds = chip->ds;
+	bool flood;
 
 	/* Upstream ports flood frames with unknown unicast or multicast DA */
+	flood = dsa_is_cpu_port(ds, port) || dsa_is_dsa_port(ds, port);
 	if (chip->info->ops->port_set_egress_floods)
 		return chip->info->ops->port_set_egress_floods(chip, port,
 							       flood, flood);
@@ -1744,6 +1744,39 @@ static int mv88e6xxx_serdes_power(struct mv88e6xxx_chip *chip, int port,
 	return 0;
 }
 
+static int mv88e6xxx_setup_upstream_port(struct mv88e6xxx_chip *chip, int port)
+{
+	struct dsa_switch *ds = chip->ds;
+	int upstream_port;
+	int err;
+
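+	/* Point the port at its upstream port; if this port is the upstream
+	 * port itself, also make it the chip's CPU and egress monitor port.
+	 */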
+	upstream_port = dsa_upstream_port(ds, port);
+	if (chip->info->ops->port_set_upstream_port) {
+		err = chip->info->ops->port_set_upstream_port(chip, port,
+							      upstream_port);
+		if (err)
+			return err;
+	}
+
+	if (port == upstream_port) {
+		if (chip->info->ops->set_cpu_port) {
+			err = chip->info->ops->set_cpu_port(chip,
+							    upstream_port);
+			if (err)
+				return err;
+		}
+
+		if (chip->info->ops->set_egress_port) {
+			err = chip->info->ops->set_egress_port(chip,
+							       upstream_port);
+			if (err)
+				return err;
+		}
+	}
+
+	return 0;
+}
+
 static int mv88e6xxx_setup_port(struct mv88e6xxx_chip *chip, int port)
 {
 	struct dsa_switch *ds = chip->ds;
@@ -1814,13 +1847,9 @@ static int mv88e6xxx_setup_port(struct mv88e6xxx_chip *chip, int port)
 	if (err)
 		return err;
 
-	reg = 0;
-	if (chip->info->ops->port_set_upstream_port) {
-		err = chip->info->ops->port_set_upstream_port(
-			chip, port, dsa_upstream_port(ds));
-		if (err)
-			return err;
-	}
+	err = mv88e6xxx_setup_upstream_port(chip, port);
+	if (err)
+		return err;
 
 	err = mv88e6xxx_port_set_8021q_mode(chip, port,
 				MV88E6XXX_PORT_CTL2_8021Q_MODE_DISABLED);
@@ -1946,21 +1975,8 @@ static int mv88e6xxx_set_ageing_time(struct dsa_switch *ds,
 static int mv88e6xxx_g1_setup(struct mv88e6xxx_chip *chip)
 {
 	struct dsa_switch *ds = chip->ds;
-	u32 upstream_port = dsa_upstream_port(ds);
 	int err;
 
-	if (chip->info->ops->set_cpu_port) {
-		err = chip->info->ops->set_cpu_port(chip, upstream_port);
-		if (err)
-			return err;
-	}
-
-	if (chip->info->ops->set_egress_port) {
-		err = chip->info->ops->set_egress_port(chip, upstream_port);
-		if (err)
-			return err;
-	}
-
 	/* Disable remote management, and set the switch's DSA device number. */
 	err = mv88e6xxx_g1_write(chip, MV88E6XXX_G1_CTL2,
 				 MV88E6XXX_G1_CTL2_MULTIPLE_CASCADE |
@@ -3741,6 +3757,7 @@ static enum dsa_tag_protocol mv88e6xxx_get_tag_protocol(struct dsa_switch *ds,
 	return chip->info->tag_protocol;
 }
 
+#if IS_ENABLED(CONFIG_NET_DSA_LEGACY)
 static const char *mv88e6xxx_drv_probe(struct device *dsa_dev,
 				       struct device *host_dev, int sw_addr,
 				       void **priv)
@@ -3788,10 +3805,10 @@ static const char *mv88e6xxx_drv_probe(struct device *dsa_dev,
 
 	return NULL;
 }
+#endif
 
 static int mv88e6xxx_port_mdb_prepare(struct dsa_switch *ds, int port,
-				      const struct switchdev_obj_port_mdb *mdb,
-				      struct switchdev_trans *trans)
+				      const struct switchdev_obj_port_mdb *mdb)
 {
 	/* We don't need any dynamic resource from the kernel (yet),
 	 * so skip the prepare phase.
@@ -3801,8 +3818,7 @@ static int mv88e6xxx_port_mdb_prepare(struct dsa_switch *ds, int port,
 }
 
 static void mv88e6xxx_port_mdb_add(struct dsa_switch *ds, int port,
-				   const struct switchdev_obj_port_mdb *mdb,
-				   struct switchdev_trans *trans)
+				   const struct switchdev_obj_port_mdb *mdb)
 {
 	struct mv88e6xxx_chip *chip = ds->priv;
 
@@ -3829,7 +3845,9 @@ static int mv88e6xxx_port_mdb_del(struct dsa_switch *ds, int port,
 }
 
 static const struct dsa_switch_ops mv88e6xxx_switch_ops = {
+#if IS_ENABLED(CONFIG_NET_DSA_LEGACY)
 	.probe			= mv88e6xxx_drv_probe,
+#endif
 	.get_tag_protocol	= mv88e6xxx_get_tag_protocol,
 	.setup			= mv88e6xxx_setup,
 	.adjust_link		= mv88e6xxx_adjust_link,
diff --git a/drivers/net/dummy.c b/drivers/net/dummy.c
index 58483af..30b1c85 100644
--- a/drivers/net/dummy.c
+++ b/drivers/net/dummy.c
@@ -42,48 +42,7 @@
 #define DRV_NAME	"dummy"
 #define DRV_VERSION	"1.0"
 
-#undef pr_fmt
-#define pr_fmt(fmt) DRV_NAME ": " fmt
-
 static int numdummies = 1;
-static int num_vfs;
-
-struct vf_data_storage {
-	u8	vf_mac[ETH_ALEN];
-	u16	pf_vlan; /* When set, guest VLAN config not allowed. */
-	u16	pf_qos;
-	__be16	vlan_proto;
-	u16	min_tx_rate;
-	u16	max_tx_rate;
-	u8	spoofchk_enabled;
-	bool	rss_query_enabled;
-	u8	trusted;
-	int	link_state;
-};
-
-struct dummy_priv {
-	struct vf_data_storage	*vfinfo;
-};
-
-static int dummy_num_vf(struct device *dev)
-{
-	return num_vfs;
-}
-
-static struct bus_type dummy_bus = {
-	.name	= "dummy",
-	.num_vf	= dummy_num_vf,
-};
-
-static void release_dummy_parent(struct device *dev)
-{
-}
-
-static struct device dummy_parent = {
-	.init_name	= "dummy",
-	.bus		= &dummy_bus,
-	.release	= release_dummy_parent,
-};
 
 /* fake multicast ability */
 static void set_multicast_list(struct net_device *dev)
@@ -133,25 +92,10 @@ static netdev_tx_t dummy_xmit(struct sk_buff *skb, struct net_device *dev)
 
 static int dummy_dev_init(struct net_device *dev)
 {
-	struct dummy_priv *priv = netdev_priv(dev);
-
 	dev->dstats = netdev_alloc_pcpu_stats(struct pcpu_dstats);
 	if (!dev->dstats)
 		return -ENOMEM;
 
-	priv->vfinfo = NULL;
-
-	if (!num_vfs)
-		return 0;
-
-	dev->dev.parent = &dummy_parent;
-	priv->vfinfo = kcalloc(num_vfs, sizeof(struct vf_data_storage),
-			       GFP_KERNEL);
-	if (!priv->vfinfo) {
-		free_percpu(dev->dstats);
-		return -ENOMEM;
-	}
-
 	return 0;
 }
 
@@ -169,117 +113,6 @@ static int dummy_change_carrier(struct net_device *dev, bool new_carrier)
 	return 0;
 }
 
-static int dummy_set_vf_mac(struct net_device *dev, int vf, u8 *mac)
-{
-	struct dummy_priv *priv = netdev_priv(dev);
-
-	if (!is_valid_ether_addr(mac) || (vf >= num_vfs))
-		return -EINVAL;
-
-	memcpy(priv->vfinfo[vf].vf_mac, mac, ETH_ALEN);
-
-	return 0;
-}
-
-static int dummy_set_vf_vlan(struct net_device *dev, int vf,
-			     u16 vlan, u8 qos, __be16 vlan_proto)
-{
-	struct dummy_priv *priv = netdev_priv(dev);
-
-	if ((vf >= num_vfs) || (vlan > 4095) || (qos > 7))
-		return -EINVAL;
-
-	priv->vfinfo[vf].pf_vlan = vlan;
-	priv->vfinfo[vf].pf_qos = qos;
-	priv->vfinfo[vf].vlan_proto = vlan_proto;
-
-	return 0;
-}
-
-static int dummy_set_vf_rate(struct net_device *dev, int vf, int min, int max)
-{
-	struct dummy_priv *priv = netdev_priv(dev);
-
-	if (vf >= num_vfs)
-		return -EINVAL;
-
-	priv->vfinfo[vf].min_tx_rate = min;
-	priv->vfinfo[vf].max_tx_rate = max;
-
-	return 0;
-}
-
-static int dummy_set_vf_spoofchk(struct net_device *dev, int vf, bool val)
-{
-	struct dummy_priv *priv = netdev_priv(dev);
-
-	if (vf >= num_vfs)
-		return -EINVAL;
-
-	priv->vfinfo[vf].spoofchk_enabled = val;
-
-	return 0;
-}
-
-static int dummy_set_vf_rss_query_en(struct net_device *dev, int vf, bool val)
-{
-	struct dummy_priv *priv = netdev_priv(dev);
-
-	if (vf >= num_vfs)
-		return -EINVAL;
-
-	priv->vfinfo[vf].rss_query_enabled = val;
-
-	return 0;
-}
-
-static int dummy_set_vf_trust(struct net_device *dev, int vf, bool val)
-{
-	struct dummy_priv *priv = netdev_priv(dev);
-
-	if (vf >= num_vfs)
-		return -EINVAL;
-
-	priv->vfinfo[vf].trusted = val;
-
-	return 0;
-}
-
-static int dummy_get_vf_config(struct net_device *dev,
-			       int vf, struct ifla_vf_info *ivi)
-{
-	struct dummy_priv *priv = netdev_priv(dev);
-
-	if (vf >= num_vfs)
-		return -EINVAL;
-
-	ivi->vf = vf;
-	memcpy(&ivi->mac, priv->vfinfo[vf].vf_mac, ETH_ALEN);
-	ivi->vlan = priv->vfinfo[vf].pf_vlan;
-	ivi->qos = priv->vfinfo[vf].pf_qos;
-	ivi->spoofchk = priv->vfinfo[vf].spoofchk_enabled;
-	ivi->linkstate = priv->vfinfo[vf].link_state;
-	ivi->min_tx_rate = priv->vfinfo[vf].min_tx_rate;
-	ivi->max_tx_rate = priv->vfinfo[vf].max_tx_rate;
-	ivi->rss_query_en = priv->vfinfo[vf].rss_query_enabled;
-	ivi->trusted = priv->vfinfo[vf].trusted;
-	ivi->vlan_proto = priv->vfinfo[vf].vlan_proto;
-
-	return 0;
-}
-
-static int dummy_set_vf_link_state(struct net_device *dev, int vf, int state)
-{
-	struct dummy_priv *priv = netdev_priv(dev);
-
-	if (vf >= num_vfs)
-		return -EINVAL;
-
-	priv->vfinfo[vf].link_state = state;
-
-	return 0;
-}
-
 static const struct net_device_ops dummy_netdev_ops = {
 	.ndo_init		= dummy_dev_init,
 	.ndo_uninit		= dummy_dev_uninit,
@@ -289,14 +122,6 @@ static const struct net_device_ops dummy_netdev_ops = {
 	.ndo_set_mac_address	= eth_mac_addr,
 	.ndo_get_stats64	= dummy_get_stats64,
 	.ndo_change_carrier	= dummy_change_carrier,
-	.ndo_set_vf_mac		= dummy_set_vf_mac,
-	.ndo_set_vf_vlan	= dummy_set_vf_vlan,
-	.ndo_set_vf_rate	= dummy_set_vf_rate,
-	.ndo_set_vf_spoofchk	= dummy_set_vf_spoofchk,
-	.ndo_set_vf_trust	= dummy_set_vf_trust,
-	.ndo_get_vf_config	= dummy_get_vf_config,
-	.ndo_set_vf_link_state	= dummy_set_vf_link_state,
-	.ndo_set_vf_rss_query_en = dummy_set_vf_rss_query_en,
 };
 
 static void dummy_get_drvinfo(struct net_device *dev,
@@ -323,13 +148,6 @@ static const struct ethtool_ops dummy_ethtool_ops = {
 	.get_ts_info		= dummy_get_ts_info,
 };
 
-static void dummy_free_netdev(struct net_device *dev)
-{
-	struct dummy_priv *priv = netdev_priv(dev);
-
-	kfree(priv->vfinfo);
-}
-
 static void dummy_setup(struct net_device *dev)
 {
 	ether_setup(dev);
@@ -338,7 +156,6 @@ static void dummy_setup(struct net_device *dev)
 	dev->netdev_ops = &dummy_netdev_ops;
 	dev->ethtool_ops = &dummy_ethtool_ops;
 	dev->needs_free_netdev = true;
-	dev->priv_destructor = dummy_free_netdev;
 
 	/* Fill in device structure with ethernet-generic values. */
 	dev->flags |= IFF_NOARP;
@@ -370,7 +187,6 @@ static int dummy_validate(struct nlattr *tb[], struct nlattr *data[],
 
 static struct rtnl_link_ops dummy_link_ops __read_mostly = {
 	.kind		= DRV_NAME,
-	.priv_size	= sizeof(struct dummy_priv),
 	.setup		= dummy_setup,
 	.validate	= dummy_validate,
 };
@@ -379,16 +195,12 @@ static struct rtnl_link_ops dummy_link_ops __read_mostly = {
 module_param(numdummies, int, 0);
 MODULE_PARM_DESC(numdummies, "Number of dummy pseudo devices");
 
-module_param(num_vfs, int, 0);
-MODULE_PARM_DESC(num_vfs, "Number of dummy VFs per dummy device");
-
 static int __init dummy_init_one(void)
 {
 	struct net_device *dev_dummy;
 	int err;
 
-	dev_dummy = alloc_netdev(sizeof(struct dummy_priv),
-				 "dummy%d", NET_NAME_ENUM, dummy_setup);
+	dev_dummy = alloc_netdev(0, "dummy%d", NET_NAME_ENUM, dummy_setup);
 	if (!dev_dummy)
 		return -ENOMEM;
 
@@ -407,21 +219,6 @@ static int __init dummy_init_module(void)
 {
 	int i, err = 0;
 
-	if (num_vfs) {
-		err = bus_register(&dummy_bus);
-		if (err < 0) {
-			pr_err("registering dummy bus failed\n");
-			return err;
-		}
-
-		err = device_register(&dummy_parent);
-		if (err < 0) {
-			pr_err("registering dummy parent device failed\n");
-			bus_unregister(&dummy_bus);
-			return err;
-		}
-	}
-
 	rtnl_lock();
 	err = __rtnl_link_register(&dummy_link_ops);
 	if (err < 0)
@@ -437,22 +234,12 @@ static int __init dummy_init_module(void)
 out:
 	rtnl_unlock();
 
-	if (err && num_vfs) {
-		device_unregister(&dummy_parent);
-		bus_unregister(&dummy_bus);
-	}
-
 	return err;
 }
 
 static void __exit dummy_cleanup_module(void)
 {
 	rtnl_link_unregister(&dummy_link_ops);
-
-	if (num_vfs) {
-		device_unregister(&dummy_parent);
-		bus_unregister(&dummy_bus);
-	}
 }
 
 module_init(dummy_init_module);
diff --git a/drivers/net/ethernet/Kconfig b/drivers/net/ethernet/Kconfig
index c6042133..d50519e 100644
--- a/drivers/net/ethernet/Kconfig
+++ b/drivers/net/ethernet/Kconfig
@@ -170,6 +170,7 @@
 source "drivers/net/ethernet/sfc/Kconfig"
 source "drivers/net/ethernet/sgi/Kconfig"
 source "drivers/net/ethernet/smsc/Kconfig"
+source "drivers/net/ethernet/socionext/Kconfig"
 source "drivers/net/ethernet/stmicro/Kconfig"
 source "drivers/net/ethernet/sun/Kconfig"
 source "drivers/net/ethernet/tehuti/Kconfig"
diff --git a/drivers/net/ethernet/Makefile b/drivers/net/ethernet/Makefile
index 39f62733..6cf5ade 100644
--- a/drivers/net/ethernet/Makefile
+++ b/drivers/net/ethernet/Makefile
@@ -82,6 +82,7 @@
 obj-$(CONFIG_SFC_FALCON) += sfc/falcon/
 obj-$(CONFIG_NET_VENDOR_SGI) += sgi/
 obj-$(CONFIG_NET_VENDOR_SMSC) += smsc/
+obj-$(CONFIG_NET_VENDOR_SOCIONEXT) += socionext/
 obj-$(CONFIG_NET_VENDOR_STMICRO) += stmicro/
 obj-$(CONFIG_NET_VENDOR_SUN) += sun/
 obj-$(CONFIG_NET_VENDOR_TEHUTI) += tehuti/
diff --git a/drivers/net/ethernet/alteon/acenic.c b/drivers/net/ethernet/alteon/acenic.c
index a1a52eb..8f71b79 100644
--- a/drivers/net/ethernet/alteon/acenic.c
+++ b/drivers/net/ethernet/alteon/acenic.c
@@ -1436,13 +1436,13 @@ static int ace_init(struct net_device *dev)
 	ace_set_txprd(regs, ap, 0);
 	writel(0, &regs->RxRetCsm);
 
-       /*
-	* Enable DMA engine now.
-	* If we do this sooner, Mckinley box pukes.
-	* I assume it's because Tigon II DMA engine wants to check
-	* *something* even before the CPU is started.
-	*/
-       writel(1, &regs->AssistState);  /* enable DMA */
+	/*
+	 * Enable DMA engine now.
+	 * If we do this sooner, Mckinley box pukes.
+	 * I assume it's because Tigon II DMA engine wants to check
+	 * *something* even before the CPU is started.
+	 */
+	writel(1, &regs->AssistState);  /* enable DMA */
 
 	/*
 	 * Start the NIC CPU
diff --git a/drivers/net/ethernet/amazon/ena/ena_eth_com.c b/drivers/net/ethernet/amazon/ena/ena_eth_com.c
index b11e573..ea149c1 100644
--- a/drivers/net/ethernet/amazon/ena/ena_eth_com.c
+++ b/drivers/net/ethernet/amazon/ena/ena_eth_com.c
@@ -504,3 +504,8 @@ int ena_com_tx_comp_req_id_get(struct ena_com_io_cq *io_cq, u16 *req_id)
 
 	return 0;
 }
+
+bool ena_com_cq_empty(struct ena_com_io_cq *io_cq)
+{
+	return !ena_com_get_next_rx_cdesc(io_cq);
+}
diff --git a/drivers/net/ethernet/amazon/ena/ena_eth_com.h b/drivers/net/ethernet/amazon/ena/ena_eth_com.h
index bb53c3a..2f76572 100644
--- a/drivers/net/ethernet/amazon/ena/ena_eth_com.h
+++ b/drivers/net/ethernet/amazon/ena/ena_eth_com.h
@@ -88,6 +88,8 @@ int ena_com_add_single_rx_desc(struct ena_com_io_sq *io_sq,
 
 int ena_com_tx_comp_req_id_get(struct ena_com_io_cq *io_cq, u16 *req_id);
 
+bool ena_com_cq_empty(struct ena_com_io_cq *io_cq);
+
 static inline void ena_com_unmask_intr(struct ena_com_io_cq *io_cq,
 				       struct ena_eth_io_intr_reg *intr_reg)
 {
diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.c b/drivers/net/ethernet/amazon/ena/ena_netdev.c
index fbe21a81..6975150 100644
--- a/drivers/net/ethernet/amazon/ena/ena_netdev.c
+++ b/drivers/net/ethernet/amazon/ena/ena_netdev.c
@@ -161,6 +161,8 @@ static void ena_init_io_rings_common(struct ena_adapter *adapter,
 	ring->per_napi_packets = 0;
 	ring->per_napi_bytes = 0;
 	ring->cpu = 0;
+	ring->first_interrupt = false;
+	ring->no_interrupt_event_cnt = 0;
 	u64_stats_init(&ring->syncp);
 }
 
@@ -1277,6 +1279,9 @@ static irqreturn_t ena_intr_msix_io(int irq, void *data)
 {
 	struct ena_napi *ena_napi = data;
 
+	ena_napi->tx_ring->first_interrupt = true;
+	ena_napi->rx_ring->first_interrupt = true;
+
 	napi_schedule_irqoff(&ena_napi->napi);
 
 	return IRQ_HANDLED;
@@ -2663,8 +2668,32 @@ static void ena_fw_reset_device(struct work_struct *work)
 	rtnl_unlock();
 }
 
-static int check_missing_comp_in_queue(struct ena_adapter *adapter,
-				       struct ena_ring *tx_ring)
+static int check_for_rx_interrupt_queue(struct ena_adapter *adapter,
+					struct ena_ring *rx_ring)
+{
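+	/* A queue with pending completions that has never fired an interrupt
+	 * points at a potential MSI-X problem.
+	 */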
+	if (likely(rx_ring->first_interrupt))
+		return 0;
+
+	if (ena_com_cq_empty(rx_ring->ena_com_io_cq))
+		return 0;
+
+	rx_ring->no_interrupt_event_cnt++;
+
+	if (rx_ring->no_interrupt_event_cnt == ENA_MAX_NO_INTERRUPT_ITERATIONS) {
+		netif_err(adapter, rx_err, adapter->netdev,
+			  "Potential MSIX issue on Rx side Queue = %d. Reset the device\n",
+			  rx_ring->qid);
+		adapter->reset_reason = ENA_REGS_RESET_MISS_INTERRUPT;
+		smp_mb__before_atomic();
+		set_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags);
+		return -EIO;
+	}
+
+	return 0;
+}
+
+static int check_missing_comp_in_tx_queue(struct ena_adapter *adapter,
+					  struct ena_ring *tx_ring)
 {
 	struct ena_tx_buffer *tx_buf;
 	unsigned long last_jiffies;
@@ -2674,8 +2703,27 @@ static int check_missing_comp_in_queue(struct ena_adapter *adapter,
 	for (i = 0; i < tx_ring->ring_size; i++) {
 		tx_buf = &tx_ring->tx_buffer_info[i];
 		last_jiffies = tx_buf->last_jiffies;
-		if (unlikely(last_jiffies &&
-			     time_is_before_jiffies(last_jiffies + adapter->missing_tx_completion_to))) {
+
+		if (last_jiffies == 0)
+			/* no pending Tx at this location */
+			continue;
+
+		if (unlikely(!tx_ring->first_interrupt && time_is_before_jiffies(last_jiffies +
+			     2 * adapter->missing_tx_completion_to))) {
+			/* If no interrupt has been received after the grace
+			 * period, schedule a reset.
+			 */
+			netif_err(adapter, tx_err, adapter->netdev,
+				  "Potential MSIX issue on Tx side Queue = %d. Reset the device\n",
+				  tx_ring->qid);
+			adapter->reset_reason = ENA_REGS_RESET_MISS_INTERRUPT;
+			smp_mb__before_atomic();
+			set_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags);
+			return -EIO;
+		}
+
+		if (unlikely(time_is_before_jiffies(last_jiffies +
+				adapter->missing_tx_completion_to))) {
 			if (!tx_buf->print_once)
 				netif_notice(adapter, tx_err, adapter->netdev,
 					     "Found a Tx that wasn't completed on time, qid %d, index %d.\n",
@@ -2704,9 +2752,10 @@ static int check_missing_comp_in_queue(struct ena_adapter *adapter,
 	return rc;
 }
 
-static void check_for_missing_tx_completions(struct ena_adapter *adapter)
+static void check_for_missing_completions(struct ena_adapter *adapter)
 {
 	struct ena_ring *tx_ring;
+	struct ena_ring *rx_ring;
 	int i, budget, rc;
 
 	/* Make sure the driver doesn't turn the device in other process */
@@ -2725,8 +2774,13 @@ static void check_for_missing_tx_completions(struct ena_adapter *adapter)
 
 	for (i = adapter->last_monitored_tx_qid; i < adapter->num_queues; i++) {
 		tx_ring = &adapter->tx_ring[i];
+		rx_ring = &adapter->rx_ring[i];
 
-		rc = check_missing_comp_in_queue(adapter, tx_ring);
+		rc = check_missing_comp_in_tx_queue(adapter, tx_ring);
+		if (unlikely(rc))
+			return;
+
+		rc = check_for_rx_interrupt_queue(adapter, rx_ring);
 		if (unlikely(rc))
 			return;
 
@@ -2885,7 +2939,7 @@ static void ena_timer_service(struct timer_list *t)
 
 	check_for_admin_com_state(adapter);
 
-	check_for_missing_tx_completions(adapter);
+	check_for_missing_completions(adapter);
 
 	check_for_empty_rx_ring(adapter);
 
diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.h b/drivers/net/ethernet/amazon/ena/ena_netdev.h
index 3bbc003..f1972b5 100644
--- a/drivers/net/ethernet/amazon/ena/ena_netdev.h
+++ b/drivers/net/ethernet/amazon/ena/ena_netdev.h
@@ -44,7 +44,7 @@
 #include "ena_eth_com.h"
 
 #define DRV_MODULE_VER_MAJOR	1
-#define DRV_MODULE_VER_MINOR	3
+#define DRV_MODULE_VER_MINOR	5
 #define DRV_MODULE_VER_SUBMINOR 0
 
 #define DRV_MODULE_NAME		"ena"
@@ -122,6 +122,7 @@
  * We wait for 6 sec just to be on the safe side.
  */
 #define ENA_DEVICE_KALIVE_TIMEOUT	(6 * HZ)
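+/* Number of timer-service checks without an interrupt, while completions are
+ * pending, before the driver schedules a device reset
+ */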
+#define ENA_MAX_NO_INTERRUPT_ITERATIONS 3
 
 #define ENA_MMIO_DISABLE_REG_READ	BIT(0)
 
@@ -236,6 +237,9 @@ struct ena_ring {
 	/* The maximum header length the device can handle */
 	u8 tx_max_header_size;
 
+	bool first_interrupt;
+	u16 no_interrupt_event_cnt;
+
 	/* cpu for TPH */
 	int cpu;
 	 /* number of tx/rx_buffer_info's entries */
diff --git a/drivers/net/ethernet/amazon/ena/ena_regs_defs.h b/drivers/net/ethernet/amazon/ena/ena_regs_defs.h
index 9aec43c..48ca97f 100644
--- a/drivers/net/ethernet/amazon/ena/ena_regs_defs.h
+++ b/drivers/net/ethernet/amazon/ena/ena_regs_defs.h
@@ -60,6 +60,8 @@ enum ena_regs_reset_reason_types {
 	ENA_REGS_RESET_USER_TRIGGER		= 12,
 
 	ENA_REGS_RESET_GENERIC			= 13,
+
+	ENA_REGS_RESET_MISS_INTERRUPT		= 14,
 };
 
 /* ena_registers offsets */
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
index a74a8fb..7a3ebfd 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
@@ -2930,9 +2930,8 @@ void xgbe_dump_rx_desc(struct xgbe_prv_data *pdata, struct xgbe_ring *ring,
 void xgbe_print_pkt(struct net_device *netdev, struct sk_buff *skb, bool tx_rx)
 {
 	struct ethhdr *eth = (struct ethhdr *)skb->data;
-	unsigned char *buf = skb->data;
 	unsigned char buffer[128];
-	unsigned int i, j;
+	unsigned int i;
 
 	netdev_dbg(netdev, "\n************** SKB dump ****************\n");
 
@@ -2943,22 +2942,13 @@ void xgbe_print_pkt(struct net_device *netdev, struct sk_buff *skb, bool tx_rx)
 	netdev_dbg(netdev, "Src MAC addr: %pM\n", eth->h_source);
 	netdev_dbg(netdev, "Protocol: %#06hx\n", ntohs(eth->h_proto));
 
-	for (i = 0, j = 0; i < skb->len;) {
-		j += snprintf(buffer + j, sizeof(buffer) - j, "%02hhx",
-			      buf[i++]);
+	for (i = 0; i < skb->len; i += 32) {
+		unsigned int len = min(skb->len - i, 32U);
 
-		if ((i % 32) == 0) {
-			netdev_dbg(netdev, "  %#06x: %s\n", i - 32, buffer);
-			j = 0;
-		} else if ((i % 16) == 0) {
-			buffer[j++] = ' ';
-			buffer[j++] = ' ';
-		} else if ((i % 4) == 0) {
-			buffer[j++] = ' ';
-		}
+		hex_dump_to_buffer(&skb->data[i], len, 32, 1,
+				   buffer, sizeof(buffer), false);
+		netdev_dbg(netdev, "  %#06x: %s\n", i, buffer);
 	}
-	if (i % 32)
-		netdev_dbg(netdev, "  %#06x: %s\n", i - (i % 32), buffer);
 
 	netdev_dbg(netdev, "\n************** SKB dump ****************\n");
 }
diff --git a/drivers/net/ethernet/broadcom/bcm63xx_enet.c b/drivers/net/ethernet/broadcom/bcm63xx_enet.c
index d9346e2..14a59e5 100644
--- a/drivers/net/ethernet/broadcom/bcm63xx_enet.c
+++ b/drivers/net/ethernet/broadcom/bcm63xx_enet.c
@@ -1716,7 +1716,6 @@ static int bcm_enet_probe(struct platform_device *pdev)
 	struct bcm63xx_enet_platform_data *pd;
 	struct resource *res_mem, *res_irq, *res_irq_rx, *res_irq_tx;
 	struct mii_bus *bus;
-	const char *clk_name;
 	int i, ret;
 
 	if (!bcm_enet_shared_base[0])
@@ -1751,20 +1750,8 @@ static int bcm_enet_probe(struct platform_device *pdev)
 	dev->irq = priv->irq = res_irq->start;
 	priv->irq_rx = res_irq_rx->start;
 	priv->irq_tx = res_irq_tx->start;
-	priv->mac_id = pdev->id;
 
-	/* get rx & tx dma channel id for this mac */
-	if (priv->mac_id == 0) {
-		priv->rx_chan = 0;
-		priv->tx_chan = 1;
-		clk_name = "enet0";
-	} else {
-		priv->rx_chan = 2;
-		priv->tx_chan = 3;
-		clk_name = "enet1";
-	}
-
-	priv->mac_clk = devm_clk_get(&pdev->dev, clk_name);
+	priv->mac_clk = devm_clk_get(&pdev->dev, "enet");
 	if (IS_ERR(priv->mac_clk)) {
 		ret = PTR_ERR(priv->mac_clk);
 		goto out;
@@ -1795,9 +1782,11 @@ static int bcm_enet_probe(struct platform_device *pdev)
 		priv->dma_chan_width = pd->dma_chan_width;
 		priv->dma_has_sram = pd->dma_has_sram;
 		priv->dma_desc_shift = pd->dma_desc_shift;
+		priv->rx_chan = pd->rx_chan;
+		priv->tx_chan = pd->tx_chan;
 	}
 
-	if (priv->mac_id == 0 && priv->has_phy && !priv->use_external_mii) {
+	if (priv->has_phy && !priv->use_external_mii) {
 		/* using internal PHY, enable clock */
 		priv->phy_clk = devm_clk_get(&pdev->dev, "ephy");
 		if (IS_ERR(priv->phy_clk)) {
@@ -1828,7 +1817,7 @@ static int bcm_enet_probe(struct platform_device *pdev)
 		bus->priv = priv;
 		bus->read = bcm_enet_mdio_read_phylib;
 		bus->write = bcm_enet_mdio_write_phylib;
-		sprintf(bus->id, "%s-%d", pdev->name, priv->mac_id);
+		sprintf(bus->id, "%s-%d", pdev->name, pdev->id);
 
 		/* only probe bus where we think the PHY is, because
 		 * the mdio read operation return 0 instead of 0xffff
@@ -2139,27 +2128,25 @@ static int bcm_enetsw_open(struct net_device *dev)
 
 	/* allocate rx dma ring */
 	size = priv->rx_ring_size * sizeof(struct bcm_enet_desc);
-	p = dma_alloc_coherent(kdev, size, &priv->rx_desc_dma, GFP_KERNEL);
+	p = dma_zalloc_coherent(kdev, size, &priv->rx_desc_dma, GFP_KERNEL);
 	if (!p) {
 		dev_err(kdev, "cannot allocate rx ring %u\n", size);
 		ret = -ENOMEM;
 		goto out_freeirq_tx;
 	}
 
-	memset(p, 0, size);
 	priv->rx_desc_alloc_size = size;
 	priv->rx_desc_cpu = p;
 
 	/* allocate tx dma ring */
 	size = priv->tx_ring_size * sizeof(struct bcm_enet_desc);
-	p = dma_alloc_coherent(kdev, size, &priv->tx_desc_dma, GFP_KERNEL);
+	p = dma_zalloc_coherent(kdev, size, &priv->tx_desc_dma, GFP_KERNEL);
 	if (!p) {
 		dev_err(kdev, "cannot allocate tx ring\n");
 		ret = -ENOMEM;
 		goto out_free_rx_ring;
 	}
 
-	memset(p, 0, size);
 	priv->tx_desc_alloc_size = size;
 	priv->tx_desc_cpu = p;
 
diff --git a/drivers/net/ethernet/broadcom/bcm63xx_enet.h b/drivers/net/ethernet/broadcom/bcm63xx_enet.h
index 5a66728..1d3c917 100644
--- a/drivers/net/ethernet/broadcom/bcm63xx_enet.h
+++ b/drivers/net/ethernet/broadcom/bcm63xx_enet.h
@@ -193,9 +193,6 @@ struct bcm_enet_mib_counters {
 
 struct bcm_enet_priv {
 
-	/* mac id (from platform device id) */
-	int mac_id;
-
 	/* base remapped address of device */
 	void __iomem *base;
 
diff --git a/drivers/net/ethernet/broadcom/bcmsysport.c b/drivers/net/ethernet/broadcom/bcmsysport.c
index 087f01b..f15a8fc 100644
--- a/drivers/net/ethernet/broadcom/bcmsysport.c
+++ b/drivers/net/ethernet/broadcom/bcmsysport.c
@@ -1216,18 +1216,6 @@ static netdev_tx_t bcm_sysport_xmit(struct sk_buff *skb,
 		goto out;
 	}
 
-	/* The Ethernet switch we are interfaced with needs packets to be at
-	 * least 64 bytes (including FCS) otherwise they will be discarded when
-	 * they enter the switch port logic. When Broadcom tags are enabled, we
-	 * need to make sure that packets are at least 68 bytes
-	 * (including FCS and tag) because the length verification is done after
-	 * the Broadcom tag is stripped off the ingress packet.
-	 */
-	if (skb_put_padto(skb, ETH_ZLEN + ENET_BRCM_TAG_LEN)) {
-		ret = NETDEV_TX_OK;
-		goto out;
-	}
-
 	/* Insert TSB and checksum infos */
 	if (priv->tsb_en) {
 		skb = bcm_sysport_insert_tsb(skb, dev);
diff --git a/drivers/net/ethernet/broadcom/bgmac.c b/drivers/net/ethernet/broadcom/bgmac.c
index 1d96cd5..8eef9fb 100644
--- a/drivers/net/ethernet/broadcom/bgmac.c
+++ b/drivers/net/ethernet/broadcom/bgmac.c
@@ -128,8 +128,6 @@ bgmac_dma_tx_add_buf(struct bgmac *bgmac, struct bgmac_dma_ring *ring,
 	dma_desc->ctl1 = cpu_to_le32(ctl1);
 }
 
-#define ENET_BRCM_TAG_LEN	4
-
 static netdev_tx_t bgmac_dma_tx_add(struct bgmac *bgmac,
 				    struct bgmac_dma_ring *ring,
 				    struct sk_buff *skb)
@@ -142,18 +140,6 @@ static netdev_tx_t bgmac_dma_tx_add(struct bgmac *bgmac,
 	u32 flags;
 	int i;
 
-	/* The Ethernet switch we are interfaced with needs packets to be at
-	 * least 64 bytes (including FCS) otherwise they will be discarded when
-	 * they enter the switch port logic. When Broadcom tags are enabled, we
-	 * need to make sure that packets are at least 68 bytes
-	 * (including FCS and tag) because the length verification is done after
-	 * the Broadcom tag is stripped off the ingress packet.
-	 */
-	if (netdev_uses_dsa(net_dev)) {
-		if (skb_put_padto(skb, ETH_ZLEN + ENET_BRCM_TAG_LEN))
-			goto err_stats;
-	}
-
 	if (skb->len > BGMAC_DESC_CTL1_LEN) {
 		netdev_err(bgmac->net_dev, "Too long skb (%d)\n", skb->len);
 		goto err_drop;
@@ -240,7 +226,6 @@ static netdev_tx_t bgmac_dma_tx_add(struct bgmac *bgmac,
 
 err_drop:
 	dev_kfree_skb(skb);
-err_stats:
 	net_dev->stats.tx_dropped++;
 	net_dev->stats.tx_errors++;
 	return NETDEV_TX_OK;
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
index 8ae269e..d7c98e8 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
@@ -738,8 +738,9 @@ static void bnx2x_gro_receive(struct bnx2x *bp, struct bnx2x_fastpath *fp,
 			bnx2x_gro_csum(bp, skb, bnx2x_gro_ipv6_csum);
 			break;
 		default:
-			WARN_ONCE(1, "Error: FW GRO supports only IPv4/IPv6, not 0x%04x\n",
-				  be16_to_cpu(skb->protocol));
+			netdev_WARN_ONCE(bp->dev,
+					 "Error: FW GRO supports only IPv4/IPv6, not 0x%04x\n",
+					 be16_to_cpu(skb->protocol));
 		}
 	}
 #endif
@@ -2482,8 +2483,7 @@ static void bnx2x_bz_fp(struct bnx2x *bp, int index)
 	 */
 	if (bp->dev->features & NETIF_F_LRO)
 		fp->mode = TPA_MODE_LRO;
-	else if (bp->dev->features & NETIF_F_GRO &&
-		 bnx2x_mtu_allows_gro(bp->dev->mtu))
+	else if (bp->dev->features & NETIF_F_GRO_HW)
 		fp->mode = TPA_MODE_GRO;
 	else
 		fp->mode = TPA_MODE_DISABLED;
@@ -4874,6 +4874,9 @@ int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
 	 */
 	dev->mtu = new_mtu;
 
+	if (!bnx2x_mtu_allows_gro(new_mtu))
+		dev->features &= ~NETIF_F_GRO_HW;
+
 	if (IS_PF(bp) && SHMEM2_HAS(bp, curr_cfg))
 		SHMEM2_WR(bp, curr_cfg, CURR_CFG_MET_OS);
 
@@ -4903,10 +4906,13 @@ netdev_features_t bnx2x_fix_features(struct net_device *dev,
 	}
 
 	/* TPA requires Rx CSUM offloading */
-	if (!(features & NETIF_F_RXCSUM)) {
+	if (!(features & NETIF_F_RXCSUM))
 		features &= ~NETIF_F_LRO;
-		features &= ~NETIF_F_GRO;
-	}
+
+	if (!(features & NETIF_F_GRO) || !bnx2x_mtu_allows_gro(dev->mtu))
+		features &= ~NETIF_F_GRO_HW;
+	if (features & NETIF_F_GRO_HW)
+		features &= ~NETIF_F_LRO;
 
 	return features;
 }
@@ -4933,13 +4939,8 @@ int bnx2x_set_features(struct net_device *dev, netdev_features_t features)
 		}
 	}
 
-	/* if GRO is changed while LRO is enabled, don't force a reload */
-	if ((changes & NETIF_F_GRO) && (features & NETIF_F_LRO))
-		changes &= ~NETIF_F_GRO;
-
-	/* if GRO is changed while HW TPA is off, don't force a reload */
-	if ((changes & NETIF_F_GRO) && bp->disable_tpa)
-		changes &= ~NETIF_F_GRO;
+	/* Don't care about GRO changes */
+	changes &= ~NETIF_F_GRO;
 
 	if (changes)
 		bnx2x_reload = true;
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
index ddd5d3e..7b08323 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
@@ -12409,8 +12409,8 @@ static int bnx2x_init_bp(struct bnx2x *bp)
 
 	/* Set TPA flags */
 	if (bp->disable_tpa) {
-		bp->dev->hw_features &= ~NETIF_F_LRO;
-		bp->dev->features &= ~NETIF_F_LRO;
+		bp->dev->hw_features &= ~(NETIF_F_LRO | NETIF_F_GRO_HW);
+		bp->dev->features &= ~(NETIF_F_LRO | NETIF_F_GRO_HW);
 	}
 
 	if (CHIP_IS_E1(bp))
@@ -13282,7 +13282,7 @@ static int bnx2x_init_dev(struct bnx2x *bp, struct pci_dev *pdev,
 
 	dev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
 		NETIF_F_TSO | NETIF_F_TSO_ECN | NETIF_F_TSO6 |
-		NETIF_F_RXCSUM | NETIF_F_LRO | NETIF_F_GRO |
+		NETIF_F_RXCSUM | NETIF_F_LRO | NETIF_F_GRO | NETIF_F_GRO_HW |
 		NETIF_F_RXHASH | NETIF_F_HW_VLAN_CTAG_TX;
 	if (!chip_is_e1x) {
 		dev->hw_features |= NETIF_F_GSO_GRE | NETIF_F_GSO_GRE_CSUM |
@@ -13318,6 +13318,8 @@ static int bnx2x_init_dev(struct bnx2x *bp, struct pci_dev *pdev,
 
 	dev->features |= dev->hw_features | NETIF_F_HW_VLAN_CTAG_RX;
 	dev->features |= NETIF_F_HIGHDMA;
+	if (dev->features & NETIF_F_LRO)
+		dev->features &= ~NETIF_F_GRO_HW;
 
 	/* Add Loopback capability to the device */
 	dev->hw_features |= NETIF_F_LOOPBACK;
diff --git a/drivers/net/ethernet/broadcom/bnxt/Makefile b/drivers/net/ethernet/broadcom/bnxt/Makefile
index 59c8ec9..7c560d5 100644
--- a/drivers/net/ethernet/broadcom/bnxt/Makefile
+++ b/drivers/net/ethernet/broadcom/bnxt/Makefile
@@ -1,4 +1,4 @@
 obj-$(CONFIG_BNXT) += bnxt_en.o
 
-bnxt_en-y := bnxt.o bnxt_sriov.o bnxt_ethtool.o bnxt_dcb.o bnxt_ulp.o bnxt_xdp.o bnxt_vfr.o bnxt_devlink.o
+bnxt_en-y := bnxt.o bnxt_sriov.o bnxt_ethtool.o bnxt_dcb.o bnxt_ulp.o bnxt_xdp.o bnxt_vfr.o bnxt_devlink.o bnxt_dim.o
 bnxt_en-$(CONFIG_BNXT_FLOWER_OFFLOAD) += bnxt_tc.o
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index 61ca4eb..cf6ebf1 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -1645,6 +1645,8 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_napi *bnapi, u32 *raw_cons,
 	rxr->rx_next_cons = NEXT_RX(cons);
 
 next_rx_no_prod:
+	cpr->rx_packets += 1;
+	cpr->rx_bytes += len;
 	*raw_cons = tmp_raw_cons;
 
 	return rc;
@@ -1706,12 +1708,16 @@ static int bnxt_async_event_process(struct bnxt *bp,
 
 		if (BNXT_VF(bp))
 			goto async_event_process_exit;
-		if (data1 & 0x20000) {
+
+		/* print unsupported speed warning in forced speed mode only */
+		if (!(link_info->autoneg & BNXT_AUTONEG_SPEED) &&
+		    (data1 & 0x20000)) {
 			u16 fw_speed = link_info->force_link_speed;
 			u32 speed = bnxt_fw_to_ethtool_speed(fw_speed);
 
-			netdev_warn(bp->dev, "Link speed %d no longer supported\n",
-				    speed);
+			if (speed != SPEED_UNKNOWN)
+				netdev_warn(bp->dev, "Link speed %d no longer supported\n",
+					    speed);
 		}
 		set_bit(BNXT_LINK_SPEED_CHNG_SP_EVENT, &bp->sp_event);
 		/* fall thru */
@@ -1798,6 +1804,7 @@ static irqreturn_t bnxt_msix(int irq, void *dev_instance)
 	struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
 	u32 cons = RING_CMP(cpr->cp_raw_cons);
 
+	cpr->event_ctr++;
 	prefetch(&cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)]);
 	napi_schedule(&bnapi->napi);
 	return IRQ_HANDLED;
@@ -2021,6 +2028,15 @@ static int bnxt_poll(struct napi_struct *napi, int budget)
 			break;
 		}
 	}
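+	/* Feed the ring's event and traffic counters to net_dim so it can
+	 * adapt the interrupt moderation profile.
+	 */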
+	if (bp->flags & BNXT_FLAG_DIM) {
+		struct net_dim_sample dim_sample;
+
+		net_dim_sample(cpr->event_ctr,
+			       cpr->rx_packets,
+			       cpr->rx_bytes,
+			       &dim_sample);
+		net_dim(&cpr->dim, dim_sample);
+	}
 	mmiowb();
 	return work_done;
 }
@@ -2243,6 +2259,9 @@ static void bnxt_free_rx_rings(struct bnxt *bp)
 		if (rxr->xdp_prog)
 			bpf_prog_put(rxr->xdp_prog);
 
+		if (xdp_rxq_info_is_reg(&rxr->xdp_rxq))
+			xdp_rxq_info_unreg(&rxr->xdp_rxq);
+
 		kfree(rxr->rx_tpa);
 		rxr->rx_tpa = NULL;
 
@@ -2276,6 +2295,10 @@ static int bnxt_alloc_rx_rings(struct bnxt *bp)
 
 		ring = &rxr->rx_ring_struct;
 
+		rc = xdp_rxq_info_reg(&rxr->xdp_rxq, bp->dev, i);
+		if (rc < 0)
+			return rc;
+
 		rc = bnxt_alloc_ring(bp, ring);
 		if (rc)
 			return rc;
@@ -2606,6 +2629,8 @@ static void bnxt_init_cp_rings(struct bnxt *bp)
 		struct bnxt_ring_struct *ring = &cpr->cp_ring_struct;
 
 		ring->fw_ring_id = INVALID_HW_RING_ID;
+		cpr->rx_ring_coal.coal_ticks = bp->rx_coal.coal_ticks;
+		cpr->rx_ring_coal.coal_bufs = bp->rx_coal.coal_bufs;
 	}
 }
 
@@ -2751,7 +2776,7 @@ void bnxt_set_tpa_flags(struct bnxt *bp)
 		return;
 	if (bp->dev->features & NETIF_F_LRO)
 		bp->flags |= BNXT_FLAG_LRO;
-	if (bp->dev->features & NETIF_F_GRO)
+	else if (bp->dev->features & NETIF_F_GRO_HW)
 		bp->flags |= BNXT_FLAG_GRO;
 }
 
@@ -2830,6 +2855,9 @@ void bnxt_set_ring_params(struct bnxt *bp)
 	bp->cp_ring_mask = bp->cp_bit - 1;
 }
 
+/* Changing allocation mode of RX rings.
+ * TODO: Update when extending xdp_rxq_info to support allocation modes.
+ */
 int bnxt_set_rx_skb_mode(struct bnxt *bp, bool page_mode)
 {
 	if (page_mode) {
@@ -2839,10 +2867,10 @@ int bnxt_set_rx_skb_mode(struct bnxt *bp, bool page_mode)
 			min_t(u16, bp->max_mtu, BNXT_MAX_PAGE_MODE_MTU);
 		bp->flags &= ~BNXT_FLAG_AGG_RINGS;
 		bp->flags |= BNXT_FLAG_NO_AGG_RINGS | BNXT_FLAG_RX_PAGE_MODE;
-		bp->dev->hw_features &= ~NETIF_F_LRO;
-		bp->dev->features &= ~NETIF_F_LRO;
 		bp->rx_dir = DMA_BIDIRECTIONAL;
 		bp->rx_skb_func = bnxt_rx_page_skb;
+		/* Disable LRO or GRO_HW */
+		netdev_update_features(bp->dev);
 	} else {
 		bp->dev->max_mtu = bp->max_mtu;
 		bp->flags &= ~BNXT_FLAG_RX_PAGE_MODE;
@@ -4579,6 +4607,36 @@ static void bnxt_hwrm_set_coal_params(struct bnxt_coal *hw_coal,
 	req->flags = cpu_to_le16(flags);
 }
 
+int bnxt_hwrm_set_ring_coal(struct bnxt *bp, struct bnxt_napi *bnapi)
+{
+	struct hwrm_ring_cmpl_ring_cfg_aggint_params_input req_rx = {0};
+	struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
+	struct bnxt_coal coal;
+	unsigned int grp_idx;
+
+	/* Tick values in microseconds.
+	 * 1 coal_buf x bufs_per_record = 1 completion record.
+	 */
+	memcpy(&coal, &bp->rx_coal, sizeof(struct bnxt_coal));
+
+	coal.coal_ticks = cpr->rx_ring_coal.coal_ticks;
+	coal.coal_bufs = cpr->rx_ring_coal.coal_bufs;
+
+	if (!bnapi->rx_ring)
+		return -ENODEV;
+
+	bnxt_hwrm_cmd_hdr_init(bp, &req_rx,
+			       HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS, -1, -1);
+
+	bnxt_hwrm_set_coal_params(&coal, &req_rx);
+
+	grp_idx = bnapi->index;
+	req_rx.ring_id = cpu_to_le16(bp->grp_info[grp_idx].cp_fw_ring_id);
+
+	return hwrm_send_message(bp, &req_rx, sizeof(req_rx),
+				 HWRM_CMD_TIMEOUT);
+}
+
 int bnxt_hwrm_set_coal(struct bnxt *bp)
 {
 	int i, rc = 0;
@@ -5701,7 +5759,13 @@ static void bnxt_enable_napi(struct bnxt *bp)
 	int i;
 
 	for (i = 0; i < bp->cp_nr_rings; i++) {
+		struct bnxt_cp_ring_info *cpr = &bp->bnapi[i]->cp_ring;
 		bp->bnapi[i]->in_reset = false;
+
+		if (bp->bnapi[i]->rx_ring) {
+			INIT_WORK(&cpr->dim.work, bnxt_dim_work);
+			cpr->dim.mode = NET_DIM_CQ_PERIOD_MODE_START_FROM_EQE;
+		}
 		napi_enable(&bp->bnapi[i]->napi);
 	}
 }
@@ -6784,6 +6848,15 @@ static netdev_features_t bnxt_fix_features(struct net_device *dev,
 	if ((features & NETIF_F_NTUPLE) && !bnxt_rfs_capable(bp))
 		features &= ~NETIF_F_NTUPLE;
 
+	if (bp->flags & BNXT_FLAG_NO_AGG_RINGS)
+		features &= ~(NETIF_F_LRO | NETIF_F_GRO_HW);
+
+	if (!(features & NETIF_F_GRO))
+		features &= ~NETIF_F_GRO_HW;
+
+	if (features & NETIF_F_GRO_HW)
+		features &= ~NETIF_F_LRO;
+
 	/* Both CTAG and STAG VLAN accelaration on the RX side have to be
 	 * turned on or off together.
 	 */
@@ -6817,9 +6890,9 @@ static int bnxt_set_features(struct net_device *dev, netdev_features_t features)
 	bool update_tpa = false;
 
 	flags &= ~BNXT_FLAG_ALL_CONFIG_FEATS;
-	if ((features & NETIF_F_GRO) && !BNXT_CHIP_TYPE_NITRO_A0(bp))
+	if (features & NETIF_F_GRO_HW)
 		flags |= BNXT_FLAG_GRO;
-	if (features & NETIF_F_LRO)
+	else if (features & NETIF_F_LRO)
 		flags |= BNXT_FLAG_LRO;
 
 	if (bp->flags & BNXT_FLAG_NO_AGG_RINGS)
@@ -7800,8 +7873,6 @@ static void bnxt_remove_one(struct pci_dev *pdev)
 	bnxt_dcb_free(bp);
 	kfree(bp->edev);
 	bp->edev = NULL;
-	if (bp->xdp_prog)
-		bpf_prog_put(bp->xdp_prog);
 	bnxt_cleanup_pci(bp);
 	free_netdev(dev);
 }
@@ -7922,8 +7993,8 @@ static int bnxt_get_dflt_rings(struct bnxt *bp, int *max_rx, int *max_tx,
 		if (rc)
 			return rc;
 		bp->flags |= BNXT_FLAG_NO_AGG_RINGS;
-		bp->dev->hw_features &= ~NETIF_F_LRO;
-		bp->dev->features &= ~NETIF_F_LRO;
+		bp->dev->hw_features &= ~(NETIF_F_LRO | NETIF_F_GRO_HW);
+		bp->dev->features &= ~(NETIF_F_LRO | NETIF_F_GRO_HW);
 		bnxt_set_ring_params(bp);
 	}
 
@@ -8106,7 +8177,11 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 	dev->vlan_features = dev->hw_features | NETIF_F_HIGHDMA;
 	dev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_TX |
 			    NETIF_F_HW_VLAN_STAG_RX | NETIF_F_HW_VLAN_STAG_TX;
+	if (!BNXT_CHIP_TYPE_NITRO_A0(bp))
+		dev->hw_features |= NETIF_F_GRO_HW;
 	dev->features |= dev->hw_features | NETIF_F_HIGHDMA;
+	if (dev->features & NETIF_F_GRO_HW)
+		dev->features &= ~NETIF_F_LRO;
 	dev->priv_flags |= IFF_UNICAST_FLT;
 
 #ifdef CONFIG_BNXT_SRIOV
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
index 5359a1f..89887a8 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
@@ -23,6 +23,8 @@
 #include <net/devlink.h>
 #include <net/dst_metadata.h>
 #include <net/switchdev.h>
+#include <net/xdp.h>
+#include <linux/net_dim.h>
 
 struct tx_bd {
 	__le32 tx_bd_len_flags_type;
@@ -607,6 +609,17 @@ struct bnxt_tx_ring_info {
 	struct bnxt_ring_struct	tx_ring_struct;
 };
 
+struct bnxt_coal {
+	u16			coal_ticks;
+	u16			coal_ticks_irq;
+	u16			coal_bufs;
+	u16			coal_bufs_irq;
+			/* RING_IDLE enabled when coal ticks < idle_thresh  */
+	u16			idle_thresh;
+	u8			bufs_per_record;
+	u8			budget;
+};
+
 struct bnxt_tpa_info {
 	void			*data;
 	u8			*data_ptr;
@@ -664,12 +677,20 @@ struct bnxt_rx_ring_info {
 
 	struct bnxt_ring_struct	rx_ring_struct;
 	struct bnxt_ring_struct	rx_agg_ring_struct;
+	struct xdp_rxq_info	xdp_rxq;
 };
 
 struct bnxt_cp_ring_info {
 	u32			cp_raw_cons;
 	void __iomem		*cp_doorbell;
 
+	struct bnxt_coal	rx_ring_coal;
+	u64			rx_packets;
+	u64			rx_bytes;
+	u64			event_ctr;
+
+	struct net_dim		dim;
+
 	struct tx_cmp		*cp_desc_ring[MAX_CP_PAGES];
 
 	dma_addr_t		cp_desc_mapping[MAX_CP_PAGES];
@@ -944,17 +965,6 @@ struct bnxt_test_info {
 #define BNXT_CAG_REG_LEGACY_INT_STATUS	0x4014
 #define BNXT_CAG_REG_BASE		0x300000
 
-struct bnxt_coal {
-	u16			coal_ticks;
-	u16			coal_ticks_irq;
-	u16			coal_bufs;
-	u16			coal_bufs_irq;
-			/* RING_IDLE enabled when coal ticks < idle_thresh  */
-	u16			idle_thresh;
-	u8			bufs_per_record;
-	u8			budget;
-};
-
 struct bnxt_tc_flow_stats {
 	u64		packets;
 	u64		bytes;
@@ -1126,6 +1136,7 @@ struct bnxt {
 	#define BNXT_FLAG_DOUBLE_DB	0x400000
 	#define BNXT_FLAG_FW_DCBX_AGENT	0x800000
 	#define BNXT_FLAG_CHIP_NITRO_A0	0x1000000
+	#define BNXT_FLAG_DIM		0x2000000
 
 	#define BNXT_FLAG_ALL_CONFIG_FEATS (BNXT_FLAG_TPA |		\
 					    BNXT_FLAG_RFS |		\
@@ -1423,4 +1434,7 @@ int bnxt_setup_mq_tc(struct net_device *dev, u8 tc);
 int bnxt_get_max_rings(struct bnxt *, int *, int *, bool);
 void bnxt_restore_pf_fw_resources(struct bnxt *bp);
 int bnxt_port_attr_get(struct bnxt *bp, struct switchdev_attr *attr);
+void bnxt_dim_work(struct work_struct *work);
+int bnxt_hwrm_set_ring_coal(struct bnxt *bp, struct bnxt_napi *bnapi);
+
 #endif
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c
index fed37cd..3c746f2 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c
@@ -278,12 +278,11 @@ static int bnxt_hwrm_set_dcbx_app(struct bnxt *bp, struct dcb_app *app,
 
 	n = IEEE_8021QAZ_MAX_TCS;
 	data_len = sizeof(*data) + sizeof(*fw_app) * n;
-	data = dma_alloc_coherent(&bp->pdev->dev, data_len, &mapping,
-				  GFP_KERNEL);
+	data = dma_zalloc_coherent(&bp->pdev->dev, data_len, &mapping,
+				   GFP_KERNEL);
 	if (!data)
 		return -ENOMEM;
 
-	memset(data, 0, data_len);
 	bnxt_hwrm_cmd_hdr_init(bp, &get, HWRM_FW_GET_STRUCTURED_DATA, -1, -1);
 	get.dest_data_addr = cpu_to_le64(mapping);
 	get.structure_id = cpu_to_le16(STRUCT_HDR_STRUCT_ID_DCBX_APP);
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_dim.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_dim.c
new file mode 100644
index 0000000..408dd19
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_dim.c
@@ -0,0 +1,32 @@
+/* Broadcom NetXtreme-C/E network driver.
+ *
+ * Copyright (c) 2017-2018 Broadcom Limited
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation.
+ */
+
+#include <linux/net_dim.h>
+#include "bnxt_hsi.h"
+#include "bnxt.h"
+
+void bnxt_dim_work(struct work_struct *work)
+{
+	struct net_dim *dim = container_of(work, struct net_dim,
+					   work);
+	struct bnxt_cp_ring_info *cpr = container_of(dim,
+						     struct bnxt_cp_ring_info,
+						     dim);
+	struct bnxt_napi *bnapi = container_of(cpr,
+					       struct bnxt_napi,
+					       cp_ring);
+	struct net_dim_cq_moder cur_profile = net_dim_get_profile(dim->mode,
+								  dim->profile_ix);
+
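+	/* Apply the moderation profile chosen by net_dim to this ring */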
+	cpr->rx_ring_coal.coal_ticks = cur_profile.usec;
+	cpr->rx_ring_coal.coal_bufs = cur_profile.pkts;
+
+	bnxt_hwrm_set_ring_coal(bnapi->bp, bnapi);
+	dim->state = NET_DIM_START_MEASURE;
+}
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
index b13ce5e..1801582 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
@@ -49,6 +49,8 @@ static int bnxt_get_coalesce(struct net_device *dev,
 
 	memset(coal, 0, sizeof(*coal));
 
+	coal->use_adaptive_rx_coalesce = bp->flags & BNXT_FLAG_DIM;
+
 	hw_coal = &bp->rx_coal;
 	mult = hw_coal->bufs_per_record;
 	coal->rx_coalesce_usecs = hw_coal->coal_ticks;
@@ -77,6 +79,15 @@ static int bnxt_set_coalesce(struct net_device *dev,
 	int rc = 0;
 	u16 mult;
 
+	if (coal->use_adaptive_rx_coalesce) {
+		bp->flags |= BNXT_FLAG_DIM;
+	} else {
+		if (bp->flags & BNXT_FLAG_DIM) {
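+			/* Adaptive mode was just turned off; reprogram the
+			 * rings with the stored static settings.
+			 */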
+			bp->flags &= ~(BNXT_FLAG_DIM);
+			goto reset_coalesce;
+		}
+	}
+
 	hw_coal = &bp->rx_coal;
 	mult = hw_coal->bufs_per_record;
 	hw_coal->coal_ticks = coal->rx_coalesce_usecs;
@@ -104,6 +115,7 @@ static int bnxt_set_coalesce(struct net_device *dev,
 		update_stats = true;
 	}
 
+reset_coalesce:
 	if (netif_running(dev)) {
 		if (update_stats) {
 			rc = bnxt_close_nic(bp, true, false);
@@ -1376,6 +1388,9 @@ static int bnxt_firmware_reset(struct net_device *dev,
 		req.embedded_proc_type = FW_RESET_REQ_EMBEDDED_PROC_TYPE_CHIP;
 		req.selfrst_status = FW_RESET_REQ_SELFRST_STATUS_SELFRSTASAP;
 		break;
+	case BNXT_FW_RESET_AP:
+		req.embedded_proc_type = FW_RESET_REQ_EMBEDDED_PROC_TYPE_AP;
+		break;
 	default:
 		return -EINVAL;
 	}
@@ -2522,6 +2537,14 @@ static int bnxt_reset(struct net_device *dev, u32 *flags)
 		rc = bnxt_firmware_reset(dev, BNXT_FW_RESET_CHIP);
 		if (!rc)
 			netdev_info(dev, "Reset request successful. Reload driver to complete reset\n");
+	} else if (*flags == ETH_RESET_AP) {
+		/* This feature is not supported in older firmware versions */
+		if (bp->hwrm_spec_code < 0x10803)
+			return -EOPNOTSUPP;
+
+		rc = bnxt_firmware_reset(dev, BNXT_FW_RESET_AP);
+		if (!rc)
+			netdev_info(dev, "Reset Application Processor request successful.\n");
 	} else {
 		rc = -EINVAL;
 	}
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.h
index ff601b4..836ef68 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.h
@@ -34,6 +34,7 @@ struct bnxt_led_cfg {
 #define BNXT_LED_DFLT_ENABLES(x)			\
 	cpu_to_le32(BNXT_LED_DFLT_ENA << (BNXT_LED_DFLT_ENA_SHIFT * (x)))
 
+#define BNXT_FW_RESET_AP	0xfffe
 #define BNXT_FW_RESET_CHIP	0xffff
 
 extern const struct ethtool_ops bnxt_ethtool_ops;
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c
index d8fee26..1d9b08c 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c
@@ -54,12 +54,10 @@ static int bnxt_tc_parse_redir(struct bnxt *bp,
 			       struct bnxt_tc_actions *actions,
 			       const struct tc_action *tc_act)
 {
-	int ifindex = tcf_mirred_ifindex(tc_act);
-	struct net_device *dev;
+	struct net_device *dev = tcf_mirred_dev(tc_act);
 
-	dev = __dev_get_by_index(dev_net(bp->dev), ifindex);
 	if (!dev) {
-		netdev_info(bp->dev, "no dev for ifindex=%d", ifindex);
+		netdev_info(bp->dev, "no dev in mirred action");
 		return -EINVAL;
 	}
 
@@ -148,9 +146,6 @@ static int bnxt_tc_parse_actions(struct bnxt *bp,
 		}
 	}
 
-	if (rc)
-		return rc;
-
 	if (actions->flags & BNXT_TC_ACTION_FLAG_FWD) {
 		if (actions->flags & BNXT_TC_ACTION_FLAG_TUNNEL_ENCAP) {
 			/* dst_fid is PF's fid */
@@ -164,7 +159,7 @@ static int bnxt_tc_parse_actions(struct bnxt *bp,
 		}
 	}
 
-	return rc;
+	return 0;
 }
 
 #define GET_KEY(flow_cmd, key_type)					\
@@ -1417,11 +1412,7 @@ bnxt_tc_flow_stats_batch_prep(struct bnxt *bp,
 	void *flow_node;
 	int rc, i;
 
-	rc = rhashtable_walk_start(iter);
-	if (rc && rc != -EAGAIN) {
-		i = 0;
-		goto done;
-	}
+	rhashtable_walk_start(iter);
 
 	rc = 0;
 	for (i = 0; i < BNXT_FLOW_STATS_BATCH_MAX; i++) {
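This hunk follows an rhashtable API change: rhashtable_walk_start() no
longer returns an error, so -EAGAIN is only reported by
rhashtable_walk_next(). The resulting canonical walk pattern looks roughly
like this (the obj pointer is illustrative):

	rhashtable_walk_start(iter);
	while ((obj = rhashtable_walk_next(iter)) != NULL) {
		if (IS_ERR(obj)) {
			if (PTR_ERR(obj) == -EAGAIN)
				continue;	/* table resized, keep walking */
			break;
		}
		/* ... use obj ... */
	}
	rhashtable_walk_stop(iter);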
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c
index 261e584..1389ab5 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c
@@ -96,6 +96,7 @@ bool bnxt_rx_xdp(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, u16 cons,
 	xdp.data = *data_ptr;
 	xdp_set_data_meta_invalid(&xdp);
 	xdp.data_end = *data_ptr + *len;
+	xdp.rxq = &rxr->xdp_rxq;
 	orig_data = xdp.data;
 	mapping = rx_buf->mapping - bp->rx_dma_offset;
 
diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
index 8995cfe..a77ee2f 100644
--- a/drivers/net/ethernet/broadcom/tg3.c
+++ b/drivers/net/ethernet/broadcom/tg3.c
@@ -3227,7 +3227,7 @@ static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
 	return 0;
 }
 
-#define NVRAM_CMD_TIMEOUT 5000
+#define NVRAM_CMD_TIMEOUT 10000
 
 static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
 {
@@ -14789,7 +14789,7 @@ static void tg3_get_5717_nvram_info(struct tg3 *tp)
 
 static void tg3_get_5720_nvram_info(struct tg3 *tp)
 {
-	u32 nvcfg1, nvmpinstrp;
+	u32 nvcfg1, nvmpinstrp, nv_status;
 
 	nvcfg1 = tr32(NVRAM_CFG1);
 	nvmpinstrp = nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK;
@@ -14801,6 +14801,23 @@ static void tg3_get_5720_nvram_info(struct tg3 *tp)
 		}
 
 		switch (nvmpinstrp) {
+		case FLASH_5762_MX25L_100:
+		case FLASH_5762_MX25L_200:
+		case FLASH_5762_MX25L_400:
+		case FLASH_5762_MX25L_800:
+		case FLASH_5762_MX25L_160_320:
+			tp->nvram_pagesize = 4096;
+			tp->nvram_jedecnum = JEDEC_MACRONIX;
+			tg3_flag_set(tp, NVRAM_BUFFERED);
+			tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
+			tg3_flag_set(tp, FLASH);
+			nv_status = tr32(NVRAM_AUTOSENSE_STATUS);
+			tp->nvram_size =
+				(1 << (nv_status >> AUTOSENSE_DEVID &
+						AUTOSENSE_DEVID_MASK)
+					<< AUTOSENSE_SIZE_IN_MB);
+			return;
+
 		case FLASH_5762_EEPROM_HD:
 			nvmpinstrp = FLASH_5720_EEPROM_HD;
 			break;
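For the Macronix parts the flash size is derived from the autosense status
register instead of a per-part table. Working the expression through with
the new defines from tg3.h, the device-ID field sits in bits 18:16 and
each increment doubles the detected size, starting at 128 KiB:

	/* worked examples of the size computation above */
	devid = (nv_status >> 16) & 0x7;	/* AUTOSENSE_DEVID(_MASK) */
	/* devid = 0:  1 << 0 << 17 =  131072 bytes (128 KiB) */
	/* devid = 3:  1 << 3 << 17 = 1048576 bytes (1 MiB)   */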
diff --git a/drivers/net/ethernet/broadcom/tg3.h b/drivers/net/ethernet/broadcom/tg3.h
index 1f0271f..47f51cc 100644
--- a/drivers/net/ethernet/broadcom/tg3.h
+++ b/drivers/net/ethernet/broadcom/tg3.h
@@ -1863,7 +1863,7 @@
 #define NVRAM_STAT			0x00007004
 #define NVRAM_WRDATA			0x00007008
 #define NVRAM_ADDR			0x0000700c
-#define  NVRAM_ADDR_MSK			0x00ffffff
+#define  NVRAM_ADDR_MSK			0x07ffffff
 #define NVRAM_RDDATA			0x00007010
 #define NVRAM_CFG1			0x00007014
 #define  NVRAM_CFG1_FLASHIF_ENAB	 0x00000001
@@ -1945,6 +1945,11 @@
 #define  FLASH_5720_EEPROM_LD		 0x00000003
 #define  FLASH_5762_EEPROM_HD		 0x02000001
 #define  FLASH_5762_EEPROM_LD		 0x02000003
+#define  FLASH_5762_MX25L_100           0x00800000
+#define  FLASH_5762_MX25L_200           0x00800002
+#define  FLASH_5762_MX25L_400           0x00800001
+#define  FLASH_5762_MX25L_800           0x00800003
+#define  FLASH_5762_MX25L_160_320       0x03800002
 #define  FLASH_5720VENDOR_M_ATMEL_DB011D 0x01000000
 #define  FLASH_5720VENDOR_M_ATMEL_DB021D 0x01000002
 #define  FLASH_5720VENDOR_M_ATMEL_DB041D 0x01000001
@@ -2009,7 +2014,11 @@
 /* 0x702c unused */
 
 #define NVRAM_ADDR_LOCKOUT		0x00007030
-/* 0x7034 --> 0x7500 unused */
+#define NVRAM_AUTOSENSE_STATUS         0x00007038
+#define AUTOSENSE_DEVID                        0x00000010
+#define AUTOSENSE_DEVID_MASK           0x00000007
+#define AUTOSENSE_SIZE_IN_MB           17
+/* 0x703c --> 0x7500 unused */
 
 #define OTP_MODE			0x00007500
 #define OTP_MODE_OTP_THRU_GRC		 0x00000001
@@ -3378,6 +3387,7 @@ struct tg3 {
 #define JEDEC_ST			0x20
 #define JEDEC_SAIFUN			0x4f
 #define JEDEC_SST			0xbf
+#define JEDEC_MACRONIX                 0xc2
 
 #define ATMEL_AT24C02_CHIP_SIZE		TG3_NVRAM_SIZE_2KB
 #define ATMEL_AT24C02_PAGE_SIZE		(8)
diff --git a/drivers/net/ethernet/cadence/macb.h b/drivers/net/ethernet/cadence/macb.h
index c93f3a2..c50c5ec 100644
--- a/drivers/net/ethernet/cadence/macb.h
+++ b/drivers/net/ethernet/cadence/macb.h
@@ -164,14 +164,38 @@
 #define GEM_DCFG5		0x0290 /* Design Config 5 */
 #define GEM_DCFG6		0x0294 /* Design Config 6 */
 #define GEM_DCFG7		0x0298 /* Design Config 7 */
+#define GEM_DCFG8		0x029C /* Design Config 8 */
 
 #define GEM_TXBDCTRL	0x04cc /* TX Buffer Descriptor control register */
 #define GEM_RXBDCTRL	0x04d0 /* RX Buffer Descriptor control register */
 
+/* Screener Type 2 match registers */
+#define GEM_SCRT2		0x540
+
+/* EtherType registers */
+#define GEM_ETHT		0x06E0
+
+/* Type 2 compare registers */
+#define GEM_T2CMPW0		0x0700
+#define GEM_T2CMPW1		0x0704
+#define T2CMP_OFST(t2idx)	(t2idx * 2)
+
+/* Type 2 compare register indices:
+ * each location requires 3 compare regs
+ */
+#define GEM_IP4SRC_CMP(idx)		(idx * 3)
+#define GEM_IP4DST_CMP(idx)		(idx * 3 + 1)
+#define GEM_PORT_CMP(idx)		(idx * 3 + 2)
+
+/* Which screening type 2 EtherType register will be used (0 - 7) */
+#define SCRT2_ETHT		0
+
 #define GEM_ISR(hw_q)		(0x0400 + ((hw_q) << 2))
 #define GEM_TBQP(hw_q)		(0x0440 + ((hw_q) << 2))
 #define GEM_TBQPH(hw_q)		(0x04C8)
 #define GEM_RBQP(hw_q)		(0x0480 + ((hw_q) << 2))
+#define GEM_RBQS(hw_q)		(0x04A0 + ((hw_q) << 2))
+#define GEM_RBQPH(hw_q)		(0x04D4)
 #define GEM_IER(hw_q)		(0x0600 + ((hw_q) << 2))
 #define GEM_IDR(hw_q)		(0x0620 + ((hw_q) << 2))
 #define GEM_IMR(hw_q)		(0x0640 + ((hw_q) << 2))
@@ -455,6 +479,16 @@
 #define GEM_DAW64_OFFSET			23
 #define GEM_DAW64_SIZE				1
 
+/* Bitfields in DCFG8. */
+#define GEM_T1SCR_OFFSET			24
+#define GEM_T1SCR_SIZE				8
+#define GEM_T2SCR_OFFSET			16
+#define GEM_T2SCR_SIZE				8
+#define GEM_SCR2ETH_OFFSET			8
+#define GEM_SCR2ETH_SIZE			8
+#define GEM_SCR2CMP_OFFSET			0
+#define GEM_SCR2CMP_SIZE			8
+
 /* Bitfields in TISUBN */
 #define GEM_SUBNSINCR_OFFSET			0
 #define GEM_SUBNSINCR_SIZE			16
@@ -483,6 +517,66 @@
 #define GEM_RXTSMODE_OFFSET			4 /* RX Descriptor Timestamp Insertion mode */
 #define GEM_RXTSMODE_SIZE			2
 
+/* Bitfields in SCRT2 */
+#define GEM_QUEUE_OFFSET			0 /* Queue Number */
+#define GEM_QUEUE_SIZE				4
+#define GEM_VLANPR_OFFSET			4 /* VLAN Priority */
+#define GEM_VLANPR_SIZE				3
+#define GEM_VLANEN_OFFSET			8 /* VLAN Enable */
+#define GEM_VLANEN_SIZE				1
+#define GEM_ETHT2IDX_OFFSET			9 /* Index to screener type 2 EtherType register */
+#define GEM_ETHT2IDX_SIZE			3
+#define GEM_ETHTEN_OFFSET			12 /* EtherType Enable */
+#define GEM_ETHTEN_SIZE				1
+#define GEM_CMPA_OFFSET				13 /* Compare A - Index to screener type 2 Compare register */
+#define GEM_CMPA_SIZE				5
+#define GEM_CMPAEN_OFFSET			18 /* Compare A Enable */
+#define GEM_CMPAEN_SIZE				1
+#define GEM_CMPB_OFFSET				19 /* Compare B - Index to screener type 2 Compare register */
+#define GEM_CMPB_SIZE				5
+#define GEM_CMPBEN_OFFSET			24 /* Compare B Enable */
+#define GEM_CMPBEN_SIZE				1
+#define GEM_CMPC_OFFSET				25 /* Compare C - Index to screener type 2 Compare register */
+#define GEM_CMPC_SIZE				5
+#define GEM_CMPCEN_OFFSET			30 /* Compare C Enable */
+#define GEM_CMPCEN_SIZE				1
+
+/* Bitfields in ETHT */
+#define GEM_ETHTCMP_OFFSET			0 /* EtherType compare value */
+#define GEM_ETHTCMP_SIZE			16
+
+/* Bitfields in T2CMPW0 */
+#define GEM_T2CMP_OFFSET			16 /* 0xFFFF0000 compare value */
+#define GEM_T2CMP_SIZE				16
+#define GEM_T2MASK_OFFSET			0 /* 0x0000FFFF compare value or mask */
+#define GEM_T2MASK_SIZE				16
+
+/* Bitfields in T2CMPW1 */
+#define GEM_T2DISMSK_OFFSET			9 /* disable mask */
+#define GEM_T2DISMSK_SIZE			1
+#define GEM_T2CMPOFST_OFFSET			7 /* compare offset */
+#define GEM_T2CMPOFST_SIZE			2
+#define GEM_T2OFST_OFFSET			0 /* offset value */
+#define GEM_T2OFST_SIZE				7
+
+/* Offset for screener type 2 compare values (T2CMPOFST).
+ * Note the offset is applied after the specified point,
+ * e.g. GEM_T2COMPOFST_ETYPE denotes the EtherType field, so an offset
+ * of 12 bytes from this would be the source IP address in an IP header
+ */
+#define GEM_T2COMPOFST_SOF		0
+#define GEM_T2COMPOFST_ETYPE	1
+#define GEM_T2COMPOFST_IPHDR	2
+#define GEM_T2COMPOFST_TCPUDP	3
+
+/* offset from EtherType to IP address */
+#define ETYPE_SRCIP_OFFSET			12
+#define ETYPE_DSTIP_OFFSET			16
+
+/* offset from IP header to port */
+#define IPHDR_SRCPORT_OFFSET		0
+#define IPHDR_DSTPORT_OFFSET		2
+
 /* Transmit DMA buffer descriptor Word 1 */
 #define GEM_DMA_TXVALID_OFFSET		23 /* timestamp has been captured in the Buffer Descriptor */
 #define GEM_DMA_TXVALID_SIZE		1
@@ -583,6 +677,8 @@
 #define gem_writel(port, reg, value)	(port)->macb_reg_writel((port), GEM_##reg, (value))
 #define queue_readl(queue, reg)		(queue)->bp->macb_reg_readl((queue)->bp, (queue)->reg)
 #define queue_writel(queue, reg, value)	(queue)->bp->macb_reg_writel((queue)->bp, (queue)->reg, (value))
+#define gem_readl_n(port, reg, idx)		(port)->macb_reg_readl((port), GEM_##reg + idx * 4)
+#define gem_writel_n(port, reg, idx, value)	(port)->macb_reg_writel((port), GEM_##reg + idx * 4, (value))
 
 #define PTP_TS_BUFFER_SIZE		128 /* must be power of 2 */
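The indexed accessors above, together with macb's pre-existing
GEM_BFINS/GEM_BFEXT bitfield helpers and the _OFFSET/_SIZE pairs defined
earlier, are how the screener registers get programmed later in this
patch. A small illustrative use (values are arbitrary):

	u32 t2_scr = 0;

	t2_scr = GEM_BFINS(QUEUE, 2, t2_scr);	/* steer matches to RX queue 2 */
	t2_scr = GEM_BFINS(ETHTEN, 1, t2_scr);	/* enable the EtherType check */
	gem_writel_n(bp, SCRT2, 0, t2_scr);	/* program screener register 0 */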
 
@@ -920,13 +1016,42 @@ static const struct gem_statistic gem_statistics[] = {
 
 #define GEM_STATS_LEN ARRAY_SIZE(gem_statistics)
 
+#define QUEUE_STAT_TITLE(title) {	\
+	.stat_string = title,			\
+}
+
+/* per queue statistics, each should be unsigned long type */
+struct queue_stats {
+	union {
+		unsigned long first;
+		unsigned long rx_packets;
+	};
+	unsigned long rx_bytes;
+	unsigned long rx_dropped;
+	unsigned long tx_packets;
+	unsigned long tx_bytes;
+	unsigned long tx_dropped;
+};
+
+static const struct gem_statistic queue_statistics[] = {
+		QUEUE_STAT_TITLE("rx_packets"),
+		QUEUE_STAT_TITLE("rx_bytes"),
+		QUEUE_STAT_TITLE("rx_dropped"),
+		QUEUE_STAT_TITLE("tx_packets"),
+		QUEUE_STAT_TITLE("tx_bytes"),
+		QUEUE_STAT_TITLE("tx_dropped"),
+};
+
+#define QUEUE_STATS_LEN ARRAY_SIZE(queue_statistics)
+
 struct macb;
+struct macb_queue;
 
 struct macb_or_gem_ops {
 	int	(*mog_alloc_rx_buffers)(struct macb *bp);
 	void	(*mog_free_rx_buffers)(struct macb *bp);
 	void	(*mog_init_rings)(struct macb *bp);
-	int	(*mog_rx)(struct macb *bp, int budget);
+	int	(*mog_rx)(struct macb_queue *queue, int budget);
 };
 
 /* MACB-PTP interface: adapt to platform needs. */
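A note on the queue_stats layout above: the anonymous union makes "first"
alias rx_packets, so the struct can be walked as a flat array of unsigned
long counters. The gem_update_stats() change in macb_main.c relies on
exactly this, roughly:

	unsigned long *stat = &queue->stats.first;
	unsigned int i;

	for (i = 0; i < QUEUE_STATS_LEN; i++, stat++)
		bp->ethtool_stats[idx++] = *stat;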
@@ -968,6 +1093,9 @@ struct macb_queue {
 	unsigned int		IMR;
 	unsigned int		TBQP;
 	unsigned int		TBQPH;
+	unsigned int		RBQS;
+	unsigned int		RBQP;
+	unsigned int		RBQPH;
 
 	unsigned int		tx_head, tx_tail;
 	struct macb_dma_desc	*tx_ring;
@@ -975,6 +1103,16 @@ struct macb_queue {
 	dma_addr_t		tx_ring_dma;
 	struct work_struct	tx_error_task;
 
+	dma_addr_t		rx_ring_dma;
+	dma_addr_t		rx_buffers_dma;
+	unsigned int		rx_tail;
+	unsigned int		rx_prepared_head;
+	struct macb_dma_desc	*rx_ring;
+	struct sk_buff		**rx_skbuff;
+	void			*rx_buffers;
+	struct napi_struct	napi;
+	struct queue_stats stats;
+
 #ifdef CONFIG_MACB_USE_HWSTAMP
 	struct work_struct	tx_ts_task;
 	unsigned int		tx_ts_head, tx_ts_tail;
@@ -982,6 +1120,16 @@ struct macb_queue {
 #endif
 };
 
+struct ethtool_rx_fs_item {
+	struct ethtool_rx_flow_spec fs;
+	struct list_head list;
+};
+
+struct ethtool_rx_fs_list {
+	struct list_head list;
+	unsigned int count;
+};
+
 struct macb {
 	void __iomem		*regs;
 	bool			native_io;
@@ -990,11 +1138,6 @@ struct macb {
 	u32	(*macb_reg_readl)(struct macb *bp, int offset);
 	void	(*macb_reg_writel)(struct macb *bp, int offset, u32 value);
 
-	unsigned int		rx_tail;
-	unsigned int		rx_prepared_head;
-	struct macb_dma_desc	*rx_ring;
-	struct sk_buff		**rx_skbuff;
-	void			*rx_buffers;
 	size_t			rx_buffer_size;
 
 	unsigned int		rx_ring_size;
@@ -1011,15 +1154,11 @@ struct macb {
 	struct clk		*tx_clk;
 	struct clk		*rx_clk;
 	struct net_device	*dev;
-	struct napi_struct	napi;
 	union {
 		struct macb_stats	macb;
 		struct gem_stats	gem;
 	}			hw_stats;
 
-	dma_addr_t		rx_ring_dma;
-	dma_addr_t		rx_buffers_dma;
-
 	struct macb_or_gem_ops	macbgem_ops;
 
 	struct mii_bus		*mii_bus;
@@ -1032,7 +1171,6 @@ struct macb {
 	unsigned int		dma_burst_length;
 
 	phy_interface_t		phy_interface;
-	struct gpio_desc	*reset_gpio;
 
 	/* AT91RM9200 transmit */
 	struct sk_buff *skb;			/* holds skb until xmit interrupt completes */
@@ -1040,7 +1178,7 @@ struct macb {
 	int skb_length;				/* saved skb length for pci_unmap_single */
 	unsigned int		max_tx_length;
 
-	u64			ethtool_stats[GEM_STATS_LEN];
+	u64			ethtool_stats[GEM_STATS_LEN + QUEUE_STATS_LEN * MACB_MAX_QUEUES];
 
 	unsigned int		rx_frm_len_mask;
 	unsigned int		jumbo_max_len;
@@ -1057,6 +1195,11 @@ struct macb {
 	struct ptp_clock_info ptp_clock_info;
 	struct tsu_incr tsu_incr;
 	struct hwtstamp_config tstamp_config;
+
+	/* RX queue filter rule set */
+	struct ethtool_rx_fs_list rx_fs_list;
+	spinlock_t rx_fs_lock;
+	unsigned int max_tuples;
 };
 
 #ifdef CONFIG_MACB_USE_HWSTAMP
diff --git a/drivers/net/ethernet/cadence/macb_main.c b/drivers/net/ethernet/cadence/macb_main.c
index 72a67f7..234667e 100644
--- a/drivers/net/ethernet/cadence/macb_main.c
+++ b/drivers/net/ethernet/cadence/macb_main.c
@@ -194,17 +194,17 @@ static unsigned int macb_rx_ring_wrap(struct macb *bp, unsigned int index)
 	return index & (bp->rx_ring_size - 1);
 }
 
-static struct macb_dma_desc *macb_rx_desc(struct macb *bp, unsigned int index)
+static struct macb_dma_desc *macb_rx_desc(struct macb_queue *queue, unsigned int index)
 {
-	index = macb_rx_ring_wrap(bp, index);
-	index = macb_adj_dma_desc_idx(bp, index);
-	return &bp->rx_ring[index];
+	index = macb_rx_ring_wrap(queue->bp, index);
+	index = macb_adj_dma_desc_idx(queue->bp, index);
+	return &queue->rx_ring[index];
 }
 
-static void *macb_rx_buffer(struct macb *bp, unsigned int index)
+static void *macb_rx_buffer(struct macb_queue *queue, unsigned int index)
 {
-	return bp->rx_buffers + bp->rx_buffer_size *
-	       macb_rx_ring_wrap(bp, index);
+	return queue->rx_buffers + queue->bp->rx_buffer_size *
+	       macb_rx_ring_wrap(queue->bp, index);
 }
 
 /* I/O accessors */
@@ -759,7 +759,9 @@ static void macb_tx_error_task(struct work_struct *work)
 					    macb_tx_ring_wrap(bp, tail),
 					    skb->data);
 				bp->dev->stats.tx_packets++;
+				queue->stats.tx_packets++;
 				bp->dev->stats.tx_bytes += skb->len;
+				queue->stats.tx_bytes += skb->len;
 			}
 		} else {
 			/* "Buffers exhausted mid-frame" errors may only happen
@@ -859,7 +861,9 @@ static void macb_tx_interrupt(struct macb_queue *queue)
 					    macb_tx_ring_wrap(bp, tail),
 					    skb->data);
 				bp->dev->stats.tx_packets++;
+				queue->stats.tx_packets++;
 				bp->dev->stats.tx_bytes += skb->len;
+				queue->stats.tx_bytes += skb->len;
 			}
 
 			/* Now we can safely release resources */
@@ -881,24 +885,25 @@ static void macb_tx_interrupt(struct macb_queue *queue)
 		netif_wake_subqueue(bp->dev, queue_index);
 }
 
-static void gem_rx_refill(struct macb *bp)
+static void gem_rx_refill(struct macb_queue *queue)
 {
 	unsigned int		entry;
 	struct sk_buff		*skb;
 	dma_addr_t		paddr;
+	struct macb *bp = queue->bp;
 	struct macb_dma_desc *desc;
 
-	while (CIRC_SPACE(bp->rx_prepared_head, bp->rx_tail,
-			  bp->rx_ring_size) > 0) {
-		entry = macb_rx_ring_wrap(bp, bp->rx_prepared_head);
+	while (CIRC_SPACE(queue->rx_prepared_head, queue->rx_tail,
+			bp->rx_ring_size) > 0) {
+		entry = macb_rx_ring_wrap(bp, queue->rx_prepared_head);
 
 		/* Make hw descriptor updates visible to CPU */
 		rmb();
 
-		bp->rx_prepared_head++;
-		desc = macb_rx_desc(bp, entry);
+		queue->rx_prepared_head++;
+		desc = macb_rx_desc(queue, entry);
 
-		if (!bp->rx_skbuff[entry]) {
+		if (!queue->rx_skbuff[entry]) {
 			/* allocate sk_buff for this free entry in ring */
 			skb = netdev_alloc_skb(bp->dev, bp->rx_buffer_size);
 			if (unlikely(!skb)) {
@@ -916,7 +921,7 @@ static void gem_rx_refill(struct macb *bp)
 				break;
 			}
 
-			bp->rx_skbuff[entry] = skb;
+			queue->rx_skbuff[entry] = skb;
 
 			if (entry == bp->rx_ring_size - 1)
 				paddr |= MACB_BIT(RX_WRAP);
@@ -934,18 +939,18 @@ static void gem_rx_refill(struct macb *bp)
 	/* Make descriptor updates visible to hardware */
 	wmb();
 
-	netdev_vdbg(bp->dev, "rx ring: prepared head %d, tail %d\n",
-		    bp->rx_prepared_head, bp->rx_tail);
+	netdev_vdbg(bp->dev, "rx ring: queue: %p, prepared head %d, tail %d\n",
+			queue, queue->rx_prepared_head, queue->rx_tail);
 }
 
 /* Mark DMA descriptors from begin up to and not including end as unused */
-static void discard_partial_frame(struct macb *bp, unsigned int begin,
+static void discard_partial_frame(struct macb_queue *queue, unsigned int begin,
 				  unsigned int end)
 {
 	unsigned int frag;
 
 	for (frag = begin; frag != end; frag++) {
-		struct macb_dma_desc *desc = macb_rx_desc(bp, frag);
+		struct macb_dma_desc *desc = macb_rx_desc(queue, frag);
 
 		desc->addr &= ~MACB_BIT(RX_USED);
 	}
@@ -959,8 +964,9 @@ static void discard_partial_frame(struct macb *bp, unsigned int begin,
 	 */
 }
 
-static int gem_rx(struct macb *bp, int budget)
+static int gem_rx(struct macb_queue *queue, int budget)
 {
+	struct macb *bp = queue->bp;
 	unsigned int		len;
 	unsigned int		entry;
 	struct sk_buff		*skb;
@@ -972,8 +978,8 @@ static int gem_rx(struct macb *bp, int budget)
 		dma_addr_t addr;
 		bool rxused;
 
-		entry = macb_rx_ring_wrap(bp, bp->rx_tail);
-		desc = macb_rx_desc(bp, entry);
+		entry = macb_rx_ring_wrap(bp, queue->rx_tail);
+		desc = macb_rx_desc(queue, entry);
 
 		/* Make hw descriptor updates visible to CPU */
 		rmb();
@@ -985,24 +991,26 @@ static int gem_rx(struct macb *bp, int budget)
 		if (!rxused)
 			break;
 
-		bp->rx_tail++;
+		queue->rx_tail++;
 		count++;
 
 		if (!(ctrl & MACB_BIT(RX_SOF) && ctrl & MACB_BIT(RX_EOF))) {
 			netdev_err(bp->dev,
 				   "not whole frame pointed by descriptor\n");
 			bp->dev->stats.rx_dropped++;
+			queue->stats.rx_dropped++;
 			break;
 		}
-		skb = bp->rx_skbuff[entry];
+		skb = queue->rx_skbuff[entry];
 		if (unlikely(!skb)) {
 			netdev_err(bp->dev,
 				   "inconsistent Rx descriptor chain\n");
 			bp->dev->stats.rx_dropped++;
+			queue->stats.rx_dropped++;
 			break;
 		}
 		/* now everything is ready for receiving packet */
-		bp->rx_skbuff[entry] = NULL;
+		queue->rx_skbuff[entry] = NULL;
 		len = ctrl & bp->rx_frm_len_mask;
 
 		netdev_vdbg(bp->dev, "gem_rx %u (len %u)\n", entry, len);
@@ -1019,7 +1027,9 @@ static int gem_rx(struct macb *bp, int budget)
 			skb->ip_summed = CHECKSUM_UNNECESSARY;
 
 		bp->dev->stats.rx_packets++;
+		queue->stats.rx_packets++;
 		bp->dev->stats.rx_bytes += skb->len;
+		queue->stats.rx_bytes += skb->len;
 
 		gem_ptp_do_rxstamp(bp, skb, desc);
 
@@ -1035,12 +1045,12 @@ static int gem_rx(struct macb *bp, int budget)
 		netif_receive_skb(skb);
 	}
 
-	gem_rx_refill(bp);
+	gem_rx_refill(queue);
 
 	return count;
 }
 
-static int macb_rx_frame(struct macb *bp, unsigned int first_frag,
+static int macb_rx_frame(struct macb_queue *queue, unsigned int first_frag,
 			 unsigned int last_frag)
 {
 	unsigned int len;
@@ -1048,8 +1058,9 @@ static int macb_rx_frame(struct macb *bp, unsigned int first_frag,
 	unsigned int offset;
 	struct sk_buff *skb;
 	struct macb_dma_desc *desc;
+	struct macb *bp = queue->bp;
 
-	desc = macb_rx_desc(bp, last_frag);
+	desc = macb_rx_desc(queue, last_frag);
 	len = desc->ctrl & bp->rx_frm_len_mask;
 
 	netdev_vdbg(bp->dev, "macb_rx_frame frags %u - %u (len %u)\n",
@@ -1068,7 +1079,7 @@ static int macb_rx_frame(struct macb *bp, unsigned int first_frag,
 	if (!skb) {
 		bp->dev->stats.rx_dropped++;
 		for (frag = first_frag; ; frag++) {
-			desc = macb_rx_desc(bp, frag);
+			desc = macb_rx_desc(queue, frag);
 			desc->addr &= ~MACB_BIT(RX_USED);
 			if (frag == last_frag)
 				break;
@@ -1096,10 +1107,10 @@ static int macb_rx_frame(struct macb *bp, unsigned int first_frag,
 			frag_len = len - offset;
 		}
 		skb_copy_to_linear_data_offset(skb, offset,
-					       macb_rx_buffer(bp, frag),
+					       macb_rx_buffer(queue, frag),
 					       frag_len);
 		offset += bp->rx_buffer_size;
-		desc = macb_rx_desc(bp, frag);
+		desc = macb_rx_desc(queue, frag);
 		desc->addr &= ~MACB_BIT(RX_USED);
 
 		if (frag == last_frag)
@@ -1121,32 +1132,34 @@ static int macb_rx_frame(struct macb *bp, unsigned int first_frag,
 	return 0;
 }
 
-static inline void macb_init_rx_ring(struct macb *bp)
+static inline void macb_init_rx_ring(struct macb_queue *queue)
 {
+	struct macb *bp = queue->bp;
 	dma_addr_t addr;
 	struct macb_dma_desc *desc = NULL;
 	int i;
 
-	addr = bp->rx_buffers_dma;
+	addr = queue->rx_buffers_dma;
 	for (i = 0; i < bp->rx_ring_size; i++) {
-		desc = macb_rx_desc(bp, i);
+		desc = macb_rx_desc(queue, i);
 		macb_set_addr(bp, desc, addr);
 		desc->ctrl = 0;
 		addr += bp->rx_buffer_size;
 	}
 	desc->addr |= MACB_BIT(RX_WRAP);
-	bp->rx_tail = 0;
+	queue->rx_tail = 0;
 }
 
-static int macb_rx(struct macb *bp, int budget)
+static int macb_rx(struct macb_queue *queue, int budget)
 {
+	struct macb *bp = queue->bp;
 	bool reset_rx_queue = false;
 	int received = 0;
 	unsigned int tail;
 	int first_frag = -1;
 
-	for (tail = bp->rx_tail; budget > 0; tail++) {
-		struct macb_dma_desc *desc = macb_rx_desc(bp, tail);
+	for (tail = queue->rx_tail; budget > 0; tail++) {
+		struct macb_dma_desc *desc = macb_rx_desc(queue, tail);
 		u32 ctrl;
 
 		/* Make hw descriptor updates visible to CPU */
@@ -1159,7 +1172,7 @@ static int macb_rx(struct macb *bp, int budget)
 
 		if (ctrl & MACB_BIT(RX_SOF)) {
 			if (first_frag != -1)
-				discard_partial_frame(bp, first_frag, tail);
+				discard_partial_frame(queue, first_frag, tail);
 			first_frag = tail;
 		}
 
@@ -1171,7 +1184,7 @@ static int macb_rx(struct macb *bp, int budget)
 				continue;
 			}
 
-			dropped = macb_rx_frame(bp, first_frag, tail);
+			dropped = macb_rx_frame(queue, first_frag, tail);
 			first_frag = -1;
 			if (unlikely(dropped < 0)) {
 				reset_rx_queue = true;
@@ -1195,8 +1208,8 @@ static int macb_rx(struct macb *bp, int budget)
 		ctrl = macb_readl(bp, NCR);
 		macb_writel(bp, NCR, ctrl & ~MACB_BIT(RE));
 
-		macb_init_rx_ring(bp);
-		macb_writel(bp, RBQP, bp->rx_ring_dma);
+		macb_init_rx_ring(queue);
+		queue_writel(queue, RBQP, queue->rx_ring_dma);
 
 		macb_writel(bp, NCR, ctrl | MACB_BIT(RE));
 
@@ -1205,16 +1218,17 @@ static int macb_rx(struct macb *bp, int budget)
 	}
 
 	if (first_frag != -1)
-		bp->rx_tail = first_frag;
+		queue->rx_tail = first_frag;
 	else
-		bp->rx_tail = tail;
+		queue->rx_tail = tail;
 
 	return received;
 }
 
 static int macb_poll(struct napi_struct *napi, int budget)
 {
-	struct macb *bp = container_of(napi, struct macb, napi);
+	struct macb_queue *queue = container_of(napi, struct macb_queue, napi);
+	struct macb *bp = queue->bp;
 	int work_done;
 	u32 status;
 
@@ -1224,7 +1238,7 @@ static int macb_poll(struct napi_struct *napi, int budget)
 	netdev_vdbg(bp->dev, "poll: status = %08lx, budget = %d\n",
 		    (unsigned long)status, budget);
 
-	work_done = bp->macbgem_ops.mog_rx(bp, budget);
+	work_done = bp->macbgem_ops.mog_rx(queue, budget);
 	if (work_done < budget) {
 		napi_complete_done(napi, work_done);
 
@@ -1232,10 +1246,10 @@ static int macb_poll(struct napi_struct *napi, int budget)
 		status = macb_readl(bp, RSR);
 		if (status) {
 			if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
-				macb_writel(bp, ISR, MACB_BIT(RCOMP));
+				queue_writel(queue, ISR, MACB_BIT(RCOMP));
 			napi_reschedule(napi);
 		} else {
-			macb_writel(bp, IER, MACB_RX_INT_FLAGS);
+			queue_writel(queue, IER, MACB_RX_INT_FLAGS);
 		}
 	}
 
@@ -1282,9 +1296,9 @@ static irqreturn_t macb_interrupt(int irq, void *dev_id)
 			if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
 				queue_writel(queue, ISR, MACB_BIT(RCOMP));
 
-			if (napi_schedule_prep(&bp->napi)) {
+			if (napi_schedule_prep(&queue->napi)) {
 				netdev_vdbg(bp->dev, "scheduling RX softirq\n");
-				__napi_schedule(&bp->napi);
+				__napi_schedule(&queue->napi);
 			}
 		}
 
@@ -1708,38 +1722,44 @@ static void gem_free_rx_buffers(struct macb *bp)
 {
 	struct sk_buff		*skb;
 	struct macb_dma_desc	*desc;
+	struct macb_queue *queue;
 	dma_addr_t		addr;
+	unsigned int q;
 	int i;
 
-	if (!bp->rx_skbuff)
-		return;
-
-	for (i = 0; i < bp->rx_ring_size; i++) {
-		skb = bp->rx_skbuff[i];
-
-		if (!skb)
+	for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
+		if (!queue->rx_skbuff)
 			continue;
 
-		desc = macb_rx_desc(bp, i);
-		addr = macb_get_addr(bp, desc);
+		for (i = 0; i < bp->rx_ring_size; i++) {
+			skb = queue->rx_skbuff[i];
 
-		dma_unmap_single(&bp->pdev->dev, addr, bp->rx_buffer_size,
-				 DMA_FROM_DEVICE);
-		dev_kfree_skb_any(skb);
-		skb = NULL;
+			if (!skb)
+				continue;
+
+			desc = macb_rx_desc(queue, i);
+			addr = macb_get_addr(bp, desc);
+
+			dma_unmap_single(&bp->pdev->dev, addr, bp->rx_buffer_size,
+					DMA_FROM_DEVICE);
+			dev_kfree_skb_any(skb);
+			skb = NULL;
+		}
+
+		kfree(queue->rx_skbuff);
+		queue->rx_skbuff = NULL;
 	}
-
-	kfree(bp->rx_skbuff);
-	bp->rx_skbuff = NULL;
 }
 
 static void macb_free_rx_buffers(struct macb *bp)
 {
-	if (bp->rx_buffers) {
+	struct macb_queue *queue = &bp->queues[0];
+
+	if (queue->rx_buffers) {
 		dma_free_coherent(&bp->pdev->dev,
 				  bp->rx_ring_size * bp->rx_buffer_size,
-				  bp->rx_buffers, bp->rx_buffers_dma);
-		bp->rx_buffers = NULL;
+				  queue->rx_buffers, queue->rx_buffers_dma);
+		queue->rx_buffers = NULL;
 	}
 }
 
@@ -1748,11 +1768,12 @@ static void macb_free_consistent(struct macb *bp)
 	struct macb_queue *queue;
 	unsigned int q;
 
+	queue = &bp->queues[0];
 	bp->macbgem_ops.mog_free_rx_buffers(bp);
-	if (bp->rx_ring) {
+	if (queue->rx_ring) {
 		dma_free_coherent(&bp->pdev->dev, RX_RING_BYTES(bp),
-				  bp->rx_ring, bp->rx_ring_dma);
-		bp->rx_ring = NULL;
+				queue->rx_ring, queue->rx_ring_dma);
+		queue->rx_ring = NULL;
 	}
 
 	for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
@@ -1768,32 +1789,37 @@ static void macb_free_consistent(struct macb *bp)
 
 static int gem_alloc_rx_buffers(struct macb *bp)
 {
+	struct macb_queue *queue;
+	unsigned int q;
 	int size;
 
-	size = bp->rx_ring_size * sizeof(struct sk_buff *);
-	bp->rx_skbuff = kzalloc(size, GFP_KERNEL);
-	if (!bp->rx_skbuff)
-		return -ENOMEM;
-	else
-		netdev_dbg(bp->dev,
-			   "Allocated %d RX struct sk_buff entries at %p\n",
-			   bp->rx_ring_size, bp->rx_skbuff);
+	for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
+		size = bp->rx_ring_size * sizeof(struct sk_buff *);
+		queue->rx_skbuff = kzalloc(size, GFP_KERNEL);
+		if (!queue->rx_skbuff)
+			return -ENOMEM;
+		else
+			netdev_dbg(bp->dev,
+				   "Allocated %d RX struct sk_buff entries at %p\n",
+				   bp->rx_ring_size, queue->rx_skbuff);
+	}
 	return 0;
 }
 
 static int macb_alloc_rx_buffers(struct macb *bp)
 {
+	struct macb_queue *queue = &bp->queues[0];
 	int size;
 
 	size = bp->rx_ring_size * bp->rx_buffer_size;
-	bp->rx_buffers = dma_alloc_coherent(&bp->pdev->dev, size,
-					    &bp->rx_buffers_dma, GFP_KERNEL);
-	if (!bp->rx_buffers)
+	queue->rx_buffers = dma_alloc_coherent(&bp->pdev->dev, size,
+					    &queue->rx_buffers_dma, GFP_KERNEL);
+	if (!queue->rx_buffers)
 		return -ENOMEM;
 
 	netdev_dbg(bp->dev,
 		   "Allocated RX buffers of %d bytes at %08lx (mapped %p)\n",
-		   size, (unsigned long)bp->rx_buffers_dma, bp->rx_buffers);
+		   size, (unsigned long)queue->rx_buffers_dma, queue->rx_buffers);
 	return 0;
 }
 
@@ -1819,17 +1845,16 @@ static int macb_alloc_consistent(struct macb *bp)
 		queue->tx_skb = kmalloc(size, GFP_KERNEL);
 		if (!queue->tx_skb)
 			goto out_err;
+
+		size = RX_RING_BYTES(bp);
+		queue->rx_ring = dma_alloc_coherent(&bp->pdev->dev, size,
+						 &queue->rx_ring_dma, GFP_KERNEL);
+		if (!queue->rx_ring)
+			goto out_err;
+		netdev_dbg(bp->dev,
+			   "Allocated RX ring of %d bytes at %08lx (mapped %p)\n",
+			   size, (unsigned long)queue->rx_ring_dma, queue->rx_ring);
 	}
-
-	size = RX_RING_BYTES(bp);
-	bp->rx_ring = dma_alloc_coherent(&bp->pdev->dev, size,
-					 &bp->rx_ring_dma, GFP_KERNEL);
-	if (!bp->rx_ring)
-		goto out_err;
-	netdev_dbg(bp->dev,
-		   "Allocated RX ring of %d bytes at %08lx (mapped %p)\n",
-		   size, (unsigned long)bp->rx_ring_dma, bp->rx_ring);
-
 	if (bp->macbgem_ops.mog_alloc_rx_buffers(bp))
 		goto out_err;
 
@@ -1856,12 +1881,13 @@ static void gem_init_rings(struct macb *bp)
 		desc->ctrl |= MACB_BIT(TX_WRAP);
 		queue->tx_head = 0;
 		queue->tx_tail = 0;
+
+		queue->rx_tail = 0;
+		queue->rx_prepared_head = 0;
+
+		gem_rx_refill(queue);
 	}
 
-	bp->rx_tail = 0;
-	bp->rx_prepared_head = 0;
-
-	gem_rx_refill(bp);
 }
 
 static void macb_init_rings(struct macb *bp)
@@ -1869,7 +1895,7 @@ static void macb_init_rings(struct macb *bp)
 	int i;
 	struct macb_dma_desc *desc = NULL;
 
-	macb_init_rx_ring(bp);
+	macb_init_rx_ring(&bp->queues[0]);
 
 	for (i = 0; i < bp->tx_ring_size; i++) {
 		desc = macb_tx_desc(&bp->queues[0], i);
@@ -1978,11 +2004,20 @@ static u32 macb_dbw(struct macb *bp)
  */
 static void macb_configure_dma(struct macb *bp)
 {
+	struct macb_queue *queue;
+	u32 buffer_size;
+	unsigned int q;
 	u32 dmacfg;
 
+	buffer_size = bp->rx_buffer_size / RX_BUFFER_MULTIPLE;
 	if (macb_is_gem(bp)) {
 		dmacfg = gem_readl(bp, DMACFG) & ~GEM_BF(RXBS, -1L);
-		dmacfg |= GEM_BF(RXBS, bp->rx_buffer_size / RX_BUFFER_MULTIPLE);
+		for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
+			if (q)
+				queue_writel(queue, RBQS, buffer_size);
+			else
+				dmacfg |= GEM_BF(RXBS, buffer_size);
+		}
 		if (bp->dma_burst_length)
 			dmacfg = GEM_BFINS(FBLDO, bp->dma_burst_length, dmacfg);
 		dmacfg |= GEM_BIT(TXPBMS) | GEM_BF(RXBMS, -1L);
@@ -2051,12 +2086,12 @@ static void macb_init_hw(struct macb *bp)
 	macb_configure_dma(bp);
 
 	/* Initialize TX and RX buffers */
-	macb_writel(bp, RBQP, lower_32_bits(bp->rx_ring_dma));
-#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
-	if (bp->hw_dma_cap & HW_DMA_CAP_64B)
-		macb_writel(bp, RBQPH, upper_32_bits(bp->rx_ring_dma));
-#endif
 	for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
+		queue_writel(queue, RBQP, lower_32_bits(queue->rx_ring_dma));
+#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
+		if (bp->hw_dma_cap & HW_DMA_CAP_64B)
+			queue_writel(queue, RBQPH, upper_32_bits(queue->rx_ring_dma));
+#endif
 		queue_writel(queue, TBQP, lower_32_bits(queue->tx_ring_dma));
 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 		if (bp->hw_dma_cap & HW_DMA_CAP_64B)
@@ -2197,6 +2232,8 @@ static int macb_open(struct net_device *dev)
 {
 	struct macb *bp = netdev_priv(dev);
 	size_t bufsz = dev->mtu + ETH_HLEN + ETH_FCS_LEN + NET_IP_ALIGN;
+	struct macb_queue *queue;
+	unsigned int q;
 	int err;
 
 	netdev_dbg(bp->dev, "open\n");
@@ -2218,11 +2255,12 @@ static int macb_open(struct net_device *dev)
 		return err;
 	}
 
-	napi_enable(&bp->napi);
-
 	bp->macbgem_ops.mog_init_rings(bp);
 	macb_init_hw(bp);
 
+	for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue)
+		napi_enable(&queue->napi);
+
 	/* schedule a link state check */
 	phy_start(dev->phydev);
 
@@ -2237,10 +2275,14 @@ static int macb_open(struct net_device *dev)
 static int macb_close(struct net_device *dev)
 {
 	struct macb *bp = netdev_priv(dev);
+	struct macb_queue *queue;
 	unsigned long flags;
+	unsigned int q;
 
 	netif_tx_stop_all_queues(dev);
-	napi_disable(&bp->napi);
+
+	for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue)
+		napi_disable(&queue->napi);
 
 	if (dev->phydev)
 		phy_stop(dev->phydev);
@@ -2270,7 +2312,10 @@ static int macb_change_mtu(struct net_device *dev, int new_mtu)
 
 static void gem_update_stats(struct macb *bp)
 {
-	unsigned int i;
+	struct macb_queue *queue;
+	unsigned int i, q, idx;
+	unsigned long *stat;
+
 	u32 *p = &bp->hw_stats.gem.tx_octets_31_0;
 
 	for (i = 0; i < GEM_STATS_LEN; ++i, ++p) {
@@ -2287,6 +2332,11 @@ static void gem_update_stats(struct macb *bp)
 			*(++p) += val;
 		}
 	}
+
+	idx = GEM_STATS_LEN;
+	for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue)
+		for (i = 0, stat = &queue->stats.first; i < QUEUE_STATS_LEN; ++i, ++stat)
+			bp->ethtool_stats[idx++] = *stat;
 }
 
 static struct net_device_stats *gem_get_stats(struct macb *bp)
@@ -2334,14 +2384,17 @@ static void gem_get_ethtool_stats(struct net_device *dev,
 
 	bp = netdev_priv(dev);
 	gem_update_stats(bp);
-	memcpy(data, &bp->ethtool_stats, sizeof(u64) * GEM_STATS_LEN);
+	memcpy(data, &bp->ethtool_stats, sizeof(u64)
+			* (GEM_STATS_LEN + QUEUE_STATS_LEN * MACB_MAX_QUEUES));
 }
 
 static int gem_get_sset_count(struct net_device *dev, int sset)
 {
+	struct macb *bp = netdev_priv(dev);
+
 	switch (sset) {
 	case ETH_SS_STATS:
-		return GEM_STATS_LEN;
+		return GEM_STATS_LEN + bp->num_queues * QUEUE_STATS_LEN;
 	default:
 		return -EOPNOTSUPP;
 	}
@@ -2349,13 +2402,25 @@ static int gem_get_sset_count(struct net_device *dev, int sset)
 
 static void gem_get_ethtool_strings(struct net_device *dev, u32 sset, u8 *p)
 {
+	char stat_string[ETH_GSTRING_LEN];
+	struct macb *bp = netdev_priv(dev);
+	struct macb_queue *queue;
 	unsigned int i;
+	unsigned int q;
 
 	switch (sset) {
 	case ETH_SS_STATS:
 		for (i = 0; i < GEM_STATS_LEN; i++, p += ETH_GSTRING_LEN)
 			memcpy(p, gem_statistics[i].stat_string,
 			       ETH_GSTRING_LEN);
+
+		for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
+			for (i = 0; i < QUEUE_STATS_LEN; i++, p += ETH_GSTRING_LEN) {
+				snprintf(stat_string, ETH_GSTRING_LEN, "q%d_%s",
+						q, queue_statistics[i].stat_string);
+				memcpy(p, stat_string, ETH_GSTRING_LEN);
+			}
+		}
 		break;
 	}
 }
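With the "q%d_%s" naming above, the per-queue counters show up alongside
the existing GEM statistics in ethtool -S output; for example (interface
name and values are hypothetical):

	$ ethtool -S eth0 | grep '^ *q'
	     q0_rx_packets: 1024
	     q0_rx_bytes: 153600
	     q1_tx_dropped: 0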
@@ -2603,6 +2668,307 @@ static int macb_get_ts_info(struct net_device *netdev,
 	return ethtool_op_get_ts_info(netdev, info);
 }
 
+static void gem_enable_flow_filters(struct macb *bp, bool enable)
+{
+	struct ethtool_rx_fs_item *item;
+	u32 t2_scr;
+	int num_t2_scr;
+
+	num_t2_scr = GEM_BFEXT(T2SCR, gem_readl(bp, DCFG8));
+
+	list_for_each_entry(item, &bp->rx_fs_list.list, list) {
+		struct ethtool_rx_flow_spec *fs = &item->fs;
+		struct ethtool_tcpip4_spec *tp4sp_m;
+
+		if (fs->location >= num_t2_scr)
+			continue;
+
+		t2_scr = gem_readl_n(bp, SCRT2, fs->location);
+
+		/* enable/disable screener regs for the flow entry */
+		t2_scr = GEM_BFINS(ETHTEN, enable, t2_scr);
+
+		/* only enable fields with no masking */
+		tp4sp_m = &(fs->m_u.tcp_ip4_spec);
+
+		if (enable && (tp4sp_m->ip4src == 0xFFFFFFFF))
+			t2_scr = GEM_BFINS(CMPAEN, 1, t2_scr);
+		else
+			t2_scr = GEM_BFINS(CMPAEN, 0, t2_scr);
+
+		if (enable && (tp4sp_m->ip4dst == 0xFFFFFFFF))
+			t2_scr = GEM_BFINS(CMPBEN, 1, t2_scr);
+		else
+			t2_scr = GEM_BFINS(CMPBEN, 0, t2_scr);
+
+		if (enable && ((tp4sp_m->psrc == 0xFFFF) || (tp4sp_m->pdst == 0xFFFF)))
+			t2_scr = GEM_BFINS(CMPCEN, 1, t2_scr);
+		else
+			t2_scr = GEM_BFINS(CMPCEN, 0, t2_scr);
+
+		gem_writel_n(bp, SCRT2, fs->location, t2_scr);
+	}
+}
+
+static void gem_prog_cmp_regs(struct macb *bp, struct ethtool_rx_flow_spec *fs)
+{
+	struct ethtool_tcpip4_spec *tp4sp_v, *tp4sp_m;
+	uint16_t index = fs->location;
+	u32 w0, w1, t2_scr;
+	bool cmp_a = false;
+	bool cmp_b = false;
+	bool cmp_c = false;
+
+	tp4sp_v = &(fs->h_u.tcp_ip4_spec);
+	tp4sp_m = &(fs->m_u.tcp_ip4_spec);
+
+	/* ignore field if any masking set */
+	if (tp4sp_m->ip4src == 0xFFFFFFFF) {
+		/* 1st compare reg - IP source address */
+		w0 = 0;
+		w1 = 0;
+		w0 = tp4sp_v->ip4src;
+		w1 = GEM_BFINS(T2DISMSK, 1, w1); /* 32-bit compare */
+		w1 = GEM_BFINS(T2CMPOFST, GEM_T2COMPOFST_ETYPE, w1);
+		w1 = GEM_BFINS(T2OFST, ETYPE_SRCIP_OFFSET, w1);
+		gem_writel_n(bp, T2CMPW0, T2CMP_OFST(GEM_IP4SRC_CMP(index)), w0);
+		gem_writel_n(bp, T2CMPW1, T2CMP_OFST(GEM_IP4SRC_CMP(index)), w1);
+		cmp_a = true;
+	}
+
+	/* ignore field if any masking set */
+	if (tp4sp_m->ip4dst == 0xFFFFFFFF) {
+		/* 2nd compare reg - IP destination address */
+		w0 = 0;
+		w1 = 0;
+		w0 = tp4sp_v->ip4dst;
+		w1 = GEM_BFINS(T2DISMSK, 1, w1); /* 32-bit compare */
+		w1 = GEM_BFINS(T2CMPOFST, GEM_T2COMPOFST_ETYPE, w1);
+		w1 = GEM_BFINS(T2OFST, ETYPE_DSTIP_OFFSET, w1);
+		gem_writel_n(bp, T2CMPW0, T2CMP_OFST(GEM_IP4DST_CMP(index)), w0);
+		gem_writel_n(bp, T2CMPW1, T2CMP_OFST(GEM_IP4DST_CMP(index)), w1);
+		cmp_b = true;
+	}
+
+	/* ignore both port fields if masking set in both */
+	if ((tp4sp_m->psrc == 0xFFFF) || (tp4sp_m->pdst == 0xFFFF)) {
+		/* 3rd compare reg - source port, destination port */
+		w0 = 0;
+		w1 = 0;
+		w1 = GEM_BFINS(T2CMPOFST, GEM_T2COMPOFST_IPHDR, w1);
+		if (tp4sp_m->psrc == tp4sp_m->pdst) {
+			w0 = GEM_BFINS(T2MASK, tp4sp_v->psrc, w0);
+			w0 = GEM_BFINS(T2CMP, tp4sp_v->pdst, w0);
+			w1 = GEM_BFINS(T2DISMSK, 1, w1); /* 32-bit compare */
+			w1 = GEM_BFINS(T2OFST, IPHDR_SRCPORT_OFFSET, w1);
+		} else {
+			/* only one port definition */
+			w1 = GEM_BFINS(T2DISMSK, 0, w1); /* 16-bit compare */
+			w0 = GEM_BFINS(T2MASK, 0xFFFF, w0);
+			if (tp4sp_m->psrc == 0xFFFF) { /* src port */
+				w0 = GEM_BFINS(T2CMP, tp4sp_v->psrc, w0);
+				w1 = GEM_BFINS(T2OFST, IPHDR_SRCPORT_OFFSET, w1);
+			} else { /* dst port */
+				w0 = GEM_BFINS(T2CMP, tp4sp_v->pdst, w0);
+				w1 = GEM_BFINS(T2OFST, IPHDR_DSTPORT_OFFSET, w1);
+			}
+		}
+		gem_writel_n(bp, T2CMPW0, T2CMP_OFST(GEM_PORT_CMP(index)), w0);
+		gem_writel_n(bp, T2CMPW1, T2CMP_OFST(GEM_PORT_CMP(index)), w1);
+		cmp_c = true;
+	}
+
+	t2_scr = 0;
+	t2_scr = GEM_BFINS(QUEUE, (fs->ring_cookie) & 0xFF, t2_scr);
+	t2_scr = GEM_BFINS(ETHT2IDX, SCRT2_ETHT, t2_scr);
+	if (cmp_a)
+		t2_scr = GEM_BFINS(CMPA, GEM_IP4SRC_CMP(index), t2_scr);
+	if (cmp_b)
+		t2_scr = GEM_BFINS(CMPB, GEM_IP4DST_CMP(index), t2_scr);
+	if (cmp_c)
+		t2_scr = GEM_BFINS(CMPC, GEM_PORT_CMP(index), t2_scr);
+	gem_writel_n(bp, SCRT2, index, t2_scr);
+}
+
+static int gem_add_flow_filter(struct net_device *netdev,
+		struct ethtool_rxnfc *cmd)
+{
+	struct macb *bp = netdev_priv(netdev);
+	struct ethtool_rx_flow_spec *fs = &cmd->fs;
+	struct ethtool_rx_fs_item *item, *newfs;
+	unsigned long flags;
+	int ret = -EINVAL;
+	bool added = false;
+
+	newfs = kmalloc(sizeof(*newfs), GFP_KERNEL);
+	if (newfs == NULL)
+		return -ENOMEM;
+	memcpy(&newfs->fs, fs, sizeof(newfs->fs));
+
+	netdev_dbg(netdev,
+			"Adding flow filter entry,type=%u,queue=%u,loc=%u,src=%08X,dst=%08X,ps=%u,pd=%u\n",
+			fs->flow_type, (int)fs->ring_cookie, fs->location,
+			htonl(fs->h_u.tcp_ip4_spec.ip4src),
+			htonl(fs->h_u.tcp_ip4_spec.ip4dst),
+			htons(fs->h_u.tcp_ip4_spec.psrc), htons(fs->h_u.tcp_ip4_spec.pdst));
+
+	spin_lock_irqsave(&bp->rx_fs_lock, flags);
+
+	/* find correct place to add in list */
+	list_for_each_entry(item, &bp->rx_fs_list.list, list) {
+		if (item->fs.location > newfs->fs.location) {
+			list_add_tail(&newfs->list, &item->list);
+			added = true;
+			break;
+		} else if (item->fs.location == fs->location) {
+			netdev_err(netdev, "Rule not added: location %d not free!\n",
+					fs->location);
+			ret = -EBUSY;
+			goto err;
+		}
+	}
+	if (!added)
+		list_add_tail(&newfs->list, &bp->rx_fs_list.list);
+
+	gem_prog_cmp_regs(bp, fs);
+	bp->rx_fs_list.count++;
+	/* enable filtering if NTUPLE on */
+	if (netdev->features & NETIF_F_NTUPLE)
+		gem_enable_flow_filters(bp, 1);
+
+	spin_unlock_irqrestore(&bp->rx_fs_lock, flags);
+	return 0;
+
+err:
+	spin_unlock_irqrestore(&bp->rx_fs_lock, flags);
+	kfree(newfs);
+	return ret;
+}
+
+static int gem_del_flow_filter(struct net_device *netdev,
+		struct ethtool_rxnfc *cmd)
+{
+	struct macb *bp = netdev_priv(netdev);
+	struct ethtool_rx_fs_item *item;
+	struct ethtool_rx_flow_spec *fs;
+	unsigned long flags;
+
+	spin_lock_irqsave(&bp->rx_fs_lock, flags);
+
+	list_for_each_entry(item, &bp->rx_fs_list.list, list) {
+		if (item->fs.location == cmd->fs.location) {
+			/* disable screener regs for the flow entry */
+			fs = &(item->fs);
+			netdev_dbg(netdev,
+					"Deleting flow filter entry,type=%u,queue=%u,loc=%u,src=%08X,dst=%08X,ps=%u,pd=%u\n",
+					fs->flow_type, (int)fs->ring_cookie, fs->location,
+					htonl(fs->h_u.tcp_ip4_spec.ip4src),
+					htonl(fs->h_u.tcp_ip4_spec.ip4dst),
+					htons(fs->h_u.tcp_ip4_spec.psrc),
+					htons(fs->h_u.tcp_ip4_spec.pdst));
+
+			gem_writel_n(bp, SCRT2, fs->location, 0);
+
+			list_del(&item->list);
+			bp->rx_fs_list.count--;
+			spin_unlock_irqrestore(&bp->rx_fs_lock, flags);
+			kfree(item);
+			return 0;
+		}
+	}
+
+	spin_unlock_irqrestore(&bp->rx_fs_lock, flags);
+	return -EINVAL;
+}
+
+static int gem_get_flow_entry(struct net_device *netdev,
+		struct ethtool_rxnfc *cmd)
+{
+	struct macb *bp = netdev_priv(netdev);
+	struct ethtool_rx_fs_item *item;
+
+	list_for_each_entry(item, &bp->rx_fs_list.list, list) {
+		if (item->fs.location == cmd->fs.location) {
+			memcpy(&cmd->fs, &item->fs, sizeof(cmd->fs));
+			return 0;
+		}
+	}
+	return -EINVAL;
+}
+
+static int gem_get_all_flow_entries(struct net_device *netdev,
+		struct ethtool_rxnfc *cmd, u32 *rule_locs)
+{
+	struct macb *bp = netdev_priv(netdev);
+	struct ethtool_rx_fs_item *item;
+	uint32_t cnt = 0;
+
+	list_for_each_entry(item, &bp->rx_fs_list.list, list) {
+		if (cnt == cmd->rule_cnt)
+			return -EMSGSIZE;
+		rule_locs[cnt] = item->fs.location;
+		cnt++;
+	}
+	cmd->data = bp->max_tuples;
+	cmd->rule_cnt = cnt;
+
+	return 0;
+}
+
+static int gem_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd,
+		u32 *rule_locs)
+{
+	struct macb *bp = netdev_priv(netdev);
+	int ret = 0;
+
+	switch (cmd->cmd) {
+	case ETHTOOL_GRXRINGS:
+		cmd->data = bp->num_queues;
+		break;
+	case ETHTOOL_GRXCLSRLCNT:
+		cmd->rule_cnt = bp->rx_fs_list.count;
+		break;
+	case ETHTOOL_GRXCLSRULE:
+		ret = gem_get_flow_entry(netdev, cmd);
+		break;
+	case ETHTOOL_GRXCLSRLALL:
+		ret = gem_get_all_flow_entries(netdev, cmd, rule_locs);
+		break;
+	default:
+		netdev_err(netdev,
+			  "Command parameter %d is not supported\n", cmd->cmd);
+		ret = -EOPNOTSUPP;
+	}
+
+	return ret;
+}
+
+static int gem_set_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd)
+{
+	struct macb *bp = netdev_priv(netdev);
+	int ret;
+
+	switch (cmd->cmd) {
+	case ETHTOOL_SRXCLSRLINS:
+		if ((cmd->fs.location >= bp->max_tuples)
+				|| (cmd->fs.ring_cookie >= bp->num_queues)) {
+			ret = -EINVAL;
+			break;
+		}
+		ret = gem_add_flow_filter(netdev, cmd);
+		break;
+	case ETHTOOL_SRXCLSRLDEL:
+		ret = gem_del_flow_filter(netdev, cmd);
+		break;
+	default:
+		netdev_err(netdev,
+			  "Command parameter %d is not supported\n", cmd->cmd);
+		ret = -EOPNOTSUPP;
+	}
+
+	return ret;
+}
+
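Once these handlers are wired into gem_ethtool_ops (next hunk), rules can
be managed with the standard ethtool n-tuple commands; for example
(interface name, addresses and locations are hypothetical):

	# enable the hardware flow filters (NETIF_F_NTUPLE)
	ethtool -K eth0 ntuple on
	# steer an exact 4-tuple match to RX queue 1, screener location 0
	ethtool -N eth0 flow-type tcp4 src-ip 192.168.0.10 dst-port 5001 \
		action 1 loc 0
	# list and delete installed rules
	ethtool -n eth0
	ethtool -N eth0 delete 0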
 static const struct ethtool_ops macb_ethtool_ops = {
 	.get_regs_len		= macb_get_regs_len,
 	.get_regs		= macb_get_regs,
@@ -2628,6 +2994,8 @@ static const struct ethtool_ops gem_ethtool_ops = {
 	.set_link_ksettings     = phy_ethtool_set_link_ksettings,
 	.get_ringparam		= macb_get_ringparam,
 	.set_ringparam		= macb_set_ringparam,
+	.get_rxnfc			= gem_get_rxnfc,
+	.set_rxnfc			= gem_set_rxnfc,
 };
 
 static int macb_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
@@ -2685,6 +3053,12 @@ static int macb_set_features(struct net_device *netdev,
 		gem_writel(bp, NCFGR, netcfg);
 	}
 
+	/* RX Flow Filters */
+	if ((changed & NETIF_F_NTUPLE) && macb_is_gem(bp)) {
+		bool turn_on = features & NETIF_F_NTUPLE;
+
+		gem_enable_flow_filters(bp, turn_on);
+	}
 	return 0;
 }
 
@@ -2850,7 +3224,7 @@ static int macb_init(struct platform_device *pdev)
 	struct macb *bp = netdev_priv(dev);
 	struct macb_queue *queue;
 	int err;
-	u32 val;
+	u32 val, reg;
 
 	bp->tx_ring_size = DEFAULT_TX_RING_SIZE;
 	bp->rx_ring_size = DEFAULT_RX_RING_SIZE;
@@ -2865,15 +3239,20 @@ static int macb_init(struct platform_device *pdev)
 
 		queue = &bp->queues[q];
 		queue->bp = bp;
+		netif_napi_add(dev, &queue->napi, macb_poll, 64);
 		if (hw_q) {
 			queue->ISR  = GEM_ISR(hw_q - 1);
 			queue->IER  = GEM_IER(hw_q - 1);
 			queue->IDR  = GEM_IDR(hw_q - 1);
 			queue->IMR  = GEM_IMR(hw_q - 1);
 			queue->TBQP = GEM_TBQP(hw_q - 1);
+			queue->RBQP = GEM_RBQP(hw_q - 1);
+			queue->RBQS = GEM_RBQS(hw_q - 1);
 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
-			if (bp->hw_dma_cap & HW_DMA_CAP_64B)
+			if (bp->hw_dma_cap & HW_DMA_CAP_64B) {
 				queue->TBQPH = GEM_TBQPH(hw_q - 1);
+				queue->RBQPH = GEM_RBQPH(hw_q - 1);
+			}
 #endif
 		} else {
 			/* queue0 uses legacy registers */
@@ -2882,9 +3261,12 @@ static int macb_init(struct platform_device *pdev)
 			queue->IDR  = MACB_IDR;
 			queue->IMR  = MACB_IMR;
 			queue->TBQP = MACB_TBQP;
+			queue->RBQP = MACB_RBQP;
 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
-			if (bp->hw_dma_cap & HW_DMA_CAP_64B)
+			if (bp->hw_dma_cap & HW_DMA_CAP_64B) {
 				queue->TBQPH = MACB_TBQPH;
+				queue->RBQPH = MACB_RBQPH;
+			}
 #endif
 		}
 
@@ -2908,7 +3290,6 @@ static int macb_init(struct platform_device *pdev)
 	}
 
 	dev->netdev_ops = &macb_netdev_ops;
-	netif_napi_add(dev, &bp->napi, macb_poll, 64);
 
 	/* setup appropriated routines according to adapter type */
 	if (macb_is_gem(bp)) {
@@ -2941,6 +3322,30 @@ static int macb_init(struct platform_device *pdev)
 		dev->hw_features &= ~NETIF_F_SG;
 	dev->features = dev->hw_features;
 
+	/* Check RX Flow Filters support.
+	 * Max Rx flows set by availability of screeners & compare regs:
+	 * each 4-tuple definition requires 1 T2 screener reg + 3 compare regs
+	 */
+	reg = gem_readl(bp, DCFG8);
+	bp->max_tuples = min((GEM_BFEXT(SCR2CMP, reg) / 3),
+			GEM_BFEXT(T2SCR, reg));
+	if (bp->max_tuples > 0) {
+		/* also needs one ethtype match to check IPv4 */
+		if (GEM_BFEXT(SCR2ETH, reg) > 0) {
+			/* program this reg now */
+			reg = 0;
+			reg = GEM_BFINS(ETHTCMP, (uint16_t)ETH_P_IP, reg);
+			gem_writel_n(bp, ETHT, SCRT2_ETHT, reg);
+			/* Filtering is supported in hw, but leave it disabled by default */
+			dev->hw_features |= NETIF_F_NTUPLE;
+			/* init Rx flow definitions */
+			INIT_LIST_HEAD(&bp->rx_fs_list.list);
+			bp->rx_fs_list.count = 0;
+			spin_lock_init(&bp->rx_fs_lock);
+		} else
+			bp->max_tuples = 0;
+	}
+
 	if (!(bp->caps & MACB_CAPS_USRIO_DISABLED)) {
 		val = 0;
 		if (bp->phy_interface == PHY_INTERFACE_MODE_RGMII)
@@ -2977,34 +3382,35 @@ static int macb_init(struct platform_device *pdev)
 static int at91ether_start(struct net_device *dev)
 {
 	struct macb *lp = netdev_priv(dev);
+	struct macb_queue *q = &lp->queues[0];
 	struct macb_dma_desc *desc;
 	dma_addr_t addr;
 	u32 ctl;
 	int i;
 
-	lp->rx_ring = dma_alloc_coherent(&lp->pdev->dev,
+	q->rx_ring = dma_alloc_coherent(&lp->pdev->dev,
 					 (AT91ETHER_MAX_RX_DESCR *
 					  macb_dma_desc_get_size(lp)),
-					 &lp->rx_ring_dma, GFP_KERNEL);
-	if (!lp->rx_ring)
+					 &q->rx_ring_dma, GFP_KERNEL);
+	if (!q->rx_ring)
 		return -ENOMEM;
 
-	lp->rx_buffers = dma_alloc_coherent(&lp->pdev->dev,
+	q->rx_buffers = dma_alloc_coherent(&lp->pdev->dev,
 					    AT91ETHER_MAX_RX_DESCR *
 					    AT91ETHER_MAX_RBUFF_SZ,
-					    &lp->rx_buffers_dma, GFP_KERNEL);
-	if (!lp->rx_buffers) {
+					    &q->rx_buffers_dma, GFP_KERNEL);
+	if (!q->rx_buffers) {
 		dma_free_coherent(&lp->pdev->dev,
 				  AT91ETHER_MAX_RX_DESCR *
 				  macb_dma_desc_get_size(lp),
-				  lp->rx_ring, lp->rx_ring_dma);
-		lp->rx_ring = NULL;
+				  q->rx_ring, q->rx_ring_dma);
+		q->rx_ring = NULL;
 		return -ENOMEM;
 	}
 
-	addr = lp->rx_buffers_dma;
+	addr = q->rx_buffers_dma;
 	for (i = 0; i < AT91ETHER_MAX_RX_DESCR; i++) {
-		desc = macb_rx_desc(lp, i);
+		desc = macb_rx_desc(q, i);
 		macb_set_addr(lp, desc, addr);
 		desc->ctrl = 0;
 		addr += AT91ETHER_MAX_RBUFF_SZ;
@@ -3014,10 +3420,10 @@ static int at91ether_start(struct net_device *dev)
 	desc->addr |= MACB_BIT(RX_WRAP);
 
 	/* Reset buffer index */
-	lp->rx_tail = 0;
+	q->rx_tail = 0;
 
 	/* Program address of descriptor list in Rx Buffer Queue register */
-	macb_writel(lp, RBQP, lp->rx_ring_dma);
+	macb_writel(lp, RBQP, q->rx_ring_dma);
 
 	/* Enable Receive and Transmit */
 	ctl = macb_readl(lp, NCR);
@@ -3064,6 +3470,7 @@ static int at91ether_open(struct net_device *dev)
 static int at91ether_close(struct net_device *dev)
 {
 	struct macb *lp = netdev_priv(dev);
+	struct macb_queue *q = &lp->queues[0];
 	u32 ctl;
 
 	/* Disable Receiver and Transmitter */
@@ -3084,13 +3491,13 @@ static int at91ether_close(struct net_device *dev)
 	dma_free_coherent(&lp->pdev->dev,
 			  AT91ETHER_MAX_RX_DESCR *
 			  macb_dma_desc_get_size(lp),
-			  lp->rx_ring, lp->rx_ring_dma);
-	lp->rx_ring = NULL;
+			  q->rx_ring, q->rx_ring_dma);
+	q->rx_ring = NULL;
 
 	dma_free_coherent(&lp->pdev->dev,
 			  AT91ETHER_MAX_RX_DESCR * AT91ETHER_MAX_RBUFF_SZ,
-			  lp->rx_buffers, lp->rx_buffers_dma);
-	lp->rx_buffers = NULL;
+			  q->rx_buffers, q->rx_buffers_dma);
+	q->rx_buffers = NULL;
 
 	return 0;
 }
@@ -3134,14 +3541,15 @@ static int at91ether_start_xmit(struct sk_buff *skb, struct net_device *dev)
 static void at91ether_rx(struct net_device *dev)
 {
 	struct macb *lp = netdev_priv(dev);
+	struct macb_queue *q = &lp->queues[0];
 	struct macb_dma_desc *desc;
 	unsigned char *p_recv;
 	struct sk_buff *skb;
 	unsigned int pktlen;
 
-	desc = macb_rx_desc(lp, lp->rx_tail);
+	desc = macb_rx_desc(q, q->rx_tail);
 	while (desc->addr & MACB_BIT(RX_USED)) {
-		p_recv = lp->rx_buffers + lp->rx_tail * AT91ETHER_MAX_RBUFF_SZ;
+		p_recv = q->rx_buffers + q->rx_tail * AT91ETHER_MAX_RBUFF_SZ;
 		pktlen = MACB_BF(RX_FRMLEN, desc->ctrl);
 		skb = netdev_alloc_skb(dev, pktlen + 2);
 		if (skb) {
@@ -3163,12 +3571,12 @@ static void at91ether_rx(struct net_device *dev)
 		desc->addr &= ~MACB_BIT(RX_USED);
 
 		/* wrap after last buffer */
-		if (lp->rx_tail == AT91ETHER_MAX_RX_DESCR - 1)
-			lp->rx_tail = 0;
+		if (q->rx_tail == AT91ETHER_MAX_RX_DESCR - 1)
+			q->rx_tail = 0;
 		else
-			lp->rx_tail++;
+			q->rx_tail++;
 
-		desc = macb_rx_desc(lp, lp->rx_tail);
+		desc = macb_rx_desc(q, q->rx_tail);
 	}
 }
 
@@ -3394,7 +3802,6 @@ static int macb_probe(struct platform_device *pdev)
 					      = macb_config->clk_init;
 	int (*init)(struct platform_device *) = macb_config->init;
 	struct device_node *np = pdev->dev.of_node;
-	struct device_node *phy_node;
 	struct clk *pclk, *hclk = NULL, *tx_clk = NULL, *rx_clk = NULL;
 	unsigned int queue_mask, num_queues;
 	struct macb_platform_data *pdata;
@@ -3500,18 +3907,6 @@ static int macb_probe(struct platform_device *pdev)
 	else
 		macb_get_hwaddr(bp);
 
-	/* Power up the PHY if there is a GPIO reset */
-	phy_node =  of_get_next_available_child(np, NULL);
-	if (phy_node) {
-		int gpio = of_get_named_gpio(phy_node, "reset-gpios", 0);
-
-		if (gpio_is_valid(gpio)) {
-			bp->reset_gpio = gpio_to_desc(gpio);
-			gpiod_direction_output(bp->reset_gpio, 1);
-		}
-	}
-	of_node_put(phy_node);
-
 	err = of_get_phy_mode(np);
 	if (err < 0) {
 		pdata = dev_get_platdata(&pdev->dev);
@@ -3558,10 +3953,6 @@ static int macb_probe(struct platform_device *pdev)
 		of_phy_deregister_fixed_link(np);
 	mdiobus_free(bp->mii_bus);
 
-	/* Shutdown the PHY if there is a GPIO reset */
-	if (bp->reset_gpio)
-		gpiod_set_value(bp->reset_gpio, 0);
-
 err_out_free_netdev:
 	free_netdev(dev);
 
@@ -3592,10 +3983,6 @@ static int macb_remove(struct platform_device *pdev)
 		dev->phydev = NULL;
 		mdiobus_free(bp->mii_bus);
 
-		/* Shutdown the PHY if there is a GPIO reset */
-		if (bp->reset_gpio)
-			gpiod_set_value(bp->reset_gpio, 0);
-
 		unregister_netdev(dev);
 		clk_disable_unprepare(bp->tx_clk);
 		clk_disable_unprepare(bp->hclk);
diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_device.c b/drivers/net/ethernet/cavium/liquidio/octeon_device.c
index 2c615ab..f38abf6 100644
--- a/drivers/net/ethernet/cavium/liquidio/octeon_device.c
+++ b/drivers/net/ethernet/cavium/liquidio/octeon_device.c
@@ -702,12 +702,10 @@ static struct octeon_device *octeon_allocate_device_mem(u32 pci_id,
 	size = octdevsize + priv_size + configsize +
 		(sizeof(struct octeon_dispatch) * DISPATCH_LIST_SIZE);
 
-	buf = vmalloc(size);
+	buf = vzalloc(size);
 	if (!buf)
 		return NULL;
 
-	memset(buf, 0, size);
-
 	oct = (struct octeon_device *)buf;
 	oct->priv = (void *)(buf + octdevsize);
 	oct->chip = (void *)(buf + octdevsize + priv_size);
@@ -840,10 +838,9 @@ octeon_allocate_ioq_vector(struct octeon_device  *oct)
 
 	size = sizeof(struct octeon_ioq_vector) * num_ioqs;
 
-	oct->ioq_vector = vmalloc(size);
+	oct->ioq_vector = vzalloc(size);
 	if (!oct->ioq_vector)
 		return 1;
-	memset(oct->ioq_vector, 0, size);
 	for (i = 0; i < num_ioqs; i++) {
 		ioq_vector		= &oct->ioq_vector[i];
 		ioq_vector->oct_dev	= oct;
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_main.c b/drivers/net/ethernet/cavium/thunder/nicvf_main.c
index a063c36..21618d0 100644
--- a/drivers/net/ethernet/cavium/thunder/nicvf_main.c
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_main.c
@@ -65,6 +65,11 @@ module_param(cpi_alg, int, S_IRUGO);
 MODULE_PARM_DESC(cpi_alg,
 		 "PFC algorithm (0=none, 1=VLAN, 2=VLAN16, 3=IP Diffserv)");
 
+struct nicvf_xdp_tx {
+	u64 dma_addr;
+	u8  qidx;
+};
+
 static inline u8 nicvf_netdev_qidx(struct nicvf *nic, u8 qidx)
 {
 	if (nic->sqs_mode)
@@ -500,14 +505,29 @@ static int nicvf_init_resources(struct nicvf *nic)
 	return 0;
 }
 
+static void nicvf_unmap_page(struct nicvf *nic, struct page *page, u64 dma_addr)
+{
+	/* Check if it's a recycled page; if not, unmap the DMA mapping.
+	 * A recycled page holds an extra reference.
+	 */
+	if (page_ref_count(page) == 1) {
+		dma_addr &= PAGE_MASK;
+		dma_unmap_page_attrs(&nic->pdev->dev, dma_addr,
+				     RCV_FRAG_LEN + XDP_HEADROOM,
+				     DMA_FROM_DEVICE,
+				     DMA_ATTR_SKIP_CPU_SYNC);
+	}
+}
+
 static inline bool nicvf_xdp_rx(struct nicvf *nic, struct bpf_prog *prog,
 				struct cqe_rx_t *cqe_rx, struct snd_queue *sq,
-				struct sk_buff **skb)
+				struct rcv_queue *rq, struct sk_buff **skb)
 {
 	struct xdp_buff xdp;
 	struct page *page;
+	struct nicvf_xdp_tx *xdp_tx = NULL;
 	u32 action;
-	u16 len, offset = 0;
+	u16 len, err, offset = 0;
 	u64 dma_addr, cpu_addr;
 	void *orig_data;
 
@@ -521,10 +541,11 @@ static inline bool nicvf_xdp_rx(struct nicvf *nic, struct bpf_prog *prog,
 	cpu_addr = (u64)phys_to_virt(cpu_addr);
 	page = virt_to_page((void *)cpu_addr);
 
-	xdp.data_hard_start = page_address(page);
+	xdp.data_hard_start = page_address(page) + RCV_BUF_HEADROOM;
 	xdp.data = (void *)cpu_addr;
 	xdp_set_data_meta_invalid(&xdp);
 	xdp.data_end = xdp.data + len;
+	xdp.rxq = &rq->xdp_rxq;
 	orig_data = xdp.data;
 
 	rcu_read_lock();
@@ -540,18 +561,7 @@ static inline bool nicvf_xdp_rx(struct nicvf *nic, struct bpf_prog *prog,
 
 	switch (action) {
 	case XDP_PASS:
-		/* Check if it's a recycled page, if not
-		 * unmap the DMA mapping.
-		 *
-		 * Recycled page holds an extra reference.
-		 */
-		if (page_ref_count(page) == 1) {
-			dma_addr &= PAGE_MASK;
-			dma_unmap_page_attrs(&nic->pdev->dev, dma_addr,
-					     RCV_FRAG_LEN + XDP_PACKET_HEADROOM,
-					     DMA_FROM_DEVICE,
-					     DMA_ATTR_SKIP_CPU_SYNC);
-		}
+		nicvf_unmap_page(nic, page, dma_addr);
 
 		/* Build SKB and pass on packet to network stack */
 		*skb = build_skb(xdp.data,
@@ -564,6 +574,20 @@ static inline bool nicvf_xdp_rx(struct nicvf *nic, struct bpf_prog *prog,
 	case XDP_TX:
 		nicvf_xdp_sq_append_pkt(nic, sq, (u64)xdp.data, dma_addr, len);
 		return true;
+	case XDP_REDIRECT:
+		/* Save DMA address for use while transmitting */
+		xdp_tx = (struct nicvf_xdp_tx *)page_address(page);
+		xdp_tx->dma_addr = dma_addr;
+		xdp_tx->qidx = nicvf_netdev_qidx(nic, cqe_rx->rq_idx);
+
+		err = xdp_do_redirect(nic->pnicvf->netdev, &xdp, prog);
+		if (!err)
+			return true;
+
+		/* Free the page on error */
+		nicvf_unmap_page(nic, page, dma_addr);
+		put_page(page);
+		break;
 	default:
 		bpf_warn_invalid_xdp_action(action);
 		/* fall through */
@@ -571,18 +595,7 @@ static inline bool nicvf_xdp_rx(struct nicvf *nic, struct bpf_prog *prog,
 		trace_xdp_exception(nic->netdev, prog, action);
 		/* fall through */
 	case XDP_DROP:
-		/* Check if it's a recycled page, if not
-		 * unmap the DMA mapping.
-		 *
-		 * Recycled page holds an extra reference.
-		 */
-		if (page_ref_count(page) == 1) {
-			dma_addr &= PAGE_MASK;
-			dma_unmap_page_attrs(&nic->pdev->dev, dma_addr,
-					     RCV_FRAG_LEN + XDP_PACKET_HEADROOM,
-					     DMA_FROM_DEVICE,
-					     DMA_ATTR_SKIP_CPU_SYNC);
-		}
+		nicvf_unmap_page(nic, page, dma_addr);
 		put_page(page);
 		return true;
 	}
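With XDP_REDIRECT handled, this path can be exercised by any redirecting
XDP program. A minimal sketch, assuming libbpf-style bpf_helpers.h and a
placeholder target ifindex:

	#include <linux/bpf.h>
	#include <bpf/bpf_helpers.h>

	#define TARGET_IFINDEX 3	/* hypothetical egress device */

	SEC("xdp")
	int xdp_redirect_prog(struct xdp_md *ctx)
	{
		return bpf_redirect(TARGET_IFINDEX, 0);
	}

	char _license[] SEC("license") = "GPL";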
@@ -686,7 +699,8 @@ static inline void nicvf_set_rxhash(struct net_device *netdev,
 
 static void nicvf_rcv_pkt_handler(struct net_device *netdev,
 				  struct napi_struct *napi,
-				  struct cqe_rx_t *cqe_rx, struct snd_queue *sq)
+				  struct cqe_rx_t *cqe_rx,
+				  struct snd_queue *sq, struct rcv_queue *rq)
 {
 	struct sk_buff *skb = NULL;
 	struct nicvf *nic = netdev_priv(netdev);
@@ -712,7 +726,7 @@ static void nicvf_rcv_pkt_handler(struct net_device *netdev,
 	/* For XDP, ignore pkts spanning multiple pages */
 	if (nic->xdp_prog && (cqe_rx->rb_cnt == 1)) {
 		/* Packet consumed by XDP */
-		if (nicvf_xdp_rx(snic, nic->xdp_prog, cqe_rx, sq, &skb))
+		if (nicvf_xdp_rx(snic, nic->xdp_prog, cqe_rx, sq, rq, &skb))
 			return;
 	} else {
 		skb = nicvf_get_rcv_skb(snic, cqe_rx,
@@ -769,6 +783,7 @@ static int nicvf_cq_intr_handler(struct net_device *netdev, u8 cq_idx,
 	struct cqe_rx_t *cq_desc;
 	struct netdev_queue *txq;
 	struct snd_queue *sq = &qs->sq[cq_idx];
+	struct rcv_queue *rq = &qs->rq[cq_idx];
 	unsigned int tx_pkts = 0, tx_bytes = 0, txq_idx;
 
 	spin_lock_bh(&cq->lock);
@@ -799,7 +814,7 @@ static int nicvf_cq_intr_handler(struct net_device *netdev, u8 cq_idx,
 
 		switch (cq_desc->cqe_type) {
 		case CQE_TYPE_RX:
-			nicvf_rcv_pkt_handler(netdev, napi, cq_desc, sq);
+			nicvf_rcv_pkt_handler(netdev, napi, cq_desc, sq, rq);
 			work_done++;
 		break;
 		case CQE_TYPE_SEND:
@@ -1764,6 +1779,50 @@ static int nicvf_xdp(struct net_device *netdev, struct netdev_bpf *xdp)
 	}
 }
 
+static int nicvf_xdp_xmit(struct net_device *netdev, struct xdp_buff *xdp)
+{
+	struct nicvf *nic = netdev_priv(netdev);
+	struct nicvf *snic = nic;
+	struct nicvf_xdp_tx *xdp_tx;
+	struct snd_queue *sq;
+	struct page *page;
+	int err, qidx;
+
+	if (!netif_running(netdev) || !nic->xdp_prog)
+		return -EINVAL;
+
+	page = virt_to_page(xdp->data);
+	xdp_tx = (struct nicvf_xdp_tx *)page_address(page);
+	qidx = xdp_tx->qidx;
+
+	if (xdp_tx->qidx >= nic->xdp_tx_queues)
+		return -EINVAL;
+
+	/* Get secondary Qset's info */
+	if (xdp_tx->qidx >= MAX_SND_QUEUES_PER_QS) {
+		qidx = xdp_tx->qidx / MAX_SND_QUEUES_PER_QS;
+		snic = (struct nicvf *)nic->snicvf[qidx - 1];
+		if (!snic)
+			return -EINVAL;
+		qidx = xdp_tx->qidx % MAX_SND_QUEUES_PER_QS;
+	}
+
+	sq = &snic->qs->sq[qidx];
+	err = nicvf_xdp_sq_append_pkt(snic, sq, (u64)xdp->data,
+				      xdp_tx->dma_addr,
+				      xdp->data_end - xdp->data);
+	if (err)
+		return -ENOMEM;
+
+	nicvf_xdp_sq_doorbell(snic, sq, qidx);
+	return 0;
+}
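
nicvf_xdp_xmit() above splits a global XDP TX queue index into a Qset number (primary VF first, secondary VFs after it) and a queue index local to that Qset with a plain divide/modulo. A standalone sketch of the mapping; the value 8 for MAX_SND_QUEUES_PER_QS is an assumption for the demo:

#include <stdio.h>

#define MAX_SND_QUEUES_PER_QS	8	/* demo assumption */

int main(void)
{
	int qidx;

	for (qidx = 0; qidx < 20; qidx += 7) {
		int qset  = qidx / MAX_SND_QUEUES_PER_QS;
		int local = qidx % MAX_SND_QUEUES_PER_QS;

		printf("qidx %2d -> qset %d, local sq %d\n",
		       qidx, qset, local);
	}
	return 0;
}
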
+
+static void nicvf_xdp_flush(struct net_device *dev)
+{
+	return;
+}
+
 static const struct net_device_ops nicvf_netdev_ops = {
 	.ndo_open		= nicvf_open,
 	.ndo_stop		= nicvf_stop,
@@ -1775,6 +1834,8 @@ static const struct net_device_ops nicvf_netdev_ops = {
 	.ndo_fix_features       = nicvf_fix_features,
 	.ndo_set_features       = nicvf_set_features,
 	.ndo_bpf		= nicvf_xdp,
+	.ndo_xdp_xmit		= nicvf_xdp_xmit,
+	.ndo_xdp_flush          = nicvf_xdp_flush,
 };
 
 static int nicvf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
@@ -1833,6 +1894,11 @@ static int nicvf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 	nic->pdev = pdev;
 	nic->pnicvf = nic;
 	nic->max_queues = qcount;
+	/* If the number of CPUs is too low, there won't be any queues
+	 * left for XDP_TX, hence double it.
+	 */
+	if (!nic->t88)
+		nic->max_queues *= 2;
 
 	/* MAP VF's configuration registers */
 	nic->reg_base = pcim_iomap(pdev, PCI_CFG_REG_BAR_NUM, 0);
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_queues.c b/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
index a3d12db..14e62c6 100644
--- a/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
@@ -204,7 +204,7 @@ static inline int nicvf_alloc_rcv_buffer(struct nicvf *nic, struct rbdr *rbdr,
 
 	/* Reserve space for header modifications by BPF program */
 	if (rbdr->is_xdp)
-		buf_len += XDP_PACKET_HEADROOM;
+		buf_len += XDP_HEADROOM;
 
 	/* Check if it's recycled */
 	if (pgcache)
@@ -224,8 +224,9 @@ static inline int nicvf_alloc_rcv_buffer(struct nicvf *nic, struct rbdr *rbdr,
 			nic->rb_page = NULL;
 			return -ENOMEM;
 		}
+
 		if (pgcache)
-			pgcache->dma_addr = *rbuf + XDP_PACKET_HEADROOM;
+			pgcache->dma_addr = *rbuf + XDP_HEADROOM;
 		nic->rb_page_offset += buf_len;
 	}
 
@@ -759,6 +760,7 @@ static void nicvf_rcv_queue_config(struct nicvf *nic, struct queue_set *qs,
 
 	if (!rq->enable) {
 		nicvf_reclaim_rcv_queue(nic, qs, qidx);
+		xdp_rxq_info_unreg(&rq->xdp_rxq);
 		return;
 	}
 
@@ -771,6 +773,9 @@ static void nicvf_rcv_queue_config(struct nicvf *nic, struct queue_set *qs,
 	/* all writes of RBDR data to be loaded into L2 Cache as well*/
 	rq->caching = 1;
 
+	/* Driver has no proper error path for a failed XDP RX-queue info reg */
+	WARN_ON(xdp_rxq_info_reg(&rq->xdp_rxq, nic->netdev, qidx) < 0);
+
 	/* Send a mailbox msg to PF to config RQ */
 	mbx.rq.msg = NIC_MBOX_MSG_RQ_CFG;
 	mbx.rq.qs_num = qs->vnic_id;
@@ -1236,7 +1241,7 @@ int nicvf_xdp_sq_append_pkt(struct nicvf *nic, struct snd_queue *sq,
 	int qentry;
 
 	if (subdesc_cnt > sq->xdp_free_cnt)
-		return 0;
+		return -1;
 
 	qentry = nicvf_get_sq_desc(sq, subdesc_cnt);
 
@@ -1247,7 +1252,7 @@ int nicvf_xdp_sq_append_pkt(struct nicvf *nic, struct snd_queue *sq,
 
 	sq->xdp_desc_cnt += subdesc_cnt;
 
-	return 1;
+	return 0;
 }
 
 /* Calculate no of SQ subdescriptors needed to transmit all
@@ -1625,7 +1630,7 @@ static void nicvf_unmap_rcv_buffer(struct nicvf *nic, u64 dma_addr,
 		if (page_ref_count(page) != 1)
 			return;
 
-		len += XDP_PACKET_HEADROOM;
+		len += XDP_HEADROOM;
 		/* Receive buffers in XDP mode are mapped from page start */
 		dma_addr &= PAGE_MASK;
 	}
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_queues.h b/drivers/net/ethernet/cavium/thunder/nicvf_queues.h
index 67d1a32..7d1e4e2 100644
--- a/drivers/net/ethernet/cavium/thunder/nicvf_queues.h
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_queues.h
@@ -11,6 +11,8 @@
 
 #include <linux/netdevice.h>
 #include <linux/iommu.h>
+#include <linux/bpf.h>
+#include <net/xdp.h>
 #include "q_struct.h"
 
 #define MAX_QUEUE_SET			128
@@ -92,6 +94,9 @@
 #define RCV_FRAG_LEN	 (SKB_DATA_ALIGN(DMA_BUFFER_LEN + NET_SKB_PAD) + \
 			 SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
 
+#define RCV_BUF_HEADROOM	128 /* To store DMA address for XDP redirect */
+#define XDP_HEADROOM		(XDP_PACKET_HEADROOM + RCV_BUF_HEADROOM)
+
 #define MAX_CQES_FOR_TX		((SND_QUEUE_LEN / MIN_SQ_DESC_PER_PKT_XMIT) * \
 				 MAX_CQE_PER_PKT_XMIT)
 
@@ -251,6 +256,7 @@ struct rcv_queue {
 	u8		start_qs_rbdr_idx; /* RBDR idx in the above QS */
 	u8		caching;
 	struct		rx_tx_queue_stats stats;
+	struct xdp_rxq_info xdp_rxq;
 } ____cacheline_aligned_in_smp;
 
 struct cmp_queue {
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cudbg_entity.h b/drivers/net/ethernet/chelsio/cxgb4/cudbg_entity.h
index 6056899..b57acb8 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cudbg_entity.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cudbg_entity.h
@@ -18,17 +18,15 @@
 #ifndef __CUDBG_ENTITY_H__
 #define __CUDBG_ENTITY_H__
 
-#define EDC0_FLAG 3
-#define EDC1_FLAG 4
+#define EDC0_FLAG 0
+#define EDC1_FLAG 1
+#define MC_FLAG 2
+#define MC0_FLAG 3
+#define MC1_FLAG 4
+#define HMA_FLAG 5
 
 #define CUDBG_ENTITY_SIGNATURE 0xCCEDB001
 
-struct card_mem {
-	u16 size_edc0;
-	u16 size_edc1;
-	u16 mem_flag;
-};
-
 struct cudbg_mbox_log {
 	struct mbox_cmd entry;
 	u32 hi[MBOX_LEN / 8];
@@ -87,6 +85,48 @@ struct cudbg_tp_la {
 	u8 data[0];
 };
 
+static const char * const cudbg_region[] = {
+	"DBQ contexts:", "IMSG contexts:", "FLM cache:", "TCBs:",
+	"Pstructs:", "Timers:", "Rx FL:", "Tx FL:", "Pstruct FL:",
+	"Tx payload:", "Rx payload:", "LE hash:", "iSCSI region:",
+	"TDDP region:", "TPT region:", "STAG region:", "RQ region:",
+	"RQUDP region:", "PBL region:", "TXPBL region:",
+	"DBVFIFO region:", "ULPRX state:", "ULPTX state:",
+	"On-chip queues:"
+};
+
+/* Memory region info relative to current memory (i.e. wrt 0). */
+struct cudbg_region_info {
+	bool exist; /* Does region exist in current memory? */
+	u32 start;  /* Start wrt 0 */
+	u32 end;    /* End wrt 0 */
+};
+
+struct cudbg_mem_desc {
+	u32 base;
+	u32 limit;
+	u32 idx;
+};
+
+struct cudbg_meminfo {
+	struct cudbg_mem_desc avail[4];
+	struct cudbg_mem_desc mem[ARRAY_SIZE(cudbg_region) + 3];
+	u32 avail_c;
+	u32 mem_c;
+	u32 up_ram_lo;
+	u32 up_ram_hi;
+	u32 up_extmem2_lo;
+	u32 up_extmem2_hi;
+	u32 rx_pages_data[3];
+	u32 tx_pages_data[4];
+	u32 p_structs;
+	u32 reserved[12];
+	u32 port_used[4];
+	u32 port_alloc[4];
+	u32 loopback_used[NCHAN];
+	u32 loopback_alloc[NCHAN];
+};
+
 struct cudbg_cim_pif_la {
 	int size;
 	u8 data[0];
@@ -145,6 +185,7 @@ struct cudbg_tid_info_region_rev1 {
 	u32 reserved[16];
 };
 
+#define CUDBG_LOWMEM_MAX_CTXT_QIDS 256
 #define CUDBG_MAX_FL_QIDS 1024
 
 struct cudbg_ch_cntxt {
@@ -334,6 +375,25 @@ static const u32 t5_pm_tx_array[][IREG_NUM_ELEM] = {
 	{0x8FF0, 0x8FF4, 0x10021, 0x1D}, /* t5_pm_tx_regs_10021_to_1003c */
 };
 
+#define CUDBG_NUM_PCIE_CONFIG_REGS 0x61
+
+static const u32 t5_pcie_config_array[][2] = {
+	{0x0, 0x34},
+	{0x3c, 0x40},
+	{0x50, 0x64},
+	{0x70, 0x80},
+	{0x94, 0xa0},
+	{0xb0, 0xb8},
+	{0xd0, 0xd4},
+	{0x100, 0x128},
+	{0x140, 0x148},
+	{0x150, 0x164},
+	{0x170, 0x178},
+	{0x180, 0x194},
+	{0x1a0, 0x1b8},
+	{0x1c0, 0x208},
+};
+
 static const u32 t6_ma_ireg_array[][IREG_NUM_ELEM] = {
 	{0x78f8, 0x78fc, 0xa000, 23}, /* t6_ma_regs_a000_to_a016 */
 	{0x78f8, 0x78fc, 0xa400, 30}, /* t6_ma_regs_a400_to_a41e */
@@ -345,37 +405,55 @@ static const u32 t6_ma_ireg_array2[][IREG_NUM_ELEM] = {
 	{0x78f8, 0x78fc, 0xe640, 13} /* t6_ma_regs_e640_to_e7c0 */
 };
 
-static const u32 t6_up_cim_reg_array[][IREG_NUM_ELEM] = {
-	{0x7b50, 0x7b54, 0x2000, 0x20}, /* up_cim_2000_to_207c */
-	{0x7b50, 0x7b54, 0x2080, 0x1d}, /* up_cim_2080_to_20fc */
-	{0x7b50, 0x7b54, 0x00, 0x20}, /* up_cim_00_to_7c */
-	{0x7b50, 0x7b54, 0x80, 0x20}, /* up_cim_80_to_fc */
-	{0x7b50, 0x7b54, 0x100, 0x11}, /* up_cim_100_to_14c */
-	{0x7b50, 0x7b54, 0x200, 0x10}, /* up_cim_200_to_23c */
-	{0x7b50, 0x7b54, 0x240, 0x2}, /* up_cim_240_to_244 */
-	{0x7b50, 0x7b54, 0x250, 0x2}, /* up_cim_250_to_254 */
-	{0x7b50, 0x7b54, 0x260, 0x2}, /* up_cim_260_to_264 */
-	{0x7b50, 0x7b54, 0x270, 0x2}, /* up_cim_270_to_274 */
-	{0x7b50, 0x7b54, 0x280, 0x20}, /* up_cim_280_to_2fc */
-	{0x7b50, 0x7b54, 0x300, 0x20}, /* up_cim_300_to_37c */
-	{0x7b50, 0x7b54, 0x380, 0x14}, /* up_cim_380_to_3cc */
-
+static const u32 t6_up_cim_reg_array[][IREG_NUM_ELEM + 1] = {
+	{0x7b50, 0x7b54, 0x2000, 0x20, 0}, /* up_cim_2000_to_207c */
+	{0x7b50, 0x7b54, 0x2080, 0x1d, 0}, /* up_cim_2080_to_20fc */
+	{0x7b50, 0x7b54, 0x00, 0x20, 0}, /* up_cim_00_to_7c */
+	{0x7b50, 0x7b54, 0x80, 0x20, 0}, /* up_cim_80_to_fc */
+	{0x7b50, 0x7b54, 0x100, 0x11, 0}, /* up_cim_100_to_14c */
+	{0x7b50, 0x7b54, 0x200, 0x10, 0}, /* up_cim_200_to_23c */
+	{0x7b50, 0x7b54, 0x240, 0x2, 0}, /* up_cim_240_to_244 */
+	{0x7b50, 0x7b54, 0x250, 0x2, 0}, /* up_cim_250_to_254 */
+	{0x7b50, 0x7b54, 0x260, 0x2, 0}, /* up_cim_260_to_264 */
+	{0x7b50, 0x7b54, 0x270, 0x2, 0}, /* up_cim_270_to_274 */
+	{0x7b50, 0x7b54, 0x280, 0x20, 0}, /* up_cim_280_to_2fc */
+	{0x7b50, 0x7b54, 0x300, 0x20, 0}, /* up_cim_300_to_37c */
+	{0x7b50, 0x7b54, 0x380, 0x14, 0}, /* up_cim_380_to_3cc */
+	{0x7b50, 0x7b54, 0x2900, 0x4, 0x4}, /* up_cim_2900_to_3d40 */
+	{0x7b50, 0x7b54, 0x2904, 0x4, 0x4}, /* up_cim_2904_to_3d44 */
+	{0x7b50, 0x7b54, 0x2908, 0x4, 0x4}, /* up_cim_2908_to_3d48 */
+	{0x7b50, 0x7b54, 0x2910, 0x4, 0x4}, /* up_cim_2910_to_3d4c */
+	{0x7b50, 0x7b54, 0x2914, 0x4, 0x4}, /* up_cim_2914_to_3d50 */
+	{0x7b50, 0x7b54, 0x2920, 0x10, 0x10}, /* up_cim_2920_to_2a10 */
+	{0x7b50, 0x7b54, 0x2924, 0x10, 0x10}, /* up_cim_2924_to_2a14 */
+	{0x7b50, 0x7b54, 0x2928, 0x10, 0x10}, /* up_cim_2928_to_2a18 */
+	{0x7b50, 0x7b54, 0x292c, 0x10, 0x10}, /* up_cim_292c_to_2a1c */
 };
 
-static const u32 t5_up_cim_reg_array[][IREG_NUM_ELEM] = {
-	{0x7b50, 0x7b54, 0x2000, 0x20}, /* up_cim_2000_to_207c */
-	{0x7b50, 0x7b54, 0x2080, 0x19}, /* up_cim_2080_to_20ec */
-	{0x7b50, 0x7b54, 0x00, 0x20}, /* up_cim_00_to_7c */
-	{0x7b50, 0x7b54, 0x80, 0x20}, /* up_cim_80_to_fc */
-	{0x7b50, 0x7b54, 0x100, 0x11}, /* up_cim_100_to_14c */
-	{0x7b50, 0x7b54, 0x200, 0x10}, /* up_cim_200_to_23c */
-	{0x7b50, 0x7b54, 0x240, 0x2}, /* up_cim_240_to_244 */
-	{0x7b50, 0x7b54, 0x250, 0x2}, /* up_cim_250_to_254 */
-	{0x7b50, 0x7b54, 0x260, 0x2}, /* up_cim_260_to_264 */
-	{0x7b50, 0x7b54, 0x270, 0x2}, /* up_cim_270_to_274 */
-	{0x7b50, 0x7b54, 0x280, 0x20}, /* up_cim_280_to_2fc */
-	{0x7b50, 0x7b54, 0x300, 0x20}, /* up_cim_300_to_37c */
-	{0x7b50, 0x7b54, 0x380, 0x14}, /* up_cim_380_to_3cc */
+static const u32 t5_up_cim_reg_array[][IREG_NUM_ELEM + 1] = {
+	{0x7b50, 0x7b54, 0x2000, 0x20, 0}, /* up_cim_2000_to_207c */
+	{0x7b50, 0x7b54, 0x2080, 0x19, 0}, /* up_cim_2080_to_20ec */
+	{0x7b50, 0x7b54, 0x00, 0x20, 0}, /* up_cim_00_to_7c */
+	{0x7b50, 0x7b54, 0x80, 0x20, 0}, /* up_cim_80_to_fc */
+	{0x7b50, 0x7b54, 0x100, 0x11, 0}, /* up_cim_100_to_14c */
+	{0x7b50, 0x7b54, 0x200, 0x10, 0}, /* up_cim_200_to_23c */
+	{0x7b50, 0x7b54, 0x240, 0x2, 0}, /* up_cim_240_to_244 */
+	{0x7b50, 0x7b54, 0x250, 0x2, 0}, /* up_cim_250_to_254 */
+	{0x7b50, 0x7b54, 0x260, 0x2, 0}, /* up_cim_260_to_264 */
+	{0x7b50, 0x7b54, 0x270, 0x2, 0}, /* up_cim_270_to_274 */
+	{0x7b50, 0x7b54, 0x280, 0x20, 0}, /* up_cim_280_to_2fc */
+	{0x7b50, 0x7b54, 0x300, 0x20, 0}, /* up_cim_300_to_37c */
+	{0x7b50, 0x7b54, 0x380, 0x14, 0}, /* up_cim_380_to_3cc */
+	{0x7b50, 0x7b54, 0x2900, 0x4, 0x4}, /* up_cim_2900_to_3d40 */
+	{0x7b50, 0x7b54, 0x2904, 0x4, 0x4}, /* up_cim_2904_to_3d44 */
+	{0x7b50, 0x7b54, 0x2908, 0x4, 0x4}, /* up_cim_2908_to_3d48 */
+	{0x7b50, 0x7b54, 0x2910, 0x4, 0x4}, /* up_cim_2910_to_3d4c */
+	{0x7b50, 0x7b54, 0x2914, 0x4, 0x4}, /* up_cim_2914_to_3d50 */
+	{0x7b50, 0x7b54, 0x2918, 0x4, 0x4}, /* up_cim_2918_to_3d54 */
+	{0x7b50, 0x7b54, 0x291c, 0x4, 0x4}, /* up_cim_291c_to_3d58 */
+	{0x7b50, 0x7b54, 0x2924, 0x10, 0x10}, /* up_cim_2924_to_2914 */
+	{0x7b50, 0x7b54, 0x2928, 0x10, 0x10}, /* up_cim_2928_to_2a18 */
+	{0x7b50, 0x7b54, 0x292c, 0x10, 0x10}, /* up_cim_292c_to_2a1c */
 };
 
 static const u32 t6_hma_ireg_array[][IREG_NUM_ELEM] = {
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cudbg_if.h b/drivers/net/ethernet/chelsio/cxgb4/cudbg_if.h
index e10ff1e..88e7400 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cudbg_if.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cudbg_if.h
@@ -21,6 +21,7 @@
 /* Error codes */
 #define CUDBG_STATUS_NO_MEM -19
 #define CUDBG_STATUS_ENTITY_NOT_FOUND -24
+#define CUDBG_STATUS_NOT_IMPLEMENTED -28
 #define CUDBG_SYSTEM_ERROR -29
 #define CUDBG_STATUS_CCLK_NOT_DEFINED -32
 
@@ -47,6 +48,8 @@ enum cudbg_dbg_entity_type {
 	CUDBG_CIM_OBQ_NCSI = 17,
 	CUDBG_EDC0 = 18,
 	CUDBG_EDC1 = 19,
+	CUDBG_MC0 = 20,
+	CUDBG_MC1 = 21,
 	CUDBG_RSS = 22,
 	CUDBG_RSS_VF_CONF = 25,
 	CUDBG_PATH_MTU = 27,
@@ -56,6 +59,7 @@ enum cudbg_dbg_entity_type {
 	CUDBG_SGE_INDIRECT = 37,
 	CUDBG_ULPRX_LA = 41,
 	CUDBG_TP_LA = 43,
+	CUDBG_MEMINFO = 44,
 	CUDBG_CIM_PIF_LA = 45,
 	CUDBG_CLK = 46,
 	CUDBG_CIM_OBQ_RXQ0 = 47,
@@ -63,6 +67,7 @@ enum cudbg_dbg_entity_type {
 	CUDBG_PCIE_INDIRECT = 50,
 	CUDBG_PM_INDIRECT = 51,
 	CUDBG_TID_INFO = 54,
+	CUDBG_PCIE_CONFIG = 55,
 	CUDBG_DUMP_CONTEXT = 56,
 	CUDBG_MPS_TCAM = 57,
 	CUDBG_VPD_DATA = 58,
@@ -74,6 +79,7 @@ enum cudbg_dbg_entity_type {
 	CUDBG_PBT_TABLE = 65,
 	CUDBG_MBOX_LOG = 66,
 	CUDBG_HMA_INDIRECT = 67,
+	CUDBG_HMA = 68,
 	CUDBG_MAX_ENTITY = 70,
 };
 
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c b/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c
index d699bf8..0a3871f 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c
@@ -15,12 +15,14 @@
  *
  */
 
+#include <linux/sort.h>
+
 #include "t4_regs.h"
 #include "cxgb4.h"
 #include "cudbg_if.h"
 #include "cudbg_lib_common.h"
-#include "cudbg_lib.h"
 #include "cudbg_entity.h"
+#include "cudbg_lib.h"
 
 static void cudbg_write_and_release_buff(struct cudbg_buffer *pin_buff,
 					 struct cudbg_buffer *dbg_buff)
@@ -84,6 +86,277 @@ static int cudbg_read_vpd_reg(struct adapter *padap, u32 addr, u32 len,
 	return 0;
 }
 
+static int cudbg_mem_desc_cmp(const void *a, const void *b)
+{
+	return ((const struct cudbg_mem_desc *)a)->base -
+	       ((const struct cudbg_mem_desc *)b)->base;
+}
+
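
cudbg_mem_desc_cmp() is handed to the kernel's sort() further down to order regions by base address. A userspace analogue built on qsort(); the branchless comparison here is a defensive variant rather than a copy of the driver's comparator, since a raw u32 subtraction can overflow the int result when bases sit more than 2GB apart:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

struct mem_desc {
	uint32_t base;
	uint32_t limit;
	uint32_t idx;
};

static int mem_desc_cmp(const void *a, const void *b)
{
	uint32_t ba = ((const struct mem_desc *)a)->base;
	uint32_t bb = ((const struct mem_desc *)b)->base;

	return (ba > bb) - (ba < bb);
}

int main(void)
{
	struct mem_desc mem[] = {
		{ .base = 0x40000000 },
		{ .base = 0x0 },
		{ .base = 0x20000000 },
	};
	size_t i;

	qsort(mem, 3, sizeof(mem[0]), mem_desc_cmp);
	for (i = 0; i < 3; i++)
		printf("base %#x\n", mem[i].base);
	return 0;
}
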
+int cudbg_fill_meminfo(struct adapter *padap,
+		       struct cudbg_meminfo *meminfo_buff)
+{
+	struct cudbg_mem_desc *md;
+	u32 lo, hi, used, alloc;
+	int n, i;
+
+	memset(meminfo_buff->avail, 0,
+	       ARRAY_SIZE(meminfo_buff->avail) *
+	       sizeof(struct cudbg_mem_desc));
+	memset(meminfo_buff->mem, 0,
+	       (ARRAY_SIZE(cudbg_region) + 3) * sizeof(struct cudbg_mem_desc));
+	md  = meminfo_buff->mem;
+
+	for (i = 0; i < ARRAY_SIZE(meminfo_buff->mem); i++) {
+		meminfo_buff->mem[i].limit = 0;
+		meminfo_buff->mem[i].idx = i;
+	}
+
+	/* Find and sort the populated memory ranges */
+	i = 0;
+	lo = t4_read_reg(padap, MA_TARGET_MEM_ENABLE_A);
+	if (lo & EDRAM0_ENABLE_F) {
+		hi = t4_read_reg(padap, MA_EDRAM0_BAR_A);
+		meminfo_buff->avail[i].base =
+			cudbg_mbytes_to_bytes(EDRAM0_BASE_G(hi));
+		meminfo_buff->avail[i].limit =
+			meminfo_buff->avail[i].base +
+			cudbg_mbytes_to_bytes(EDRAM0_SIZE_G(hi));
+		meminfo_buff->avail[i].idx = 0;
+		i++;
+	}
+
+	if (lo & EDRAM1_ENABLE_F) {
+		hi =  t4_read_reg(padap, MA_EDRAM1_BAR_A);
+		meminfo_buff->avail[i].base =
+			cudbg_mbytes_to_bytes(EDRAM1_BASE_G(hi));
+		meminfo_buff->avail[i].limit =
+			meminfo_buff->avail[i].base +
+			cudbg_mbytes_to_bytes(EDRAM1_SIZE_G(hi));
+		meminfo_buff->avail[i].idx = 1;
+		i++;
+	}
+
+	if (is_t5(padap->params.chip)) {
+		if (lo & EXT_MEM0_ENABLE_F) {
+			hi = t4_read_reg(padap, MA_EXT_MEMORY0_BAR_A);
+			meminfo_buff->avail[i].base =
+				cudbg_mbytes_to_bytes(EXT_MEM_BASE_G(hi));
+			meminfo_buff->avail[i].limit =
+				meminfo_buff->avail[i].base +
+				cudbg_mbytes_to_bytes(EXT_MEM_SIZE_G(hi));
+			meminfo_buff->avail[i].idx = 3;
+			i++;
+		}
+
+		if (lo & EXT_MEM1_ENABLE_F) {
+			hi = t4_read_reg(padap, MA_EXT_MEMORY1_BAR_A);
+			meminfo_buff->avail[i].base =
+				cudbg_mbytes_to_bytes(EXT_MEM1_BASE_G(hi));
+			meminfo_buff->avail[i].limit =
+				meminfo_buff->avail[i].base +
+				cudbg_mbytes_to_bytes(EXT_MEM1_SIZE_G(hi));
+			meminfo_buff->avail[i].idx = 4;
+			i++;
+		}
+	} else {
+		if (lo & EXT_MEM_ENABLE_F) {
+			hi = t4_read_reg(padap, MA_EXT_MEMORY_BAR_A);
+			meminfo_buff->avail[i].base =
+				cudbg_mbytes_to_bytes(EXT_MEM_BASE_G(hi));
+			meminfo_buff->avail[i].limit =
+				meminfo_buff->avail[i].base +
+				cudbg_mbytes_to_bytes(EXT_MEM_SIZE_G(hi));
+			meminfo_buff->avail[i].idx = 2;
+			i++;
+		}
+
+		if (lo & HMA_MUX_F) {
+			hi = t4_read_reg(padap, MA_EXT_MEMORY1_BAR_A);
+			meminfo_buff->avail[i].base =
+				cudbg_mbytes_to_bytes(EXT_MEM1_BASE_G(hi));
+			meminfo_buff->avail[i].limit =
+				meminfo_buff->avail[i].base +
+				cudbg_mbytes_to_bytes(EXT_MEM1_SIZE_G(hi));
+			meminfo_buff->avail[i].idx = 5;
+			i++;
+		}
+	}
+
+	if (!i) /* no memory available */
+		return CUDBG_STATUS_ENTITY_NOT_FOUND;
+
+	meminfo_buff->avail_c = i;
+	sort(meminfo_buff->avail, i, sizeof(struct cudbg_mem_desc),
+	     cudbg_mem_desc_cmp, NULL);
+	(md++)->base = t4_read_reg(padap, SGE_DBQ_CTXT_BADDR_A);
+	(md++)->base = t4_read_reg(padap, SGE_IMSG_CTXT_BADDR_A);
+	(md++)->base = t4_read_reg(padap, SGE_FLM_CACHE_BADDR_A);
+	(md++)->base = t4_read_reg(padap, TP_CMM_TCB_BASE_A);
+	(md++)->base = t4_read_reg(padap, TP_CMM_MM_BASE_A);
+	(md++)->base = t4_read_reg(padap, TP_CMM_TIMER_BASE_A);
+	(md++)->base = t4_read_reg(padap, TP_CMM_MM_RX_FLST_BASE_A);
+	(md++)->base = t4_read_reg(padap, TP_CMM_MM_TX_FLST_BASE_A);
+	(md++)->base = t4_read_reg(padap, TP_CMM_MM_PS_FLST_BASE_A);
+
+	/* the next few have explicit upper bounds */
+	md->base = t4_read_reg(padap, TP_PMM_TX_BASE_A);
+	md->limit = md->base - 1 +
+		    t4_read_reg(padap, TP_PMM_TX_PAGE_SIZE_A) *
+		    PMTXMAXPAGE_G(t4_read_reg(padap, TP_PMM_TX_MAX_PAGE_A));
+	md++;
+
+	md->base = t4_read_reg(padap, TP_PMM_RX_BASE_A);
+	md->limit = md->base - 1 +
+		    t4_read_reg(padap, TP_PMM_RX_PAGE_SIZE_A) *
+		    PMRXMAXPAGE_G(t4_read_reg(padap, TP_PMM_RX_MAX_PAGE_A));
+	md++;
+
+	if (t4_read_reg(padap, LE_DB_CONFIG_A) & HASHEN_F) {
+		if (CHELSIO_CHIP_VERSION(padap->params.chip) <= CHELSIO_T5) {
+			hi = t4_read_reg(padap, LE_DB_TID_HASHBASE_A) / 4;
+			md->base = t4_read_reg(padap, LE_DB_HASH_TID_BASE_A);
+		} else {
+			hi = t4_read_reg(padap, LE_DB_HASH_TID_BASE_A);
+			md->base = t4_read_reg(padap,
+					       LE_DB_HASH_TBL_BASE_ADDR_A);
+		}
+		md->limit = 0;
+	} else {
+		md->base = 0;
+		md->idx = ARRAY_SIZE(cudbg_region);  /* hide it */
+	}
+	md++;
+
+#define ulp_region(reg) do { \
+	md->base = t4_read_reg(padap, ULP_ ## reg ## _LLIMIT_A);\
+	(md++)->limit = t4_read_reg(padap, ULP_ ## reg ## _ULIMIT_A);\
+} while (0)
+
+	ulp_region(RX_ISCSI);
+	ulp_region(RX_TDDP);
+	ulp_region(TX_TPT);
+	ulp_region(RX_STAG);
+	ulp_region(RX_RQ);
+	ulp_region(RX_RQUDP);
+	ulp_region(RX_PBL);
+	ulp_region(TX_PBL);
+#undef ulp_region
+	md->base = 0;
+	md->idx = ARRAY_SIZE(cudbg_region);
+	if (!is_t4(padap->params.chip)) {
+		u32 fifo_size = t4_read_reg(padap, SGE_DBVFIFO_SIZE_A);
+		u32 sge_ctrl = t4_read_reg(padap, SGE_CONTROL2_A);
+		u32 size = 0;
+
+		if (is_t5(padap->params.chip)) {
+			if (sge_ctrl & VFIFO_ENABLE_F)
+				size = DBVFIFO_SIZE_G(fifo_size);
+		} else {
+			size = T6_DBVFIFO_SIZE_G(fifo_size);
+		}
+
+		if (size) {
+			md->base = BASEADDR_G(t4_read_reg(padap,
+							  SGE_DBVFIFO_BADDR_A));
+			md->limit = md->base + (size << 2) - 1;
+		}
+	}
+
+	md++;
+
+	md->base = t4_read_reg(padap, ULP_RX_CTX_BASE_A);
+	md->limit = 0;
+	md++;
+	md->base = t4_read_reg(padap, ULP_TX_ERR_TABLE_BASE_A);
+	md->limit = 0;
+	md++;
+
+	md->base = padap->vres.ocq.start;
+	if (padap->vres.ocq.size)
+		md->limit = md->base + padap->vres.ocq.size - 1;
+	else
+		md->idx = ARRAY_SIZE(cudbg_region);  /* hide it */
+	md++;
+
+	/* Add any address-space holes; there can be up to 3 */
+	for (n = 0; n < i - 1; n++)
+		if (meminfo_buff->avail[n].limit <
+		    meminfo_buff->avail[n + 1].base)
+			(md++)->base = meminfo_buff->avail[n].limit;
+
+	if (meminfo_buff->avail[n].limit)
+		(md++)->base = meminfo_buff->avail[n].limit;
+
+	n = md - meminfo_buff->mem;
+	meminfo_buff->mem_c = n;
+
+	sort(meminfo_buff->mem, n, sizeof(struct cudbg_mem_desc),
+	     cudbg_mem_desc_cmp, NULL);
+
+	lo = t4_read_reg(padap, CIM_SDRAM_BASE_ADDR_A);
+	hi = t4_read_reg(padap, CIM_SDRAM_ADDR_SIZE_A) + lo - 1;
+	meminfo_buff->up_ram_lo = lo;
+	meminfo_buff->up_ram_hi = hi;
+
+	lo = t4_read_reg(padap, CIM_EXTMEM2_BASE_ADDR_A);
+	hi = t4_read_reg(padap, CIM_EXTMEM2_ADDR_SIZE_A) + lo - 1;
+	meminfo_buff->up_extmem2_lo = lo;
+	meminfo_buff->up_extmem2_hi = hi;
+
+	lo = t4_read_reg(padap, TP_PMM_RX_MAX_PAGE_A);
+	meminfo_buff->rx_pages_data[0] =  PMRXMAXPAGE_G(lo);
+	meminfo_buff->rx_pages_data[1] =
+		t4_read_reg(padap, TP_PMM_RX_PAGE_SIZE_A) >> 10;
+	meminfo_buff->rx_pages_data[2] = (lo & PMRXNUMCHN_F) ? 2 : 1;
+
+	lo = t4_read_reg(padap, TP_PMM_TX_MAX_PAGE_A);
+	hi = t4_read_reg(padap, TP_PMM_TX_PAGE_SIZE_A);
+	meminfo_buff->tx_pages_data[0] = PMTXMAXPAGE_G(lo);
+	meminfo_buff->tx_pages_data[1] =
+		hi >= (1 << 20) ? (hi >> 20) : (hi >> 10);
+	meminfo_buff->tx_pages_data[2] =
+		hi >= (1 << 20) ? 'M' : 'K';
+	meminfo_buff->tx_pages_data[3] = 1 << PMTXNUMCHN_G(lo);
+
+	meminfo_buff->p_structs = t4_read_reg(padap, TP_CMM_MM_MAX_PSTRUCT_A);
+
+	for (i = 0; i < 4; i++) {
+		if (CHELSIO_CHIP_VERSION(padap->params.chip) > CHELSIO_T5)
+			lo = t4_read_reg(padap,
+					 MPS_RX_MAC_BG_PG_CNT0_A + i * 4);
+		else
+			lo = t4_read_reg(padap, MPS_RX_PG_RSV0_A + i * 4);
+		if (is_t5(padap->params.chip)) {
+			used = T5_USED_G(lo);
+			alloc = T5_ALLOC_G(lo);
+		} else {
+			used = USED_G(lo);
+			alloc = ALLOC_G(lo);
+		}
+		meminfo_buff->port_used[i] = used;
+		meminfo_buff->port_alloc[i] = alloc;
+	}
+
+	for (i = 0; i < padap->params.arch.nchan; i++) {
+		if (CHELSIO_CHIP_VERSION(padap->params.chip) > CHELSIO_T5)
+			lo = t4_read_reg(padap,
+					 MPS_RX_LPBK_BG_PG_CNT0_A + i * 4);
+		else
+			lo = t4_read_reg(padap, MPS_RX_PG_RSV4_A + i * 4);
+		if (is_t5(padap->params.chip)) {
+			used = T5_USED_G(lo);
+			alloc = T5_ALLOC_G(lo);
+		} else {
+			used = USED_G(lo);
+			alloc = ALLOC_G(lo);
+		}
+		meminfo_buff->loopback_used[i] = used;
+		meminfo_buff->loopback_alloc[i] = alloc;
+	}
+
+	return 0;
+}
+
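
The hole detection near the end of cudbg_fill_meminfo() is worth spelling out: with avail[] sorted by base, any gap between one range's limit and the next range's base becomes an unnamed region entry. A self-contained sketch with made-up addresses:

#include <stdint.h>
#include <stdio.h>

struct range {
	uint32_t base;
	uint32_t limit;	/* one past the last byte, as in avail[] */
};

int main(void)
{
	struct range avail[] = {	/* already sorted by base */
		{ 0x00000000, 0x00400000 },
		{ 0x00800000, 0x01000000 },
		{ 0x02000000, 0x04000000 },
	};
	int i, n = 3;

	for (i = 0; i < n - 1; i++)
		if (avail[i].limit < avail[i + 1].base)
			printf("hole at %#x..%#x\n", avail[i].limit,
			       avail[i + 1].base - 1);
	return 0;
}
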
 int cudbg_collect_reg_dump(struct cudbg_init *pdbg_init,
 			   struct cudbg_buffer *dbg_buff,
 			   struct cudbg_error *cudbg_err)
@@ -420,23 +693,211 @@ int cudbg_collect_obq_sge_rx_q1(struct cudbg_init *pdbg_init,
 	return cudbg_read_cim_obq(pdbg_init, dbg_buff, cudbg_err, 7);
 }
 
+static int cudbg_meminfo_get_mem_index(struct adapter *padap,
+				       struct cudbg_meminfo *mem_info,
+				       u8 mem_type, u8 *idx)
+{
+	u8 i, flag;
+
+	switch (mem_type) {
+	case MEM_EDC0:
+		flag = EDC0_FLAG;
+		break;
+	case MEM_EDC1:
+		flag = EDC1_FLAG;
+		break;
+	case MEM_MC0:
+		/* Some T5 cards have both MC0 and MC1. */
+		flag = is_t5(padap->params.chip) ? MC0_FLAG : MC_FLAG;
+		break;
+	case MEM_MC1:
+		flag = MC1_FLAG;
+		break;
+	case MEM_HMA:
+		flag = HMA_FLAG;
+		break;
+	default:
+		return CUDBG_STATUS_ENTITY_NOT_FOUND;
+	}
+
+	for (i = 0; i < mem_info->avail_c; i++) {
+		if (mem_info->avail[i].idx == flag) {
+			*idx = i;
+			return 0;
+		}
+	}
+
+	return CUDBG_STATUS_ENTITY_NOT_FOUND;
+}
+
+/* Fetch the start and end of @region_name from @meminfo. */
+static int cudbg_get_mem_region(struct adapter *padap,
+				struct cudbg_meminfo *meminfo,
+				u8 mem_type, const char *region_name,
+				struct cudbg_mem_desc *mem_desc)
+{
+	u8 mc, found = 0;
+	u32 i, idx = 0;
+	int rc;
+
+	rc = cudbg_meminfo_get_mem_index(padap, meminfo, mem_type, &mc);
+	if (rc)
+		return rc;
+
+	for (i = 0; i < ARRAY_SIZE(cudbg_region); i++) {
+		if (!strcmp(cudbg_region[i], region_name)) {
+			found = 1;
+			idx = i;
+			break;
+		}
+	}
+	if (!found)
+		return -EINVAL;
+
+	found = 0;
+	for (i = 0; i < meminfo->mem_c; i++) {
+		if (meminfo->mem[i].idx >= ARRAY_SIZE(cudbg_region))
+			continue; /* Skip holes */
+
+		if (!(meminfo->mem[i].limit))
+			meminfo->mem[i].limit =
+				i < meminfo->mem_c - 1 ?
+				meminfo->mem[i + 1].base - 1 : ~0;
+
+		if (meminfo->mem[i].idx == idx) {
+			/* Check if the region exists in @mem_type memory */
+			if (meminfo->mem[i].base < meminfo->avail[mc].base &&
+			    meminfo->mem[i].limit < meminfo->avail[mc].base)
+				return -EINVAL;
+
+			if (meminfo->mem[i].base > meminfo->avail[mc].limit)
+				return -EINVAL;
+
+			memcpy(mem_desc, &meminfo->mem[i],
+			       sizeof(struct cudbg_mem_desc));
+			found = 1;
+			break;
+		}
+	}
+	if (!found)
+		return -EINVAL;
+
+	return 0;
+}
+
+/* Fetch and update the start and end of the requested memory region w.r.t 0
+ * in the corresponding EDC/MC/HMA.
+ */
+static int cudbg_get_mem_relative(struct adapter *padap,
+				  struct cudbg_meminfo *meminfo,
+				  u8 mem_type, u32 *out_base, u32 *out_end)
+{
+	u8 mc_idx;
+	int rc;
+
+	rc = cudbg_meminfo_get_mem_index(padap, meminfo, mem_type, &mc_idx);
+	if (rc)
+		return rc;
+
+	if (*out_base < meminfo->avail[mc_idx].base)
+		*out_base = 0;
+	else
+		*out_base -= meminfo->avail[mc_idx].base;
+
+	if (*out_end > meminfo->avail[mc_idx].limit)
+		*out_end = meminfo->avail[mc_idx].limit;
+	else
+		*out_end -= meminfo->avail[mc_idx].base;
+
+	return 0;
+}
+
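
A standalone sketch of the rebasing in cudbg_get_mem_relative() above, with demo addresses. The two clamping branches mirror the driver as written, including that an end past the window keeps the absolute limit rather than a rebased one:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t base = 0x10000000, limit = 0x1fffffff;
	uint32_t start = 0x10400000, end = 0x10800000;

	start = (start < base) ? 0 : start - base;
	end = (end > limit) ? limit : end - base;

	/* prints "relative: 0x400000..0x800000" */
	printf("relative: %#x..%#x\n", start, end);
	return 0;
}
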
+/* Get TX and RX Payload region */
+static int cudbg_get_payload_range(struct adapter *padap, u8 mem_type,
+				   const char *region_name,
+				   struct cudbg_region_info *payload)
+{
+	struct cudbg_mem_desc mem_desc = { 0 };
+	struct cudbg_meminfo meminfo;
+	int rc;
+
+	rc = cudbg_fill_meminfo(padap, &meminfo);
+	if (rc)
+		return rc;
+
+	rc = cudbg_get_mem_region(padap, &meminfo, mem_type, region_name,
+				  &mem_desc);
+	if (rc) {
+		payload->exist = false;
+		return 0;
+	}
+
+	payload->exist = true;
+	payload->start = mem_desc.base;
+	payload->end = mem_desc.limit;
+
+	return cudbg_get_mem_relative(padap, &meminfo, mem_type,
+				      &payload->start, &payload->end);
+}
+
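
cudbg_read_fw_mem() below uses these payload ranges to skip any read window that lies entirely inside a Tx/Rx payload region. A sketch of that containment test; the 64KB chunk size is a demo assumption, not the driver's CUDBG_CHUNK_SIZE:

#include <stdint.h>
#include <stdio.h>

struct region {
	int exist;
	uint32_t start, end;	/* already chunk-aligned */
};

int main(void)
{
	struct region payload = { 1, 0x20000, 0x80000 };
	uint32_t chunk = 0x10000, bytes_read;

	for (bytes_read = 0; bytes_read < 0xa0000; bytes_read += chunk) {
		int skip = payload.exist &&
			   bytes_read >= payload.start &&
			   bytes_read + chunk <= payload.end;

		printf("%#7x: %s\n", bytes_read,
		       skip ? "skip" : "read");
	}
	return 0;
}
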
+#define CUDBG_YIELD_ITERATION 256
+
 static int cudbg_read_fw_mem(struct cudbg_init *pdbg_init,
 			     struct cudbg_buffer *dbg_buff, u8 mem_type,
 			     unsigned long tot_len,
 			     struct cudbg_error *cudbg_err)
 {
+	static const char * const region_name[] = { "Tx payload:",
+						    "Rx payload:" };
 	unsigned long bytes, bytes_left, bytes_read = 0;
 	struct adapter *padap = pdbg_init->adap;
 	struct cudbg_buffer temp_buff = { 0 };
+	struct cudbg_region_info payload[2];
+	u32 yield_count = 0;
 	int rc = 0;
+	u8 i;
+
+	/* Get TX/RX Payload region range if they exist */
+	memset(payload, 0, sizeof(payload));
+	for (i = 0; i < ARRAY_SIZE(region_name); i++) {
+		rc = cudbg_get_payload_range(padap, mem_type, region_name[i],
+					     &payload[i]);
+		if (rc)
+			return rc;
+
+		if (payload[i].exist) {
+			/* Align start and end to avoid wrap around */
+			payload[i].start = roundup(payload[i].start,
+						   CUDBG_CHUNK_SIZE);
+			payload[i].end = rounddown(payload[i].end,
+						   CUDBG_CHUNK_SIZE);
+		}
+	}
 
 	bytes_left = tot_len;
 	while (bytes_left > 0) {
+		/* As the MC size is huge and it is read through PIO
+		 * access, this loop can hold the CPU for a long time.
+		 * The OS may think the process has hung and generate
+		 * CPU stall traces. So yield the CPU regularly.
+		 */
+		yield_count++;
+		if (!(yield_count % CUDBG_YIELD_ITERATION))
+			schedule();
+
 		bytes = min_t(unsigned long, bytes_left,
 			      (unsigned long)CUDBG_CHUNK_SIZE);
 		rc = cudbg_get_buff(dbg_buff, bytes, &temp_buff);
 		if (rc)
 			return rc;
+
+		for (i = 0; i < ARRAY_SIZE(payload); i++)
+			if (payload[i].exist &&
+			    bytes_read >= payload[i].start &&
+			    bytes_read + bytes <= payload[i].end)
+				/* TX and RX Payload regions can't overlap */
+				goto skip_read;
+
 		spin_lock(&padap->win0_lock);
 		rc = t4_memory_rw(padap, MEMWIN_NIC, mem_type,
 				  bytes_read, bytes,
@@ -448,6 +909,8 @@ static int cudbg_read_fw_mem(struct cudbg_init *pdbg_init,
 			cudbg_put_buff(&temp_buff, dbg_buff);
 			return rc;
 		}
+
+skip_read:
 		bytes_left -= bytes;
 		bytes_read += bytes;
 		cudbg_write_and_release_buff(&temp_buff, dbg_buff);
@@ -455,27 +918,6 @@ static int cudbg_read_fw_mem(struct cudbg_init *pdbg_init,
 	return rc;
 }
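
A userspace illustration of the periodic-yield pattern in the loop above: a long PIO-style copy gives up the CPU every 256 chunks so the scheduler never sees it as stuck. sched_yield() stands in for the kernel's schedule() call:

#include <sched.h>
#include <stdio.h>

#define YIELD_ITERATION	256	/* mirrors CUDBG_YIELD_ITERATION */

int main(void)
{
	unsigned long chunk, yield_count = 0;

	for (chunk = 0; chunk < 4096; chunk++) {
		/* ... read one CUDBG_CHUNK_SIZE block here ... */
		if (!(++yield_count % YIELD_ITERATION))
			sched_yield();
	}
	printf("copied %lu chunks\n", chunk);
	return 0;
}
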
 
-static void cudbg_collect_mem_info(struct cudbg_init *pdbg_init,
-				   struct card_mem *mem_info)
-{
-	struct adapter *padap = pdbg_init->adap;
-	u32 value;
-
-	value = t4_read_reg(padap, MA_EDRAM0_BAR_A);
-	value = EDRAM0_SIZE_G(value);
-	mem_info->size_edc0 = (u16)value;
-
-	value = t4_read_reg(padap, MA_EDRAM1_BAR_A);
-	value = EDRAM1_SIZE_G(value);
-	mem_info->size_edc1 = (u16)value;
-
-	value = t4_read_reg(padap, MA_TARGET_MEM_ENABLE_A);
-	if (value & EDRAM0_ENABLE_F)
-		mem_info->mem_flag |= (1 << EDC0_FLAG);
-	if (value & EDRAM1_ENABLE_F)
-		mem_info->mem_flag |= (1 << EDC1_FLAG);
-}
-
 static void cudbg_t4_fwcache(struct cudbg_init *pdbg_init,
 			     struct cudbg_error *cudbg_err)
 {
@@ -495,37 +937,25 @@ static int cudbg_collect_mem_region(struct cudbg_init *pdbg_init,
 				    struct cudbg_error *cudbg_err,
 				    u8 mem_type)
 {
-	struct card_mem mem_info = {0};
-	unsigned long flag, size;
+	struct adapter *padap = pdbg_init->adap;
+	struct cudbg_meminfo mem_info;
+	unsigned long size;
+	u8 mc_idx;
 	int rc;
 
-	cudbg_t4_fwcache(pdbg_init, cudbg_err);
-	cudbg_collect_mem_info(pdbg_init, &mem_info);
-	switch (mem_type) {
-	case MEM_EDC0:
-		flag = (1 << EDC0_FLAG);
-		size = cudbg_mbytes_to_bytes(mem_info.size_edc0);
-		break;
-	case MEM_EDC1:
-		flag = (1 << EDC1_FLAG);
-		size = cudbg_mbytes_to_bytes(mem_info.size_edc1);
-		break;
-	default:
-		rc = CUDBG_STATUS_ENTITY_NOT_FOUND;
-		goto err;
-	}
+	memset(&mem_info, 0, sizeof(struct cudbg_meminfo));
+	rc = cudbg_fill_meminfo(padap, &mem_info);
+	if (rc)
+		return rc;
 
-	if (mem_info.mem_flag & flag) {
-		rc = cudbg_read_fw_mem(pdbg_init, dbg_buff, mem_type,
-				       size, cudbg_err);
-		if (rc)
-			goto err;
-	} else {
-		rc = CUDBG_STATUS_ENTITY_NOT_FOUND;
-		goto err;
-	}
-err:
-	return rc;
+	cudbg_t4_fwcache(pdbg_init, cudbg_err);
+	rc = cudbg_meminfo_get_mem_index(padap, &mem_info, mem_type, &mc_idx);
+	if (rc)
+		return rc;
+
+	size = mem_info.avail[mc_idx].limit - mem_info.avail[mc_idx].base;
+	return cudbg_read_fw_mem(pdbg_init, dbg_buff, mem_type, size,
+				 cudbg_err);
 }
 
 int cudbg_collect_edc0_meminfo(struct cudbg_init *pdbg_init,
@@ -544,15 +974,40 @@ int cudbg_collect_edc1_meminfo(struct cudbg_init *pdbg_init,
 					MEM_EDC1);
 }
 
+int cudbg_collect_mc0_meminfo(struct cudbg_init *pdbg_init,
+			      struct cudbg_buffer *dbg_buff,
+			      struct cudbg_error *cudbg_err)
+{
+	return cudbg_collect_mem_region(pdbg_init, dbg_buff, cudbg_err,
+					MEM_MC0);
+}
+
+int cudbg_collect_mc1_meminfo(struct cudbg_init *pdbg_init,
+			      struct cudbg_buffer *dbg_buff,
+			      struct cudbg_error *cudbg_err)
+{
+	return cudbg_collect_mem_region(pdbg_init, dbg_buff, cudbg_err,
+					MEM_MC1);
+}
+
+int cudbg_collect_hma_meminfo(struct cudbg_init *pdbg_init,
+			      struct cudbg_buffer *dbg_buff,
+			      struct cudbg_error *cudbg_err)
+{
+	return cudbg_collect_mem_region(pdbg_init, dbg_buff, cudbg_err,
+					MEM_HMA);
+}
+
 int cudbg_collect_rss(struct cudbg_init *pdbg_init,
 		      struct cudbg_buffer *dbg_buff,
 		      struct cudbg_error *cudbg_err)
 {
 	struct adapter *padap = pdbg_init->adap;
 	struct cudbg_buffer temp_buff = { 0 };
-	int rc;
+	int rc, nentries;
 
-	rc = cudbg_get_buff(dbg_buff, RSS_NENTRIES * sizeof(u16), &temp_buff);
+	nentries = t4_chip_rss_size(padap);
+	rc = cudbg_get_buff(dbg_buff, nentries * sizeof(u16), &temp_buff);
 	if (rc)
 		return rc;
 
@@ -843,6 +1298,31 @@ int cudbg_collect_tp_la(struct cudbg_init *pdbg_init,
 	return rc;
 }
 
+int cudbg_collect_meminfo(struct cudbg_init *pdbg_init,
+			  struct cudbg_buffer *dbg_buff,
+			  struct cudbg_error *cudbg_err)
+{
+	struct adapter *padap = pdbg_init->adap;
+	struct cudbg_buffer temp_buff = { 0 };
+	struct cudbg_meminfo *meminfo_buff;
+	int rc;
+
+	rc = cudbg_get_buff(dbg_buff, sizeof(struct cudbg_meminfo), &temp_buff);
+	if (rc)
+		return rc;
+
+	meminfo_buff = (struct cudbg_meminfo *)temp_buff.data;
+	rc = cudbg_fill_meminfo(padap, meminfo_buff);
+	if (rc) {
+		cudbg_err->sys_err = rc;
+		cudbg_put_buff(&temp_buff, dbg_buff);
+		return rc;
+	}
+
+	cudbg_write_and_release_buff(&temp_buff, dbg_buff);
+	return rc;
+}
+
 int cudbg_collect_cim_pif_la(struct cudbg_init *pdbg_init,
 			     struct cudbg_buffer *dbg_buff,
 			     struct cudbg_error *cudbg_err)
@@ -1115,22 +1595,135 @@ int cudbg_collect_tid(struct cudbg_init *pdbg_init,
 	return rc;
 }
 
-int cudbg_dump_context_size(struct adapter *padap)
+int cudbg_collect_pcie_config(struct cudbg_init *pdbg_init,
+			      struct cudbg_buffer *dbg_buff,
+			      struct cudbg_error *cudbg_err)
 {
-	u32 value, size;
-	u8 flq;
+	struct adapter *padap = pdbg_init->adap;
+	struct cudbg_buffer temp_buff = { 0 };
+	u32 size, *value, j;
+	int i, rc, n;
 
+	size = sizeof(u32) * CUDBG_NUM_PCIE_CONFIG_REGS;
+	n = sizeof(t5_pcie_config_array) / (2 * sizeof(u32));
+	rc = cudbg_get_buff(dbg_buff, size, &temp_buff);
+	if (rc)
+		return rc;
+
+	value = (u32 *)temp_buff.data;
+	for (i = 0; i < n; i++) {
+		for (j = t5_pcie_config_array[i][0];
+		     j <= t5_pcie_config_array[i][1]; j += 4) {
+			t4_hw_pci_read_cfg4(padap, j, value);
+			value++;
+		}
+	}
+	cudbg_write_and_release_buff(&temp_buff, dbg_buff);
+	return rc;
+}
+
+static int cudbg_sge_ctxt_check_valid(u32 *buf, int type)
+{
+	int index, bit, bit_pos = 0;
+
+	switch (type) {
+	case CTXT_EGRESS:
+		bit_pos = 176;
+		break;
+	case CTXT_INGRESS:
+		bit_pos = 141;
+		break;
+	case CTXT_FLM:
+		bit_pos = 89;
+		break;
+	}
+	index = bit_pos / 32;
+	bit =  bit_pos % 32;
+	return buf[index] & (1U << bit);
+}
+
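
Each SGE context type keeps a validity bit at a fixed position in the raw context words (176, 141 and 89 above), located by splitting the position into a word index and a bit offset. A small standalone demo of the same test:

#include <stdint.h>
#include <stdio.h>

static int bit_is_set(const uint32_t *buf, int bit_pos)
{
	return !!(buf[bit_pos / 32] & (1U << (bit_pos % 32)));
}

int main(void)
{
	uint32_t ctxt[8] = { 0 };

	ctxt[176 / 32] |= 1U << (176 % 32);	/* mark EGRESS valid */
	printf("egress valid: %d, ingress valid: %d\n",
	       bit_is_set(ctxt, 176), bit_is_set(ctxt, 141));
	return 0;
}
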
+static int cudbg_get_ctxt_region_info(struct adapter *padap,
+				      struct cudbg_region_info *ctx_info,
+				      u8 *mem_type)
+{
+	struct cudbg_mem_desc mem_desc;
+	struct cudbg_meminfo meminfo;
+	u32 i, j, value, found;
+	u8 flq;
+	int rc;
+
+	rc = cudbg_fill_meminfo(padap, &meminfo);
+	if (rc)
+		return rc;
+
+	/* Get EGRESS and INGRESS context region size */
+	for (i = CTXT_EGRESS; i <= CTXT_INGRESS; i++) {
+		found = 0;
+		memset(&mem_desc, 0, sizeof(struct cudbg_mem_desc));
+		for (j = 0; j < ARRAY_SIZE(meminfo.avail); j++) {
+			rc = cudbg_get_mem_region(padap, &meminfo, j,
+						  cudbg_region[i],
+						  &mem_desc);
+			if (!rc) {
+				found = 1;
+				rc = cudbg_get_mem_relative(padap, &meminfo, j,
+							    &mem_desc.base,
+							    &mem_desc.limit);
+				if (rc) {
+					ctx_info[i].exist = false;
+					break;
+				}
+				ctx_info[i].exist = true;
+				ctx_info[i].start = mem_desc.base;
+				ctx_info[i].end = mem_desc.limit;
+				mem_type[i] = j;
+				break;
+			}
+		}
+		if (!found)
+			ctx_info[i].exist = false;
+	}
+
+	/* Get FLM and CNM max qid. */
 	value = t4_read_reg(padap, SGE_FLM_CFG_A);
 
 	/* Get number of data freelist queues */
 	flq = HDRSTARTFLQ_G(value);
-	size = CUDBG_MAX_FL_QIDS >> flq;
+	ctx_info[CTXT_FLM].exist = true;
+	ctx_info[CTXT_FLM].end = (CUDBG_MAX_FL_QIDS >> flq) * SGE_CTXT_SIZE;
 
-	/* Add extra space for congestion manager contexts.
-	 * The number of CONM contexts are same as number of freelist
+	/* The number of CONM contexts is the same as the number of freelist
 	 * queues.
 	 */
-	size += size;
+	ctx_info[CTXT_CNM].exist = true;
+	ctx_info[CTXT_CNM].end = ctx_info[CTXT_FLM].end;
+
+	return 0;
+}
+
+int cudbg_dump_context_size(struct adapter *padap)
+{
+	struct cudbg_region_info region_info[CTXT_CNM + 1] = { {0} };
+	u8 mem_type[CTXT_INGRESS + 1] = { 0 };
+	u32 i, size = 0;
+	int rc;
+
+	/* Get max valid qid for each type of queue */
+	rc = cudbg_get_ctxt_region_info(padap, region_info, mem_type);
+	if (rc)
+		return rc;
+
+	for (i = 0; i < CTXT_CNM; i++) {
+		if (!region_info[i].exist) {
+			if (i == CTXT_EGRESS || i == CTXT_INGRESS)
+				size += CUDBG_LOWMEM_MAX_CTXT_QIDS *
+					SGE_CTXT_SIZE;
+			continue;
+		}
+
+		size += (region_info[i].end - region_info[i].start + 1) /
+			SGE_CTXT_SIZE;
+	}
 	return size * sizeof(struct cudbg_ch_cntxt);
 }
 
@@ -1153,16 +1746,54 @@ static void cudbg_read_sge_ctxt(struct cudbg_init *pdbg_init, u32 cid,
 		t4_sge_ctxt_rd_bd(padap, cid, ctype, data);
 }
 
+static void cudbg_get_sge_ctxt_fw(struct cudbg_init *pdbg_init, u32 max_qid,
+				  u8 ctxt_type,
+				  struct cudbg_ch_cntxt **out_buff)
+{
+	struct cudbg_ch_cntxt *buff = *out_buff;
+	int rc;
+	u32 j;
+
+	for (j = 0; j < max_qid; j++) {
+		cudbg_read_sge_ctxt(pdbg_init, j, ctxt_type, buff->data);
+		rc = cudbg_sge_ctxt_check_valid(buff->data, ctxt_type);
+		if (!rc)
+			continue;
+
+		buff->cntxt_type = ctxt_type;
+		buff->cntxt_id = j;
+		buff++;
+		if (ctxt_type == CTXT_FLM) {
+			cudbg_read_sge_ctxt(pdbg_init, j, CTXT_CNM, buff->data);
+			buff->cntxt_type = CTXT_CNM;
+			buff->cntxt_id = j;
+			buff++;
+		}
+	}
+
+	*out_buff = buff;
+}
+
 int cudbg_collect_dump_context(struct cudbg_init *pdbg_init,
 			       struct cudbg_buffer *dbg_buff,
 			       struct cudbg_error *cudbg_err)
 {
+	struct cudbg_region_info region_info[CTXT_CNM + 1] = { {0} };
 	struct adapter *padap = pdbg_init->adap;
+	u32 j, size, max_ctx_size, max_ctx_qid;
+	u8 mem_type[CTXT_INGRESS + 1] = { 0 };
 	struct cudbg_buffer temp_buff = { 0 };
 	struct cudbg_ch_cntxt *buff;
-	u32 size, i = 0;
+	u64 *dst_off, *src_off;
+	u8 *ctx_buf;
+	u8 i, k;
 	int rc;
 
+	/* Get max valid qid for each type of queue */
+	rc = cudbg_get_ctxt_region_info(padap, region_info, mem_type);
+	if (rc)
+		return rc;
+
 	rc = cudbg_dump_context_size(padap);
 	if (rc <= 0)
 		return CUDBG_STATUS_ENTITY_NOT_FOUND;
@@ -1172,23 +1803,79 @@ int cudbg_collect_dump_context(struct cudbg_init *pdbg_init,
 	if (rc)
 		return rc;
 
-	buff = (struct cudbg_ch_cntxt *)temp_buff.data;
-	while (size > 0) {
-		buff->cntxt_type = CTXT_FLM;
-		buff->cntxt_id = i;
-		cudbg_read_sge_ctxt(pdbg_init, i, CTXT_FLM, buff->data);
-		buff++;
-		size -= sizeof(struct cudbg_ch_cntxt);
+	/* Get buffer with enough space to read the biggest context
+	 * region in memory.
+	 */
+	max_ctx_size = max(region_info[CTXT_EGRESS].end -
+			   region_info[CTXT_EGRESS].start + 1,
+			   region_info[CTXT_INGRESS].end -
+			   region_info[CTXT_INGRESS].start + 1);
 
-		buff->cntxt_type = CTXT_CNM;
-		buff->cntxt_id = i;
-		cudbg_read_sge_ctxt(pdbg_init, i, CTXT_CNM, buff->data);
-		buff++;
-		size -= sizeof(struct cudbg_ch_cntxt);
-
-		i++;
+	ctx_buf = kvzalloc(max_ctx_size, GFP_KERNEL);
+	if (!ctx_buf) {
+		cudbg_put_buff(&temp_buff, dbg_buff);
+		return -ENOMEM;
 	}
 
+	buff = (struct cudbg_ch_cntxt *)temp_buff.data;
+
+	/* Collect EGRESS and INGRESS context data.
+	 * In case of failure, fall back to collecting via FW or
+	 * backdoor access.
+	 */
+	for (i = CTXT_EGRESS; i <= CTXT_INGRESS; i++) {
+		if (!region_info[i].exist) {
+			max_ctx_qid = CUDBG_LOWMEM_MAX_CTXT_QIDS;
+			cudbg_get_sge_ctxt_fw(pdbg_init, max_ctx_qid, i,
+					      &buff);
+			continue;
+		}
+
+		max_ctx_size = region_info[i].end - region_info[i].start + 1;
+		max_ctx_qid = max_ctx_size / SGE_CTXT_SIZE;
+
+		t4_sge_ctxt_flush(padap, padap->mbox, i);
+		rc = t4_memory_rw(padap, MEMWIN_NIC, mem_type[i],
+				  region_info[i].start, max_ctx_size,
+				  (__be32 *)ctx_buf, 1);
+		if (rc) {
+			max_ctx_qid = CUDBG_LOWMEM_MAX_CTXT_QIDS;
+			cudbg_get_sge_ctxt_fw(pdbg_init, max_ctx_qid, i,
+					      &buff);
+			continue;
+		}
+
+		for (j = 0; j < max_ctx_qid; j++) {
+			src_off = (u64 *)(ctx_buf + j * SGE_CTXT_SIZE);
+			dst_off = (u64 *)buff->data;
+
+			/* The data is stored in 64-bit cpu order.  Convert it
+			 * to big endian before parsing.
+			 */
+			for (k = 0; k < SGE_CTXT_SIZE / sizeof(u64); k++)
+				dst_off[k] = cpu_to_be64(src_off[k]);
+
+			rc = cudbg_sge_ctxt_check_valid(buff->data, i);
+			if (!rc)
+				continue;
+
+			buff->cntxt_type = i;
+			buff->cntxt_id = j;
+			buff++;
+		}
+	}
+
+	kvfree(ctx_buf);
+
+	/* Collect FREELIST and CONGESTION MANAGER contexts */
+	max_ctx_size = region_info[CTXT_FLM].end -
+		       region_info[CTXT_FLM].start + 1;
+	max_ctx_qid = max_ctx_size / SGE_CTXT_SIZE;
+	/* Since FLM and CONM contexts are mapped 1-to-1, the call below
+	 * fetches both FLM and CONM contexts.
+	 */
+	cudbg_get_sge_ctxt_fw(pdbg_init, max_ctx_qid, CTXT_FLM, &buff);
+
 	cudbg_write_and_release_buff(&temp_buff, dbg_buff);
 	return rc;
 }
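
The memory-read path above converts each 64-bit context word from CPU order to big endian before the validity check. A userspace sketch with glibc's htobe64() in place of the kernel's cpu_to_be64():

#include <endian.h>
#include <stdint.h>
#include <stdio.h>

#define CTXT_WORDS	4	/* stand-in for SGE_CTXT_SIZE / 8 */

int main(void)
{
	uint64_t src[CTXT_WORDS] = { 0x0102030405060708ULL, 1, 2, 3 };
	uint64_t dst[CTXT_WORDS];
	int k;

	for (k = 0; k < CTXT_WORDS; k++)
		dst[k] = htobe64(src[k]);

	printf("%#llx -> %#llx\n", (unsigned long long)src[0],
	       (unsigned long long)dst[0]);
	return 0;
}
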
@@ -1735,11 +2422,21 @@ int cudbg_collect_up_cim_indirect(struct cudbg_init *pdbg_init,
 {
 	struct adapter *padap = pdbg_init->adap;
 	struct cudbg_buffer temp_buff = { 0 };
+	u32 local_offset, local_range;
 	struct ireg_buf *up_cim;
+	u32 size, j, iter;
+	u32 instance = 0;
 	int i, rc, n;
-	u32 size;
 
-	n = sizeof(t5_up_cim_reg_array) / (IREG_NUM_ELEM * sizeof(u32));
+	if (is_t5(padap->params.chip))
+		n = sizeof(t5_up_cim_reg_array) /
+		    ((IREG_NUM_ELEM + 1) * sizeof(u32));
+	else if (is_t6(padap->params.chip))
+		n = sizeof(t6_up_cim_reg_array) /
+		    ((IREG_NUM_ELEM + 1) * sizeof(u32));
+	else
+		return CUDBG_STATUS_NOT_IMPLEMENTED;
+
 	size = sizeof(struct ireg_buf) * n;
 	rc = cudbg_get_buff(dbg_buff, size, &temp_buff);
 	if (rc)
@@ -1757,6 +2454,7 @@ int cudbg_collect_up_cim_indirect(struct cudbg_init *pdbg_init,
 						t5_up_cim_reg_array[i][2];
 			up_cim_reg->ireg_offset_range =
 						t5_up_cim_reg_array[i][3];
+			instance = t5_up_cim_reg_array[i][4];
 		} else if (is_t6(padap->params.chip)) {
 			up_cim_reg->ireg_addr = t6_up_cim_reg_array[i][0];
 			up_cim_reg->ireg_data = t6_up_cim_reg_array[i][1];
@@ -1764,13 +2462,35 @@ int cudbg_collect_up_cim_indirect(struct cudbg_init *pdbg_init,
 						t6_up_cim_reg_array[i][2];
 			up_cim_reg->ireg_offset_range =
 						t6_up_cim_reg_array[i][3];
+			instance = t6_up_cim_reg_array[i][4];
 		}
 
-		rc = t4_cim_read(padap, up_cim_reg->ireg_local_offset,
-				 up_cim_reg->ireg_offset_range, buff);
-		if (rc) {
-			cudbg_put_buff(&temp_buff, dbg_buff);
-			return rc;
+		switch (instance) {
+		case NUM_CIM_CTL_TSCH_CHANNEL_INSTANCES:
+			iter = up_cim_reg->ireg_offset_range;
+			local_offset = 0x120;
+			local_range = 1;
+			break;
+		case NUM_CIM_CTL_TSCH_CHANNEL_TSCH_CLASS_INSTANCES:
+			iter = up_cim_reg->ireg_offset_range;
+			local_offset = 0x10;
+			local_range = 1;
+			break;
+		default:
+			iter = 1;
+			local_offset = 0;
+			local_range = up_cim_reg->ireg_offset_range;
+			break;
+		}
+
+		for (j = 0; j < iter; j++, buff++) {
+			rc = t4_cim_read(padap,
+					 up_cim_reg->ireg_local_offset +
+					 (j * local_offset), local_range, buff);
+			if (rc) {
+				cudbg_put_buff(&temp_buff, dbg_buff);
+				return rc;
+			}
 		}
 		up_cim++;
 	}
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.h b/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.h
index caeee8e..eebefe7 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.h
@@ -75,6 +75,12 @@ int cudbg_collect_edc0_meminfo(struct cudbg_init *pdbg_init,
 int cudbg_collect_edc1_meminfo(struct cudbg_init *pdbg_init,
 			       struct cudbg_buffer *dbg_buff,
 			       struct cudbg_error *cudbg_err);
+int cudbg_collect_mc0_meminfo(struct cudbg_init *pdbg_init,
+			      struct cudbg_buffer *dbg_buff,
+			      struct cudbg_error *cudbg_err);
+int cudbg_collect_mc1_meminfo(struct cudbg_init *pdbg_init,
+			      struct cudbg_buffer *dbg_buff,
+			      struct cudbg_error *cudbg_err);
 int cudbg_collect_rss(struct cudbg_init *pdbg_init,
 		      struct cudbg_buffer *dbg_buff,
 		      struct cudbg_error *cudbg_err);
@@ -102,6 +108,9 @@ int cudbg_collect_ulprx_la(struct cudbg_init *pdbg_init,
 int cudbg_collect_tp_la(struct cudbg_init *pdbg_init,
 			struct cudbg_buffer *dbg_buff,
 			struct cudbg_error *cudbg_err);
+int cudbg_collect_meminfo(struct cudbg_init *pdbg_init,
+			  struct cudbg_buffer *dbg_buff,
+			  struct cudbg_error *cudbg_err);
 int cudbg_collect_cim_pif_la(struct cudbg_init *pdbg_init,
 			     struct cudbg_buffer *dbg_buff,
 			     struct cudbg_error *cudbg_err);
@@ -123,6 +132,9 @@ int cudbg_collect_pm_indirect(struct cudbg_init *pdbg_init,
 int cudbg_collect_tid(struct cudbg_init *pdbg_init,
 		      struct cudbg_buffer *dbg_buff,
 		      struct cudbg_error *cudbg_err);
+int cudbg_collect_pcie_config(struct cudbg_init *pdbg_init,
+			      struct cudbg_buffer *dbg_buff,
+			      struct cudbg_error *cudbg_err);
 int cudbg_collect_dump_context(struct cudbg_init *pdbg_init,
 			       struct cudbg_buffer *dbg_buff,
 			       struct cudbg_error *cudbg_err);
@@ -156,6 +168,9 @@ int cudbg_collect_mbox_log(struct cudbg_init *pdbg_init,
 int cudbg_collect_hma_indirect(struct cudbg_init *pdbg_init,
 			       struct cudbg_buffer *dbg_buff,
 			       struct cudbg_error *cudbg_err);
+int cudbg_collect_hma_meminfo(struct cudbg_init *pdbg_init,
+			      struct cudbg_buffer *dbg_buff,
+			      struct cudbg_error *cudbg_err);
 
 struct cudbg_entity_hdr *cudbg_get_entity_hdr(void *outbuf, int i);
 void cudbg_align_debug_buffer(struct cudbg_buffer *dbg_buff,
@@ -163,7 +178,8 @@ void cudbg_align_debug_buffer(struct cudbg_buffer *dbg_buff,
 u32 cudbg_cim_obq_size(struct adapter *padap, int qid);
 int cudbg_dump_context_size(struct adapter *padap);
 
-struct cudbg_tcam;
+int cudbg_fill_meminfo(struct adapter *padap,
+		       struct cudbg_meminfo *meminfo_buff);
 void cudbg_fill_le_tcam_info(struct adapter *padap,
 			     struct cudbg_tcam *tcam_region);
 #endif /* __CUDBG_LIB_H__ */
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
index d8424ed..baa67d3 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
@@ -77,7 +77,8 @@ enum {
 	MEM_EDC1,
 	MEM_MC,
 	MEM_MC0 = MEM_MC,
-	MEM_MC1
+	MEM_MC1,
+	MEM_HMA,
 };
 
 enum {
@@ -824,6 +825,10 @@ struct mbox_list {
 	struct list_head list;
 };
 
+struct mps_encap_entry {
+	atomic_t refcnt;
+};
+
 struct adapter {
 	void __iomem *regs;
 	void __iomem *bar2;
@@ -838,6 +843,8 @@ struct adapter {
 	enum chip_type chip;
 
 	int msg_enable;
+	__be16 vxlan_port;
+	u8 vxlan_port_cnt;
 
 	struct adapter_params params;
 	struct cxgb4_virt_res vres;
@@ -867,7 +874,10 @@ struct adapter {
 	unsigned int clipt_start;
 	unsigned int clipt_end;
 	struct clip_tbl *clipt;
+	unsigned int rawf_start;
+	unsigned int rawf_cnt;
 	struct smt_data *smt;
+	struct mps_encap_entry *mps_encap;
 	struct cxgb4_uld_info *uld;
 	void *uld_handle[CXGB4_ULD_MAX];
 	unsigned int num_uld;
@@ -1304,6 +1314,7 @@ void t4_sge_start(struct adapter *adap);
 void t4_sge_stop(struct adapter *adap);
 void cxgb4_set_ethtool_ops(struct net_device *netdev);
 int cxgb4_write_rss(const struct port_info *pi, const u16 *queues);
+enum cpl_tx_tnl_lso_type cxgb_encap_offload_supported(struct sk_buff *skb);
 extern int dbfifo_int_thresh;
 
 #define for_each_port(adapter, iter) \
@@ -1422,6 +1433,21 @@ static inline void init_rspq(struct adapter *adap, struct sge_rspq *q,
 	q->size = size;
 }
 
+/**
+ *     t4_is_inserted_mod_type - is this Firmware Module Type plugged in?
+ *     @fw_mod_type: the Firmware Module Type
+ *
+ *     Return whether the Firmware Module Type represents a real Transceiver
+ *     Module/Cable Module Type which has been inserted.
+ */
+static inline bool t4_is_inserted_mod_type(unsigned int fw_mod_type)
+{
+	return (fw_mod_type != FW_PORT_MOD_TYPE_NONE &&
+		fw_mod_type != FW_PORT_MOD_TYPE_NOTSUPPORTED &&
+		fw_mod_type != FW_PORT_MOD_TYPE_UNKNOWN &&
+		fw_mod_type != FW_PORT_MOD_TYPE_ERROR);
+}
+
 void t4_write_indirect(struct adapter *adap, unsigned int addr_reg,
 		       unsigned int data_reg, const u32 *vals,
 		       unsigned int nregs, unsigned int start_idx);
@@ -1511,6 +1537,7 @@ int t4_init_portinfo(struct port_info *pi, int mbox,
 		     int port, int pf, int vf, u8 mac[]);
 int t4_port_init(struct adapter *adap, int mbox, int pf, int vf);
 void t4_fatal_err(struct adapter *adapter);
+unsigned int t4_chip_rss_size(struct adapter *adapter);
 int t4_config_rss_range(struct adapter *adapter, int mbox, unsigned int viid,
 			int start, int n, const u16 *rspq, unsigned int nrspq);
 int t4_config_glbl_rss(struct adapter *adapter, int mbox, unsigned int mode,
@@ -1620,6 +1647,12 @@ int t4_free_vi(struct adapter *adap, unsigned int mbox,
 int t4_set_rxmode(struct adapter *adap, unsigned int mbox, unsigned int viid,
 		int mtu, int promisc, int all_multi, int bcast, int vlanex,
 		bool sleep_ok);
+int t4_free_raw_mac_filt(struct adapter *adap, unsigned int viid,
+			 const u8 *addr, const u8 *mask, unsigned int idx,
+			 u8 lookup_type, u8 port_id, bool sleep_ok);
+int t4_alloc_raw_mac_filt(struct adapter *adap, unsigned int viid,
+			  const u8 *addr, const u8 *mask, unsigned int idx,
+			  u8 lookup_type, u8 port_id, bool sleep_ok);
 int t4_alloc_mac_filt(struct adapter *adap, unsigned int mbox,
 		      unsigned int viid, bool free, unsigned int naddr,
 		      const u8 **addr, u16 *idx, u64 *hash, bool sleep_ok);
@@ -1652,7 +1685,7 @@ int t4_ctrl_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
 		    unsigned int vf, unsigned int eqid);
 int t4_ofld_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
 		    unsigned int vf, unsigned int eqid);
-int t4_sge_ctxt_flush(struct adapter *adap, unsigned int mbox);
+int t4_sge_ctxt_flush(struct adapter *adap, unsigned int mbox, int ctxt_type);
 void t4_handle_get_port_info(struct port_info *pi, const __be64 *rpl);
 int t4_update_port_info(struct port_info *pi);
 int t4_get_link_params(struct port_info *pi, unsigned int *link_okp,
@@ -1695,6 +1728,9 @@ void t4_uld_mem_free(struct adapter *adap);
 int t4_uld_mem_alloc(struct adapter *adap);
 void t4_uld_clean_up(struct adapter *adap);
 void t4_register_netevent_notifier(void);
+int t4_i2c_rd(struct adapter *adap, unsigned int mbox, int port,
+	      unsigned int devid, unsigned int offset,
+	      unsigned int len, u8 *buf);
 void free_rspq_fl(struct adapter *adap, struct sge_rspq *rq, struct sge_fl *fl);
 void free_tx_desc(struct adapter *adap, struct sge_txq *q,
 		  unsigned int n, bool unmap);
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_cudbg.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_cudbg.c
index 29cc625..a2d6c8a 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_cudbg.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_cudbg.c
@@ -18,11 +18,13 @@
 #include "t4_regs.h"
 #include "cxgb4.h"
 #include "cxgb4_cudbg.h"
-#include "cudbg_entity.h"
 
 static const struct cxgb4_collect_entity cxgb4_collect_mem_dump[] = {
 	{ CUDBG_EDC0, cudbg_collect_edc0_meminfo },
 	{ CUDBG_EDC1, cudbg_collect_edc1_meminfo },
+	{ CUDBG_MC0, cudbg_collect_mc0_meminfo },
+	{ CUDBG_MC1, cudbg_collect_mc1_meminfo },
+	{ CUDBG_HMA, cudbg_collect_hma_meminfo },
 };
 
 static const struct cxgb4_collect_entity cxgb4_collect_hw_dump[] = {
@@ -53,6 +55,7 @@ static const struct cxgb4_collect_entity cxgb4_collect_hw_dump[] = {
 	{ CUDBG_SGE_INDIRECT, cudbg_collect_sge_indirect },
 	{ CUDBG_ULPRX_LA, cudbg_collect_ulprx_la },
 	{ CUDBG_TP_LA, cudbg_collect_tp_la },
+	{ CUDBG_MEMINFO, cudbg_collect_meminfo },
 	{ CUDBG_CIM_PIF_LA, cudbg_collect_cim_pif_la },
 	{ CUDBG_CLK, cudbg_collect_clk_info },
 	{ CUDBG_CIM_OBQ_RXQ0, cudbg_collect_obq_sge_rx_q0 },
@@ -60,6 +63,7 @@ static const struct cxgb4_collect_entity cxgb4_collect_hw_dump[] = {
 	{ CUDBG_PCIE_INDIRECT, cudbg_collect_pcie_indirect },
 	{ CUDBG_PM_INDIRECT, cudbg_collect_pm_indirect },
 	{ CUDBG_TID_INFO, cudbg_collect_tid },
+	{ CUDBG_PCIE_CONFIG, cudbg_collect_pcie_config },
 	{ CUDBG_DUMP_CONTEXT, cudbg_collect_dump_context },
 	{ CUDBG_MPS_TCAM, cudbg_collect_mps_tcam },
 	{ CUDBG_VPD_DATA, cudbg_collect_vpd_data },
@@ -158,8 +162,24 @@ static u32 cxgb4_get_entity_length(struct adapter *adap, u32 entity)
 		}
 		len = cudbg_mbytes_to_bytes(len);
 		break;
+	case CUDBG_MC0:
+		value = t4_read_reg(adap, MA_TARGET_MEM_ENABLE_A);
+		if (value & EXT_MEM0_ENABLE_F) {
+			value = t4_read_reg(adap, MA_EXT_MEMORY0_BAR_A);
+			len = EXT_MEM0_SIZE_G(value);
+		}
+		len = cudbg_mbytes_to_bytes(len);
+		break;
+	case CUDBG_MC1:
+		value = t4_read_reg(adap, MA_TARGET_MEM_ENABLE_A);
+		if (value & EXT_MEM1_ENABLE_F) {
+			value = t4_read_reg(adap, MA_EXT_MEMORY1_BAR_A);
+			len = EXT_MEM1_SIZE_G(value);
+		}
+		len = cudbg_mbytes_to_bytes(len);
+		break;
 	case CUDBG_RSS:
-		len = RSS_NENTRIES * sizeof(u16);
+		len = t4_chip_rss_size(adap) * sizeof(u16);
 		break;
 	case CUDBG_RSS_VF_CONF:
 		len = adap->params.arch.vfcount *
@@ -201,6 +221,9 @@ static u32 cxgb4_get_entity_length(struct adapter *adap, u32 entity)
 	case CUDBG_TP_LA:
 		len = sizeof(struct cudbg_tp_la) + TPLA_SIZE * sizeof(u64);
 		break;
+	case CUDBG_MEMINFO:
+		len = sizeof(struct cudbg_meminfo);
+		break;
 	case CUDBG_CIM_PIF_LA:
 		len = sizeof(struct cudbg_cim_pif_la);
 		len += 2 * CIM_PIFLA_SIZE * 6 * sizeof(u32);
@@ -219,6 +242,9 @@ static u32 cxgb4_get_entity_length(struct adapter *adap, u32 entity)
 	case CUDBG_TID_INFO:
 		len = sizeof(struct cudbg_tid_info_region_rev1);
 		break;
+	case CUDBG_PCIE_CONFIG:
+		len = sizeof(u32) * CUDBG_NUM_PCIE_CONFIG_REGS;
+		break;
 	case CUDBG_DUMP_CONTEXT:
 		len = cudbg_dump_context_size(adap);
 		break;
@@ -248,7 +274,13 @@ static u32 cxgb4_get_entity_length(struct adapter *adap, u32 entity)
 		len = sizeof(struct cudbg_ulptx_la);
 		break;
 	case CUDBG_UP_CIM_INDIRECT:
-		n = sizeof(t5_up_cim_reg_array) / (IREG_NUM_ELEM * sizeof(u32));
+		n = 0;
+		if (is_t5(adap->params.chip))
+			n = sizeof(t5_up_cim_reg_array) /
+			    ((IREG_NUM_ELEM + 1) * sizeof(u32));
+		else if (is_t6(adap->params.chip))
+			n = sizeof(t6_up_cim_reg_array) /
+			    ((IREG_NUM_ELEM + 1) * sizeof(u32));
 		len = sizeof(struct ireg_buf) * n;
 		break;
 	case CUDBG_PBT_TABLE:
@@ -264,6 +296,17 @@ static u32 cxgb4_get_entity_length(struct adapter *adap, u32 entity)
 			len = sizeof(struct ireg_buf) * n;
 		}
 		break;
+	case CUDBG_HMA:
+		value = t4_read_reg(adap, MA_TARGET_MEM_ENABLE_A);
+		if (value & HMA_MUX_F) {
+			/* In T6, there's no MC1.  So, HMA shares MC1
+			 * address space.
+			 */
+			value = t4_read_reg(adap, MA_EXT_MEMORY1_BAR_A);
+			len = EXT_MEM1_SIZE_G(value);
+		}
+		len = cudbg_mbytes_to_bytes(len);
+		break;
 	default:
 		break;
 	}
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_cudbg.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_cudbg.h
index c099b5a..7ceeb0b 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_cudbg.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_cudbg.h
@@ -20,6 +20,7 @@
 
 #include "cudbg_if.h"
 #include "cudbg_lib_common.h"
+#include "cudbg_entity.h"
 #include "cudbg_lib.h"
 
 typedef int (*cudbg_collect_callback_t)(struct cudbg_init *pdbg_init,
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c
index 917663b..4ea76c1 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c
@@ -45,6 +45,10 @@
 #include "cxgb4_debugfs.h"
 #include "clip_tbl.h"
 #include "l2t.h"
+#include "cudbg_if.h"
+#include "cudbg_lib_common.h"
+#include "cudbg_entity.h"
+#include "cudbg_lib.h"
 
 /* generic seq_file support for showing a table of size rows x width. */
 static void *seq_tab_get_idx(struct seq_tab *tb, loff_t pos)
@@ -1739,7 +1743,7 @@ static int mps_tcam_show(struct seq_file *seq, void *v)
 			 */
 			if (lookup_type && (lookup_type != DATALKPTYPE_M)) {
 				/* Inner header VNI */
-				vniy = ((data2 & DATAVIDH2_F) << 23) |
+				vniy = (data2 & DATAVIDH2_F) |
 				       (DATAVIDH1_G(data2) << 16) | VIDL_G(val);
 				dip_hit = data2 & DATADIPHIT_F;
 			} else {
@@ -1749,6 +1753,7 @@ static int mps_tcam_show(struct seq_file *seq, void *v)
 			port_num = DATAPORTNUM_G(data2);
 
 			/* Read tcamx. Change the control param */
+			vnix = 0;
 			ctl |= CTLXYBITSEL_V(1);
 			t4_write_reg(adap, MPS_CLS_TCAM_DATA2_CTL_A, ctl);
 			val = t4_read_reg(adap, MPS_CLS_TCAM_DATA1_A);
@@ -1757,7 +1762,7 @@ static int mps_tcam_show(struct seq_file *seq, void *v)
 			data2 = t4_read_reg(adap, MPS_CLS_TCAM_DATA2_CTL_A);
 			if (lookup_type && (lookup_type != DATALKPTYPE_M)) {
 				/* Inner header VNI mask */
-				vnix = ((data2 & DATAVIDH2_F) << 23) |
+				vnix = (data2 & DATAVIDH2_F) |
 				       (DATAVIDH1_G(data2) << 16) | VIDL_G(val);
 			}
 		} else {
@@ -1830,7 +1835,8 @@ static int mps_tcam_show(struct seq_file *seq, void *v)
 					   addr[1], addr[2], addr[3],
 					   addr[4], addr[5],
 					   (unsigned long long)mask,
-					   vniy, vnix, dip_hit ? 'Y' : 'N',
+					   vniy, (vnix | vniy),
+					   dip_hit ? 'Y' : 'N',
 					   port_num,
 					   (cls_lo & T6_SRAM_VLD_F) ? 'Y' : 'N',
 					   PORTMAP_G(cls_hi),
@@ -2017,11 +2023,12 @@ static int rss_show(struct seq_file *seq, void *v, int idx)
 
 static int rss_open(struct inode *inode, struct file *file)
 {
-	int ret;
-	struct seq_tab *p;
 	struct adapter *adap = inode->i_private;
+	int ret, nentries;
+	struct seq_tab *p;
 
-	p = seq_open_tab(file, RSS_NENTRIES / 8, 8 * sizeof(u16), 0, rss_show);
+	nentries = t4_chip_rss_size(adap);
+	p = seq_open_tab(file, nentries / 8, 8 * sizeof(u16), 0, rss_show);
 	if (!p)
 		return -ENOMEM;
 
@@ -2664,10 +2671,14 @@ static const struct file_operations mem_debugfs_fops = {
 
 static int tid_info_show(struct seq_file *seq, void *v)
 {
+	unsigned int tid_start = 0;
 	struct adapter *adap = seq->private;
 	const struct tid_info *t = &adap->tids;
 	enum chip_type chip = CHELSIO_CHIP_VERSION(adap->params.chip);
 
+	if (chip > CHELSIO_T5)
+		tid_start = t4_read_reg(adap, LE_DB_ACTIVE_TABLE_START_INDEX_A);
+
 	if (t4_read_reg(adap, LE_DB_CONFIG_A) & HASHEN_F) {
 		unsigned int sb;
 		seq_printf(seq, "Connections in use: %u\n",
@@ -2679,8 +2690,8 @@ static int tid_info_show(struct seq_file *seq, void *v)
 			sb = t4_read_reg(adap, LE_DB_SRVR_START_INDEX_A);
 
 		if (sb) {
-			seq_printf(seq, "TID range: 0..%u/%u..%u", sb - 1,
-				   adap->tids.hash_base,
+			seq_printf(seq, "TID range: %u..%u/%u..%u", tid_start,
+				   sb - 1, adap->tids.hash_base,
 				   t->ntids - 1);
 			seq_printf(seq, ", in use: %u/%u\n",
 				   atomic_read(&t->tids_in_use),
@@ -2705,7 +2716,8 @@ static int tid_info_show(struct seq_file *seq, void *v)
 		seq_printf(seq, "Connections in use: %u\n",
 			   atomic_read(&t->conns_in_use));
 
-		seq_printf(seq, "TID range: 0..%u", t->ntids - 1);
+		seq_printf(seq, "TID range: %u..%u", tid_start,
+			   tid_start + t->ntids - 1);
 		seq_printf(seq, ", in use: %u\n",
 			   atomic_read(&t->tids_in_use));
 	}
@@ -2794,18 +2806,6 @@ static const struct file_operations blocked_fl_fops = {
 	.llseek  = generic_file_llseek,
 };
 
-struct mem_desc {
-	unsigned int base;
-	unsigned int limit;
-	unsigned int idx;
-};
-
-static int mem_desc_cmp(const void *a, const void *b)
-{
-	return ((const struct mem_desc *)a)->base -
-	       ((const struct mem_desc *)b)->base;
-}
-
 static void mem_region_show(struct seq_file *seq, const char *name,
 			    unsigned int from, unsigned int to)
 {
@@ -2819,250 +2819,60 @@ static void mem_region_show(struct seq_file *seq, const char *name,
 static int meminfo_show(struct seq_file *seq, void *v)
 {
 	static const char * const memory[] = { "EDC0:", "EDC1:", "MC:",
-					"MC0:", "MC1:"};
-	static const char * const region[] = {
-		"DBQ contexts:", "IMSG contexts:", "FLM cache:", "TCBs:",
-		"Pstructs:", "Timers:", "Rx FL:", "Tx FL:", "Pstruct FL:",
-		"Tx payload:", "Rx payload:", "LE hash:", "iSCSI region:",
-		"TDDP region:", "TPT region:", "STAG region:", "RQ region:",
-		"RQUDP region:", "PBL region:", "TXPBL region:",
-		"DBVFIFO region:", "ULPRX state:", "ULPTX state:",
-		"On-chip queues:"
-	};
-
-	int i, n;
-	u32 lo, hi, used, alloc;
-	struct mem_desc avail[4];
-	struct mem_desc mem[ARRAY_SIZE(region) + 3];      /* up to 3 holes */
-	struct mem_desc *md = mem;
+					       "MC0:", "MC1:", "HMA:"};
 	struct adapter *adap = seq->private;
+	struct cudbg_meminfo meminfo;
+	int i, rc;
 
-	for (i = 0; i < ARRAY_SIZE(mem); i++) {
-		mem[i].limit = 0;
-		mem[i].idx = i;
-	}
+	memset(&meminfo, 0, sizeof(struct cudbg_meminfo));
+	rc = cudbg_fill_meminfo(adap, &meminfo);
+	if (rc)
+		return -ENXIO;
 
-	/* Find and sort the populated memory ranges */
-	i = 0;
-	lo = t4_read_reg(adap, MA_TARGET_MEM_ENABLE_A);
-	if (lo & EDRAM0_ENABLE_F) {
-		hi = t4_read_reg(adap, MA_EDRAM0_BAR_A);
-		avail[i].base = EDRAM0_BASE_G(hi) << 20;
-		avail[i].limit = avail[i].base + (EDRAM0_SIZE_G(hi) << 20);
-		avail[i].idx = 0;
-		i++;
-	}
-	if (lo & EDRAM1_ENABLE_F) {
-		hi = t4_read_reg(adap, MA_EDRAM1_BAR_A);
-		avail[i].base = EDRAM1_BASE_G(hi) << 20;
-		avail[i].limit = avail[i].base + (EDRAM1_SIZE_G(hi) << 20);
-		avail[i].idx = 1;
-		i++;
-	}
-
-	if (is_t5(adap->params.chip)) {
-		if (lo & EXT_MEM0_ENABLE_F) {
-			hi = t4_read_reg(adap, MA_EXT_MEMORY0_BAR_A);
-			avail[i].base = EXT_MEM0_BASE_G(hi) << 20;
-			avail[i].limit =
-				avail[i].base + (EXT_MEM0_SIZE_G(hi) << 20);
-			avail[i].idx = 3;
-			i++;
-		}
-		if (lo & EXT_MEM1_ENABLE_F) {
-			hi = t4_read_reg(adap, MA_EXT_MEMORY1_BAR_A);
-			avail[i].base = EXT_MEM1_BASE_G(hi) << 20;
-			avail[i].limit =
-				avail[i].base + (EXT_MEM1_SIZE_G(hi) << 20);
-			avail[i].idx = 4;
-			i++;
-		}
-	} else {
-		if (lo & EXT_MEM_ENABLE_F) {
-			hi = t4_read_reg(adap, MA_EXT_MEMORY_BAR_A);
-			avail[i].base = EXT_MEM_BASE_G(hi) << 20;
-			avail[i].limit =
-				avail[i].base + (EXT_MEM_SIZE_G(hi) << 20);
-			avail[i].idx = 2;
-			i++;
-		}
-	}
-	if (!i)                                    /* no memory available */
-		return 0;
-	sort(avail, i, sizeof(struct mem_desc), mem_desc_cmp, NULL);
-
-	(md++)->base = t4_read_reg(adap, SGE_DBQ_CTXT_BADDR_A);
-	(md++)->base = t4_read_reg(adap, SGE_IMSG_CTXT_BADDR_A);
-	(md++)->base = t4_read_reg(adap, SGE_FLM_CACHE_BADDR_A);
-	(md++)->base = t4_read_reg(adap, TP_CMM_TCB_BASE_A);
-	(md++)->base = t4_read_reg(adap, TP_CMM_MM_BASE_A);
-	(md++)->base = t4_read_reg(adap, TP_CMM_TIMER_BASE_A);
-	(md++)->base = t4_read_reg(adap, TP_CMM_MM_RX_FLST_BASE_A);
-	(md++)->base = t4_read_reg(adap, TP_CMM_MM_TX_FLST_BASE_A);
-	(md++)->base = t4_read_reg(adap, TP_CMM_MM_PS_FLST_BASE_A);
-
-	/* the next few have explicit upper bounds */
-	md->base = t4_read_reg(adap, TP_PMM_TX_BASE_A);
-	md->limit = md->base - 1 +
-		    t4_read_reg(adap, TP_PMM_TX_PAGE_SIZE_A) *
-		    PMTXMAXPAGE_G(t4_read_reg(adap, TP_PMM_TX_MAX_PAGE_A));
-	md++;
-
-	md->base = t4_read_reg(adap, TP_PMM_RX_BASE_A);
-	md->limit = md->base - 1 +
-		    t4_read_reg(adap, TP_PMM_RX_PAGE_SIZE_A) *
-		    PMRXMAXPAGE_G(t4_read_reg(adap, TP_PMM_RX_MAX_PAGE_A));
-	md++;
-
-	if (t4_read_reg(adap, LE_DB_CONFIG_A) & HASHEN_F) {
-		if (CHELSIO_CHIP_VERSION(adap->params.chip) <= CHELSIO_T5) {
-			hi = t4_read_reg(adap, LE_DB_TID_HASHBASE_A) / 4;
-			md->base = t4_read_reg(adap, LE_DB_HASH_TID_BASE_A);
-		 } else {
-			hi = t4_read_reg(adap, LE_DB_HASH_TID_BASE_A);
-			md->base = t4_read_reg(adap,
-					       LE_DB_HASH_TBL_BASE_ADDR_A);
-		}
-		md->limit = 0;
-	} else {
-		md->base = 0;
-		md->idx = ARRAY_SIZE(region);  /* hide it */
-	}
-	md++;
-
-#define ulp_region(reg) do { \
-	md->base = t4_read_reg(adap, ULP_ ## reg ## _LLIMIT_A);\
-	(md++)->limit = t4_read_reg(adap, ULP_ ## reg ## _ULIMIT_A); \
-} while (0)
-
-	ulp_region(RX_ISCSI);
-	ulp_region(RX_TDDP);
-	ulp_region(TX_TPT);
-	ulp_region(RX_STAG);
-	ulp_region(RX_RQ);
-	ulp_region(RX_RQUDP);
-	ulp_region(RX_PBL);
-	ulp_region(TX_PBL);
-#undef ulp_region
-	md->base = 0;
-	md->idx = ARRAY_SIZE(region);
-	if (!is_t4(adap->params.chip)) {
-		u32 size = 0;
-		u32 sge_ctrl = t4_read_reg(adap, SGE_CONTROL2_A);
-		u32 fifo_size = t4_read_reg(adap, SGE_DBVFIFO_SIZE_A);
-
-		if (is_t5(adap->params.chip)) {
-			if (sge_ctrl & VFIFO_ENABLE_F)
-				size = DBVFIFO_SIZE_G(fifo_size);
-		} else {
-			size = T6_DBVFIFO_SIZE_G(fifo_size);
-		}
-
-		if (size) {
-			md->base = BASEADDR_G(t4_read_reg(adap,
-					SGE_DBVFIFO_BADDR_A));
-			md->limit = md->base + (size << 2) - 1;
-		}
-	}
-
-	md++;
-
-	md->base = t4_read_reg(adap, ULP_RX_CTX_BASE_A);
-	md->limit = 0;
-	md++;
-	md->base = t4_read_reg(adap, ULP_TX_ERR_TABLE_BASE_A);
-	md->limit = 0;
-	md++;
-
-	md->base = adap->vres.ocq.start;
-	if (adap->vres.ocq.size)
-		md->limit = md->base + adap->vres.ocq.size - 1;
-	else
-		md->idx = ARRAY_SIZE(region);  /* hide it */
-	md++;
-
-	/* add any address-space holes, there can be up to 3 */
-	for (n = 0; n < i - 1; n++)
-		if (avail[n].limit < avail[n + 1].base)
-			(md++)->base = avail[n].limit;
-	if (avail[n].limit)
-		(md++)->base = avail[n].limit;
-
-	n = md - mem;
-	sort(mem, n, sizeof(struct mem_desc), mem_desc_cmp, NULL);
-
-	for (lo = 0; lo < i; lo++)
-		mem_region_show(seq, memory[avail[lo].idx], avail[lo].base,
-				avail[lo].limit - 1);
+	for (i = 0; i < meminfo.avail_c; i++)
+		mem_region_show(seq, memory[meminfo.avail[i].idx],
+				meminfo.avail[i].base,
+				meminfo.avail[i].limit - 1);
 
 	seq_putc(seq, '\n');
-	for (i = 0; i < n; i++) {
-		if (mem[i].idx >= ARRAY_SIZE(region))
+	for (i = 0; i < meminfo.mem_c; i++) {
+		if (meminfo.mem[i].idx >= ARRAY_SIZE(cudbg_region))
 			continue;                        /* skip holes */
-		if (!mem[i].limit)
-			mem[i].limit = i < n - 1 ? mem[i + 1].base - 1 : ~0;
-		mem_region_show(seq, region[mem[i].idx], mem[i].base,
-				mem[i].limit);
+		if (!meminfo.mem[i].limit)
+			meminfo.mem[i].limit =
+				i < meminfo.mem_c - 1 ?
+				meminfo.mem[i + 1].base - 1 : ~0;
+		mem_region_show(seq, cudbg_region[meminfo.mem[i].idx],
+				meminfo.mem[i].base, meminfo.mem[i].limit);
 	}
 
 	seq_putc(seq, '\n');
-	lo = t4_read_reg(adap, CIM_SDRAM_BASE_ADDR_A);
-	hi = t4_read_reg(adap, CIM_SDRAM_ADDR_SIZE_A) + lo - 1;
-	mem_region_show(seq, "uP RAM:", lo, hi);
+	mem_region_show(seq, "uP RAM:", meminfo.up_ram_lo, meminfo.up_ram_hi);
+	mem_region_show(seq, "uP Extmem2:", meminfo.up_extmem2_lo,
+			meminfo.up_extmem2_hi);
 
-	lo = t4_read_reg(adap, CIM_EXTMEM2_BASE_ADDR_A);
-	hi = t4_read_reg(adap, CIM_EXTMEM2_ADDR_SIZE_A) + lo - 1;
-	mem_region_show(seq, "uP Extmem2:", lo, hi);
-
-	lo = t4_read_reg(adap, TP_PMM_RX_MAX_PAGE_A);
 	seq_printf(seq, "\n%u Rx pages of size %uKiB for %u channels\n",
-		   PMRXMAXPAGE_G(lo),
-		   t4_read_reg(adap, TP_PMM_RX_PAGE_SIZE_A) >> 10,
-		   (lo & PMRXNUMCHN_F) ? 2 : 1);
+		   meminfo.rx_pages_data[0], meminfo.rx_pages_data[1],
+		   meminfo.rx_pages_data[2]);
 
-	lo = t4_read_reg(adap, TP_PMM_TX_MAX_PAGE_A);
-	hi = t4_read_reg(adap, TP_PMM_TX_PAGE_SIZE_A);
 	seq_printf(seq, "%u Tx pages of size %u%ciB for %u channels\n",
-		   PMTXMAXPAGE_G(lo),
-		   hi >= (1 << 20) ? (hi >> 20) : (hi >> 10),
-		   hi >= (1 << 20) ? 'M' : 'K', 1 << PMTXNUMCHN_G(lo));
-	seq_printf(seq, "%u p-structs\n\n",
-		   t4_read_reg(adap, TP_CMM_MM_MAX_PSTRUCT_A));
+		   meminfo.tx_pages_data[0], meminfo.tx_pages_data[1],
+		   meminfo.tx_pages_data[2], meminfo.tx_pages_data[3]);
 
-	for (i = 0; i < 4; i++) {
-		if (CHELSIO_CHIP_VERSION(adap->params.chip) > CHELSIO_T5)
-			lo = t4_read_reg(adap, MPS_RX_MAC_BG_PG_CNT0_A + i * 4);
-		else
-			lo = t4_read_reg(adap, MPS_RX_PG_RSV0_A + i * 4);
-		if (is_t5(adap->params.chip)) {
-			used = T5_USED_G(lo);
-			alloc = T5_ALLOC_G(lo);
-		} else {
-			used = USED_G(lo);
-			alloc = ALLOC_G(lo);
-		}
+	seq_printf(seq, "%u p-structs\n\n", meminfo.p_structs);
+
+	for (i = 0; i < 4; i++)
 		/* For T6 these are MAC buffer groups */
 		seq_printf(seq, "Port %d using %u pages out of %u allocated\n",
-			   i, used, alloc);
-	}
-	for (i = 0; i < adap->params.arch.nchan; i++) {
-		if (CHELSIO_CHIP_VERSION(adap->params.chip) > CHELSIO_T5)
-			lo = t4_read_reg(adap,
-					 MPS_RX_LPBK_BG_PG_CNT0_A + i * 4);
-		else
-			lo = t4_read_reg(adap, MPS_RX_PG_RSV4_A + i * 4);
-		if (is_t5(adap->params.chip)) {
-			used = T5_USED_G(lo);
-			alloc = T5_ALLOC_G(lo);
-		} else {
-			used = USED_G(lo);
-			alloc = ALLOC_G(lo);
-		}
+			   i, meminfo.port_used[i], meminfo.port_alloc[i]);
+
+	for (i = 0; i < adap->params.arch.nchan; i++)
 		/* For T6 these are MAC buffer groups */
 		seq_printf(seq,
 			   "Loopback %d using %u pages out of %u allocated\n",
-			   i, used, alloc);
-	}
+			   i, meminfo.loopback_used[i],
+			   meminfo.loopback_alloc[i]);
+
 	return 0;
 }
 
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c
index eb33821..7852d98 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c
@@ -517,7 +517,8 @@ static int from_fw_port_mod_type(enum fw_port_type port_type,
 		else
 			return PORT_OTHER;
 	} else if (port_type == FW_PORT_TYPE_KR4_100G ||
-		   port_type == FW_PORT_TYPE_KR_SFP28) {
+		   port_type == FW_PORT_TYPE_KR_SFP28 ||
+		   port_type == FW_PORT_TYPE_KR_XLAUI) {
 		return PORT_NONE;
 	}
 
@@ -645,6 +646,13 @@ static void fw_caps_to_lmm(enum fw_port_type port_type,
 		FW_CAPS_TO_LMM(SPEED_25G, 25000baseKR_Full);
 		break;
 
+	case FW_PORT_TYPE_KR_XLAUI:
+		SET_LMM(Backplane);
+		FW_CAPS_TO_LMM(SPEED_1G, 1000baseKX_Full);
+		FW_CAPS_TO_LMM(SPEED_10G, 10000baseKR_Full);
+		FW_CAPS_TO_LMM(SPEED_40G, 40000baseKR4_Full);
+		break;
+
 	case FW_PORT_TYPE_CR2_QSFP:
 		SET_LMM(FIBRE);
 		SET_LMM(50000baseSR2_Full);
@@ -1396,6 +1404,101 @@ static int get_dump_data(struct net_device *dev, struct ethtool_dump *eth_dump,
 	return 0;
 }
 
+static int cxgb4_get_module_info(struct net_device *dev,
+				 struct ethtool_modinfo *modinfo)
+{
+	struct port_info *pi = netdev_priv(dev);
+	u8 sff8472_comp, sff_diag_type, sff_rev;
+	struct adapter *adapter = pi->adapter;
+	int ret;
+
+	if (!t4_is_inserted_mod_type(pi->mod_type))
+		return -EINVAL;
+
+	switch (pi->port_type) {
+	case FW_PORT_TYPE_SFP:
+	case FW_PORT_TYPE_QSA:
+	case FW_PORT_TYPE_SFP28:
+		ret = t4_i2c_rd(adapter, adapter->mbox, pi->tx_chan,
+				I2C_DEV_ADDR_A0, SFF_8472_COMP_ADDR,
+				SFF_8472_COMP_LEN, &sff8472_comp);
+		if (ret)
+			return ret;
+		ret = t4_i2c_rd(adapter, adapter->mbox, pi->tx_chan,
+				I2C_DEV_ADDR_A0, SFP_DIAG_TYPE_ADDR,
+				SFP_DIAG_TYPE_LEN, &sff_diag_type);
+		if (ret)
+			return ret;
+
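+		/* A zero SFF-8472 compliance byte, or diag type bit 2
+		 * ("address change required"), means only the A0 page
+		 * is usable, so expose the module as SFF-8079.
+		 */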
+		if (!sff8472_comp || (sff_diag_type & 4)) {
+			modinfo->type = ETH_MODULE_SFF_8079;
+			modinfo->eeprom_len = ETH_MODULE_SFF_8079_LEN;
+		} else {
+			modinfo->type = ETH_MODULE_SFF_8472;
+			modinfo->eeprom_len = ETH_MODULE_SFF_8472_LEN;
+		}
+		break;
+
+	case FW_PORT_TYPE_QSFP:
+	case FW_PORT_TYPE_QSFP_10G:
+	case FW_PORT_TYPE_CR_QSFP:
+	case FW_PORT_TYPE_CR2_QSFP:
+	case FW_PORT_TYPE_CR4_QSFP:
+		ret = t4_i2c_rd(adapter, adapter->mbox, pi->tx_chan,
+				I2C_DEV_ADDR_A0, SFF_REV_ADDR,
+				SFF_REV_LEN, &sff_rev);
+		/* For QSFP-type ports, a revision value >= 3
+		 * means the module is SFF-8636 compliant.
+		 */
+		if (ret)
+			return ret;
+		if (sff_rev >= 0x3) {
+			modinfo->type = ETH_MODULE_SFF_8636;
+			modinfo->eeprom_len = ETH_MODULE_SFF_8636_LEN;
+		} else {
+			modinfo->type = ETH_MODULE_SFF_8436;
+			modinfo->eeprom_len = ETH_MODULE_SFF_8436_LEN;
+		}
+		break;
+
+	default:
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int cxgb4_get_module_eeprom(struct net_device *dev,
+				   struct ethtool_eeprom *eprom, u8 *data)
+{
+	int ret = 0, offset = eprom->offset, len = eprom->len;
+	struct port_info *pi = netdev_priv(dev);
+	struct adapter *adapter = pi->adapter;
+
+	memset(data, 0, eprom->len);
+	if (offset + len <= I2C_PAGE_SIZE)
+		return t4_i2c_rd(adapter, adapter->mbox, pi->tx_chan,
+				 I2C_DEV_ADDR_A0, offset, len, data);
+
+	/* offset + len spans 0xa0 and 0xa1 pages */
+	if (offset <= I2C_PAGE_SIZE) {
+		/* read 0xa0 page */
+		len = I2C_PAGE_SIZE - offset;
+		ret =  t4_i2c_rd(adapter, adapter->mbox, pi->tx_chan,
+				 I2C_DEV_ADDR_A0, offset, len, data);
+		if (ret)
+			return ret;
+		offset = I2C_PAGE_SIZE;
+		/* Remaining bytes to be read from second page =
+		 * Total length - bytes read from first page
+		 */
+		len = eprom->len - len;
+	}
+	/* Read additional optical diagnostics from page 0xa2 if supported */
+	return t4_i2c_rd(adapter, adapter->mbox, pi->tx_chan, I2C_DEV_ADDR_A2,
+			 offset, len, &data[eprom->len - len]);
+}
+
 static const struct ethtool_ops cxgb_ethtool_ops = {
 	.get_link_ksettings = get_link_ksettings,
 	.set_link_ksettings = set_link_ksettings,
@@ -1430,6 +1533,8 @@ static const struct ethtool_ops cxgb_ethtool_ops = {
 	.set_dump          = set_dump,
 	.get_dump_flag     = get_dump_flag,
 	.get_dump_data     = get_dump_data,
+	.get_module_info   = cxgb4_get_module_info,
+	.get_module_eeprom = cxgb4_get_module_eeprom,
 };
 
 void cxgb4_set_ethtool_ops(struct net_device *netdev)
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c
index 5980f30..677a3ba 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c
@@ -694,7 +694,7 @@ void clear_filter(struct adapter *adap, struct filter_entry *f)
 	if (f->smt)
 		cxgb4_smt_release(f->smt);
 
-	if (f->fs.hash && f->fs.type)
+	if ((f->fs.hash || is_t6(adap->params.chip)) && f->fs.type)
 		cxgb4_clip_release(f->dev, (const u32 *)&f->fs.val.lip, 1);
 
 	/* The zeroing of the filter rule below clears the filter valid,
@@ -1189,6 +1189,7 @@ int __cxgb4_set_filter(struct net_device *dev, int filter_id,
 		       struct filter_ctx *ctx)
 {
 	struct adapter *adapter = netdev2adap(dev);
+	unsigned int chip_ver = CHELSIO_CHIP_VERSION(adapter->params.chip);
 	unsigned int max_fidx, fidx;
 	struct filter_entry *f;
 	u32 iconf;
@@ -1225,12 +1226,18 @@ int __cxgb4_set_filter(struct net_device *dev, int filter_id,
 	 * insertion.
 	 */
 	if (fs->type == 0) { /* IPv4 */
-		/* If our IPv4 filter isn't being written to a
-		 * multiple of four filter index and there's an IPv6
-		 * filter at the multiple of 4 base slot, then we
-		 * prevent insertion.
+		/* For T6, if our IPv4 filter isn't being written to a
+		 * multiple-of-two filter index and there's an IPv6
+		 * filter at the multiple-of-2 base slot, then we need
+		 * to delete that IPv6 filter ...
+		 * For adapters below T6, an IPv6 filter occupies four
+		 * entries, so we need to delete the filter at the
+		 * multiple-of-4 base slot.
+		 */
-		fidx = filter_id & ~0x3;
+		if (chip_ver < CHELSIO_T6)
+			fidx = filter_id & ~0x3;
+		else
+			fidx = filter_id & ~0x1;
+
 		if (fidx != filter_id &&
 		    adapter->tids.ftid_tab[fidx].fs.type) {
 			f = &adapter->tids.ftid_tab[fidx];
@@ -1291,6 +1298,16 @@ int __cxgb4_set_filter(struct net_device *dev, int filter_id,
 	if (f->valid)
 		clear_filter(adapter, f);
 
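+	/* On T6, non-wildcard IPv6 filters also need a CLIP table
+	 * entry; claim one before writing the filter (clear_filter()
+	 * releases it again).
+	 */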
+	if (is_t6(adapter->params.chip) && fs->type &&
+	    ipv6_addr_type((const struct in6_addr *)fs->val.lip) !=
+	    IPV6_ADDR_ANY) {
+		ret = cxgb4_clip_get(dev, (const u32 *)&fs->val.lip, 1);
+		if (ret) {
+			cxgb4_clear_ftid(&adapter->tids, filter_id, PF_INET6);
+			return ret;
+		}
+	}
+
 	/* Convert the filter specification into our internal format.
 	 * We copy the PF/VF specification into the Outer VLAN field
 	 * here so the rest of the code -- including the interface to
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
index 6f900ff..3293980 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
@@ -65,6 +65,7 @@
 #include <net/addrconf.h>
 #include <linux/uaccess.h>
 #include <linux/crash_dump.h>
+#include <net/udp_tunnel.h>
 
 #include "cxgb4.h"
 #include "cxgb4_filter.h"
@@ -1673,7 +1674,7 @@ int cxgb4_flush_eq_cache(struct net_device *dev)
 {
 	struct adapter *adap = netdev2adap(dev);
 
-	return t4_sge_ctxt_flush(adap, adap->mbox);
+	return t4_sge_ctxt_flush(adap, adap->mbox, CTXT_EGRESS);
 }
 EXPORT_SYMBOL(cxgb4_flush_eq_cache);
 
@@ -2987,6 +2988,151 @@ static int cxgb_setup_tc(struct net_device *dev, enum tc_setup_type type,
 	}
 }
 
+static void cxgb_del_udp_tunnel(struct net_device *netdev,
+				struct udp_tunnel_info *ti)
+{
+	struct port_info *pi = netdev_priv(netdev);
+	struct adapter *adapter = pi->adapter;
+	unsigned int chip_ver = CHELSIO_CHIP_VERSION(adapter->params.chip);
+	u8 match_all_mac[] = { 0, 0, 0, 0, 0, 0 };
+	int ret = 0, i;
+
+	if (chip_ver < CHELSIO_T6)
+		return;
+
+	switch (ti->type) {
+	case UDP_TUNNEL_TYPE_VXLAN:
+		if (!adapter->vxlan_port_cnt ||
+		    adapter->vxlan_port != ti->port)
+			return; /* Invalid VxLAN destination port */
+
+		adapter->vxlan_port_cnt--;
+		if (adapter->vxlan_port_cnt)
+			return;
+
+		adapter->vxlan_port = 0;
+		t4_write_reg(adapter, MPS_RX_VXLAN_TYPE_A, 0);
+		break;
+	default:
+		return;
+	}
+
+	/* Match-all MAC entries can be deleted only after all tunnel
+	 * ports are brought down or removed.
+	 */
+	if (!adapter->rawf_cnt)
+		return;
+	for_each_port(adapter, i) {
+		pi = adap2pinfo(adapter, i);
+		ret = t4_free_raw_mac_filt(adapter, pi->viid,
+					   match_all_mac, match_all_mac,
+					   adapter->rawf_start +
+					    pi->port_id,
+					   1, pi->port_id, true);
+		if (ret < 0) {
+			netdev_info(netdev, "Failed to free mac filter entry, for port %d\n",
+				    i);
+			return;
+		}
+		atomic_dec(&adapter->mps_encap[adapter->rawf_start +
+			   pi->port_id].refcnt);
+	}
+}
+
+static void cxgb_add_udp_tunnel(struct net_device *netdev,
+				struct udp_tunnel_info *ti)
+{
+	struct port_info *pi = netdev_priv(netdev);
+	struct adapter *adapter = pi->adapter;
+	unsigned int chip_ver = CHELSIO_CHIP_VERSION(adapter->params.chip);
+	u8 match_all_mac[] = { 0, 0, 0, 0, 0, 0 };
+	int i, ret;
+
+	if (chip_ver < CHELSIO_T6)
+		return;
+
+	switch (ti->type) {
+	case UDP_TUNNEL_TYPE_VXLAN:
+		/* For T6, the firmware reserves the last two MPS TCAM
+		 * entries for storing the match-all MAC filter (config
+		 * file entry).
+		 */
+		if (!adapter->rawf_cnt)
+			return;
+
+		/* The callback for adding a vxlan port can be invoked with
+		 * the same port for both IPv4 and IPv6. We must not disable
+		 * the offload when the port was added for both protocols
+		 * and only one of them is later removed.
+		 */
+		if (adapter->vxlan_port_cnt &&
+		    adapter->vxlan_port == ti->port) {
+			adapter->vxlan_port_cnt++;
+			return;
+		}
+
+		/* We will support only one VxLAN port */
+		if (adapter->vxlan_port_cnt) {
+			netdev_info(netdev, "UDP port %d already offloaded, not adding port %d\n",
+				    be16_to_cpu(adapter->vxlan_port),
+				    be16_to_cpu(ti->port));
+			return;
+		}
+
+		adapter->vxlan_port = ti->port;
+		adapter->vxlan_port_cnt = 1;
+
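+		/* Tell MPS to treat this UDP port as VXLAN */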
+		t4_write_reg(adapter, MPS_RX_VXLAN_TYPE_A,
+			     VXLAN_V(be16_to_cpu(ti->port)) | VXLAN_EN_F);
+		break;
+	default:
+		return;
+	}
+
+	/* Create a 'match all' mac filter entry for the inner MAC
+	 * if the raw mac interface is supported. Once the Linux kernel
+	 * provides driver entry points for adding/deleting the inner
+	 * MAC addresses, we will remove this 'match all' entry and
+	 * fall back to adding exact-match filters.
+	 */
+	if (adapter->rawf_cnt) {
+		for_each_port(adapter, i) {
+			pi = adap2pinfo(adapter, i);
+
+			ret = t4_alloc_raw_mac_filt(adapter, pi->viid,
+						    match_all_mac,
+						    match_all_mac,
+						    adapter->rawf_start +
+						    pi->port_id,
+						    1, pi->port_id, true);
+			if (ret < 0) {
+				netdev_info(netdev, "Failed to allocate a mac filter entry, not adding port %d\n",
+					    be16_to_cpu(ti->port));
+				cxgb_del_udp_tunnel(netdev, ti);
+				return;
+			}
+			atomic_inc(&adapter->mps_encap[ret].refcnt);
+		}
+	}
+}
+
+static netdev_features_t cxgb_features_check(struct sk_buff *skb,
+					     struct net_device *dev,
+					     netdev_features_t features)
+{
+	struct port_info *pi = netdev_priv(dev);
+	struct adapter *adapter = pi->adapter;
+
+	if (CHELSIO_CHIP_VERSION(adapter->params.chip) < CHELSIO_T6)
+		return features;
+
+	/* Check if hw supports offload for this packet */
+	if (!skb->encapsulation || cxgb_encap_offload_supported(skb))
+		return features;
+
+	/* Offload is not supported for this encapsulated packet */
+	return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
+}
+
 static netdev_features_t cxgb_fix_features(struct net_device *dev,
 					   netdev_features_t features)
 {
@@ -3018,6 +3164,9 @@ static const struct net_device_ops cxgb4_netdev_ops = {
 #endif /* CONFIG_CHELSIO_T4_FCOE */
 	.ndo_set_tx_maxrate   = cxgb_set_tx_maxrate,
 	.ndo_setup_tc         = cxgb_setup_tc,
+	.ndo_udp_tunnel_add   = cxgb_add_udp_tunnel,
+	.ndo_udp_tunnel_del   = cxgb_del_udp_tunnel,
+	.ndo_features_check   = cxgb_features_check,
 	.ndo_fix_features     = cxgb_fix_features,
 };
 
@@ -5080,6 +5229,10 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 			NETIF_F_RXCSUM | NETIF_F_RXHASH |
 			NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX |
 			NETIF_F_HW_TC;
+
+		if (CHELSIO_CHIP_VERSION(chip) > CHELSIO_T5)
+			netdev->hw_features |= NETIF_F_GSO_UDP_TUNNEL;
+
 		if (highdma)
 			netdev->hw_features |= NETIF_F_HIGHDMA;
 		netdev->features |= netdev->hw_features;
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c
index d4a548a..9b9f3f9 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c
@@ -405,9 +405,7 @@ static void cxgb4_process_flow_actions(struct net_device *in,
 		} else if (is_tcf_gact_shot(a)) {
 			fs->action = FILTER_DROP;
 		} else if (is_tcf_mirred_egress_redirect(a)) {
-			int ifindex = tcf_mirred_ifindex(a);
-			struct net_device *out = __dev_get_by_index(dev_net(in),
-								    ifindex);
+			struct net_device *out = tcf_mirred_dev(a);
 			struct port_info *pi = netdev_priv(out);
 
 			fs->action = FILTER_SWITCH;
@@ -582,14 +580,14 @@ static int cxgb4_validate_flow_actions(struct net_device *dev,
 			/* Do nothing */
 		} else if (is_tcf_mirred_egress_redirect(a)) {
 			struct adapter *adap = netdev2adap(dev);
-			struct net_device *n_dev;
-			unsigned int i, ifindex;
+			struct net_device *n_dev, *target_dev;
+			unsigned int i;
 			bool found = false;
 
-			ifindex = tcf_mirred_ifindex(a);
+			target_dev = tcf_mirred_dev(a);
 			for_each_port(adap, i) {
 				n_dev = adap->port[i];
-				if (ifindex == n_dev->ifindex) {
+				if (target_dev == n_dev) {
 					found = true;
 					break;
 				}
@@ -765,9 +763,7 @@ static void ch_flower_stats_handler(struct work_struct *work)
 
 	rhashtable_walk_enter(&adap->flower_tbl, &iter);
 	do {
-		flower_entry = ERR_PTR(rhashtable_walk_start(&iter));
-		if (IS_ERR(flower_entry))
-			goto walk_stop;
+		rhashtable_walk_start(&iter);
 
 		while ((flower_entry = rhashtable_walk_next(&iter)) &&
 		       !IS_ERR(flower_entry)) {
@@ -786,8 +782,9 @@ static void ch_flower_stats_handler(struct work_struct *work)
 				spin_unlock(&flower_entry->lock);
 			}
 		}
-walk_stop:
+
 		rhashtable_walk_stop(&iter);
+
 	} while (flower_entry == ERR_PTR(-EAGAIN));
 	rhashtable_walk_exit(&iter);
 	mod_timer(&adap->flower_stats_timer, jiffies + STATS_CHECK_PERIOD);
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32.c
index cd0cd13..ab174bc 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32.c
@@ -114,14 +114,14 @@ static int fill_action_fields(struct adapter *adap,
 
 		/* Re-direct to specified port in hardware. */
 		if (is_tcf_mirred_egress_redirect(a)) {
-			struct net_device *n_dev;
-			unsigned int i, index;
+			struct net_device *n_dev, *target_dev;
 			bool found = false;
+			unsigned int i;
 
-			index = tcf_mirred_ifindex(a);
+			target_dev = tcf_mirred_dev(a);
 			for_each_port(adap, i) {
 				n_dev = adap->port[i];
-				if (index == n_dev->ifindex) {
+				if (target_dev == n_dev) {
 					fs->action = FILTER_SWITCH;
 					fs->eport = i;
 					found = true;
diff --git a/drivers/net/ethernet/chelsio/cxgb4/sge.c b/drivers/net/ethernet/chelsio/cxgb4/sge.c
index 922f2f9..eab781f 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/sge.c
@@ -770,12 +770,19 @@ static inline unsigned int flits_to_desc(unsigned int n)
  *	Returns whether an Ethernet packet is small enough to fit as
  *	immediate data. Return value corresponds to headroom required.
  */
-static inline int is_eth_imm(const struct sk_buff *skb)
+static inline int is_eth_imm(const struct sk_buff *skb, unsigned int chip_ver)
 {
-	int hdrlen = skb_shinfo(skb)->gso_size ?
-			sizeof(struct cpl_tx_pkt_lso_core) : 0;
+	int hdrlen = 0;
 
-	hdrlen += sizeof(struct cpl_tx_pkt);
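+	/* T6 and later can offload encapsulated (tunnel) GSO, which
+	 * carries the larger tunnel-LSO CPL header.
+	 */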
+	if (skb->encapsulation && skb_shinfo(skb)->gso_size &&
+	    chip_ver > CHELSIO_T5) {
+		hdrlen = sizeof(struct cpl_tx_tnl_lso);
+		hdrlen += sizeof(struct cpl_tx_pkt_core);
+	} else {
+		hdrlen = skb_shinfo(skb)->gso_size ?
+			 sizeof(struct cpl_tx_pkt_lso_core) : 0;
+		hdrlen += sizeof(struct cpl_tx_pkt);
+	}
 	if (skb->len <= MAX_IMM_TX_PKT_LEN - hdrlen)
 		return hdrlen;
 	return 0;
@@ -788,10 +795,11 @@ static inline int is_eth_imm(const struct sk_buff *skb)
  *	Returns the number of flits needed for a Tx WR for the given Ethernet
  *	packet, including the needed WR and CPL headers.
  */
-static inline unsigned int calc_tx_flits(const struct sk_buff *skb)
+static inline unsigned int calc_tx_flits(const struct sk_buff *skb,
+					 unsigned int chip_ver)
 {
 	unsigned int flits;
-	int hdrlen = is_eth_imm(skb);
+	int hdrlen = is_eth_imm(skb, chip_ver);
 
 	/* If the skb is small enough, we can pump it out as a work request
 	 * with only immediate data.  In that case we just have to have the
@@ -810,13 +818,20 @@ static inline unsigned int calc_tx_flits(const struct sk_buff *skb)
 	 * with an embedded TX Packet Write CPL message.
 	 */
 	flits = sgl_len(skb_shinfo(skb)->nr_frags + 1);
-	if (skb_shinfo(skb)->gso_size)
-		flits += (sizeof(struct fw_eth_tx_pkt_wr) +
-			  sizeof(struct cpl_tx_pkt_lso_core) +
-			  sizeof(struct cpl_tx_pkt_core)) / sizeof(__be64);
-	else
+	if (skb_shinfo(skb)->gso_size) {
+		if (skb->encapsulation && chip_ver > CHELSIO_T5)
+			hdrlen = sizeof(struct fw_eth_tx_pkt_wr) +
+				 sizeof(struct cpl_tx_tnl_lso);
+		else
+			hdrlen = sizeof(struct fw_eth_tx_pkt_wr) +
+				 sizeof(struct cpl_tx_pkt_lso_core);
+
+		hdrlen += sizeof(struct cpl_tx_pkt_core);
+		flits += (hdrlen / sizeof(__be64));
+	} else {
 		flits += (sizeof(struct fw_eth_tx_pkt_wr) +
 			  sizeof(struct cpl_tx_pkt_core)) / sizeof(__be64);
+	}
 	return flits;
 }
 
@@ -827,9 +842,10 @@ static inline unsigned int calc_tx_flits(const struct sk_buff *skb)
  *	Returns the number of Tx descriptors needed for the given Ethernet
  *	packet, including the needed WR and CPL headers.
  */
-static inline unsigned int calc_tx_descs(const struct sk_buff *skb)
+static inline unsigned int calc_tx_descs(const struct sk_buff *skb,
+					 unsigned int chip_ver)
 {
-	return flits_to_desc(calc_tx_flits(skb));
+	return flits_to_desc(calc_tx_flits(skb, chip_ver));
 }
 
 /**
@@ -1154,6 +1170,102 @@ cxgb_fcoe_offload(struct sk_buff *skb, struct adapter *adap,
 }
 #endif /* CONFIG_CHELSIO_T4_FCOE */
 
+/* Returns the tunnel type if the hardware supports offloading it.
+ * Called only for T5 and onwards.
+ */
+enum cpl_tx_tnl_lso_type cxgb_encap_offload_supported(struct sk_buff *skb)
+{
+	u8 l4_hdr = 0;
+	enum cpl_tx_tnl_lso_type tnl_type = TX_TNL_TYPE_OPAQUE;
+	struct port_info *pi = netdev_priv(skb->dev);
+	struct adapter *adapter = pi->adapter;
+
+	if (skb->inner_protocol_type != ENCAP_TYPE_ETHER ||
+	    skb->inner_protocol != htons(ETH_P_TEB))
+		return tnl_type;
+
+	switch (vlan_get_protocol(skb)) {
+	case htons(ETH_P_IP):
+		l4_hdr = ip_hdr(skb)->protocol;
+		break;
+	case htons(ETH_P_IPV6):
+		l4_hdr = ipv6_hdr(skb)->nexthdr;
+		break;
+	default:
+		return tnl_type;
+	}
+
+	switch (l4_hdr) {
+	case IPPROTO_UDP:
+		if (adapter->vxlan_port == udp_hdr(skb)->dest)
+			tnl_type = TX_TNL_TYPE_VXLAN;
+		break;
+	default:
+		return tnl_type;
+	}
+
+	return tnl_type;
+}
+
+static inline void t6_fill_tnl_lso(struct sk_buff *skb,
+				   struct cpl_tx_tnl_lso *tnl_lso,
+				   enum cpl_tx_tnl_lso_type tnl_type)
+{
+	u32 val;
+	int in_eth_xtra_len;
+	int l3hdr_len = skb_network_header_len(skb);
+	int eth_xtra_len = skb_network_offset(skb) - ETH_HLEN;
+	const struct skb_shared_info *ssi = skb_shinfo(skb);
+	bool v6 = (ip_hdr(skb)->version == 6);
+
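+	/* The first word describes the outer (encapsulating) headers;
+	 * the hardware fixes up the outer IP length (and, for IPv4,
+	 * the header checksum) on each segment.
+	 */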
+	val = CPL_TX_TNL_LSO_OPCODE_V(CPL_TX_TNL_LSO) |
+	      CPL_TX_TNL_LSO_FIRST_F |
+	      CPL_TX_TNL_LSO_LAST_F |
+	      (v6 ? CPL_TX_TNL_LSO_IPV6OUT_F : 0) |
+	      CPL_TX_TNL_LSO_ETHHDRLENOUT_V(eth_xtra_len / 4) |
+	      CPL_TX_TNL_LSO_IPHDRLENOUT_V(l3hdr_len / 4) |
+	      (v6 ? 0 : CPL_TX_TNL_LSO_IPHDRCHKOUT_F) |
+	      CPL_TX_TNL_LSO_IPLENSETOUT_F |
+	      (v6 ? 0 : CPL_TX_TNL_LSO_IPIDINCOUT_F);
+	tnl_lso->op_to_IpIdSplitOut = htonl(val);
+
+	tnl_lso->IpIdOffsetOut = 0;
+
+	/* Get the tunnel header length */
+	val = skb_inner_mac_header(skb) - skb_mac_header(skb);
+	in_eth_xtra_len = skb_inner_network_header(skb) -
+			  skb_inner_mac_header(skb) - ETH_HLEN;
+
+	switch (tnl_type) {
+	case TX_TNL_TYPE_VXLAN:
+		tnl_lso->UdpLenSetOut_to_TnlHdrLen =
+			htons(CPL_TX_TNL_LSO_UDPCHKCLROUT_F |
+			CPL_TX_TNL_LSO_UDPLENSETOUT_F);
+		break;
+	default:
+		tnl_lso->UdpLenSetOut_to_TnlHdrLen = 0;
+		break;
+	}
+
+	tnl_lso->UdpLenSetOut_to_TnlHdrLen |=
+		 htons(CPL_TX_TNL_LSO_TNLHDRLEN_V(val) |
+		       CPL_TX_TNL_LSO_TNLTYPE_V(tnl_type));
+
+	tnl_lso->r1 = 0;
+
+	val = CPL_TX_TNL_LSO_ETHHDRLEN_V(in_eth_xtra_len / 4) |
+	      CPL_TX_TNL_LSO_IPV6_V(inner_ip_hdr(skb)->version == 6) |
+	      CPL_TX_TNL_LSO_IPHDRLEN_V(skb_inner_network_header_len(skb) / 4) |
+	      CPL_TX_TNL_LSO_TCPHDRLEN_V(inner_tcp_hdrlen(skb) / 4);
+	tnl_lso->Flow_to_TcpHdrLen = htonl(val);
+
+	tnl_lso->IpIdOffset = htons(0);
+
+	tnl_lso->IpIdSplit_to_Mss = htons(CPL_TX_TNL_LSO_MSS_V(ssi->gso_size));
+	tnl_lso->TCPSeqOffset = htonl(0);
+	tnl_lso->EthLenOffset_Size = htonl(CPL_TX_TNL_LSO_SIZE_V(skb->len));
+}
+
 /**
  *	t4_eth_xmit - add a packet to an Ethernet Tx queue
  *	@skb: the packet
@@ -1177,6 +1289,9 @@ netdev_tx_t t4_eth_xmit(struct sk_buff *skb, struct net_device *dev)
 	bool immediate = false;
 	int len, max_pkt_len;
 	bool ptp_enabled = is_ptp_enabled(skb, dev);
+	unsigned int chip_ver;
+	enum cpl_tx_tnl_lso_type tnl_type = TX_TNL_TYPE_OPAQUE;
+
 #ifdef CONFIG_CHELSIO_T4_FCOE
 	int err;
 #endif /* CONFIG_CHELSIO_T4_FCOE */
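+	/* On T6 and later the active TID region may start at a
+	 * non-zero index.
+	 */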
@@ -1227,7 +1342,8 @@ out_free:	dev_kfree_skb_any(skb);
 	}
 #endif /* CONFIG_CHELSIO_T4_FCOE */
 
-	flits = calc_tx_flits(skb);
+	chip_ver = CHELSIO_CHIP_VERSION(adap->params.chip);
+	flits = calc_tx_flits(skb, chip_ver);
 	ndesc = flits_to_desc(flits);
 	credits = txq_avail(&q->q) - ndesc;
 
@@ -1241,9 +1357,12 @@ out_free:	dev_kfree_skb_any(skb);
 		return NETDEV_TX_BUSY;
 	}
 
-	if (is_eth_imm(skb))
+	if (is_eth_imm(skb, chip_ver))
 		immediate = true;
 
+	if (skb->encapsulation && chip_ver > CHELSIO_T5)
+		tnl_type = cxgb_encap_offload_supported(skb);
+
 	if (!immediate &&
 	    unlikely(map_skb(adap->pdev_dev, skb, addr) < 0)) {
 		q->mapping_err++;
@@ -1270,33 +1389,58 @@ out_free:	dev_kfree_skb_any(skb);
 		bool v6 = (ssi->gso_type & SKB_GSO_TCPV6) != 0;
 		int l3hdr_len = skb_network_header_len(skb);
 		int eth_xtra_len = skb_network_offset(skb) - ETH_HLEN;
+		struct cpl_tx_tnl_lso *tnl_lso = (void *)(wr + 1);
 
-		len += sizeof(*lso);
+		if (tnl_type)
+			len += sizeof(*tnl_lso);
+		else
+			len += sizeof(*lso);
+
 		wr->op_immdlen = htonl(FW_WR_OP_V(FW_ETH_TX_PKT_WR) |
 				       FW_WR_IMMDLEN_V(len));
-		lso->c.lso_ctrl = htonl(LSO_OPCODE_V(CPL_TX_PKT_LSO) |
-					LSO_FIRST_SLICE_F | LSO_LAST_SLICE_F |
-					LSO_IPV6_V(v6) |
-					LSO_ETHHDR_LEN_V(eth_xtra_len / 4) |
-					LSO_IPHDR_LEN_V(l3hdr_len / 4) |
-					LSO_TCPHDR_LEN_V(tcp_hdr(skb)->doff));
-		lso->c.ipid_ofst = htons(0);
-		lso->c.mss = htons(ssi->gso_size);
-		lso->c.seqno_offset = htonl(0);
-		if (is_t4(adap->params.chip))
-			lso->c.len = htonl(skb->len);
-		else
-			lso->c.len = htonl(LSO_T5_XFER_SIZE_V(skb->len));
-		cpl = (void *)(lso + 1);
+		if (tnl_type) {
+			struct iphdr *iph = ip_hdr(skb);
 
-		if (CHELSIO_CHIP_VERSION(adap->params.chip) <= CHELSIO_T5)
-			cntrl =	TXPKT_ETHHDR_LEN_V(eth_xtra_len);
-		else
-			cntrl = T6_TXPKT_ETHHDR_LEN_V(eth_xtra_len);
+			t6_fill_tnl_lso(skb, tnl_lso, tnl_type);
+			cpl = (void *)(tnl_lso + 1);
+			/* Driver is expected to compute partial checksum that
+			 * does not include the IP Total Length.
+			 */
+			if (iph->version == 4) {
+				iph->check = 0;
+				iph->tot_len = 0;
+				iph->check = (u16)(~ip_fast_csum((u8 *)iph,
+								 iph->ihl));
+			}
+			if (skb->ip_summed == CHECKSUM_PARTIAL)
+				cntrl = hwcsum(adap->params.chip, skb);
+		} else {
+			lso->c.lso_ctrl = htonl(LSO_OPCODE_V(CPL_TX_PKT_LSO) |
+					  LSO_FIRST_SLICE_F | LSO_LAST_SLICE_F |
+					  LSO_IPV6_V(v6) |
+					  LSO_ETHHDR_LEN_V(eth_xtra_len / 4) |
+					  LSO_IPHDR_LEN_V(l3hdr_len / 4) |
+					  LSO_TCPHDR_LEN_V(tcp_hdr(skb)->doff));
+			lso->c.ipid_ofst = htons(0);
+			lso->c.mss = htons(ssi->gso_size);
+			lso->c.seqno_offset = htonl(0);
+			if (is_t4(adap->params.chip))
+				lso->c.len = htonl(skb->len);
+			else
+				lso->c.len =
+					htonl(LSO_T5_XFER_SIZE_V(skb->len));
+			cpl = (void *)(lso + 1);
 
-		cntrl |= TXPKT_CSUM_TYPE_V(v6 ?
-					   TX_CSUM_TCPIP6 : TX_CSUM_TCPIP) |
-			 TXPKT_IPHDR_LEN_V(l3hdr_len);
+			if (CHELSIO_CHIP_VERSION(adap->params.chip)
+			    <= CHELSIO_T5)
+				cntrl = TXPKT_ETHHDR_LEN_V(eth_xtra_len);
+			else
+				cntrl = T6_TXPKT_ETHHDR_LEN_V(eth_xtra_len);
+
+			cntrl |= TXPKT_CSUM_TYPE_V(v6 ?
+				 TX_CSUM_TCPIP6 : TX_CSUM_TCPIP) |
+				 TXPKT_IPHDR_LEN_V(l3hdr_len);
+		}
 		q->tso++;
 		q->tx_cso += ssi->gso_segs;
 	} else {
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
index 375ef86..6d76851 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
@@ -524,11 +524,14 @@ int t4_memory_rw(struct adapter *adap, int win, int mtype, u32 addr,
 	 * MEM_EDC1 = 1
 	 * MEM_MC   = 2 -- MEM_MC for chips with only 1 memory controller
 	 * MEM_MC1  = 3 -- for chips with 2 memory controllers (e.g. T5)
+	 * MEM_HMA  = 4
 	 */
 	edc_size  = EDRAM0_SIZE_G(t4_read_reg(adap, MA_EDRAM0_BAR_A));
-	if (mtype != MEM_MC1)
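+	/* HMA is mapped where MC0 would be, i.e. right after both
+	 * EDC regions.
+	 */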
+	if (mtype == MEM_HMA) {
+		memoffset = 2 * (edc_size * 1024 * 1024);
+	} else if (mtype != MEM_MC1) {
 		memoffset = (mtype * (edc_size * 1024 * 1024));
-	else {
+	} else {
 		mc_size = EXT_MEM0_SIZE_G(t4_read_reg(adap,
 						      MA_EXT_MEMORY0_BAR_A));
 		memoffset = (MEM_MC0 * edc_size + mc_size) * 1024 * 1024;
@@ -4923,6 +4926,14 @@ void t4_intr_disable(struct adapter *adapter)
 	t4_set_reg_field(adapter, PL_INT_MAP0_A, 1 << pf, 0);
 }
 
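+/* Return the number of entries in the chip's RSS mapping table;
+ * T6 doubled the table size.
+ */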
+unsigned int t4_chip_rss_size(struct adapter *adap)
+{
+	if (CHELSIO_CHIP_VERSION(adap->params.chip) <= CHELSIO_T5)
+		return RSS_NENTRIES;
+	else
+		return T6_RSS_NENTRIES;
+}
+
 /**
  *	t4_config_rss_range - configure a portion of the RSS mapping table
  *	@adapter: the adapter
@@ -5061,10 +5072,11 @@ static int rd_rss_row(struct adapter *adap, int row, u32 *val)
  */
 int t4_read_rss(struct adapter *adapter, u16 *map)
 {
+	int i, ret, nentries;
 	u32 val;
-	int i, ret;
 
-	for (i = 0; i < RSS_NENTRIES / 2; ++i) {
+	nentries = t4_chip_rss_size(adapter);
+	for (i = 0; i < nentries / 2; ++i) {
 		ret = rd_rss_row(adapter, i, &val);
 		if (ret)
 			return ret;
@@ -6071,6 +6083,7 @@ const char *t4_get_port_type_description(enum fw_port_type port_type)
 		"CR2_QSFP",
 		"SFP28",
 		"KR_SFP28",
+		"KR_XLAUI"
 	};
 
 	if (port_type < ARRAY_SIZE(port_type_description))
@@ -6526,18 +6539,21 @@ void t4_sge_decode_idma_state(struct adapter *adapter, int state)
  *      t4_sge_ctxt_flush - flush the SGE context cache
  *      @adap: the adapter
  *      @mbox: mailbox to use for the FW command
+ *      @ctx_type: Egress or Ingress
  *
  *      Issues a FW command through the given mailbox to flush the
  *      SGE context cache.
  */
-int t4_sge_ctxt_flush(struct adapter *adap, unsigned int mbox)
+int t4_sge_ctxt_flush(struct adapter *adap, unsigned int mbox, int ctxt_type)
 {
 	int ret;
 	u32 ldst_addrspace;
 	struct fw_ldst_cmd c;
 
 	memset(&c, 0, sizeof(c));
-	ldst_addrspace = FW_LDST_CMD_ADDRSPACE_V(FW_LDST_ADDRSPC_SGE_EGRC);
+	ldst_addrspace = FW_LDST_CMD_ADDRSPACE_V(ctxt_type == CTXT_EGRESS ?
+						 FW_LDST_ADDRSPC_SGE_EGRC :
+						 FW_LDST_ADDRSPC_SGE_INGC);
 	c.op_to_addrspace = cpu_to_be32(FW_CMD_OP_V(FW_LDST_CMD) |
 					FW_CMD_REQUEST_F | FW_CMD_READ_F |
 					ldst_addrspace);
@@ -7451,6 +7467,112 @@ int t4_set_rxmode(struct adapter *adap, unsigned int mbox, unsigned int viid,
 }
 
 /**
+ *	t4_free_raw_mac_filt - Frees a raw mac entry in mps tcam
+ *	@adap: the adapter
+ *	@viid: the VI id
+ *	@addr: the MAC address
+ *	@mask: the mask
+ *	@idx: index of the entry in mps tcam
+ *	@lookup_type: MAC address for inner (1) or outer (0) header
+ *	@port_id: the port index
+ *	@sleep_ok: call is allowed to sleep
+ *
+ *	Removes the MAC entry at the specified index using the raw MAC interface.
+ *
+ *	Returns a negative error number on failure.
+ */
+int t4_free_raw_mac_filt(struct adapter *adap, unsigned int viid,
+			 const u8 *addr, const u8 *mask, unsigned int idx,
+			 u8 lookup_type, u8 port_id, bool sleep_ok)
+{
+	struct fw_vi_mac_cmd c;
+	struct fw_vi_mac_raw *p = &c.u.raw;
+	u32 val;
+
+	memset(&c, 0, sizeof(c));
+	c.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_VI_MAC_CMD) |
+				   FW_CMD_REQUEST_F | FW_CMD_WRITE_F |
+				   FW_CMD_EXEC_V(0) |
+				   FW_VI_MAC_CMD_VIID_V(viid));
+	val = FW_CMD_LEN16_V(1) |
+	      FW_VI_MAC_CMD_ENTRY_TYPE_V(FW_VI_MAC_TYPE_RAW);
+	c.freemacs_to_len16 = cpu_to_be32(FW_VI_MAC_CMD_FREEMACS_V(0) |
+					  FW_CMD_LEN16_V(val));
+
+	p->raw_idx_pkd = cpu_to_be32(FW_VI_MAC_CMD_RAW_IDX_V(idx) |
+				     FW_VI_MAC_ID_BASED_FREE);
+
+	/* Lookup Type. Outer header: 0, Inner header: 1 */
+	p->data0_pkd = cpu_to_be32(DATALKPTYPE_V(lookup_type) |
+				   DATAPORTNUM_V(port_id));
+	/* Lookup mask and port mask */
+	p->data0m_pkd = cpu_to_be64(DATALKPTYPE_V(DATALKPTYPE_M) |
+				    DATAPORTNUM_V(DATAPORTNUM_M));
+
+	/* Copy the address and the mask */
+	memcpy((u8 *)&p->data1[0] + 2, addr, ETH_ALEN);
+	memcpy((u8 *)&p->data1m[0] + 2, mask, ETH_ALEN);
+
+	return t4_wr_mbox_meat(adap, adap->mbox, &c, sizeof(c), &c, sleep_ok);
+}
+
+/**
+ *	t4_alloc_raw_mac_filt - Adds a mac entry in mps tcam
+ *	@adap: the adapter
+ *	@viid: the VI id
+ *	@addr: the MAC address
+ *	@mask: the mask
+ *	@idx: index at which to add this entry
+ *	@lookup_type: MAC address for inner (1) or outer (0) header
+ *	@port_id: the port index
+ *	@sleep_ok: call is allowed to sleep
+ *
+ *	Adds the MAC entry at the specified index using the raw MAC interface.
+ *
+ *	Returns a negative error number or the allocated index for this mac.
+ */
+int t4_alloc_raw_mac_filt(struct adapter *adap, unsigned int viid,
+			  const u8 *addr, const u8 *mask, unsigned int idx,
+			  u8 lookup_type, u8 port_id, bool sleep_ok)
+{
+	int ret = 0;
+	struct fw_vi_mac_cmd c;
+	struct fw_vi_mac_raw *p = &c.u.raw;
+	u32 val;
+
+	memset(&c, 0, sizeof(c));
+	c.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_VI_MAC_CMD) |
+				   FW_CMD_REQUEST_F | FW_CMD_WRITE_F |
+				   FW_VI_MAC_CMD_VIID_V(viid));
+	val = FW_CMD_LEN16_V(1) |
+	      FW_VI_MAC_CMD_ENTRY_TYPE_V(FW_VI_MAC_TYPE_RAW);
+	c.freemacs_to_len16 = cpu_to_be32(val);
+
+	/* Raw MPS TCAM index at which to add this entry */
+	p->raw_idx_pkd = cpu_to_be32(FW_VI_MAC_CMD_RAW_IDX_V(idx));
+
+	/* Lookup Type. Outer header: 0, Inner header: 1 */
+	p->data0_pkd = cpu_to_be32(DATALKPTYPE_V(lookup_type) |
+				   DATAPORTNUM_V(port_id));
+	/* Lookup mask and port mask */
+	p->data0m_pkd = cpu_to_be64(DATALKPTYPE_V(DATALKPTYPE_M) |
+				    DATAPORTNUM_V(DATAPORTNUM_M));
+
+	/* Copy the address and the mask */
+	memcpy((u8 *)&p->data1[0] + 2, addr, ETH_ALEN);
+	memcpy((u8 *)&p->data1m[0] + 2, mask, ETH_ALEN);
+
+	ret = t4_wr_mbox_meat(adap, adap->mbox, &c, sizeof(c), &c, sleep_ok);
+	if (ret == 0) {
+		ret = FW_VI_MAC_CMD_RAW_IDX_G(be32_to_cpu(p->raw_idx_pkd));
+		if (ret != idx)
+			ret = -ENOMEM;
+	}
+
+	return ret;
+}
+
+/**
  *	t4_alloc_mac_filt - allocates exact-match filters for MAC addresses
  *	@adap: the adapter
  *	@mbox: mailbox to use for the FW command
@@ -8491,22 +8613,6 @@ static int t4_get_flash_params(struct adapter *adap)
 	return 0;
 }
 
-static void set_pcie_completion_timeout(struct adapter *adapter, u8 range)
-{
-	u16 val;
-	u32 pcie_cap;
-
-	pcie_cap = pci_find_capability(adapter->pdev, PCI_CAP_ID_EXP);
-	if (pcie_cap) {
-		pci_read_config_word(adapter->pdev,
-				     pcie_cap + PCI_EXP_DEVCTL2, &val);
-		val &= ~PCI_EXP_DEVCTL2_COMP_TIMEOUT;
-		val |= range;
-		pci_write_config_word(adapter->pdev,
-				      pcie_cap + PCI_EXP_DEVCTL2, val);
-	}
-}
-
 /**
  *	t4_prep_adapter - prepare SW and HW for operation
  *	@adapter: the adapter
@@ -8592,8 +8698,9 @@ int t4_prep_adapter(struct adapter *adapter)
 	adapter->params.portvec = 1;
 	adapter->params.vpd.cclk = 50000;
 
-	/* Set pci completion timeout value to 4 seconds. */
-	set_pcie_completion_timeout(adapter, 0xd);
+	/* Set PCIe completion timeout to 4 seconds. */
+	pcie_capability_clear_and_set_word(adapter->pdev, PCI_EXP_DEVCTL2,
+					   PCI_EXP_DEVCTL2_COMP_TIMEOUT, 0xd);
 	return 0;
 }
 
@@ -9736,3 +9843,59 @@ int t4_sched_params(struct adapter *adapter, int type, int level, int mode,
 	return t4_wr_mbox_meat(adapter, adapter->mbox, &cmd, sizeof(cmd),
 			       NULL, 1);
 }
+
+/**
+ *	t4_i2c_rd - read I2C data from adapter
+ *	@adap: the adapter
+ *	@mbox: mailbox to use for the FW command
+ *	@port: Port number if per-port device; <0 if not
+ *	@devid: per-port device ID or absolute device ID
+ *	@offset: byte offset into device I2C space
+ *	@len: byte length of I2C space data
+ *	@buf: buffer in which to return I2C data
+ *
+ *	Reads the I2C data from the indicated device and location.
+ */
+int t4_i2c_rd(struct adapter *adap, unsigned int mbox, int port,
+	      unsigned int devid, unsigned int offset,
+	      unsigned int len, u8 *buf)
+{
+	struct fw_ldst_cmd ldst_cmd, ldst_rpl;
+	unsigned int i2c_max = sizeof(ldst_cmd.u.i2c.data);
+	int ret = 0;
+
+	if (len > I2C_PAGE_SIZE)
+		return -EINVAL;
+
+	/* Don't allow reads that span multiple pages */
+	if (offset < I2C_PAGE_SIZE && offset + len > I2C_PAGE_SIZE)
+		return -EINVAL;
+
+	memset(&ldst_cmd, 0, sizeof(ldst_cmd));
+	ldst_cmd.op_to_addrspace =
+		cpu_to_be32(FW_CMD_OP_V(FW_LDST_CMD) |
+			    FW_CMD_REQUEST_F |
+			    FW_CMD_READ_F |
+			    FW_LDST_CMD_ADDRSPACE_V(FW_LDST_ADDRSPC_I2C));
+	ldst_cmd.cycles_to_len16 = cpu_to_be32(FW_LEN16(ldst_cmd));
+	ldst_cmd.u.i2c.pid = (port < 0 ? 0xff : port);
+	ldst_cmd.u.i2c.did = devid;
+
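+	/* Read in chunks; each FW LDST command carries at most
+	 * sizeof(ldst_cmd.u.i2c.data) bytes of payload.
+	 */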
+	while (len > 0) {
+		unsigned int i2c_len = (len < i2c_max) ? len : i2c_max;
+
+		ldst_cmd.u.i2c.boffset = offset;
+		ldst_cmd.u.i2c.blen = i2c_len;
+
+		ret = t4_wr_mbox(adap, mbox, &ldst_cmd, sizeof(ldst_cmd),
+				 &ldst_rpl);
+		if (ret)
+			break;
+
+		memcpy(buf, ldst_rpl.u.i2c.data, i2c_len);
+		offset += i2c_len;
+		buf += i2c_len;
+		len -= i2c_len;
+	}
+
+	return ret;
+}
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.h b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.h
index a964ed1..361d503 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.h
@@ -38,21 +38,22 @@
 #include <linux/types.h>
 
 enum {
-	NCHAN          = 4,     /* # of HW channels */
-	MAX_MTU        = 9600,  /* max MAC MTU, excluding header + FCS */
-	EEPROMSIZE     = 17408, /* Serial EEPROM physical size */
-	EEPROMVSIZE    = 32768, /* Serial EEPROM virtual address space size */
-	EEPROMPFSIZE   = 1024,  /* EEPROM writable area size for PFn, n>0 */
-	RSS_NENTRIES   = 2048,  /* # of entries in RSS mapping table */
-	TCB_SIZE       = 128,   /* TCB size */
-	NMTUS          = 16,    /* size of MTU table */
-	NCCTRL_WIN     = 32,    /* # of congestion control windows */
-	NTX_SCHED      = 8,     /* # of HW Tx scheduling queues */
-	PM_NSTATS      = 5,     /* # of PM stats */
-	T6_PM_NSTATS   = 7,     /* # of PM stats in T6 */
-	MBOX_LEN       = 64,    /* mailbox size in bytes */
-	TRACE_LEN      = 112,   /* length of trace data and mask */
-	FILTER_OPT_LEN = 36,    /* filter tuple width for optional components */
+	NCHAN           = 4,    /* # of HW channels */
+	MAX_MTU         = 9600, /* max MAC MTU, excluding header + FCS */
+	EEPROMSIZE      = 17408,/* Serial EEPROM physical size */
+	EEPROMVSIZE     = 32768,/* Serial EEPROM virtual address space size */
+	EEPROMPFSIZE    = 1024, /* EEPROM writable area size for PFn, n>0 */
+	RSS_NENTRIES    = 2048, /* # of entries in RSS mapping table */
+	T6_RSS_NENTRIES = 4096, /* # of entries in RSS mapping table */
+	TCB_SIZE        = 128,  /* TCB size */
+	NMTUS           = 16,   /* size of MTU table */
+	NCCTRL_WIN      = 32,   /* # of congestion control windows */
+	NTX_SCHED       = 8,    /* # of HW Tx scheduling queues */
+	PM_NSTATS       = 5,    /* # of PM stats */
+	T6_PM_NSTATS    = 7,    /* # of PM stats in T6 */
+	MBOX_LEN        = 64,   /* mailbox size in bytes */
+	TRACE_LEN       = 112,  /* length of trace data and mask */
+	FILTER_OPT_LEN  = 36,   /* filter tuple width for optional components */
 };
 
 enum {
@@ -70,7 +71,9 @@ enum {
 
 /* SGE context types */
 enum ctxt_type {
-	CTXT_FLM = 2,
+	CTXT_EGRESS,
+	CTXT_INGRESS,
+	CTXT_FLM,
 	CTXT_CNM,
 };
 
@@ -284,4 +287,14 @@ enum {
 #define SGE_TIMESTAMP_V(x) ((__u64)(x) << SGE_TIMESTAMP_S)
 #define SGE_TIMESTAMP_G(x) (((__u64)(x) >> SGE_TIMESTAMP_S) & SGE_TIMESTAMP_M)
 
+#define I2C_DEV_ADDR_A0		0xa0
+#define I2C_DEV_ADDR_A2		0xa2
+#define I2C_PAGE_SIZE		0x100
+#define SFP_DIAG_TYPE_ADDR	0x5c
+#define SFP_DIAG_TYPE_LEN	0x1
+#define SFF_8472_COMP_ADDR	0x5e
+#define SFF_8472_COMP_LEN	0x1
+#define SFF_REV_ADDR		0x1
+#define SFF_REV_LEN		0x1
+
 #endif /* __T4_HW_H */
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h b/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h
index 7e12f24..d0db442 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h
@@ -107,6 +107,7 @@ enum {
 
 	CPL_FW6_MSG           = 0xE0,
 	CPL_FW6_PLD           = 0xE1,
+	CPL_TX_TNL_LSO        = 0xEC,
 	CPL_TX_PKT_LSO        = 0xED,
 	CPL_TX_PKT_XT         = 0xEE,
 
@@ -1479,6 +1480,169 @@ struct ulp_txpkt {
 #define ULP_TXPKT_RO_V(x) ((x) << ULP_TXPKT_RO_S)
 #define ULP_TXPKT_RO_F ULP_TXPKT_RO_V(1U)
 
+enum cpl_tx_tnl_lso_type {
+	TX_TNL_TYPE_OPAQUE,
+	TX_TNL_TYPE_NVGRE,
+	TX_TNL_TYPE_VXLAN,
+	TX_TNL_TYPE_GENEVE,
+};
+
+struct cpl_tx_tnl_lso {
+	__be32 op_to_IpIdSplitOut;
+	__be16 IpIdOffsetOut;
+	__be16 UdpLenSetOut_to_TnlHdrLen;
+	__be64 r1;
+	__be32 Flow_to_TcpHdrLen;
+	__be16 IpIdOffset;
+	__be16 IpIdSplit_to_Mss;
+	__be32 TCPSeqOffset;
+	__be32 EthLenOffset_Size;
+	/* encapsulated CPL (TX_PKT_XT) follows here */
+};
+
+#define CPL_TX_TNL_LSO_OPCODE_S		24
+#define CPL_TX_TNL_LSO_OPCODE_M		0xff
+#define CPL_TX_TNL_LSO_OPCODE_V(x)      ((x) << CPL_TX_TNL_LSO_OPCODE_S)
+#define CPL_TX_TNL_LSO_OPCODE_G(x)      \
+	(((x) >> CPL_TX_TNL_LSO_OPCODE_S) & CPL_TX_TNL_LSO_OPCODE_M)
+
+#define CPL_TX_TNL_LSO_FIRST_S		23
+#define CPL_TX_TNL_LSO_FIRST_M		0x1
+#define CPL_TX_TNL_LSO_FIRST_V(x)	((x) << CPL_TX_TNL_LSO_FIRST_S)
+#define CPL_TX_TNL_LSO_FIRST_G(x)	\
+	(((x) >> CPL_TX_TNL_LSO_FIRST_S) & CPL_TX_TNL_LSO_FIRST_M)
+#define CPL_TX_TNL_LSO_FIRST_F		CPL_TX_TNL_LSO_FIRST_V(1U)
+
+#define CPL_TX_TNL_LSO_LAST_S		22
+#define CPL_TX_TNL_LSO_LAST_M		0x1
+#define CPL_TX_TNL_LSO_LAST_V(x)	((x) << CPL_TX_TNL_LSO_LAST_S)
+#define CPL_TX_TNL_LSO_LAST_G(x)	\
+	(((x) >> CPL_TX_TNL_LSO_LAST_S) & CPL_TX_TNL_LSO_LAST_M)
+#define CPL_TX_TNL_LSO_LAST_F		CPL_TX_TNL_LSO_LAST_V(1U)
+
+#define CPL_TX_TNL_LSO_ETHHDRLENXOUT_S	21
+#define CPL_TX_TNL_LSO_ETHHDRLENXOUT_M	0x1
+#define CPL_TX_TNL_LSO_ETHHDRLENXOUT_V(x) \
+	((x) << CPL_TX_TNL_LSO_ETHHDRLENXOUT_S)
+#define CPL_TX_TNL_LSO_ETHHDRLENXOUT_G(x) \
+	(((x) >> CPL_TX_TNL_LSO_ETHHDRLENXOUT_S) & \
+	 CPL_TX_TNL_LSO_ETHHDRLENXOUT_M)
+#define CPL_TX_TNL_LSO_ETHHDRLENXOUT_F CPL_TX_TNL_LSO_ETHHDRLENXOUT_V(1U)
+
+#define CPL_TX_TNL_LSO_IPV6OUT_S	20
+#define CPL_TX_TNL_LSO_IPV6OUT_M	0x1
+#define CPL_TX_TNL_LSO_IPV6OUT_V(x)	((x) << CPL_TX_TNL_LSO_IPV6OUT_S)
+#define CPL_TX_TNL_LSO_IPV6OUT_G(x)	\
+	(((x) >> CPL_TX_TNL_LSO_IPV6OUT_S) & CPL_TX_TNL_LSO_IPV6OUT_M)
+#define CPL_TX_TNL_LSO_IPV6OUT_F        CPL_TX_TNL_LSO_IPV6OUT_V(1U)
+
+#define CPL_TX_TNL_LSO_ETHHDRLEN_S	16
+#define CPL_TX_TNL_LSO_ETHHDRLEN_M	0xf
+#define CPL_TX_TNL_LSO_ETHHDRLEN_V(x)	((x) << CPL_TX_TNL_LSO_ETHHDRLEN_S)
+#define CPL_TX_TNL_LSO_ETHHDRLEN_G(x)	\
+	(((x) >> CPL_TX_TNL_LSO_ETHHDRLEN_S) & CPL_TX_TNL_LSO_ETHHDRLEN_M)
+
+#define CPL_TX_TNL_LSO_IPHDRLEN_S	4
+#define CPL_TX_TNL_LSO_IPHDRLEN_M	0xfff
+#define CPL_TX_TNL_LSO_IPHDRLEN_V(x)	((x) << CPL_TX_TNL_LSO_IPHDRLEN_S)
+#define CPL_TX_TNL_LSO_IPHDRLEN_G(x)    \
+	(((x) >> CPL_TX_TNL_LSO_IPHDRLEN_S) & CPL_TX_TNL_LSO_IPHDRLEN_M)
+
+#define CPL_TX_TNL_LSO_TCPHDRLEN_S	0
+#define CPL_TX_TNL_LSO_TCPHDRLEN_M	0xf
+#define CPL_TX_TNL_LSO_TCPHDRLEN_V(x)	((x) << CPL_TX_TNL_LSO_TCPHDRLEN_S)
+#define CPL_TX_TNL_LSO_TCPHDRLEN_G(x)   \
+	(((x) >> CPL_TX_TNL_LSO_TCPHDRLEN_S) & CPL_TX_TNL_LSO_TCPHDRLEN_M)
+
+#define CPL_TX_TNL_LSO_MSS_S            0
+#define CPL_TX_TNL_LSO_MSS_M            0x3fff
+#define CPL_TX_TNL_LSO_MSS_V(x)         ((x) << CPL_TX_TNL_LSO_MSS_S)
+#define CPL_TX_TNL_LSO_MSS_G(x)         \
+	(((x) >> CPL_TX_TNL_LSO_MSS_S) & CPL_TX_TNL_LSO_MSS_M)
+
+#define CPL_TX_TNL_LSO_SIZE_S		0
+#define CPL_TX_TNL_LSO_SIZE_M		0xfffffff
+#define CPL_TX_TNL_LSO_SIZE_V(x)	((x) << CPL_TX_TNL_LSO_SIZE_S)
+#define CPL_TX_TNL_LSO_SIZE_G(x)	\
+	(((x) >> CPL_TX_TNL_LSO_SIZE_S) & CPL_TX_TNL_LSO_SIZE_M)
+
+#define CPL_TX_TNL_LSO_ETHHDRLENOUT_S   16
+#define CPL_TX_TNL_LSO_ETHHDRLENOUT_M   0xf
+#define CPL_TX_TNL_LSO_ETHHDRLENOUT_V(x) \
+	((x) << CPL_TX_TNL_LSO_ETHHDRLENOUT_S)
+#define CPL_TX_TNL_LSO_ETHHDRLENOUT_G(x) \
+	(((x) >> CPL_TX_TNL_LSO_ETHHDRLENOUT_S) & CPL_TX_TNL_LSO_ETHHDRLENOUT_M)
+
+#define CPL_TX_TNL_LSO_IPHDRLENOUT_S    4
+#define CPL_TX_TNL_LSO_IPHDRLENOUT_M    0xfff
+#define CPL_TX_TNL_LSO_IPHDRLENOUT_V(x) ((x) << CPL_TX_TNL_LSO_IPHDRLENOUT_S)
+#define CPL_TX_TNL_LSO_IPHDRLENOUT_G(x) \
+	(((x) >> CPL_TX_TNL_LSO_IPHDRLENOUT_S) & CPL_TX_TNL_LSO_IPHDRLENOUT_M)
+
+#define CPL_TX_TNL_LSO_IPHDRCHKOUT_S    3
+#define CPL_TX_TNL_LSO_IPHDRCHKOUT_M    0x1
+#define CPL_TX_TNL_LSO_IPHDRCHKOUT_V(x) ((x) << CPL_TX_TNL_LSO_IPHDRCHKOUT_S)
+#define CPL_TX_TNL_LSO_IPHDRCHKOUT_G(x) \
+	(((x) >> CPL_TX_TNL_LSO_IPHDRCHKOUT_S) & CPL_TX_TNL_LSO_IPHDRCHKOUT_M)
+#define CPL_TX_TNL_LSO_IPHDRCHKOUT_F    CPL_TX_TNL_LSO_IPHDRCHKOUT_V(1U)
+
+#define CPL_TX_TNL_LSO_IPLENSETOUT_S	2
+#define CPL_TX_TNL_LSO_IPLENSETOUT_M	0x1
+#define CPL_TX_TNL_LSO_IPLENSETOUT_V(x) ((x) << CPL_TX_TNL_LSO_IPLENSETOUT_S)
+#define CPL_TX_TNL_LSO_IPLENSETOUT_G(x) \
+	(((x) >> CPL_TX_TNL_LSO_IPLENSETOUT_S) & CPL_TX_TNL_LSO_IPLENSETOUT_M)
+#define CPL_TX_TNL_LSO_IPLENSETOUT_F	CPL_TX_TNL_LSO_IPLENSETOUT_V(1U)
+
+#define CPL_TX_TNL_LSO_IPIDINCOUT_S	1
+#define CPL_TX_TNL_LSO_IPIDINCOUT_M	0x1
+#define CPL_TX_TNL_LSO_IPIDINCOUT_V(x)  ((x) << CPL_TX_TNL_LSO_IPIDINCOUT_S)
+#define CPL_TX_TNL_LSO_IPIDINCOUT_G(x)  \
+	(((x) >> CPL_TX_TNL_LSO_IPIDINCOUT_S) & CPL_TX_TNL_LSO_IPIDINCOUT_M)
+#define CPL_TX_TNL_LSO_IPIDINCOUT_F     CPL_TX_TNL_LSO_IPIDINCOUT_V(1U)
+
+#define CPL_TX_TNL_LSO_UDPCHKCLROUT_S   14
+#define CPL_TX_TNL_LSO_UDPCHKCLROUT_M   0x1
+#define CPL_TX_TNL_LSO_UDPCHKCLROUT_V(x) \
+	((x) << CPL_TX_TNL_LSO_UDPCHKCLROUT_S)
+#define CPL_TX_TNL_LSO_UDPCHKCLROUT_G(x) \
+	(((x) >> CPL_TX_TNL_LSO_UDPCHKCLROUT_S) & \
+	 CPL_TX_TNL_LSO_UDPCHKCLROUT_M)
+#define CPL_TX_TNL_LSO_UDPCHKCLROUT_F   CPL_TX_TNL_LSO_UDPCHKCLROUT_V(1U)
+
+#define CPL_TX_TNL_LSO_UDPLENSETOUT_S   15
+#define CPL_TX_TNL_LSO_UDPLENSETOUT_M   0x1
+#define CPL_TX_TNL_LSO_UDPLENSETOUT_V(x) \
+	((x) << CPL_TX_TNL_LSO_UDPLENSETOUT_S)
+#define CPL_TX_TNL_LSO_UDPLENSETOUT_G(x) \
+	(((x) >> CPL_TX_TNL_LSO_UDPLENSETOUT_S) & \
+	 CPL_TX_TNL_LSO_UDPLENSETOUT_M)
+#define CPL_TX_TNL_LSO_UDPLENSETOUT_F   CPL_TX_TNL_LSO_UDPLENSETOUT_V(1U)
+
+#define CPL_TX_TNL_LSO_TNLTYPE_S	12
+#define CPL_TX_TNL_LSO_TNLTYPE_M	0x3
+#define CPL_TX_TNL_LSO_TNLTYPE_V(x)	((x) << CPL_TX_TNL_LSO_TNLTYPE_S)
+#define CPL_TX_TNL_LSO_TNLTYPE_G(x)	\
+	(((x) >> CPL_TX_TNL_LSO_TNLTYPE_S) & CPL_TX_TNL_LSO_TNLTYPE_M)
+
+#define S_CPL_TX_TNL_LSO_ETHHDRLEN	16
+#define M_CPL_TX_TNL_LSO_ETHHDRLEN	0xf
+#define V_CPL_TX_TNL_LSO_ETHHDRLEN(x)	((x) << S_CPL_TX_TNL_LSO_ETHHDRLEN)
+#define G_CPL_TX_TNL_LSO_ETHHDRLEN(x)	\
+	(((x) >> S_CPL_TX_TNL_LSO_ETHHDRLEN) & M_CPL_TX_TNL_LSO_ETHHDRLEN)
+
+#define CPL_TX_TNL_LSO_TNLHDRLEN_S      0
+#define CPL_TX_TNL_LSO_TNLHDRLEN_M      0xfff
+#define CPL_TX_TNL_LSO_TNLHDRLEN_V(x)	((x) << CPL_TX_TNL_LSO_TNLHDRLEN_S)
+#define CPL_TX_TNL_LSO_TNLHDRLEN_G(x)   \
+	(((x) >> CPL_TX_TNL_LSO_TNLHDRLEN_S) & CPL_TX_TNL_LSO_TNLHDRLEN_M)
+
+#define CPL_TX_TNL_LSO_IPV6_S		20
+#define CPL_TX_TNL_LSO_IPV6_M		0x1
+#define CPL_TX_TNL_LSO_IPV6_V(x)	((x) << CPL_TX_TNL_LSO_IPV6_S)
+#define CPL_TX_TNL_LSO_IPV6_G(x)	\
+	(((x) >> CPL_TX_TNL_LSO_IPV6_S) & CPL_TX_TNL_LSO_IPV6_M)
+#define CPL_TX_TNL_LSO_IPV6_F		CPL_TX_TNL_LSO_IPV6_V(1U)
+
 #define ULP_TX_SC_MORE_S 23
 #define ULP_TX_SC_MORE_V(x) ((x) << ULP_TX_SC_MORE_S)
 #define ULP_TX_SC_MORE_F  ULP_TX_SC_MORE_V(1U)
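
The block above follows the cxgb4 register-field convention: _S is the bit offset, _M the post-shift mask, _V packs a value, _G extracts one, and _F is the set-flag form for single-bit fields. A standalone sketch of the pattern, using an illustrative FIELD name rather than one from the header:

#include <assert.h>
#include <stdint.h>

#define FIELD_S    4		/* bit offset, as in CPL_TX_TNL_LSO_IPHDRLEN_S */
#define FIELD_M    0xfff	/* mask applied after shifting right */
#define FIELD_V(x) ((x) << FIELD_S)
#define FIELD_G(x) (((x) >> FIELD_S) & FIELD_M)

int main(void)
{
	uint32_t word = FIELD_V(0x123) | 1u;	/* pack field, keep other bits */

	assert(FIELD_G(word) == 0x123);		/* extraction round-trips */
	return 0;
}
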
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h b/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h
index 60cf9e0..51b1803 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h
@@ -183,6 +183,7 @@ CH_PCI_DEVICE_ID_TABLE_DEFINE_BEGIN
 	CH_PCI_ID_TABLE_FENTRY(0x50a9), /* Custom T580-KR */
 	CH_PCI_ID_TABLE_FENTRY(0x50aa), /* Custom T580-CR */
 	CH_PCI_ID_TABLE_FENTRY(0x50ab), /* Custom T520-CR */
+	CH_PCI_ID_TABLE_FENTRY(0x50ac), /* Custom T540-BT */
 
 	/* T6 adapters:
 	 */
@@ -206,6 +207,7 @@ CH_PCI_DEVICE_ID_TABLE_DEFINE_BEGIN
 	CH_PCI_ID_TABLE_FENTRY(0x6084), /* Custom T64100-CR QSFP28 */
 	CH_PCI_ID_TABLE_FENTRY(0x6085), /* Custom T6240-SO */
 	CH_PCI_ID_TABLE_FENTRY(0x6086), /* Custom T6225-SO-CR */
+	CH_PCI_ID_TABLE_FENTRY(0x6087), /* Custom T6225-CR */
 CH_PCI_DEVICE_ID_TABLE_DEFINE_END;
 
 #endif /* __T4_PCI_ID_TBL_H__ */
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h b/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h
index a7cfece..d9c06d6 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h
@@ -45,6 +45,9 @@
 #define PF_BASE(idx) (PF0_BASE + (idx) * PF_STRIDE)
 #define PF_REG(idx, reg) (PF_BASE(idx) + (reg))
 
+#define NUM_CIM_CTL_TSCH_CHANNEL_INSTANCES 4
+#define NUM_CIM_CTL_TSCH_CHANNEL_TSCH_CLASS_INSTANCES 16
+
 #define MYPORT_BASE 0x1c000
 #define MYPORT_REG(reg_addr) (MYPORT_BASE + (reg_addr))
 
@@ -961,6 +964,10 @@
 
 #define MA_EXT_MEMORY1_BAR_A 0x7808
 
+#define HMA_MUX_S    5
+#define HMA_MUX_V(x) ((x) << HMA_MUX_S)
+#define HMA_MUX_F    HMA_MUX_V(1U)
+
 #define EXT_MEM1_BASE_S    16
 #define EXT_MEM1_BASE_M    0xfffU
 #define EXT_MEM1_BASE_G(x) (((x) >> EXT_MEM1_BASE_S) & EXT_MEM1_BASE_M)
@@ -2504,6 +2511,17 @@
 #define MPS_RX_MAC_BG_PG_CNT0_A 0x11208
 #define MPS_RX_LPBK_BG_PG_CNT0_A 0x11218
 
+#define MPS_RX_VXLAN_TYPE_A 0x11234
+
+#define VXLAN_EN_S    16
+#define VXLAN_EN_V(x) ((x) << VXLAN_EN_S)
+#define VXLAN_EN_F    VXLAN_EN_V(1U)
+
+#define VXLAN_S    0
+#define VXLAN_M    0xffffU
+#define VXLAN_V(x) ((x) << VXLAN_S)
+#define VXLAN_G(x) (((x) >> VXLAN_S) & VXLAN_M)
+
 #define MPS_CLS_TCAM_Y_L_A 0xf000
 #define MPS_CLS_TCAM_DATA0_A 0xf000
 #define MPS_CLS_TCAM_DATA1_A 0xf004
@@ -2530,8 +2548,14 @@
 
 #define DATAPORTNUM_S    12
 #define DATAPORTNUM_M    0xfU
+#define DATAPORTNUM_V(x) ((x) << DATAPORTNUM_S)
 #define DATAPORTNUM_G(x) (((x) >> DATAPORTNUM_S) & DATAPORTNUM_M)
 
+#define DATALKPTYPE_S    10
+#define DATALKPTYPE_M    0x3U
+#define DATALKPTYPE_V(x) ((x) << DATALKPTYPE_S)
+#define DATALKPTYPE_G(x) (((x) >> DATALKPTYPE_S) & DATALKPTYPE_M)
+
 #define DATADIPHIT_S    8
 #define DATADIPHIT_V(x) ((x) << DATADIPHIT_S)
 #define DATADIPHIT_F    DATADIPHIT_V(1U)
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h b/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h
index 57eb4ad..f3310d5 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h
@@ -828,6 +828,7 @@ enum fw_ldst_addrspc {
 	FW_LDST_ADDRSPC_MPS       = 0x0020,
 	FW_LDST_ADDRSPC_FUNC      = 0x0028,
 	FW_LDST_ADDRSPC_FUNC_PCIE = 0x0029,
+	FW_LDST_ADDRSPC_I2C       = 0x0038,
 };
 
 enum fw_ldst_mps_fid {
@@ -2059,6 +2060,7 @@ struct fw_vi_cmd {
 #define FW_VI_MAC_ADD_MAC		0x3FF
 #define FW_VI_MAC_ADD_PERSIST_MAC	0x3FE
 #define FW_VI_MAC_MAC_BASED_FREE	0x3FD
+#define FW_VI_MAC_ID_BASED_FREE		0x3FC
 #define FW_CLS_TCAM_NUM_ENTRIES		336
 
 enum fw_vi_mac_smac {
@@ -2075,6 +2077,13 @@ enum fw_vi_mac_result {
 	FW_VI_MAC_R_F_ACL_CHECK
 };
 
+enum fw_vi_mac_entry_types {
+	FW_VI_MAC_TYPE_EXACTMAC,
+	FW_VI_MAC_TYPE_HASHVEC,
+	FW_VI_MAC_TYPE_RAW,
+	FW_VI_MAC_TYPE_EXACTMAC_VNI,
+};
+
 struct fw_vi_mac_cmd {
 	__be32 op_to_viid;
 	__be32 freemacs_to_len16;
@@ -2086,6 +2095,13 @@ struct fw_vi_mac_cmd {
 		struct fw_vi_mac_hash {
 			__be64 hashvec;
 		} hash;
+		struct fw_vi_mac_raw {
+			__be32 raw_idx_pkd;
+			__be32 data0_pkd;
+			__be32 data1[2];
+			__be64 data0m_pkd;
+			__be32 data1m[2];
+		} raw;
 	} u;
 };
 
@@ -2095,6 +2111,12 @@ struct fw_vi_mac_cmd {
 #define FW_VI_MAC_CMD_FREEMACS_S	31
 #define FW_VI_MAC_CMD_FREEMACS_V(x)	((x) << FW_VI_MAC_CMD_FREEMACS_S)
 
+#define FW_VI_MAC_CMD_ENTRY_TYPE_S      23
+#define FW_VI_MAC_CMD_ENTRY_TYPE_M      0x7
+#define FW_VI_MAC_CMD_ENTRY_TYPE_V(x)   ((x) << FW_VI_MAC_CMD_ENTRY_TYPE_S)
+#define FW_VI_MAC_CMD_ENTRY_TYPE_G(x)	\
+	(((x) >> FW_VI_MAC_CMD_ENTRY_TYPE_S) & FW_VI_MAC_CMD_ENTRY_TYPE_M)
+
 #define FW_VI_MAC_CMD_HASHVECEN_S	23
 #define FW_VI_MAC_CMD_HASHVECEN_V(x)	((x) << FW_VI_MAC_CMD_HASHVECEN_S)
 #define FW_VI_MAC_CMD_HASHVECEN_F	FW_VI_MAC_CMD_HASHVECEN_V(1U)
@@ -2121,6 +2143,12 @@ struct fw_vi_mac_cmd {
 #define FW_VI_MAC_CMD_IDX_G(x)	\
 	(((x) >> FW_VI_MAC_CMD_IDX_S) & FW_VI_MAC_CMD_IDX_M)
 
+#define FW_VI_MAC_CMD_RAW_IDX_S         16
+#define FW_VI_MAC_CMD_RAW_IDX_M         0xffff
+#define FW_VI_MAC_CMD_RAW_IDX_V(x)      ((x) << FW_VI_MAC_CMD_RAW_IDX_S)
+#define FW_VI_MAC_CMD_RAW_IDX_G(x)      \
+	(((x) >> FW_VI_MAC_CMD_RAW_IDX_S) & FW_VI_MAC_CMD_RAW_IDX_M)
+
 #define FW_RXMODE_MTU_NO_CHG	65535
 
 struct fw_vi_rxmode_cmd {
@@ -2828,6 +2856,7 @@ enum fw_port_type {
 	FW_PORT_TYPE_CR2_QSFP,
 	FW_PORT_TYPE_SFP28,
 	FW_PORT_TYPE_KR_SFP28,
+	FW_PORT_TYPE_KR_XLAUI,
 
 	FW_PORT_TYPE_NONE = FW_PORT_CMD_PTYPE_M
 };
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
index b48361c..96f69f8 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
@@ -1229,7 +1229,8 @@ static int from_fw_port_mod_type(enum fw_port_type port_type,
 		else
 			return PORT_OTHER;
 	} else if (port_type == FW_PORT_TYPE_KR4_100G ||
-		   port_type == FW_PORT_TYPE_KR_SFP28) {
+		   port_type == FW_PORT_TYPE_KR_SFP28 ||
+		   port_type == FW_PORT_TYPE_KR_XLAUI) {
 		return PORT_NONE;
 	}
 
@@ -1323,6 +1324,13 @@ static void fw_caps_to_lmm(enum fw_port_type port_type,
 		SET_LMM(25000baseKR_Full);
 		break;
 
+	case FW_PORT_TYPE_KR_XLAUI:
+		SET_LMM(Backplane);
+		FW_CAPS_TO_LMM(SPEED_1G, 1000baseKX_Full);
+		FW_CAPS_TO_LMM(SPEED_10G, 10000baseKR_Full);
+		FW_CAPS_TO_LMM(SPEED_40G, 40000baseKR4_Full);
+		break;
+
 	case FW_PORT_TYPE_CR2_QSFP:
 		SET_LMM(FIBRE);
 		SET_LMM(50000baseSR2_Full);
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
index 14d7e67..129b914 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
@@ -2619,8 +2619,8 @@ void t4vf_sge_stop(struct adapter *adapter)
 int t4vf_sge_init(struct adapter *adapter)
 {
 	struct sge_params *sge_params = &adapter->params.sge;
-	u32 fl0 = sge_params->sge_fl_buffer_size[0];
-	u32 fl1 = sge_params->sge_fl_buffer_size[1];
+	u32 fl_small_pg = sge_params->sge_fl_buffer_size[0];
+	u32 fl_large_pg = sge_params->sge_fl_buffer_size[1];
 	struct sge *s = &adapter->sge;
 
 	/*
@@ -2628,9 +2628,20 @@ int t4vf_sge_init(struct adapter *adapter)
 	 * the Physical Function Driver.  Ideally we should be able to deal
 	 * with _any_ configuration.  Practice is different ...
 	 */
-	if (fl0 != PAGE_SIZE || (fl1 != 0 && fl1 <= fl0)) {
+
+	/* We only bother using the Large Page logic if the Large Page Buffer
+	 * is larger than our Page Size Buffer.
+	 */
+	if (fl_large_pg <= fl_small_pg)
+		fl_large_pg = 0;
+
+	/* The Page Size Buffer must be exactly equal to our Page Size and the
+	 * Large Page Size Buffer should be 0 (per above) or a power of 2.
+	 */
+	if (fl_small_pg != PAGE_SIZE ||
+	    (fl_large_pg & (fl_large_pg - 1)) != 0) {
 		dev_err(adapter->pdev_dev, "bad SGE FL buffer sizes [%d, %d]\n",
-			fl0, fl1);
+			fl_small_pg, fl_large_pg);
 		return -EINVAL;
 	}
 	if ((sge_params->sge_control & RXPKTCPLMODE_F) !=
@@ -2642,8 +2653,8 @@ int t4vf_sge_init(struct adapter *adapter)
 	/*
 	 * Now translate the adapter parameters into our internal forms.
 	 */
-	if (fl1)
-		s->fl_pg_order = ilog2(fl1) - PAGE_SHIFT;
+	if (fl_large_pg)
+		s->fl_pg_order = ilog2(fl_large_pg) - PAGE_SHIFT;
 	s->stat_len = ((sge_params->sge_control & EGRSTATUSPAGESIZE_F)
 			? 128 : 64);
 	s->pktshift = PKTSHIFT_G(sge_params->sge_control);
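
The new free-list check relies on the usual bit trick: for unsigned x, x & (x - 1) clears the lowest set bit, so the expression is zero exactly when x is 0 or a power of two; this is why fl_large_pg is first zeroed whenever it is not larger than fl_small_pg. A minimal sketch:

#include <assert.h>

static int zero_or_pow2(unsigned int x)
{
	return (x & (x - 1)) == 0;	/* clears lowest set bit; 0 iff x is 0 or 2^n */
}

int main(void)
{
	assert(zero_or_pow2(0) && zero_or_pow2(4096) && zero_or_pow2(65536));
	assert(!zero_or_pow2(4096 + 64));	/* non-power-of-2 sizes are rejected */
	return 0;
}
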
diff --git a/drivers/net/ethernet/cisco/enic/enic.h b/drivers/net/ethernet/cisco/enic/enic.h
index 6a95270..9b218f0 100644
--- a/drivers/net/ethernet/cisco/enic/enic.h
+++ b/drivers/net/ethernet/cisco/enic/enic.h
@@ -43,6 +43,8 @@
 #define ENIC_CQ_MAX		(ENIC_WQ_MAX + ENIC_RQ_MAX)
 #define ENIC_INTR_MAX		(ENIC_CQ_MAX + 2)
 
+#define ENIC_WQ_NAPI_BUDGET	256
+
 #define ENIC_AIC_LARGE_PKT_DIFF	3
 
 struct enic_msix_entry {
diff --git a/drivers/net/ethernet/cisco/enic/enic_ethtool.c b/drivers/net/ethernet/cisco/enic/enic_ethtool.c
index 462d0ce..efb9333 100644
--- a/drivers/net/ethernet/cisco/enic/enic_ethtool.c
+++ b/drivers/net/ethernet/cisco/enic/enic_ethtool.c
@@ -18,6 +18,7 @@
 
 #include <linux/netdevice.h>
 #include <linux/ethtool.h>
+#include <linux/net_tstamp.h>
 
 #include "enic_res.h"
 #include "enic.h"
@@ -578,6 +579,16 @@ static int enic_set_rxfh(struct net_device *netdev, const u32 *indir,
 	return __enic_set_rsskey(enic);
 }
 
+static int enic_get_ts_info(struct net_device *netdev,
+			    struct ethtool_ts_info *info)
+{
+	info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
+				SOF_TIMESTAMPING_RX_SOFTWARE |
+				SOF_TIMESTAMPING_SOFTWARE;
+
+	return 0;
+}
+
 static const struct ethtool_ops enic_ethtool_ops = {
 	.get_drvinfo = enic_get_drvinfo,
 	.get_msglevel = enic_get_msglevel,
@@ -597,6 +608,7 @@ static const struct ethtool_ops enic_ethtool_ops = {
 	.get_rxfh = enic_get_rxfh,
 	.set_rxfh = enic_set_rxfh,
 	.get_link_ksettings = enic_get_ksettings,
+	.get_ts_info = enic_get_ts_info,
 };
 
 void enic_set_ethtool_ops(struct net_device *netdev)
diff --git a/drivers/net/ethernet/cisco/enic/enic_main.c b/drivers/net/ethernet/cisco/enic/enic_main.c
index e130fb7..f202ba7 100644
--- a/drivers/net/ethernet/cisco/enic/enic_main.c
+++ b/drivers/net/ethernet/cisco/enic/enic_main.c
@@ -856,6 +856,7 @@ static netdev_tx_t enic_hard_start_xmit(struct sk_buff *skb,
 
 	if (vnic_wq_desc_avail(wq) < MAX_SKB_FRAGS + ENIC_DESC_MAX_SPLITS)
 		netif_tx_stop_queue(txq);
+	skb_tx_timestamp(skb);
 	if (!skb->xmit_more || netif_xmit_stopped(txq))
 		vnic_wq_doorbell(wq);
 
@@ -1499,7 +1500,7 @@ static int enic_poll(struct napi_struct *napi, int budget)
 	unsigned int cq_wq = enic_cq_wq(enic, 0);
 	unsigned int intr = enic_legacy_io_intr();
 	unsigned int rq_work_to_do = budget;
-	unsigned int wq_work_to_do = -1; /* no limit */
+	unsigned int wq_work_to_do = ENIC_WQ_NAPI_BUDGET;
 	unsigned int  work_done, rq_work_done = 0, wq_work_done;
 	int err;
 
@@ -1597,7 +1598,7 @@ static int enic_poll_msix_wq(struct napi_struct *napi, int budget)
 	struct vnic_wq *wq = &enic->wq[wq_index];
 	unsigned int cq;
 	unsigned int intr;
-	unsigned int wq_work_to_do = -1; /* clean all desc possible */
+	unsigned int wq_work_to_do = ENIC_WQ_NAPI_BUDGET;
 	unsigned int wq_work_done;
 	unsigned int wq_irq;
 
diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.c b/drivers/net/ethernet/emulex/benet/be_cmds.c
index 02dd524..1a49297 100644
--- a/drivers/net/ethernet/emulex/benet/be_cmds.c
+++ b/drivers/net/ethernet/emulex/benet/be_cmds.c
@@ -103,7 +103,7 @@ static struct be_cmd_priv_map cmd_priv_map[] = {
 static bool be_cmd_allowed(struct be_adapter *adapter, u8 opcode, u8 subsystem)
 {
 	int i;
-	int num_entries = sizeof(cmd_priv_map)/sizeof(struct be_cmd_priv_map);
+	int num_entries = ARRAY_SIZE(cmd_priv_map);
 	u32 cmd_privileges = adapter->cmd_privileges;
 
 	for (i = 0; i < num_entries; i++)
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
index a74300a..90aa69a 100644
--- a/drivers/net/ethernet/freescale/fec_main.c
+++ b/drivers/net/ethernet/freescale/fec_main.c
@@ -1868,6 +1868,8 @@ static int fec_enet_clk_enable(struct net_device *ndev, bool enable)
 		ret = clk_prepare_enable(fep->clk_ref);
 		if (ret)
 			goto failed_clk_ref;
+
+		phy_reset_after_clk_enable(ndev->phydev);
 	} else {
 		clk_disable_unprepare(fep->clk_ahb);
 		clk_disable_unprepare(fep->clk_enet_out);
@@ -2840,6 +2842,7 @@ fec_enet_open(struct net_device *ndev)
 {
 	struct fec_enet_private *fep = netdev_priv(ndev);
 	int ret;
+	bool reset_again;
 
 	ret = pm_runtime_get_sync(&fep->pdev->dev);
 	if (ret < 0)
@@ -2850,6 +2853,17 @@ fec_enet_open(struct net_device *ndev)
 	if (ret)
 		goto clk_enable;
 
+	/* During the first fec_enet_open call the PHY isn't probed yet, so
+	 * the phy_reset_after_clk_enable() call within fec_enet_clk_enable()
+	 * fails. As we need this reset to be sure the PHY works correctly,
+	 * check whether we need to reset again later, once the PHY has been
+	 * probed.
+	 */
+	if (ndev->phydev && ndev->phydev->drv)
+		reset_again = false;
+	else
+		reset_again = true;
+
 	/* I should reset the ring buffers here, but I don't yet know
 	 * a simple way to do that.
 	 */
@@ -2866,6 +2880,12 @@ fec_enet_open(struct net_device *ndev)
 	if (ret)
 		goto err_enet_mii_probe;
 
+	/* Call phy_reset_after_clk_enable() again if it failed earlier
+	 * because the PHY wasn't yet probed.
+	 */
+	if (reset_again)
+		phy_reset_after_clk_enable(ndev->phydev);
+
 	if (fep->quirks & FEC_QUIRK_ERR006687)
 		imx6q_cpuidle_fec_irqs_used();
 
diff --git a/drivers/net/ethernet/hisilicon/Kconfig b/drivers/net/ethernet/hisilicon/Kconfig
index 30000b6..8bcf470 100644
--- a/drivers/net/ethernet/hisilicon/Kconfig
+++ b/drivers/net/ethernet/hisilicon/Kconfig
@@ -94,15 +94,6 @@
 	  compatibility layer. The engine would be used in Hisilicon hip08 family of
 	  SoCs and further upcoming SoCs.
 
-config HNS3_ENET
-	tristate "Hisilicon HNS3 Ethernet Device Support"
-	depends on 64BIT && PCI
-	depends on HNS3 && HNS3_HCLGE
-	---help---
-	  This selects the Ethernet Driver for Hisilicon Network Subsystem 3 for hip08
-	  family of SoCs. This module depends upon HNAE3 driver to access the HNAE3
-	  devices and their associated operations.
-
 config HNS3_DCB
 	bool "Hisilicon HNS3 Data Center Bridge Support"
 	default n
@@ -112,4 +103,23 @@
 
 	  If unsure, say N.
 
+config HNS3_HCLGEVF
+	tristate "Hisilicon HNS3VF Acceleration Engine & Compatibility Layer Support"
+	depends on PCI_MSI
+	depends on HNS3
+	depends on HNS3_HCLGE
+	---help---
+	  This selects the HNS3 VF driver's network acceleration engine & its hardware
+	  compatibility layer. The engine would be used in Hisilicon hip08 family of
+	  SoCs and further upcoming SoCs.
+
+config HNS3_ENET
+	tristate "Hisilicon HNS3 Ethernet Device Support"
+	depends on 64BIT && PCI
+	depends on HNS3
+	---help---
+	  This selects the Ethernet Driver for Hisilicon Network Subsystem 3 for hip08
+	  family of SoCs. This module depends upon HNAE3 driver to access the HNAE3
+	  devices and their associated operations.
+
 endif # NET_VENDOR_HISILICON
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c
index 8b5cdf4..cac86e9 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c
@@ -1168,7 +1168,7 @@ void hns_set_led_opt(struct hns_mac_cb *mac_cb)
 int hns_cpld_led_set_id(struct hns_mac_cb *mac_cb,
 			enum hnae_led_state status)
 {
-	if (!mac_cb || !mac_cb->cpld_ctrl)
+	if (!mac_cb)
 		return 0;
 
 	return mac_cb->dsaf_dev->misc_op->cpld_set_led_id(mac_cb, status);
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c
index 408b63f..ca247c2 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c
@@ -18,6 +18,7 @@ enum _dsm_op_index {
 	HNS_OP_LED_SET_FUNC             = 0x3,
 	HNS_OP_GET_PORT_TYPE_FUNC       = 0x4,
 	HNS_OP_GET_SFP_STAT_FUNC        = 0x5,
+	HNS_OP_LOCATE_LED_SET_FUNC      = 0x6,
 };
 
 enum _dsm_rst_type {
@@ -81,6 +82,33 @@ static void hns_dsaf_acpi_ledctrl_by_port(struct hns_mac_cb *mac_cb, u8 op_type,
        ACPI_FREE(obj);
 }
 
+static void hns_dsaf_acpi_locate_ledctrl_by_port(struct hns_mac_cb *mac_cb,
+						 u8 op_type, u32 locate,
+						 u32 port)
+{
+	union acpi_object obj_args[2], argv4;
+	union acpi_object *obj;
+
+	obj_args[0].integer.type = ACPI_TYPE_INTEGER;
+	obj_args[0].integer.value = locate;
+	obj_args[1].integer.type = ACPI_TYPE_INTEGER;
+	obj_args[1].integer.value = port;
+
+	argv4.type = ACPI_TYPE_PACKAGE;
+	argv4.package.count = 2;
+	argv4.package.elements = obj_args;
+
+	obj = acpi_evaluate_dsm(ACPI_HANDLE(mac_cb->dev),
+				&hns_dsaf_acpi_dsm_guid, 0, op_type, &argv4);
+	if (!obj) {
+		dev_err(mac_cb->dev, "ledctrl fail, locate:%d port:%d!\n",
+			locate, port);
+		return;
+	}
+
+	ACPI_FREE(obj);
+}
+
 static void hns_cpld_set_led(struct hns_mac_cb *mac_cb, int link_status,
 			     u16 speed, int data)
 {
@@ -160,6 +188,9 @@ static void cpld_led_reset_acpi(struct hns_mac_cb *mac_cb)
 static int cpld_set_led_id(struct hns_mac_cb *mac_cb,
 			   enum hnae_led_state status)
 {
+	if (!mac_cb->cpld_ctrl)
+		return 0;
+
 	switch (status) {
 	case HNAE_LED_ACTIVE:
 		mac_cb->cpld_led_value =
@@ -184,6 +215,30 @@ static int cpld_set_led_id(struct hns_mac_cb *mac_cb,
 	return 0;
 }
 
+static int cpld_set_led_id_acpi(struct hns_mac_cb *mac_cb,
+				enum hnae_led_state status)
+{
+	switch (status) {
+	case HNAE_LED_ACTIVE:
+		hns_dsaf_acpi_locate_ledctrl_by_port(mac_cb,
+						     HNS_OP_LOCATE_LED_SET_FUNC,
+						     CPLD_LED_ON_VALUE,
+						     mac_cb->mac_id);
+		break;
+	case HNAE_LED_INACTIVE:
+		hns_dsaf_acpi_locate_ledctrl_by_port(mac_cb,
+						     HNS_OP_LOCATE_LED_SET_FUNC,
+						     CPLD_LED_DEFAULT_VALUE,
+						     mac_cb->mac_id);
+		break;
+	default:
+		dev_err(mac_cb->dev, "invalid led state: %d!", status);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
 #define RESET_REQ_OR_DREQ 1
 
 static void hns_dsaf_acpi_srst_by_port(struct dsaf_device *dsaf_dev, u8 op_type,
@@ -660,7 +715,7 @@ struct dsaf_misc_op *hns_misc_op_get(struct dsaf_device *dsaf_dev)
 	} else if (is_acpi_node(dsaf_dev->dev->fwnode)) {
 		misc_op->cpld_set_led = hns_cpld_set_led_acpi;
 		misc_op->cpld_reset_led = cpld_led_reset_acpi;
-		misc_op->cpld_set_led_id = cpld_set_led_id;
+		misc_op->cpld_set_led_id = cpld_set_led_id_acpi;
 
 		misc_op->dsaf_reset = hns_dsaf_rst_acpi;
 		misc_op->xge_srst = hns_dsaf_xge_srst_by_port_acpi;
diff --git a/drivers/net/ethernet/hisilicon/hns3/Makefile b/drivers/net/ethernet/hisilicon/hns3/Makefile
index a9349e1..002534f 100644
--- a/drivers/net/ethernet/hisilicon/hns3/Makefile
+++ b/drivers/net/ethernet/hisilicon/hns3/Makefile
@@ -1,7 +1,14 @@
+# SPDX-License-Identifier: GPL-2.0+
 #
 # Makefile for the HISILICON network device drivers.
 #
 
 obj-$(CONFIG_HNS3) += hns3pf/
+obj-$(CONFIG_HNS3) += hns3vf/
 
 obj-$(CONFIG_HNS3) += hnae3.o
+
+obj-$(CONFIG_HNS3_ENET) += hns3.o
+hns3-objs = hns3_enet.o hns3_ethtool.o
+
+hns3-$(CONFIG_HNS3_DCB) += hns3_dcbnl.o
diff --git a/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h b/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h
new file mode 100644
index 0000000..3e9203e
--- /dev/null
+++ b/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h
@@ -0,0 +1,88 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/* Copyright (c) 2016-2017 Hisilicon Limited. */
+
+#ifndef __HCLGE_MBX_H
+#define __HCLGE_MBX_H
+#include <linux/init.h>
+#include <linux/mutex.h>
+#include <linux/types.h>
+
+#define HCLGE_MBX_VF_MSG_DATA_NUM	16
+
+enum HCLGE_MBX_OPCODE {
+	HCLGE_MBX_RESET = 0x01,		/* (VF -> PF) assert reset */
+	HCLGE_MBX_SET_UNICAST,		/* (VF -> PF) set UC addr */
+	HCLGE_MBX_SET_MULTICAST,	/* (VF -> PF) set MC addr */
+	HCLGE_MBX_SET_VLAN,		/* (VF -> PF) set VLAN */
+	HCLGE_MBX_MAP_RING_TO_VECTOR,	/* (VF -> PF) map ring-to-vector */
+	HCLGE_MBX_UNMAP_RING_TO_VECTOR,	/* (VF -> PF) unmap ring-to-vector */
+	HCLGE_MBX_SET_PROMISC_MODE,	/* (VF -> PF) set promiscuous mode */
+	HCLGE_MBX_SET_MACVLAN,		/* (VF -> PF) set unicast filter */
+	HCLGE_MBX_API_NEGOTIATE,	/* (VF -> PF) negotiate API version */
+	HCLGE_MBX_GET_QINFO,		/* (VF -> PF) get queue config */
+	HCLGE_MBX_GET_TCINFO,		/* (VF -> PF) get TC config */
+	HCLGE_MBX_GET_RETA,		/* (VF -> PF) get RETA */
+	HCLGE_MBX_GET_RSS_KEY,		/* (VF -> PF) get RSS key */
+	HCLGE_MBX_GET_MAC_ADDR,		/* (VF -> PF) get MAC addr */
+	HCLGE_MBX_PF_VF_RESP,		/* (PF -> VF) generate response to VF */
+	HCLGE_MBX_GET_BDNUM,		/* (VF -> PF) get BD num */
+	HCLGE_MBX_GET_BUFSIZE,		/* (VF -> PF) get buffer size */
+	HCLGE_MBX_GET_STREAMID,		/* (VF -> PF) get stream id */
+	HCLGE_MBX_SET_AESTART,		/* (VF -> PF) start ae */
+	HCLGE_MBX_SET_TSOSTATS,		/* (VF -> PF) get tso stats */
+	HCLGE_MBX_LINK_STAT_CHANGE,	/* (PF -> VF) link status has changed */
+	HCLGE_MBX_GET_BASE_CONFIG,	/* (VF -> PF) get config */
+	HCLGE_MBX_BIND_FUNC_QUEUE,	/* (VF -> PF) bind function and queue */
+	HCLGE_MBX_GET_LINK_STATUS,	/* (VF -> PF) get link status */
+	HCLGE_MBX_QUEUE_RESET,		/* (VF -> PF) reset queue */
+};
+
+/* below are per-VF mac-vlan subcodes */
+enum hclge_mbx_mac_vlan_subcode {
+	HCLGE_MBX_MAC_VLAN_UC_MODIFY = 0,	/* modify UC mac addr */
+	HCLGE_MBX_MAC_VLAN_UC_ADD,		/* add a new UC mac addr */
+	HCLGE_MBX_MAC_VLAN_UC_REMOVE,		/* remove a UC mac addr */
+	HCLGE_MBX_MAC_VLAN_MC_MODIFY,		/* modify MC mac addr */
+	HCLGE_MBX_MAC_VLAN_MC_ADD,		/* add new MC mac addr */
+	HCLGE_MBX_MAC_VLAN_MC_REMOVE,		/* remove MC mac addr */
+	HCLGE_MBX_MAC_VLAN_MC_FUNC_MTA_ENABLE,	/* config func MTA enable */
+};
+
+/* below are per-VF vlan cfg subcodes */
+enum hclge_mbx_vlan_cfg_subcode {
+	HCLGE_MBX_VLAN_FILTER = 0,	/* set vlan filter */
+	HCLGE_MBX_VLAN_TX_OFF_CFG,	/* set tx side vlan offload */
+	HCLGE_MBX_VLAN_RX_OFF_CFG,	/* set rx side vlan offload */
+};
+
+#define HCLGE_MBX_MAX_MSG_SIZE	16
+#define HCLGE_MBX_MAX_RESP_DATA_SIZE	8
+
+struct hclgevf_mbx_resp_status {
+	struct mutex mbx_mutex; /* protects against contending sync cmd resp */
+	u32 origin_mbx_msg;
+	bool received_resp;
+	int resp_status;
+	u8 additional_info[HCLGE_MBX_MAX_RESP_DATA_SIZE];
+};
+
+struct hclge_mbx_vf_to_pf_cmd {
+	u8 rsv;
+	u8 mbx_src_vfid; /* Auto filled by IMP */
+	u8 rsv1[2];
+	u8 msg_len;
+	u8 rsv2[3];
+	u8 msg[HCLGE_MBX_MAX_MSG_SIZE];
+};
+
+struct hclge_mbx_pf_to_vf_cmd {
+	u8 dest_vfid;
+	u8 rsv[3];
+	u8 msg_len;
+	u8 rsv1[3];
+	u16 msg[8];
+};
+
+#define hclge_mbx_ring_ptr_move_crq(crq) \
+	(crq->next_to_use = (crq->next_to_use + 1) % crq->desc_num)
+#endif
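
hclge_mbx_ring_ptr_move_crq() at the end of the header is a plain circular-buffer advance over desc_num entries. The same arithmetic as a standalone sketch (ring layout simplified):

#include <assert.h>

struct ring {
	int next_to_use;
	int desc_num;
};

/* mirrors hclge_mbx_ring_ptr_move_crq(): advance and wrap at desc_num */
#define ring_ptr_move(r) \
	((r)->next_to_use = ((r)->next_to_use + 1) % (r)->desc_num)

int main(void)
{
	struct ring crq = { .next_to_use = 0, .desc_num = 4 };

	for (int i = 0; i < 5; i++)
		ring_ptr_move(&crq);
	assert(crq.next_to_use == 1);	/* wrapped past desc_num back to 1 */
	return 0;
}
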
diff --git a/drivers/net/ethernet/hisilicon/hns3/hnae3.c b/drivers/net/ethernet/hisilicon/hns3/hnae3.c
index 5bcb223..02145f2 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hnae3.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hnae3.c
@@ -196,9 +196,18 @@ int hnae3_register_ae_dev(struct hnae3_ae_dev *ae_dev)
 	const struct pci_device_id *id;
 	struct hnae3_ae_algo *ae_algo;
 	struct hnae3_client *client;
-	int ret = 0;
+	int ret = 0, lock_acquired;
 
-	mutex_lock(&hnae3_common_lock);
+	/* We can get deadlocked if SRIOV is being enabled in probe context
+	 * and probe gets called again in the same context. This can happen
+	 * when pci_enable_sriov() is called to create VFs from the PF's probe
+	 * context. Therefore, for simplicity, uniformly defer further probing
+	 * in all cases where we detect contention.
+	 */
+	lock_acquired = mutex_trylock(&hnae3_common_lock);
+	if (!lock_acquired)
+		return -EPROBE_DEFER;
+
 	list_add_tail(&ae_dev->node, &hnae3_ae_dev_list);
 
 	/* Check if there are matched ae_algo */
@@ -211,6 +220,7 @@ int hnae3_register_ae_dev(struct hnae3_ae_dev *ae_dev)
 
 		if (!ae_dev->ops) {
 			dev_err(&ae_dev->pdev->dev, "ae_dev ops are null\n");
+			ret = -EOPNOTSUPP;
 			goto out_err;
 		}
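
Switching from mutex_lock() to mutex_trylock() turns a re-entrant probe from a deadlock into an -EPROBE_DEFER, letting the driver core retry the probe later. A userspace sketch of the same trylock-and-defer shape (the EPROBE_DEFER value is shown only for illustration):

#include <assert.h>
#include <pthread.h>

#define EPROBE_DEFER 517	/* illustrative stand-in for the kernel errno */

static pthread_mutex_t probe_lock = PTHREAD_MUTEX_INITIALIZER;

/* mirrors the hnae3 pattern: back off instead of blocking on re-entry */
static int register_dev(void)
{
	if (pthread_mutex_trylock(&probe_lock) != 0)
		return -EPROBE_DEFER;	/* contended: ask caller to retry */

	/* ... registration work ... */
	pthread_mutex_unlock(&probe_lock);
	return 0;
}

int main(void)
{
	assert(register_dev() == 0);			/* uncontended path */
	pthread_mutex_lock(&probe_lock);		/* simulate re-entrant probe */
	assert(register_dev() == -EPROBE_DEFER);	/* contended path defers */
	pthread_mutex_unlock(&probe_lock);
	return 0;
}
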
 
diff --git a/drivers/net/ethernet/hisilicon/hns3/hnae3.h b/drivers/net/ethernet/hisilicon/hns3/hnae3.h
index 67c59e1..adec88d 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hnae3.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hnae3.h
@@ -274,10 +274,14 @@ struct hnae3_ae_dev {
  *   Get firmware version
  * get_mdix_mode()
 *   Get media type of phy
+ * enable_vlan_filter()
+ *   Enable vlan filter
  * set_vlan_filter()
  *   Set vlan filter config of Ports
  * set_vf_vlan_filter()
  *   Set vlan filter config of vf
+ * enable_hw_strip_rxvtag()
+ *   Enable/disable hardware VLAN tag stripping on received packets
  */
 struct hnae3_ae_ops {
 	int (*init_ae_dev)(struct hnae3_ae_dev *ae_dev);
@@ -380,12 +384,21 @@ struct hnae3_ae_ops {
 	void (*get_mdix_mode)(struct hnae3_handle *handle,
 			      u8 *tp_mdix_ctrl, u8 *tp_mdix);
 
+	void (*enable_vlan_filter)(struct hnae3_handle *handle, bool enable);
 	int (*set_vlan_filter)(struct hnae3_handle *handle, __be16 proto,
 			       u16 vlan_id, bool is_kill);
 	int (*set_vf_vlan_filter)(struct hnae3_handle *handle, int vfid,
 				  u16 vlan, u8 qos, __be16 proto);
+	int (*enable_hw_strip_rxvtag)(struct hnae3_handle *handle, bool enable);
 	void (*reset_event)(struct hnae3_handle *handle,
 			    enum hnae3_reset_type reset);
+	void (*get_channels)(struct hnae3_handle *handle,
+			     struct ethtool_channels *ch);
+	void (*get_tqps_and_rss_info)(struct hnae3_handle *h,
+				      u16 *free_tqps, u16 *max_rss_size);
+	int (*set_channels)(struct hnae3_handle *handle, u32 new_tqps_num);
+	void (*get_flowctrl_adv)(struct hnae3_handle *handle,
+				 u32 *flowctrl_adv);
 };
 
 struct hnae3_dcb_ops {
@@ -452,9 +465,10 @@ struct hnae3_unic_private_info {
 	struct hnae3_queue **tqp;  /* array base of all TQPs of this instance */
 };
 
-#define HNAE3_SUPPORT_MAC_LOOPBACK    1
-#define HNAE3_SUPPORT_PHY_LOOPBACK    2
-#define HNAE3_SUPPORT_SERDES_LOOPBACK 4
+#define HNAE3_SUPPORT_MAC_LOOPBACK    BIT(0)
+#define HNAE3_SUPPORT_PHY_LOOPBACK    BIT(1)
+#define HNAE3_SUPPORT_SERDES_LOOPBACK BIT(2)
+#define HNAE3_SUPPORT_VF	      BIT(3)
 
 struct hnae3_handle {
 	struct hnae3_client *client;
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_dcbnl.c b/drivers/net/ethernet/hisilicon/hns3/hns3_dcbnl.c
similarity index 96%
rename from drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_dcbnl.c
rename to drivers/net/ethernet/hisilicon/hns3/hns3_dcbnl.c
index 925619a..eb82700 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_dcbnl.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_dcbnl.c
@@ -93,7 +93,7 @@ void hns3_dcbnl_setup(struct hnae3_handle *handle)
 {
 	struct net_device *dev = handle->kinfo.netdev;
 
-	if (!handle->kinfo.dcb_ops)
+	if ((!handle->kinfo.dcb_ops) || (handle->flags & HNAE3_SUPPORT_VF))
 		return;
 
 	dev->dcbnl_ops = &hns3_dcbnl_ops;
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
similarity index 92%
rename from drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_enet.c
rename to drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
index 5941509..14c7625 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_enet.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
@@ -52,6 +52,8 @@ static const struct pci_device_id hns3_pci_tbl[] = {
 	 HNAE3_DEV_SUPPORT_ROCE_DCB_BITS},
 	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_MACSEC),
 	 HNAE3_DEV_SUPPORT_ROCE_DCB_BITS},
+	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_VF), 0},
+	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_DCB_PFC_VF), 0},
 	/* required last entry */
 	{0, }
 };
@@ -245,6 +247,8 @@ static int hns3_nic_net_up(struct net_device *netdev)
 	if (ret)
 		goto out_start_err;
 
+	clear_bit(HNS3_NIC_STATE_DOWN, &priv->state);
+
 	return 0;
 
 out_start_err:
@@ -284,6 +288,9 @@ static void hns3_nic_net_down(struct net_device *netdev)
 	const struct hnae3_ae_ops *ops;
 	int i;
 
+	if (test_and_set_bit(HNS3_NIC_STATE_DOWN, &priv->state))
+		return;
+
 	/* stop ae_dev */
 	ops = priv->ae_handle->ae_algo->ops;
 	if (ops->stop)
@@ -721,6 +728,58 @@ static void hns3_set_txbd_baseinfo(u16 *bdtp_fe_sc_vld_ra_ri, int frag_end)
 	hnae_set_field(*bdtp_fe_sc_vld_ra_ri, HNS3_TXD_SC_M, HNS3_TXD_SC_S, 0);
 }
 
+static int hns3_fill_desc_vtags(struct sk_buff *skb,
+				struct hns3_enet_ring *tx_ring,
+				u32 *inner_vlan_flag,
+				u32 *out_vlan_flag,
+				u16 *inner_vtag,
+				u16 *out_vtag)
+{
+#define HNS3_TX_VLAN_PRIO_SHIFT 13
+
+	if (skb->protocol == htons(ETH_P_8021Q) &&
+	    !(tx_ring->tqp->handle->kinfo.netdev->features &
+	    NETIF_F_HW_VLAN_CTAG_TX)) {
+		/* When HW VLAN acceleration is turned off and the stack sets
+		 * the protocol to 802.1q, the driver only needs to set the
+		 * protocol to the encapsulated ethertype.
+		 */
+		skb->protocol = vlan_get_protocol(skb);
+		return 0;
+	}
+
+	if (skb_vlan_tag_present(skb)) {
+		u16 vlan_tag;
+
+		vlan_tag = skb_vlan_tag_get(skb);
+		vlan_tag |= (skb->priority & 0x7) << HNS3_TX_VLAN_PRIO_SHIFT;
+
+		/* Based on hw strategy, use out_vtag for double-tagged
+		 * packets and inner_vtag for single-tagged packets.
+		 */
+		if (skb->protocol == htons(ETH_P_8021Q)) {
+			hnae_set_bit(*out_vlan_flag, HNS3_TXD_OVLAN_B, 1);
+			*out_vtag = vlan_tag;
+		} else {
+			hnae_set_bit(*inner_vlan_flag, HNS3_TXD_VLAN_B, 1);
+			*inner_vtag = vlan_tag;
+		}
+	} else if (skb->protocol == htons(ETH_P_8021Q)) {
+		struct vlan_ethhdr *vhdr;
+		int rc;
+
+		rc = skb_cow_head(skb, 0);
+		if (rc < 0)
+			return rc;
+		vhdr = (struct vlan_ethhdr *)skb->data;
+		vhdr->h_vlan_TCI |= cpu_to_be16((skb->priority & 0x7)
+					<< HNS3_TX_VLAN_PRIO_SHIFT);
+	}
+
+	skb->protocol = vlan_get_protocol(skb);
+	return 0;
+}
+
 static int hns3_fill_desc(struct hns3_enet_ring *ring, void *priv,
 			  int size, dma_addr_t dma, int frag_end,
 			  enum hns_desc_type type)
@@ -731,6 +790,8 @@ static int hns3_fill_desc(struct hns3_enet_ring *ring, void *priv,
 	u16 bdtp_fe_sc_vld_ra_ri = 0;
 	u32 type_cs_vlan_tso = 0;
 	struct sk_buff *skb;
+	u16 inner_vtag = 0;
+	u16 out_vtag = 0;
 	u32 paylen = 0;
 	u16 mss = 0;
 	__be16 protocol;
@@ -754,15 +815,16 @@ static int hns3_fill_desc(struct hns3_enet_ring *ring, void *priv,
 		skb = (struct sk_buff *)priv;
 		paylen = skb->len;
 
+		ret = hns3_fill_desc_vtags(skb, ring, &type_cs_vlan_tso,
+					   &ol_type_vlan_len_msec,
+					   &inner_vtag, &out_vtag);
+		if (unlikely(ret))
+			return ret;
+
 		if (skb->ip_summed == CHECKSUM_PARTIAL) {
 			skb_reset_mac_len(skb);
 			protocol = skb->protocol;
 
-			/* vlan packet*/
-			if (protocol == htons(ETH_P_8021Q)) {
-				protocol = vlan_get_protocol(skb);
-				skb->protocol = protocol;
-			}
 			ret = hns3_get_l4_protocol(skb, &ol4_proto, &il4_proto);
 			if (ret)
 				return ret;
@@ -788,6 +850,8 @@ static int hns3_fill_desc(struct hns3_enet_ring *ring, void *priv,
 			cpu_to_le32(type_cs_vlan_tso);
 		desc->tx.paylen = cpu_to_le32(paylen);
 		desc->tx.mss = cpu_to_le16(mss);
+		desc->tx.vlan_tag = cpu_to_le16(inner_vtag);
+		desc->tx.outer_vlan_tag = cpu_to_le16(out_vtag);
 	}
 
 	/* move ring pointer to next.*/
@@ -1030,6 +1094,9 @@ static int hns3_nic_set_features(struct net_device *netdev,
 				 netdev_features_t features)
 {
 	struct hns3_nic_priv *priv = netdev_priv(netdev);
+	struct hnae3_handle *h = priv->ae_handle;
+	netdev_features_t changed;
+	int ret;
 
 	if (features & (NETIF_F_TSO | NETIF_F_TSO6)) {
 		priv->ops.fill_desc = hns3_fill_desc_tso;
@@ -1039,15 +1106,32 @@ static int hns3_nic_set_features(struct net_device *netdev,
 		priv->ops.maybe_stop_tx = hns3_nic_maybe_stop_tx;
 	}
 
+	if (features & NETIF_F_HW_VLAN_CTAG_FILTER)
+		h->ae_algo->ops->enable_vlan_filter(h, true);
+	else
+		h->ae_algo->ops->enable_vlan_filter(h, false);
+
+	changed = netdev->features ^ features;
+	if (changed & NETIF_F_HW_VLAN_CTAG_RX) {
+		if (features & NETIF_F_HW_VLAN_CTAG_RX)
+			ret = h->ae_algo->ops->enable_hw_strip_rxvtag(h, true);
+		else
+			ret = h->ae_algo->ops->enable_hw_strip_rxvtag(h, false);
+
+		if (ret)
+			return ret;
+	}
+
 	netdev->features = features;
 	return 0;
 }
 
-static void
-hns3_nic_get_stats64(struct net_device *netdev, struct rtnl_link_stats64 *stats)
+static void hns3_nic_get_stats64(struct net_device *netdev,
+				 struct rtnl_link_stats64 *stats)
 {
 	struct hns3_nic_priv *priv = netdev_priv(netdev);
 	int queue_num = priv->ae_handle->kinfo.num_tqps;
+	struct hnae3_handle *handle = priv->ae_handle;
 	struct hns3_enet_ring *ring;
 	unsigned int start;
 	unsigned int idx;
@@ -1055,6 +1139,13 @@ hns3_nic_get_stats64(struct net_device *netdev, struct rtnl_link_stats64 *stats)
 	u64 rx_bytes = 0;
 	u64 tx_pkts = 0;
 	u64 rx_pkts = 0;
+	u64 tx_drop = 0;
+	u64 rx_drop = 0;
+
+	if (test_bit(HNS3_NIC_STATE_DOWN, &priv->state))
+		return;
+
+	handle->ae_algo->ops->update_stats(handle, &netdev->stats);
 
 	for (idx = 0; idx < queue_num; idx++) {
 		/* fetch the tx stats */
@@ -1063,6 +1154,8 @@ hns3_nic_get_stats64(struct net_device *netdev, struct rtnl_link_stats64 *stats)
 			start = u64_stats_fetch_begin_irq(&ring->syncp);
 			tx_bytes += ring->stats.tx_bytes;
 			tx_pkts += ring->stats.tx_pkts;
+			tx_drop += ring->stats.tx_busy;
+			tx_drop += ring->stats.sw_err_cnt;
 		} while (u64_stats_fetch_retry_irq(&ring->syncp, start));
 
 		/* fetch the rx stats */
@@ -1071,6 +1164,9 @@ hns3_nic_get_stats64(struct net_device *netdev, struct rtnl_link_stats64 *stats)
 			start = u64_stats_fetch_begin_irq(&ring->syncp);
 			rx_bytes += ring->stats.rx_bytes;
 			rx_pkts += ring->stats.rx_pkts;
+			rx_drop += ring->stats.non_vld_descs;
+			rx_drop += ring->stats.err_pkt_len;
+			rx_drop += ring->stats.l2_err;
 		} while (u64_stats_fetch_retry_irq(&ring->syncp, start));
 	}
 
@@ -1086,8 +1182,8 @@ hns3_nic_get_stats64(struct net_device *netdev, struct rtnl_link_stats64 *stats)
 	stats->rx_missed_errors = netdev->stats.rx_missed_errors;
 
 	stats->tx_errors = netdev->stats.tx_errors;
-	stats->rx_dropped = netdev->stats.rx_dropped;
-	stats->tx_dropped = netdev->stats.tx_dropped;
+	stats->rx_dropped = rx_drop + netdev->stats.rx_dropped;
+	stats->tx_dropped = tx_drop + netdev->stats.tx_dropped;
 	stats->collisions = netdev->stats.collisions;
 	stats->rx_over_errors = netdev->stats.rx_over_errors;
 	stats->rx_frame_errors = netdev->stats.rx_frame_errors;
@@ -1317,6 +1413,8 @@ static int hns3_nic_change_mtu(struct net_device *netdev, int new_mtu)
 		return ret;
 	}
 
+	netdev->mtu = new_mtu;
+
 	/* if the netdev was running earlier, bring it up again */
 	if (if_running && hns3_nic_net_open(netdev))
 		ret = -EINVAL;
@@ -1476,6 +1574,8 @@ static struct pci_driver hns3_driver = {
 /* set default feature to hns3 */
 static void hns3_set_default_feature(struct net_device *netdev)
 {
+	struct hnae3_handle *h = hns3_get_handle(netdev);
+
 	netdev->priv_flags |= IFF_UNICAST_FLT;
 
 	netdev->hw_enc_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
@@ -1490,6 +1590,7 @@ static void hns3_set_default_feature(struct net_device *netdev)
 
 	netdev->features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
 		NETIF_F_HW_VLAN_CTAG_FILTER |
+		NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX |
 		NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_GSO |
 		NETIF_F_GRO | NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_GSO_GRE |
 		NETIF_F_GSO_GRE_CSUM | NETIF_F_GSO_UDP_TUNNEL |
@@ -1503,11 +1604,15 @@ static void hns3_set_default_feature(struct net_device *netdev)
 		NETIF_F_GSO_UDP_TUNNEL_CSUM;
 
 	netdev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
-		NETIF_F_HW_VLAN_CTAG_FILTER |
+		NETIF_F_HW_VLAN_CTAG_TX |
 		NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_GSO |
 		NETIF_F_GRO | NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_GSO_GRE |
 		NETIF_F_GSO_GRE_CSUM | NETIF_F_GSO_UDP_TUNNEL |
 		NETIF_F_GSO_UDP_TUNNEL_CSUM;
+
+	if (!(h->flags & HNAE3_SUPPORT_VF))
+		netdev->hw_features |=
+			NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_HW_VLAN_CTAG_RX;
 }
 
 static int hns3_alloc_buffer(struct hns3_enet_ring *ring,
@@ -2083,6 +2188,22 @@ static int hns3_handle_rx_bd(struct hns3_enet_ring *ring,
 
 	prefetchw(skb->data);
 
+	/* Based on hw strategy, the offloaded tag is stored in ot_vlan_tag
+	 * for double-tagged packets and in vlan_tag for single-tagged
+	 * packets.
+	 */
+	if (netdev->features & NETIF_F_HW_VLAN_CTAG_RX) {
+		u16 vlan_tag;
+
+		vlan_tag = le16_to_cpu(desc->rx.ot_vlan_tag);
+		if (!(vlan_tag & VLAN_VID_MASK))
+			vlan_tag = le16_to_cpu(desc->rx.vlan_tag);
+		if (vlan_tag & VLAN_VID_MASK)
+			__vlan_hwaccel_put_tag(skb,
+					       htons(ETH_P_8021Q),
+					       vlan_tag);
+	}
+
 	bnum = 1;
 	if (length <= HNS3_RX_HEAD_SIZE) {
 		memcpy(__skb_put(skb, length), va, ALIGN(length, sizeof(long)));
@@ -2649,6 +2770,19 @@ static int hns3_get_ring_config(struct hns3_nic_priv *priv)
 	return ret;
 }
 
+static void hns3_put_ring_config(struct hns3_nic_priv *priv)
+{
+	struct hnae3_handle *h = priv->ae_handle;
+	int i;
+
+	for (i = 0; i < h->kinfo.num_tqps; i++) {
+		devm_kfree(priv->dev, priv->ring_data[i].ring);
+		devm_kfree(priv->dev,
+			   priv->ring_data[i + h->kinfo.num_tqps].ring);
+	}
+	devm_kfree(priv->dev, priv->ring_data);
+}
+
 static int hns3_alloc_ring_memory(struct hns3_enet_ring *ring)
 {
 	int ret;
@@ -2785,8 +2919,12 @@ int hns3_uninit_all_ring(struct hns3_nic_priv *priv)
 			h->ae_algo->ops->reset_queue(h, i);
 
 		hns3_fini_ring(priv->ring_data[i].ring);
+		devm_kfree(priv->dev, priv->ring_data[i].ring);
 		hns3_fini_ring(priv->ring_data[i + h->kinfo.num_tqps].ring);
+		devm_kfree(priv->dev,
+			   priv->ring_data[i + h->kinfo.num_tqps].ring);
 	}
+	devm_kfree(priv->dev, priv->ring_data);
 
 	return 0;
 }
@@ -3160,6 +3298,115 @@ static int hns3_reset_notify(struct hnae3_handle *handle,
 	return ret;
 }
 
+static u16 hns3_get_max_available_channels(struct net_device *netdev)
+{
+	struct hnae3_handle *h = hns3_get_handle(netdev);
+	u16 free_tqps, max_rss_size, max_tqps;
+
+	h->ae_algo->ops->get_tqps_and_rss_info(h, &free_tqps, &max_rss_size);
+	max_tqps = h->kinfo.num_tc * max_rss_size;
+
+	return min_t(u16, max_tqps, (free_tqps + h->kinfo.num_tqps));
+}
+
+static int hns3_modify_tqp_num(struct net_device *netdev, u16 new_tqp_num)
+{
+	struct hns3_nic_priv *priv = netdev_priv(netdev);
+	struct hnae3_handle *h = hns3_get_handle(netdev);
+	int ret;
+
+	ret = h->ae_algo->ops->set_channels(h, new_tqp_num);
+	if (ret)
+		return ret;
+
+	ret = hns3_get_ring_config(priv);
+	if (ret)
+		return ret;
+
+	ret = hns3_nic_init_vector_data(priv);
+	if (ret)
+		goto err_uninit_vector;
+
+	ret = hns3_init_all_ring(priv);
+	if (ret)
+		goto err_put_ring;
+
+	return 0;
+
+err_put_ring:
+	hns3_put_ring_config(priv);
+err_uninit_vector:
+	hns3_nic_uninit_vector_data(priv);
+	return ret;
+}
+
+static int hns3_adjust_tqps_num(u8 num_tc, u32 new_tqp_num)
+{
+	return (new_tqp_num / num_tc) * num_tc;
+}
+
+int hns3_set_channels(struct net_device *netdev,
+		      struct ethtool_channels *ch)
+{
+	struct hns3_nic_priv *priv = netdev_priv(netdev);
+	struct hnae3_handle *h = hns3_get_handle(netdev);
+	struct hnae3_knic_private_info *kinfo = &h->kinfo;
+	bool if_running = netif_running(netdev);
+	u32 new_tqp_num = ch->combined_count;
+	u16 org_tqp_num;
+	int ret;
+
+	if (ch->rx_count || ch->tx_count)
+		return -EINVAL;
+
+	if (new_tqp_num > hns3_get_max_available_channels(netdev) ||
+	    new_tqp_num < kinfo->num_tc) {
+		dev_err(&netdev->dev,
+			"Change tqps fail, the tqp range is from %d to %d",
+			kinfo->num_tc,
+			hns3_get_max_available_channels(netdev));
+		return -EINVAL;
+	}
+
+	new_tqp_num = hns3_adjust_tqps_num(kinfo->num_tc, new_tqp_num);
+	if (kinfo->num_tqps == new_tqp_num)
+		return 0;
+
+	if (if_running)
+		dev_close(netdev);
+
+	hns3_clear_all_ring(h);
+
+	ret = hns3_nic_uninit_vector_data(priv);
+	if (ret) {
+		dev_err(&netdev->dev,
+			"Unbind vector with tqp fail, nothing is changed");
+		goto open_netdev;
+	}
+
+	hns3_uninit_all_ring(priv);
+
+	org_tqp_num = h->kinfo.num_tqps;
+	ret = hns3_modify_tqp_num(netdev, new_tqp_num);
+	if (ret) {
+		ret = hns3_modify_tqp_num(netdev, org_tqp_num);
+		if (ret) {
+			/* Reverting to the old tqp num failed: fatal error */
+			dev_err(&netdev->dev,
+				"Revert to old tqp num fail, ret=%d", ret);
+			return ret;
+		}
+		dev_info(&netdev->dev,
+			 "Change tqp num fail, Revert to old tqp num");
+	}
+
+open_netdev:
+	if (if_running)
+		dev_open(netdev);
+
+	return ret;
+}
+
 static const struct hnae3_client_ops client_ops = {
 	.init_instance = hns3_client_init,
 	.uninit_instance = hns3_client_uninit,
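
hns3_fill_desc_vtags() folds skb->priority into bits 15:13 of the tag (the 802.1Q PCP field) while the VID stays in bits 11:0, and picks the outer or inner descriptor slot by tag depth. A sketch of the TCI packing, with a hypothetical build_vtag() helper:

#include <assert.h>
#include <stdint.h>

#define VLAN_PRIO_SHIFT 13	/* PCP lives in TCI bits 15:13 */
#define VLAN_VID_MASK   0x0fff	/* VID lives in TCI bits 11:0 */

/* mirrors hns3_fill_desc_vtags(): fold priority into the tag's PCP field */
static uint16_t build_vtag(uint16_t vid, uint8_t priority)
{
	return (uint16_t)((vid & VLAN_VID_MASK) |
			  ((priority & 0x7) << VLAN_PRIO_SHIFT));
}

int main(void)
{
	uint16_t tag = build_vtag(100, 5);

	assert((tag & VLAN_VID_MASK) == 100);
	assert((tag >> VLAN_PRIO_SHIFT) == 5);
	return 0;
}
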
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_enet.h b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h
similarity index 99%
rename from drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_enet.h
rename to drivers/net/ethernet/hisilicon/hns3/hns3_enet.h
index 8a9de75..a2a7ea3 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_enet.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h
@@ -595,6 +595,8 @@ static inline void hns3_write_reg(void __iomem *base, u32 reg, u32 value)
 	(((struct hns3_nic_priv *)netdev_priv(ndev))->ae_handle)
 
 void hns3_ethtool_set_ops(struct net_device *netdev);
+int hns3_set_channels(struct net_device *netdev,
+		      struct ethtool_channels *ch);
 
 bool hns3_clean_tx_ring(struct hns3_enet_ring *ring, int budget);
 int hns3_init_all_ring(struct hns3_nic_priv *priv);
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_ethtool.c b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
similarity index 86%
rename from drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_ethtool.c
rename to drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
index a21470c..d3cb3ec 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_ethtool.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
@@ -15,26 +15,25 @@
 
 struct hns3_stats {
 	char stats_string[ETH_GSTRING_LEN];
-	int stats_size;
 	int stats_offset;
 };
 
 /* tqp related stats */
 #define HNS3_TQP_STAT(_string, _member)	{			\
 	.stats_string = _string,				\
-	.stats_size = FIELD_SIZEOF(struct ring_stats, _member),	\
-	.stats_offset = offsetof(struct hns3_enet_ring, stats),	\
-}								\
+	.stats_offset = offsetof(struct hns3_enet_ring, stats) +\
+			offsetof(struct ring_stats, _member),   \
+}
 
 static const struct hns3_stats hns3_txq_stats[] = {
 	/* Tx per-queue statistics */
-	HNS3_TQP_STAT("tx_io_err_cnt", io_err_cnt),
-	HNS3_TQP_STAT("tx_sw_err_cnt", sw_err_cnt),
-	HNS3_TQP_STAT("tx_seg_pkt_cnt", seg_pkt_cnt),
-	HNS3_TQP_STAT("tx_pkts", tx_pkts),
-	HNS3_TQP_STAT("tx_bytes", tx_bytes),
-	HNS3_TQP_STAT("tx_err_cnt", tx_err_cnt),
-	HNS3_TQP_STAT("tx_restart_queue", restart_queue),
+	HNS3_TQP_STAT("io_err_cnt", io_err_cnt),
+	HNS3_TQP_STAT("tx_dropped", sw_err_cnt),
+	HNS3_TQP_STAT("seg_pkt_cnt", seg_pkt_cnt),
+	HNS3_TQP_STAT("packets", tx_pkts),
+	HNS3_TQP_STAT("bytes", tx_bytes),
+	HNS3_TQP_STAT("errors", tx_err_cnt),
+	HNS3_TQP_STAT("tx_wake", restart_queue),
 	HNS3_TQP_STAT("tx_busy", tx_busy),
 };
 
@@ -42,18 +41,18 @@ static const struct hns3_stats hns3_txq_stats[] = {
 
 static const struct hns3_stats hns3_rxq_stats[] = {
 	/* Rx per-queue statistics */
-	HNS3_TQP_STAT("rx_io_err_cnt", io_err_cnt),
-	HNS3_TQP_STAT("rx_sw_err_cnt", sw_err_cnt),
-	HNS3_TQP_STAT("rx_seg_pkt_cnt", seg_pkt_cnt),
-	HNS3_TQP_STAT("rx_pkts", rx_pkts),
-	HNS3_TQP_STAT("rx_bytes", rx_bytes),
-	HNS3_TQP_STAT("rx_err_cnt", rx_err_cnt),
-	HNS3_TQP_STAT("rx_reuse_pg_cnt", reuse_pg_cnt),
-	HNS3_TQP_STAT("rx_err_pkt_len", err_pkt_len),
-	HNS3_TQP_STAT("rx_non_vld_descs", non_vld_descs),
-	HNS3_TQP_STAT("rx_err_bd_num", err_bd_num),
-	HNS3_TQP_STAT("rx_l2_err", l2_err),
-	HNS3_TQP_STAT("rx_l3l4_csum_err", l3l4_csum_err),
+	HNS3_TQP_STAT("io_err_cnt", io_err_cnt),
+	HNS3_TQP_STAT("rx_dropped", sw_err_cnt),
+	HNS3_TQP_STAT("seg_pkt_cnt", seg_pkt_cnt),
+	HNS3_TQP_STAT("packets", rx_pkts),
+	HNS3_TQP_STAT("bytes", rx_bytes),
+	HNS3_TQP_STAT("errors", rx_err_cnt),
+	HNS3_TQP_STAT("reuse_pg_cnt", reuse_pg_cnt),
+	HNS3_TQP_STAT("err_pkt_len", err_pkt_len),
+	HNS3_TQP_STAT("non_vld_descs", non_vld_descs),
+	HNS3_TQP_STAT("err_bd_num", err_bd_num),
+	HNS3_TQP_STAT("l2_err", l2_err),
+	HNS3_TQP_STAT("l3l4_csum_err", l3l4_csum_err),
 };
 
 #define HNS3_RXQ_STATS_COUNT ARRAY_SIZE(hns3_rxq_stats)
@@ -389,9 +388,9 @@ static int hns3_get_sset_count(struct net_device *netdev, int stringset)
 }
 
 static void *hns3_update_strings(u8 *data, const struct hns3_stats *stats,
-				 u32 stat_count, u32 num_tqps)
+		u32 stat_count, u32 num_tqps, const char *prefix)
 {
-#define MAX_PREFIX_SIZE (8 + 4)
+#define MAX_PREFIX_SIZE (6 + 4)
 	u32 size_left;
 	u32 i, j;
 	u32 n1;
@@ -401,7 +400,8 @@ static void *hns3_update_strings(u8 *data, const struct hns3_stats *stats,
 			data[ETH_GSTRING_LEN - 1] = '\0';
 
 			/* first, prepend the prefix string */
-			n1 = snprintf(data, MAX_PREFIX_SIZE, "rcb_q%d_", i);
+			n1 = snprintf(data, MAX_PREFIX_SIZE, "%s#%d_",
+				      prefix, i);
 			n1 = min_t(uint, n1, MAX_PREFIX_SIZE - 1);
 			size_left = (ETH_GSTRING_LEN - 1) - n1;
 
@@ -417,14 +417,16 @@ static void *hns3_update_strings(u8 *data, const struct hns3_stats *stats,
 static u8 *hns3_get_strings_tqps(struct hnae3_handle *handle, u8 *data)
 {
 	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
+	const char tx_prefix[] = "txq";
+	const char rx_prefix[] = "rxq";
 
 	/* get strings for Tx */
 	data = hns3_update_strings(data, hns3_txq_stats, HNS3_TXQ_STATS_COUNT,
-				   kinfo->num_tqps);
+				   kinfo->num_tqps, tx_prefix);
 
 	/* get strings for Rx */
 	data = hns3_update_strings(data, hns3_rxq_stats, HNS3_RXQ_STATS_COUNT,
-				   kinfo->num_tqps);
+				   kinfo->num_tqps, rx_prefix);
 
 	return data;
 }
@@ -455,13 +457,13 @@ static u64 *hns3_get_stats_tqps(struct hnae3_handle *handle, u64 *data)
 	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
 	struct hns3_enet_ring *ring;
 	u8 *stat;
-	u32 i;
+	int i, j;
 
 	/* get stats for Tx */
 	for (i = 0; i < kinfo->num_tqps; i++) {
 		ring = nic_priv->ring_data[i].ring;
-		for (i = 0; i < HNS3_TXQ_STATS_COUNT; i++) {
-			stat = (u8 *)ring + hns3_txq_stats[i].stats_offset;
+		for (j = 0; j < HNS3_TXQ_STATS_COUNT; j++) {
+			stat = (u8 *)ring + hns3_txq_stats[j].stats_offset;
 			*data++ = *(u64 *)stat;
 		}
 	}
@@ -469,8 +471,8 @@ static u64 *hns3_get_stats_tqps(struct hnae3_handle *handle, u64 *data)
 	/* get stats for Rx */
 	for (i = 0; i < kinfo->num_tqps; i++) {
 		ring = nic_priv->ring_data[i + kinfo->num_tqps].ring;
-		for (i = 0; i < HNS3_RXQ_STATS_COUNT; i++) {
-			stat = (u8 *)ring + hns3_rxq_stats[i].stats_offset;
+		for (j = 0; j < HNS3_RXQ_STATS_COUNT; j++) {
+			stat = (u8 *)ring + hns3_rxq_stats[j].stats_offset;
 			*data++ = *(u64 *)stat;
 		}
 	}
@@ -559,10 +561,23 @@ static void hns3_get_pauseparam(struct net_device *netdev,
 			&param->rx_pause, &param->tx_pause);
 }
 
+static int hns3_set_pauseparam(struct net_device *netdev,
+			       struct ethtool_pauseparam *param)
+{
+	struct hnae3_handle *h = hns3_get_handle(netdev);
+
+	if (h->ae_algo->ops->set_pauseparam)
+		return h->ae_algo->ops->set_pauseparam(h, param->autoneg,
+						       param->rx_pause,
+						       param->tx_pause);
+	return -EOPNOTSUPP;
+}
+
 static int hns3_get_link_ksettings(struct net_device *netdev,
 				   struct ethtool_link_ksettings *cmd)
 {
 	struct hnae3_handle *h = hns3_get_handle(netdev);
+	u32 flowctrl_adv = 0;
 	u32 supported_caps;
 	u32 advertised_caps;
 	u8 media_type = HNAE3_MEDIA_TYPE_UNKNOWN;
@@ -638,6 +653,8 @@ static int hns3_get_link_ksettings(struct net_device *netdev,
 		if (!cmd->base.autoneg)
 			advertised_caps &= ~HNS3_LM_AUTONEG_BIT;
 
+		advertised_caps &= ~HNS3_LM_PAUSE_BIT;
+
 		/* now, map driver link modes to ethtool link modes */
 		hns3_driv_to_eth_caps(supported_caps, cmd, false);
 		hns3_driv_to_eth_caps(advertised_caps, cmd, true);
@@ -650,6 +667,18 @@ static int hns3_get_link_ksettings(struct net_device *netdev,
 	/* 4.mdio_support */
 	cmd->base.mdio_support = ETH_MDIO_SUPPORTS_C22;
 
+	/* 5. get flow control settings */
+	if (h->ae_algo->ops->get_flowctrl_adv)
+		h->ae_algo->ops->get_flowctrl_adv(h, &flowctrl_adv);
+
+	if (flowctrl_adv & ADVERTISED_Pause)
+		ethtool_link_ksettings_add_link_mode(cmd, advertising,
+						     Pause);
+
+	if (flowctrl_adv & ADVERTISED_Asym_Pause)
+		ethtool_link_ksettings_add_link_mode(cmd, advertising,
+						     Asym_Pause);
+
 	return 0;
 }
 
@@ -730,7 +759,7 @@ static int hns3_get_rxnfc(struct net_device *netdev,
 
 	switch (cmd->cmd) {
 	case ETHTOOL_GRXRINGS:
-		cmd->data = h->kinfo.num_tc * h->kinfo.rss_size;
+		cmd->data = h->kinfo.rss_size;
 		break;
 	case ETHTOOL_GRXFH:
 		return h->ae_algo->ops->get_rss_tuple(h, cmd);
@@ -849,6 +878,30 @@ static int hns3_nway_reset(struct net_device *netdev)
 	return genphy_restart_aneg(phy);
 }
 
+static void hns3_get_channels(struct net_device *netdev,
+			      struct ethtool_channels *ch)
+{
+	struct hnae3_handle *h = hns3_get_handle(netdev);
+
+	if (h->ae_algo->ops->get_channels)
+		h->ae_algo->ops->get_channels(h, ch);
+}
+
+static const struct ethtool_ops hns3vf_ethtool_ops = {
+	.get_drvinfo = hns3_get_drvinfo,
+	.get_ringparam = hns3_get_ringparam,
+	.set_ringparam = hns3_set_ringparam,
+	.get_strings = hns3_get_strings,
+	.get_ethtool_stats = hns3_get_stats,
+	.get_sset_count = hns3_get_sset_count,
+	.get_rxnfc = hns3_get_rxnfc,
+	.get_rxfh_key_size = hns3_get_rss_key_size,
+	.get_rxfh_indir_size = hns3_get_rss_indir_size,
+	.get_rxfh = hns3_get_rss,
+	.set_rxfh = hns3_set_rss,
+	.get_link_ksettings = hns3_get_link_ksettings,
+};
+
 static const struct ethtool_ops hns3_ethtool_ops = {
 	.self_test = hns3_self_test,
 	.get_drvinfo = hns3_get_drvinfo,
@@ -856,6 +909,7 @@ static const struct ethtool_ops hns3_ethtool_ops = {
 	.get_ringparam = hns3_get_ringparam,
 	.set_ringparam = hns3_set_ringparam,
 	.get_pauseparam = hns3_get_pauseparam,
+	.set_pauseparam = hns3_set_pauseparam,
 	.get_strings = hns3_get_strings,
 	.get_ethtool_stats = hns3_get_stats,
 	.get_sset_count = hns3_get_sset_count,
@@ -868,9 +922,16 @@ static const struct ethtool_ops hns3_ethtool_ops = {
 	.get_link_ksettings = hns3_get_link_ksettings,
 	.set_link_ksettings = hns3_set_link_ksettings,
 	.nway_reset = hns3_nway_reset,
+	.get_channels = hns3_get_channels,
+	.set_channels = hns3_set_channels,
 };
 
 void hns3_ethtool_set_ops(struct net_device *netdev)
 {
-	netdev->ethtool_ops = &hns3_ethtool_ops;
+	struct hnae3_handle *h = hns3_get_handle(netdev);
+
+	if (h->flags & HNAE3_SUPPORT_VF)
+		netdev->ethtool_ops = &hns3vf_ethtool_ops;
+	else
+		netdev->ethtool_ops = &hns3_ethtool_ops;
 }
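
The reworked HNS3_TQP_STAT() drops the size field and precomputes a single byte offset from the ring base, so hns3_get_stats_tqps() can read each u64 counter directly. A standalone sketch of the composed-offsetof technique, with simplified struct layouts:

#include <assert.h>
#include <stddef.h>
#include <stdint.h>

struct ring_stats { uint64_t tx_pkts; uint64_t tx_bytes; };
struct ring { int id; struct ring_stats stats; };

/* mirrors the reworked HNS3_TQP_STAT(): one byte offset from the ring base */
#define STAT_OFFSET(member) \
	(offsetof(struct ring, stats) + offsetof(struct ring_stats, member))

int main(void)
{
	struct ring r = { .stats = { .tx_pkts = 7, .tx_bytes = 900 } };
	uint64_t val = *(uint64_t *)((uint8_t *)&r + STAT_OFFSET(tx_bytes));

	assert(val == 900);
	return 0;
}
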
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/Makefile b/drivers/net/ethernet/hisilicon/hns3/hns3pf/Makefile
index d2b20d0..cb8ddd0 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/Makefile
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0+
 #
 # Makefile for the HISILICON network device drivers.
 #
@@ -5,11 +6,6 @@
 ccflags-y := -Idrivers/net/ethernet/hisilicon/hns3
 
 obj-$(CONFIG_HNS3_HCLGE) += hclge.o
-hclge-objs = hclge_main.o hclge_cmd.o hclge_mdio.o hclge_tm.o
+hclge-objs = hclge_main.o hclge_cmd.o hclge_mdio.o hclge_tm.o hclge_mbx.o
 
 hclge-$(CONFIG_HNS3_DCB) += hclge_dcb.o
-
-obj-$(CONFIG_HNS3_ENET) += hns3.o
-hns3-objs = hns3_enet.o hns3_ethtool.o
-
-hns3-$(CONFIG_HNS3_DCB) += hns3_dcbnl.o
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h
index ce5ed88..3c3159b 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h
@@ -180,6 +180,10 @@ enum hclge_opcode_type {
 	/* Promiscuous mode command */
 	HCLGE_OPC_CFG_PROMISC_MODE	= 0x0E01,
 
+	/* Vlan offload command */
+	HCLGE_OPC_VLAN_PORT_TX_CFG	= 0x0F01,
+	HCLGE_OPC_VLAN_PORT_RX_CFG	= 0x0F02,
+
 	/* Interrupts cmd */
 	HCLGE_OPC_ADD_RING_TO_VECTOR	= 0x1503,
 	HCLGE_OPC_DEL_RING_TO_VECTOR	= 0x1504,
@@ -191,6 +195,7 @@ enum hclge_opcode_type {
 	HCLGE_OPC_MAC_VLAN_INSERT	    = 0x1003,
 	HCLGE_OPC_MAC_ETHTYPE_ADD	    = 0x1010,
 	HCLGE_OPC_MAC_ETHTYPE_REMOVE	= 0x1011,
+	HCLGE_OPC_MAC_VLAN_MASK_SET	= 0x1012,
 
 	/* Multicast linear table cmd */
 	HCLGE_OPC_MTA_MAC_MODE_CFG	    = 0x1020,
@@ -399,6 +404,8 @@ struct hclge_pf_res_cmd {
 #define HCLGE_CFG_MAC_ADDR_H_M	GENMASK(15, 0)
 #define HCLGE_CFG_DEFAULT_SPEED_S	16
 #define HCLGE_CFG_DEFAULT_SPEED_M	GENMASK(23, 16)
+#define HCLGE_CFG_RSS_SIZE_S	24
+#define HCLGE_CFG_RSS_SIZE_M	GENMASK(31, 24)
 
 struct hclge_cfg_param_cmd {
 	__le32 offset;
@@ -549,8 +556,6 @@ struct hclge_config_auto_neg_cmd {
 	u8      rsv[20];
 };
 
-#define HCLGE_MAC_MIN_MTU		64
-#define HCLGE_MAC_MAX_MTU		9728
 #define HCLGE_MAC_UPLINK_PORT		0x100
 
 struct hclge_config_max_frm_size_cmd {
@@ -587,6 +592,15 @@ struct hclge_mac_vlan_tbl_entry_cmd {
 	u8      rsv2[6];
 };
 
+#define HCLGE_VLAN_MASK_EN_B		0x0
+struct hclge_mac_vlan_mask_entry_cmd {
+	u8 rsv0[2];
+	u8 vlan_mask;
+	u8 rsv1;
+	u8 mac_mask[6];
+	u8 rsv2[14];
+};
+
 #define HCLGE_CFG_MTA_MAC_SEL_S		0x0
 #define HCLGE_CFG_MTA_MAC_SEL_M		GENMASK(1, 0)
 #define HCLGE_CFG_MTA_MAC_EN_B		0x7
@@ -658,6 +672,47 @@ struct hclge_vlan_filter_vf_cfg_cmd {
 	u8  vf_bitmap[16];
 };
 
+#define HCLGE_ACCEPT_TAG_B		0
+#define HCLGE_ACCEPT_UNTAG_B		1
+#define HCLGE_PORT_INS_TAG1_EN_B	2
+#define HCLGE_PORT_INS_TAG2_EN_B	3
+#define HCLGE_CFG_NIC_ROCE_SEL_B	4
+struct hclge_vport_vtag_tx_cfg_cmd {
+	u8 vport_vlan_cfg;
+	u8 vf_offset;
+	u8 rsv1[2];
+	__le16 def_vlan_tag1;
+	__le16 def_vlan_tag2;
+	u8 vf_bitmap[8];
+	u8 rsv2[8];
+};
+
+#define HCLGE_REM_TAG1_EN_B		0
+#define HCLGE_REM_TAG2_EN_B		1
+#define HCLGE_SHOW_TAG1_EN_B		2
+#define HCLGE_SHOW_TAG2_EN_B		3
+struct hclge_vport_vtag_rx_cfg_cmd {
+	u8 vport_vlan_cfg;
+	u8 vf_offset;
+	u8 rsv1[6];
+	u8 vf_bitmap[8];
+	u8 rsv2[8];
+};
+
+struct hclge_tx_vlan_type_cfg_cmd {
+	__le16 ot_vlan_type;
+	__le16 in_vlan_type;
+	u8 rsv[20];
+};
+
+struct hclge_rx_vlan_type_cfg_cmd {
+	__le16 ot_fst_vlan_type;
+	__le16 ot_sec_vlan_type;
+	__le16 in_fst_vlan_type;
+	__le16 in_sec_vlan_type;
+	u8 rsv[16];
+};
+
 struct hclge_cfg_com_tqp_queue_cmd {
 	__le16 tqp_id;
 	__le16 stream_id;
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
index 59ed806..d7352f5 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
@@ -17,10 +17,12 @@
 #include <linux/netdevice.h>
 #include <linux/pci.h>
 #include <linux/platform_device.h>
-
+#include <linux/if_vlan.h>
+#include <net/rtnetlink.h>
 #include "hclge_cmd.h"
 #include "hclge_dcb.h"
 #include "hclge_main.h"
+#include "hclge_mbx.h"
 #include "hclge_mdio.h"
 #include "hclge_tm.h"
 #include "hnae3.h"
@@ -34,6 +36,7 @@
 static int hclge_set_mta_filter_mode(struct hclge_dev *hdev,
 				     enum hclge_mta_dmac_sel_type mta_mac_sel,
 				     bool enable);
+static int hclge_set_mtu(struct hnae3_handle *handle, int new_mtu);
 static int hclge_init_vlan_config(struct hclge_dev *hdev);
 static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev);
 
@@ -278,8 +281,8 @@ static const struct hclge_comm_stats_str g_mac_stats_string[] = {
 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_broad_pkt_num)},
 	{"mac_tx_undersize_pkt_num",
 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_undersize_pkt_num)},
-	{"mac_tx_overrsize_pkt_num",
-		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_overrsize_pkt_num)},
+	{"mac_tx_oversize_pkt_num",
+		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_oversize_pkt_num)},
 	{"mac_tx_64_oct_pkt_num",
 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_64_oct_pkt_num)},
 	{"mac_tx_65_127_oct_pkt_num",
@@ -292,8 +295,24 @@ static const struct hclge_comm_stats_str g_mac_stats_string[] = {
 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_512_1023_oct_pkt_num)},
 	{"mac_tx_1024_1518_oct_pkt_num",
 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1024_1518_oct_pkt_num)},
-	{"mac_tx_1519_max_oct_pkt_num",
-		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_max_oct_pkt_num)},
+	{"mac_tx_1519_2047_oct_pkt_num",
+		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_2047_oct_pkt_num)},
+	{"mac_tx_2048_4095_oct_pkt_num",
+		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_2048_4095_oct_pkt_num)},
+	{"mac_tx_4096_8191_oct_pkt_num",
+		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_4096_8191_oct_pkt_num)},
+	{"mac_tx_8192_12287_oct_pkt_num",
+		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_8192_12287_oct_pkt_num)},
+	{"mac_tx_8192_9216_oct_pkt_num",
+		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_8192_9216_oct_pkt_num)},
+	{"mac_tx_9217_12287_oct_pkt_num",
+		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_9217_12287_oct_pkt_num)},
+	{"mac_tx_12288_16383_oct_pkt_num",
+		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_12288_16383_oct_pkt_num)},
+	{"mac_tx_1519_max_good_pkt_num",
+		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_max_good_oct_pkt_num)},
+	{"mac_tx_1519_max_bad_pkt_num",
+		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_max_bad_oct_pkt_num)},
 	{"mac_rx_total_pkt_num",
 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_total_pkt_num)},
 	{"mac_rx_total_oct_num",
@@ -314,8 +333,8 @@ static const struct hclge_comm_stats_str g_mac_stats_string[] = {
 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_broad_pkt_num)},
 	{"mac_rx_undersize_pkt_num",
 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_undersize_pkt_num)},
-	{"mac_rx_overrsize_pkt_num",
-		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_overrsize_pkt_num)},
+	{"mac_rx_oversize_pkt_num",
+		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_oversize_pkt_num)},
 	{"mac_rx_64_oct_pkt_num",
 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_64_oct_pkt_num)},
 	{"mac_rx_65_127_oct_pkt_num",
@@ -328,33 +347,49 @@ static const struct hclge_comm_stats_str g_mac_stats_string[] = {
 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_512_1023_oct_pkt_num)},
 	{"mac_rx_1024_1518_oct_pkt_num",
 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1024_1518_oct_pkt_num)},
-	{"mac_rx_1519_max_oct_pkt_num",
-		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_max_oct_pkt_num)},
+	{"mac_rx_1519_2047_oct_pkt_num",
+		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_2047_oct_pkt_num)},
+	{"mac_rx_2048_4095_oct_pkt_num",
+		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_2048_4095_oct_pkt_num)},
+	{"mac_rx_4096_8191_oct_pkt_num",
+		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_4096_8191_oct_pkt_num)},
+	{"mac_rx_8192_12287_oct_pkt_num",
+		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_8192_12287_oct_pkt_num)},
+	{"mac_rx_8192_9216_oct_pkt_num",
+		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_8192_9216_oct_pkt_num)},
+	{"mac_rx_9217_12287_oct_pkt_num",
+		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_9217_12287_oct_pkt_num)},
+	{"mac_rx_12288_16383_oct_pkt_num",
+		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_12288_16383_oct_pkt_num)},
+	{"mac_rx_1519_max_good_pkt_num",
+		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_max_good_oct_pkt_num)},
+	{"mac_rx_1519_max_bad_pkt_num",
+		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_max_bad_oct_pkt_num)},
 
-	{"mac_trans_fragment_pkt_num",
-		HCLGE_MAC_STATS_FIELD_OFF(mac_trans_fragment_pkt_num)},
-	{"mac_trans_undermin_pkt_num",
-		HCLGE_MAC_STATS_FIELD_OFF(mac_trans_undermin_pkt_num)},
-	{"mac_trans_jabber_pkt_num",
-		HCLGE_MAC_STATS_FIELD_OFF(mac_trans_jabber_pkt_num)},
-	{"mac_trans_err_all_pkt_num",
-		HCLGE_MAC_STATS_FIELD_OFF(mac_trans_err_all_pkt_num)},
-	{"mac_trans_from_app_good_pkt_num",
-		HCLGE_MAC_STATS_FIELD_OFF(mac_trans_from_app_good_pkt_num)},
-	{"mac_trans_from_app_bad_pkt_num",
-		HCLGE_MAC_STATS_FIELD_OFF(mac_trans_from_app_bad_pkt_num)},
-	{"mac_rcv_fragment_pkt_num",
-		HCLGE_MAC_STATS_FIELD_OFF(mac_rcv_fragment_pkt_num)},
-	{"mac_rcv_undermin_pkt_num",
-		HCLGE_MAC_STATS_FIELD_OFF(mac_rcv_undermin_pkt_num)},
-	{"mac_rcv_jabber_pkt_num",
-		HCLGE_MAC_STATS_FIELD_OFF(mac_rcv_jabber_pkt_num)},
-	{"mac_rcv_fcs_err_pkt_num",
-		HCLGE_MAC_STATS_FIELD_OFF(mac_rcv_fcs_err_pkt_num)},
-	{"mac_rcv_send_app_good_pkt_num",
-		HCLGE_MAC_STATS_FIELD_OFF(mac_rcv_send_app_good_pkt_num)},
-	{"mac_rcv_send_app_bad_pkt_num",
-		HCLGE_MAC_STATS_FIELD_OFF(mac_rcv_send_app_bad_pkt_num)}
+	{"mac_tx_fragment_pkt_num",
+		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_fragment_pkt_num)},
+	{"mac_tx_undermin_pkt_num",
+		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_undermin_pkt_num)},
+	{"mac_tx_jabber_pkt_num",
+		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_jabber_pkt_num)},
+	{"mac_tx_err_all_pkt_num",
+		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_err_all_pkt_num)},
+	{"mac_tx_from_app_good_pkt_num",
+		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_from_app_good_pkt_num)},
+	{"mac_tx_from_app_bad_pkt_num",
+		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_from_app_bad_pkt_num)},
+	{"mac_rx_fragment_pkt_num",
+		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_fragment_pkt_num)},
+	{"mac_rx_undermin_pkt_num",
+		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_undermin_pkt_num)},
+	{"mac_rx_jabber_pkt_num",
+		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_jabber_pkt_num)},
+	{"mac_rx_fcs_err_pkt_num",
+		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_fcs_err_pkt_num)},
+	{"mac_rx_send_app_good_pkt_num",
+		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_send_app_good_pkt_num)},
+	{"mac_rx_send_app_bad_pkt_num",
+		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_send_app_bad_pkt_num)}
 };
 
 static int hclge_64_bit_update_stats(struct hclge_dev *hdev)
@@ -462,7 +497,7 @@ static int hclge_32_bit_update_stats(struct hclge_dev *hdev)
 
 static int hclge_mac_update_stats(struct hclge_dev *hdev)
 {
-#define HCLGE_MAC_CMD_NUM 17
+#define HCLGE_MAC_CMD_NUM 21
 #define HCLGE_RTN_DATA_NUM 4
 
 	u64 *data = (u64 *)(&hdev->hw_stats.mac_stats);
@@ -524,7 +559,7 @@ static int hclge_tqps_update_stats(struct hnae3_handle *handle)
 			return ret;
 		}
 		tqp->tqp_stats.rcb_rx_ring_pktnum_rcd +=
-			le32_to_cpu(desc[0].data[4]);
+			le32_to_cpu(desc[0].data[1]);
 	}
 
 	for (i = 0; i < kinfo->num_tqps; i++) {
@@ -544,7 +579,7 @@ static int hclge_tqps_update_stats(struct hnae3_handle *handle)
 			return ret;
 		}
 		tqp->tqp_stats.rcb_tx_ring_pktnum_rcd +=
-			le32_to_cpu(desc[0].data[4]);
+			le32_to_cpu(desc[0].data[1]);
 	}
 
 	return 0;
@@ -586,7 +621,7 @@ static u8 *hclge_tqps_get_strings(struct hnae3_handle *handle, u8 *data)
 	for (i = 0; i < kinfo->num_tqps; i++) {
 		struct hclge_tqp *tqp = container_of(handle->kinfo.tqp[i],
 			struct hclge_tqp, q);
-		snprintf(buff, ETH_GSTRING_LEN, "rcb_q%d_tx_pktnum_rcd",
+		snprintf(buff, ETH_GSTRING_LEN, "txq#%d_pktnum_rcd",
 			 tqp->index);
 		buff = buff + ETH_GSTRING_LEN;
 	}
@@ -594,7 +629,7 @@ static u8 *hclge_tqps_get_strings(struct hnae3_handle *handle, u8 *data)
 	for (i = 0; i < kinfo->num_tqps; i++) {
 		struct hclge_tqp *tqp = container_of(kinfo->tqp[i],
 			struct hclge_tqp, q);
-		snprintf(buff, ETH_GSTRING_LEN, "rcb_q%d_rx_pktnum_rcd",
+		snprintf(buff, ETH_GSTRING_LEN, "rxq#%d_pktnum_rcd",
 			 tqp->index);
 		buff = buff + ETH_GSTRING_LEN;
 	}
@@ -642,23 +677,22 @@ static void hclge_update_netstat(struct hclge_hw_stats *hw_stats,
 	net_stats->rx_dropped += hw_stats->all_32_bit_stats.ppp_key_drop_num;
 	net_stats->rx_dropped += hw_stats->all_32_bit_stats.ssu_key_drop_num;
 
-	net_stats->rx_errors = hw_stats->mac_stats.mac_rx_overrsize_pkt_num;
+	net_stats->rx_errors = hw_stats->mac_stats.mac_rx_oversize_pkt_num;
 	net_stats->rx_errors += hw_stats->mac_stats.mac_rx_undersize_pkt_num;
-	net_stats->rx_errors += hw_stats->all_32_bit_stats.igu_rx_err_pkt;
 	net_stats->rx_errors += hw_stats->all_32_bit_stats.igu_rx_no_eof_pkt;
 	net_stats->rx_errors += hw_stats->all_32_bit_stats.igu_rx_no_sof_pkt;
-	net_stats->rx_errors += hw_stats->mac_stats.mac_rcv_fcs_err_pkt_num;
+	net_stats->rx_errors += hw_stats->mac_stats.mac_rx_fcs_err_pkt_num;
 
 	net_stats->multicast = hw_stats->mac_stats.mac_tx_multi_pkt_num;
 	net_stats->multicast += hw_stats->mac_stats.mac_rx_multi_pkt_num;
 
-	net_stats->rx_crc_errors = hw_stats->mac_stats.mac_rcv_fcs_err_pkt_num;
+	net_stats->rx_crc_errors = hw_stats->mac_stats.mac_rx_fcs_err_pkt_num;
 	net_stats->rx_length_errors =
 		hw_stats->mac_stats.mac_rx_undersize_pkt_num;
 	net_stats->rx_length_errors +=
-		hw_stats->mac_stats.mac_rx_overrsize_pkt_num;
+		hw_stats->mac_stats.mac_rx_oversize_pkt_num;
 	net_stats->rx_over_errors =
-		hw_stats->mac_stats.mac_rx_overrsize_pkt_num;
+		hw_stats->mac_stats.mac_rx_oversize_pkt_num;
 }
 
 static void hclge_update_stats_for_all(struct hclge_dev *hdev)
@@ -698,6 +732,9 @@ static void hclge_update_stats(struct hnae3_handle *handle,
 	struct hclge_hw_stats *hw_stats = &hdev->hw_stats;
 	int status;
 
+	if (test_and_set_bit(HCLGE_STATE_STATISTICS_UPDATING, &hdev->state))
+		return;
+
 	status = hclge_mac_update_stats(hdev);
 	if (status)
 		dev_err(&hdev->pdev->dev,
@@ -723,6 +760,8 @@ static void hclge_update_stats(struct hnae3_handle *handle,
 			status);
 
 	hclge_update_netstat(hw_stats, net_stats);
+
+	clear_bit(HCLGE_STATE_STATISTICS_UPDATING, &hdev->state);
 }
 
 static int hclge_get_sset_count(struct hnae3_handle *handle, int stringset)
@@ -981,6 +1020,10 @@ static void hclge_parse_cfg(struct hclge_cfg *cfg, struct hclge_desc *desc)
 	cfg->default_speed = hnae_get_field(__le32_to_cpu(req->param[3]),
 					    HCLGE_CFG_DEFAULT_SPEED_M,
 					    HCLGE_CFG_DEFAULT_SPEED_S);
+	cfg->rss_size_max = hnae_get_field(__le32_to_cpu(req->param[3]),
+					   HCLGE_CFG_RSS_SIZE_M,
+					   HCLGE_CFG_RSS_SIZE_S);
+
 	for (i = 0; i < ETH_ALEN; i++)
 		cfg->mac_addr[i] = (mac_addr_tmp >> (8 * i)) & 0xff;
 
@@ -1058,7 +1101,7 @@ static int hclge_configure(struct hclge_dev *hdev)
 
 	hdev->num_vmdq_vport = cfg.vmdq_vport_num;
 	hdev->base_tqp_pid = 0;
-	hdev->rss_size_max = 1;
+	hdev->rss_size_max = cfg.rss_size_max;
 	hdev->rx_buf_len = cfg.rx_buf_len;
 	ether_addr_copy(hdev->hw.mac.mac_addr, cfg.mac_addr);
 	hdev->hw.mac.media_type = cfg.media_type;
@@ -1095,10 +1138,7 @@ static int hclge_configure(struct hclge_dev *hdev)
 	for (i = 0; i < hdev->tm_info.num_tc; i++)
 		hnae_set_bit(hdev->hw_tc_map, i, 1);
 
-	if (!hdev->num_vmdq_vport && !hdev->num_req_vfs)
-		hdev->tx_sch_mode = HCLGE_FLAG_TC_BASE_SCH_MODE;
-	else
-		hdev->tx_sch_mode = HCLGE_FLAG_VNET_BASE_SCH_MODE;
+	hdev->tx_sch_mode = HCLGE_FLAG_TC_BASE_SCH_MODE;
 
 	return ret;
 }
@@ -2132,28 +2172,6 @@ static int hclge_query_mac_an_speed_dup(struct hclge_dev *hdev, int *speed,
 	return 0;
 }
 
-static int hclge_query_autoneg_result(struct hclge_dev *hdev)
-{
-	struct hclge_mac *mac = &hdev->hw.mac;
-	struct hclge_query_an_speed_dup_cmd *req;
-	struct hclge_desc desc;
-	int ret;
-
-	req = (struct hclge_query_an_speed_dup_cmd *)desc.data;
-
-	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_AN_RESULT, true);
-	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
-	if (ret) {
-		dev_err(&hdev->pdev->dev,
-			"autoneg result query cmd failed %d.\n", ret);
-		return ret;
-	}
-
-	mac->autoneg = hnae_get_bit(req->an_syn_dup_speed, HCLGE_QUERY_AN_B);
-
-	return 0;
-}
-
 static int hclge_set_autoneg_en(struct hclge_dev *hdev, bool enable)
 {
 	struct hclge_config_auto_neg_cmd *req;
@@ -2189,15 +2207,45 @@ static int hclge_get_autoneg(struct hnae3_handle *handle)
 {
 	struct hclge_vport *vport = hclge_get_vport(handle);
 	struct hclge_dev *hdev = vport->back;
+	struct phy_device *phydev = hdev->hw.mac.phydev;
 
-	hclge_query_autoneg_result(hdev);
+	if (phydev)
+		return phydev->autoneg;
 
 	return hdev->hw.mac.autoneg;
 }
 
+static int hclge_set_default_mac_vlan_mask(struct hclge_dev *hdev,
+					   bool mask_vlan,
+					   u8 *mac_mask)
+{
+	struct hclge_mac_vlan_mask_entry_cmd *req;
+	struct hclge_desc desc;
+	int status;
+
+	req = (struct hclge_mac_vlan_mask_entry_cmd *)desc.data;
+	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_MASK_SET, false);
+
+	hnae_set_bit(req->vlan_mask, HCLGE_VLAN_MASK_EN_B,
+		     mask_vlan ? 1 : 0);
+	ether_addr_copy(req->mac_mask, mac_mask);
+
+	status = hclge_cmd_send(&hdev->hw, &desc, 1);
+	if (status)
+		dev_err(&hdev->pdev->dev,
+			"Config mac_vlan_mask failed for cmd_send, ret =%d\n",
+			status);
+
+	return status;
+}
+
 static int hclge_mac_init(struct hclge_dev *hdev)
 {
+	struct hnae3_handle *handle = &hdev->vport[0].nic;
+	struct net_device *netdev = handle->kinfo.netdev;
 	struct hclge_mac *mac = &hdev->hw.mac;
+	u8 mac_mask[ETH_ALEN] = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00};
+	int mtu;
 	int ret;
 
 	ret = hclge_cfg_mac_speed_dup(hdev, hdev->hw.mac.speed, HCLGE_MAC_FULL);
@@ -2223,7 +2271,45 @@ static int hclge_mac_init(struct hclge_dev *hdev)
 		return ret;
 	}
 
-	return hclge_cfg_func_mta_filter(hdev, 0, hdev->accept_mta_mc);
+	ret = hclge_cfg_func_mta_filter(hdev, 0, hdev->accept_mta_mc);
+	if (ret) {
+		dev_err(&hdev->pdev->dev,
+			"set mta filter mode fail ret=%d\n", ret);
+		return ret;
+	}
+
+	ret = hclge_set_default_mac_vlan_mask(hdev, true, mac_mask);
+	if (ret) {
+		dev_err(&hdev->pdev->dev,
+			"set default mac_vlan_mask fail ret=%d\n", ret);
+		return ret;
+	}
+
+	if (netdev)
+		mtu = netdev->mtu;
+	else
+		mtu = ETH_DATA_LEN;
+
+	ret = hclge_set_mtu(handle, mtu);
+	if (ret) {
+		dev_err(&hdev->pdev->dev,
+			"set mtu failed ret=%d\n", ret);
+		return ret;
+	}
+
+	return 0;
+}
+
+static void hclge_mbx_task_schedule(struct hclge_dev *hdev)
+{
+	if (!test_and_set_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state))
+		schedule_work(&hdev->mbx_service_task);
+}
+
+static void hclge_reset_task_schedule(struct hclge_dev *hdev)
+{
+	if (!test_and_set_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state))
+		schedule_work(&hdev->rst_service_task);
 }
 
 static void hclge_task_schedule(struct hclge_dev *hdev)
@@ -2350,6 +2436,7 @@ static void hclge_service_timer(struct timer_list *t)
 	struct hclge_dev *hdev = from_timer(hdev, t, service_timer);
 
 	mod_timer(&hdev->service_timer, jiffies + HZ);
+	hdev->hw_stats.stats_timer++;
 	hclge_task_schedule(hdev);
 }
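
For context: the timer re-arms at jiffies + HZ, so stats_timer increments roughly
once per second. With HCLGE_STATS_TIMER_INTERVAL defined as (60 * 5) in
hclge_main.h further below, the service task now refreshes hardware statistics
only about once every five minutes (300 ticks) instead of on every service pass.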
 
@@ -2362,6 +2449,64 @@ static void hclge_service_complete(struct hclge_dev *hdev)
 	clear_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state);
 }
 
+static u32 hclge_check_event_cause(struct hclge_dev *hdev, u32 *clearval)
+{
+	u32 rst_src_reg;
+	u32 cmdq_src_reg;
+
+	/* fetch the events from their corresponding regs */
+	rst_src_reg = hclge_read_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG);
+	cmdq_src_reg = hclge_read_dev(&hdev->hw, HCLGE_VECTOR0_CMDQ_SRC_REG);
+
+	/* Assumption: if reset and mailbox events happen to be reported
+	 * together, we only process the reset event in this pass and defer
+	 * the mailbox events. Since the RX CMDQ event is left uncleared this
+	 * time, the H/W will raise another interrupt just for the mailbox.
+	 */
+
+	/* check for vector0 reset event sources */
+	if (BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) & rst_src_reg) {
+		set_bit(HNAE3_GLOBAL_RESET, &hdev->reset_pending);
+		*clearval = BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B);
+		return HCLGE_VECTOR0_EVENT_RST;
+	}
+
+	if (BIT(HCLGE_VECTOR0_CORERESET_INT_B) & rst_src_reg) {
+		set_bit(HNAE3_CORE_RESET, &hdev->reset_pending);
+		*clearval = BIT(HCLGE_VECTOR0_CORERESET_INT_B);
+		return HCLGE_VECTOR0_EVENT_RST;
+	}
+
+	if (BIT(HCLGE_VECTOR0_IMPRESET_INT_B) & rst_src_reg) {
+		set_bit(HNAE3_IMP_RESET, &hdev->reset_pending);
+		*clearval = BIT(HCLGE_VECTOR0_IMPRESET_INT_B);
+		return HCLGE_VECTOR0_EVENT_RST;
+	}
+
+	/* check for vector0 mailbox(=CMDQ RX) event source */
+	if (BIT(HCLGE_VECTOR0_RX_CMDQ_INT_B) & cmdq_src_reg) {
+		cmdq_src_reg &= ~BIT(HCLGE_VECTOR0_RX_CMDQ_INT_B);
+		*clearval = cmdq_src_reg;
+		return HCLGE_VECTOR0_EVENT_MBX;
+	}
+
+	return HCLGE_VECTOR0_EVENT_OTHER;
+}
+
+static void hclge_clear_event_cause(struct hclge_dev *hdev, u32 event_type,
+				    u32 regclr)
+{
+	switch (event_type) {
+	case HCLGE_VECTOR0_EVENT_RST:
+		hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG, regclr);
+		break;
+	case HCLGE_VECTOR0_EVENT_MBX:
+		hclge_write_dev(&hdev->hw, HCLGE_VECTOR0_CMDQ_SRC_REG, regclr);
+		break;
+	}
+}
+
 static void hclge_enable_vector(struct hclge_misc_vector *vector, bool enable)
 {
 	writel(enable ? 1 : 0, vector->addr);
@@ -2370,10 +2515,38 @@ static void hclge_enable_vector(struct hclge_misc_vector *vector, bool enable)
 static irqreturn_t hclge_misc_irq_handle(int irq, void *data)
 {
 	struct hclge_dev *hdev = data;
+	u32 event_cause;
+	u32 clearval;
 
 	hclge_enable_vector(&hdev->misc_vector, false);
-	if (!test_and_set_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state))
-		schedule_work(&hdev->service_task);
+	event_cause = hclge_check_event_cause(hdev, &clearval);
+
+	/* vector 0 interrupt is shared with reset and mailbox source events. */
+	switch (event_cause) {
+	case HCLGE_VECTOR0_EVENT_RST:
+		hclge_reset_task_schedule(hdev);
+		break;
+	case HCLGE_VECTOR0_EVENT_MBX:
+		/* If we are here then either,
+		 * 1. we are not handling any mbx task, and one has not been
+		 *    scheduled either,
+		 *                        OR
+		 * 2. we are handling a mbx task, but nothing more has been
+		 *    scheduled.
+		 * In both cases we should schedule the mbx task, as there are
+		 * more mbx messages reported by this interrupt.
+		 */
+		hclge_mbx_task_schedule(hdev);
+		break;
+
+	default:
+		dev_dbg(&hdev->pdev->dev,
+			"received unknown or unhandled event of vector0\n");
+		break;
+	}
+
+	/* we should clear the source of the interrupt */
+	hclge_clear_event_cause(hdev, event_cause, clearval);
+	hclge_enable_vector(&hdev->misc_vector, true);
 
 	return IRQ_HANDLED;
 }
@@ -2404,9 +2577,9 @@ static int hclge_misc_irq_init(struct hclge_dev *hdev)
 
 	hclge_get_misc_vector(hdev);
 
-	ret = devm_request_irq(&hdev->pdev->dev,
-			       hdev->misc_vector.vector_irq,
-			       hclge_misc_irq_handle, 0, "hclge_misc", hdev);
+	/* this is explicitly freed in hclge_misc_irq_uninit() */
+	ret = request_irq(hdev->misc_vector.vector_irq, hclge_misc_irq_handle,
+			  0, "hclge_misc", hdev);
 	if (ret) {
 		hclge_free_vector(hdev, 0);
 		dev_err(&hdev->pdev->dev, "request misc irq(%d) fail\n",
@@ -2416,6 +2589,12 @@ static int hclge_misc_irq_init(struct hclge_dev *hdev)
 	return ret;
 }
 
+static void hclge_misc_irq_uninit(struct hclge_dev *hdev)
+{
+	free_irq(hdev->misc_vector.vector_irq, hdev);
+	hclge_free_vector(hdev, 0);
+}
+
 static int hclge_notify_client(struct hclge_dev *hdev,
 			       enum hnae3_reset_notify_type type)
 {
@@ -2471,12 +2650,6 @@ static int hclge_reset_wait(struct hclge_dev *hdev)
 		cnt++;
 	}
 
-	/* must clear reset status register to
-	 * prevent driver detect reset interrupt again
-	 */
-	reg = hclge_read_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG);
-	hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG, reg);
-
 	if (cnt >= HCLGE_RESET_WAIT_CNT) {
 		dev_warn(&hdev->pdev->dev,
 			 "Wait for reset timeout: %d\n", hdev->reset_type);
@@ -2505,12 +2678,12 @@ static int hclge_func_reset_cmd(struct hclge_dev *hdev, int func_id)
 	return ret;
 }
 
-static void hclge_do_reset(struct hclge_dev *hdev, enum hnae3_reset_type type)
+static void hclge_do_reset(struct hclge_dev *hdev)
 {
 	struct pci_dev *pdev = hdev->pdev;
 	u32 val;
 
-	switch (type) {
+	switch (hdev->reset_type) {
 	case HNAE3_GLOBAL_RESET:
 		val = hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG);
 		hnae_set_bit(val, HCLGE_GLOBAL_RESET_BIT, 1);
@@ -2526,30 +2699,62 @@ static void hclge_do_reset(struct hclge_dev *hdev, enum hnae3_reset_type type)
 	case HNAE3_FUNC_RESET:
 		dev_info(&pdev->dev, "PF Reset requested\n");
 		hclge_func_reset_cmd(hdev, 0);
+		/* schedule again to check later */
+		set_bit(HNAE3_FUNC_RESET, &hdev->reset_pending);
+		hclge_reset_task_schedule(hdev);
 		break;
 	default:
 		dev_warn(&pdev->dev,
-			 "Unsupported reset type: %d\n", type);
+			 "Unsupported reset type: %d\n", hdev->reset_type);
 		break;
 	}
 }
 
-static enum hnae3_reset_type hclge_detected_reset_event(struct hclge_dev *hdev)
+static enum hnae3_reset_type hclge_get_reset_level(struct hclge_dev *hdev,
+						   unsigned long *addr)
 {
 	enum hnae3_reset_type rst_level = HNAE3_NONE_RESET;
-	u32 rst_reg_val;
 
-	rst_reg_val = hclge_read_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG);
-	if (BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) & rst_reg_val)
+	/* return the highest priority reset level amongst all */
+	if (test_bit(HNAE3_GLOBAL_RESET, addr))
 		rst_level = HNAE3_GLOBAL_RESET;
-	else if (BIT(HCLGE_VECTOR0_CORERESET_INT_B) & rst_reg_val)
+	else if (test_bit(HNAE3_CORE_RESET, addr))
 		rst_level = HNAE3_CORE_RESET;
-	else if (BIT(HCLGE_VECTOR0_IMPRESET_INT_B) & rst_reg_val)
+	else if (test_bit(HNAE3_IMP_RESET, addr))
 		rst_level = HNAE3_IMP_RESET;
+	else if (test_bit(HNAE3_FUNC_RESET, addr))
+		rst_level = HNAE3_FUNC_RESET;
+
+	/* now, clear all other resets */
+	clear_bit(HNAE3_GLOBAL_RESET, addr);
+	clear_bit(HNAE3_CORE_RESET, addr);
+	clear_bit(HNAE3_IMP_RESET, addr);
+	clear_bit(HNAE3_FUNC_RESET, addr);
 
 	return rst_level;
 }
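
Worked example: if HNAE3_FUNC_RESET and HNAE3_GLOBAL_RESET are both pending in
*addr, the function returns HNAE3_GLOBAL_RESET (the widest reset wins) and then
clears all four bits, so the lower-priority function reset is subsumed by the
global one rather than replayed afterwards.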
 
+static void hclge_reset(struct hclge_dev *hdev)
+{
+	/* perform reset of the stack & ae device for a client */
+
+	hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
+
+	if (!hclge_reset_wait(hdev)) {
+		rtnl_lock();
+		hclge_notify_client(hdev, HNAE3_UNINIT_CLIENT);
+		hclge_reset_ae_dev(hdev->ae_dev);
+		hclge_notify_client(hdev, HNAE3_INIT_CLIENT);
+		rtnl_unlock();
+	} else {
+		/* schedule again to check pending resets later */
+		set_bit(hdev->reset_type, &hdev->reset_pending);
+		hclge_reset_task_schedule(hdev);
+	}
+
+	hclge_notify_client(hdev, HNAE3_UP_CLIENT);
+}
+
 static void hclge_reset_event(struct hnae3_handle *handle,
 			      enum hnae3_reset_type reset)
 {
@@ -2563,14 +2768,9 @@ static void hclge_reset_event(struct hnae3_handle *handle,
 	case HNAE3_FUNC_RESET:
 	case HNAE3_CORE_RESET:
 	case HNAE3_GLOBAL_RESET:
-		if (test_bit(HCLGE_STATE_RESET_INT, &hdev->state)) {
-			dev_err(&hdev->pdev->dev, "Already in reset state");
-			return;
-		}
-		hdev->reset_type = reset;
-		set_bit(HCLGE_STATE_RESET_INT, &hdev->state);
-		set_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state);
-		schedule_work(&hdev->service_task);
+		/* request reset & schedule reset task */
+		set_bit(reset, &hdev->reset_request);
+		hclge_reset_task_schedule(hdev);
 		break;
 	default:
 		dev_warn(&hdev->pdev->dev, "Unsupported reset event:%d", reset);
@@ -2580,49 +2780,55 @@ static void hclge_reset_event(struct hnae3_handle *handle,
 
 static void hclge_reset_subtask(struct hclge_dev *hdev)
 {
-	bool do_reset;
+	/* Check if there is any ongoing reset in the hardware. This status can
+	 * be checked from reset_pending. If there is one, we need to wait for
+	 * the hardware to complete the reset.
+	 *    a. If we are able to figure out in reasonable time that the
+	 *       hardware has fully reset, we can proceed with the driver and
+	 *       client reset.
+	 *    b. Otherwise, re-schedule now and come back to check this status
+	 *       later.
+	 */
+	hdev->reset_type = hclge_get_reset_level(hdev, &hdev->reset_pending);
+	if (hdev->reset_type != HNAE3_NONE_RESET)
+		hclge_reset(hdev);
 
-	do_reset = hdev->reset_type != HNAE3_NONE_RESET;
+	/* check if we got any *new* reset requests to be honored */
+	hdev->reset_type = hclge_get_reset_level(hdev, &hdev->reset_request);
+	if (hdev->reset_type != HNAE3_NONE_RESET)
+		hclge_do_reset(hdev);
 
-	/* Reset is detected by interrupt */
-	if (hdev->reset_type == HNAE3_NONE_RESET)
-		hdev->reset_type = hclge_detected_reset_event(hdev);
-
-	if (hdev->reset_type == HNAE3_NONE_RESET)
-		return;
-
-	switch (hdev->reset_type) {
-	case HNAE3_FUNC_RESET:
-	case HNAE3_CORE_RESET:
-	case HNAE3_GLOBAL_RESET:
-	case HNAE3_IMP_RESET:
-		hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
-
-		if (do_reset)
-			hclge_do_reset(hdev, hdev->reset_type);
-		else
-			set_bit(HCLGE_STATE_RESET_INT, &hdev->state);
-
-		if (!hclge_reset_wait(hdev)) {
-			hclge_notify_client(hdev, HNAE3_UNINIT_CLIENT);
-			hclge_reset_ae_dev(hdev->ae_dev);
-			hclge_notify_client(hdev, HNAE3_INIT_CLIENT);
-			clear_bit(HCLGE_STATE_RESET_INT, &hdev->state);
-		}
-		hclge_notify_client(hdev, HNAE3_UP_CLIENT);
-		break;
-	default:
-		dev_err(&hdev->pdev->dev, "Unsupported reset type:%d\n",
-			hdev->reset_type);
-		break;
-	}
 	hdev->reset_type = HNAE3_NONE_RESET;
 }
 
-static void hclge_misc_irq_service_task(struct hclge_dev *hdev)
+static void hclge_reset_service_task(struct work_struct *work)
 {
+	struct hclge_dev *hdev =
+		container_of(work, struct hclge_dev, rst_service_task);
+
+	if (test_and_set_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
+		return;
+
+	clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state);
+
 	hclge_reset_subtask(hdev);
-	hclge_enable_vector(&hdev->misc_vector, true);
+
+	clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
+}
+
+static void hclge_mailbox_service_task(struct work_struct *work)
+{
+	struct hclge_dev *hdev =
+		container_of(work, struct hclge_dev, mbx_service_task);
+
+	if (test_and_set_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state))
+		return;
+
+	clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state);
+
+	hclge_mbx_handler(hdev);
+
+	clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state);
 }
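
Both workers above follow the same two-flag handshake: a *_SERVICE_SCHED bit set
by the scheduling helpers added earlier in this patch, and a *_HANDLING bit that
guards against re-entry. A condensed sketch of the lifecycle, with generic names
standing in for the HCLGE_STATE_* bits (not part of the patch itself):

	/* scheduler side (IRQ or another context) */
	if (!test_and_set_bit(STATE_SCHED, &state))
		schedule_work(&task);

	/* worker side */
	if (test_and_set_bit(STATE_HANDLING, &state))
		return;				/* a run is already in progress */
	clear_bit(STATE_SCHED, &state);		/* allow the next schedule request */
	handle_events();
	clear_bit(STATE_HANDLING, &state);

Clearing STATE_SCHED before handling means a request that arrives mid-run queues
another pass instead of being lost.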
 
 static void hclge_service_task(struct work_struct *work)
@@ -2630,10 +2836,13 @@ static void hclge_service_task(struct work_struct *work)
 	struct hclge_dev *hdev =
 		container_of(work, struct hclge_dev, service_task);
 
-	hclge_misc_irq_service_task(hdev);
+	if (hdev->hw_stats.stats_timer >= HCLGE_STATS_TIMER_INTERVAL) {
+		hclge_update_stats_for_all(hdev);
+		hdev->hw_stats.stats_timer = 0;
+	}
+
 	hclge_update_speed_duplex(hdev);
 	hclge_update_link_status(hdev);
-	hclge_update_stats_for_all(hdev);
 	hclge_service_complete(hdev);
 }
 
@@ -3174,49 +3383,48 @@ int hclge_rss_init_hw(struct hclge_dev *hdev)
 	return ret;
 }
 
-int hclge_map_vport_ring_to_vector(struct hclge_vport *vport, int vector_id,
-				   struct hnae3_ring_chain_node *ring_chain)
+int hclge_bind_ring_with_vector(struct hclge_vport *vport,
+				int vector_id, bool en,
+				struct hnae3_ring_chain_node *ring_chain)
 {
 	struct hclge_dev *hdev = vport->back;
-	struct hclge_ctrl_vector_chain_cmd *req;
 	struct hnae3_ring_chain_node *node;
 	struct hclge_desc desc;
-	int ret;
+	struct hclge_ctrl_vector_chain_cmd *req =
+		(struct hclge_ctrl_vector_chain_cmd *)desc.data;
+	enum hclge_cmd_status status;
+	enum hclge_opcode_type op;
+	u16 tqp_type_and_id;
 	int i;
 
-	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_ADD_RING_TO_VECTOR, false);
-
-	req = (struct hclge_ctrl_vector_chain_cmd *)desc.data;
+	op = en ? HCLGE_OPC_ADD_RING_TO_VECTOR : HCLGE_OPC_DEL_RING_TO_VECTOR;
+	hclge_cmd_setup_basic_desc(&desc, op, false);
 	req->int_vector_id = vector_id;
 
 	i = 0;
 	for (node = ring_chain; node; node = node->next) {
-		u16 type_and_id = 0;
-
-		hnae_set_field(type_and_id, HCLGE_INT_TYPE_M, HCLGE_INT_TYPE_S,
+		tqp_type_and_id = le16_to_cpu(req->tqp_type_and_id[i]);
+		hnae_set_field(tqp_type_and_id, HCLGE_INT_TYPE_M,
+			       HCLGE_INT_TYPE_S,
 			       hnae_get_bit(node->flag, HNAE3_RING_TYPE_B));
-		hnae_set_field(type_and_id, HCLGE_TQP_ID_M, HCLGE_TQP_ID_S,
-			       node->tqp_index);
-		hnae_set_field(type_and_id, HCLGE_INT_GL_IDX_M,
-			       HCLGE_INT_GL_IDX_S,
-			       hnae_get_bit(node->flag, HNAE3_RING_TYPE_B));
-		req->tqp_type_and_id[i] = cpu_to_le16(type_and_id);
-		req->vfid = vport->vport_id;
-
+		hnae_set_field(tqp_type_and_id, HCLGE_TQP_ID_M,
+			       HCLGE_TQP_ID_S, node->tqp_index);
+		req->tqp_type_and_id[i] = cpu_to_le16(tqp_type_and_id);
 		if (++i >= HCLGE_VECTOR_ELEMENTS_PER_CMD) {
 			req->int_cause_num = HCLGE_VECTOR_ELEMENTS_PER_CMD;
+			req->vfid = vport->vport_id;
 
-			ret = hclge_cmd_send(&hdev->hw, &desc, 1);
-			if (ret) {
+			status = hclge_cmd_send(&hdev->hw, &desc, 1);
+			if (status) {
 				dev_err(&hdev->pdev->dev,
 					"Map TQP fail, status is %d.\n",
-					ret);
-				return ret;
+					status);
+				return -EIO;
 			}
 			i = 0;
 
 			hclge_cmd_setup_basic_desc(&desc,
-						   HCLGE_OPC_ADD_RING_TO_VECTOR,
+						   op,
 						   false);
 			req->int_vector_id = vector_id;
 		}
@@ -3224,21 +3432,21 @@ int hclge_map_vport_ring_to_vector(struct hclge_vport *vport, int vector_id,
 
 	if (i > 0) {
 		req->int_cause_num = i;
-
-		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
-		if (ret) {
+		req->vfid = vport->vport_id;
+		status = hclge_cmd_send(&hdev->hw, &desc, 1);
+		if (status) {
 			dev_err(&hdev->pdev->dev,
-				"Map TQP fail, status is %d.\n", ret);
-			return ret;
+				"Map TQP fail, status is %d.\n", status);
+			return -EIO;
 		}
 	}
 
 	return 0;
 }
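
The map and unmap paths now share this one routine; only the opcode differs. A
minimal usage sketch, mirroring the two callers below (illustration only):

	/* en = true selects HCLGE_OPC_ADD_RING_TO_VECTOR (map) */
	ret = hclge_bind_ring_with_vector(vport, vector_id, true, ring_chain);

	/* en = false selects HCLGE_OPC_DEL_RING_TO_VECTOR (unmap) */
	ret = hclge_bind_ring_with_vector(vport, vector_id, false, ring_chain);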
 
-static int hclge_map_handle_ring_to_vector(
-		struct hnae3_handle *handle, int vector,
-		struct hnae3_ring_chain_node *ring_chain)
+static int hclge_map_ring_to_vector(struct hnae3_handle *handle,
+				    int vector,
+				    struct hnae3_ring_chain_node *ring_chain)
 {
 	struct hclge_vport *vport = hclge_get_vport(handle);
 	struct hclge_dev *hdev = vport->back;
@@ -3247,24 +3455,20 @@ static int hclge_map_handle_ring_to_vector(
 	vector_id = hclge_get_vector_index(hdev, vector);
 	if (vector_id < 0) {
 		dev_err(&hdev->pdev->dev,
-			"Get vector index fail. ret =%d\n", vector_id);
+			"Get vector index fail. vector_id =%d\n", vector_id);
 		return vector_id;
 	}
 
-	return hclge_map_vport_ring_to_vector(vport, vector_id, ring_chain);
+	return hclge_bind_ring_with_vector(vport, vector_id, true, ring_chain);
 }
 
-static int hclge_unmap_ring_from_vector(
-	struct hnae3_handle *handle, int vector,
-	struct hnae3_ring_chain_node *ring_chain)
+static int hclge_unmap_ring_frm_vector(struct hnae3_handle *handle,
+				       int vector,
+				       struct hnae3_ring_chain_node *ring_chain)
 {
 	struct hclge_vport *vport = hclge_get_vport(handle);
 	struct hclge_dev *hdev = vport->back;
-	struct hclge_ctrl_vector_chain_cmd *req;
-	struct hnae3_ring_chain_node *node;
-	struct hclge_desc desc;
-	int i, vector_id;
-	int ret;
+	int vector_id, ret;
 
 	vector_id = hclge_get_vector_index(hdev, vector);
 	if (vector_id < 0) {
@@ -3273,54 +3477,17 @@ static int hclge_unmap_ring_from_vector(
 		return vector_id;
 	}
 
-	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_DEL_RING_TO_VECTOR, false);
-
-	req = (struct hclge_ctrl_vector_chain_cmd *)desc.data;
-	req->int_vector_id = vector_id;
-
-	i = 0;
-	for (node = ring_chain; node; node = node->next) {
-		u16 type_and_id = 0;
-
-		hnae_set_field(type_and_id, HCLGE_INT_TYPE_M, HCLGE_INT_TYPE_S,
-			       hnae_get_bit(node->flag, HNAE3_RING_TYPE_B));
-		hnae_set_field(type_and_id, HCLGE_TQP_ID_M, HCLGE_TQP_ID_S,
-			       node->tqp_index);
-		hnae_set_field(type_and_id, HCLGE_INT_GL_IDX_M,
-			       HCLGE_INT_GL_IDX_S,
-			       hnae_get_bit(node->flag, HNAE3_RING_TYPE_B));
-
-		req->tqp_type_and_id[i] = cpu_to_le16(type_and_id);
-		req->vfid = vport->vport_id;
-
-		if (++i >= HCLGE_VECTOR_ELEMENTS_PER_CMD) {
-			req->int_cause_num = HCLGE_VECTOR_ELEMENTS_PER_CMD;
-
-			ret = hclge_cmd_send(&hdev->hw, &desc, 1);
-			if (ret) {
-				dev_err(&hdev->pdev->dev,
-					"Unmap TQP fail, status is %d.\n",
-					ret);
-				return ret;
-			}
-			i = 0;
-			hclge_cmd_setup_basic_desc(&desc,
-						   HCLGE_OPC_DEL_RING_TO_VECTOR,
-						   false);
-			req->int_vector_id = vector_id;
-		}
+	ret = hclge_bind_ring_with_vector(vport, vector_id, false, ring_chain);
+	if (ret) {
+		dev_err(&handle->pdev->dev,
+			"Unmap ring from vector fail. vectorid=%d, ret =%d\n",
+			vector_id,
+			ret);
+		return ret;
 	}
 
-	if (i > 0) {
-		req->int_cause_num = i;
-
-		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
-		if (ret) {
-			dev_err(&hdev->pdev->dev,
-				"Unmap TQP fail, status is %d.\n", ret);
-			return ret;
-		}
-	}
+	/* Free this MSIX or MSI vector */
+	hclge_free_vector(hdev, vector_id);
 
 	return 0;
 }
@@ -4090,6 +4257,7 @@ static int hclge_set_mac_addr(struct hnae3_handle *handle, void *p)
 	const unsigned char *new_addr = (const unsigned char *)p;
 	struct hclge_vport *vport = hclge_get_vport(handle);
 	struct hclge_dev *hdev = vport->back;
+	int ret;
 
 	/* mac addr check */
 	if (is_zero_ether_addr(new_addr) ||
@@ -4101,14 +4269,39 @@ static int hclge_set_mac_addr(struct hnae3_handle *handle, void *p)
 		return -EINVAL;
 	}
 
-	hclge_rm_uc_addr(handle, hdev->hw.mac.mac_addr);
+	ret = hclge_rm_uc_addr(handle, hdev->hw.mac.mac_addr);
+	if (ret)
+		dev_warn(&hdev->pdev->dev,
+			 "remove old uc mac address fail, ret =%d.\n",
+			 ret);
 
-	if (!hclge_add_uc_addr(handle, new_addr)) {
-		ether_addr_copy(hdev->hw.mac.mac_addr, new_addr);
-		return 0;
+	ret = hclge_add_uc_addr(handle, new_addr);
+	if (ret) {
+		dev_err(&hdev->pdev->dev,
+			"add uc mac address fail, ret =%d.\n",
+			ret);
+
+		ret = hclge_add_uc_addr(handle, hdev->hw.mac.mac_addr);
+		if (ret) {
+			dev_err(&hdev->pdev->dev,
+				"restore uc mac address fail, ret =%d.\n",
+				ret);
+		}
+
+		return -EIO;
 	}
 
-	return -EIO;
+	ret = hclge_mac_pause_addr_cfg(hdev, new_addr);
+	if (ret) {
+		dev_err(&hdev->pdev->dev,
+			"configure mac pause address fail, ret =%d.\n",
+			ret);
+		return -EIO;
+	}
+
+	ether_addr_copy(hdev->hw.mac.mac_addr, new_addr);
+
+	return 0;
 }
 
 static int hclge_set_vlan_filter_ctrl(struct hclge_dev *hdev, u8 vlan_type,
@@ -4134,6 +4327,17 @@ static int hclge_set_vlan_filter_ctrl(struct hclge_dev *hdev, u8 vlan_type,
 	return 0;
 }
 
+#define HCLGE_FILTER_TYPE_VF		0
+#define HCLGE_FILTER_TYPE_PORT		1
+
+static void hclge_enable_vlan_filter(struct hnae3_handle *handle, bool enable)
+{
+	struct hclge_vport *vport = hclge_get_vport(handle);
+	struct hclge_dev *hdev = vport->back;
+
+	hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF, enable);
+}
+
 int hclge_set_vf_vlan_common(struct hclge_dev *hdev, int vfid,
 			     bool is_kill, u16 vlan, u8 qos, __be16 proto)
 {
@@ -4250,43 +4454,204 @@ static int hclge_set_vf_vlan_filter(struct hnae3_handle *handle, int vfid,
 	return hclge_set_vf_vlan_common(hdev, vfid, false, vlan, qos, proto);
 }
 
+static int hclge_set_vlan_tx_offload_cfg(struct hclge_vport *vport)
+{
+	struct hclge_tx_vtag_cfg *vcfg = &vport->txvlan_cfg;
+	struct hclge_vport_vtag_tx_cfg_cmd *req;
+	struct hclge_dev *hdev = vport->back;
+	struct hclge_desc desc;
+	int status;
+
+	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_TX_CFG, false);
+
+	req = (struct hclge_vport_vtag_tx_cfg_cmd *)desc.data;
+	req->def_vlan_tag1 = cpu_to_le16(vcfg->default_tag1);
+	req->def_vlan_tag2 = cpu_to_le16(vcfg->default_tag2);
+	hnae_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG_B,
+		     vcfg->accept_tag ? 1 : 0);
+	hnae_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG_B,
+		     vcfg->accept_untag ? 1 : 0);
+	hnae_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG1_EN_B,
+		     vcfg->insert_tag1_en ? 1 : 0);
+	hnae_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG2_EN_B,
+		     vcfg->insert_tag2_en ? 1 : 0);
+	hnae_set_bit(req->vport_vlan_cfg, HCLGE_CFG_NIC_ROCE_SEL_B, 0);
+
+	req->vf_offset = vport->vport_id / HCLGE_VF_NUM_PER_CMD;
+	req->vf_bitmap[req->vf_offset] =
+		1 << (vport->vport_id % HCLGE_VF_NUM_PER_BYTE);
+
+	status = hclge_cmd_send(&hdev->hw, &desc, 1);
+	if (status)
+		dev_err(&hdev->pdev->dev,
+			"Send port txvlan cfg command fail, ret =%d\n",
+			status);
+
+	return status;
+}
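
Worked example of the offset/bitmap math above, using HCLGE_VF_NUM_PER_CMD = 64
and HCLGE_VF_NUM_PER_BYTE = 8 from hclge_main.h below: for vport_id = 3,
vf_offset = 3 / 64 = 0 and vf_bitmap[0] = 1 << (3 % 8) = 0x08, i.e. bit 3 of the
first bitmap byte selects the vport this command applies to.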
+
+static int hclge_set_vlan_rx_offload_cfg(struct hclge_vport *vport)
+{
+	struct hclge_rx_vtag_cfg *vcfg = &vport->rxvlan_cfg;
+	struct hclge_vport_vtag_rx_cfg_cmd *req;
+	struct hclge_dev *hdev = vport->back;
+	struct hclge_desc desc;
+	int status;
+
+	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_RX_CFG, false);
+
+	req = (struct hclge_vport_vtag_rx_cfg_cmd *)desc.data;
+	hnae_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG1_EN_B,
+		     vcfg->strip_tag1_en ? 1 : 0);
+	hnae_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG2_EN_B,
+		     vcfg->strip_tag2_en ? 1 : 0);
+	hnae_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG1_EN_B,
+		     vcfg->vlan1_vlan_prionly ? 1 : 0);
+	hnae_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG2_EN_B,
+		     vcfg->vlan2_vlan_prionly ? 1 : 0);
+
+	req->vf_offset = vport->vport_id / HCLGE_VF_NUM_PER_CMD;
+	req->vf_bitmap[req->vf_offset] =
+		1 << (vport->vport_id % HCLGE_VF_NUM_PER_BYTE);
+
+	status = hclge_cmd_send(&hdev->hw, &desc, 1);
+	if (status)
+		dev_err(&hdev->pdev->dev,
+			"Send port rxvlan cfg command fail, ret =%d\n",
+			status);
+
+	return status;
+}
+
+static int hclge_set_vlan_protocol_type(struct hclge_dev *hdev)
+{
+	struct hclge_rx_vlan_type_cfg_cmd *rx_req;
+	struct hclge_tx_vlan_type_cfg_cmd *tx_req;
+	struct hclge_desc desc;
+	int status;
+
+	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_TYPE_ID, false);
+	rx_req = (struct hclge_rx_vlan_type_cfg_cmd *)desc.data;
+	rx_req->ot_fst_vlan_type =
+		cpu_to_le16(hdev->vlan_type_cfg.rx_ot_fst_vlan_type);
+	rx_req->ot_sec_vlan_type =
+		cpu_to_le16(hdev->vlan_type_cfg.rx_ot_sec_vlan_type);
+	rx_req->in_fst_vlan_type =
+		cpu_to_le16(hdev->vlan_type_cfg.rx_in_fst_vlan_type);
+	rx_req->in_sec_vlan_type =
+		cpu_to_le16(hdev->vlan_type_cfg.rx_in_sec_vlan_type);
+
+	status = hclge_cmd_send(&hdev->hw, &desc, 1);
+	if (status) {
+		dev_err(&hdev->pdev->dev,
+			"Send rxvlan protocol type command fail, ret =%d\n",
+			status);
+		return status;
+	}
+
+	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_INSERT, false);
+
+	tx_req = (struct hclge_tx_vlan_type_cfg_cmd *)&desc.data;
+	tx_req->ot_vlan_type = cpu_to_le16(hdev->vlan_type_cfg.tx_ot_vlan_type);
+	tx_req->in_vlan_type = cpu_to_le16(hdev->vlan_type_cfg.tx_in_vlan_type);
+
+	status = hclge_cmd_send(&hdev->hw, &desc, 1);
+	if (status)
+		dev_err(&hdev->pdev->dev,
+			"Send txvlan protocol type command fail, ret =%d\n",
+			status);
+
+	return status;
+}
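
hclge_init_vlan_config() below programs all six of these protocol-type fields
with HCLGE_DEF_VLAN_TYPE (0x8100, the standard 802.1Q TPID), so by default both
inner and outer tags are recognized as ordinary C-tags.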
+
 static int hclge_init_vlan_config(struct hclge_dev *hdev)
 {
-#define HCLGE_VLAN_TYPE_VF_TABLE   0
-#define HCLGE_VLAN_TYPE_PORT_TABLE 1
+#define HCLGE_DEF_VLAN_TYPE		0x8100
+
 	struct hnae3_handle *handle;
+	struct hclge_vport *vport;
 	int ret;
+	int i;
 
-	ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_VLAN_TYPE_VF_TABLE,
-					 true);
+	ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF, true);
 	if (ret)
 		return ret;
 
-	ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_VLAN_TYPE_PORT_TABLE,
-					 true);
+	ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT, true);
 	if (ret)
 		return ret;
 
+	hdev->vlan_type_cfg.rx_in_fst_vlan_type = HCLGE_DEF_VLAN_TYPE;
+	hdev->vlan_type_cfg.rx_in_sec_vlan_type = HCLGE_DEF_VLAN_TYPE;
+	hdev->vlan_type_cfg.rx_ot_fst_vlan_type = HCLGE_DEF_VLAN_TYPE;
+	hdev->vlan_type_cfg.rx_ot_sec_vlan_type = HCLGE_DEF_VLAN_TYPE;
+	hdev->vlan_type_cfg.tx_ot_vlan_type = HCLGE_DEF_VLAN_TYPE;
+	hdev->vlan_type_cfg.tx_in_vlan_type = HCLGE_DEF_VLAN_TYPE;
+
+	ret = hclge_set_vlan_protocol_type(hdev);
+	if (ret)
+		return ret;
+
+	for (i = 0; i < hdev->num_alloc_vport; i++) {
+		vport = &hdev->vport[i];
+		vport->txvlan_cfg.accept_tag = true;
+		vport->txvlan_cfg.accept_untag = true;
+		vport->txvlan_cfg.insert_tag1_en = false;
+		vport->txvlan_cfg.insert_tag2_en = false;
+		vport->txvlan_cfg.default_tag1 = 0;
+		vport->txvlan_cfg.default_tag2 = 0;
+
+		ret = hclge_set_vlan_tx_offload_cfg(vport);
+		if (ret)
+			return ret;
+
+		vport->rxvlan_cfg.strip_tag1_en = false;
+		vport->rxvlan_cfg.strip_tag2_en = true;
+		vport->rxvlan_cfg.vlan1_vlan_prionly = false;
+		vport->rxvlan_cfg.vlan2_vlan_prionly = false;
+
+		ret = hclge_set_vlan_rx_offload_cfg(vport);
+		if (ret)
+			return ret;
+	}
+
 	handle = &hdev->vport[0].nic;
 	return hclge_set_port_vlan_filter(handle, htons(ETH_P_8021Q), 0, false);
 }
 
+static int hclge_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable)
+{
+	struct hclge_vport *vport = hclge_get_vport(handle);
+
+	vport->rxvlan_cfg.strip_tag1_en = false;
+	vport->rxvlan_cfg.strip_tag2_en = enable;
+	vport->rxvlan_cfg.vlan1_vlan_prionly = false;
+	vport->rxvlan_cfg.vlan2_vlan_prionly = false;
+
+	return hclge_set_vlan_rx_offload_cfg(vport);
+}
+
 static int hclge_set_mtu(struct hnae3_handle *handle, int new_mtu)
 {
 	struct hclge_vport *vport = hclge_get_vport(handle);
 	struct hclge_config_max_frm_size_cmd *req;
 	struct hclge_dev *hdev = vport->back;
 	struct hclge_desc desc;
+	int max_frm_size;
 	int ret;
 
-	if ((new_mtu < HCLGE_MAC_MIN_MTU) || (new_mtu > HCLGE_MAC_MAX_MTU))
+	max_frm_size = new_mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
+
+	if (max_frm_size < HCLGE_MAC_MIN_FRAME ||
+	    max_frm_size > HCLGE_MAC_MAX_FRAME)
 		return -EINVAL;
 
-	hdev->mps = new_mtu;
+	max_frm_size = max(max_frm_size, HCLGE_MAC_DEFAULT_FRAME);
+
 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAX_FRM_SIZE, false);
 
 	req = (struct hclge_config_max_frm_size_cmd *)desc.data;
-	req->max_frm_size = cpu_to_le16(new_mtu);
+	req->max_frm_size = cpu_to_le16(max_frm_size);
 
 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
 	if (ret) {
@@ -4294,6 +4659,8 @@ static int hclge_set_mtu(struct hnae3_handle *handle, int new_mtu)
 		return ret;
 	}
 
+	hdev->mps = max_frm_size;
+
 	return 0;
 }
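
Worked example of the new frame-size math: for the default MTU of 1500,
max_frm_size = 1500 + ETH_HLEN (14) + ETH_FCS_LEN (4) + VLAN_HLEN (4) = 1522
bytes, which is exactly HCLGE_MAC_DEFAULT_FRAME. The max() afterwards guarantees
the MAC is never programmed below that default, so very small MTUs still leave
room for a full-sized tagged frame.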
 
@@ -4341,7 +4708,7 @@ static int hclge_get_reset_status(struct hclge_dev *hdev, u16 queue_id)
 	return hnae_get_bit(req->ready_to_reset, HCLGE_TQP_RESET_B);
 }
 
-static void hclge_reset_tqp(struct hnae3_handle *handle, u16 queue_id)
+void hclge_reset_tqp(struct hnae3_handle *handle, u16 queue_id)
 {
 	struct hclge_vport *vport = hclge_get_vport(handle);
 	struct hclge_dev *hdev = vport->back;
@@ -4392,6 +4759,100 @@ static u32 hclge_get_fw_version(struct hnae3_handle *handle)
 	return hdev->fw_version;
 }
 
+static void hclge_get_flowctrl_adv(struct hnae3_handle *handle,
+				   u32 *flowctrl_adv)
+{
+	struct hclge_vport *vport = hclge_get_vport(handle);
+	struct hclge_dev *hdev = vport->back;
+	struct phy_device *phydev = hdev->hw.mac.phydev;
+
+	if (!phydev)
+		return;
+
+	*flowctrl_adv |= (phydev->advertising & ADVERTISED_Pause) |
+			 (phydev->advertising & ADVERTISED_Asym_Pause);
+}
+
+static void hclge_set_flowctrl_adv(struct hclge_dev *hdev, u32 rx_en, u32 tx_en)
+{
+	struct phy_device *phydev = hdev->hw.mac.phydev;
+
+	if (!phydev)
+		return;
+
+	phydev->advertising &= ~(ADVERTISED_Pause | ADVERTISED_Asym_Pause);
+
+	if (rx_en)
+		phydev->advertising |= ADVERTISED_Pause | ADVERTISED_Asym_Pause;
+
+	if (tx_en)
+		phydev->advertising ^= ADVERTISED_Asym_Pause;
+}
+
+static int hclge_cfg_pauseparam(struct hclge_dev *hdev, u32 rx_en, u32 tx_en)
+{
+	int ret;
+
+	if (rx_en && tx_en)
+		hdev->fc_mode_last_time = HCLGE_FC_FULL;
+	else if (rx_en && !tx_en)
+		hdev->fc_mode_last_time = HCLGE_FC_RX_PAUSE;
+	else if (!rx_en && tx_en)
+		hdev->fc_mode_last_time = HCLGE_FC_TX_PAUSE;
+	else
+		hdev->fc_mode_last_time = HCLGE_FC_NONE;
+
+	if (hdev->tm_info.fc_mode == HCLGE_FC_PFC)
+		return 0;
+
+	ret = hclge_mac_pause_en_cfg(hdev, tx_en, rx_en);
+	if (ret) {
+		dev_err(&hdev->pdev->dev, "configure pauseparam error, ret = %d.\n",
+			ret);
+		return ret;
+	}
+
+	hdev->tm_info.fc_mode = hdev->fc_mode_last_time;
+
+	return 0;
+}
+
+int hclge_cfg_flowctrl(struct hclge_dev *hdev)
+{
+	struct phy_device *phydev = hdev->hw.mac.phydev;
+	u16 remote_advertising = 0;
+	u16 local_advertising = 0;
+	u32 rx_pause, tx_pause;
+	u8 flowctl;
+
+	if (!phydev->link || !phydev->autoneg)
+		return 0;
+
+	if (phydev->advertising & ADVERTISED_Pause)
+		local_advertising = ADVERTISE_PAUSE_CAP;
+
+	if (phydev->advertising & ADVERTISED_Asym_Pause)
+		local_advertising |= ADVERTISE_PAUSE_ASYM;
+
+	if (phydev->pause)
+		remote_advertising = LPA_PAUSE_CAP;
+
+	if (phydev->asym_pause)
+		remote_advertising |= LPA_PAUSE_ASYM;
+
+	flowctl = mii_resolve_flowctrl_fdx(local_advertising,
+					   remote_advertising);
+	tx_pause = flowctl & FLOW_CTRL_TX;
+	rx_pause = flowctl & FLOW_CTRL_RX;
+
+	if (phydev->duplex == HCLGE_MAC_HALF) {
+		tx_pause = 0;
+		rx_pause = 0;
+	}
+
+	return hclge_cfg_pauseparam(hdev, rx_pause, tx_pause);
+}
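
For illustration of the resolution step: if the local PHY advertises
Pause | Asym_Pause while the link partner advertises only Asym_Pause,
mii_resolve_flowctrl_fdx() yields FLOW_CTRL_RX, i.e. we may honor received
pause frames but must not transmit them. Half duplex clears both, since 802.3x
flow control is defined for full duplex only.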
+
 static void hclge_get_pauseparam(struct hnae3_handle *handle, u32 *auto_neg,
 				 u32 *rx_en, u32 *tx_en)
 {
@@ -4421,6 +4882,41 @@ static void hclge_get_pauseparam(struct hnae3_handle *handle, u32 *auto_neg,
 	}
 }
 
+static int hclge_set_pauseparam(struct hnae3_handle *handle, u32 auto_neg,
+				u32 rx_en, u32 tx_en)
+{
+	struct hclge_vport *vport = hclge_get_vport(handle);
+	struct hclge_dev *hdev = vport->back;
+	struct phy_device *phydev = hdev->hw.mac.phydev;
+	u32 fc_autoneg;
+
+	/* Only support flow control negotiation for netdev with
+	 * phy attached for now.
+	 */
+	if (!phydev)
+		return -EOPNOTSUPP;
+
+	fc_autoneg = hclge_get_autoneg(handle);
+	if (auto_neg != fc_autoneg) {
+		dev_info(&hdev->pdev->dev,
+			 "To change autoneg please use: ethtool -s <dev> autoneg <on|off>\n");
+		return -EOPNOTSUPP;
+	}
+
+	if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) {
+		dev_info(&hdev->pdev->dev,
+			 "Priority flow control enabled. Cannot set link flow control.\n");
+		return -EOPNOTSUPP;
+	}
+
+	hclge_set_flowctrl_adv(hdev, rx_en, tx_en);
+
+	if (!fc_autoneg)
+		return hclge_cfg_pauseparam(hdev, rx_en, tx_en);
+
+	return phy_start_aneg(phydev);
+}
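
This hook backs 'ethtool -A <dev> [rx on|off] [tx on|off]'. With flow-control
autoneg enabled, the requested rx/tx settings only update the PHY advertisement
and restart autonegotiation; the resolved mode is then presumably applied via
hclge_cfg_flowctrl() from the PHY adjust-link path once the link renegotiates.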
+
 static void hclge_get_ksettings_an_result(struct hnae3_handle *handle,
 					  u8 *auto_neg, u32 *speed, u8 *duplex)
 {
@@ -4661,6 +5157,8 @@ static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
 	hdev->pdev = pdev;
 	hdev->ae_dev = ae_dev;
 	hdev->reset_type = HNAE3_NONE_RESET;
+	hdev->reset_request = 0;
+	hdev->reset_pending = 0;
 	ae_dev->priv = hdev;
 
 	ret = hclge_pci_init(hdev);
@@ -4772,12 +5270,18 @@ static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
 
 	timer_setup(&hdev->service_timer, hclge_service_timer, 0);
 	INIT_WORK(&hdev->service_task, hclge_service_task);
+	INIT_WORK(&hdev->rst_service_task, hclge_reset_service_task);
+	INIT_WORK(&hdev->mbx_service_task, hclge_mailbox_service_task);
 
 	/* Enable MISC vector(vector0) */
 	hclge_enable_vector(&hdev->misc_vector, true);
 
 	set_bit(HCLGE_STATE_SERVICE_INITED, &hdev->state);
 	set_bit(HCLGE_STATE_DOWN, &hdev->state);
+	clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state);
+	clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
+	clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state);
+	clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state);
 
 	pr_info("%s driver initialization finished.\n", HCLGE_DRIVER_NAME);
 	return 0;
@@ -4889,25 +5393,159 @@ static void hclge_uninit_ae_dev(struct hnae3_ae_dev *ae_dev)
 		del_timer_sync(&hdev->service_timer);
 	if (hdev->service_task.func)
 		cancel_work_sync(&hdev->service_task);
+	if (hdev->rst_service_task.func)
+		cancel_work_sync(&hdev->rst_service_task);
+	if (hdev->mbx_service_task.func)
+		cancel_work_sync(&hdev->mbx_service_task);
 
 	if (mac->phydev)
 		mdiobus_unregister(mac->mdio_bus);
 
 	/* Disable MISC vector(vector0) */
 	hclge_enable_vector(&hdev->misc_vector, false);
-	hclge_free_vector(hdev, 0);
 	hclge_destroy_cmd_queue(&hdev->hw);
+	hclge_misc_irq_uninit(hdev);
 	hclge_pci_uninit(hdev);
 	ae_dev->priv = NULL;
 }
 
+static u32 hclge_get_max_channels(struct hnae3_handle *handle)
+{
+	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
+	struct hclge_vport *vport = hclge_get_vport(handle);
+	struct hclge_dev *hdev = vport->back;
+
+	return min_t(u32, hdev->rss_size_max * kinfo->num_tc, hdev->num_tqps);
+}
+
+static void hclge_get_channels(struct hnae3_handle *handle,
+			       struct ethtool_channels *ch)
+{
+	struct hclge_vport *vport = hclge_get_vport(handle);
+
+	ch->max_combined = hclge_get_max_channels(handle);
+	ch->other_count = 1;
+	ch->max_other = 1;
+	ch->combined_count = vport->alloc_tqps;
+}
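
These callbacks feed the standard ethtool channels interface: 'ethtool -l <dev>'
reports max_combined and combined_count from here, and 'ethtool -L <dev>
combined N' should land in hclge_set_channels() below, routed through the hns3
ethtool ops.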
+
+static void hclge_get_tqps_and_rss_info(struct hnae3_handle *handle,
+					u16 *free_tqps, u16 *max_rss_size)
+{
+	struct hclge_vport *vport = hclge_get_vport(handle);
+	struct hclge_dev *hdev = vport->back;
+	u16 temp_tqps = 0;
+	int i;
+
+	for (i = 0; i < hdev->num_tqps; i++) {
+		if (!hdev->htqp[i].alloced)
+			temp_tqps++;
+	}
+	*free_tqps = temp_tqps;
+	*max_rss_size = hdev->rss_size_max;
+}
+
+static void hclge_release_tqp(struct hclge_vport *vport)
+{
+	struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
+	struct hclge_dev *hdev = vport->back;
+	int i;
+
+	for (i = 0; i < kinfo->num_tqps; i++) {
+		struct hclge_tqp *tqp =
+			container_of(kinfo->tqp[i], struct hclge_tqp, q);
+
+		tqp->q.handle = NULL;
+		tqp->q.tqp_index = 0;
+		tqp->alloced = false;
+	}
+
+	devm_kfree(&hdev->pdev->dev, kinfo->tqp);
+	kinfo->tqp = NULL;
+}
+
+static int hclge_set_channels(struct hnae3_handle *handle, u32 new_tqps_num)
+{
+	struct hclge_vport *vport = hclge_get_vport(handle);
+	struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
+	struct hclge_dev *hdev = vport->back;
+	int cur_rss_size = kinfo->rss_size;
+	int cur_tqps = kinfo->num_tqps;
+	u16 tc_offset[HCLGE_MAX_TC_NUM];
+	u16 tc_valid[HCLGE_MAX_TC_NUM];
+	u16 tc_size[HCLGE_MAX_TC_NUM];
+	u16 roundup_size;
+	u32 *rss_indir;
+	int ret, i;
+
+	hclge_release_tqp(vport);
+
+	ret = hclge_knic_setup(vport, new_tqps_num);
+	if (ret) {
+		dev_err(&hdev->pdev->dev, "setup nic fail, ret =%d\n", ret);
+		return ret;
+	}
+
+	ret = hclge_map_tqp_to_vport(hdev, vport);
+	if (ret) {
+		dev_err(&hdev->pdev->dev, "map vport tqp fail, ret =%d\n", ret);
+		return ret;
+	}
+
+	ret = hclge_tm_schd_init(hdev);
+	if (ret) {
+		dev_err(&hdev->pdev->dev, "tm schd init fail, ret =%d\n", ret);
+		return ret;
+	}
+
+	roundup_size = roundup_pow_of_two(kinfo->rss_size);
+	roundup_size = ilog2(roundup_size);
+	/* Set the RSS TC mode according to the new RSS size */
+	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
+		tc_valid[i] = 0;
+
+		if (!(hdev->hw_tc_map & BIT(i)))
+			continue;
+
+		tc_valid[i] = 1;
+		tc_size[i] = roundup_size;
+		tc_offset[i] = kinfo->rss_size * i;
+	}
+	ret = hclge_set_rss_tc_mode(hdev, tc_valid, tc_size, tc_offset);
+	if (ret)
+		return ret;
+
+	/* Reinitialize the RSS indirection table according to the new RSS size */
+	rss_indir = kcalloc(HCLGE_RSS_IND_TBL_SIZE, sizeof(u32), GFP_KERNEL);
+	if (!rss_indir)
+		return -ENOMEM;
+
+	for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
+		rss_indir[i] = i % kinfo->rss_size;
+
+	ret = hclge_set_rss(handle, rss_indir, NULL, 0);
+	if (ret)
+		dev_err(&hdev->pdev->dev, "set rss indir table fail, ret=%d\n",
+			ret);
+
+	kfree(rss_indir);
+
+	if (!ret)
+		dev_info(&hdev->pdev->dev,
+			 "Channels changed, rss_size from %d to %d, tqps from %d to %d\n",
+			 cur_rss_size, kinfo->rss_size,
+			 cur_tqps, kinfo->rss_size * kinfo->num_tc);
+
+	return ret;
+}
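
Note on the rebuilt indirection table: rss_indir[i] = i % kinfo->rss_size simply
round-robins the queues, so with rss_size = 4 the HCLGE_RSS_IND_TBL_SIZE entries
read 0, 1, 2, 3, 0, 1, ... spreading flows evenly across the resized queue set.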
+
 static const struct hnae3_ae_ops hclge_ops = {
 	.init_ae_dev = hclge_init_ae_dev,
 	.uninit_ae_dev = hclge_uninit_ae_dev,
 	.init_client_instance = hclge_init_client_instance,
 	.uninit_client_instance = hclge_uninit_client_instance,
-	.map_ring_to_vector = hclge_map_handle_ring_to_vector,
-	.unmap_ring_from_vector = hclge_unmap_ring_from_vector,
+	.map_ring_to_vector = hclge_map_ring_to_vector,
+	.unmap_ring_from_vector = hclge_unmap_ring_frm_vector,
 	.get_vector = hclge_get_vector,
 	.set_promisc_mode = hclge_set_promisc_mode,
 	.set_loopback = hclge_set_loopback,
@@ -4934,6 +5572,7 @@ static const struct hnae3_ae_ops hclge_ops = {
 	.set_autoneg = hclge_set_autoneg,
 	.get_autoneg = hclge_get_autoneg,
 	.get_pauseparam = hclge_get_pauseparam,
+	.set_pauseparam = hclge_set_pauseparam,
 	.set_mtu = hclge_set_mtu,
 	.reset_queue = hclge_reset_tqp,
 	.get_stats = hclge_get_stats,
@@ -4942,9 +5581,15 @@ static const struct hnae3_ae_ops hclge_ops = {
 	.get_sset_count = hclge_get_sset_count,
 	.get_fw_version = hclge_get_fw_version,
 	.get_mdix_mode = hclge_get_mdix_mode,
+	.enable_vlan_filter = hclge_enable_vlan_filter,
 	.set_vlan_filter = hclge_set_port_vlan_filter,
 	.set_vf_vlan_filter = hclge_set_vf_vlan_filter,
+	.enable_hw_strip_rxvtag = hclge_en_hw_strip_rxvtag,
 	.reset_event = hclge_reset_event,
+	.get_tqps_and_rss_info = hclge_get_tqps_and_rss_info,
+	.set_channels = hclge_set_channels,
+	.get_channels = hclge_get_channels,
+	.get_flowctrl_adv = hclge_get_flowctrl_adv,
 };
 
 static struct hnae3_ae_algo ae_algo = {
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
index 7027814..eeb6c8d 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
@@ -79,6 +79,10 @@
 #define HCLGE_PHY_MDIX_STATUS_B	(6)
 #define HCLGE_PHY_SPEED_DUP_RESOLVE_B	(11)
 
+/* Factor used to calculate offset and bitmap of VF num */
+#define HCLGE_VF_NUM_PER_CMD           64
+#define HCLGE_VF_NUM_PER_BYTE          8
+
 /* Reset related Registers */
 #define HCLGE_MISC_RESET_STS_REG	0x20700
 #define HCLGE_GLOBAL_RESET_REG		0x20A00
@@ -92,6 +96,16 @@
 #define HCLGE_VECTOR0_CORERESET_INT_B	6
 #define HCLGE_VECTOR0_IMPRESET_INT_B	7
 
+/* Vector0 interrupt CMDQ event source register(RW) */
+#define HCLGE_VECTOR0_CMDQ_SRC_REG	0x27100
+/* CMDQ register bits for RX event(=MBX event) */
+#define HCLGE_VECTOR0_RX_CMDQ_INT_B	1
+
+#define HCLGE_MAC_DEFAULT_FRAME \
+	(ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN + ETH_DATA_LEN)
+#define HCLGE_MAC_MIN_FRAME		64
+#define HCLGE_MAC_MAX_FRAME		9728
+
 enum HCLGE_DEV_STATE {
 	HCLGE_STATE_REINITING,
 	HCLGE_STATE_DOWN,
@@ -99,12 +113,20 @@ enum HCLGE_DEV_STATE {
 	HCLGE_STATE_REMOVING,
 	HCLGE_STATE_SERVICE_INITED,
 	HCLGE_STATE_SERVICE_SCHED,
+	HCLGE_STATE_RST_SERVICE_SCHED,
+	HCLGE_STATE_RST_HANDLING,
+	HCLGE_STATE_MBX_SERVICE_SCHED,
 	HCLGE_STATE_MBX_HANDLING,
-	HCLGE_STATE_MBX_IRQ,
-	HCLGE_STATE_RESET_INT,
+	HCLGE_STATE_STATISTICS_UPDATING,
 	HCLGE_STATE_MAX
 };
 
+enum hclge_evt_cause {
+	HCLGE_VECTOR0_EVENT_RST,
+	HCLGE_VECTOR0_EVENT_MBX,
+	HCLGE_VECTOR0_EVENT_OTHER,
+};
+
 #define HCLGE_MPF_ENBALE 1
 struct hclge_caps {
 	u16 num_tqp;
@@ -208,6 +230,7 @@ struct hclge_cfg {
 	u8 tc_num;
 	u16 tqp_desc_num;
 	u16 rx_buf_len;
+	u16 rss_size_max;
 	u8 phy_addr;
 	u8 media_type;
 	u8 mac_addr[ETH_ALEN];
@@ -364,14 +387,23 @@ struct hclge_mac_stats {
 	u64 mac_tx_multi_pkt_num;
 	u64 mac_tx_broad_pkt_num;
 	u64 mac_tx_undersize_pkt_num;
-	u64 mac_tx_overrsize_pkt_num;
+	u64 mac_tx_oversize_pkt_num;
 	u64 mac_tx_64_oct_pkt_num;
 	u64 mac_tx_65_127_oct_pkt_num;
 	u64 mac_tx_128_255_oct_pkt_num;
 	u64 mac_tx_256_511_oct_pkt_num;
 	u64 mac_tx_512_1023_oct_pkt_num;
 	u64 mac_tx_1024_1518_oct_pkt_num;
-	u64 mac_tx_1519_max_oct_pkt_num;
+	u64 mac_tx_1519_2047_oct_pkt_num;
+	u64 mac_tx_2048_4095_oct_pkt_num;
+	u64 mac_tx_4096_8191_oct_pkt_num;
+	u64 mac_tx_8192_12287_oct_pkt_num; /* valid for GE MAC only */
+	u64 mac_tx_8192_9216_oct_pkt_num; /* valid for LGE & CGE MAC only */
+	u64 mac_tx_9217_12287_oct_pkt_num; /* valid for LGE & CGE MAC */
+	u64 mac_tx_12288_16383_oct_pkt_num;
+	u64 mac_tx_1519_max_good_oct_pkt_num;
+	u64 mac_tx_1519_max_bad_oct_pkt_num;
+
 	u64 mac_rx_total_pkt_num;
 	u64 mac_rx_total_oct_num;
 	u64 mac_rx_good_pkt_num;
@@ -382,33 +414,52 @@ struct hclge_mac_stats {
 	u64 mac_rx_multi_pkt_num;
 	u64 mac_rx_broad_pkt_num;
 	u64 mac_rx_undersize_pkt_num;
-	u64 mac_rx_overrsize_pkt_num;
+	u64 mac_rx_oversize_pkt_num;
 	u64 mac_rx_64_oct_pkt_num;
 	u64 mac_rx_65_127_oct_pkt_num;
 	u64 mac_rx_128_255_oct_pkt_num;
 	u64 mac_rx_256_511_oct_pkt_num;
 	u64 mac_rx_512_1023_oct_pkt_num;
 	u64 mac_rx_1024_1518_oct_pkt_num;
-	u64 mac_rx_1519_max_oct_pkt_num;
+	u64 mac_rx_1519_2047_oct_pkt_num;
+	u64 mac_rx_2048_4095_oct_pkt_num;
+	u64 mac_rx_4096_8191_oct_pkt_num;
+	u64 mac_rx_8192_12287_oct_pkt_num; /* valid for GE MAC only */
+	u64 mac_rx_8192_9216_oct_pkt_num; /* valid for LGE & CGE MAC only */
+	u64 mac_rx_9217_12287_oct_pkt_num; /* valid for LGE & CGE MAC only */
+	u64 mac_rx_12288_16383_oct_pkt_num;
+	u64 mac_rx_1519_max_good_oct_pkt_num;
+	u64 mac_rx_1519_max_bad_oct_pkt_num;
 
-	u64 mac_trans_fragment_pkt_num;
-	u64 mac_trans_undermin_pkt_num;
-	u64 mac_trans_jabber_pkt_num;
-	u64 mac_trans_err_all_pkt_num;
-	u64 mac_trans_from_app_good_pkt_num;
-	u64 mac_trans_from_app_bad_pkt_num;
-	u64 mac_rcv_fragment_pkt_num;
-	u64 mac_rcv_undermin_pkt_num;
-	u64 mac_rcv_jabber_pkt_num;
-	u64 mac_rcv_fcs_err_pkt_num;
-	u64 mac_rcv_send_app_good_pkt_num;
-	u64 mac_rcv_send_app_bad_pkt_num;
+	u64 mac_tx_fragment_pkt_num;
+	u64 mac_tx_undermin_pkt_num;
+	u64 mac_tx_jabber_pkt_num;
+	u64 mac_tx_err_all_pkt_num;
+	u64 mac_tx_from_app_good_pkt_num;
+	u64 mac_tx_from_app_bad_pkt_num;
+	u64 mac_rx_fragment_pkt_num;
+	u64 mac_rx_undermin_pkt_num;
+	u64 mac_rx_jabber_pkt_num;
+	u64 mac_rx_fcs_err_pkt_num;
+	u64 mac_rx_send_app_good_pkt_num;
+	u64 mac_rx_send_app_bad_pkt_num;
 };
 
+#define HCLGE_STATS_TIMER_INTERVAL	(60 * 5)
 struct hclge_hw_stats {
 	struct hclge_mac_stats      mac_stats;
 	struct hclge_64_bit_stats   all_64_bit_stats;
 	struct hclge_32_bit_stats   all_32_bit_stats;
+	u32 stats_timer;
+};
+
+struct hclge_vlan_type_cfg {
+	u16 rx_ot_fst_vlan_type;
+	u16 rx_ot_sec_vlan_type;
+	u16 rx_in_fst_vlan_type;
+	u16 rx_in_sec_vlan_type;
+	u16 tx_ot_vlan_type;
+	u16 tx_in_vlan_type;
 };
 
 struct hclge_dev {
@@ -420,6 +471,8 @@ struct hclge_dev {
 	unsigned long state;
 
 	enum hnae3_reset_type reset_type;
+	unsigned long reset_request;	/* reset has been requested */
+	unsigned long reset_pending;	/* client rst is pending to be served */
 	u32 fw_version;
 	u16 num_vmdq_vport;		/* Num vmdq vport this PF has set up */
 	u16 num_tqps;			/* Num task queue pairs of this PF */
@@ -469,6 +522,8 @@ struct hclge_dev {
 	unsigned long service_timer_previous;
 	struct timer_list service_timer;
 	struct work_struct service_task;
+	struct work_struct rst_service_task;
+	struct work_struct mbx_service_task;
 
 	bool cur_promisc;
 	int num_alloc_vfs;	/* Actual number of VFs allocated */
@@ -493,6 +548,26 @@ struct hclge_dev {
 	enum hclge_mta_dmac_sel_type mta_mac_sel_type;
 	bool enable_mta; /* Multicast filter enable */
 	bool accept_mta_mc; /* Whether accept mta filter multicast */
+
+	struct hclge_vlan_type_cfg vlan_type_cfg;
+};
+
+/* VPort level vlan tag configuration for TX direction */
+struct hclge_tx_vtag_cfg {
+	bool accept_tag;	/* Whether accept tagged packet from host */
+	bool accept_untag;	/* Whether accept untagged packet from host */
+	bool insert_tag1_en;	/* Whether insert inner vlan tag */
+	bool insert_tag2_en;	/* Whether insert outer vlan tag */
+	u16  default_tag1;	/* The default inner vlan tag to insert */
+	u16  default_tag2;	/* The default outer vlan tag to insert */
+};
+
+/* VPort level vlan tag configuration for RX direction */
+struct hclge_rx_vtag_cfg {
+	bool strip_tag1_en;	/* Whether strip inner vlan tag */
+	bool strip_tag2_en;	/* Whether strip outer vlan tag */
+	bool vlan1_vlan_prionly;/* Inner VLAN Tag up to descriptor Enable */
+	bool vlan2_vlan_prionly;/* Outer VLAN Tag up to descriptor Enable */
 };
 
 struct hclge_vport {
@@ -507,6 +582,9 @@ struct hclge_vport {
 	u16 bw_limit;		/* VSI BW Limit (0 = disabled) */
 	u8  dwrr;
 
+	struct hclge_tx_vtag_cfg  txvlan_cfg;
+	struct hclge_rx_vtag_cfg  rxvlan_cfg;
+
 	int vport_id;
 	struct hclge_dev *back;  /* Back reference to associated dev */
 	struct hnae3_handle nic;
@@ -529,8 +607,10 @@ int hclge_cfg_func_mta_filter(struct hclge_dev *hdev,
 			      u8 func_id,
 			      bool enable);
 struct hclge_vport *hclge_get_vport(struct hnae3_handle *handle);
-int hclge_map_vport_ring_to_vector(struct hclge_vport *vport, int vector,
-				   struct hnae3_ring_chain_node *ring_chain);
+int hclge_bind_ring_with_vector(struct hclge_vport *vport,
+				int vector_id, bool en,
+				struct hnae3_ring_chain_node *ring_chain);
+
 static inline int hclge_get_queue_id(struct hnae3_queue *queue)
 {
 	struct hclge_tqp *tqp = container_of(queue, struct hclge_tqp, q);
@@ -544,4 +624,8 @@ int hclge_set_vf_vlan_common(struct hclge_dev *vport, int vfid,
 
 int hclge_buffer_alloc(struct hclge_dev *hdev);
 int hclge_rss_init_hw(struct hclge_dev *hdev);
+
+void hclge_mbx_handler(struct hclge_dev *hdev);
+void hclge_reset_tqp(struct hnae3_handle *handle, u16 queue_id);
+int hclge_cfg_flowctrl(struct hclge_dev *hdev);
 #endif
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c
new file mode 100644
index 0000000..96f453f
--- /dev/null
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c
@@ -0,0 +1,410 @@
+// SPDX-License-Identifier: GPL-2.0+
+// Copyright (c) 2016-2017 Hisilicon Limited.
+
+#include "hclge_main.h"
+#include "hclge_mbx.h"
+#include "hnae3.h"
+
+/* hclge_gen_resp_to_vf: used to generate a synchronous response to VF when PF
+ * receives a mailbox message from VF.
+ * @vport: pointer to struct hclge_vport
+ * @vf_to_pf_req: pointer to hclge_mbx_vf_to_pf_cmd of the original mailbox
+ *		  message
+ * @resp_status: indicate to VF whether its request succeeded (0) or failed.
+ */
+static int hclge_gen_resp_to_vf(struct hclge_vport *vport,
+				struct hclge_mbx_vf_to_pf_cmd *vf_to_pf_req,
+				int resp_status,
+				u8 *resp_data, u16 resp_data_len)
+{
+	struct hclge_mbx_pf_to_vf_cmd *resp_pf_to_vf;
+	struct hclge_dev *hdev = vport->back;
+	enum hclge_cmd_status status;
+	struct hclge_desc desc;
+
+	resp_pf_to_vf = (struct hclge_mbx_pf_to_vf_cmd *)desc.data;
+
+	if (resp_data_len > HCLGE_MBX_MAX_RESP_DATA_SIZE) {
+		dev_err(&hdev->pdev->dev,
+			"PF fail to gen resp to VF, len %d exceeds max len %d\n",
+			resp_data_len,
+			HCLGE_MBX_MAX_RESP_DATA_SIZE);
+		return -EINVAL;
+	}
+
+	hclge_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_MBX_PF_TO_VF, false);
+
+	resp_pf_to_vf->dest_vfid = vf_to_pf_req->mbx_src_vfid;
+	resp_pf_to_vf->msg_len = vf_to_pf_req->msg_len;
+
+	resp_pf_to_vf->msg[0] = HCLGE_MBX_PF_VF_RESP;
+	resp_pf_to_vf->msg[1] = vf_to_pf_req->msg[0];
+	resp_pf_to_vf->msg[2] = vf_to_pf_req->msg[1];
+	resp_pf_to_vf->msg[3] = (resp_status == 0) ? 0 : 1;
+
+	if (resp_data && resp_data_len > 0)
+		memcpy(&resp_pf_to_vf->msg[4], resp_data, resp_data_len);
+
+	status = hclge_cmd_send(&hdev->hw, &desc, 1);
+	if (status)
+		dev_err(&hdev->pdev->dev,
+			"PF failed(=%d) to send response to VF\n", status);
+
+	return status;
+}
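
As a side note on the response layout: a minimal user-space sketch of how
the VF end could decode the four-byte header assembled above. The opcode
and request-code values here are illustrative placeholders, not the real
constants from hclge_mbx.h:

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint8_t msg[16] = { 0 };

		/* PF side, mirroring hclge_gen_resp_to_vf() above */
		msg[0] = 0x0a;	/* HCLGE_MBX_PF_VF_RESP (placeholder value) */
		msg[1] = 0x03;	/* code of the original VF request (placeholder) */
		msg[2] = 0x01;	/* subcode of the original request (placeholder) */
		msg[3] = 0;	/* 0 on success, non-zero on failure */
		/* msg[4..] would carry up to resp_data_len bytes of payload */

		/* VF side: match the response against the pending request */
		if (msg[1] == 0x03 && msg[2] == 0x01)
			printf("request %#x/%#x %s\n", msg[1], msg[2],
			       msg[3] ? "failed" : "succeeded");
		return 0;
	}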
+
+static int hclge_send_mbx_msg(struct hclge_vport *vport, u8 *msg, u16 msg_len,
+			      u16 mbx_opcode, u8 dest_vfid)
+{
+	struct hclge_mbx_pf_to_vf_cmd *resp_pf_to_vf;
+	struct hclge_dev *hdev = vport->back;
+	enum hclge_cmd_status status;
+	struct hclge_desc desc;
+
+	resp_pf_to_vf = (struct hclge_mbx_pf_to_vf_cmd *)desc.data;
+
+	hclge_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_MBX_PF_TO_VF, false);
+
+	resp_pf_to_vf->dest_vfid = dest_vfid;
+	resp_pf_to_vf->msg_len = msg_len;
+	resp_pf_to_vf->msg[0] = mbx_opcode;
+
+	memcpy(&resp_pf_to_vf->msg[1], msg, msg_len);
+
+	status = hclge_cmd_send(&hdev->hw, &desc, 1);
+	if (status)
+		dev_err(&hdev->pdev->dev,
+			"PF failed(=%d) to send mailbox message to VF\n",
+			status);
+
+	return status;
+}
+
+static void hclge_free_vector_ring_chain(struct hnae3_ring_chain_node *head)
+{
+	struct hnae3_ring_chain_node *chain_tmp, *chain;
+
+	chain = head->next;
+
+	while (chain) {
+		chain_tmp = chain->next;
+		kzfree(chain);
+		chain = chain_tmp;
+	}
+}
+
+/* hclge_get_ring_chain_from_mbx: get ring type & tqpid from mailbox message
+ * msg[0]: opcode
+ * msg[1]: <not relevant to this function>
+ * msg[2]: ring_num
+ * msg[3]: first ring type (TX|RX)
+ * msg[4]: first tqp id
+ * msg[5] ~ msg[14]: other ring type and tqp id
+ */
+static int hclge_get_ring_chain_from_mbx(
+			struct hclge_mbx_vf_to_pf_cmd *req,
+			struct hnae3_ring_chain_node *ring_chain,
+			struct hclge_vport *vport)
+{
+#define HCLGE_RING_NODE_VARIABLE_NUM		3
+#define HCLGE_RING_MAP_MBX_BASIC_MSG_NUM	3
+	struct hnae3_ring_chain_node *cur_chain, *new_chain;
+	int ring_num;
+	int i;
+
+	ring_num = req->msg[2];
+
+	hnae_set_bit(ring_chain->flag, HNAE3_RING_TYPE_B, req->msg[3]);
+	ring_chain->tqp_index =
+			hclge_get_queue_id(vport->nic.kinfo.tqp[req->msg[4]]);
+
+	cur_chain = ring_chain;
+
+	for (i = 1; i < ring_num; i++) {
+		new_chain = kzalloc(sizeof(*new_chain), GFP_KERNEL);
+		if (!new_chain)
+			goto err;
+
+		hnae_set_bit(new_chain->flag, HNAE3_RING_TYPE_B,
+			     req->msg[HCLGE_RING_NODE_VARIABLE_NUM * i +
+			     HCLGE_RING_MAP_MBX_BASIC_MSG_NUM]);
+
+		new_chain->tqp_index =
+		hclge_get_queue_id(vport->nic.kinfo.tqp
+			[req->msg[HCLGE_RING_NODE_VARIABLE_NUM * i +
+			HCLGE_RING_MAP_MBX_BASIC_MSG_NUM + 1]]);
+
+		cur_chain->next = new_chain;
+		cur_chain = new_chain;
+	}
+
+	return 0;
+err:
+	hclge_free_vector_ring_chain(ring_chain);
+	return -ENOMEM;
+}
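
The index arithmetic behind the two constants above, as a standalone check:
with a three-slot message header (opcode, vector id, ring count) and a
three-slot stride per ring node, ring i has its type at msg[3 * i + 3] and
its tqp id at msg[3 * i + 4], which also reproduces the msg[3]/msg[4] case
handled before the loop. A minimal sketch:

	#include <assert.h>

	int main(void)
	{
		const int stride = 3;	/* HCLGE_RING_NODE_VARIABLE_NUM */
		const int basic = 3;	/* HCLGE_RING_MAP_MBX_BASIC_MSG_NUM */

		assert(stride * 0 + basic == 3);	/* ring 0 type   -> msg[3] */
		assert(stride * 0 + basic + 1 == 4);	/* ring 0 tqp id -> msg[4] */
		assert(stride * 1 + basic == 6);	/* ring 1 type   -> msg[6] */
		assert(stride * 1 + basic + 1 == 7);	/* ring 1 tqp id -> msg[7] */
		return 0;
	}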
+
+static int hclge_map_unmap_ring_to_vf_vector(struct hclge_vport *vport, bool en,
+					     struct hclge_mbx_vf_to_pf_cmd *req)
+{
+	struct hnae3_ring_chain_node ring_chain;
+	int vector_id = req->msg[1];
+	int ret;
+
+	memset(&ring_chain, 0, sizeof(ring_chain));
+	ret = hclge_get_ring_chain_from_mbx(req, &ring_chain, vport);
+	if (ret)
+		return ret;
+
+	ret = hclge_bind_ring_with_vector(vport, vector_id, en, &ring_chain);
+
+	/* free the ring chain whether the bind succeeded or failed */
+	hclge_free_vector_ring_chain(&ring_chain);
+
+	return ret;
+}
+
+static int hclge_set_vf_promisc_mode(struct hclge_vport *vport,
+				     struct hclge_mbx_vf_to_pf_cmd *req)
+{
+	bool en = !!req->msg[1];
+	struct hclge_promisc_param param;
+
+	/* always enable broadcast promisc bit */
+	hclge_promisc_param_init(&param, en, en, true, vport->vport_id);
+	return hclge_cmd_set_promisc_mode(vport->back, &param);
+}
+
+static int hclge_set_vf_uc_mac_addr(struct hclge_vport *vport,
+				    struct hclge_mbx_vf_to_pf_cmd *mbx_req,
+				    bool gen_resp)
+{
+	const u8 *mac_addr = (const u8 *)(&mbx_req->msg[2]);
+	struct hclge_dev *hdev = vport->back;
+	int status;
+
+	if (mbx_req->msg[1] == HCLGE_MBX_MAC_VLAN_UC_MODIFY) {
+		const u8 *old_addr = (const u8 *)(&mbx_req->msg[8]);
+
+		hclge_rm_uc_addr_common(vport, old_addr);
+		status = hclge_add_uc_addr_common(vport, mac_addr);
+	} else if (mbx_req->msg[1] == HCLGE_MBX_MAC_VLAN_UC_ADD) {
+		status = hclge_add_uc_addr_common(vport, mac_addr);
+	} else if (mbx_req->msg[1] == HCLGE_MBX_MAC_VLAN_UC_REMOVE) {
+		status = hclge_rm_uc_addr_common(vport, mac_addr);
+	} else {
+		dev_err(&hdev->pdev->dev,
+			"failed to set unicast mac addr, unknown subcode %d\n",
+			mbx_req->msg[1]);
+		return -EIO;
+	}
+
+	if (gen_resp)
+		hclge_gen_resp_to_vf(vport, mbx_req, status, NULL, 0);
+
+	return 0;
+}
+
+static int hclge_set_vf_mc_mac_addr(struct hclge_vport *vport,
+				    struct hclge_mbx_vf_to_pf_cmd *mbx_req,
+				    bool gen_resp)
+{
+	const u8 *mac_addr = (const u8 *)(&mbx_req->msg[2]);
+	struct hclge_dev *hdev = vport->back;
+	int status;
+
+	if (mbx_req->msg[1] == HCLGE_MBX_MAC_VLAN_MC_ADD) {
+		status = hclge_add_mc_addr_common(vport, mac_addr);
+	} else if (mbx_req->msg[1] == HCLGE_MBX_MAC_VLAN_MC_REMOVE) {
+		status = hclge_rm_mc_addr_common(vport, mac_addr);
+	} else if (mbx_req->msg[1] == HCLGE_MBX_MAC_VLAN_MC_FUNC_MTA_ENABLE) {
+		u8 func_id = vport->vport_id;
+		bool enable = mbx_req->msg[2];
+
+		status = hclge_cfg_func_mta_filter(hdev, func_id, enable);
+	} else {
+		dev_err(&hdev->pdev->dev,
+			"failed to set mcast mac addr, unknown subcode %d\n",
+			mbx_req->msg[1]);
+		return -EIO;
+	}
+
+	if (gen_resp)
+		hclge_gen_resp_to_vf(vport, mbx_req, status, NULL, 0);
+
+	return 0;
+}
+
+static int hclge_set_vf_vlan_cfg(struct hclge_vport *vport,
+				 struct hclge_mbx_vf_to_pf_cmd *mbx_req,
+				 bool gen_resp)
+{
+	struct hclge_dev *hdev = vport->back;
+	int status = 0;
+
+	if (mbx_req->msg[1] == HCLGE_MBX_VLAN_FILTER) {
+		u16 vlan, proto;
+		bool is_kill;
+
+		is_kill = !!mbx_req->msg[2];
+		memcpy(&vlan, &mbx_req->msg[3], sizeof(vlan));
+		memcpy(&proto, &mbx_req->msg[5], sizeof(proto));
+		status = hclge_set_vf_vlan_common(hdev, vport->vport_id,
+						  is_kill, vlan, 0,
+						  cpu_to_be16(proto));
+	}
+
+	if (gen_resp)
+		status = hclge_gen_resp_to_vf(vport, mbx_req, status, NULL, 0);
+
+	return status;
+}
+
+static int hclge_get_vf_tcinfo(struct hclge_vport *vport,
+			       struct hclge_mbx_vf_to_pf_cmd *mbx_req,
+			       bool gen_resp)
+{
+	struct hclge_dev *hdev = vport->back;
+	int ret;
+
+	ret = hclge_gen_resp_to_vf(vport, mbx_req, 0, &hdev->hw_tc_map,
+				   sizeof(u8));
+
+	return ret;
+}
+
+static int hclge_get_vf_queue_info(struct hclge_vport *vport,
+				   struct hclge_mbx_vf_to_pf_cmd *mbx_req,
+				   bool gen_resp)
+{
+#define HCLGE_TQPS_RSS_INFO_LEN		8
+	u8 resp_data[HCLGE_TQPS_RSS_INFO_LEN];
+	struct hclge_dev *hdev = vport->back;
+
+	/* get the queue related info */
+	memcpy(&resp_data[0], &vport->alloc_tqps, sizeof(u16));
+	memcpy(&resp_data[2], &hdev->rss_size_max, sizeof(u16));
+	memcpy(&resp_data[4], &hdev->num_desc, sizeof(u16));
+	memcpy(&resp_data[6], &hdev->rx_buf_len, sizeof(u16));
+
+	return hclge_gen_resp_to_vf(vport, mbx_req, 0, resp_data,
+				    HCLGE_TQPS_RSS_INFO_LEN);
+}
+
+static int hclge_get_link_info(struct hclge_vport *vport,
+			       struct hclge_mbx_vf_to_pf_cmd *mbx_req)
+{
+	struct hclge_dev *hdev = vport->back;
+	u16 link_status;
+	u8 msg_data[2];
+	u8 dest_vfid;
+
+	/* mac.link can only be 0 or 1 */
+	link_status = (u16)hdev->hw.mac.link;
+	memcpy(&msg_data[0], &link_status, sizeof(u16));
+	dest_vfid = mbx_req->mbx_src_vfid;
+
+	/* send this requested info to VF */
+	return hclge_send_mbx_msg(vport, msg_data, sizeof(msg_data),
+				  HCLGE_MBX_LINK_STAT_CHANGE, dest_vfid);
+}
+
+static void hclge_reset_vf_queue(struct hclge_vport *vport,
+				 struct hclge_mbx_vf_to_pf_cmd *mbx_req)
+{
+	u16 queue_id;
+
+	memcpy(&queue_id, &mbx_req->msg[2], sizeof(queue_id));
+
+	hclge_reset_tqp(&vport->nic, queue_id);
+}
+
+void hclge_mbx_handler(struct hclge_dev *hdev)
+{
+	struct hclge_cmq_ring *crq = &hdev->hw.cmq.crq;
+	struct hclge_mbx_vf_to_pf_cmd *req;
+	struct hclge_vport *vport;
+	struct hclge_desc *desc;
+	int ret;
+
+	/* handle all the mailbox requests in the queue */
+	while (hnae_get_bit(crq->desc[crq->next_to_use].flag,
+			    HCLGE_CMDQ_RX_OUTVLD_B)) {
+		desc = &crq->desc[crq->next_to_use];
+		req = (struct hclge_mbx_vf_to_pf_cmd *)desc->data;
+
+		vport = &hdev->vport[req->mbx_src_vfid];
+
+		switch (req->msg[0]) {
+		case HCLGE_MBX_MAP_RING_TO_VECTOR:
+			ret = hclge_map_unmap_ring_to_vf_vector(vport, true,
+								req);
+			break;
+		case HCLGE_MBX_UNMAP_RING_TO_VECTOR:
+			ret = hclge_map_unmap_ring_to_vf_vector(vport, false,
+								req);
+			break;
+		case HCLGE_MBX_SET_PROMISC_MODE:
+			ret = hclge_set_vf_promisc_mode(vport, req);
+			if (ret)
+				dev_err(&hdev->pdev->dev,
+					"PF fail(%d) to set VF promisc mode\n",
+					ret);
+			break;
+		case HCLGE_MBX_SET_UNICAST:
+			ret = hclge_set_vf_uc_mac_addr(vport, req, false);
+			if (ret)
+				dev_err(&hdev->pdev->dev,
+					"PF fail(%d) to set VF UC MAC Addr\n",
+					ret);
+			break;
+		case HCLGE_MBX_SET_MULTICAST:
+			ret = hclge_set_vf_mc_mac_addr(vport, req, false);
+			if (ret)
+				dev_err(&hdev->pdev->dev,
+					"PF fail(%d) to set VF MC MAC Addr\n",
+					ret);
+			break;
+		case HCLGE_MBX_SET_VLAN:
+			ret = hclge_set_vf_vlan_cfg(vport, req, false);
+			if (ret)
+				dev_err(&hdev->pdev->dev,
+					"PF failed(%d) to config VF's VLAN\n",
+					ret);
+			break;
+		case HCLGE_MBX_GET_QINFO:
+			ret = hclge_get_vf_queue_info(vport, req, true);
+			if (ret)
+				dev_err(&hdev->pdev->dev,
+					"PF failed(%d) to get Q info for VF\n",
+					ret);
+			break;
+		case HCLGE_MBX_GET_TCINFO:
+			ret = hclge_get_vf_tcinfo(vport, req, true);
+			if (ret)
+				dev_err(&hdev->pdev->dev,
+					"PF failed(%d) to get TC info for VF\n",
+					ret);
+			break;
+		case HCLGE_MBX_GET_LINK_STATUS:
+			ret = hclge_get_link_info(vport, req);
+			if (ret)
+				dev_err(&hdev->pdev->dev,
+					"PF fail(%d) to get link stat for VF\n",
+					ret);
+			break;
+		case HCLGE_MBX_QUEUE_RESET:
+			hclge_reset_vf_queue(vport, req);
+			break;
+		default:
+			dev_err(&hdev->pdev->dev,
+				"unsupported mailbox message, code = %d\n",
+				req->msg[0]);
+			break;
+		}
+		hclge_mbx_ring_ptr_move_crq(crq);
+	}
+
+	/* Write back CMDQ_RQ header pointer; M7 needs this pointer */
+	hclge_write_dev(&hdev->hw, HCLGE_NIC_CRQ_HEAD_REG, crq->next_to_use);
+}
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c
index 7069e94..c1dea3a 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c
@@ -17,6 +17,7 @@
 #define HCLGE_PHY_SUPPORTED_FEATURES	(SUPPORTED_Autoneg | \
 					 SUPPORTED_TP | \
 					 SUPPORTED_Pause | \
+					 SUPPORTED_Asym_Pause | \
 					 PHY_10BT_FEATURES | \
 					 PHY_100BT_FEATURES | \
 					 PHY_1000BT_FEATURES)
@@ -183,6 +184,10 @@ static void hclge_mac_adjust_link(struct net_device *netdev)
 	ret = hclge_cfg_mac_speed_dup(hdev, speed, duplex);
 	if (ret)
 		netdev_err(netdev, "failed to adjust link.\n");
+
+	ret = hclge_cfg_flowctrl(hdev);
+	if (ret)
+		netdev_err(netdev, "failed to configure flow control.\n");
 }
 
 int hclge_mac_start_phy(struct hclge_dev *hdev)
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c
index 7bfa2e5..36bd79a 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c
@@ -23,8 +23,8 @@ enum hclge_shaper_level {
 	HCLGE_SHAPER_LVL_PF	= 1,
 };
 
-#define HCLGE_SHAPER_BS_U_DEF	1
-#define HCLGE_SHAPER_BS_S_DEF	4
+#define HCLGE_SHAPER_BS_U_DEF	5
+#define HCLGE_SHAPER_BS_S_DEF	20
 
 #define HCLGE_ETHER_MAX_RATE	100000
 
@@ -112,7 +112,7 @@ static int hclge_shaper_para_calc(u32 ir, u8 shaper_level,
 	return 0;
 }
 
-static int hclge_mac_pause_en_cfg(struct hclge_dev *hdev, bool tx, bool rx)
+int hclge_mac_pause_en_cfg(struct hclge_dev *hdev, bool tx, bool rx)
 {
 	struct hclge_desc desc;
 
@@ -138,6 +138,46 @@ static int hclge_pfc_pause_en_cfg(struct hclge_dev *hdev, u8 tx_rx_bitmap,
 	return hclge_cmd_send(&hdev->hw, &desc, 1);
 }
 
+static int hclge_mac_pause_param_cfg(struct hclge_dev *hdev, const u8 *addr,
+				     u8 pause_trans_gap, u16 pause_trans_time)
+{
+	struct hclge_cfg_pause_param_cmd *pause_param;
+	struct hclge_desc desc;
+
+	pause_param = (struct hclge_cfg_pause_param_cmd *)&desc.data;
+
+	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_MAC_PARA, false);
+
+	ether_addr_copy(pause_param->mac_addr, addr);
+	pause_param->pause_trans_gap = pause_trans_gap;
+	pause_param->pause_trans_time = cpu_to_le16(pause_trans_time);
+
+	return hclge_cmd_send(&hdev->hw, &desc, 1);
+}
+
+int hclge_mac_pause_addr_cfg(struct hclge_dev *hdev, const u8 *mac_addr)
+{
+	struct hclge_cfg_pause_param_cmd *pause_param;
+	struct hclge_desc desc;
+	u16 trans_time;
+	u8 trans_gap;
+	int ret;
+
+	pause_param = (struct hclge_cfg_pause_param_cmd *)&desc.data;
+
+	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_MAC_PARA, true);
+
+	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+	if (ret)
+		return ret;
+
+	trans_gap = pause_param->pause_trans_gap;
+	trans_time = le16_to_cpu(pause_param->pause_trans_time);
+
+	return hclge_mac_pause_param_cfg(hdev, mac_addr, trans_gap,
+					 trans_time);
+}
+
 static int hclge_fill_pri_array(struct hclge_dev *hdev, u8 *pri, u8 pri_id)
 {
 	u8 tc;
@@ -1056,6 +1096,15 @@ static int hclge_tm_schd_setup_hw(struct hclge_dev *hdev)
 	return hclge_tm_schd_mode_hw(hdev);
 }
 
+static int hclge_mac_pause_param_setup_hw(struct hclge_dev *hdev)
+{
+	struct hclge_mac *mac = &hdev->hw.mac;
+
+	return hclge_mac_pause_param_cfg(hdev, mac->mac_addr,
+					 HCLGE_DEFAULT_PAUSE_TRANS_GAP,
+					 HCLGE_DEFAULT_PAUSE_TRANS_TIME);
+}
+
 static int hclge_pfc_setup_hw(struct hclge_dev *hdev)
 {
 	u8 enable_bitmap = 0;
@@ -1102,8 +1151,13 @@ int hclge_pause_setup_hw(struct hclge_dev *hdev)
 	int ret;
 	u8 i;
 
-	if (hdev->tm_info.fc_mode != HCLGE_FC_PFC)
-		return hclge_mac_pause_setup_hw(hdev);
+	if (hdev->tm_info.fc_mode != HCLGE_FC_PFC) {
+		ret = hclge_mac_pause_setup_hw(hdev);
+		if (ret)
+			return ret;
+
+		return hclge_mac_pause_param_setup_hw(hdev);
+	}
 
 	/* Only DCB-supported dev supports qset back pressure and pfc cmd */
 	if (!hnae3_dev_dcb_supported(hdev))
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h
index bf59961..5401e75 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h
@@ -18,6 +18,9 @@
 
 #define HCLGE_TM_PORT_BASE_MODE_MSK	BIT(0)
 
+#define HCLGE_DEFAULT_PAUSE_TRANS_GAP	0xFF
+#define HCLGE_DEFAULT_PAUSE_TRANS_TIME	0xFFFF
+
 /* SP or DWRR */
 #define HCLGE_TM_TX_SCHD_DWRR_MSK	BIT(0)
 #define HCLGE_TM_TX_SCHD_SP_MSK		(0xFE)
@@ -99,6 +102,13 @@ struct hclge_pfc_en_cmd {
 	u8 pri_en_bitmap;
 };
 
+struct hclge_cfg_pause_param_cmd {
+	u8 mac_addr[ETH_ALEN];
+	u8 pause_trans_gap;
+	u8 rsvd;
+	__le16 pause_trans_time;
+};
+
 struct hclge_port_shapping_cmd {
 	__le32 port_shapping_para;
 };
@@ -118,4 +128,6 @@ void hclge_tm_schd_info_update(struct hclge_dev *hdev, u8 num_tc);
 int hclge_tm_dwrr_cfg(struct hclge_dev *hdev);
 int hclge_tm_map_cfg(struct hclge_dev *hdev);
 int hclge_tm_init_hw(struct hclge_dev *hdev);
+int hclge_mac_pause_en_cfg(struct hclge_dev *hdev, bool tx, bool rx);
+int hclge_mac_pause_addr_cfg(struct hclge_dev *hdev, const u8 *mac_addr);
 #endif
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/Makefile b/drivers/net/ethernet/hisilicon/hns3/hns3vf/Makefile
new file mode 100644
index 0000000..fb93bbd
--- /dev/null
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/Makefile
@@ -0,0 +1,9 @@
+# SPDX-License-Identifier: GPL-2.0+
+#
+# Makefile for the HISILICON network device drivers.
+#
+
+ccflags-y := -Idrivers/net/ethernet/hisilicon/hns3
+
+obj-$(CONFIG_HNS3_HCLGEVF) += hclgevf.o
+hclgevf-objs = hclgevf_main.o hclgevf_cmd.o hclgevf_mbx.o
\ No newline at end of file
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c
new file mode 100644
index 0000000..85985e7
--- /dev/null
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c
@@ -0,0 +1,342 @@
+// SPDX-License-Identifier: GPL-2.0+
+// Copyright (c) 2016-2017 Hisilicon Limited.
+
+#include <linux/device.h>
+#include <linux/dma-direction.h>
+#include <linux/dma-mapping.h>
+#include <linux/err.h>
+#include <linux/pci.h>
+#include <linux/slab.h>
+#include "hclgevf_cmd.h"
+#include "hclgevf_main.h"
+#include "hnae3.h"
+
+#define hclgevf_is_csq(ring) ((ring)->flag & HCLGEVF_TYPE_CSQ)
+#define hclgevf_ring_to_dma_dir(ring) (hclgevf_is_csq(ring) ? \
+					DMA_TO_DEVICE : DMA_FROM_DEVICE)
+#define cmq_ring_to_dev(ring)   (&(ring)->dev->pdev->dev)
+
+static int hclgevf_ring_space(struct hclgevf_cmq_ring *ring)
+{
+	int ntc = ring->next_to_clean;
+	int ntu = ring->next_to_use;
+	int used;
+
+	used = (ntu - ntc + ring->desc_num) % ring->desc_num;
+
+	return ring->desc_num - used - 1;
+}
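
The modular expression above counts in-flight descriptors correctly even
after next_to_use has wrapped past next_to_clean, and the trailing '- 1'
keeps one slot permanently reserved so a full ring can be told apart from
an empty one. A standalone model of the arithmetic (1024 matches
HCLGEVF_NIC_CMQ_DESC_NUM defined later in hclgevf_cmd.h):

	#include <assert.h>

	static int ring_space(int ntu, int ntc, int desc_num)
	{
		int used = (ntu - ntc + desc_num) % desc_num;

		return desc_num - used - 1;
	}

	int main(void)
	{
		assert(ring_space(0, 0, 1024) == 1023);		/* empty ring */
		assert(ring_space(5, 2, 1024) == 1020);		/* 3 in flight */
		assert(ring_space(1, 1023, 1024) == 1021);	/* wrapped, 2 in flight */
		return 0;
	}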
+
+static int hclgevf_cmd_csq_clean(struct hclgevf_hw *hw)
+{
+	struct hclgevf_cmq_ring *csq = &hw->cmq.csq;
+	u16 ntc = csq->next_to_clean;
+	struct hclgevf_desc *desc;
+	int clean = 0;
+	u32 head;
+
+	desc = &csq->desc[ntc];
+	head = hclgevf_read_dev(hw, HCLGEVF_NIC_CSQ_HEAD_REG);
+	while (head != ntc) {
+		memset(desc, 0, sizeof(*desc));
+		ntc++;
+		if (ntc == csq->desc_num)
+			ntc = 0;
+		desc = &csq->desc[ntc];
+		clean++;
+	}
+	csq->next_to_clean = ntc;
+
+	return clean;
+}
+
+static bool hclgevf_cmd_csq_done(struct hclgevf_hw *hw)
+{
+	u32 head;
+
+	head = hclgevf_read_dev(hw, HCLGEVF_NIC_CSQ_HEAD_REG);
+
+	return head == hw->cmq.csq.next_to_use;
+}
+
+static bool hclgevf_is_special_opcode(u16 opcode)
+{
+	u16 spec_opcode[] = {0x30, 0x31, 0x32};
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(spec_opcode); i++) {
+		if (spec_opcode[i] == opcode)
+			return true;
+	}
+
+	return false;
+}
+
+static int hclgevf_alloc_cmd_desc(struct hclgevf_cmq_ring *ring)
+{
+	int size = ring->desc_num * sizeof(struct hclgevf_desc);
+
+	ring->desc = kzalloc(size, GFP_KERNEL);
+	if (!ring->desc)
+		return -ENOMEM;
+
+	ring->desc_dma_addr = dma_map_single(cmq_ring_to_dev(ring), ring->desc,
+					     size, DMA_BIDIRECTIONAL);
+
+	if (dma_mapping_error(cmq_ring_to_dev(ring), ring->desc_dma_addr)) {
+		ring->desc_dma_addr = 0;
+		kfree(ring->desc);
+		ring->desc = NULL;
+		return -ENOMEM;
+	}
+
+	return 0;
+}
+
+static void hclgevf_free_cmd_desc(struct hclgevf_cmq_ring *ring)
+{
+	dma_unmap_single(cmq_ring_to_dev(ring), ring->desc_dma_addr,
+			 ring->desc_num * sizeof(ring->desc[0]),
+			 hclgevf_ring_to_dma_dir(ring));
+
+	ring->desc_dma_addr = 0;
+	kfree(ring->desc);
+	ring->desc = NULL;
+}
+
+static int hclgevf_init_cmd_queue(struct hclgevf_dev *hdev,
+				  struct hclgevf_cmq_ring *ring)
+{
+	struct hclgevf_hw *hw = &hdev->hw;
+	int ring_type = ring->flag;
+	u32 reg_val;
+	int ret;
+
+	ring->desc_num = HCLGEVF_NIC_CMQ_DESC_NUM;
+	spin_lock_init(&ring->lock);
+	ring->next_to_clean = 0;
+	ring->next_to_use = 0;
+	ring->dev = hdev;
+
+	/* allocate CSQ/CRQ descriptor */
+	ret = hclgevf_alloc_cmd_desc(ring);
+	if (ret) {
+		dev_err(&hdev->pdev->dev, "failed(%d) to alloc %s desc\n", ret,
+			(ring_type == HCLGEVF_TYPE_CSQ) ? "CSQ" : "CRQ");
+		return ret;
+	}
+
+	/* initialize the hardware registers with csq/crq dma-address,
+	 * descriptor number, head & tail pointers
+	 */
+	switch (ring_type) {
+	case HCLGEVF_TYPE_CSQ:
+		reg_val = (u32)ring->desc_dma_addr;
+		hclgevf_write_dev(hw, HCLGEVF_NIC_CSQ_BASEADDR_L_REG, reg_val);
+		reg_val = (u32)((ring->desc_dma_addr >> 31) >> 1);
+		hclgevf_write_dev(hw, HCLGEVF_NIC_CSQ_BASEADDR_H_REG, reg_val);
+
+		reg_val = (ring->desc_num >> HCLGEVF_NIC_CMQ_DESC_NUM_S);
+		reg_val |= HCLGEVF_NIC_CMQ_ENABLE;
+		hclgevf_write_dev(hw, HCLGEVF_NIC_CSQ_DEPTH_REG, reg_val);
+
+		hclgevf_write_dev(hw, HCLGEVF_NIC_CSQ_TAIL_REG, 0);
+		hclgevf_write_dev(hw, HCLGEVF_NIC_CSQ_HEAD_REG, 0);
+		break;
+	case HCLGEVF_TYPE_CRQ:
+		reg_val = (u32)ring->desc_dma_addr;
+		hclgevf_write_dev(hw, HCLGEVF_NIC_CRQ_BASEADDR_L_REG, reg_val);
+		reg_val = (u32)((ring->desc_dma_addr >> 31) >> 1);
+		hclgevf_write_dev(hw, HCLGEVF_NIC_CRQ_BASEADDR_H_REG, reg_val);
+
+		reg_val = (ring->desc_num >> HCLGEVF_NIC_CMQ_DESC_NUM_S);
+		reg_val |= HCLGEVF_NIC_CMQ_ENABLE;
+		hclgevf_write_dev(hw, HCLGEVF_NIC_CRQ_DEPTH_REG, reg_val);
+
+		hclgevf_write_dev(hw, HCLGEVF_NIC_CRQ_TAIL_REG, 0);
+		hclgevf_write_dev(hw, HCLGEVF_NIC_CRQ_HEAD_REG, 0);
+		break;
+	}
+
+	return 0;
+}
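
A note on the base-address programming above: the low register takes the
low 32 bits of the DMA address and the high register the remainder, and the
'(addr >> 31) >> 1' form is split into two shifts so the expression stays
well defined even when dma_addr_t is only 32 bits wide (a plain '>> 32'
would be undefined behaviour there). A host-side model of the split:

	#include <assert.h>
	#include <stdint.h>

	int main(void)
	{
		uint64_t dma = 0x0000001234567000ULL;	/* example address */
		uint32_t lo = (uint32_t)dma;
		uint32_t hi = (uint32_t)((dma >> 31) >> 1);

		assert(lo == 0x34567000u);
		assert(hi == 0x12u);
		assert((((uint64_t)hi << 32) | lo) == dma);
		return 0;
	}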
+
+void hclgevf_cmd_setup_basic_desc(struct hclgevf_desc *desc,
+				  enum hclgevf_opcode_type opcode, bool is_read)
+{
+	memset(desc, 0, sizeof(struct hclgevf_desc));
+	desc->opcode = cpu_to_le16(opcode);
+	desc->flag = cpu_to_le16(HCLGEVF_CMD_FLAG_NO_INTR |
+				 HCLGEVF_CMD_FLAG_IN);
+	if (is_read)
+		desc->flag |= cpu_to_le16(HCLGEVF_CMD_FLAG_WR);
+	else
+		desc->flag &= cpu_to_le16(~HCLGEVF_CMD_FLAG_WR);
+}
+
+/* hclgevf_cmd_send - send command to command queue
+ * @hw: pointer to the hw struct
+ * @desc: prefilled descriptor for describing the command
+ * @num : the number of descriptors to be sent
+ *
+ * This is the main send routine for the command queue: it posts the
+ * descriptors, waits for completion when required and cleans the queue
+ * afterwards.
+ */
+int hclgevf_cmd_send(struct hclgevf_hw *hw, struct hclgevf_desc *desc, int num)
+{
+	struct hclgevf_dev *hdev = (struct hclgevf_dev *)hw->hdev;
+	struct hclgevf_desc *desc_to_use;
+	bool complete = false;
+	u32 timeout = 0;
+	int handle = 0;
+	int status = 0;
+	u16 retval;
+	u16 opcode;
+	int ntc;
+
+	spin_lock_bh(&hw->cmq.csq.lock);
+
+	if (num > hclgevf_ring_space(&hw->cmq.csq)) {
+		spin_unlock_bh(&hw->cmq.csq.lock);
+		return -EBUSY;
+	}
+
+	/* Record the location of desc in the ring for this time
+	 * which will be used for hardware to write back
+	 */
+	ntc = hw->cmq.csq.next_to_use;
+	opcode = le16_to_cpu(desc[0].opcode);
+	while (handle < num) {
+		desc_to_use = &hw->cmq.csq.desc[hw->cmq.csq.next_to_use];
+		*desc_to_use = desc[handle];
+		(hw->cmq.csq.next_to_use)++;
+		if (hw->cmq.csq.next_to_use == hw->cmq.csq.desc_num)
+			hw->cmq.csq.next_to_use = 0;
+		handle++;
+	}
+
+	/* Write to hardware */
+	hclgevf_write_dev(hw, HCLGEVF_NIC_CSQ_TAIL_REG,
+			  hw->cmq.csq.next_to_use);
+
+	/* If the command is sync, wait for the firmware to write back;
+	 * if multiple descriptors are to be sent, use the first one to
+	 * check completion.
+	 */
+	if (HCLGEVF_SEND_SYNC(le16_to_cpu(desc->flag))) {
+		do {
+			if (hclgevf_cmd_csq_done(hw))
+				break;
+			udelay(1);
+			timeout++;
+		} while (timeout < hw->cmq.tx_timeout);
+	}
+
+	if (hclgevf_cmd_csq_done(hw)) {
+		complete = true;
+		handle = 0;
+
+		while (handle < num) {
+			/* Get the result of hardware write back */
+			desc_to_use = &hw->cmq.csq.desc[ntc];
+			desc[handle] = *desc_to_use;
+
+			if (likely(!hclgevf_is_special_opcode(opcode)))
+				retval = le16_to_cpu(desc[handle].retval);
+			else
+				retval = le16_to_cpu(desc[0].retval);
+
+			if ((enum hclgevf_cmd_return_status)retval ==
+			    HCLGEVF_CMD_EXEC_SUCCESS)
+				status = 0;
+			else
+				status = -EIO;
+			hw->cmq.last_status = (enum hclgevf_cmd_status)retval;
+			ntc++;
+			handle++;
+			if (ntc == hw->cmq.csq.desc_num)
+				ntc = 0;
+		}
+	}
+
+	if (!complete)
+		status = -EAGAIN;
+
+	/* Clean the command send queue */
+	handle = hclgevf_cmd_csq_clean(hw);
+	if (handle != num) {
+		dev_warn(&hdev->pdev->dev,
+			 "cleaned %d, need to clean %d\n", handle, num);
+	}
+
+	spin_unlock_bh(&hw->cmq.csq.lock);
+
+	return status;
+}
+
+static int hclgevf_cmd_query_firmware_version(struct hclgevf_hw *hw,
+					      u32 *version)
+{
+	struct hclgevf_query_version_cmd *resp;
+	struct hclgevf_desc desc;
+	int status;
+
+	resp = (struct hclgevf_query_version_cmd *)desc.data;
+
+	hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_QUERY_FW_VER, true);
+	status = hclgevf_cmd_send(hw, &desc, 1);
+	if (!status)
+		*version = le32_to_cpu(resp->firmware);
+
+	return status;
+}
+
+int hclgevf_cmd_init(struct hclgevf_dev *hdev)
+{
+	u32 version;
+	int ret;
+
+	/* setup Tx write back timeout */
+	hdev->hw.cmq.tx_timeout = HCLGEVF_CMDQ_TX_TIMEOUT;
+
+	/* setup queue CSQ/CRQ rings */
+	hdev->hw.cmq.csq.flag = HCLGEVF_TYPE_CSQ;
+	ret = hclgevf_init_cmd_queue(hdev, &hdev->hw.cmq.csq);
+	if (ret) {
+		dev_err(&hdev->pdev->dev,
+			"failed(%d) to initialize CSQ ring\n", ret);
+		return ret;
+	}
+
+	hdev->hw.cmq.crq.flag = HCLGEVF_TYPE_CRQ;
+	ret = hclgevf_init_cmd_queue(hdev, &hdev->hw.cmq.crq);
+	if (ret) {
+		dev_err(&hdev->pdev->dev,
+			"failed(%d) to initialize CRQ ring\n", ret);
+		goto err_csq;
+	}
+
+	/* get firmware version */
+	ret = hclgevf_cmd_query_firmware_version(&hdev->hw, &version);
+	if (ret) {
+		dev_err(&hdev->pdev->dev,
+			"failed(%d) to query firmware version\n", ret);
+		goto err_crq;
+	}
+	hdev->fw_version = version;
+
+	dev_info(&hdev->pdev->dev, "The firmware version is %08x\n", version);
+
+	return 0;
+err_crq:
+	hclgevf_free_cmd_desc(&hdev->hw.cmq.crq);
+err_csq:
+	hclgevf_free_cmd_desc(&hdev->hw.cmq.csq);
+
+	return ret;
+}
+
+void hclgevf_cmd_uninit(struct hclgevf_dev *hdev)
+{
+	hclgevf_free_cmd_desc(&hdev->hw.cmq.csq);
+	hclgevf_free_cmd_desc(&hdev->hw.cmq.crq);
+}
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.h b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.h
new file mode 100644
index 0000000..ad8adfe
--- /dev/null
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.h
@@ -0,0 +1,256 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/* Copyright (c) 2016-2017 Hisilicon Limited. */
+
+#ifndef __HCLGEVF_CMD_H
+#define __HCLGEVF_CMD_H
+#include <linux/io.h>
+#include <linux/types.h>
+#include "hnae3.h"
+
+#define HCLGEVF_CMDQ_TX_TIMEOUT		200
+#define HCLGEVF_CMDQ_RX_INVLD_B		0
+#define HCLGEVF_CMDQ_RX_OUTVLD_B	1
+
+struct hclgevf_hw;
+struct hclgevf_dev;
+
+struct hclgevf_desc {
+	__le16 opcode;
+	__le16 flag;
+	__le16 retval;
+	__le16 rsv;
+	__le32 data[6];
+};
+
+struct hclgevf_desc_cb {
+	dma_addr_t dma;
+	void *va;
+	u32 length;
+};
+
+struct hclgevf_cmq_ring {
+	dma_addr_t desc_dma_addr;
+	struct hclgevf_desc *desc;
+	struct hclgevf_desc_cb *desc_cb;
+	struct hclgevf_dev  *dev;
+	u32 head;
+	u32 tail;
+
+	u16 buf_size;
+	u16 desc_num;
+	int next_to_use;
+	int next_to_clean;
+	u8 flag;
+	spinlock_t lock; /* Command queue lock */
+};
+
+enum hclgevf_cmd_return_status {
+	HCLGEVF_CMD_EXEC_SUCCESS	= 0,
+	HCLGEVF_CMD_NO_AUTH	= 1,
+	HCLGEVF_CMD_NOT_EXEC	= 2,
+	HCLGEVF_CMD_QUEUE_FULL	= 3,
+};
+
+enum hclgevf_cmd_status {
+	HCLGEVF_STATUS_SUCCESS	= 0,
+	HCLGEVF_ERR_CSQ_FULL	= -1,
+	HCLGEVF_ERR_CSQ_TIMEOUT	= -2,
+	HCLGEVF_ERR_CSQ_ERROR	= -3
+};
+
+struct hclgevf_cmq {
+	struct hclgevf_cmq_ring csq;
+	struct hclgevf_cmq_ring crq;
+	u16 tx_timeout; /* Tx timeout */
+	enum hclgevf_cmd_status last_status;
+};
+
+#define HCLGEVF_CMD_FLAG_IN_VALID_SHIFT		0
+#define HCLGEVF_CMD_FLAG_OUT_VALID_SHIFT	1
+#define HCLGEVF_CMD_FLAG_NEXT_SHIFT		2
+#define HCLGEVF_CMD_FLAG_WR_OR_RD_SHIFT		3
+#define HCLGEVF_CMD_FLAG_NO_INTR_SHIFT		4
+#define HCLGEVF_CMD_FLAG_ERR_INTR_SHIFT		5
+
+#define HCLGEVF_CMD_FLAG_IN		BIT(HCLGEVF_CMD_FLAG_IN_VALID_SHIFT)
+#define HCLGEVF_CMD_FLAG_OUT		BIT(HCLGEVF_CMD_FLAG_OUT_VALID_SHIFT)
+#define HCLGEVF_CMD_FLAG_NEXT		BIT(HCLGEVF_CMD_FLAG_NEXT_SHIFT)
+#define HCLGEVF_CMD_FLAG_WR		BIT(HCLGEVF_CMD_FLAG_WR_OR_RD_SHIFT)
+#define HCLGEVF_CMD_FLAG_NO_INTR	BIT(HCLGEVF_CMD_FLAG_NO_INTR_SHIFT)
+#define HCLGEVF_CMD_FLAG_ERR_INTR	BIT(HCLGEVF_CMD_FLAG_ERR_INTR_SHIFT)
+
+enum hclgevf_opcode_type {
+	/* Generic command */
+	HCLGEVF_OPC_QUERY_FW_VER	= 0x0001,
+	/* TQP command */
+	HCLGEVF_OPC_QUERY_TX_STATUS	= 0x0B03,
+	HCLGEVF_OPC_QUERY_RX_STATUS	= 0x0B13,
+	HCLGEVF_OPC_CFG_COM_TQP_QUEUE	= 0x0B20,
+	/* TSO cmd */
+	HCLGEVF_OPC_TSO_GENERIC_CONFIG	= 0x0C01,
+	/* RSS cmd */
+	HCLGEVF_OPC_RSS_GENERIC_CONFIG	= 0x0D01,
+	HCLGEVF_OPC_RSS_INDIR_TABLE	= 0x0D07,
+	HCLGEVF_OPC_RSS_TC_MODE		= 0x0D08,
+	/* Mailbox cmd */
+	HCLGEVF_OPC_MBX_VF_TO_PF	= 0x2001,
+};
+
+#define HCLGEVF_TQP_REG_OFFSET		0x80000
+#define HCLGEVF_TQP_REG_SIZE		0x200
+
+struct hclgevf_tqp_map {
+	__le16 tqp_id;	/* Absolute tqp id in this pf */
+	u8 tqp_vf; /* VF id */
+#define HCLGEVF_TQP_MAP_TYPE_PF		0
+#define HCLGEVF_TQP_MAP_TYPE_VF		1
+#define HCLGEVF_TQP_MAP_TYPE_B		0
+#define HCLGEVF_TQP_MAP_EN_B		1
+	u8 tqp_flag;	/* Indicate it's pf or vf tqp */
+	__le16 tqp_vid; /* Virtual id in this pf/vf */
+	u8 rsv[18];
+};
+
+#define HCLGEVF_VECTOR_ELEMENTS_PER_CMD	10
+
+enum hclgevf_int_type {
+	HCLGEVF_INT_TX = 0,
+	HCLGEVF_INT_RX,
+	HCLGEVF_INT_EVENT,
+};
+
+struct hclgevf_ctrl_vector_chain {
+	u8 int_vector_id;
+	u8 int_cause_num;
+#define HCLGEVF_INT_TYPE_S	0
+#define HCLGEVF_INT_TYPE_M	0x3
+#define HCLGEVF_TQP_ID_S	2
+#define HCLGEVF_TQP_ID_M	(0x3fff << HCLGEVF_TQP_ID_S)
+	__le16 tqp_type_and_id[HCLGEVF_VECTOR_ELEMENTS_PER_CMD];
+	u8 vfid;
+	u8 resv;
+};
+
+struct hclgevf_query_version_cmd {
+	__le32 firmware;
+	__le32 firmware_rsv[5];
+};
+
+#define HCLGEVF_RSS_HASH_KEY_OFFSET	4
+#define HCLGEVF_RSS_HASH_KEY_NUM	16
+struct hclgevf_rss_config_cmd {
+	u8 hash_config;
+	u8 rsv[7];
+	u8 hash_key[HCLGEVF_RSS_HASH_KEY_NUM];
+};
+
+struct hclgevf_rss_input_tuple_cmd {
+	u8 ipv4_tcp_en;
+	u8 ipv4_udp_en;
+	u8 ipv4_stcp_en;
+	u8 ipv4_fragment_en;
+	u8 ipv6_tcp_en;
+	u8 ipv6_udp_en;
+	u8 ipv6_stcp_en;
+	u8 ipv6_fragment_en;
+	u8 rsv[16];
+};
+
+#define HCLGEVF_RSS_CFG_TBL_SIZE	16
+
+struct hclgevf_rss_indirection_table_cmd {
+	u16 start_table_index;
+	u16 rss_set_bitmap;
+	u8 rsv[4];
+	u8 rss_result[HCLGEVF_RSS_CFG_TBL_SIZE];
+};
+
+#define HCLGEVF_RSS_TC_OFFSET_S		0
+#define HCLGEVF_RSS_TC_OFFSET_M		(0x3ff << HCLGEVF_RSS_TC_OFFSET_S)
+#define HCLGEVF_RSS_TC_SIZE_S		12
+#define HCLGEVF_RSS_TC_SIZE_M		(0x7 << HCLGEVF_RSS_TC_SIZE_S)
+#define HCLGEVF_RSS_TC_VALID_B		15
+#define HCLGEVF_MAX_TC_NUM		8
+struct hclgevf_rss_tc_mode_cmd {
+	u16 rss_tc_mode[HCLGEVF_MAX_TC_NUM];
+	u8 rsv[8];
+};
+
+#define HCLGEVF_LINK_STS_B	0
+#define HCLGEVF_LINK_STATUS	BIT(HCLGEVF_LINK_STS_B)
+struct hclgevf_link_status_cmd {
+	u8 status;
+	u8 rsv[23];
+};
+
+#define HCLGEVF_RING_ID_MASK	0x3ff
+#define HCLGEVF_TQP_ENABLE_B	0
+
+struct hclgevf_cfg_com_tqp_queue_cmd {
+	__le16 tqp_id;
+	__le16 stream_id;
+	u8 enable;
+	u8 rsv[19];
+};
+
+struct hclgevf_cfg_tx_queue_pointer_cmd {
+	__le16 tqp_id;
+	__le16 tx_tail;
+	__le16 tx_head;
+	__le16 fbd_num;
+	__le16 ring_offset;
+	u8 rsv[14];
+};
+
+#define HCLGEVF_TSO_ENABLE_B	0
+struct hclgevf_cfg_tso_status_cmd {
+	u8 tso_enable;
+	u8 rsv[23];
+};
+
+#define HCLGEVF_TYPE_CRQ		0
+#define HCLGEVF_TYPE_CSQ		1
+#define HCLGEVF_NIC_CSQ_BASEADDR_L_REG	0x27000
+#define HCLGEVF_NIC_CSQ_BASEADDR_H_REG	0x27004
+#define HCLGEVF_NIC_CSQ_DEPTH_REG	0x27008
+#define HCLGEVF_NIC_CSQ_TAIL_REG	0x27010
+#define HCLGEVF_NIC_CSQ_HEAD_REG	0x27014
+#define HCLGEVF_NIC_CRQ_BASEADDR_L_REG	0x27018
+#define HCLGEVF_NIC_CRQ_BASEADDR_H_REG	0x2701c
+#define HCLGEVF_NIC_CRQ_DEPTH_REG	0x27020
+#define HCLGEVF_NIC_CRQ_TAIL_REG	0x27024
+#define HCLGEVF_NIC_CRQ_HEAD_REG	0x27028
+#define HCLGEVF_NIC_CMQ_EN_B		16
+#define HCLGEVF_NIC_CMQ_ENABLE		BIT(HCLGEVF_NIC_CMQ_EN_B)
+#define HCLGEVF_NIC_CMQ_DESC_NUM	1024
+#define HCLGEVF_NIC_CMQ_DESC_NUM_S	3
+#define HCLGEVF_NIC_CMDQ_INT_SRC_REG	0x27100
+
+static inline void hclgevf_write_reg(void __iomem *base, u32 reg, u32 value)
+{
+	writel(value, base + reg);
+}
+
+static inline u32 hclgevf_read_reg(u8 __iomem *base, u32 reg)
+{
+	u8 __iomem *reg_addr = READ_ONCE(base);
+
+	return readl(reg_addr + reg);
+}
+
+#define hclgevf_write_dev(a, reg, value) \
+	hclgevf_write_reg((a)->io_base, (reg), (value))
+#define hclgevf_read_dev(a, reg) \
+	hclgevf_read_reg((a)->io_base, (reg))
+
+#define HCLGEVF_SEND_SYNC(flag) \
+	((flag) & HCLGEVF_CMD_FLAG_NO_INTR)
+
+int hclgevf_cmd_init(struct hclgevf_dev *hdev);
+void hclgevf_cmd_uninit(struct hclgevf_dev *hdev);
+
+int hclgevf_cmd_send(struct hclgevf_hw *hw, struct hclgevf_desc *desc, int num);
+void hclgevf_cmd_setup_basic_desc(struct hclgevf_desc *desc,
+				  enum hclgevf_opcode_type opcode,
+				  bool is_read);
+#endif
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
new file mode 100644
index 0000000..655f522
--- /dev/null
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
@@ -0,0 +1,1490 @@
+// SPDX-License-Identifier: GPL-2.0+
+// Copyright (c) 2016-2017 Hisilicon Limited.
+
+#include <linux/etherdevice.h>
+#include "hclgevf_cmd.h"
+#include "hclgevf_main.h"
+#include "hclge_mbx.h"
+#include "hnae3.h"
+
+#define HCLGEVF_NAME	"hclgevf"
+
+static struct hnae3_ae_algo ae_algovf;
+
+static const struct pci_device_id ae_algovf_pci_tbl[] = {
+	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_VF), 0},
+	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_DCB_PFC_VF), 0},
+	/* required last entry */
+	{0, }
+};
+
+static inline struct hclgevf_dev *hclgevf_ae_get_hdev(
+	struct hnae3_handle *handle)
+{
+	return container_of(handle, struct hclgevf_dev, nic);
+}
+
+static int hclgevf_tqps_update_stats(struct hnae3_handle *handle)
+{
+	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
+	struct hnae3_queue *queue;
+	struct hclgevf_desc desc;
+	struct hclgevf_tqp *tqp;
+	int status;
+	int i;
+
+	for (i = 0; i < hdev->num_tqps; i++) {
+		queue = handle->kinfo.tqp[i];
+		tqp = container_of(queue, struct hclgevf_tqp, q);
+		hclgevf_cmd_setup_basic_desc(&desc,
+					     HCLGEVF_OPC_QUERY_RX_STATUS,
+					     true);
+
+		desc.data[0] = cpu_to_le32(tqp->index & 0x1ff);
+		status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
+		if (status) {
+			dev_err(&hdev->pdev->dev,
+				"Query tqp stat fail, status = %d, queue = %d\n",
+				status, i);
+			return status;
+		}
+		tqp->tqp_stats.rcb_rx_ring_pktnum_rcd +=
+			le32_to_cpu(desc.data[1]);
+
+		hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_QUERY_TX_STATUS,
+					     true);
+
+		desc.data[0] = cpu_to_le32(tqp->index & 0x1ff);
+		status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
+		if (status) {
+			dev_err(&hdev->pdev->dev,
+				"Query tqp stat fail, status = %d, queue = %d\n",
+				status, i);
+			return status;
+		}
+		tqp->tqp_stats.rcb_tx_ring_pktnum_rcd +=
+			le32_to_cpu(desc.data[1]);
+	}
+
+	return 0;
+}
+
+static u64 *hclgevf_tqps_get_stats(struct hnae3_handle *handle, u64 *data)
+{
+	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
+	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
+	struct hclgevf_tqp *tqp;
+	u64 *buff = data;
+	int i;
+
+	for (i = 0; i < hdev->num_tqps; i++) {
+		tqp = container_of(handle->kinfo.tqp[i], struct hclgevf_tqp, q);
+		*buff++ = tqp->tqp_stats.rcb_tx_ring_pktnum_rcd;
+	}
+	for (i = 0; i < kinfo->num_tqps; i++) {
+		tqp = container_of(handle->kinfo.tqp[i], struct hclgevf_tqp, q);
+		*buff++ = tqp->tqp_stats.rcb_rx_ring_pktnum_rcd;
+	}
+
+	return buff;
+}
+
+static int hclgevf_tqps_get_sset_count(struct hnae3_handle *handle, int strset)
+{
+	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
+
+	return hdev->num_tqps * 2;
+}
+
+static u8 *hclgevf_tqps_get_strings(struct hnae3_handle *handle, u8 *data)
+{
+	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
+	u8 *buff = data;
+	int i = 0;
+
+	for (i = 0; i < hdev->num_tqps; i++) {
+		struct hclgevf_tqp *tqp = container_of(handle->kinfo.tqp[i],
+			struct hclgevf_tqp, q);
+		snprintf(buff, ETH_GSTRING_LEN, "txq#%d_pktnum_rcd",
+			 tqp->index);
+		buff += ETH_GSTRING_LEN;
+	}
+
+	for (i = 0; i < hdev->num_tqps; i++) {
+		struct hclgevf_tqp *tqp = container_of(handle->kinfo.tqp[i],
+			struct hclgevf_tqp, q);
+		snprintf(buff, ETH_GSTRING_LEN, "rxq#%d_pktnum_rcd",
+			 tqp->index);
+		buff += ETH_GSTRING_LEN;
+	}
+
+	return buff;
+}
+
+static void hclgevf_update_stats(struct hnae3_handle *handle,
+				 struct net_device_stats *net_stats)
+{
+	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
+	int status;
+
+	status = hclgevf_tqps_update_stats(handle);
+	if (status)
+		dev_err(&hdev->pdev->dev,
+			"VF update of TQPS stats fail, status = %d.\n",
+			status);
+}
+
+static int hclgevf_get_sset_count(struct hnae3_handle *handle, int strset)
+{
+	if (strset == ETH_SS_TEST)
+		return -EOPNOTSUPP;
+	else if (strset == ETH_SS_STATS)
+		return hclgevf_tqps_get_sset_count(handle, strset);
+
+	return 0;
+}
+
+static void hclgevf_get_strings(struct hnae3_handle *handle, u32 strset,
+				u8 *data)
+{
+	u8 *p = (u8 *)data;
+
+	if (strset == ETH_SS_STATS)
+		p = hclgevf_tqps_get_strings(handle, p);
+}
+
+static void hclgevf_get_stats(struct hnae3_handle *handle, u64 *data)
+{
+	hclgevf_tqps_get_stats(handle, data);
+}
+
+static int hclgevf_get_tc_info(struct hclgevf_dev *hdev)
+{
+	u8 resp_msg;
+	int status;
+
+	status = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_GET_TCINFO, 0, NULL, 0,
+				      true, &resp_msg, sizeof(u8));
+	if (status) {
+		dev_err(&hdev->pdev->dev,
+			"VF request to get TC info from PF failed %d",
+			status);
+		return status;
+	}
+
+	hdev->hw_tc_map = resp_msg;
+
+	return 0;
+}
+
+static int hclge_get_queue_info(struct hclgevf_dev *hdev)
+{
+#define HCLGEVF_TQPS_RSS_INFO_LEN	8
+	u8 resp_msg[HCLGEVF_TQPS_RSS_INFO_LEN];
+	int status;
+
+	status = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_GET_QINFO, 0, NULL, 0,
+				      true, resp_msg,
+				      HCLGEVF_TQPS_RSS_INFO_LEN);
+	if (status) {
+		dev_err(&hdev->pdev->dev,
+			"VF request to get tqp info from PF failed %d",
+			status);
+		return status;
+	}
+
+	memcpy(&hdev->num_tqps, &resp_msg[0], sizeof(u16));
+	memcpy(&hdev->rss_size_max, &resp_msg[2], sizeof(u16));
+	memcpy(&hdev->num_desc, &resp_msg[4], sizeof(u16));
+	memcpy(&hdev->rx_buf_len, &resp_msg[6], sizeof(u16));
+
+	return 0;
+}
+
+static int hclgevf_enable_tso(struct hclgevf_dev *hdev, int enable)
+{
+	struct hclgevf_cfg_tso_status_cmd *req;
+	struct hclgevf_desc desc;
+
+	req = (struct hclgevf_cfg_tso_status_cmd *)desc.data;
+
+	hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_TSO_GENERIC_CONFIG,
+				     false);
+	hnae_set_bit(req->tso_enable, HCLGEVF_TSO_ENABLE_B, enable);
+
+	return hclgevf_cmd_send(&hdev->hw, &desc, 1);
+}
+
+static int hclgevf_alloc_tqps(struct hclgevf_dev *hdev)
+{
+	struct hclgevf_tqp *tqp;
+	int i;
+
+	hdev->htqp = devm_kcalloc(&hdev->pdev->dev, hdev->num_tqps,
+				  sizeof(struct hclgevf_tqp), GFP_KERNEL);
+	if (!hdev->htqp)
+		return -ENOMEM;
+
+	tqp = hdev->htqp;
+
+	for (i = 0; i < hdev->num_tqps; i++) {
+		tqp->dev = &hdev->pdev->dev;
+		tqp->index = i;
+
+		tqp->q.ae_algo = &ae_algovf;
+		tqp->q.buf_size = hdev->rx_buf_len;
+		tqp->q.desc_num = hdev->num_desc;
+		tqp->q.io_base = hdev->hw.io_base + HCLGEVF_TQP_REG_OFFSET +
+			i * HCLGEVF_TQP_REG_SIZE;
+
+		tqp++;
+	}
+
+	return 0;
+}
+
+static int hclgevf_knic_setup(struct hclgevf_dev *hdev)
+{
+	struct hnae3_handle *nic = &hdev->nic;
+	struct hnae3_knic_private_info *kinfo;
+	u16 new_tqps = hdev->num_tqps;
+	int i;
+
+	kinfo = &nic->kinfo;
+	kinfo->num_tc = 0;
+	kinfo->num_desc = hdev->num_desc;
+	kinfo->rx_buf_len = hdev->rx_buf_len;
+	for (i = 0; i < HCLGEVF_MAX_TC_NUM; i++)
+		if (hdev->hw_tc_map & BIT(i))
+			kinfo->num_tc++;
+
+	kinfo->rss_size
+		= min_t(u16, hdev->rss_size_max, new_tqps / kinfo->num_tc);
+	new_tqps = kinfo->rss_size * kinfo->num_tc;
+	kinfo->num_tqps = min(new_tqps, hdev->num_tqps);
+
+	kinfo->tqp = devm_kcalloc(&hdev->pdev->dev, kinfo->num_tqps,
+				  sizeof(struct hnae3_queue *), GFP_KERNEL);
+	if (!kinfo->tqp)
+		return -ENOMEM;
+
+	for (i = 0; i < kinfo->num_tqps; i++) {
+		hdev->htqp[i].q.handle = &hdev->nic;
+		hdev->htqp[i].q.tqp_index = i;
+		kinfo->tqp[i] = &hdev->htqp[i].q;
+	}
+
+	return 0;
+}
+
+static void hclgevf_request_link_info(struct hclgevf_dev *hdev)
+{
+	int status;
+	u8 resp_msg;
+
+	status = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_GET_LINK_STATUS, 0, NULL,
+				      0, false, &resp_msg, sizeof(u8));
+	if (status)
+		dev_err(&hdev->pdev->dev,
+			"VF failed to fetch link status(%d) from PF", status);
+}
+
+void hclgevf_update_link_status(struct hclgevf_dev *hdev, int link_state)
+{
+	struct hnae3_handle *handle = &hdev->nic;
+	struct hnae3_client *client;
+
+	client = handle->client;
+
+	if (link_state != hdev->hw.mac.link) {
+		client->ops->link_status_change(handle, !!link_state);
+		hdev->hw.mac.link = link_state;
+	}
+}
+
+static int hclgevf_set_handle_info(struct hclgevf_dev *hdev)
+{
+	struct hnae3_handle *nic = &hdev->nic;
+	int ret;
+
+	nic->ae_algo = &ae_algovf;
+	nic->pdev = hdev->pdev;
+	nic->numa_node_mask = hdev->numa_node_mask;
+	nic->flags |= HNAE3_SUPPORT_VF;
+
+	if (hdev->ae_dev->dev_type != HNAE3_DEV_KNIC) {
+		dev_err(&hdev->pdev->dev, "unsupported device type %d\n",
+			hdev->ae_dev->dev_type);
+		return -EINVAL;
+	}
+
+	ret = hclgevf_knic_setup(hdev);
+	if (ret)
+		dev_err(&hdev->pdev->dev, "VF knic setup failed %d\n",
+			ret);
+	return ret;
+}
+
+static void hclgevf_free_vector(struct hclgevf_dev *hdev, int vector_id)
+{
+	hdev->vector_status[vector_id] = HCLGEVF_INVALID_VPORT;
+	hdev->num_msi_left += 1;
+	hdev->num_msi_used -= 1;
+}
+
+static int hclgevf_get_vector(struct hnae3_handle *handle, u16 vector_num,
+			      struct hnae3_vector_info *vector_info)
+{
+	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
+	struct hnae3_vector_info *vector = vector_info;
+	int alloc = 0;
+	int i, j;
+
+	vector_num = min(hdev->num_msi_left, vector_num);
+
+	for (j = 0; j < vector_num; j++) {
+		for (i = HCLGEVF_MISC_VECTOR_NUM + 1; i < hdev->num_msi; i++) {
+			if (hdev->vector_status[i] == HCLGEVF_INVALID_VPORT) {
+				vector->vector = pci_irq_vector(hdev->pdev, i);
+				vector->io_addr = hdev->hw.io_base +
+					HCLGEVF_VECTOR_REG_BASE +
+					(i - 1) * HCLGEVF_VECTOR_REG_OFFSET;
+				hdev->vector_status[i] = 0;
+				hdev->vector_irq[i] = vector->vector;
+
+				vector++;
+				alloc++;
+
+				break;
+			}
+		}
+	}
+	hdev->num_msi_left -= alloc;
+	hdev->num_msi_used += alloc;
+
+	return alloc;
+}
+
+static int hclgevf_get_vector_index(struct hclgevf_dev *hdev, int vector)
+{
+	int i;
+
+	for (i = 0; i < hdev->num_msi; i++)
+		if (vector == hdev->vector_irq[i])
+			return i;
+
+	return -EINVAL;
+}
+
+static u32 hclgevf_get_rss_key_size(struct hnae3_handle *handle)
+{
+	return HCLGEVF_RSS_KEY_SIZE;
+}
+
+static u32 hclgevf_get_rss_indir_size(struct hnae3_handle *handle)
+{
+	return HCLGEVF_RSS_IND_TBL_SIZE;
+}
+
+static int hclgevf_set_rss_indir_table(struct hclgevf_dev *hdev)
+{
+	const u8 *indir = hdev->rss_cfg.rss_indirection_tbl;
+	struct hclgevf_rss_indirection_table_cmd *req;
+	struct hclgevf_desc desc;
+	int status;
+	int i, j;
+
+	req = (struct hclgevf_rss_indirection_table_cmd *)desc.data;
+
+	for (i = 0; i < HCLGEVF_RSS_CFG_TBL_NUM; i++) {
+		hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_RSS_INDIR_TABLE,
+					     false);
+		req->start_table_index = i * HCLGEVF_RSS_CFG_TBL_SIZE;
+		req->rss_set_bitmap = HCLGEVF_RSS_SET_BITMAP_MSK;
+		for (j = 0; j < HCLGEVF_RSS_CFG_TBL_SIZE; j++)
+			req->rss_result[j] =
+				indir[i * HCLGEVF_RSS_CFG_TBL_SIZE + j];
+
+		status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
+		if (status) {
+			dev_err(&hdev->pdev->dev,
+				"VF failed(=%d) to set RSS indirection table\n",
+				status);
+			return status;
+		}
+	}
+
+	return 0;
+}
+
+static int hclgevf_set_rss_tc_mode(struct hclgevf_dev *hdev, u16 rss_size)
+{
+	struct hclgevf_rss_tc_mode_cmd *req;
+	u16 tc_offset[HCLGEVF_MAX_TC_NUM];
+	u16 tc_valid[HCLGEVF_MAX_TC_NUM];
+	u16 tc_size[HCLGEVF_MAX_TC_NUM];
+	struct hclgevf_desc desc;
+	u16 roundup_size;
+	int status;
+	int i;
+
+	req = (struct hclgevf_rss_tc_mode_cmd *)desc.data;
+
+	roundup_size = roundup_pow_of_two(rss_size);
+	roundup_size = ilog2(roundup_size);
+
+	for (i = 0; i < HCLGEVF_MAX_TC_NUM; i++) {
+		tc_valid[i] = !!(hdev->hw_tc_map & BIT(i));
+		tc_size[i] = roundup_size;
+		tc_offset[i] = rss_size * i;
+	}
+
+	hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_RSS_TC_MODE, false);
+	for (i = 0; i < HCLGEVF_MAX_TC_NUM; i++) {
+		hnae_set_bit(req->rss_tc_mode[i], HCLGEVF_RSS_TC_VALID_B,
+			     (tc_valid[i] & 0x1));
+		hnae_set_field(req->rss_tc_mode[i], HCLGEVF_RSS_TC_SIZE_M,
+			       HCLGEVF_RSS_TC_SIZE_S, tc_size[i]);
+		hnae_set_field(req->rss_tc_mode[i], HCLGEVF_RSS_TC_OFFSET_M,
+			       HCLGEVF_RSS_TC_OFFSET_S, tc_offset[i]);
+	}
+	status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
+	if (status)
+		dev_err(&hdev->pdev->dev,
+			"VF failed(=%d) to set rss tc mode\n", status);
+
+	return status;
+}
+
+static int hclgevf_get_rss_hw_cfg(struct hnae3_handle *handle, u8 *hash,
+				  u8 *key)
+{
+	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
+	struct hclgevf_rss_config_cmd *req;
+	int lkup_times = key ? 3 : 1;
+	struct hclgevf_desc desc;
+	int key_offset;
+	int key_size;
+	int status;
+
+	req = (struct hclgevf_rss_config_cmd *)desc.data;
+	lkup_times = (lkup_times == 3) ? 3 : ((hash) ? 1 : 0);
+
+	for (key_offset = 0; key_offset < lkup_times; key_offset++) {
+		hclgevf_cmd_setup_basic_desc(&desc,
+					     HCLGEVF_OPC_RSS_GENERIC_CONFIG,
+					     true);
+		req->hash_config |= (key_offset << HCLGEVF_RSS_HASH_KEY_OFFSET);
+
+		status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
+		if (status) {
+			dev_err(&hdev->pdev->dev,
+				"failed to get hardware RSS cfg, status = %d\n",
+				status);
+			return status;
+		}
+
+		if (key_offset == 2)
+			key_size =
+			HCLGEVF_RSS_KEY_SIZE - HCLGEVF_RSS_HASH_KEY_NUM * 2;
+		else
+			key_size = HCLGEVF_RSS_HASH_KEY_NUM;
+
+		if (key)
+			memcpy(key + key_offset * HCLGEVF_RSS_HASH_KEY_NUM,
+			       req->hash_key,
+			       key_size);
+	}
+
+	if (hash) {
+		if ((req->hash_config & 0xf) == HCLGEVF_RSS_HASH_ALGO_TOEPLITZ)
+			*hash = ETH_RSS_HASH_TOP;
+		else
+			*hash = ETH_RSS_HASH_UNKNOWN;
+	}
+
+	return 0;
+}
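
The three-pass read above fetches HCLGEVF_RSS_HASH_KEY_NUM (16) bytes of
the hash key per descriptor, with the last pass trimmed to whatever
remains. Assuming the usual 40-byte Toeplitz key for HCLGEVF_RSS_KEY_SIZE
(the define lives in hclgevf_main.h, outside this hunk), the chunk sizes
come out as 16 + 16 + 8:

	#include <assert.h>

	int main(void)
	{
		const int key_total = 40;	/* assumed HCLGEVF_RSS_KEY_SIZE */
		const int chunk = 16;		/* HCLGEVF_RSS_HASH_KEY_NUM */
		int key_offset, copied = 0;

		for (key_offset = 0; key_offset < 3; key_offset++)
			copied += (key_offset == 2) ? key_total - 2 * chunk : chunk;

		assert(copied == key_total);	/* 16 + 16 + 8 */
		return 0;
	}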
+
+static int hclgevf_get_rss(struct hnae3_handle *handle, u32 *indir, u8 *key,
+			   u8 *hfunc)
+{
+	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
+	struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg;
+	int i;
+
+	if (indir)
+		for (i = 0; i < HCLGEVF_RSS_IND_TBL_SIZE; i++)
+			indir[i] = rss_cfg->rss_indirection_tbl[i];
+
+	return hclgevf_get_rss_hw_cfg(handle, hfunc, key);
+}
+
+static int hclgevf_set_rss(struct hnae3_handle *handle, const u32 *indir,
+			   const u8 *key, const u8 hfunc)
+{
+	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
+	struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg;
+	int i;
+
+	/* update the shadow RSS table with user specified qids */
+	for (i = 0; i < HCLGEVF_RSS_IND_TBL_SIZE; i++)
+		rss_cfg->rss_indirection_tbl[i] = indir[i];
+
+	/* update the hardware */
+	return hclgevf_set_rss_indir_table(hdev);
+}
+
+static int hclgevf_get_tc_size(struct hnae3_handle *handle)
+{
+	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
+	struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg;
+
+	return rss_cfg->rss_size;
+}
+
+static int hclgevf_bind_ring_to_vector(struct hnae3_handle *handle, bool en,
+				       int vector,
+				       struct hnae3_ring_chain_node *ring_chain)
+{
+#define HCLGEVF_RING_NODE_VARIABLE_NUM		3
+#define HCLGEVF_RING_MAP_MBX_BASIC_MSG_NUM	3
+	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
+	struct hnae3_ring_chain_node *node;
+	struct hclge_mbx_vf_to_pf_cmd *req;
+	struct hclgevf_desc desc;
+	int i, vector_id;
+	int status;
+	u8 type;
+
+	req = (struct hclge_mbx_vf_to_pf_cmd *)desc.data;
+	vector_id = hclgevf_get_vector_index(hdev, vector);
+	if (vector_id < 0) {
+		dev_err(&handle->pdev->dev,
+			"Get vector index fail. ret = %d\n", vector_id);
+		return vector_id;
+	}
+
+	hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_MBX_VF_TO_PF, false);
+	type = en ?
+		HCLGE_MBX_MAP_RING_TO_VECTOR : HCLGE_MBX_UNMAP_RING_TO_VECTOR;
+	req->msg[0] = type;
+	req->msg[1] = vector_id; /* vector_id should be id in VF */
+
+	i = 0;
+	for (node = ring_chain; node; node = node->next) {
+		i++;
+		/* msg[2] is cause num */
+		req->msg[HCLGEVF_RING_NODE_VARIABLE_NUM * i] =
+				hnae_get_bit(node->flag, HNAE3_RING_TYPE_B);
+		req->msg[HCLGEVF_RING_NODE_VARIABLE_NUM * i + 1] =
+				node->tqp_index;
+		if (i == (HCLGE_MBX_VF_MSG_DATA_NUM -
+		    HCLGEVF_RING_MAP_MBX_BASIC_MSG_NUM) /
+		    HCLGEVF_RING_NODE_VARIABLE_NUM) {
+			req->msg[2] = i;
+
+			status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
+			if (status) {
+				dev_err(&hdev->pdev->dev,
+					"Map TQP fail, status is %d.\n",
+					status);
+				return status;
+			}
+			i = 0;
+			hclgevf_cmd_setup_basic_desc(&desc,
+						     HCLGEVF_OPC_MBX_VF_TO_PF,
+						     false);
+			req->msg[0] = type;
+			req->msg[1] = vector_id;
+		}
+	}
+
+	if (i > 0) {
+		req->msg[2] = i;
+
+		status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
+		if (status) {
+			dev_err(&hdev->pdev->dev,
+				"Map TQP fail, status is %d.\n", status);
+			return status;
+		}
+	}
+
+	return 0;
+}
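
The flush condition inside the loop above bounds how many ring nodes fit in
a single mailbox message: with a three-slot header and a three-slot stride
per node, that is (HCLGE_MBX_VF_MSG_DATA_NUM - 3) / 3 nodes per message.
Taking 16 for HCLGE_MBX_VF_MSG_DATA_NUM (an assumption; the constant is
defined in hclge_mbx.h, outside this hunk), a chain of nine rings would go
out as three messages of 4, 4 and 1 nodes:

	#include <assert.h>

	int main(void)
	{
		const int msg_data_num = 16;	/* assumed HCLGE_MBX_VF_MSG_DATA_NUM */
		const int basic = 3;		/* opcode, vector id, ring count */
		const int per_node = 3;		/* HCLGEVF_RING_NODE_VARIABLE_NUM */
		int per_msg = (msg_data_num - basic) / per_node;
		int rings = 9, msgs = 0;

		while (rings > 0) {
			rings -= (rings > per_msg) ? per_msg : rings;
			msgs++;
		}
		assert(per_msg == 4);
		assert(msgs == 3);	/* 4 + 4 + 1 */
		return 0;
	}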
+
+static int hclgevf_map_ring_to_vector(struct hnae3_handle *handle, int vector,
+				      struct hnae3_ring_chain_node *ring_chain)
+{
+	return hclgevf_bind_ring_to_vector(handle, true, vector, ring_chain);
+}
+
+static int hclgevf_unmap_ring_from_vector(
+				struct hnae3_handle *handle,
+				int vector,
+				struct hnae3_ring_chain_node *ring_chain)
+{
+	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
+	int ret, vector_id;
+
+	vector_id = hclgevf_get_vector_index(hdev, vector);
+	if (vector_id < 0) {
+		dev_err(&handle->pdev->dev,
+			"Get vector index fail. ret = %d\n", vector_id);
+		return vector_id;
+	}
+
+	ret = hclgevf_bind_ring_to_vector(handle, false, vector, ring_chain);
+	if (ret) {
+		dev_err(&handle->pdev->dev,
+			"Unmap ring from vector fail. vector = %d, ret = %d\n",
+			vector_id, ret);
+		return ret;
+	}
+
+	hclgevf_free_vector(hdev, vector);
+
+	return 0;
+}
+
+static int hclgevf_cmd_set_promisc_mode(struct hclgevf_dev *hdev, u32 en)
+{
+	struct hclge_mbx_vf_to_pf_cmd *req;
+	struct hclgevf_desc desc;
+	int status;
+
+	req = (struct hclge_mbx_vf_to_pf_cmd *)desc.data;
+
+	hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_MBX_VF_TO_PF, false);
+	req->msg[0] = HCLGE_MBX_SET_PROMISC_MODE;
+	req->msg[1] = en;
+
+	status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
+	if (status)
+		dev_err(&hdev->pdev->dev,
+			"Set promisc mode fail, status is %d.\n", status);
+
+	return status;
+}
+
+static void hclgevf_set_promisc_mode(struct hnae3_handle *handle, u32 en)
+{
+	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
+
+	hclgevf_cmd_set_promisc_mode(hdev, en);
+}
+
+static int hclgevf_tqp_enable(struct hclgevf_dev *hdev, int tqp_id,
+			      int stream_id, bool enable)
+{
+	struct hclgevf_cfg_com_tqp_queue_cmd *req;
+	struct hclgevf_desc desc;
+	int status;
+
+	req = (struct hclgevf_cfg_com_tqp_queue_cmd *)desc.data;
+
+	hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_CFG_COM_TQP_QUEUE,
+				     false);
+	req->tqp_id = cpu_to_le16(tqp_id & HCLGEVF_RING_ID_MASK);
+	req->stream_id = cpu_to_le16(stream_id);
+	req->enable |= enable << HCLGEVF_TQP_ENABLE_B;
+
+	status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
+	if (status)
+		dev_err(&hdev->pdev->dev,
+			"TQP enable fail, status =%d.\n", status);
+
+	return status;
+}
+
+static int hclgevf_get_queue_id(struct hnae3_queue *queue)
+{
+	struct hclgevf_tqp *tqp = container_of(queue, struct hclgevf_tqp, q);
+
+	return tqp->index;
+}
+
+static void hclgevf_reset_tqp_stats(struct hnae3_handle *handle)
+{
+	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
+	struct hnae3_queue *queue;
+	struct hclgevf_tqp *tqp;
+	int i;
+
+	for (i = 0; i < hdev->num_tqps; i++) {
+		queue = handle->kinfo.tqp[i];
+		tqp = container_of(queue, struct hclgevf_tqp, q);
+		memset(&tqp->tqp_stats, 0, sizeof(tqp->tqp_stats));
+	}
+}
+
+static int hclgevf_cfg_func_mta_filter(struct hnae3_handle *handle, bool en)
+{
+	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
+	u8 msg[2] = {0};
+
+	msg[0] = en;
+	return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_MULTICAST,
+				    HCLGE_MBX_MAC_VLAN_MC_FUNC_MTA_ENABLE,
+				    msg, 1, false, NULL, 0);
+}
+
+static void hclgevf_get_mac_addr(struct hnae3_handle *handle, u8 *p)
+{
+	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
+
+	ether_addr_copy(p, hdev->hw.mac.mac_addr);
+}
+
+static int hclgevf_set_mac_addr(struct hnae3_handle *handle, void *p)
+{
+	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
+	u8 *old_mac_addr = (u8 *)hdev->hw.mac.mac_addr;
+	u8 *new_mac_addr = (u8 *)p;
+	u8 msg_data[ETH_ALEN * 2];
+	int status;
+
+	ether_addr_copy(msg_data, new_mac_addr);
+	ether_addr_copy(&msg_data[ETH_ALEN], old_mac_addr);
+
+	status = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_UNICAST,
+				      HCLGE_MBX_MAC_VLAN_UC_MODIFY,
+				      msg_data, ETH_ALEN * 2,
+				      false, NULL, 0);
+	if (!status)
+		ether_addr_copy(hdev->hw.mac.mac_addr, new_mac_addr);
+
+	return status;
+}
+
+static int hclgevf_add_uc_addr(struct hnae3_handle *handle,
+			       const unsigned char *addr)
+{
+	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
+
+	return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_UNICAST,
+				    HCLGE_MBX_MAC_VLAN_UC_ADD,
+				    addr, ETH_ALEN, false, NULL, 0);
+}
+
+static int hclgevf_rm_uc_addr(struct hnae3_handle *handle,
+			      const unsigned char *addr)
+{
+	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
+
+	return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_UNICAST,
+				    HCLGE_MBX_MAC_VLAN_UC_REMOVE,
+				    addr, ETH_ALEN, false, NULL, 0);
+}
+
+static int hclgevf_add_mc_addr(struct hnae3_handle *handle,
+			       const unsigned char *addr)
+{
+	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
+
+	return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_MULTICAST,
+				    HCLGE_MBX_MAC_VLAN_MC_ADD,
+				    addr, ETH_ALEN, false, NULL, 0);
+}
+
+static int hclgevf_rm_mc_addr(struct hnae3_handle *handle,
+			      const unsigned char *addr)
+{
+	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
+
+	return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_MULTICAST,
+				    HCLGE_MBX_MAC_VLAN_MC_REMOVE,
+				    addr, ETH_ALEN, false, NULL, 0);
+}
+
+static int hclgevf_set_vlan_filter(struct hnae3_handle *handle,
+				   __be16 proto, u16 vlan_id,
+				   bool is_kill)
+{
+#define HCLGEVF_VLAN_MBX_MSG_LEN 5
+	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
+	u8 msg_data[HCLGEVF_VLAN_MBX_MSG_LEN];
+
+	if (vlan_id > 4095)
+		return -EINVAL;
+
+	if (proto != htons(ETH_P_8021Q))
+		return -EPROTONOSUPPORT;
+
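+	/* mailbox payload layout: byte 0 = is_kill, bytes 1-2 = VLAN id,
+	 * bytes 3-4 = protocol (still in network byte order)
+	 */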
+	msg_data[0] = is_kill;
+	memcpy(&msg_data[1], &vlan_id, sizeof(vlan_id));
+	memcpy(&msg_data[3], &proto, sizeof(proto));
+	return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_VLAN,
+				    HCLGE_MBX_VLAN_FILTER, msg_data,
+				    HCLGEVF_VLAN_MBX_MSG_LEN, false, NULL, 0);
+}
+
+static void hclgevf_reset_tqp(struct hnae3_handle *handle, u16 queue_id)
+{
+	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
+	u8 msg_data[2];
+
+	memcpy(&msg_data[0], &queue_id, sizeof(queue_id));
+
+	hclgevf_send_mbx_msg(hdev, HCLGE_MBX_QUEUE_RESET, 0, msg_data, 2, false,
+			     NULL, 0);
+}
+
+static u32 hclgevf_get_fw_version(struct hnae3_handle *handle)
+{
+	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
+
+	return hdev->fw_version;
+}
+
+static void hclgevf_get_misc_vector(struct hclgevf_dev *hdev)
+{
+	struct hclgevf_misc_vector *vector = &hdev->misc_vector;
+
+	vector->vector_irq = pci_irq_vector(hdev->pdev,
+					    HCLGEVF_MISC_VECTOR_NUM);
+	vector->addr = hdev->hw.io_base + HCLGEVF_MISC_VECTOR_REG_BASE;
+	/* vector status always valid for Vector 0 */
+	hdev->vector_status[HCLGEVF_MISC_VECTOR_NUM] = 0;
+	hdev->vector_irq[HCLGEVF_MISC_VECTOR_NUM] = vector->vector_irq;
+
+	hdev->num_msi_left -= 1;
+	hdev->num_msi_used += 1;
+}
+
+static void hclgevf_mbx_task_schedule(struct hclgevf_dev *hdev)
+{
+	if (!test_and_set_bit(HCLGEVF_STATE_MBX_SERVICE_SCHED, &hdev->state))
+		schedule_work(&hdev->mbx_service_task);
+}
+
+static void hclgevf_task_schedule(struct hclgevf_dev *hdev)
+{
+	if (!test_bit(HCLGEVF_STATE_DOWN, &hdev->state) &&
+	    !test_and_set_bit(HCLGEVF_STATE_SERVICE_SCHED, &hdev->state))
+		schedule_work(&hdev->service_task);
+}
+
+static void hclgevf_service_timer(struct timer_list *t)
+{
+	struct hclgevf_dev *hdev = from_timer(hdev, t, service_timer);
+
+	mod_timer(&hdev->service_timer, jiffies + 5 * HZ);
+
+	hclgevf_task_schedule(hdev);
+}
+
+static void hclgevf_mailbox_service_task(struct work_struct *work)
+{
+	struct hclgevf_dev *hdev;
+
+	hdev = container_of(work, struct hclgevf_dev, mbx_service_task);
+
+	if (test_and_set_bit(HCLGEVF_STATE_MBX_HANDLING, &hdev->state))
+		return;
+
+	clear_bit(HCLGEVF_STATE_MBX_SERVICE_SCHED, &hdev->state);
+
+	hclgevf_mbx_handler(hdev);
+
+	clear_bit(HCLGEVF_STATE_MBX_HANDLING, &hdev->state);
+}
+
+static void hclgevf_service_task(struct work_struct *work)
+{
+	struct hclgevf_dev *hdev;
+
+	hdev = container_of(work, struct hclgevf_dev, service_task);
+
+	/* request the link status from the PF. The PF may push such
+	 * updates to the VF in the future, in which case this call can
+	 * be removed
+	 */
+	hclgevf_request_link_info(hdev);
+
+	clear_bit(HCLGEVF_STATE_SERVICE_SCHED, &hdev->state);
+}
+
+static void hclgevf_clear_event_cause(struct hclgevf_dev *hdev, u32 regclr)
+{
+	hclgevf_write_dev(&hdev->hw, HCLGEVF_VECTOR0_CMDQ_SRC_REG, regclr);
+}
+
+static bool hclgevf_check_event_cause(struct hclgevf_dev *hdev, u32 *clearval)
+{
+	u32 cmdq_src_reg;
+
+	/* fetch the events from their corresponding regs */
+	cmdq_src_reg = hclgevf_read_dev(&hdev->hw,
+					HCLGEVF_VECTOR0_CMDQ_SRC_REG);
+
+	/* check for vector0 mailbox(=CMDQ RX) event source */
+	if (BIT(HCLGEVF_VECTOR0_RX_CMDQ_INT_B) & cmdq_src_reg) {
+		cmdq_src_reg &= ~BIT(HCLGEVF_VECTOR0_RX_CMDQ_INT_B);
+		*clearval = cmdq_src_reg;
+		return true;
+	}
+
+	dev_dbg(&hdev->pdev->dev, "vector 0 interrupt from unknown source\n");
+
+	return false;
+}
+
+static void hclgevf_enable_vector(struct hclgevf_misc_vector *vector, bool en)
+{
+	writel(en ? 1 : 0, vector->addr);
+}
+
+static irqreturn_t hclgevf_misc_irq_handle(int irq, void *data)
+{
+	struct hclgevf_dev *hdev = data;
+	u32 clearval;
+
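+	/* mask the misc vector while the event source is inspected; it is
+	 * re-enabled on every exit path below
+	 */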
+	hclgevf_enable_vector(&hdev->misc_vector, false);
+	if (!hclgevf_check_event_cause(hdev, &clearval))
+		goto skip_sched;
+
+	/* schedule the VF mailbox service task, if not already scheduled */
+	hclgevf_mbx_task_schedule(hdev);
+
+	hclgevf_clear_event_cause(hdev, clearval);
+
+skip_sched:
+	hclgevf_enable_vector(&hdev->misc_vector, true);
+
+	return IRQ_HANDLED;
+}
+
+static int hclgevf_configure(struct hclgevf_dev *hdev)
+{
+	int ret;
+
+	/* get queue configuration from PF */
+	ret = hclge_get_queue_info(hdev);
+	if (ret)
+		return ret;
+	/* get tc configuration from PF */
+	return hclgevf_get_tc_info(hdev);
+}
+
+static int hclgevf_init_roce_base_info(struct hclgevf_dev *hdev)
+{
+	struct hnae3_handle *roce = &hdev->roce;
+	struct hnae3_handle *nic = &hdev->nic;
+
+	roce->rinfo.num_vectors = HCLGEVF_ROCEE_VECTOR_NUM;
+
+	if (hdev->num_msi_left < roce->rinfo.num_vectors ||
+	    hdev->num_msi_left == 0)
+		return -EINVAL;
+
+	roce->rinfo.base_vector =
+		hdev->vector_status[hdev->num_msi_used];
+
+	roce->rinfo.netdev = nic->kinfo.netdev;
+	roce->rinfo.roce_io_base = hdev->hw.io_base;
+
+	roce->pdev = nic->pdev;
+	roce->ae_algo = nic->ae_algo;
+	roce->numa_node_mask = nic->numa_node_mask;
+
+	return 0;
+}
+
+static int hclgevf_rss_init_hw(struct hclgevf_dev *hdev)
+{
+	struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg;
+	int i, ret;
+
+	rss_cfg->rss_size = hdev->rss_size_max;
+
+	/* Initialize RSS indirect table for each vport */
+	for (i = 0; i < HCLGEVF_RSS_IND_TBL_SIZE; i++)
+		rss_cfg->rss_indirection_tbl[i] = i % hdev->rss_size_max;
+
+	ret = hclgevf_set_rss_indir_table(hdev);
+	if (ret)
+		return ret;
+
+	return hclgevf_set_rss_tc_mode(hdev, hdev->rss_size_max);
+}
+
+static int hclgevf_init_vlan_config(struct hclgevf_dev *hdev)
+{
+	/* other VLAN configuration (e.g. VLAN TX/RX offload) will also
+	 * be added here later
+	 */
+	return hclgevf_set_vlan_filter(&hdev->nic, htons(ETH_P_8021Q), 0,
+				       false);
+}
+
+static int hclgevf_ae_start(struct hnae3_handle *handle)
+{
+	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
+	int i, queue_id;
+
+	for (i = 0; i < handle->kinfo.num_tqps; i++) {
+		/* ring enable */
+		queue_id = hclgevf_get_queue_id(handle->kinfo.tqp[i]);
+		if (queue_id < 0) {
+			dev_warn(&hdev->pdev->dev,
+				 "Get invalid queue id, ignore it\n");
+			continue;
+		}
+
+		hclgevf_tqp_enable(hdev, queue_id, 0, true);
+	}
+
+	/* reset tqp stats */
+	hclgevf_reset_tqp_stats(handle);
+
+	hclgevf_request_link_info(hdev);
+
+	clear_bit(HCLGEVF_STATE_DOWN, &hdev->state);
+	mod_timer(&hdev->service_timer, jiffies + HZ);
+
+	return 0;
+}
+
+static void hclgevf_ae_stop(struct hnae3_handle *handle)
+{
+	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
+	int i, queue_id;
+
+	for (i = 0; i < hdev->num_tqps; i++) {
+		/* Ring disable */
+		queue_id = hclgevf_get_queue_id(handle->kinfo.tqp[i]);
+		if (queue_id < 0) {
+			dev_warn(&hdev->pdev->dev,
+				 "Get invalid queue id, ignore it\n");
+			continue;
+		}
+
+		hclgevf_tqp_enable(hdev, queue_id, 0, false);
+	}
+
+	/* reset tqp stats */
+	hclgevf_reset_tqp_stats(handle);
+}
+
+static void hclgevf_state_init(struct hclgevf_dev *hdev)
+{
+	/* setup tasks for the MBX */
+	INIT_WORK(&hdev->mbx_service_task, hclgevf_mailbox_service_task);
+	clear_bit(HCLGEVF_STATE_MBX_SERVICE_SCHED, &hdev->state);
+	clear_bit(HCLGEVF_STATE_MBX_HANDLING, &hdev->state);
+
+	/* setup tasks for service timer */
+	timer_setup(&hdev->service_timer, hclgevf_service_timer, 0);
+
+	INIT_WORK(&hdev->service_task, hclgevf_service_task);
+	clear_bit(HCLGEVF_STATE_SERVICE_SCHED, &hdev->state);
+
+	mutex_init(&hdev->mbx_resp.mbx_mutex);
+
+	/* bring the device down */
+	set_bit(HCLGEVF_STATE_DOWN, &hdev->state);
+}
+
+static void hclgevf_state_uninit(struct hclgevf_dev *hdev)
+{
+	set_bit(HCLGEVF_STATE_DOWN, &hdev->state);
+
+	if (hdev->service_timer.function)
+		del_timer_sync(&hdev->service_timer);
+	if (hdev->service_task.func)
+		cancel_work_sync(&hdev->service_task);
+	if (hdev->mbx_service_task.func)
+		cancel_work_sync(&hdev->mbx_service_task);
+
+	mutex_destroy(&hdev->mbx_resp.mbx_mutex);
+}
+
+static int hclgevf_init_msi(struct hclgevf_dev *hdev)
+{
+	struct pci_dev *pdev = hdev->pdev;
+	int vectors;
+	int i;
+
+	hdev->num_msi = HCLGEVF_MAX_VF_VECTOR_NUM;
+
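+	/* request MSI-X (falling back to MSI); any count from 1 up to
+	 * num_msi is acceptable, a shortfall is only warned about below
+	 */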
+	vectors = pci_alloc_irq_vectors(pdev, 1, hdev->num_msi,
+					PCI_IRQ_MSI | PCI_IRQ_MSIX);
+	if (vectors < 0) {
+		dev_err(&pdev->dev,
+			"failed(%d) to allocate MSI/MSI-X vectors\n",
+			vectors);
+		return vectors;
+	}
+	if (vectors < hdev->num_msi)
+		dev_warn(&hdev->pdev->dev,
+			 "requested %d MSI/MSI-X, but allocated %d MSI/MSI-X\n",
+			 hdev->num_msi, vectors);
+
+	hdev->num_msi = vectors;
+	hdev->num_msi_left = vectors;
+	hdev->base_msi_vector = pdev->irq;
+
+	hdev->vector_status = devm_kcalloc(&pdev->dev, hdev->num_msi,
+					   sizeof(u16), GFP_KERNEL);
+	if (!hdev->vector_status) {
+		pci_free_irq_vectors(pdev);
+		return -ENOMEM;
+	}
+
+	for (i = 0; i < hdev->num_msi; i++)
+		hdev->vector_status[i] = HCLGEVF_INVALID_VPORT;
+
+	hdev->vector_irq = devm_kcalloc(&pdev->dev, hdev->num_msi,
+					sizeof(int), GFP_KERNEL);
+	if (!hdev->vector_irq) {
+		pci_free_irq_vectors(pdev);
+		return -ENOMEM;
+	}
+
+	return 0;
+}
+
+static void hclgevf_uninit_msi(struct hclgevf_dev *hdev)
+{
+	struct pci_dev *pdev = hdev->pdev;
+
+	pci_free_irq_vectors(pdev);
+}
+
+static int hclgevf_misc_irq_init(struct hclgevf_dev *hdev)
+{
+	int ret = 0;
+
+	hclgevf_get_misc_vector(hdev);
+
+	ret = request_irq(hdev->misc_vector.vector_irq, hclgevf_misc_irq_handle,
+			  0, "hclgevf_cmd", hdev);
+	if (ret) {
+		dev_err(&hdev->pdev->dev, "VF failed to request misc irq(%d)\n",
+			hdev->misc_vector.vector_irq);
+		return ret;
+	}
+
+	/* enable misc. vector(vector 0) */
+	hclgevf_enable_vector(&hdev->misc_vector, true);
+
+	return ret;
+}
+
+static void hclgevf_misc_irq_uninit(struct hclgevf_dev *hdev)
+{
+	/* disable misc vector(vector 0) */
+	hclgevf_enable_vector(&hdev->misc_vector, false);
+	free_irq(hdev->misc_vector.vector_irq, hdev);
+	hclgevf_free_vector(hdev, 0);
+}
+
+static int hclgevf_init_instance(struct hclgevf_dev *hdev,
+				 struct hnae3_client *client)
+{
+	int ret;
+
+	switch (client->type) {
+	case HNAE3_CLIENT_KNIC:
+		hdev->nic_client = client;
+		hdev->nic.client = client;
+
+		ret = client->ops->init_instance(&hdev->nic);
+		if (ret)
+			return ret;
+
+		if (hdev->roce_client && hnae3_dev_roce_supported(hdev)) {
+			struct hnae3_client *rc = hdev->roce_client;
+
+			ret = hclgevf_init_roce_base_info(hdev);
+			if (ret)
+				return ret;
+			ret = rc->ops->init_instance(&hdev->roce);
+			if (ret)
+				return ret;
+		}
+		break;
+	case HNAE3_CLIENT_UNIC:
+		hdev->nic_client = client;
+		hdev->nic.client = client;
+
+		ret = client->ops->init_instance(&hdev->nic);
+		if (ret)
+			return ret;
+		break;
+	case HNAE3_CLIENT_ROCE:
+		hdev->roce_client = client;
+		hdev->roce.client = client;
+
+		if (hdev->roce_client && hnae3_dev_roce_supported(hdev)) {
+			ret = hclgevf_init_roce_base_info(hdev);
+			if (ret)
+				return ret;
+
+			ret = client->ops->init_instance(&hdev->roce);
+			if (ret)
+				return ret;
+		}
+	}
+
+	return 0;
+}
+
+static void hclgevf_uninit_instance(struct hclgevf_dev *hdev,
+				    struct hnae3_client *client)
+{
+	/* un-init roce, if it exists */
+	if (hdev->roce_client)
+		hdev->roce_client->ops->uninit_instance(&hdev->roce, 0);
+
+	/* un-init nic/unic, if this was not called by roce client */
+	if ((client->ops->uninit_instance) &&
+	    (client->type != HNAE3_CLIENT_ROCE))
+		client->ops->uninit_instance(&hdev->nic, 0);
+}
+
+static int hclgevf_register_client(struct hnae3_client *client,
+				   struct hnae3_ae_dev *ae_dev)
+{
+	struct hclgevf_dev *hdev = ae_dev->priv;
+
+	return hclgevf_init_instance(hdev, client);
+}
+
+static void hclgevf_unregister_client(struct hnae3_client *client,
+				      struct hnae3_ae_dev *ae_dev)
+{
+	struct hclgevf_dev *hdev = ae_dev->priv;
+
+	hclgevf_uninit_instance(hdev, client);
+}
+
+static int hclgevf_pci_init(struct hclgevf_dev *hdev)
+{
+	struct pci_dev *pdev = hdev->pdev;
+	struct hclgevf_hw *hw;
+	int ret;
+
+	ret = pci_enable_device(pdev);
+	if (ret) {
+		dev_err(&pdev->dev, "failed to enable PCI device\n");
+		goto err_no_drvdata;
+	}
+
+	ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
+	if (ret) {
+		dev_err(&pdev->dev, "can't set consistent PCI DMA, exiting");
+		goto err_disable_device;
+	}
+
+	ret = pci_request_regions(pdev, HCLGEVF_DRIVER_NAME);
+	if (ret) {
+		dev_err(&pdev->dev, "PCI request regions failed %d\n", ret);
+		goto err_disable_device;
+	}
+
+	pci_set_master(pdev);
+	hw = &hdev->hw;
+	hw->hdev = hdev;
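+	/* BAR 2 exposes the VF's register space; map it for MMIO access */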
+	hw->io_base = pci_iomap(pdev, 2, 0);
+	if (!hw->io_base) {
+		dev_err(&pdev->dev, "can't map configuration register space\n");
+		ret = -ENOMEM;
+		goto err_clr_master;
+	}
+
+	return 0;
+
+err_clr_master:
+	pci_clear_master(pdev);
+	pci_release_regions(pdev);
+err_disable_device:
+	pci_disable_device(pdev);
+err_no_drvdata:
+	pci_set_drvdata(pdev, NULL);
+	return ret;
+}
+
+static void hclgevf_pci_uninit(struct hclgevf_dev *hdev)
+{
+	struct pci_dev *pdev = hdev->pdev;
+
+	pci_iounmap(pdev, hdev->hw.io_base);
+	pci_clear_master(pdev);
+	pci_release_regions(pdev);
+	pci_disable_device(pdev);
+	pci_set_drvdata(pdev, NULL);
+}
+
+static int hclgevf_init_ae_dev(struct hnae3_ae_dev *ae_dev)
+{
+	struct pci_dev *pdev = ae_dev->pdev;
+	struct hclgevf_dev *hdev;
+	int ret;
+
+	hdev = devm_kzalloc(&pdev->dev, sizeof(*hdev), GFP_KERNEL);
+	if (!hdev)
+		return -ENOMEM;
+
+	hdev->pdev = pdev;
+	hdev->ae_dev = ae_dev;
+	ae_dev->priv = hdev;
+
+	ret = hclgevf_pci_init(hdev);
+	if (ret) {
+		dev_err(&pdev->dev, "PCI initialization failed\n");
+		return ret;
+	}
+
+	ret = hclgevf_init_msi(hdev);
+	if (ret) {
+		dev_err(&pdev->dev, "failed(%d) to init MSI/MSI-X\n", ret);
+		goto err_irq_init;
+	}
+
+	hclgevf_state_init(hdev);
+
+	ret = hclgevf_misc_irq_init(hdev);
+	if (ret) {
+		dev_err(&pdev->dev, "failed(%d) to init Misc IRQ(vector0)\n",
+			ret);
+		goto err_misc_irq_init;
+	}
+
+	ret = hclgevf_cmd_init(hdev);
+	if (ret)
+		goto err_cmd_init;
+
+	ret = hclgevf_configure(hdev);
+	if (ret) {
+		dev_err(&pdev->dev, "failed(%d) to fetch configuration\n", ret);
+		goto err_config;
+	}
+
+	ret = hclgevf_alloc_tqps(hdev);
+	if (ret) {
+		dev_err(&pdev->dev, "failed(%d) to allocate TQPs\n", ret);
+		goto err_config;
+	}
+
+	ret = hclgevf_set_handle_info(hdev);
+	if (ret) {
+		dev_err(&pdev->dev, "failed(%d) to set handle info\n", ret);
+		goto err_config;
+	}
+
+	ret = hclgevf_enable_tso(hdev, true);
+	if (ret) {
+		dev_err(&pdev->dev, "failed(%d) to enable tso\n", ret);
+		goto err_config;
+	}
+
+	/* Initialize VF's MTA */
+	hdev->accept_mta_mc = true;
+	ret = hclgevf_cfg_func_mta_filter(&hdev->nic, hdev->accept_mta_mc);
+	if (ret) {
+		dev_err(&hdev->pdev->dev,
+			"failed(%d) to set mta filter mode\n", ret);
+		goto err_config;
+	}
+
+	/* Initialize RSS for this VF */
+	ret = hclgevf_rss_init_hw(hdev);
+	if (ret) {
+		dev_err(&hdev->pdev->dev,
+			"failed(%d) to initialize RSS\n", ret);
+		goto err_config;
+	}
+
+	ret = hclgevf_init_vlan_config(hdev);
+	if (ret) {
+		dev_err(&hdev->pdev->dev,
+			"failed(%d) to initialize VLAN config\n", ret);
+		goto err_config;
+	}
+
+	pr_info("finished initializing %s driver\n", HCLGEVF_DRIVER_NAME);
+
+	return 0;
+
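+/* error unwind: tear down in reverse order of initialization */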
+err_config:
+	hclgevf_cmd_uninit(hdev);
+err_cmd_init:
+	hclgevf_misc_irq_uninit(hdev);
+err_misc_irq_init:
+	hclgevf_state_uninit(hdev);
+	hclgevf_uninit_msi(hdev);
+err_irq_init:
+	hclgevf_pci_uninit(hdev);
+	return ret;
+}
+
+static void hclgevf_uninit_ae_dev(struct hnae3_ae_dev *ae_dev)
+{
+	struct hclgevf_dev *hdev = ae_dev->priv;
+
+	hclgevf_cmd_uninit(hdev);
+	hclgevf_misc_irq_uninit(hdev);
+	hclgevf_state_uninit(hdev);
+	hclgevf_uninit_msi(hdev);
+	hclgevf_pci_uninit(hdev);
+	ae_dev->priv = NULL;
+}
+
+static const struct hnae3_ae_ops hclgevf_ops = {
+	.init_ae_dev = hclgevf_init_ae_dev,
+	.uninit_ae_dev = hclgevf_uninit_ae_dev,
+	.init_client_instance = hclgevf_register_client,
+	.uninit_client_instance = hclgevf_unregister_client,
+	.start = hclgevf_ae_start,
+	.stop = hclgevf_ae_stop,
+	.map_ring_to_vector = hclgevf_map_ring_to_vector,
+	.unmap_ring_from_vector = hclgevf_unmap_ring_from_vector,
+	.get_vector = hclgevf_get_vector,
+	.reset_queue = hclgevf_reset_tqp,
+	.set_promisc_mode = hclgevf_set_promisc_mode,
+	.get_mac_addr = hclgevf_get_mac_addr,
+	.set_mac_addr = hclgevf_set_mac_addr,
+	.add_uc_addr = hclgevf_add_uc_addr,
+	.rm_uc_addr = hclgevf_rm_uc_addr,
+	.add_mc_addr = hclgevf_add_mc_addr,
+	.rm_mc_addr = hclgevf_rm_mc_addr,
+	.get_stats = hclgevf_get_stats,
+	.update_stats = hclgevf_update_stats,
+	.get_strings = hclgevf_get_strings,
+	.get_sset_count = hclgevf_get_sset_count,
+	.get_rss_key_size = hclgevf_get_rss_key_size,
+	.get_rss_indir_size = hclgevf_get_rss_indir_size,
+	.get_rss = hclgevf_get_rss,
+	.set_rss = hclgevf_set_rss,
+	.get_tc_size = hclgevf_get_tc_size,
+	.get_fw_version = hclgevf_get_fw_version,
+	.set_vlan_filter = hclgevf_set_vlan_filter,
+};
+
+static struct hnae3_ae_algo ae_algovf = {
+	.ops = &hclgevf_ops,
+	.name = HCLGEVF_NAME,
+	.pdev_id_table = ae_algovf_pci_tbl,
+};
+
+static int hclgevf_init(void)
+{
+	pr_info("%s is initializing\n", HCLGEVF_NAME);
+
+	return hnae3_register_ae_algo(&ae_algovf);
+}
+
+static void hclgevf_exit(void)
+{
+	hnae3_unregister_ae_algo(&ae_algovf);
+}
+module_init(hclgevf_init);
+module_exit(hclgevf_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Huawei Tech. Co., Ltd.");
+MODULE_DESCRIPTION("HCLGEVF Driver");
+MODULE_VERSION(HCLGEVF_MOD_VERSION);
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h
new file mode 100644
index 0000000..a63bee4
--- /dev/null
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h
@@ -0,0 +1,164 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/* Copyright (c) 2016-2017 Hisilicon Limited. */
+
+#ifndef __HCLGEVF_MAIN_H
+#define __HCLGEVF_MAIN_H
+#include <linux/fs.h>
+#include <linux/types.h>
+#include "hclge_mbx.h"
+#include "hclgevf_cmd.h"
+#include "hnae3.h"
+
+#define HCLGEVF_MOD_VERSION "v1.0"
+#define HCLGEVF_DRIVER_NAME "hclgevf"
+
+#define HCLGEVF_ROCEE_VECTOR_NUM	0
+#define HCLGEVF_MISC_VECTOR_NUM		0
+
+#define HCLGEVF_INVALID_VPORT		0xffff
+
+/* In practice this number depends upon the total number of VFs
+ * created by the physical function. The maximum number of possible
+ * vectors per VF, however, is {VFn(1-32), VECTn(32 + 1)}.
+ */
+#define HCLGEVF_MAX_VF_VECTOR_NUM	(32 + 1)
+
+#define HCLGEVF_VECTOR_REG_BASE		0x20000
+#define HCLGEVF_MISC_VECTOR_REG_BASE	0x20400
+#define HCLGEVF_VECTOR_REG_OFFSET	0x4
+#define HCLGEVF_VECTOR_VF_OFFSET		0x100000
+
+/* Vector0 interrupt CMDQ event source register(RW) */
+#define HCLGEVF_VECTOR0_CMDQ_SRC_REG	0x27100
+/* CMDQ register bits for RX event(=MBX event) */
+#define HCLGEVF_VECTOR0_RX_CMDQ_INT_B	1
+
+#define HCLGEVF_TQP_RESET_TRY_TIMES	10
+
+#define HCLGEVF_RSS_IND_TBL_SIZE		512
+#define HCLGEVF_RSS_SET_BITMAP_MSK	0xffff
+#define HCLGEVF_RSS_KEY_SIZE		40
+#define HCLGEVF_RSS_HASH_ALGO_TOEPLITZ	0
+#define HCLGEVF_RSS_HASH_ALGO_SIMPLE	1
+#define HCLGEVF_RSS_HASH_ALGO_SYMMETRIC	2
+#define HCLGEVF_RSS_HASH_ALGO_MASK	0xf
+#define HCLGEVF_RSS_CFG_TBL_NUM \
+	(HCLGEVF_RSS_IND_TBL_SIZE / HCLGEVF_RSS_CFG_TBL_SIZE)
+
+/* states of hclgevf device & tasks */
+enum hclgevf_states {
+	/* device states */
+	HCLGEVF_STATE_DOWN,
+	HCLGEVF_STATE_DISABLED,
+	/* task states */
+	HCLGEVF_STATE_SERVICE_SCHED,
+	HCLGEVF_STATE_MBX_SERVICE_SCHED,
+	HCLGEVF_STATE_MBX_HANDLING,
+};
+
+#define HCLGEVF_MPF_ENABLE 1
+
+struct hclgevf_mac {
+	u8 mac_addr[ETH_ALEN];
+	int link;
+};
+
+struct hclgevf_hw {
+	void __iomem *io_base;
+	int num_vec;
+	struct hclgevf_cmq cmq;
+	struct hclgevf_mac mac;
+	void *hdev; /* hclgevf device it is part of */
+};
+
+/* TQP stats */
+struct hclgevf_tqp_stats {
+	/* query_tqp_tx_queue_statistics, opcode id: 0x0B03 */
+	u64 rcb_tx_ring_pktnum_rcd; /* 32bit */
+	/* query_tqp_rx_queue_statistics, opcode id: 0x0B13 */
+	u64 rcb_rx_ring_pktnum_rcd; /* 32bit */
+};
+
+struct hclgevf_tqp {
+	struct device *dev;	/* device for DMA mapping */
+	struct hnae3_queue q;
+	struct hclgevf_tqp_stats tqp_stats;
+	u16 index;		/* global index in a NIC controller */
+
+	bool alloced;
+};
+
+struct hclgevf_cfg {
+	u8 vmdq_vport_num;
+	u8 tc_num;
+	u16 tqp_desc_num;
+	u16 rx_buf_len;
+	u8 phy_addr;
+	u8 media_type;
+	u8 mac_addr[ETH_ALEN];
+	u32 numa_node_map;
+};
+
+struct hclgevf_rss_cfg {
+	u8  rss_hash_key[HCLGEVF_RSS_KEY_SIZE]; /* user configured hash keys */
+	u32 hash_algo;
+	u32 rss_size;
+	u8 hw_tc_map;
+	u8  rss_indirection_tbl[HCLGEVF_RSS_IND_TBL_SIZE]; /* shadow table */
+};
+
+struct hclgevf_misc_vector {
+	u8 __iomem *addr;
+	int vector_irq;
+};
+
+struct hclgevf_dev {
+	struct pci_dev *pdev;
+	struct hnae3_ae_dev *ae_dev;
+	struct hclgevf_hw hw;
+	struct hclgevf_misc_vector misc_vector;
+	struct hclgevf_rss_cfg rss_cfg;
+	unsigned long state;
+
+	u32 fw_version;
+	u16 num_tqps;		/* num task queue pairs of this VF */
+
+	u16 alloc_rss_size;	/* allocated RSS task queues */
+	u16 rss_size_max;	/* HW defined max RSS task queues */
+
+	u16 num_alloc_vport;	/* num vports this driver supports */
+	u32 numa_node_mask;
+	u16 rx_buf_len;
+	u16 num_desc;
+	u8 hw_tc_map;
+
+	u16 num_msi;
+	u16 num_msi_left;
+	u16 num_msi_used;
+	u32 base_msi_vector;
+	u16 *vector_status;
+	int *vector_irq;
+
+	bool accept_mta_mc; /* whether to accept multicast via the MTA filter */
+	struct hclgevf_mbx_resp_status mbx_resp; /* mailbox response */
+
+	struct timer_list service_timer;
+	struct work_struct service_task;
+	struct work_struct mbx_service_task;
+
+	struct hclgevf_tqp *htqp;
+
+	struct hnae3_handle nic;
+	struct hnae3_handle roce;
+
+	struct hnae3_client *nic_client;
+	struct hnae3_client *roce_client;
+	u32 flag;
+};
+
+int hclgevf_send_mbx_msg(struct hclgevf_dev *hdev, u16 code, u16 subcode,
+			 const u8 *msg_data, u8 msg_len, bool need_resp,
+			 u8 *resp_data, u16 resp_len);
+void hclgevf_mbx_handler(struct hclgevf_dev *hdev);
+void hclgevf_update_link_status(struct hclgevf_dev *hdev, int link_state);
+#endif
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c
new file mode 100644
index 0000000..e39cad2
--- /dev/null
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c
@@ -0,0 +1,181 @@
+// SPDX-License-Identifier: GPL-2.0+
+// Copyright (c) 2016-2017 Hisilicon Limited.
+
+#include "hclge_mbx.h"
+#include "hclgevf_main.h"
+#include "hnae3.h"
+
+static void hclgevf_reset_mbx_resp_status(struct hclgevf_dev *hdev)
+{
+	/* this function should be called with mbx_resp.mbx_mutex held
+	 * to protect received_resp from racing with the mailbox handler
+	 */
+	hdev->mbx_resp.received_resp  = false;
+	hdev->mbx_resp.origin_mbx_msg = 0;
+	hdev->mbx_resp.resp_status    = 0;
+	memset(hdev->mbx_resp.additional_info, 0, HCLGE_MBX_MAX_RESP_DATA_SIZE);
+}
+
+/* hclgevf_get_mbx_resp: used to get a response from the PF after the VF sends
+ * a mailbox message to it.
+ * @hdev: pointer to struct hclgevf_dev
+ * @code0: the message code of the original VF request
+ * @code1: the message subcode of the original VF request
+ * @resp_data: pointer to store the response data from the PF
+ * @resp_len: the length of the resp_data array
+ */
+static int hclgevf_get_mbx_resp(struct hclgevf_dev *hdev, u16 code0, u16 code1,
+				u8 *resp_data, u16 resp_len)
+{
+#define HCLGEVF_MAX_TRY_TIMES	500
+#define HCLGEVF_SLEEP_USECOND	1000
+	struct hclgevf_mbx_resp_status *mbx_resp;
+	u16 r_code0, r_code1;
+	int i = 0;
+
+	if (resp_len > HCLGE_MBX_MAX_RESP_DATA_SIZE) {
+		dev_err(&hdev->pdev->dev,
+			"VF mbx response len(=%d) exceeds maximum(=%d)\n",
+			resp_len,
+			HCLGE_MBX_MAX_RESP_DATA_SIZE);
+		return -EINVAL;
+	}
+
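+	/* busy-wait for the PF's response; in the worst case this polls
+	 * for HCLGEVF_MAX_TRY_TIMES * HCLGEVF_SLEEP_USECOND us (~500ms)
+	 */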
+	while ((!hdev->mbx_resp.received_resp) && (i < HCLGEVF_MAX_TRY_TIMES)) {
+		udelay(HCLGEVF_SLEEP_USECOND);
+		i++;
+	}
+
+	if (i >= HCLGEVF_MAX_TRY_TIMES) {
+		dev_err(&hdev->pdev->dev,
+			"VF could not get mbx resp(=%d) from PF in %d tries\n",
+			hdev->mbx_resp.received_resp, i);
+		return -EIO;
+	}
+
+	mbx_resp = &hdev->mbx_resp;
+	r_code0 = (u16)(mbx_resp->origin_mbx_msg >> 16);
+	r_code1 = (u16)(mbx_resp->origin_mbx_msg & 0xff);
+	if (resp_data)
+		memcpy(resp_data, &mbx_resp->additional_info[0], resp_len);
+
+	hclgevf_reset_mbx_resp_status(hdev);
+
+	if (!(r_code0 == code0 && r_code1 == code1 && !mbx_resp->resp_status)) {
+		dev_err(&hdev->pdev->dev,
+			"VF could not match resp code(code0=%d,code1=%d), %d",
+			code0, code1, mbx_resp->resp_status);
+		return -EIO;
+	}
+
+	return 0;
+}
+
+int hclgevf_send_mbx_msg(struct hclgevf_dev *hdev, u16 code, u16 subcode,
+			 const u8 *msg_data, u8 msg_len, bool need_resp,
+			 u8 *resp_data, u16 resp_len)
+{
+	struct hclge_mbx_vf_to_pf_cmd *req;
+	struct hclgevf_desc desc;
+	int status;
+
+	req = (struct hclge_mbx_vf_to_pf_cmd *)desc.data;
+
+	/* first two bytes are reserved for code & subcode */
+	if (msg_len > (HCLGE_MBX_MAX_MSG_SIZE - 2)) {
+		dev_err(&hdev->pdev->dev,
+			"VF send mbx msg fail, msg len %d exceeds max len %d\n",
+			msg_len, HCLGE_MBX_MAX_MSG_SIZE);
+		return -EINVAL;
+	}
+
+	hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_MBX_VF_TO_PF, false);
+	req->msg[0] = code;
+	req->msg[1] = subcode;
+	memcpy(&req->msg[2], msg_data, msg_len);
+
+	/* synchronous send */
+	if (need_resp) {
+		mutex_lock(&hdev->mbx_resp.mbx_mutex);
+		hclgevf_reset_mbx_resp_status(hdev);
+		status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
+		if (status) {
+			dev_err(&hdev->pdev->dev,
+				"VF failed(=%d) to send mbx message to PF\n",
+				status);
+			mutex_unlock(&hdev->mbx_resp.mbx_mutex);
+			return status;
+		}
+
+		status = hclgevf_get_mbx_resp(hdev, code, subcode, resp_data,
+					      resp_len);
+		mutex_unlock(&hdev->mbx_resp.mbx_mutex);
+	} else {
+		/* asynchronous send */
+		status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
+		if (status) {
+			dev_err(&hdev->pdev->dev,
+				"VF failed(=%d) to send mbx message to PF\n",
+				status);
+			return status;
+		}
+	}
+
+	return status;
+}
+
+void hclgevf_mbx_handler(struct hclgevf_dev *hdev)
+{
+	struct hclgevf_mbx_resp_status *resp;
+	struct hclge_mbx_pf_to_vf_cmd *req;
+	struct hclgevf_cmq_ring *crq;
+	struct hclgevf_desc *desc;
+	u16 link_status, flag;
+	u8 *temp;
+	int i;
+
+	resp = &hdev->mbx_resp;
+	crq = &hdev->hw.cmq.crq;
+
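+	/* drain the CRQ: each valid descriptor is either a synchronous
+	 * response to an earlier VF request or an asynchronous PF
+	 * notification such as a link state change
+	 */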
+	flag = le16_to_cpu(crq->desc[crq->next_to_use].flag);
+	while (hnae_get_bit(flag, HCLGEVF_CMDQ_RX_OUTVLD_B)) {
+		desc = &crq->desc[crq->next_to_use];
+		req = (struct hclge_mbx_pf_to_vf_cmd *)desc->data;
+
+		switch (req->msg[0]) {
+		case HCLGE_MBX_PF_VF_RESP:
+			if (resp->received_resp)
+				dev_warn(&hdev->pdev->dev,
+					 "VF mbx resp flag not clear(%d)\n",
+					 req->msg[1]);
+			resp->received_resp = true;
+
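+			/* msg[1] and msg[2] echo the code/subcode of the
+			 * original request: code in the high 16 bits,
+			 * subcode in the low 16 bits
+			 */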
+			resp->origin_mbx_msg = (req->msg[1] << 16);
+			resp->origin_mbx_msg |= req->msg[2];
+			resp->resp_status = req->msg[3];
+
+			temp = (u8 *)&req->msg[4];
+			for (i = 0; i < HCLGE_MBX_MAX_RESP_DATA_SIZE; i++) {
+				resp->additional_info[i] = *temp;
+				temp++;
+			}
+			break;
+		case HCLGE_MBX_LINK_STAT_CHANGE:
+			link_status = le16_to_cpu(req->msg[1]);
+
+			/* update the upper layer with the new link status */
+			hclgevf_update_link_status(hdev, link_status);
+
+			break;
+		default:
+			dev_err(&hdev->pdev->dev,
+				"VF received unsupported(%d) mbx msg from PF\n",
+				req->msg[0]);
+			break;
+		}
+		hclge_mbx_ring_ptr_move_crq(crq);
+		flag = le16_to_cpu(crq->desc[crq->next_to_use].flag);
+	}
+
+	/* Write back the CMDQ_RQ head pointer; the M7 firmware needs it */
+	hclgevf_write_dev(&hdev->hw, HCLGEVF_NIC_CRQ_HEAD_REG,
+			  crq->next_to_use);
+}
diff --git a/drivers/net/ethernet/ibm/emac/core.c b/drivers/net/ethernet/ibm/emac/core.c
index 7feff24..71ddad1 100644
--- a/drivers/net/ethernet/ibm/emac/core.c
+++ b/drivers/net/ethernet/ibm/emac/core.c
@@ -199,18 +199,18 @@ static void __emac_set_multicast_list(struct emac_instance *dev);
 
 static inline int emac_phy_supports_gige(int phy_mode)
 {
-	return  phy_mode == PHY_MODE_GMII ||
-		phy_mode == PHY_MODE_RGMII ||
-		phy_mode == PHY_MODE_SGMII ||
-		phy_mode == PHY_MODE_TBI ||
-		phy_mode == PHY_MODE_RTBI;
+	return  phy_interface_mode_is_rgmii(phy_mode) ||
+		phy_mode == PHY_INTERFACE_MODE_GMII ||
+		phy_mode == PHY_INTERFACE_MODE_SGMII ||
+		phy_mode == PHY_INTERFACE_MODE_TBI ||
+		phy_mode == PHY_INTERFACE_MODE_RTBI;
 }
 
 static inline int emac_phy_gpcs(int phy_mode)
 {
-	return  phy_mode == PHY_MODE_SGMII ||
-		phy_mode == PHY_MODE_TBI ||
-		phy_mode == PHY_MODE_RTBI;
+	return  phy_mode == PHY_INTERFACE_MODE_SGMII ||
+		phy_mode == PHY_INTERFACE_MODE_TBI ||
+		phy_mode == PHY_INTERFACE_MODE_RTBI;
 }
 
 static inline void emac_tx_enable(struct emac_instance *dev)
@@ -2865,7 +2865,7 @@ static int emac_init_config(struct emac_instance *dev)
 	/* PHY mode needs some decoding */
 	dev->phy_mode = of_get_phy_mode(np);
 	if (dev->phy_mode < 0)
-		dev->phy_mode = PHY_MODE_NA;
+		dev->phy_mode = PHY_INTERFACE_MODE_NA;
 
 	/* Check EMAC version */
 	if (of_device_is_compatible(np, "ibm,emac4sync")) {
@@ -3168,7 +3168,7 @@ static int emac_probe(struct platform_device *ofdev)
 	printk(KERN_INFO "%s: EMAC-%d %pOF, MAC %pM\n",
 	       ndev->name, dev->cell_index, np, ndev->dev_addr);
 
-	if (dev->phy_mode == PHY_MODE_SGMII)
+	if (dev->phy_mode == PHY_INTERFACE_MODE_SGMII)
 		printk(KERN_NOTICE "%s: in SGMII mode\n", ndev->name);
 
 	if (dev->phy.address >= 0)
diff --git a/drivers/net/ethernet/ibm/emac/emac.h b/drivers/net/ethernet/ibm/emac/emac.h
index 5afcc27..bc14dcf 100644
--- a/drivers/net/ethernet/ibm/emac/emac.h
+++ b/drivers/net/ethernet/ibm/emac/emac.h
@@ -104,19 +104,6 @@ struct emac_regs {
 	} u1;
 };
 
-/*
- * PHY mode settings (EMAC <-> ZMII/RGMII bridge <-> PHY)
- */
-#define PHY_MODE_NA	PHY_INTERFACE_MODE_NA
-#define PHY_MODE_MII	PHY_INTERFACE_MODE_MII
-#define PHY_MODE_RMII	PHY_INTERFACE_MODE_RMII
-#define PHY_MODE_SMII	PHY_INTERFACE_MODE_SMII
-#define PHY_MODE_RGMII	PHY_INTERFACE_MODE_RGMII
-#define PHY_MODE_TBI	PHY_INTERFACE_MODE_TBI
-#define PHY_MODE_GMII	PHY_INTERFACE_MODE_GMII
-#define PHY_MODE_RTBI	PHY_INTERFACE_MODE_RTBI
-#define PHY_MODE_SGMII	PHY_INTERFACE_MODE_SGMII
-
 /* EMACx_MR0 */
 #define EMAC_MR0_RXI			0x80000000
 #define EMAC_MR0_TXI			0x40000000
diff --git a/drivers/net/ethernet/ibm/emac/phy.c b/drivers/net/ethernet/ibm/emac/phy.c
index 35865d0..aa070c0 100644
--- a/drivers/net/ethernet/ibm/emac/phy.c
+++ b/drivers/net/ethernet/ibm/emac/phy.c
@@ -96,7 +96,7 @@ int emac_mii_reset_gpcs(struct mii_phy *phy)
 	if ((val & BMCR_ISOLATE) && limit > 0)
 		gpcs_phy_write(phy, MII_BMCR, val & ~BMCR_ISOLATE);
 
-	if (limit > 0 && phy->mode == PHY_MODE_SGMII) {
+	if (limit > 0 && phy->mode == PHY_INTERFACE_MODE_SGMII) {
 		/* Configure GPCS interface to recommended setting for SGMII */
 		gpcs_phy_write(phy, 0x04, 0x8120); /* AsymPause, FDX */
 		gpcs_phy_write(phy, 0x07, 0x2801); /* msg_pg, toggle */
@@ -313,16 +313,16 @@ static int cis8201_init(struct mii_phy *phy)
 	epcr &= ~EPCR_MODE_MASK;
 
 	switch (phy->mode) {
-	case PHY_MODE_TBI:
+	case PHY_INTERFACE_MODE_TBI:
 		epcr |= EPCR_TBI_MODE;
 		break;
-	case PHY_MODE_RTBI:
+	case PHY_INTERFACE_MODE_RTBI:
 		epcr |= EPCR_RTBI_MODE;
 		break;
-	case PHY_MODE_GMII:
+	case PHY_INTERFACE_MODE_GMII:
 		epcr |= EPCR_GMII_MODE;
 		break;
-	case PHY_MODE_RGMII:
+	case PHY_INTERFACE_MODE_RGMII:
 	default:
 		epcr |= EPCR_RGMII_MODE;
 	}
diff --git a/drivers/net/ethernet/ibm/emac/rgmii.c b/drivers/net/ethernet/ibm/emac/rgmii.c
index c4a1ac3..00f5999 100644
--- a/drivers/net/ethernet/ibm/emac/rgmii.c
+++ b/drivers/net/ethernet/ibm/emac/rgmii.c
@@ -52,43 +52,28 @@
 /* RGMII bridge supports only GMII/TBI and RGMII/RTBI PHYs */
 static inline int rgmii_valid_mode(int phy_mode)
 {
-	return  phy_mode == PHY_MODE_GMII ||
-		phy_mode == PHY_MODE_MII ||
-		phy_mode == PHY_MODE_RGMII ||
-		phy_mode == PHY_MODE_TBI ||
-		phy_mode == PHY_MODE_RTBI;
-}
-
-static inline const char *rgmii_mode_name(int mode)
-{
-	switch (mode) {
-	case PHY_MODE_RGMII:
-		return "RGMII";
-	case PHY_MODE_TBI:
-		return "TBI";
-	case PHY_MODE_GMII:
-		return "GMII";
-	case PHY_MODE_MII:
-		return "MII";
-	case PHY_MODE_RTBI:
-		return "RTBI";
-	default:
-		BUG();
-	}
+	return  phy_interface_mode_is_rgmii(phy_mode) ||
+		phy_mode == PHY_INTERFACE_MODE_GMII ||
+		phy_mode == PHY_INTERFACE_MODE_MII ||
+		phy_mode == PHY_INTERFACE_MODE_TBI ||
+		phy_mode == PHY_INTERFACE_MODE_RTBI;
 }
 
 static inline u32 rgmii_mode_mask(int mode, int input)
 {
 	switch (mode) {
-	case PHY_MODE_RGMII:
+	case PHY_INTERFACE_MODE_RGMII:
+	case PHY_INTERFACE_MODE_RGMII_ID:
+	case PHY_INTERFACE_MODE_RGMII_RXID:
+	case PHY_INTERFACE_MODE_RGMII_TXID:
 		return RGMII_FER_RGMII(input);
-	case PHY_MODE_TBI:
+	case PHY_INTERFACE_MODE_TBI:
 		return RGMII_FER_TBI(input);
-	case PHY_MODE_GMII:
+	case PHY_INTERFACE_MODE_GMII:
 		return RGMII_FER_GMII(input);
-	case PHY_MODE_MII:
+	case PHY_INTERFACE_MODE_MII:
 		return RGMII_FER_MII(input);
-	case PHY_MODE_RTBI:
+	case PHY_INTERFACE_MODE_RTBI:
 		return RGMII_FER_RTBI(input);
 	default:
 		BUG();
@@ -115,7 +100,7 @@ int rgmii_attach(struct platform_device *ofdev, int input, int mode)
 	out_be32(&p->fer, in_be32(&p->fer) | rgmii_mode_mask(mode, input));
 
 	printk(KERN_NOTICE "%pOF: input %d in %s mode\n",
-	       ofdev->dev.of_node, input, rgmii_mode_name(mode));
+	       ofdev->dev.of_node, input, phy_modes(mode));
 
 	++dev->users;
 
diff --git a/drivers/net/ethernet/ibm/emac/zmii.c b/drivers/net/ethernet/ibm/emac/zmii.c
index 89c42d3..fdcc734 100644
--- a/drivers/net/ethernet/ibm/emac/zmii.c
+++ b/drivers/net/ethernet/ibm/emac/zmii.c
@@ -49,20 +49,20 @@
  */
 static inline int zmii_valid_mode(int mode)
 {
-	return  mode == PHY_MODE_MII ||
-		mode == PHY_MODE_RMII ||
-		mode == PHY_MODE_SMII ||
-		mode == PHY_MODE_NA;
+	return  mode == PHY_INTERFACE_MODE_MII ||
+		mode == PHY_INTERFACE_MODE_RMII ||
+		mode == PHY_INTERFACE_MODE_SMII ||
+		mode == PHY_INTERFACE_MODE_NA;
 }
 
 static inline const char *zmii_mode_name(int mode)
 {
 	switch (mode) {
-	case PHY_MODE_MII:
+	case PHY_INTERFACE_MODE_MII:
 		return "MII";
-	case PHY_MODE_RMII:
+	case PHY_INTERFACE_MODE_RMII:
 		return "RMII";
-	case PHY_MODE_SMII:
+	case PHY_INTERFACE_MODE_SMII:
 		return "SMII";
 	default:
 		BUG();
@@ -72,11 +72,11 @@ static inline const char *zmii_mode_name(int mode)
 static inline u32 zmii_mode_mask(int mode, int input)
 {
 	switch (mode) {
-	case PHY_MODE_MII:
+	case PHY_INTERFACE_MODE_MII:
 		return ZMII_FER_MII(input);
-	case PHY_MODE_RMII:
+	case PHY_INTERFACE_MODE_RMII:
 		return ZMII_FER_RMII(input);
-	case PHY_MODE_SMII:
+	case PHY_INTERFACE_MODE_SMII:
 		return ZMII_FER_SMII(input);
 	default:
 		return 0;
@@ -106,27 +106,27 @@ int zmii_attach(struct platform_device *ofdev, int input, int *mode)
 	 * Please, always specify PHY mode in your board port to avoid
 	 * any surprises.
 	 */
-	if (dev->mode == PHY_MODE_NA) {
-		if (*mode == PHY_MODE_NA) {
+	if (dev->mode == PHY_INTERFACE_MODE_NA) {
+		if (*mode == PHY_INTERFACE_MODE_NA) {
 			u32 r = dev->fer_save;
 
 			ZMII_DBG(dev, "autodetecting mode, FER = 0x%08x" NL, r);
 
 			if (r & (ZMII_FER_MII(0) | ZMII_FER_MII(1)))
-				dev->mode = PHY_MODE_MII;
+				dev->mode = PHY_INTERFACE_MODE_MII;
 			else if (r & (ZMII_FER_RMII(0) | ZMII_FER_RMII(1)))
-				dev->mode = PHY_MODE_RMII;
+				dev->mode = PHY_INTERFACE_MODE_RMII;
 			else
-				dev->mode = PHY_MODE_SMII;
-		} else
+				dev->mode = PHY_INTERFACE_MODE_SMII;
+		} else {
 			dev->mode = *mode;
-
+		}
 		printk(KERN_NOTICE "%pOF: bridge in %s mode\n",
 		       ofdev->dev.of_node,
 		       zmii_mode_name(dev->mode));
 	} else {
 		/* All inputs must use the same mode */
-		if (*mode != PHY_MODE_NA && *mode != dev->mode) {
+		if (*mode != PHY_INTERFACE_MODE_NA && *mode != dev->mode) {
 			printk(KERN_ERR
 			       "%pOF: invalid mode %d specified for input %d\n",
 			       ofdev->dev.of_node, *mode, input);
@@ -246,7 +246,7 @@ static int zmii_probe(struct platform_device *ofdev)
 
 	mutex_init(&dev->lock);
 	dev->ofdev = ofdev;
-	dev->mode = PHY_MODE_NA;
+	dev->mode = PHY_INTERFACE_MODE_NA;
 
 	rc = -ENXIO;
 	if (of_address_to_resource(np, 0, &regs)) {
diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c
index 1dc4aef..461014b 100644
--- a/drivers/net/ethernet/ibm/ibmvnic.c
+++ b/drivers/net/ethernet/ibm/ibmvnic.c
@@ -59,6 +59,7 @@
 #include <linux/mm.h>
 #include <linux/ethtool.h>
 #include <linux/proc_fs.h>
+#include <linux/if_arp.h>
 #include <linux/in.h>
 #include <linux/ip.h>
 #include <linux/ipv6.h>
@@ -1153,6 +1154,9 @@ static int build_hdr_data(u8 hdr_field, struct sk_buff *skb,
 			hdr_len[2] = tcp_hdrlen(skb);
 		else if (ipv6_hdr(skb)->nexthdr == IPPROTO_UDP)
 			hdr_len[2] = sizeof(struct udphdr);
+	} else if (skb->protocol == htons(ETH_P_ARP)) {
+		hdr_len[1] = arp_hdr_len(skb->dev);
+		hdr_len[2] = 0;
 	}
 
 	memset(hdr_data, 0, 120);
@@ -1386,7 +1390,8 @@ static int ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
 	/* determine if l2/3/4 headers are sent to firmware */
 	if ((*hdrs >> 7) & 1 &&
 	    (skb->protocol == htons(ETH_P_IP) ||
-	     skb->protocol == htons(ETH_P_IPV6))) {
+	     skb->protocol == htons(ETH_P_IPV6) ||
+	     skb->protocol == htons(ETH_P_ARP))) {
 		build_hdr_descs_arr(tx_buff, &num_entries, *hdrs);
 		tx_crq.v1.n_crq_elem = num_entries;
 		tx_buff->indir_arr[0] = tx_crq;
@@ -2448,6 +2453,12 @@ static irqreturn_t ibmvnic_interrupt_rx(int irq, void *instance)
 	struct ibmvnic_sub_crq_queue *scrq = instance;
 	struct ibmvnic_adapter *adapter = scrq->adapter;
 
+	/* When booting a kdump kernel we can hit pending interrupts
+	 * prior to completing driver initialization.
+	 */
+	if (unlikely(adapter->state != VNIC_OPEN))
+		return IRQ_NONE;
+
 	adapter->rx_stats_buffers[scrq->scrq_num].interrupts++;
 
 	if (napi_schedule_prep(&adapter->napi[scrq->scrq_num])) {
@@ -4285,7 +4296,7 @@ static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id)
 	}
 
 	netdev = alloc_etherdev_mq(sizeof(struct ibmvnic_adapter),
-				   IBMVNIC_MAX_TX_QUEUES);
+				   IBMVNIC_MAX_QUEUES);
 	if (!netdev)
 		return -ENOMEM;
 
diff --git a/drivers/net/ethernet/ibm/ibmvnic.h b/drivers/net/ethernet/ibm/ibmvnic.h
index 4487f1e..2df79fd 100644
--- a/drivers/net/ethernet/ibm/ibmvnic.h
+++ b/drivers/net/ethernet/ibm/ibmvnic.h
@@ -39,7 +39,7 @@
 #define IBMVNIC_RX_WEIGHT		16
 /* when changing this, update IBMVNIC_IO_ENTITLEMENT_DEFAULT */
 #define IBMVNIC_BUFFS_PER_POOL	100
-#define IBMVNIC_MAX_TX_QUEUES	5
+#define IBMVNIC_MAX_QUEUES	10
 
 #define IBMVNIC_TSO_BUF_SZ	65536
 #define IBMVNIC_TSO_BUFS	64
diff --git a/drivers/net/ethernet/intel/e1000/e1000_ethtool.c b/drivers/net/ethernet/intel/e1000/e1000_ethtool.c
index 3b3983a..dc71e87 100644
--- a/drivers/net/ethernet/intel/e1000/e1000_ethtool.c
+++ b/drivers/net/ethernet/intel/e1000/e1000_ethtool.c
@@ -1838,8 +1838,8 @@ static void e1000_get_ethtool_stats(struct net_device *netdev,
 			p = (char *)adapter + stat->stat_offset;
 			break;
 		default:
-			WARN_ONCE(1, "Invalid E1000 stat type: %u index %d\n",
-				  stat->type, i);
+			netdev_WARN_ONCE(netdev, "Invalid E1000 stat type: %u index %d\n",
+					 stat->type, i);
 			continue;
 		}
 
diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h b/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h
index b0188b8..c577634 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h
@@ -198,7 +198,7 @@ enum i40e_admin_queue_opc {
 	i40e_aqc_opc_add_mirror_rule	= 0x0260,
 	i40e_aqc_opc_delete_mirror_rule	= 0x0261,
 
-	/* Pipeline Personalization Profile */
+	/* Dynamic Device Personalization */
 	i40e_aqc_opc_write_personalization_profile	= 0x0270,
 	i40e_aqc_opc_get_personalization_profile_list	= 0x0271,
 
@@ -1594,7 +1594,7 @@ struct i40e_aqc_add_delete_mirror_rule_completion {
 
 I40E_CHECK_CMD_LENGTH(i40e_aqc_add_delete_mirror_rule_completion);
 
-/* Pipeline Personalization Profile */
+/* Dynamic Device Personalization */
 struct i40e_aqc_write_personalization_profile {
 	u8      flags;
 	u8      reserved[3];
@@ -1605,7 +1605,7 @@ struct i40e_aqc_write_personalization_profile {
 
 I40E_CHECK_CMD_LENGTH(i40e_aqc_write_personalization_profile);
 
-struct i40e_aqc_write_ppp_resp {
+struct i40e_aqc_write_ddp_resp {
 	__le32 error_offset;
 	__le32 error_info;
 	__le32 addr_high;
@@ -1614,8 +1614,8 @@ struct i40e_aqc_write_ppp_resp {
 
 struct i40e_aqc_get_applied_profiles {
 	u8      flags;
-#define I40E_AQC_GET_PPP_GET_CONF	0x1
-#define I40E_AQC_GET_PPP_GET_RDPU_CONF	0x2
+#define I40E_AQC_GET_DDP_GET_CONF	0x1
+#define I40E_AQC_GET_DDP_GET_RDPU_CONF	0x2
 	u8      rsv[3];
 	__le32  reserved;
 	__le32  addr_high;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_common.c b/drivers/net/ethernet/intel/i40e/i40e_common.c
index 095965f..40c5f76 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_common.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_common.c
@@ -5236,7 +5236,7 @@ i40e_status i40e_aq_get_phy_register(struct i40e_hw *hw,
 }
 
 /**
- * i40e_aq_write_ppp - Write pipeline personalization profile (ppp)
+ * i40e_aq_write_ddp - Write dynamic device personalization (ddp)
  * @hw: pointer to the hw struct
  * @buff: command buffer (size in bytes = buff_size)
  * @buff_size: buffer size in bytes
@@ -5246,7 +5246,7 @@ i40e_status i40e_aq_get_phy_register(struct i40e_hw *hw,
  * @cmd_details: pointer to command details structure or NULL
  **/
 enum
-i40e_status_code i40e_aq_write_ppp(struct i40e_hw *hw, void *buff,
+i40e_status_code i40e_aq_write_ddp(struct i40e_hw *hw, void *buff,
 				   u16 buff_size, u32 track_id,
 				   u32 *error_offset, u32 *error_info,
 				   struct i40e_asq_cmd_details *cmd_details)
@@ -5255,7 +5255,7 @@ i40e_status_code i40e_aq_write_ppp(struct i40e_hw *hw, void *buff,
 	struct i40e_aqc_write_personalization_profile *cmd =
 		(struct i40e_aqc_write_personalization_profile *)
 		&desc.params.raw;
-	struct i40e_aqc_write_ppp_resp *resp;
+	struct i40e_aqc_write_ddp_resp *resp;
 	i40e_status status;
 
 	i40e_fill_default_direct_cmd_desc(&desc,
@@ -5271,7 +5271,7 @@ i40e_status_code i40e_aq_write_ppp(struct i40e_hw *hw, void *buff,
 
 	status = i40e_asq_send_command(hw, &desc, buff, buff_size, cmd_details);
 	if (!status) {
-		resp = (struct i40e_aqc_write_ppp_resp *)&desc.params.raw;
+		resp = (struct i40e_aqc_write_ddp_resp *)&desc.params.raw;
 		if (error_offset)
 			*error_offset = le32_to_cpu(resp->error_offset);
 		if (error_info)
@@ -5282,14 +5282,14 @@ i40e_status_code i40e_aq_write_ppp(struct i40e_hw *hw, void *buff,
 }
 
 /**
- * i40e_aq_get_ppp_list - Read pipeline personalization profile (ppp)
+ * i40e_aq_get_ddp_list - Read dynamic device personalization (ddp)
  * @hw: pointer to the hw struct
  * @buff: command buffer (size in bytes = buff_size)
  * @buff_size: buffer size in bytes
  * @cmd_details: pointer to command details structure or NULL
  **/
 enum
-i40e_status_code i40e_aq_get_ppp_list(struct i40e_hw *hw, void *buff,
+i40e_status_code i40e_aq_get_ddp_list(struct i40e_hw *hw, void *buff,
 				      u16 buff_size, u8 flags,
 				      struct i40e_asq_cmd_details *cmd_details)
 {
@@ -5364,11 +5364,6 @@ i40e_write_profile(struct i40e_hw *hw, struct i40e_profile_segment *profile,
 	u32 offset = 0, info = 0;
 	u32 i;
 
-	if (!track_id) {
-		i40e_debug(hw, I40E_DEBUG_PACKAGE, "Track_id can't be 0.");
-		return I40E_NOT_SUPPORTED;
-	}
-
 	dev_cnt = profile->device_table_count;
 
 	for (i = 0; i < dev_cnt; i++) {
@@ -5378,7 +5373,7 @@ i40e_write_profile(struct i40e_hw *hw, struct i40e_profile_segment *profile,
 				break;
 	}
 	if (i == dev_cnt) {
-		i40e_debug(hw, I40E_DEBUG_PACKAGE, "Device doesn't support PPP");
+		i40e_debug(hw, I40E_DEBUG_PACKAGE, "Device doesn't support DDP");
 		return I40E_ERR_DEVICE_NOT_SUPPORTED;
 	}
 
@@ -5397,7 +5392,7 @@ i40e_write_profile(struct i40e_hw *hw, struct i40e_profile_segment *profile,
 			sizeof(struct i40e_profile_section_header);
 
 		/* Write profile */
-		status = i40e_aq_write_ppp(hw, (void *)sec, (u16)section_size,
+		status = i40e_aq_write_ddp(hw, (void *)sec, (u16)section_size,
 					   track_id, &offset, &info, NULL);
 		if (status) {
 			i40e_debug(hw, I40E_DEBUG_PACKAGE,
@@ -5439,10 +5434,10 @@ i40e_add_pinfo_to_list(struct i40e_hw *hw,
 					     sec->section.offset);
 	pinfo->track_id = track_id;
 	pinfo->version = profile->version;
-	pinfo->op = I40E_PPP_ADD_TRACKID;
-	memcpy(pinfo->name, profile->name, I40E_PPP_NAME_SIZE);
+	pinfo->op = I40E_DDP_ADD_TRACKID;
+	memcpy(pinfo->name, profile->name, I40E_DDP_NAME_SIZE);
 
-	status = i40e_aq_write_ppp(hw, (void *)sec, sec->data_end,
+	status = i40e_aq_write_ddp(hw, (void *)sec, sec->data_end,
 				   track_id, &offset, &info, NULL);
 
 	return status;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
index 5f6cf721..34173f8 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
@@ -126,6 +126,10 @@ static const struct i40e_stats i40e_gstrings_stats[] = {
 	I40E_PF_STAT("link_xoff_rx", stats.link_xoff_rx),
 	I40E_PF_STAT("link_xon_tx", stats.link_xon_tx),
 	I40E_PF_STAT("link_xoff_tx", stats.link_xoff_tx),
+	I40E_PF_STAT("priority_xon_rx", stats.priority_xon_rx),
+	I40E_PF_STAT("priority_xoff_rx", stats.priority_xoff_rx),
+	I40E_PF_STAT("priority_xon_tx", stats.priority_xon_tx),
+	I40E_PF_STAT("priority_xoff_tx", stats.priority_xoff_tx),
 	I40E_PF_STAT("rx_size_64", stats.rx_size_64),
 	I40E_PF_STAT("rx_size_127", stats.rx_size_127),
 	I40E_PF_STAT("rx_size_255", stats.rx_size_255),
@@ -1585,6 +1589,8 @@ static int i40e_set_ringparam(struct net_device *netdev,
 			 */
 			rx_rings[i].desc = NULL;
 			rx_rings[i].rx_bi = NULL;
+			/* Clear cloned XDP RX-queue info before setup call */
+			memset(&rx_rings[i].xdp_rxq, 0, sizeof(rx_rings[i].xdp_rxq));
 			/* this is to allow wr32 to have something to write to
 			 * during early allocation of Rx buffers
 			 */
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
index 42dcaef..2ab22eba 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -47,8 +47,8 @@ static const char i40e_driver_string[] =
 #define DRV_KERN "-k"
 
 #define DRV_VERSION_MAJOR 2
-#define DRV_VERSION_MINOR 1
-#define DRV_VERSION_BUILD 14
+#define DRV_VERSION_MINOR 3
+#define DRV_VERSION_BUILD 2
 #define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \
 	     __stringify(DRV_VERSION_MINOR) "." \
 	     __stringify(DRV_VERSION_BUILD)    DRV_KERN
diff --git a/drivers/net/ethernet/intel/i40e/i40e_nvm.c b/drivers/net/ethernet/intel/i40e/i40e_nvm.c
index 7689c2e..425713f 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_nvm.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_nvm.c
@@ -389,7 +389,7 @@ static i40e_status i40e_read_nvm_buffer_aq(struct i40e_hw *hw, u16 offset,
 					   u16 *words, u16 *data)
 {
 	i40e_status ret_code;
-	u16 read_size = *words;
+	u16 read_size;
 	bool last_cmd = false;
 	u16 words_read = 0;
 	u16 i = 0;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_prototype.h b/drivers/net/ethernet/intel/i40e/i40e_prototype.h
index 3bb6659..b3cc89c 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_prototype.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_prototype.h
@@ -343,6 +343,37 @@ static inline struct i40e_rx_ptype_decoded decode_rx_desc_ptype(u8 ptype)
 	return i40e_ptype_lookup[ptype];
 }
 
+/**
+ * i40e_virtchnl_link_speed - Convert AdminQ link_speed to virtchnl definition
+ * @link_speed: the speed to convert
+ *
+ * Returns the link_speed in terms of the virtchnl interface, for use in
+ * converting link_speed as reported by the AdminQ into the format used for
+ * talking to virtchnl devices. If we can't represent the link speed properly,
+ * report LINK_SPEED_UNKNOWN.
+ **/
+static inline enum virtchnl_link_speed
+i40e_virtchnl_link_speed(enum i40e_aq_link_speed link_speed)
+{
+	switch (link_speed) {
+	case I40E_LINK_SPEED_100MB:
+		return VIRTCHNL_LINK_SPEED_100MB;
+	case I40E_LINK_SPEED_1GB:
+		return VIRTCHNL_LINK_SPEED_1GB;
+	case I40E_LINK_SPEED_10GB:
+		return VIRTCHNL_LINK_SPEED_10GB;
+	case I40E_LINK_SPEED_40GB:
+		return VIRTCHNL_LINK_SPEED_40GB;
+	case I40E_LINK_SPEED_20GB:
+		return VIRTCHNL_LINK_SPEED_20GB;
+	case I40E_LINK_SPEED_25GB:
+		return VIRTCHNL_LINK_SPEED_25GB;
+	case I40E_LINK_SPEED_UNKNOWN:
+	default:
+		return VIRTCHNL_LINK_SPEED_UNKNOWN;
+	}
+}
+
 /* prototype for functions used for SW locks */
 
 /* i40e_common for VF drivers*/
@@ -400,13 +431,15 @@ i40e_status i40e_write_phy_register(struct i40e_hw *hw, u8 page, u16 reg,
 u8 i40e_get_phy_address(struct i40e_hw *hw, u8 dev_num);
 i40e_status i40e_blink_phy_link_led(struct i40e_hw *hw,
 				    u32 time, u32 interval);
-i40e_status i40e_aq_write_ppp(struct i40e_hw *hw, void *buff,
+i40e_status i40e_aq_write_ddp(struct i40e_hw *hw, void *buff,
 			      u16 buff_size, u32 track_id,
 			      u32 *error_offset, u32 *error_info,
-			      struct i40e_asq_cmd_details *cmd_details);
-i40e_status i40e_aq_get_ppp_list(struct i40e_hw *hw, void *buff,
+			      struct i40e_asq_cmd_details *cmd_details);
+i40e_status i40e_aq_get_ddp_list(struct i40e_hw *hw, void *buff,
 				 u16 buff_size, u8 flags,
-				 struct i40e_asq_cmd_details *cmd_details);
+				 struct i40e_asq_cmd_details *cmd_details);
 struct i40e_generic_seg_header *
 i40e_find_segment_in_package(u32 segment_type,
 			     struct i40e_package_header *pkg_header);
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
index 5bc2748..40edb6e 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
@@ -27,6 +27,7 @@
 #include <linux/prefetch.h>
 #include <net/busy_poll.h>
 #include <linux/bpf_trace.h>
+#include <net/xdp.h>
 #include "i40e.h"
 #include "i40e_trace.h"
 #include "i40e_prototype.h"
@@ -1236,6 +1237,8 @@ void i40e_clean_rx_ring(struct i40e_ring *rx_ring)
 void i40e_free_rx_resources(struct i40e_ring *rx_ring)
 {
 	i40e_clean_rx_ring(rx_ring);
+	if (rx_ring->vsi->type == I40E_VSI_MAIN)
+		xdp_rxq_info_unreg(&rx_ring->xdp_rxq);
 	rx_ring->xdp_prog = NULL;
 	kfree(rx_ring->rx_bi);
 	rx_ring->rx_bi = NULL;
@@ -1256,6 +1259,7 @@ void i40e_free_rx_resources(struct i40e_ring *rx_ring)
 int i40e_setup_rx_descriptors(struct i40e_ring *rx_ring)
 {
 	struct device *dev = rx_ring->dev;
+	int err = -ENOMEM;
 	int bi_size;
 
 	/* warn if we are about to overwrite the pointer */
@@ -1283,13 +1287,21 @@ int i40e_setup_rx_descriptors(struct i40e_ring *rx_ring)
 	rx_ring->next_to_clean = 0;
 	rx_ring->next_to_use = 0;
 
+	/* XDP RX-queue info only needed for RX rings exposed to XDP */
+	if (rx_ring->vsi->type == I40E_VSI_MAIN) {
+		err = xdp_rxq_info_reg(&rx_ring->xdp_rxq, rx_ring->netdev,
+				       rx_ring->queue_index);
+		if (err < 0)
+			goto err;
+	}
+
 	rx_ring->xdp_prog = rx_ring->vsi->xdp_prog;
 
 	return 0;
 err:
 	kfree(rx_ring->rx_bi);
 	rx_ring->rx_bi = NULL;
-	return -ENOMEM;
+	return err;
 }
 
 /**
@@ -2068,11 +2080,13 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)
 	struct sk_buff *skb = rx_ring->skb;
 	u16 cleaned_count = I40E_DESC_UNUSED(rx_ring);
 	bool failure = false, xdp_xmit = false;
+	struct xdp_buff xdp;
+
+	xdp.rxq = &rx_ring->xdp_rxq;
 
 	while (likely(total_rx_packets < (unsigned int)budget)) {
 		struct i40e_rx_buffer *rx_buffer;
 		union i40e_rx_desc *rx_desc;
-		struct xdp_buff xdp;
 		unsigned int size;
 		u16 vlan_tag;
 		u8 rx_ptype;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.h b/drivers/net/ethernet/intel/i40e/i40e_txrx.h
index fbae118..2d08760 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.h
@@ -27,6 +27,8 @@
 #ifndef _I40E_TXRX_H_
 #define _I40E_TXRX_H_
 
+#include <net/xdp.h>
+
 /* Interrupt Throttling and Rate Limiting Goodies */
 
 #define I40E_MAX_ITR               0x0FF0  /* reg uses 2 usec resolution */
@@ -428,6 +430,7 @@ struct i40e_ring {
 					 */
 
 	struct i40e_channel *ch;
+	struct xdp_rxq_info xdp_rxq;
 } ____cacheline_internodealigned_in_smp;
 
 static inline bool ring_uses_build_skb(struct i40e_ring *ring)
diff --git a/drivers/net/ethernet/intel/i40e/i40e_type.h b/drivers/net/ethernet/intel/i40e/i40e_type.h
index 0e85687..5a708c3 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_type.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_type.h
@@ -1502,19 +1502,19 @@ struct i40e_lldp_variables {
 #define I40E_FLEX_57_SHIFT		6
 #define I40E_FLEX_57_MASK		(0x1ULL << I40E_FLEX_57_SHIFT)
 
-/* Version format for PPP */
-struct i40e_ppp_version {
+/* Version format for Dynamic Device Personalization(DDP) */
+struct i40e_ddp_version {
 	u8 major;
 	u8 minor;
 	u8 update;
 	u8 draft;
 };
 
-#define I40E_PPP_NAME_SIZE	32
+#define I40E_DDP_NAME_SIZE	32
 
 /* Package header */
 struct i40e_package_header {
-	struct i40e_ppp_version version;
+	struct i40e_ddp_version version;
 	u32 segment_count;
 	u32 segment_offset[1];
 };
@@ -1526,16 +1526,16 @@ struct i40e_generic_seg_header {
 #define SEGMENT_TYPE_I40E	0x00000011
 #define SEGMENT_TYPE_X722	0x00000012
 	u32 type;
-	struct i40e_ppp_version version;
+	struct i40e_ddp_version version;
 	u32 size;
-	char name[I40E_PPP_NAME_SIZE];
+	char name[I40E_DDP_NAME_SIZE];
 };
 
 struct i40e_metadata_segment {
 	struct i40e_generic_seg_header header;
-	struct i40e_ppp_version version;
+	struct i40e_ddp_version version;
 	u32 track_id;
-	char name[I40E_PPP_NAME_SIZE];
+	char name[I40E_DDP_NAME_SIZE];
 };
 
 struct i40e_device_id_entry {
@@ -1545,8 +1545,8 @@ struct i40e_device_id_entry {
 
 struct i40e_profile_segment {
 	struct i40e_generic_seg_header header;
-	struct i40e_ppp_version version;
-	char name[I40E_PPP_NAME_SIZE];
+	struct i40e_ddp_version version;
+	char name[I40E_DDP_NAME_SIZE];
 	u32 device_table_count;
 	struct i40e_device_id_entry device_table[1];
 };
@@ -1573,11 +1573,11 @@ struct i40e_profile_section_header {
 
 struct i40e_profile_info {
 	u32 track_id;
-	struct i40e_ppp_version version;
+	struct i40e_ddp_version version;
 	u8 op;
-#define I40E_PPP_ADD_TRACKID		0x01
-#define I40E_PPP_REMOVE_TRACKID	0x02
+#define I40E_DDP_ADD_TRACKID		0x01
+#define I40E_DDP_REMOVE_TRACKID	0x02
 	u8 reserved[7];
-	u8 name[I40E_PPP_NAME_SIZE];
+	u8 name[I40E_DDP_NAME_SIZE];
 };
 #endif /* _I40E_TYPE_H_ */
diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
index 36cb8e0..e9309fb 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
@@ -81,12 +81,12 @@ static void i40e_vc_notify_vf_link_state(struct i40e_vf *vf)
 	if (vf->link_forced) {
 		pfe.event_data.link_event.link_status = vf->link_up;
 		pfe.event_data.link_event.link_speed =
-			(vf->link_up ? I40E_LINK_SPEED_40GB : 0);
+			(vf->link_up ? VIRTCHNL_LINK_SPEED_40GB : 0);
 	} else {
 		pfe.event_data.link_event.link_status =
 			ls->link_info & I40E_AQ_LINK_UP;
 		pfe.event_data.link_event.link_speed =
-			(enum virtchnl_link_speed)ls->link_speed;
+			i40e_virtchnl_link_speed(ls->link_speed);
 	}
 	i40e_aq_send_msg_to_vf(hw, abs_vf_id, VIRTCHNL_OP_EVENT,
 			       0, (u8 *)&pfe, sizeof(pfe), NULL);
@@ -2749,6 +2749,7 @@ int i40e_vc_process_vf_msg(struct i40e_pf *pf, s16 vf_id, u32 v_opcode,
 		break;
 	case VIRTCHNL_OP_GET_VF_RESOURCES:
 		ret = i40e_vc_get_vf_resources_msg(vf, msg);
+		i40e_vc_notify_vf_link_state(vf);
 		break;
 	case VIRTCHNL_OP_RESET_VF:
 		i40e_vc_reset_vf_msg(vf);
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h b/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h
index 06b0457..435a112 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h
+++ b/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h
@@ -198,7 +198,7 @@ enum i40e_admin_queue_opc {
 	i40e_aqc_opc_add_mirror_rule	= 0x0260,
 	i40e_aqc_opc_delete_mirror_rule	= 0x0261,
 
-	/* Pipeline Personalization Profile */
+	/* Dynamic Device Personalization */
 	i40e_aqc_opc_write_personalization_profile	= 0x0270,
 	i40e_aqc_opc_get_personalization_profile_list	= 0x0271,
 
@@ -1562,7 +1562,7 @@ struct i40e_aqc_add_delete_mirror_rule_completion {
 
 I40E_CHECK_CMD_LENGTH(i40e_aqc_add_delete_mirror_rule_completion);
 
-/* Pipeline Personalization Profile */
+/* Dynamic Device Personalization */
 struct i40e_aqc_write_personalization_profile {
 	u8      flags;
 	u8      reserved[3];
@@ -1573,7 +1573,7 @@ struct i40e_aqc_write_personalization_profile {
 
 I40E_CHECK_CMD_LENGTH(i40e_aqc_write_personalization_profile);
 
-struct i40e_aqc_write_ppp_resp {
+struct i40e_aqc_write_ddp_resp {
 	__le32 error_offset;
 	__le32 error_info;
 	__le32 addr_high;
@@ -1582,8 +1582,8 @@ struct i40e_aqc_write_ppp_resp {
 
 struct i40e_aqc_get_applied_profiles {
 	u8      flags;
-#define I40E_AQC_GET_PPP_GET_CONF	0x1
-#define I40E_AQC_GET_PPP_GET_RDPU_CONF	0x2
+#define I40E_AQC_GET_DDP_GET_CONF	0x1
+#define I40E_AQC_GET_DDP_GET_RDPU_CONF	0x2
 	u8      rsv[3];
 	__le32  reserved;
 	__le32  addr_high;
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_common.c b/drivers/net/ethernet/intel/i40evf/i40e_common.c
index 7d70bf6..a946484 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_common.c
+++ b/drivers/net/ethernet/intel/i40evf/i40e_common.c
@@ -1202,7 +1202,7 @@ i40e_status i40e_vf_reset(struct i40e_hw *hw)
 }
 
 /**
- * i40evf_aq_write_ppp - Write pipeline personalization profile (ppp)
+ * i40evf_aq_write_ddp - Write dynamic device personalization (ddp)
  * @hw: pointer to the hw struct
  * @buff: command buffer (size in bytes = buff_size)
  * @buff_size: buffer size in bytes
@@ -1212,7 +1212,7 @@ i40e_status i40e_vf_reset(struct i40e_hw *hw)
  * @cmd_details: pointer to command details structure or NULL
  **/
 enum
-i40e_status_code i40evf_aq_write_ppp(struct i40e_hw *hw, void *buff,
+i40e_status_code i40evf_aq_write_ddp(struct i40e_hw *hw, void *buff,
 				     u16 buff_size, u32 track_id,
 				     u32 *error_offset, u32 *error_info,
 				     struct i40e_asq_cmd_details *cmd_details)
@@ -1221,7 +1221,7 @@ i40e_status_code i40evf_aq_write_ppp(struct i40e_hw *hw, void *buff,
 	struct i40e_aqc_write_personalization_profile *cmd =
 		(struct i40e_aqc_write_personalization_profile *)
 		&desc.params.raw;
-	struct i40e_aqc_write_ppp_resp *resp;
+	struct i40e_aqc_write_ddp_resp *resp;
 	i40e_status status;
 
 	i40evf_fill_default_direct_cmd_desc(&desc,
@@ -1237,7 +1237,7 @@ i40e_status_code i40evf_aq_write_ppp(struct i40e_hw *hw, void *buff,
 
 	status = i40evf_asq_send_command(hw, &desc, buff, buff_size, cmd_details);
 	if (!status) {
-		resp = (struct i40e_aqc_write_ppp_resp *)&desc.params.raw;
+		resp = (struct i40e_aqc_write_ddp_resp *)&desc.params.raw;
 		if (error_offset)
 			*error_offset = le32_to_cpu(resp->error_offset);
 		if (error_info)
@@ -1248,16 +1248,16 @@ i40e_status_code i40evf_aq_write_ppp(struct i40e_hw *hw, void *buff,
 }
 
 /**
- * i40evf_aq_get_ppp_list - Read pipeline personalization profile (ppp)
+ * i40evf_aq_get_ddp_list - Read dynamic device personalization (ddp)
  * @hw: pointer to the hw struct
  * @buff: command buffer (size in bytes = buff_size)
  * @buff_size: buffer size in bytes
  * @cmd_details: pointer to command details structure or NULL
  **/
 enum
-i40e_status_code i40evf_aq_get_ppp_list(struct i40e_hw *hw, void *buff,
+i40e_status_code i40evf_aq_get_ddp_list(struct i40e_hw *hw, void *buff,
 					u16 buff_size, u8 flags,
-				      struct i40e_asq_cmd_details *cmd_details)
+				       struct i40e_asq_cmd_details *cmd_details)
 {
 	struct i40e_aq_desc desc;
 	struct i40e_aqc_get_applied_profiles *cmd =
@@ -1330,11 +1330,6 @@ i40evf_write_profile(struct i40e_hw *hw, struct i40e_profile_segment *profile,
 	u32 offset = 0, info = 0;
 	u32 i;
 
-	if (!track_id) {
-		i40e_debug(hw, I40E_DEBUG_PACKAGE, "Track_id can't be 0.");
-		return I40E_NOT_SUPPORTED;
-	}
-
 	dev_cnt = profile->device_table_count;
 
 	for (i = 0; i < dev_cnt; i++) {
@@ -1344,7 +1339,7 @@ i40evf_write_profile(struct i40e_hw *hw, struct i40e_profile_segment *profile,
 				break;
 	}
 	if (i == dev_cnt) {
-		i40e_debug(hw, I40E_DEBUG_PACKAGE, "Device doesn't support PPP");
+		i40e_debug(hw, I40E_DEBUG_PACKAGE, "Device doesn't support DDP");
 		return I40E_ERR_DEVICE_NOT_SUPPORTED;
 	}
 
@@ -1363,7 +1358,7 @@ i40evf_write_profile(struct i40e_hw *hw, struct i40e_profile_segment *profile,
 			sizeof(struct i40e_profile_section_header);
 
 		/* Write profile */
-		status = i40evf_aq_write_ppp(hw, (void *)sec, (u16)section_size,
+		status = i40evf_aq_write_ddp(hw, (void *)sec, (u16)section_size,
 					     track_id, &offset, &info, NULL);
 		if (status) {
 			i40e_debug(hw, I40E_DEBUG_PACKAGE,
@@ -1405,10 +1400,10 @@ i40evf_add_pinfo_to_list(struct i40e_hw *hw,
 					     sec->section.offset);
 	pinfo->track_id = track_id;
 	pinfo->version = profile->version;
-	pinfo->op = I40E_PPP_ADD_TRACKID;
-	memcpy(pinfo->name, profile->name, I40E_PPP_NAME_SIZE);
+	pinfo->op = I40E_DDP_ADD_TRACKID;
+	memcpy(pinfo->name, profile->name, I40E_DDP_NAME_SIZE);
 
-	status = i40evf_aq_write_ppp(hw, (void *)sec, sec->data_end,
+	status = i40evf_aq_write_ddp(hw, (void *)sec, sec->data_end,
 				     track_id, &offset, &info, NULL);
 	return status;
 }
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_prototype.h b/drivers/net/ethernet/intel/i40evf/i40e_prototype.h
index b624b59..47c4299 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_prototype.h
+++ b/drivers/net/ethernet/intel/i40evf/i40e_prototype.h
@@ -131,13 +131,15 @@ i40e_status i40e_write_phy_register(struct i40e_hw *hw, u8 page, u16 reg,
 u8 i40e_get_phy_address(struct i40e_hw *hw, u8 dev_num);
 i40e_status i40e_blink_phy_link_led(struct i40e_hw *hw,
 				    u32 time, u32 interval);
-i40e_status i40evf_aq_write_ppp(struct i40e_hw *hw, void *buff,
+i40e_status i40evf_aq_write_ddp(struct i40e_hw *hw, void *buff,
 				u16 buff_size, u32 track_id,
 				u32 *error_offset, u32 *error_info,
-				struct i40e_asq_cmd_details *cmd_details);
-i40e_status i40evf_aq_get_ppp_list(struct i40e_hw *hw, void *buff,
+				struct i40e_asq_cmd_details *
+				cmd_details);
+i40e_status i40evf_aq_get_ddp_list(struct i40e_hw *hw, void *buff,
 				   u16 buff_size, u8 flags,
-				   struct i40e_asq_cmd_details *cmd_details);
+				   struct i40e_asq_cmd_details *
+				   cmd_details);
 struct i40e_generic_seg_header *
 i40evf_find_segment_in_package(u32 segment_type,
 			       struct i40e_package_header *pkg_header);
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_type.h b/drivers/net/ethernet/intel/i40evf/i40e_type.h
index 213b773..6afc316 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_type.h
+++ b/drivers/net/ethernet/intel/i40evf/i40e_type.h
@@ -1422,19 +1422,19 @@ enum i40e_reset_type {
 #define I40E_FD_INSET_FLEX_WORD57_MASK		(0x1ULL << \
 					I40E_FD_INSET_FLEX_WORD57_SHIFT)
 
-/* Version format for PPP */
-struct i40e_ppp_version {
+/* Version format for Dynamic Device Personalization (DDP) */
+struct i40e_ddp_version {
 	u8 major;
 	u8 minor;
 	u8 update;
 	u8 draft;
 };
 
-#define I40E_PPP_NAME_SIZE	32
+#define I40E_DDP_NAME_SIZE	32
 
 /* Package header */
 struct i40e_package_header {
-	struct i40e_ppp_version version;
+	struct i40e_ddp_version version;
 	u32 segment_count;
 	u32 segment_offset[1];
 };
@@ -1446,16 +1446,16 @@ struct i40e_generic_seg_header {
 #define SEGMENT_TYPE_I40E	0x00000011
 #define SEGMENT_TYPE_X722	0x00000012
 	u32 type;
-	struct i40e_ppp_version version;
+	struct i40e_ddp_version version;
 	u32 size;
-	char name[I40E_PPP_NAME_SIZE];
+	char name[I40E_DDP_NAME_SIZE];
 };
 
 struct i40e_metadata_segment {
 	struct i40e_generic_seg_header header;
-	struct i40e_ppp_version version;
+	struct i40e_ddp_version version;
 	u32 track_id;
-	char name[I40E_PPP_NAME_SIZE];
+	char name[I40E_DDP_NAME_SIZE];
 };
 
 struct i40e_device_id_entry {
@@ -1465,8 +1465,8 @@ struct i40e_device_id_entry {
 
 struct i40e_profile_segment {
 	struct i40e_generic_seg_header header;
-	struct i40e_ppp_version version;
-	char name[I40E_PPP_NAME_SIZE];
+	struct i40e_ddp_version version;
+	char name[I40E_DDP_NAME_SIZE];
 	u32 device_table_count;
 	struct i40e_device_id_entry device_table[1];
 };
@@ -1493,11 +1493,11 @@ struct i40e_profile_section_header {
 
 struct i40e_profile_info {
 	u32 track_id;
-	struct i40e_ppp_version version;
+	struct i40e_ddp_version version;
 	u8 op;
-#define I40E_PPP_ADD_TRACKID		0x01
-#define I40E_PPP_REMOVE_TRACKID	0x02
+#define I40E_DDP_ADD_TRACKID		0x01
+#define I40E_DDP_REMOVE_TRACKID	0x02
 	u8 reserved[7];
-	u8 name[I40E_PPP_NAME_SIZE];
+	u8 name[I40E_DDP_NAME_SIZE];
 };
 #endif /* _I40E_TYPE_H_ */
diff --git a/drivers/net/ethernet/intel/i40evf/i40evf.h b/drivers/net/ethernet/intel/i40evf/i40evf.h
index de0af52..47040ab 100644
--- a/drivers/net/ethernet/intel/i40evf/i40evf.h
+++ b/drivers/net/ethernet/intel/i40evf/i40evf.h
@@ -199,6 +199,9 @@ struct i40evf_adapter {
 	wait_queue_head_t down_waitqueue;
 	struct i40e_q_vector *q_vectors;
 	struct list_head vlan_filter_list;
+	struct list_head mac_filter_list;
+	/* Lock to protect accesses to MAC and VLAN lists */
+	spinlock_t mac_vlan_list_lock;
 	char misc_vector_name[IFNAMSIZ + 9];
 	int num_active_queues;
 	int num_req_queues;
@@ -206,7 +209,6 @@ struct i40evf_adapter {
 	/* TX */
 	struct i40e_ring *tx_rings;
 	u32 tx_timeout_count;
-	struct list_head mac_filter_list;
 	u32 tx_desc_count;
 
 	/* RX */
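
The i40evf.h hunk above is the heart of the locking rework in this
series: the MAC filter list moves next to the VLAN list and both become
protected by a single BH spinlock instead of the
__I40EVF_IN_CRITICAL_TASK busy-wait bit. As a rough illustration of the
resulting pattern, here is a minimal compilable userspace sketch; a
pthread mutex stands in for spin_lock_bh(), and the
filter_ctx/filter_add names are invented for illustration, not driver
API:

#include <pthread.h>
#include <stdlib.h>
#include <string.h>

struct filter {
	unsigned char macaddr[6];
	struct filter *next;
};

struct filter_ctx {
	struct filter *mac_list;	/* mac_filter_list analogue */
	struct filter *vlan_list;	/* vlan_filter_list analogue */
	pthread_mutex_t lock;		/* mac_vlan_list_lock analogue */
};

static struct filter *filter_find(struct filter *head,
				  const unsigned char *mac)
{
	for (; head; head = head->next)
		if (!memcmp(head->macaddr, mac, 6))
			return head;
	return NULL;
}

/* Like i40evf_add_filter(): search and insert under one lock, so a
 * concurrent reset or delete task never observes a half-linked node.
 */
static struct filter *filter_add(struct filter_ctx *ctx,
				 const unsigned char *mac)
{
	struct filter *f;

	pthread_mutex_lock(&ctx->lock);
	f = filter_find(ctx->mac_list, mac);
	if (!f) {
		f = calloc(1, sizeof(*f));
		if (f) {
			memcpy(f->macaddr, mac, 6);
			f->next = ctx->mac_list;
			ctx->mac_list = f;
		}
	}
	pthread_mutex_unlock(&ctx->lock);
	return f;
}

Unlike the old bit lock, a contended spinlock cannot time out, which is
why the add/del paths below lose their 50-iteration bail-out branches
entirely.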
diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_main.c b/drivers/net/ethernet/intel/i40evf/i40evf_main.c
index 7b2a4eb..f92587a 100644
--- a/drivers/net/ethernet/intel/i40evf/i40evf_main.c
+++ b/drivers/net/ethernet/intel/i40evf/i40evf_main.c
@@ -45,8 +45,8 @@ static const char i40evf_driver_string[] =
 #define DRV_KERN "-k"
 
 #define DRV_VERSION_MAJOR 3
-#define DRV_VERSION_MINOR 0
-#define DRV_VERSION_BUILD 1
+#define DRV_VERSION_MINOR 2
+#define DRV_VERSION_BUILD 2
 #define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \
 	     __stringify(DRV_VERSION_MINOR) "." \
 	     __stringify(DRV_VERSION_BUILD) \
@@ -276,37 +276,7 @@ void i40evf_irq_enable_queues(struct i40evf_adapter *adapter, u32 mask)
 		if (mask & BIT(i - 1)) {
 			wr32(hw, I40E_VFINT_DYN_CTLN1(i - 1),
 			     I40E_VFINT_DYN_CTLN1_INTENA_MASK |
-			     I40E_VFINT_DYN_CTLN1_ITR_INDX_MASK |
-			     I40E_VFINT_DYN_CTLN1_CLEARPBA_MASK);
-		}
-	}
-}
-
-/**
- * i40evf_fire_sw_int - Generate SW interrupt for specified vectors
- * @adapter: board private structure
- * @mask: bitmap of vectors to trigger
- **/
-static void i40evf_fire_sw_int(struct i40evf_adapter *adapter, u32 mask)
-{
-	struct i40e_hw *hw = &adapter->hw;
-	int i;
-	u32 dyn_ctl;
-
-	if (mask & 1) {
-		dyn_ctl = rd32(hw, I40E_VFINT_DYN_CTL01);
-		dyn_ctl |= I40E_VFINT_DYN_CTLN1_SWINT_TRIG_MASK |
-			   I40E_VFINT_DYN_CTLN1_ITR_INDX_MASK |
-			   I40E_VFINT_DYN_CTLN1_CLEARPBA_MASK;
-		wr32(hw, I40E_VFINT_DYN_CTL01, dyn_ctl);
-	}
-	for (i = 1; i < adapter->num_msix_vectors; i++) {
-		if (mask & BIT(i)) {
-			dyn_ctl = rd32(hw, I40E_VFINT_DYN_CTLN1(i - 1));
-			dyn_ctl |= I40E_VFINT_DYN_CTLN1_SWINT_TRIG_MASK |
-				   I40E_VFINT_DYN_CTLN1_ITR_INDX_MASK |
-				   I40E_VFINT_DYN_CTLN1_CLEARPBA_MASK;
-			wr32(hw, I40E_VFINT_DYN_CTLN1(i - 1), dyn_ctl);
+			     I40E_VFINT_DYN_CTLN1_ITR_INDX_MASK);
 		}
 	}
 }
@@ -337,15 +307,10 @@ static irqreturn_t i40evf_msix_aq(int irq, void *data)
 	struct net_device *netdev = data;
 	struct i40evf_adapter *adapter = netdev_priv(netdev);
 	struct i40e_hw *hw = &adapter->hw;
-	u32 val;
 
 	/* handle non-queue interrupts, these reads clear the registers */
-	val = rd32(hw, I40E_VFINT_ICR01);
-	val = rd32(hw, I40E_VFINT_ICR0_ENA1);
-
-	val = rd32(hw, I40E_VFINT_DYN_CTL01) |
-	      I40E_VFINT_DYN_CTL01_CLEARPBA_MASK;
-	wr32(hw, I40E_VFINT_DYN_CTL01, val);
+	rd32(hw, I40E_VFINT_ICR01);
+	rd32(hw, I40E_VFINT_ICR0_ENA1);
 
 	/* schedule work on the private workqueue */
 	schedule_work(&adapter->adminq_task);
@@ -706,7 +671,8 @@ static void i40evf_configure_rx(struct i40evf_adapter *adapter)
  * @adapter: board private structure
  * @vlan: vlan tag
  *
- * Returns ptr to the filter object or NULL
+ * Returns ptr to the filter object or NULL. Must be called while holding the
+ * mac_vlan_list_lock.
  **/
 static struct
 i40evf_vlan_filter *i40evf_find_vlan(struct i40evf_adapter *adapter, u16 vlan)
@@ -731,14 +697,8 @@ static struct
 i40evf_vlan_filter *i40evf_add_vlan(struct i40evf_adapter *adapter, u16 vlan)
 {
 	struct i40evf_vlan_filter *f = NULL;
-	int count = 50;
 
-	while (test_and_set_bit(__I40EVF_IN_CRITICAL_TASK,
-				&adapter->crit_section)) {
-		udelay(1);
-		if (--count == 0)
-			goto out;
-	}
+	spin_lock_bh(&adapter->mac_vlan_list_lock);
 
 	f = i40evf_find_vlan(adapter, vlan);
 	if (!f) {
@@ -755,8 +715,7 @@ i40evf_vlan_filter *i40evf_add_vlan(struct i40evf_adapter *adapter, u16 vlan)
 	}
 
 clearout:
-	clear_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section);
-out:
+	spin_unlock_bh(&adapter->mac_vlan_list_lock);
 	return f;
 }
 
@@ -768,21 +727,16 @@ i40evf_vlan_filter *i40evf_add_vlan(struct i40evf_adapter *adapter, u16 vlan)
 static void i40evf_del_vlan(struct i40evf_adapter *adapter, u16 vlan)
 {
 	struct i40evf_vlan_filter *f;
-	int count = 50;
 
-	while (test_and_set_bit(__I40EVF_IN_CRITICAL_TASK,
-				&adapter->crit_section)) {
-		udelay(1);
-		if (--count == 0)
-			return;
-	}
+	spin_lock_bh(&adapter->mac_vlan_list_lock);
 
 	f = i40evf_find_vlan(adapter, vlan);
 	if (f) {
 		f->remove = true;
 		adapter->aq_required |= I40EVF_FLAG_AQ_DEL_VLAN_FILTER;
 	}
-	clear_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section);
+
+	spin_unlock_bh(&adapter->mac_vlan_list_lock);
 }
 
 /**
@@ -824,7 +778,8 @@ static int i40evf_vlan_rx_kill_vid(struct net_device *netdev,
  * @adapter: board private structure
  * @macaddr: the MAC address
  *
- * Returns ptr to the filter object or NULL
+ * Returns ptr to the filter object or NULL. Must be called while holding the
+ * mac_vlan_list_lock.
  **/
 static struct
 i40evf_mac_filter *i40evf_find_filter(struct i40evf_adapter *adapter,
@@ -854,26 +809,17 @@ i40evf_mac_filter *i40evf_add_filter(struct i40evf_adapter *adapter,
 				     u8 *macaddr)
 {
 	struct i40evf_mac_filter *f;
-	int count = 50;
 
 	if (!macaddr)
 		return NULL;
 
-	while (test_and_set_bit(__I40EVF_IN_CRITICAL_TASK,
-				&adapter->crit_section)) {
-		udelay(1);
-		if (--count == 0)
-			return NULL;
-	}
+	spin_lock_bh(&adapter->mac_vlan_list_lock);
 
 	f = i40evf_find_filter(adapter, macaddr);
 	if (!f) {
 		f = kzalloc(sizeof(*f), GFP_ATOMIC);
-		if (!f) {
-			clear_bit(__I40EVF_IN_CRITICAL_TASK,
-				  &adapter->crit_section);
-			return NULL;
-		}
+		if (!f)
+			goto clearout;
 
 		ether_addr_copy(f->macaddr, macaddr);
 
@@ -884,7 +830,8 @@ i40evf_mac_filter *i40evf_add_filter(struct i40evf_adapter *adapter,
 		f->remove = false;
 	}
 
-	clear_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section);
+clearout:
+	spin_unlock_bh(&adapter->mac_vlan_list_lock);
 	return f;
 }
 
@@ -911,12 +858,16 @@ static int i40evf_set_mac(struct net_device *netdev, void *p)
 	if (adapter->flags & I40EVF_FLAG_ADDR_SET_BY_PF)
 		return -EPERM;
 
+	spin_lock_bh(&adapter->mac_vlan_list_lock);
+
 	f = i40evf_find_filter(adapter, hw->mac.addr);
 	if (f) {
 		f->remove = true;
 		adapter->aq_required |= I40EVF_FLAG_AQ_DEL_MAC_FILTER;
 	}
 
+	spin_unlock_bh(&adapter->mac_vlan_list_lock);
+
 	f = i40evf_add_filter(adapter, addr->sa_data);
 	if (f) {
 		ether_addr_copy(hw->mac.addr, addr->sa_data);
@@ -937,7 +888,6 @@ static void i40evf_set_rx_mode(struct net_device *netdev)
 	struct netdev_hw_addr *uca;
 	struct netdev_hw_addr *mca;
 	struct netdev_hw_addr *ha;
-	int count = 50;
 
 	/* add addr if not already in the filter list */
 	netdev_for_each_uc_addr(uca, netdev) {
@@ -947,16 +897,8 @@ static void i40evf_set_rx_mode(struct net_device *netdev)
 		i40evf_add_filter(adapter, mca->addr);
 	}
 
-	while (test_and_set_bit(__I40EVF_IN_CRITICAL_TASK,
-				&adapter->crit_section)) {
-		udelay(1);
-		if (--count == 0) {
-			dev_err(&adapter->pdev->dev,
-				"Failed to get lock in %s\n", __func__);
-			return;
-		}
-	}
-	/* remove filter if not in netdev list */
+	spin_lock_bh(&adapter->mac_vlan_list_lock);
+
 	list_for_each_entry_safe(f, ftmp, &adapter->mac_filter_list, list) {
 		netdev_for_each_mc_addr(mca, netdev)
 			if (ether_addr_equal(mca->addr, f->macaddr))
@@ -995,7 +937,7 @@ static void i40evf_set_rx_mode(struct net_device *netdev)
 		 adapter->flags & I40EVF_FLAG_ALLMULTI_ON)
 		adapter->aq_required |= I40EVF_FLAG_AQ_RELEASE_ALLMULTI;
 
-	clear_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section);
+	spin_unlock_bh(&adapter->mac_vlan_list_lock);
 }
 
 /**
@@ -1058,6 +1000,8 @@ static void i40evf_configure(struct i40evf_adapter *adapter)
 /**
  * i40evf_up_complete - Finish the last steps of bringing up a connection
  * @adapter: board private structure
+ *
+ * Expects to be called while holding the __I40EVF_IN_CRITICAL_TASK bit lock.
  **/
 static void i40evf_up_complete(struct i40evf_adapter *adapter)
 {
@@ -1075,6 +1019,8 @@ static void i40evf_up_complete(struct i40evf_adapter *adapter)
 /**
  * i40e_down - Shutdown the connection processing
  * @adapter: board private structure
+ *
+ * Expects to be called while holding the __I40EVF_IN_CRITICAL_TASK bit lock.
  **/
 void i40evf_down(struct i40evf_adapter *adapter)
 {
@@ -1084,16 +1030,14 @@ void i40evf_down(struct i40evf_adapter *adapter)
 	if (adapter->state <= __I40EVF_DOWN_PENDING)
 		return;
 
-	while (test_and_set_bit(__I40EVF_IN_CRITICAL_TASK,
-				&adapter->crit_section))
-		usleep_range(500, 1000);
-
 	netif_carrier_off(netdev);
 	netif_tx_disable(netdev);
 	adapter->link_up = false;
 	i40evf_napi_disable_all(adapter);
 	i40evf_irq_disable(adapter);
 
+	spin_lock_bh(&adapter->mac_vlan_list_lock);
+
 	/* remove all MAC filters */
 	list_for_each_entry(f, &adapter->mac_filter_list, list) {
 		f->remove = true;
@@ -1102,6 +1046,9 @@ void i40evf_down(struct i40evf_adapter *adapter)
 	list_for_each_entry(f, &adapter->vlan_filter_list, list) {
 		f->remove = true;
 	}
+
+	spin_unlock_bh(&adapter->mac_vlan_list_lock);
+
 	if (!(adapter->flags & I40EVF_FLAG_PF_COMMS_FAILED) &&
 	    adapter->state != __I40EVF_RESETTING) {
 		/* cancel any current operation */
@@ -1115,7 +1062,6 @@ void i40evf_down(struct i40evf_adapter *adapter)
 		adapter->aq_required |= I40EVF_FLAG_AQ_DISABLE_QUEUES;
 	}
 
-	clear_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section);
 	mod_timer_pending(&adapter->watchdog_timer, jiffies + 1);
 }
 
@@ -1770,13 +1716,6 @@ static void i40evf_watchdog_task(struct work_struct *work)
 	if (adapter->state == __I40EVF_RUNNING)
 		i40evf_request_stats(adapter);
 watchdog_done:
-	if (adapter->state == __I40EVF_RUNNING) {
-		i40evf_irq_enable_queues(adapter, ~0);
-		i40evf_fire_sw_int(adapter, 0xFF);
-	} else {
-		i40evf_fire_sw_int(adapter, 0x1);
-	}
-
 	clear_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section);
 restart_watchdog:
 	if (adapter->state == __I40EVF_REMOVE)
@@ -1796,7 +1735,11 @@ static void i40evf_disable_vf(struct i40evf_adapter *adapter)
 
 	adapter->flags |= I40EVF_FLAG_PF_COMMS_FAILED;
 
-	if (netif_running(adapter->netdev)) {
+	/* We don't use netif_running() because it may be true prior to
+	 * ndo_open() returning, so we can't assume it means all our open
+	 * tasks have finished, since we're not holding the rtnl_lock here.
+	 */
+	if (adapter->state == __I40EVF_RUNNING) {
 		set_bit(__I40E_VSI_DOWN, adapter->vsi.state);
 		netif_carrier_off(adapter->netdev);
 		netif_tx_disable(adapter->netdev);
@@ -1808,6 +1751,8 @@ static void i40evf_disable_vf(struct i40evf_adapter *adapter)
 		i40evf_free_all_rx_resources(adapter);
 	}
 
+	spin_lock_bh(&adapter->mac_vlan_list_lock);
+
 	/* Delete all of the filters, both MAC and VLAN. */
 	list_for_each_entry_safe(f, ftmp, &adapter->mac_filter_list, list) {
 		list_del(&f->list);
@@ -1819,6 +1764,8 @@ static void i40evf_disable_vf(struct i40evf_adapter *adapter)
 		kfree(fv);
 	}
 
+	spin_unlock_bh(&adapter->mac_vlan_list_lock);
+
 	i40evf_free_misc_irq(adapter);
 	i40evf_reset_interrupt_capability(adapter);
 	i40evf_free_queues(adapter);
@@ -1854,6 +1801,7 @@ static void i40evf_reset_task(struct work_struct *work)
 	struct i40evf_mac_filter *f;
 	u32 reg_val;
 	int i = 0, err;
+	bool running;
 
 	while (test_and_set_bit(__I40EVF_IN_CLIENT_TASK,
 				&adapter->crit_section))
@@ -1913,7 +1861,13 @@ static void i40evf_reset_task(struct work_struct *work)
 	}
 
 continue_reset:
-	if (netif_running(netdev)) {
+	/* We don't use netif_running() because it may be true prior to
+	 * ndo_open() returning, so we can't assume it means all our open
+	 * tasks have finished, since we're not holding the rtnl_lock here.
+	 */
+	running = (adapter->state == __I40EVF_RUNNING);
+
+	if (running) {
 		netif_carrier_off(netdev);
 		netif_tx_stop_all_queues(netdev);
 		adapter->link_up = false;
@@ -1948,6 +1902,8 @@ static void i40evf_reset_task(struct work_struct *work)
 	adapter->aq_required |= I40EVF_FLAG_AQ_GET_CONFIG;
 	adapter->aq_required |= I40EVF_FLAG_AQ_MAP_VECTORS;
 
+	spin_lock_bh(&adapter->mac_vlan_list_lock);
+
 	/* re-add all MAC filters */
 	list_for_each_entry(f, &adapter->mac_filter_list, list) {
 		f->add = true;
@@ -1956,15 +1912,19 @@ static void i40evf_reset_task(struct work_struct *work)
 	list_for_each_entry(vlf, &adapter->vlan_filter_list, list) {
 		vlf->add = true;
 	}
+
+	spin_unlock_bh(&adapter->mac_vlan_list_lock);
+
 	adapter->aq_required |= I40EVF_FLAG_AQ_ADD_MAC_FILTER;
 	adapter->aq_required |= I40EVF_FLAG_AQ_ADD_VLAN_FILTER;
-	clear_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section);
-	clear_bit(__I40EVF_IN_CLIENT_TASK, &adapter->crit_section);
 	i40evf_misc_irq_enable(adapter);
 
 	mod_timer(&adapter->watchdog_timer, jiffies + 2);
 
-	if (netif_running(adapter->netdev)) {
+	/* We were running when the reset started, so we need to restore some
+	 * state here.
+	 */
+	if (running) {
 		/* allocate transmit descriptors */
 		err = i40evf_setup_all_tx_resources(adapter);
 		if (err)
@@ -1993,9 +1953,13 @@ static void i40evf_reset_task(struct work_struct *work)
 		adapter->state = __I40EVF_DOWN;
 		wake_up(&adapter->down_waitqueue);
 	}
+	clear_bit(__I40EVF_IN_CLIENT_TASK, &adapter->crit_section);
+	clear_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section);
 
 	return;
 reset_err:
+	clear_bit(__I40EVF_IN_CLIENT_TASK, &adapter->crit_section);
+	clear_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section);
 	dev_err(&adapter->pdev->dev, "failed to allocate resources during reinit\n");
 	i40evf_close(netdev);
 }
@@ -2239,8 +2203,14 @@ static int i40evf_open(struct net_device *netdev)
 		return -EIO;
 	}
 
-	if (adapter->state != __I40EVF_DOWN)
-		return -EBUSY;
+	while (test_and_set_bit(__I40EVF_IN_CRITICAL_TASK,
+				&adapter->crit_section))
+		usleep_range(500, 1000);
+
+	if (adapter->state != __I40EVF_DOWN) {
+		err = -EBUSY;
+		goto err_unlock;
+	}
 
 	/* allocate transmit descriptors */
 	err = i40evf_setup_all_tx_resources(adapter);
@@ -2264,6 +2234,8 @@ static int i40evf_open(struct net_device *netdev)
 
 	i40evf_irq_enable(adapter, true);
 
+	clear_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section);
+
 	return 0;
 
 err_req_irq:
@@ -2273,6 +2245,8 @@ static int i40evf_open(struct net_device *netdev)
 	i40evf_free_all_rx_resources(adapter);
 err_setup_tx:
 	i40evf_free_all_tx_resources(adapter);
+err_unlock:
+	clear_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section);
 
 	return err;
 }
@@ -2296,6 +2270,9 @@ static int i40evf_close(struct net_device *netdev)
 	if (adapter->state <= __I40EVF_DOWN_PENDING)
 		return 0;
 
+	while (test_and_set_bit(__I40EVF_IN_CRITICAL_TASK,
+				&adapter->crit_section))
+		usleep_range(500, 1000);
 
 	set_bit(__I40E_VSI_DOWN, adapter->vsi.state);
 	if (CLIENT_ENABLED(adapter))
@@ -2305,6 +2282,8 @@ static int i40evf_close(struct net_device *netdev)
 	adapter->state = __I40EVF_DOWN_PENDING;
 	i40evf_free_traffic_irqs(adapter);
 
+	clear_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section);
+
 	/* We explicitly don't free resources here because the hardware is
 	 * still active and can DMA into memory. Resources are cleared in
 	 * i40evf_virtchnl_completion() after we get confirmation from the PF
@@ -2943,6 +2922,8 @@ static int i40evf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 	mutex_init(&hw->aq.asq_mutex);
 	mutex_init(&hw->aq.arq_mutex);
 
+	spin_lock_init(&adapter->mac_vlan_list_lock);
+
 	INIT_LIST_HEAD(&adapter->mac_filter_list);
 	INIT_LIST_HEAD(&adapter->vlan_filter_list);
 
@@ -2985,6 +2966,10 @@ static int i40evf_suspend(struct pci_dev *pdev, pm_message_t state)
 
 	netif_device_detach(netdev);
 
+	while (test_and_set_bit(__I40EVF_IN_CRITICAL_TASK,
+				&adapter->crit_section))
+		usleep_range(500, 1000);
+
 	if (netif_running(netdev)) {
 		rtnl_lock();
 		i40evf_down(adapter);
@@ -2993,6 +2978,8 @@ static int i40evf_suspend(struct pci_dev *pdev, pm_message_t state)
 	i40evf_free_misc_irq(adapter);
 	i40evf_reset_interrupt_capability(adapter);
 
+	clear_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section);
+
 	retval = pci_save_state(pdev);
 	if (retval)
 		return retval;
@@ -3118,6 +3105,7 @@ static void i40evf_remove(struct pci_dev *pdev)
 	i40evf_free_all_rx_resources(adapter);
 	i40evf_free_queues(adapter);
 	kfree(adapter->vf_res);
+	spin_lock_bh(&adapter->mac_vlan_list_lock);
 	/* If we got removed before an up/down sequence, we've got a filter
 	 * hanging out there that we need to get rid of.
 	 */
@@ -3130,6 +3118,8 @@ static void i40evf_remove(struct pci_dev *pdev)
 		kfree(f);
 	}
 
+	spin_unlock_bh(&adapter->mac_vlan_list_lock);
+
 	free_netdev(netdev);
 
 	pci_disable_pcie_error_reporting(pdev);
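
Note how i40evf_open() and i40evf_close() above now take the
__I40EVF_IN_CRITICAL_TASK bit for the duration of the bring-up and
tear-down work, and how every exit path, including the new err_unlock
label, clears it. A compilable userspace sketch of that bit-lock shape,
using C11 atomics in place of test_and_set_bit()/clear_bit() (CRIT_BIT
and open_like are invented names):

#include <stdatomic.h>
#include <unistd.h>

#define CRIT_BIT 0x1UL

static atomic_ulong crit_section;

static int open_like(void)
{
	int err = 0;

	/* test_and_set_bit() analogue: spin until we own the bit */
	while (atomic_fetch_or(&crit_section, CRIT_BIT) & CRIT_BIT)
		usleep(500);	/* usleep_range(500, 1000) in the driver */

	/* ... allocate resources; on failure set err and fall through,
	 * mirroring the err_unlock label in i40evf_open() ...
	 */

	/* every path, success or error, must release the bit */
	atomic_fetch_and(&crit_section, ~CRIT_BIT);
	return err;
}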
diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c b/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c
index 46c8b8a..feb95b6 100644
--- a/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c
+++ b/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c
@@ -433,12 +433,16 @@ void i40evf_add_ether_addrs(struct i40evf_adapter *adapter)
 			adapter->current_op);
 		return;
 	}
+
+	spin_lock_bh(&adapter->mac_vlan_list_lock);
+
 	list_for_each_entry(f, &adapter->mac_filter_list, list) {
 		if (f->add)
 			count++;
 	}
 	if (!count) {
 		adapter->aq_required &= ~I40EVF_FLAG_AQ_ADD_MAC_FILTER;
+		spin_unlock_bh(&adapter->mac_vlan_list_lock);
 		return;
 	}
 	adapter->current_op = VIRTCHNL_OP_ADD_ETH_ADDR;
@@ -456,8 +460,10 @@ void i40evf_add_ether_addrs(struct i40evf_adapter *adapter)
 	}
 
 	veal = kzalloc(len, GFP_KERNEL);
-	if (!veal)
+	if (!veal) {
+		spin_unlock_bh(&adapter->mac_vlan_list_lock);
 		return;
+	}
 
 	veal->vsi_id = adapter->vsi_res->vsi_id;
 	veal->num_elements = count;
@@ -472,6 +478,9 @@ void i40evf_add_ether_addrs(struct i40evf_adapter *adapter)
 	}
 	if (!more)
 		adapter->aq_required &= ~I40EVF_FLAG_AQ_ADD_MAC_FILTER;
+
+	spin_unlock_bh(&adapter->mac_vlan_list_lock);
+
 	i40evf_send_pf_msg(adapter, VIRTCHNL_OP_ADD_ETH_ADDR,
 			   (u8 *)veal, len);
 	kfree(veal);
@@ -498,12 +507,16 @@ void i40evf_del_ether_addrs(struct i40evf_adapter *adapter)
 			adapter->current_op);
 		return;
 	}
+
+	spin_lock_bh(&adapter->mac_vlan_list_lock);
+
 	list_for_each_entry(f, &adapter->mac_filter_list, list) {
 		if (f->remove)
 			count++;
 	}
 	if (!count) {
 		adapter->aq_required &= ~I40EVF_FLAG_AQ_DEL_MAC_FILTER;
+		spin_unlock_bh(&adapter->mac_vlan_list_lock);
 		return;
 	}
 	adapter->current_op = VIRTCHNL_OP_DEL_ETH_ADDR;
@@ -520,8 +533,10 @@ void i40evf_del_ether_addrs(struct i40evf_adapter *adapter)
 		more = true;
 	}
 	veal = kzalloc(len, GFP_KERNEL);
-	if (!veal)
+	if (!veal) {
+		spin_unlock_bh(&adapter->mac_vlan_list_lock);
 		return;
+	}
 
 	veal->vsi_id = adapter->vsi_res->vsi_id;
 	veal->num_elements = count;
@@ -537,6 +552,9 @@ void i40evf_del_ether_addrs(struct i40evf_adapter *adapter)
 	}
 	if (!more)
 		adapter->aq_required &= ~I40EVF_FLAG_AQ_DEL_MAC_FILTER;
+
+	spin_unlock_bh(&adapter->mac_vlan_list_lock);
+
 	i40evf_send_pf_msg(adapter, VIRTCHNL_OP_DEL_ETH_ADDR,
 			   (u8 *)veal, len);
 	kfree(veal);
@@ -564,12 +582,15 @@ void i40evf_add_vlans(struct i40evf_adapter *adapter)
 		return;
 	}
 
+	spin_lock_bh(&adapter->mac_vlan_list_lock);
+
 	list_for_each_entry(f, &adapter->vlan_filter_list, list) {
 		if (f->add)
 			count++;
 	}
 	if (!count) {
 		adapter->aq_required &= ~I40EVF_FLAG_AQ_ADD_VLAN_FILTER;
+		spin_unlock_bh(&adapter->mac_vlan_list_lock);
 		return;
 	}
 	adapter->current_op = VIRTCHNL_OP_ADD_VLAN;
@@ -586,8 +607,10 @@ void i40evf_add_vlans(struct i40evf_adapter *adapter)
 		more = true;
 	}
 	vvfl = kzalloc(len, GFP_KERNEL);
-	if (!vvfl)
+	if (!vvfl) {
+		spin_unlock_bh(&adapter->mac_vlan_list_lock);
 		return;
+	}
 
 	vvfl->vsi_id = adapter->vsi_res->vsi_id;
 	vvfl->num_elements = count;
@@ -602,6 +625,9 @@ void i40evf_add_vlans(struct i40evf_adapter *adapter)
 	}
 	if (!more)
 		adapter->aq_required &= ~I40EVF_FLAG_AQ_ADD_VLAN_FILTER;
+
+	spin_unlock_bh(&adapter->mac_vlan_list_lock);
+
 	i40evf_send_pf_msg(adapter, VIRTCHNL_OP_ADD_VLAN, (u8 *)vvfl, len);
 	kfree(vvfl);
 }
@@ -628,12 +654,15 @@ void i40evf_del_vlans(struct i40evf_adapter *adapter)
 		return;
 	}
 
+	spin_lock_bh(&adapter->mac_vlan_list_lock);
+
 	list_for_each_entry(f, &adapter->vlan_filter_list, list) {
 		if (f->remove)
 			count++;
 	}
 	if (!count) {
 		adapter->aq_required &= ~I40EVF_FLAG_AQ_DEL_VLAN_FILTER;
+		spin_unlock_bh(&adapter->mac_vlan_list_lock);
 		return;
 	}
 	adapter->current_op = VIRTCHNL_OP_DEL_VLAN;
@@ -650,8 +679,10 @@ void i40evf_del_vlans(struct i40evf_adapter *adapter)
 		more = true;
 	}
 	vvfl = kzalloc(len, GFP_KERNEL);
-	if (!vvfl)
+	if (!vvfl) {
+		spin_unlock_bh(&adapter->mac_vlan_list_lock);
 		return;
+	}
 
 	vvfl->vsi_id = adapter->vsi_res->vsi_id;
 	vvfl->num_elements = count;
@@ -667,6 +698,9 @@ void i40evf_del_vlans(struct i40evf_adapter *adapter)
 	}
 	if (!more)
 		adapter->aq_required &= ~I40EVF_FLAG_AQ_DEL_VLAN_FILTER;
+
+	spin_unlock_bh(&adapter->mac_vlan_list_lock);
+
 	i40evf_send_pf_msg(adapter, VIRTCHNL_OP_DEL_VLAN, (u8 *)vvfl, len);
 	kfree(vvfl);
 }
@@ -705,8 +739,10 @@ void i40evf_set_promiscuous(struct i40evf_adapter *adapter, int flags)
 	}
 
 	if (!flags) {
-		adapter->flags &= ~I40EVF_FLAG_PROMISC_ON;
-		adapter->aq_required &= ~I40EVF_FLAG_AQ_RELEASE_PROMISC;
+		adapter->flags &= ~(I40EVF_FLAG_PROMISC_ON |
+				    I40EVF_FLAG_ALLMULTI_ON);
+		adapter->aq_required &= ~(I40EVF_FLAG_AQ_RELEASE_PROMISC |
+					  I40EVF_FLAG_AQ_RELEASE_ALLMULTI);
 		dev_info(&adapter->pdev->dev, "Leaving promiscuous mode\n");
 	}
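
Every virtchnl hunk above follows the same shape: take
mac_vlan_list_lock, count and snapshot the matching filters into a
freshly allocated message, drop the lock, then send. A sketch of that
copy-under-lock idiom, reusing struct filter_ctx from the earlier
sketch (snapshot_macs is an invented helper, not driver API):

static int snapshot_macs(struct filter_ctx *ctx,
			 unsigned char (*out)[6], int max)
{
	struct filter *f;
	int n = 0;

	pthread_mutex_lock(&ctx->lock);
	for (f = ctx->mac_list; f && n < max; f = f->next)
		memcpy(out[n++], f->macaddr, 6);
	pthread_mutex_unlock(&ctx->lock);

	/* the caller sends the snapshot with no lock held, so a slow
	 * send path never blocks concurrent list updates
	 */
	return n;
}

This is also why each early return above gained a spin_unlock_bh()
call: once the lock is taken before the count loop, every exit path
must release it.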
 
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe.h b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
index 468c355..03a4df0 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
@@ -53,6 +53,7 @@
 #include <linux/dca.h>
 #endif
 
+#include <net/xdp.h>
 #include <net/busy_poll.h>
 
 /* common prefix used by pr_<> macros */
@@ -332,7 +333,6 @@ struct ixgbe_ring {
 	struct net_device *netdev;	/* netdev ring belongs to */
 	struct bpf_prog *xdp_prog;
 	struct device *dev;		/* device for DMA mapping */
-	struct ixgbe_fwd_adapter *l2_accel_priv;
 	void *desc;			/* descriptor ring memory */
 	union {
 		struct ixgbe_tx_buffer *tx_buffer_info;
@@ -371,6 +371,7 @@ struct ixgbe_ring {
 		struct ixgbe_tx_queue_stats tx_stats;
 		struct ixgbe_rx_queue_stats rx_stats;
 	};
+	struct xdp_rxq_info xdp_rxq;
 } ____cacheline_internodealigned_in_smp;
 
 enum ixgbe_ring_f_enum {
@@ -395,8 +396,7 @@ enum ixgbe_ring_f_enum {
 #define MAX_XDP_QUEUES			(IXGBE_MAX_FDIR_INDICES + 1)
 #define IXGBE_MAX_L2A_QUEUES		4
 #define IXGBE_BAD_L2A_QUEUE		3
-#define IXGBE_MAX_MACVLANS		31
-#define IXGBE_MAX_DCBMACVLANS		8
+#define IXGBE_MAX_MACVLANS		63
 
 struct ixgbe_ring_feature {
 	u16 limit;	/* upper limit on feature indices */
@@ -721,8 +721,7 @@ struct ixgbe_adapter {
 
 	u16 bridge_mode;
 
-	u16 eeprom_verh;
-	u16 eeprom_verl;
+	char eeprom_id[NVM_VER_SIZE];
 	u16 eeprom_cap;
 
 	u32 interrupt_event;
@@ -766,7 +765,8 @@ struct ixgbe_adapter {
 #endif /*CONFIG_DEBUG_FS*/
 
 	u8 default_up;
-	unsigned long fwd_bitmask; /* Bitmask indicating in use pools */
+	/* Bitmask indicating in use pools */
+	DECLARE_BITMAP(fwd_bitmask, IXGBE_MAX_MACVLANS + 1);
 
 #define IXGBE_MAX_LINK_HANDLE 10
 	struct ixgbe_jump_table *jump_tables[IXGBE_MAX_LINK_HANDLE];
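
The fwd_bitmask change above is what lets IXGBE_MAX_MACVLANS grow from
31 to 63: a bare unsigned long caps the pool bitmask at BITS_PER_LONG
bits (32 on 32-bit kernels), while DECLARE_BITMAP sizes an array of
longs for any bit count. A minimal userspace equivalent of that macro
(MY_MAX_POOLS and pool_set are illustrative names):

#include <limits.h>

#define MY_MAX_POOLS	64	/* stands in for IXGBE_MAX_MACVLANS + 1 */
#define BITS_PER_LONG	(CHAR_BIT * sizeof(long))
#define MY_DECLARE_BITMAP(name, bits) \
	unsigned long name[((bits) + BITS_PER_LONG - 1) / BITS_PER_LONG]

struct pools {
	MY_DECLARE_BITMAP(fwd_bitmask, MY_MAX_POOLS);
};

static void pool_set(struct pools *p, unsigned int bit)
{
	p->fwd_bitmask[bit / BITS_PER_LONG] |= 1UL << (bit % BITS_PER_LONG);
}

It also explains the mechanical &adapter->fwd_bitmask ->
adapter->fwd_bitmask conversions later in this series: the field is now
an array, so it decays to the pointer the bitmap helpers expect.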
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
index 9bef255..1948e42 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
@@ -4028,6 +4028,118 @@ s32 ixgbe_init_thermal_sensor_thresh_generic(struct ixgbe_hw *hw)
 	return 0;
 }
 
+/**
+ *  ixgbe_get_orom_version - Return option ROM version from EEPROM
+ *
+ *  @hw: pointer to hardware structure
+ *  @nvm_ver: pointer to output structure
+ *
+ *  If a valid option ROM version is found, nvm_ver->or_valid is set
+ *  to true, otherwise it is set to false.
+ **/
+void ixgbe_get_orom_version(struct ixgbe_hw *hw,
+			    struct ixgbe_nvm_version *nvm_ver)
+{
+	u16 offset, eeprom_cfg_blkh, eeprom_cfg_blkl;
+
+	nvm_ver->or_valid = false;
+	/* The option ROM may or may not be present. Start with the pointer. */
+	hw->eeprom.ops.read(hw, NVM_OROM_OFFSET, &offset);
+
+	/* make sure offset is valid */
+	if (offset == 0x0 || offset == NVM_INVALID_PTR)
+		return;
+
+	hw->eeprom.ops.read(hw, offset + NVM_OROM_BLK_HI, &eeprom_cfg_blkh);
+	hw->eeprom.ops.read(hw, offset + NVM_OROM_BLK_LOW, &eeprom_cfg_blkl);
+
+	/* return if the option ROM block is absent or invalid */
+	if ((eeprom_cfg_blkl | eeprom_cfg_blkh) == 0x0 ||
+	    eeprom_cfg_blkl == NVM_VER_INVALID ||
+	    eeprom_cfg_blkh == NVM_VER_INVALID)
+		return;
+
+	nvm_ver->or_valid = true;
+	nvm_ver->or_major = eeprom_cfg_blkl >> NVM_OROM_SHIFT;
+	nvm_ver->or_build = (eeprom_cfg_blkl << NVM_OROM_SHIFT) |
+			    (eeprom_cfg_blkh >> NVM_OROM_SHIFT);
+	nvm_ver->or_patch = eeprom_cfg_blkh & NVM_OROM_PATCH_MASK;
+}
+
+/**
+ *  ixgbe_get_oem_prod_version - Return OEM product version from EEPROM
+ *
+ *  @hw: pointer to hardware structure
+ *  @nvm_ver: pointer to output structure
+ *
+ *  If a valid OEM product version is found, nvm_ver->oem_valid is set
+ *  to true, otherwise it is set to false.
+ **/
+void ixgbe_get_oem_prod_version(struct ixgbe_hw *hw,
+				struct ixgbe_nvm_version *nvm_ver)
+{
+	u16 rel_num, prod_ver, mod_len, cap, offset;
+
+	nvm_ver->oem_valid = false;
+	hw->eeprom.ops.read(hw, NVM_OEM_PROD_VER_PTR, &offset);
+
+	/* Return if the offset to the OEM Product Version block is invalid */
+	if (offset == 0x0 || offset == NVM_INVALID_PTR)
+		return;
+
+	/* Read product version block */
+	hw->eeprom.ops.read(hw, offset, &mod_len);
+	hw->eeprom.ops.read(hw, offset + NVM_OEM_PROD_VER_CAP_OFF, &cap);
+
+	/* Return if OEM product version block is invalid */
+	if (mod_len != NVM_OEM_PROD_VER_MOD_LEN ||
+	    (cap & NVM_OEM_PROD_VER_CAP_MASK) != 0x0)
+		return;
+
+	hw->eeprom.ops.read(hw, offset + NVM_OEM_PROD_VER_OFF_L, &prod_ver);
+	hw->eeprom.ops.read(hw, offset + NVM_OEM_PROD_VER_OFF_H, &rel_num);
+
+	/* Return if version is invalid */
+	if ((rel_num | prod_ver) == 0x0 ||
+	    rel_num == NVM_VER_INVALID || prod_ver == NVM_VER_INVALID)
+		return;
+
+	nvm_ver->oem_major = prod_ver >> NVM_VER_SHIFT;
+	nvm_ver->oem_minor = prod_ver & NVM_VER_MASK;
+	nvm_ver->oem_release = rel_num;
+	nvm_ver->oem_valid = true;
+}
+
+/**
+ *  ixgbe_get_etk_id - Return Etrack ID from EEPROM
+ *
+ *  @hw: pointer to hardware structure
+ *  @nvm_ver: pointer to output structure
+ *
+ *  word read errors will return 0xFFFF
+ **/
+void ixgbe_get_etk_id(struct ixgbe_hw *hw,
+		      struct ixgbe_nvm_version *nvm_ver)
+{
+	u16 etk_id_l, etk_id_h;
+
+	if (hw->eeprom.ops.read(hw, NVM_ETK_OFF_LOW, &etk_id_l))
+		etk_id_l = NVM_VER_INVALID;
+	if (hw->eeprom.ops.read(hw, NVM_ETK_OFF_HI, &etk_id_h))
+		etk_id_h = NVM_VER_INVALID;
+
+	/* The word order for the version format is determined by high order
+	 * word bit 15.
+	 */
+	if ((etk_id_h & NVM_ETK_VALID) == 0) {
+		nvm_ver->etk_id = etk_id_h;
+		nvm_ver->etk_id |= (etk_id_l << NVM_ETK_SHIFT);
+	} else {
+		nvm_ver->etk_id = etk_id_l;
+		nvm_ver->etk_id |= (etk_id_h << NVM_ETK_SHIFT);
+	}
+}
+
 void ixgbe_disable_rx_generic(struct ixgbe_hw *hw)
 {
 	u32 rxctrl;
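
ixgbe_get_etk_id() above encodes a small but easy-to-miss rule: bit 15
of the high EEPROM word selects which word supplies the top half of the
32-bit ETrack ID. A standalone restatement of that assembly, assuming
(as the driver's usage implies) that NVM_ETK_SHIFT is 16 and
NVM_ETK_VALID is bit 15:

#include <stdint.h>

#define ETK_SHIFT 16		/* assumed value of NVM_ETK_SHIFT */
#define ETK_VALID 0x8000	/* assumed value of NVM_ETK_VALID */

static uint32_t etk_id(uint16_t lo_word, uint16_t hi_word)
{
	/* valid bit clear: the low EEPROM word holds the high half */
	if (!(hi_word & ETK_VALID))
		return ((uint32_t)lo_word << ETK_SHIFT) | hi_word;
	/* valid bit set: conventional high/low ordering */
	return ((uint32_t)hi_word << ETK_SHIFT) | lo_word;
}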
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h
index a01409e..4d4c0236 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h
@@ -139,6 +139,12 @@ extern const u32 ixgbe_mvals_8259X[IXGBE_MVALS_IDX_LIMIT];
 
 s32 ixgbe_get_thermal_sensor_data_generic(struct ixgbe_hw *hw);
 s32 ixgbe_init_thermal_sensor_thresh_generic(struct ixgbe_hw *hw);
+void ixgbe_get_etk_id(struct ixgbe_hw *hw,
+		      struct ixgbe_nvm_version *nvm_ver);
+void ixgbe_get_oem_prod_version(struct ixgbe_hw *hw,
+				struct ixgbe_nvm_version *nvm_ver);
+void ixgbe_get_orom_version(struct ixgbe_hw *hw,
+			    struct ixgbe_nvm_version *nvm_ver);
 void ixgbe_disable_rx_generic(struct ixgbe_hw *hw);
 void ixgbe_enable_rx_generic(struct ixgbe_hw *hw);
 s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw,
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
index 0aad1c2..3bcf58b 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
@@ -1014,16 +1014,13 @@ static void ixgbe_get_drvinfo(struct net_device *netdev,
 			      struct ethtool_drvinfo *drvinfo)
 {
 	struct ixgbe_adapter *adapter = netdev_priv(netdev);
-	u32 nvm_track_id;
 
 	strlcpy(drvinfo->driver, ixgbe_driver_name, sizeof(drvinfo->driver));
 	strlcpy(drvinfo->version, ixgbe_driver_version,
 		sizeof(drvinfo->version));
 
-	nvm_track_id = (adapter->eeprom_verh << 16) |
-			adapter->eeprom_verl;
-	snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version), "0x%08x",
-		 nvm_track_id);
+	strlcpy(drvinfo->fw_version, adapter->eeprom_id,
+		sizeof(drvinfo->fw_version));
 
 	strlcpy(drvinfo->bus_info, pci_name(adapter->pdev),
 		sizeof(drvinfo->bus_info));
@@ -1156,6 +1153,10 @@ static int ixgbe_set_ringparam(struct net_device *netdev,
 			memcpy(&temp_ring[i], adapter->rx_ring[i],
 			       sizeof(struct ixgbe_ring));
 
+			/* Clear copied XDP RX-queue info */
+			memset(&temp_ring[i].xdp_rxq, 0,
+			       sizeof(temp_ring[i].xdp_rxq));
+
 			temp_ring[i].count = new_rx_count;
 			err = ixgbe_setup_rx_resources(adapter, &temp_ring[i]);
 			if (err) {
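
The set_ringparam hunk above captures a general rule for structs that
embed registration state: memcpy() duplicates the registered xdp_rxq,
so the copy must be zeroed before ixgbe_setup_rx_resources() registers
it again. The generic shape of the idiom (reg_info, ring and clone_ring
are invented names):

#include <string.h>

struct reg_info { void *owner; int id; };	/* xdp_rxq_info analogue */
struct ring { int count; struct reg_info rxq; };

static void clone_ring(struct ring *dst, const struct ring *src,
		       int new_count)
{
	memcpy(dst, src, sizeof(*dst));
	/* never reuse the source's registration; re-register instead */
	memset(&dst->rxq, 0, sizeof(dst->rxq));
	dst->count = new_count;
}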
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c
index a23c2b5..6e6b3c1 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c
@@ -1034,11 +1034,8 @@ int ixgbe_fcoe_get_hbainfo(struct net_device *netdev,
 		 ixgbe_driver_name,
 		 ixgbe_driver_version);
 	/* Firmware Version */
-	snprintf(info->firmware_version,
-		 sizeof(info->firmware_version),
-		 "0x%08x",
-		 (adapter->eeprom_verh << 16) |
-		  adapter->eeprom_verl);
+	strlcpy(info->firmware_version, adapter->eeprom_id,
+		sizeof(info->firmware_version));
 
 	/* Model */
 	if (hw->mac.type == ixgbe_mac_82599EB) {
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c
index 8e2a957..cceafbc 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c
@@ -350,6 +350,9 @@ static bool ixgbe_set_dcb_sriov_queues(struct ixgbe_adapter *adapter)
 	if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED))
 		return false;
 
+	/* limit VMDq instances on the PF by number of Tx queues */
+	vmdq_i = min_t(u16, vmdq_i, MAX_TX_QUEUES / tcs);
+
 	/* Add starting offset to total pool count */
 	vmdq_i += adapter->ring_feature[RING_F_VMDQ].offset;
 
@@ -512,12 +515,14 @@ static bool ixgbe_set_sriov_queues(struct ixgbe_adapter *adapter)
 #ifdef IXGBE_FCOE
 	u16 fcoe_i = 0;
 #endif
-	bool pools = (find_first_zero_bit(&adapter->fwd_bitmask, 32) > 1);
 
 	/* only proceed if SR-IOV is enabled */
 	if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED))
 		return false;
 
+	/* limit l2fwd RSS based on total Tx queue limit */
+	rss_i = min_t(u16, rss_i, MAX_TX_QUEUES / vmdq_i);
+
 	/* Add starting offset to total pool count */
 	vmdq_i += adapter->ring_feature[RING_F_VMDQ].offset;
 
@@ -525,7 +530,7 @@ static bool ixgbe_set_sriov_queues(struct ixgbe_adapter *adapter)
 	vmdq_i = min_t(u16, IXGBE_MAX_VMDQ_INDICES, vmdq_i);
 
 	/* 64 pool mode with 2 queues per pool */
-	if ((vmdq_i > 32) || (vmdq_i > 16 && pools)) {
+	if (vmdq_i > 32) {
 		vmdq_m = IXGBE_82599_VMDQ_2Q_MASK;
 		rss_m = IXGBE_RSS_2Q_MASK;
 		rss_i = min_t(u16, rss_i, 2);
@@ -701,7 +706,7 @@ static void ixgbe_set_num_queues(struct ixgbe_adapter *adapter)
 	adapter->num_rx_queues = 1;
 	adapter->num_tx_queues = 1;
 	adapter->num_xdp_queues = 0;
-	adapter->num_rx_pools = adapter->num_rx_queues;
+	adapter->num_rx_pools = 1;
 	adapter->num_rx_queues_per_pool = 1;
 
 #ifdef CONFIG_IXGBE_DCB
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index 62a1891..e47e0c4 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -192,6 +192,13 @@ static struct workqueue_struct *ixgbe_wq;
 static bool ixgbe_check_cfg_remove(struct ixgbe_hw *hw, struct pci_dev *pdev);
 static void ixgbe_watchdog_link_is_down(struct ixgbe_adapter *);
 
+static const struct net_device_ops ixgbe_netdev_ops;
+
+static bool netif_is_ixgbe(struct net_device *dev)
+{
+	return dev && (dev->netdev_ops == &ixgbe_netdev_ops);
+}
+
 static int ixgbe_read_pci_cfg_word_parent(struct ixgbe_adapter *adapter,
 					  u32 reg, u16 *value)
 {
@@ -1064,24 +1071,12 @@ static u64 ixgbe_get_tx_completed(struct ixgbe_ring *ring)
 
 static u64 ixgbe_get_tx_pending(struct ixgbe_ring *ring)
 {
-	struct ixgbe_adapter *adapter;
-	struct ixgbe_hw *hw;
-	u32 head, tail;
+	unsigned int head, tail;
 
-	if (ring->l2_accel_priv)
-		adapter = ring->l2_accel_priv->real_adapter;
-	else
-		adapter = netdev_priv(ring->netdev);
+	head = ring->next_to_clean;
+	tail = ring->next_to_use;
 
-	hw = &adapter->hw;
-	head = IXGBE_READ_REG(hw, IXGBE_TDH(ring->reg_idx));
-	tail = IXGBE_READ_REG(hw, IXGBE_TDT(ring->reg_idx));
-
-	if (head != tail)
-		return (head < tail) ?
-			tail - head : (tail + ring->count - head);
-
-	return 0;
+	return ((head <= tail) ? tail : tail + ring->count) - head;
 }
 
 static inline bool ixgbe_check_tx_hang(struct ixgbe_ring *tx_ring)
@@ -2318,12 +2313,14 @@ static int ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
 #endif /* IXGBE_FCOE */
 	u16 cleaned_count = ixgbe_desc_unused(rx_ring);
 	bool xdp_xmit = false;
+	struct xdp_buff xdp;
+
+	xdp.rxq = &rx_ring->xdp_rxq;
 
 	while (likely(total_rx_packets < budget)) {
 		union ixgbe_adv_rx_desc *rx_desc;
 		struct ixgbe_rx_buffer *rx_buffer;
 		struct sk_buff *skb;
-		struct xdp_buff xdp;
 		unsigned int size;
 
 		/* return some buffers to hardware, one at a time is too slow */
@@ -2515,13 +2512,6 @@ static void ixgbe_configure_msix(struct ixgbe_adapter *adapter)
 	IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIAC, mask);
 }
 
-enum latency_range {
-	lowest_latency = 0,
-	low_latency = 1,
-	bulk_latency = 2,
-	latency_invalid = 255
-};
-
 /**
  * ixgbe_update_itr - update the dynamic ITR value based on statistics
  * @q_vector: structure containing interrupt and ring information
@@ -2534,8 +2524,6 @@ enum latency_range {
  *      based on theoretical maximum wire speed and thresholds were set based
  *      on testing data as well as attempting to minimize response time
  *      while increasing bulk throughput.
- *      this functionality is controlled by the InterruptThrottleRate module
- *      parameter (see ixgbe_param.c)
  **/
 static void ixgbe_update_itr(struct ixgbe_q_vector *q_vector,
 			     struct ixgbe_ring_container *ring_container)
@@ -3853,16 +3841,20 @@ static void ixgbe_store_vfreta(struct ixgbe_adapter *adapter)
 	u32 i, reta_entries = ixgbe_rss_indir_tbl_entries(adapter);
 	struct ixgbe_hw *hw = &adapter->hw;
 	u32 vfreta = 0;
-	unsigned int pf_pool = adapter->num_vfs;
 
 	/* Write redirection table to HW */
 	for (i = 0; i < reta_entries; i++) {
+		u16 pool = adapter->num_rx_pools;
+
 		vfreta |= (u32)adapter->rss_indir_tbl[i] << (i & 0x3) * 8;
-		if ((i & 3) == 3) {
-			IXGBE_WRITE_REG(hw, IXGBE_PFVFRETA(i >> 2, pf_pool),
+		if ((i & 3) != 3)
+			continue;
+
+		while (pool--)
+			IXGBE_WRITE_REG(hw,
+					IXGBE_PFVFRETA(i >> 2, VMDQ_P(pool)),
 					vfreta);
-			vfreta = 0;
-		}
+		vfreta = 0;
 	}
 }
 
@@ -3899,13 +3891,17 @@ static void ixgbe_setup_vfreta(struct ixgbe_adapter *adapter)
 {
 	struct ixgbe_hw *hw = &adapter->hw;
 	u16 rss_i = adapter->ring_feature[RING_F_RSS].indices;
-	unsigned int pf_pool = adapter->num_vfs;
 	int i, j;
 
 	/* Fill out hash function seeds */
-	for (i = 0; i < 10; i++)
-		IXGBE_WRITE_REG(hw, IXGBE_PFVFRSSRK(i, pf_pool),
-				*(adapter->rss_key + i));
+	for (i = 0; i < 10; i++) {
+		u16 pool = adapter->num_rx_pools;
+
+		while (pool--)
+			IXGBE_WRITE_REG(hw,
+					IXGBE_PFVFRSSRK(i, VMDQ_P(pool)),
+					*(adapter->rss_key + i));
+	}
 
 	/* Fill out the redirection table */
 	for (i = 0, j = 0; i < 64; i++, j++) {
@@ -3971,7 +3967,7 @@ static void ixgbe_setup_mrqc(struct ixgbe_adapter *adapter)
 
 	if ((hw->mac.type >= ixgbe_mac_X550) &&
 	    (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)) {
-		unsigned int pf_pool = adapter->num_vfs;
+		u16 pool = adapter->num_rx_pools;
 
 		/* Enable VF RSS mode */
 		mrqc |= IXGBE_MRQC_MULTIPLE_RSS;
@@ -3981,7 +3977,11 @@ static void ixgbe_setup_mrqc(struct ixgbe_adapter *adapter)
 		ixgbe_setup_vfreta(adapter);
 		vfmrqc = IXGBE_MRQC_RSSEN;
 		vfmrqc |= rss_field;
-		IXGBE_WRITE_REG(hw, IXGBE_PFVFMRQC(pf_pool), vfmrqc);
+
+		while (pool--)
+			IXGBE_WRITE_REG(hw,
+					IXGBE_PFVFMRQC(VMDQ_P(pool)),
+					vfmrqc);
 	} else {
 		ixgbe_setup_reta(adapter);
 		mrqc |= rss_field;
@@ -4144,7 +4144,7 @@ static void ixgbe_setup_psrtype(struct ixgbe_adapter *adapter)
 {
 	struct ixgbe_hw *hw = &adapter->hw;
 	int rss_i = adapter->ring_feature[RING_F_RSS].indices;
-	u16 pool;
+	u16 pool = adapter->num_rx_pools;
 
 	/* PSRTYPE must be initialized in non 82598 adapters */
 	u32 psrtype = IXGBE_PSRTYPE_TCPHDR |
@@ -4161,7 +4161,7 @@ static void ixgbe_setup_psrtype(struct ixgbe_adapter *adapter)
 	else if (rss_i > 1)
 		psrtype |= 1u << 29;
 
-	for_each_set_bit(pool, &adapter->fwd_bitmask, 32)
+	while (pool--)
 		IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(VMDQ_P(pool)), psrtype);
 }
 
@@ -4488,8 +4488,9 @@ static void ixgbe_vlan_strip_disable(struct ixgbe_adapter *adapter)
 		for (i = 0; i < adapter->num_rx_queues; i++) {
 			struct ixgbe_ring *ring = adapter->rx_ring[i];
 
-			if (ring->l2_accel_priv)
+			if (!netif_is_ixgbe(ring->netdev))
 				continue;
+
 			j = ring->reg_idx;
 			vlnctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(j));
 			vlnctrl &= ~IXGBE_RXDCTL_VME;
@@ -4525,8 +4526,9 @@ static void ixgbe_vlan_strip_enable(struct ixgbe_adapter *adapter)
 		for (i = 0; i < adapter->num_rx_queues; i++) {
 			struct ixgbe_ring *ring = adapter->rx_ring[i];
 
-			if (ring->l2_accel_priv)
+			if (!netif_is_ixgbe(ring->netdev))
 				continue;
+
 			j = ring->reg_idx;
 			vlnctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(j));
 			vlnctrl |= IXGBE_RXDCTL_VME;
@@ -5277,29 +5279,6 @@ static void ixgbe_macvlan_set_rx_mode(struct net_device *dev, unsigned int pool,
 	IXGBE_WRITE_REG(hw, IXGBE_VMOLR(pool), vmolr);
 }
 
-static void ixgbe_fwd_psrtype(struct ixgbe_fwd_adapter *vadapter)
-{
-	struct ixgbe_adapter *adapter = vadapter->real_adapter;
-	int rss_i = adapter->num_rx_queues_per_pool;
-	struct ixgbe_hw *hw = &adapter->hw;
-	u16 pool = vadapter->pool;
-	u32 psrtype = IXGBE_PSRTYPE_TCPHDR |
-		      IXGBE_PSRTYPE_UDPHDR |
-		      IXGBE_PSRTYPE_IPV4HDR |
-		      IXGBE_PSRTYPE_L2HDR |
-		      IXGBE_PSRTYPE_IPV6HDR;
-
-	if (hw->mac.type == ixgbe_mac_82598EB)
-		return;
-
-	if (rss_i > 3)
-		psrtype |= 2u << 29;
-	else if (rss_i > 1)
-		psrtype |= 1u << 29;
-
-	IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(VMDQ_P(pool)), psrtype);
-}
-
 /**
  * ixgbe_clean_rx_ring - Free Rx Buffers per Queue
  * @rx_ring: ring to free buffers from
@@ -5363,7 +5342,6 @@ static void ixgbe_disable_fwd_ring(struct ixgbe_fwd_adapter *vadapter,
 	usleep_range(10000, 20000);
 	ixgbe_irq_disable_queues(adapter, BIT_ULL(index));
 	ixgbe_clean_rx_ring(rx_ring);
-	rx_ring->l2_accel_priv = NULL;
 }
 
 static int ixgbe_fwd_ring_down(struct net_device *vdev,
@@ -5381,10 +5359,8 @@ static int ixgbe_fwd_ring_down(struct net_device *vdev,
 		adapter->rx_ring[rxbase + i]->netdev = adapter->netdev;
 	}
 
-	for (i = 0; i < adapter->num_rx_queues_per_pool; i++) {
-		adapter->tx_ring[txbase + i]->l2_accel_priv = NULL;
+	for (i = 0; i < adapter->num_rx_queues_per_pool; i++)
 		adapter->tx_ring[txbase + i]->netdev = adapter->netdev;
-	}
 
 
 	return 0;
@@ -5397,14 +5373,13 @@ static int ixgbe_fwd_ring_up(struct net_device *vdev,
 	unsigned int rxbase, txbase, queues;
 	int i, baseq, err = 0;
 
-	if (!test_bit(accel->pool, &adapter->fwd_bitmask))
+	if (!test_bit(accel->pool, adapter->fwd_bitmask))
 		return 0;
 
 	baseq = accel->pool * adapter->num_rx_queues_per_pool;
-	netdev_dbg(vdev, "pool %i:%i queues %i:%i VSI bitmask %lx\n",
+	netdev_dbg(vdev, "pool %i:%i queues %i:%i\n",
 		   accel->pool, adapter->num_rx_pools,
-		   baseq, baseq + adapter->num_rx_queues_per_pool,
-		   adapter->fwd_bitmask);
+		   baseq, baseq + adapter->num_rx_queues_per_pool);
 
 	accel->netdev = vdev;
 	accel->rx_base_queue = rxbase = baseq;
@@ -5415,14 +5390,11 @@ static int ixgbe_fwd_ring_up(struct net_device *vdev,
 
 	for (i = 0; i < adapter->num_rx_queues_per_pool; i++) {
 		adapter->rx_ring[rxbase + i]->netdev = vdev;
-		adapter->rx_ring[rxbase + i]->l2_accel_priv = accel;
 		ixgbe_configure_rx_ring(adapter, adapter->rx_ring[rxbase + i]);
 	}
 
-	for (i = 0; i < adapter->num_rx_queues_per_pool; i++) {
+	for (i = 0; i < adapter->num_rx_queues_per_pool; i++)
 		adapter->tx_ring[txbase + i]->netdev = vdev;
-		adapter->tx_ring[txbase + i]->l2_accel_priv = accel;
-	}
 
 	queues = min_t(unsigned int,
 		       adapter->num_rx_queues_per_pool, vdev->num_tx_queues);
@@ -5435,10 +5407,10 @@ static int ixgbe_fwd_ring_up(struct net_device *vdev,
 		goto fwd_queue_err;
 
 	if (is_valid_ether_addr(vdev->dev_addr))
-		ixgbe_add_mac_filter(adapter, vdev->dev_addr, accel->pool);
+		ixgbe_add_mac_filter(adapter, vdev->dev_addr,
+				     VMDQ_P(accel->pool));
 
-	ixgbe_fwd_psrtype(accel);
-	ixgbe_macvlan_set_rx_mode(vdev, accel->pool, adapter);
+	ixgbe_macvlan_set_rx_mode(vdev, VMDQ_P(accel->pool), adapter);
 	return err;
 fwd_queue_err:
 	ixgbe_fwd_ring_down(vdev, accel);
@@ -6302,7 +6274,7 @@ static int ixgbe_sw_init(struct ixgbe_adapter *adapter,
 	}
 
 	/* PF holds first pool slot */
-	set_bit(0, &adapter->fwd_bitmask);
+	set_bit(0, adapter->fwd_bitmask);
 	set_bit(__IXGBE_DOWN, &adapter->state);
 
 	return 0;
@@ -6444,6 +6416,11 @@ int ixgbe_setup_rx_resources(struct ixgbe_adapter *adapter,
 	rx_ring->next_to_clean = 0;
 	rx_ring->next_to_use = 0;
 
+	/* XDP RX-queue info */
+	if (xdp_rxq_info_reg(&rx_ring->xdp_rxq, adapter->netdev,
+			     rx_ring->queue_index) < 0)
+		goto err;
+
 	rx_ring->xdp_prog = adapter->xdp_prog;
 
 	return 0;
@@ -6541,6 +6518,7 @@ void ixgbe_free_rx_resources(struct ixgbe_ring *rx_ring)
 	ixgbe_clean_rx_ring(rx_ring);
 
 	rx_ring->xdp_prog = NULL;
+	xdp_rxq_info_unreg(&rx_ring->xdp_rxq);
 	vfree(rx_ring->rx_buffer_info);
 	rx_ring->rx_buffer_info = NULL;
 
@@ -6783,7 +6761,7 @@ static int __ixgbe_shutdown(struct pci_dev *pdev, bool *enable_wake)
 	struct ixgbe_adapter *adapter = pci_get_drvdata(pdev);
 	struct net_device *netdev = adapter->netdev;
 	struct ixgbe_hw *hw = &adapter->hw;
-	u32 ctrl, fctrl;
+	u32 ctrl;
 	u32 wufc = adapter->wol;
 #ifdef CONFIG_PM
 	int retval = 0;
@@ -6808,18 +6786,18 @@ static int __ixgbe_shutdown(struct pci_dev *pdev, bool *enable_wake)
 		hw->mac.ops.stop_link_on_d3(hw);
 
 	if (wufc) {
+		u32 fctrl;
+
 		ixgbe_set_rx_mode(netdev);
 
 		/* enable the optics for 82599 SFP+ fiber as we can WoL */
 		if (hw->mac.ops.enable_tx_laser)
 			hw->mac.ops.enable_tx_laser(hw);
 
-		/* turn on all-multi mode if wake on multicast is enabled */
-		if (wufc & IXGBE_WUFC_MC) {
-			fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
-			fctrl |= IXGBE_FCTRL_MPE;
-			IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
-		}
+		/* enable the reception of multicast packets */
+		fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
+		fctrl |= IXGBE_FCTRL_MPE;
+		IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
 
 		ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL);
 		ctrl |= IXGBE_CTRL_GIO_DIS;
@@ -7655,6 +7633,7 @@ static void ixgbe_sfp_detection_subtask(struct ixgbe_adapter *adapter)
 static void ixgbe_sfp_link_config_subtask(struct ixgbe_adapter *adapter)
 {
 	struct ixgbe_hw *hw = &adapter->hw;
+	u32 cap_speed;
 	u32 speed;
 	bool autoneg = false;
 
@@ -7667,16 +7646,14 @@ static void ixgbe_sfp_link_config_subtask(struct ixgbe_adapter *adapter)
 
 	adapter->flags &= ~IXGBE_FLAG_NEED_LINK_CONFIG;
 
-	speed = hw->phy.autoneg_advertised;
-	if ((!speed) && (hw->mac.ops.get_link_capabilities)) {
-		hw->mac.ops.get_link_capabilities(hw, &speed, &autoneg);
+	hw->mac.ops.get_link_capabilities(hw, &cap_speed, &autoneg);
 
-		/* setup the highest link when no autoneg */
-		if (!autoneg) {
-			if (speed & IXGBE_LINK_SPEED_10GB_FULL)
-				speed = IXGBE_LINK_SPEED_10GB_FULL;
-		}
-	}
+	/* advertise highest capable link speed */
+	if (!autoneg && (cap_speed & IXGBE_LINK_SPEED_10GB_FULL))
+		speed = IXGBE_LINK_SPEED_10GB_FULL;
+	else
+		speed = cap_speed & (IXGBE_LINK_SPEED_10GB_FULL |
+				     IXGBE_LINK_SPEED_1GB_FULL);
 
 	if (hw->mac.ops.setup_link)
 		hw->mac.ops.setup_link(hw, speed, true);
@@ -8869,7 +8846,6 @@ int ixgbe_setup_tc(struct net_device *dev, u8 tc)
 {
 	struct ixgbe_adapter *adapter = netdev_priv(dev);
 	struct ixgbe_hw *hw = &adapter->hw;
-	bool pools;
 
 	/* Hardware supports up to 8 traffic classes */
 	if (tc > adapter->dcb_cfg.num_tcs.pg_tcs)
@@ -8878,10 +8854,6 @@ int ixgbe_setup_tc(struct net_device *dev, u8 tc)
 	if (hw->mac.type == ixgbe_mac_82598EB && tc && tc < MAX_TRAFFIC_CLASS)
 		return -EINVAL;
 
-	pools = (find_first_zero_bit(&adapter->fwd_bitmask, 32) > 1);
-	if (tc && pools && adapter->num_rx_pools > IXGBE_MAX_DCBMACVLANS)
-		return -EBUSY;
-
 	/* Hardware has to reinitialize queues and interrupts to
 	 * match packet buffer alignment. Unfortunately, the
 	 * hardware is not flexible enough to do this dynamically.
@@ -9044,6 +9016,7 @@ static int get_macvlan_queue(struct net_device *upper, void *_data)
 static int handle_redirect_action(struct ixgbe_adapter *adapter, int ifindex,
 				  u8 *queue, u64 *action)
 {
+	struct ixgbe_ring_feature *vmdq = &adapter->ring_feature[RING_F_VMDQ];
 	unsigned int num_vfs = adapter->num_vfs, vf;
 	struct upper_walk_data data;
 	struct net_device *upper;
@@ -9052,11 +9025,7 @@ static int handle_redirect_action(struct ixgbe_adapter *adapter, int ifindex,
 	for (vf = 0; vf < num_vfs; ++vf) {
 		upper = pci_get_drvdata(adapter->vfinfo[vf].vfdev);
 		if (upper->ifindex == ifindex) {
-			if (adapter->num_rx_pools > 1)
-				*queue = vf * 2;
-			else
-				*queue = vf * adapter->num_rx_queues_per_pool;
-
+			*queue = vf * __ALIGN_MASK(1, ~vmdq->mask);
 			*action = vf + 1;
 			*action <<= ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF;
 			return 0;
@@ -9101,9 +9070,11 @@ static int parse_tc_actions(struct ixgbe_adapter *adapter,
 
 		/* Redirect to a VF or a offloaded macvlan */
 		if (is_tcf_mirred_egress_redirect(a)) {
-			int ifindex = tcf_mirred_ifindex(a);
+			struct net_device *dev = tcf_mirred_dev(a);
 
-			err = handle_redirect_action(adapter, ifindex, queue,
+			if (!dev)
+				return -EINVAL;
+			err = handle_redirect_action(adapter, dev->ifindex, queue,
 						     action);
 			if (err == 0)
 				return err;
@@ -9821,6 +9792,7 @@ static void *ixgbe_fwd_add(struct net_device *pdev, struct net_device *vdev)
 	struct ixgbe_fwd_adapter *fwd_adapter = NULL;
 	struct ixgbe_adapter *adapter = netdev_priv(pdev);
 	int used_pools = adapter->num_vfs + adapter->num_rx_pools;
+	int tcs = netdev_get_num_tc(pdev) ? : 1;
 	unsigned int limit;
 	int pool, err;
 
@@ -9848,7 +9820,7 @@ static void *ixgbe_fwd_add(struct net_device *pdev, struct net_device *vdev)
 	}
 
 	if (((adapter->flags & IXGBE_FLAG_DCB_ENABLED) &&
-	      adapter->num_rx_pools > IXGBE_MAX_DCBMACVLANS - 1) ||
+	      adapter->num_rx_pools >= (MAX_TX_QUEUES / tcs)) ||
 	    (adapter->num_rx_pools > IXGBE_MAX_MACVLANS))
 		return ERR_PTR(-EBUSY);
 
@@ -9856,10 +9828,9 @@ static void *ixgbe_fwd_add(struct net_device *pdev, struct net_device *vdev)
 	if (!fwd_adapter)
 		return ERR_PTR(-ENOMEM);
 
-	pool = find_first_zero_bit(&adapter->fwd_bitmask, 32);
-	adapter->num_rx_pools++;
-	set_bit(pool, &adapter->fwd_bitmask);
-	limit = find_last_bit(&adapter->fwd_bitmask, 32);
+	pool = find_first_zero_bit(adapter->fwd_bitmask, adapter->num_rx_pools);
+	set_bit(pool, adapter->fwd_bitmask);
+	limit = find_last_bit(adapter->fwd_bitmask, adapter->num_rx_pools + 1);
 
 	/* Enable VMDq flag so device will be set in VM mode */
 	adapter->flags |= IXGBE_FLAG_VMDQ_ENABLED | IXGBE_FLAG_SRIOV_ENABLED;
@@ -9885,8 +9856,7 @@ static void *ixgbe_fwd_add(struct net_device *pdev, struct net_device *vdev)
 	/* unwind counter and free adapter struct */
 	netdev_info(pdev,
 		    "%s: dfwd hardware acceleration failed\n", vdev->name);
-	clear_bit(pool, &adapter->fwd_bitmask);
-	adapter->num_rx_pools--;
+	clear_bit(pool, adapter->fwd_bitmask);
 	kfree(fwd_adapter);
 	return ERR_PTR(err);
 }
@@ -9897,10 +9867,9 @@ static void ixgbe_fwd_del(struct net_device *pdev, void *priv)
 	struct ixgbe_adapter *adapter = fwd_adapter->real_adapter;
 	unsigned int limit;
 
-	clear_bit(fwd_adapter->pool, &adapter->fwd_bitmask);
-	adapter->num_rx_pools--;
+	clear_bit(fwd_adapter->pool, adapter->fwd_bitmask);
 
-	limit = find_last_bit(&adapter->fwd_bitmask, 32);
+	limit = find_last_bit(adapter->fwd_bitmask, adapter->num_rx_pools);
 	adapter->ring_feature[RING_F_VMDQ].limit = limit + 1;
 	ixgbe_fwd_ring_down(fwd_adapter->netdev, fwd_adapter);
 
@@ -9915,11 +9884,11 @@ static void ixgbe_fwd_del(struct net_device *pdev, void *priv)
 	}
 
 	ixgbe_setup_tc(pdev, netdev_get_num_tc(pdev));
-	netdev_dbg(pdev, "pool %i:%i queues %i:%i VSI bitmask %lx\n",
+	netdev_dbg(pdev, "pool %i:%i queues %i:%i\n",
 		   fwd_adapter->pool, adapter->num_rx_pools,
 		   fwd_adapter->rx_base_queue,
-		   fwd_adapter->rx_base_queue + adapter->num_rx_queues_per_pool,
-		   adapter->fwd_bitmask);
+		   fwd_adapter->rx_base_queue +
+		   adapter->num_rx_queues_per_pool);
 	kfree(fwd_adapter);
 }
 
@@ -10233,6 +10202,41 @@ bool ixgbe_wol_supported(struct ixgbe_adapter *adapter, u16 device_id,
 }
 
 /**
+ * ixgbe_set_fw_version - Set FW version
+ * @adapter: the adapter private structure
+ *
+ * This function is used by probe and ethtool to determine and format the
+ * FW version for display. The FW version is taken from the EEPROM/NVM.
+ */
+static void ixgbe_set_fw_version(struct ixgbe_adapter *adapter)
+{
+	struct ixgbe_hw *hw = &adapter->hw;
+	struct ixgbe_nvm_version nvm_ver;
+
+	ixgbe_get_oem_prod_version(hw, &nvm_ver);
+	if (nvm_ver.oem_valid) {
+		snprintf(adapter->eeprom_id, sizeof(adapter->eeprom_id),
+			 "%x.%x.%x", nvm_ver.oem_major, nvm_ver.oem_minor,
+			 nvm_ver.oem_release);
+		return;
+	}
+
+	ixgbe_get_etk_id(hw, &nvm_ver);
+	ixgbe_get_orom_version(hw, &nvm_ver);
+
+	if (nvm_ver.or_valid) {
+		snprintf(adapter->eeprom_id, sizeof(adapter->eeprom_id),
+			 "0x%08x, %d.%d.%d", nvm_ver.etk_id, nvm_ver.or_major,
+			 nvm_ver.or_build, nvm_ver.or_patch);
+		return;
+	}
+
+	/* Fall back to the plain ETrack ID format */
+	snprintf(adapter->eeprom_id, sizeof(adapter->eeprom_id),
+		 "0x%08x", nvm_ver.etk_id);
+}
+
+/**
  * ixgbe_probe - Device Initialization Routine
  * @pdev: PCI device information struct
  * @ent: entry in ixgbe_pci_tbl
@@ -10568,8 +10572,7 @@ static int ixgbe_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 	device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol);
 
 	/* save off EEPROM version number */
-	hw->eeprom.ops.read(hw, 0x2e, &adapter->eeprom_verh);
-	hw->eeprom.ops.read(hw, 0x2d, &adapter->eeprom_verl);
+	ixgbe_set_fw_version(adapter);
 
 	/* pick up the PCI bus settings for reporting later */
 	if (ixgbe_pcie_from_parent(hw))
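
For reference, the three eeprom_id formats ixgbe_set_fw_version() can
produce, shown with hypothetical NVM contents:

    /*
     * OEM product version valid:	"1.2.30"
     * OptionROM version valid:		"0x80000d2a, 1.5.4"
     * fallback, ETrack ID only:	"0x80000d2a"
     */
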
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
index 112d24c..0085f46 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
@@ -227,9 +227,6 @@ void ixgbe_enable_sriov(struct ixgbe_adapter *adapter, unsigned int max_vfs)
 int ixgbe_disable_sriov(struct ixgbe_adapter *adapter)
 {
 	unsigned int num_vfs = adapter->num_vfs, vf;
-	struct ixgbe_hw *hw = &adapter->hw;
-	u32 gpie;
-	u32 vmdctl;
 	int rss;
 
 	/* set num VFs to 0 to prevent access to vfinfo */
@@ -271,18 +268,6 @@ int ixgbe_disable_sriov(struct ixgbe_adapter *adapter)
 	pci_disable_sriov(adapter->pdev);
 #endif
 
-	/* turn off device IOV mode */
-	IXGBE_WRITE_REG(hw, IXGBE_GCR_EXT, 0);
-	gpie = IXGBE_READ_REG(hw, IXGBE_GPIE);
-	gpie &= ~IXGBE_GPIE_VTMODE_MASK;
-	IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
-
-	/* set default pool back to 0 */
-	vmdctl = IXGBE_READ_REG(hw, IXGBE_VT_CTL);
-	vmdctl &= ~IXGBE_VT_CTL_POOL_MASK;
-	IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, vmdctl);
-	IXGBE_WRITE_FLUSH(hw);
-
 	/* Disable VMDq flag so device will be set in VM mode */
 	if (adapter->ring_feature[RING_F_VMDQ].limit == 1) {
 		adapter->flags &= ~IXGBE_FLAG_VMDQ_ENABLED;
@@ -305,10 +290,9 @@ static int ixgbe_pci_sriov_enable(struct pci_dev *dev, int num_vfs)
 {
 #ifdef CONFIG_PCI_IOV
 	struct ixgbe_adapter *adapter = pci_get_drvdata(dev);
-	int err = 0;
-	u8 num_tc;
-	int i;
 	int pre_existing_vfs = pci_num_vf(dev);
+	int err = 0, num_rx_pools, i, limit;
+	u8 num_tc;
 
 	if (pre_existing_vfs && pre_existing_vfs != num_vfs)
 		err = ixgbe_disable_sriov(adapter);
@@ -331,22 +315,14 @@ static int ixgbe_pci_sriov_enable(struct pci_dev *dev, int num_vfs)
 	 * other values out of range.
 	 */
 	num_tc = netdev_get_num_tc(adapter->netdev);
+	num_rx_pools = adapter->num_rx_pools;
+	limit = (num_tc > 4) ? IXGBE_MAX_VFS_8TC :
+		(num_tc > 1) ? IXGBE_MAX_VFS_4TC : IXGBE_MAX_VFS_1TC;
 
-	if (num_tc > 4) {
-		if ((num_vfs + adapter->num_rx_pools) > IXGBE_MAX_VFS_8TC) {
-			e_dev_err("Currently the device is configured with %d TCs, Creating more than %d VFs is not allowed\n", num_tc, IXGBE_MAX_VFS_8TC);
-			return -EPERM;
-		}
-	} else if ((num_tc > 1) && (num_tc <= 4)) {
-		if ((num_vfs + adapter->num_rx_pools) > IXGBE_MAX_VFS_4TC) {
-			e_dev_err("Currently the device is configured with %d TCs, Creating more than %d VFs is not allowed\n", num_tc, IXGBE_MAX_VFS_4TC);
-			return -EPERM;
-		}
-	} else {
-		if ((num_vfs + adapter->num_rx_pools) > IXGBE_MAX_VFS_1TC) {
-			e_dev_err("Currently the device is configured with %d TCs, Creating more than %d VFs is not allowed\n", num_tc, IXGBE_MAX_VFS_1TC);
-			return -EPERM;
-		}
+	if (num_vfs > (limit - num_rx_pools)) {
+		e_dev_err("Currently configured with %d TCs and %d offloaded macvlans. Creating more than %d VFs is not allowed\n",
+			  num_tc, num_rx_pools - 1, limit - num_rx_pools);
+		return -EPERM;
 	}
 
 	err = __ixgbe_enable_sriov(adapter, num_vfs);
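
The nested ternary collapses three duplicated branches without
changing the arithmetic. A worked example; the IXGBE_MAX_VFS_* values
here are assumptions for illustration only:

    /* assumed: 64 VFs at 1 TC, 32 at up to 4 TCs, 16 at up to 8 TCs */
    int limit = (num_tc > 4) ? 16 : (num_tc > 1) ? 32 : 64;

    /*
     * num_tc = 4, num_rx_pools = 3 (PF pool plus two macvlan offloads):
     *	limit - num_rx_pools == 32 - 3 == 29
     * so a request for 30 or more VFs now fails with -EPERM.
     */
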
@@ -378,13 +354,15 @@ static int ixgbe_pci_sriov_disable(struct pci_dev *dev)
 	int err;
 #ifdef CONFIG_PCI_IOV
 	u32 current_flags = adapter->flags;
+	int prev_num_vf = pci_num_vf(dev);
 #endif
 
 	err = ixgbe_disable_sriov(adapter);
 
 	/* Only reinit if no error and state changed */
 #ifdef CONFIG_PCI_IOV
-	if (!err && current_flags != adapter->flags)
+	if (!err && (current_flags != adapter->flags ||
+		     prev_num_vf != pci_num_vf(dev)))
 		ixgbe_sriov_reinit(adapter);
 #endif
 
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
index ffa0ee5..21eb79a 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
@@ -235,6 +235,45 @@ struct ixgbe_thermal_sensor_data {
 	struct ixgbe_thermal_diode_data sensor[IXGBE_MAX_SENSORS];
 };
 
+#define NVM_OROM_OFFSET		0x17
+#define NVM_OROM_BLK_LOW	0x83
+#define NVM_OROM_BLK_HI		0x84
+#define NVM_OROM_PATCH_MASK	0xFF
+#define NVM_OROM_SHIFT		8
+
+#define NVM_VER_MASK		0x00FF	/* version mask */
+#define NVM_VER_SHIFT		8	/* version bit shift */
+#define NVM_OEM_PROD_VER_PTR	0x1B /* OEM Product version block pointer */
+#define NVM_OEM_PROD_VER_CAP_OFF 0x1 /* OEM Product version format offset */
+#define NVM_OEM_PROD_VER_OFF_L	0x2  /* OEM Product version offset low */
+#define NVM_OEM_PROD_VER_OFF_H	0x3  /* OEM Product version offset high */
+#define NVM_OEM_PROD_VER_CAP_MASK 0xF /* OEM Product version cap mask */
+#define NVM_OEM_PROD_VER_MOD_LEN 0x3 /* OEM Product version module length */
+#define NVM_ETK_OFF_LOW		0x2D /* version low order word */
+#define NVM_ETK_OFF_HI		0x2E /* version high order word */
+#define NVM_ETK_SHIFT		16   /* high version word shift */
+#define NVM_VER_INVALID		0xFFFF
+#define NVM_ETK_VALID		0x8000
+#define NVM_INVALID_PTR		0xFFFF
+#define NVM_VER_SIZE		32   /* version string size */
+
+struct ixgbe_nvm_version {
+	u32 etk_id;
+	u8  nvm_major;
+	u16 nvm_minor;
+	u8  nvm_id;
+
+	bool oem_valid;
+	u8   oem_major;
+	u8   oem_minor;
+	u16  oem_release;
+
+	bool or_valid;
+	u8  or_major;
+	u16 or_build;
+	u8  or_patch;
+};
+
 /* Interrupt Registers */
 #define IXGBE_EICR      0x00800
 #define IXGBE_EICS      0x00808
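
A rough sketch of how the new NVM_ETK_* constants are presumably
consumed by ixgbe_get_etk_id() (the helper itself is outside this
hunk, so treat the body as an assumption):

    static void ixgbe_get_etk_id(struct ixgbe_hw *hw,
    			     struct ixgbe_nvm_version *nvm_ver)
    {
    	u16 etk_lo, etk_hi;

    	if (hw->eeprom.ops.read(hw, NVM_ETK_OFF_LOW, &etk_lo) ||
    	    hw->eeprom.ops.read(hw, NVM_ETK_OFF_HI, &etk_hi)) {
    		nvm_ver->etk_id = NVM_VER_INVALID;
    		return;
    	}

    	/* low word | (high word << 16) forms the 32-bit ETrack ID */
    	nvm_ver->etk_id = etk_lo | ((u32)etk_hi << NVM_ETK_SHIFT);
    }
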
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
index 1f4a691..573f743 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
@@ -1896,10 +1896,6 @@ static void ixgbevf_set_rx_mode(struct net_device *netdev)
 	unsigned int flags = netdev->flags;
 	int xcast_mode;
 
-	xcast_mode = (flags & IFF_ALLMULTI) ? IXGBEVF_XCAST_MODE_ALLMULTI :
-		     (flags & (IFF_BROADCAST | IFF_MULTICAST)) ?
-		     IXGBEVF_XCAST_MODE_MULTI : IXGBEVF_XCAST_MODE_NONE;
-
 	/* request the most inclusive mode we need */
 	if (flags & IFF_PROMISC)
 		xcast_mode = IXGBEVF_XCAST_MODE_PROMISC;
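
With the unconditional pre-computation gone, the surviving if/else
ladder presumably covers every case itself, along these lines (a
sketch, not the full hunk):

    if (flags & IFF_PROMISC)
    	xcast_mode = IXGBEVF_XCAST_MODE_PROMISC;
    else if (flags & IFF_ALLMULTI)
    	xcast_mode = IXGBEVF_XCAST_MODE_ALLMULTI;
    else if (flags & (IFF_BROADCAST | IFF_MULTICAST))
    	xcast_mode = IXGBEVF_XCAST_MODE_MULTI;
    else
    	xcast_mode = IXGBEVF_XCAST_MODE_NONE;
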
diff --git a/drivers/net/ethernet/marvell/Kconfig b/drivers/net/ethernet/marvell/Kconfig
index da6fb82..ebe5c91 100644
--- a/drivers/net/ethernet/marvell/Kconfig
+++ b/drivers/net/ethernet/marvell/Kconfig
@@ -60,7 +60,7 @@
 	depends on ARCH_MVEBU || COMPILE_TEST
 	depends on HAS_DMA
 	select MVMDIO
-	select FIXED_PHY
+	select PHYLINK
 	---help---
 	  This driver supports the network interface units in the
 	  Marvell ARMADA XP, ARMADA 370, ARMADA 38x and
diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
index a539263..25e9a55 100644
--- a/drivers/net/ethernet/marvell/mvneta.c
+++ b/drivers/net/ethernet/marvell/mvneta.c
@@ -28,7 +28,7 @@
 #include <linux/of_mdio.h>
 #include <linux/of_net.h>
 #include <linux/phy.h>
-#include <linux/phy_fixed.h>
+#include <linux/phylink.h>
 #include <linux/platform_device.h>
 #include <linux/skbuff.h>
 #include <net/hwbm.h>
@@ -189,6 +189,7 @@
 #define MVNETA_GMAC_CTRL_0                       0x2c00
 #define      MVNETA_GMAC_MAX_RX_SIZE_SHIFT       2
 #define      MVNETA_GMAC_MAX_RX_SIZE_MASK        0x7ffc
+#define      MVNETA_GMAC0_PORT_1000BASE_X        BIT(1)
 #define      MVNETA_GMAC0_PORT_ENABLE            BIT(0)
 #define MVNETA_GMAC_CTRL_2                       0x2c08
 #define      MVNETA_GMAC2_INBAND_AN_ENABLE       BIT(0)
@@ -204,13 +205,19 @@
 #define      MVNETA_GMAC_TX_FLOW_CTRL_ENABLE     BIT(5)
 #define      MVNETA_GMAC_RX_FLOW_CTRL_ACTIVE     BIT(6)
 #define      MVNETA_GMAC_TX_FLOW_CTRL_ACTIVE     BIT(7)
+#define      MVNETA_GMAC_AN_COMPLETE             BIT(11)
+#define      MVNETA_GMAC_SYNC_OK                 BIT(14)
 #define MVNETA_GMAC_AUTONEG_CONFIG               0x2c0c
 #define      MVNETA_GMAC_FORCE_LINK_DOWN         BIT(0)
 #define      MVNETA_GMAC_FORCE_LINK_PASS         BIT(1)
 #define      MVNETA_GMAC_INBAND_AN_ENABLE        BIT(2)
+#define      MVNETA_GMAC_AN_BYPASS_ENABLE        BIT(3)
+#define      MVNETA_GMAC_INBAND_RESTART_AN       BIT(4)
 #define      MVNETA_GMAC_CONFIG_MII_SPEED        BIT(5)
 #define      MVNETA_GMAC_CONFIG_GMII_SPEED       BIT(6)
 #define      MVNETA_GMAC_AN_SPEED_EN             BIT(7)
+#define      MVNETA_GMAC_CONFIG_FLOW_CTRL        BIT(8)
+#define      MVNETA_GMAC_ADVERT_SYM_FLOW_CTRL    BIT(9)
 #define      MVNETA_GMAC_AN_FLOW_CTRL_EN         BIT(11)
 #define      MVNETA_GMAC_CONFIG_FULL_DUPLEX      BIT(12)
 #define      MVNETA_GMAC_AN_DUPLEX_EN            BIT(13)
@@ -237,6 +244,12 @@
 #define MVNETA_TXQ_TOKEN_SIZE_REG(q)             (0x3e40 + ((q) << 2))
 #define      MVNETA_TXQ_TOKEN_SIZE_MAX           0x7fffffff
 
+#define MVNETA_LPI_CTRL_0                        0x2cc0
+#define MVNETA_LPI_CTRL_1                        0x2cc4
+#define      MVNETA_LPI_REQUEST_ENABLE           BIT(0)
+#define MVNETA_LPI_CTRL_2                        0x2cc8
+#define MVNETA_LPI_STATUS                        0x2ccc
+
 #define MVNETA_CAUSE_TXQ_SENT_DESC_ALL_MASK	 0xff
 
 /* Descriptor ring Macros */
@@ -313,6 +326,11 @@
 #define MVNETA_RX_GET_BM_POOL_ID(rxd) \
 	(((rxd)->status & MVNETA_RXD_BM_POOL_MASK) >> MVNETA_RXD_BM_POOL_SHIFT)
 
+enum {
+	ETHTOOL_STAT_EEE_WAKEUP,
+	ETHTOOL_MAX_STATS,
+};
+
 struct mvneta_statistic {
 	unsigned short offset;
 	unsigned short type;
@@ -321,6 +339,7 @@ struct mvneta_statistic {
 
 #define T_REG_32	32
 #define T_REG_64	64
+#define T_SW		1
 
 static const struct mvneta_statistic mvneta_statistics[] = {
 	{ 0x3000, T_REG_64, "good_octets_received", },
@@ -355,6 +374,7 @@ static const struct mvneta_statistic mvneta_statistics[] = {
 	{ 0x304c, T_REG_32, "broadcast_frames_sent", },
 	{ 0x3054, T_REG_32, "fc_sent", },
 	{ 0x300c, T_REG_32, "internal_mac_transmit_err", },
+	{ ETHTOOL_STAT_EEE_WAKEUP, T_SW, "eee_wakeup_errors", },
 };
 
 struct mvneta_pcpu_stats {
@@ -407,20 +427,20 @@ struct mvneta_port {
 	u16 tx_ring_size;
 	u16 rx_ring_size;
 
-	struct mii_bus *mii_bus;
 	phy_interface_t phy_interface;
-	struct device_node *phy_node;
-	unsigned int link;
-	unsigned int duplex;
-	unsigned int speed;
+	struct device_node *dn;
 	unsigned int tx_csum_limit;
-	unsigned int use_inband_status:1;
+	struct phylink *phylink;
 
 	struct mvneta_bm *bm_priv;
 	struct mvneta_bm_pool *pool_long;
 	struct mvneta_bm_pool *pool_short;
 	int bm_win_id;
 
+	bool eee_enabled;
+	bool eee_active;
+	bool tx_lpi_enabled;
+
 	u64 ethtool_stats[ARRAY_SIZE(mvneta_statistics)];
 
 	u32 indir[MVNETA_RSS_LU_TABLE_SIZE];
@@ -1214,10 +1234,6 @@ static void mvneta_port_disable(struct mvneta_port *pp)
 	val &= ~MVNETA_GMAC0_PORT_ENABLE;
 	mvreg_write(pp, MVNETA_GMAC_CTRL_0, val);
 
-	pp->link = 0;
-	pp->duplex = -1;
-	pp->speed = 0;
-
 	udelay(200);
 }
 
@@ -1277,44 +1293,6 @@ static void mvneta_set_other_mcast_table(struct mvneta_port *pp, int queue)
 		mvreg_write(pp, MVNETA_DA_FILT_OTH_MCAST + offset, val);
 }
 
-static void mvneta_set_autoneg(struct mvneta_port *pp, int enable)
-{
-	u32 val;
-
-	if (enable) {
-		val = mvreg_read(pp, MVNETA_GMAC_AUTONEG_CONFIG);
-		val &= ~(MVNETA_GMAC_FORCE_LINK_PASS |
-			 MVNETA_GMAC_FORCE_LINK_DOWN |
-			 MVNETA_GMAC_AN_FLOW_CTRL_EN);
-		val |= MVNETA_GMAC_INBAND_AN_ENABLE |
-		       MVNETA_GMAC_AN_SPEED_EN |
-		       MVNETA_GMAC_AN_DUPLEX_EN;
-		mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, val);
-
-		val = mvreg_read(pp, MVNETA_GMAC_CLOCK_DIVIDER);
-		val |= MVNETA_GMAC_1MS_CLOCK_ENABLE;
-		mvreg_write(pp, MVNETA_GMAC_CLOCK_DIVIDER, val);
-
-		val = mvreg_read(pp, MVNETA_GMAC_CTRL_2);
-		val |= MVNETA_GMAC2_INBAND_AN_ENABLE;
-		mvreg_write(pp, MVNETA_GMAC_CTRL_2, val);
-	} else {
-		val = mvreg_read(pp, MVNETA_GMAC_AUTONEG_CONFIG);
-		val &= ~(MVNETA_GMAC_INBAND_AN_ENABLE |
-		       MVNETA_GMAC_AN_SPEED_EN |
-		       MVNETA_GMAC_AN_DUPLEX_EN);
-		mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, val);
-
-		val = mvreg_read(pp, MVNETA_GMAC_CLOCK_DIVIDER);
-		val &= ~MVNETA_GMAC_1MS_CLOCK_ENABLE;
-		mvreg_write(pp, MVNETA_GMAC_CLOCK_DIVIDER, val);
-
-		val = mvreg_read(pp, MVNETA_GMAC_CTRL_2);
-		val &= ~MVNETA_GMAC2_INBAND_AN_ENABLE;
-		mvreg_write(pp, MVNETA_GMAC_CTRL_2, val);
-	}
-}
-
 static void mvneta_percpu_unmask_interrupt(void *arg)
 {
 	struct mvneta_port *pp = arg;
@@ -1467,7 +1445,6 @@ static void mvneta_defaults_set(struct mvneta_port *pp)
 	val &= ~MVNETA_PHY_POLLING_ENABLE;
 	mvreg_write(pp, MVNETA_UNIT_CONTROL, val);
 
-	mvneta_set_autoneg(pp, pp->use_inband_status);
 	mvneta_set_ucast_table(pp, -1);
 	mvneta_set_special_mcast_table(pp, -1);
 	mvneta_set_other_mcast_table(pp, -1);
@@ -2692,26 +2669,11 @@ static irqreturn_t mvneta_percpu_isr(int irq, void *dev_id)
 	return IRQ_HANDLED;
 }
 
-static int mvneta_fixed_link_update(struct mvneta_port *pp,
-				    struct phy_device *phy)
+static void mvneta_link_change(struct mvneta_port *pp)
 {
-	struct fixed_phy_status status;
-	struct fixed_phy_status changed = {};
 	u32 gmac_stat = mvreg_read(pp, MVNETA_GMAC_STATUS);
 
-	status.link = !!(gmac_stat & MVNETA_GMAC_LINK_UP);
-	if (gmac_stat & MVNETA_GMAC_SPEED_1000)
-		status.speed = SPEED_1000;
-	else if (gmac_stat & MVNETA_GMAC_SPEED_100)
-		status.speed = SPEED_100;
-	else
-		status.speed = SPEED_10;
-	status.duplex = !!(gmac_stat & MVNETA_GMAC_FULL_DUPLEX);
-	changed.link = 1;
-	changed.speed = 1;
-	changed.duplex = 1;
-	fixed_phy_update_state(phy, &status, &changed);
-	return 0;
+	phylink_mac_change(pp->phylink, !!(gmac_stat & MVNETA_GMAC_LINK_UP));
 }
 
 /* NAPI handler
@@ -2727,7 +2689,6 @@ static int mvneta_poll(struct napi_struct *napi, int budget)
 	u32 cause_rx_tx;
 	int rx_queue;
 	struct mvneta_port *pp = netdev_priv(napi->dev);
-	struct net_device *ndev = pp->dev;
 	struct mvneta_pcpu_port *port = this_cpu_ptr(pp->ports);
 
 	if (!netif_running(pp->dev)) {
@@ -2741,12 +2702,10 @@ static int mvneta_poll(struct napi_struct *napi, int budget)
 		u32 cause_misc = mvreg_read(pp, MVNETA_INTR_MISC_CAUSE);
 
 		mvreg_write(pp, MVNETA_INTR_MISC_CAUSE, 0);
-		if (pp->use_inband_status && (cause_misc &
-				(MVNETA_CAUSE_PHY_STATUS_CHANGE |
-				 MVNETA_CAUSE_LINK_CHANGE |
-				 MVNETA_CAUSE_PSC_SYNC_CHANGE))) {
-			mvneta_fixed_link_update(pp, ndev->phydev);
-		}
+
+		if (cause_misc & (MVNETA_CAUSE_PHY_STATUS_CHANGE |
+				  MVNETA_CAUSE_LINK_CHANGE))
+			mvneta_link_change(pp);
 	}
 
 	/* Release Tx descriptors */
@@ -3060,7 +3019,6 @@ static int mvneta_setup_txqs(struct mvneta_port *pp)
 static void mvneta_start_dev(struct mvneta_port *pp)
 {
 	int cpu;
-	struct net_device *ndev = pp->dev;
 
 	mvneta_max_rx_size_set(pp, pp->pkt_size);
 	mvneta_txq_max_tx_size_set(pp, pp->pkt_size);
@@ -3085,19 +3043,17 @@ static void mvneta_start_dev(struct mvneta_port *pp)
 
 	mvreg_write(pp, MVNETA_INTR_MISC_MASK,
 		    MVNETA_CAUSE_PHY_STATUS_CHANGE |
-		    MVNETA_CAUSE_LINK_CHANGE |
-		    MVNETA_CAUSE_PSC_SYNC_CHANGE);
+		    MVNETA_CAUSE_LINK_CHANGE);
 
-	phy_start(ndev->phydev);
+	phylink_start(pp->phylink);
 	netif_tx_start_all_queues(pp->dev);
 }
 
 static void mvneta_stop_dev(struct mvneta_port *pp)
 {
 	unsigned int cpu;
-	struct net_device *ndev = pp->dev;
 
-	phy_stop(ndev->phydev);
+	phylink_stop(pp->phylink);
 
 	if (!pp->neta_armada3700) {
 		for_each_online_cpu(cpu) {
@@ -3251,103 +3207,260 @@ static int mvneta_set_mac_addr(struct net_device *dev, void *addr)
 	return 0;
 }
 
-static void mvneta_adjust_link(struct net_device *ndev)
+static void mvneta_validate(struct net_device *ndev, unsigned long *supported,
+			    struct phylink_link_state *state)
+{
+	__ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
+
+	/* We only support QSGMII, SGMII, 802.3z and RGMII modes */
+	if (state->interface != PHY_INTERFACE_MODE_NA &&
+	    state->interface != PHY_INTERFACE_MODE_QSGMII &&
+	    state->interface != PHY_INTERFACE_MODE_SGMII &&
+	    !phy_interface_mode_is_8023z(state->interface) &&
+	    !phy_interface_mode_is_rgmii(state->interface)) {
+		bitmap_zero(supported, __ETHTOOL_LINK_MODE_MASK_NBITS);
+		return;
+	}
+
+	/* Allow all the expected bits */
+	phylink_set(mask, Autoneg);
+	phylink_set_port_modes(mask);
+
+	/* Asymmetric pause is unsupported */
+	phylink_set(mask, Pause);
+	/* Half-duplex at speeds higher than 100Mbit is unsupported */
+	phylink_set(mask, 1000baseT_Full);
+	phylink_set(mask, 1000baseX_Full);
+
+	if (!phy_interface_mode_is_8023z(state->interface)) {
+		/* 10M and 100M are only supported in non-802.3z mode */
+		phylink_set(mask, 10baseT_Half);
+		phylink_set(mask, 10baseT_Full);
+		phylink_set(mask, 100baseT_Half);
+		phylink_set(mask, 100baseT_Full);
+	}
+
+	bitmap_and(supported, supported, mask,
+		   __ETHTOOL_LINK_MODE_MASK_NBITS);
+	bitmap_and(state->advertising, state->advertising, mask,
+		   __ETHTOOL_LINK_MODE_MASK_NBITS);
+}
+
+static int mvneta_mac_link_state(struct net_device *ndev,
+				 struct phylink_link_state *state)
 {
 	struct mvneta_port *pp = netdev_priv(ndev);
-	struct phy_device *phydev = ndev->phydev;
-	int status_change = 0;
+	u32 gmac_stat;
 
-	if (phydev->link) {
-		if ((pp->speed != phydev->speed) ||
-		    (pp->duplex != phydev->duplex)) {
-			u32 val;
+	gmac_stat = mvreg_read(pp, MVNETA_GMAC_STATUS);
 
-			val = mvreg_read(pp, MVNETA_GMAC_AUTONEG_CONFIG);
-			val &= ~(MVNETA_GMAC_CONFIG_MII_SPEED |
-				 MVNETA_GMAC_CONFIG_GMII_SPEED |
-				 MVNETA_GMAC_CONFIG_FULL_DUPLEX);
+	if (gmac_stat & MVNETA_GMAC_SPEED_1000)
+		state->speed = SPEED_1000;
+	else if (gmac_stat & MVNETA_GMAC_SPEED_100)
+		state->speed = SPEED_100;
+	else
+		state->speed = SPEED_10;
 
-			if (phydev->duplex)
-				val |= MVNETA_GMAC_CONFIG_FULL_DUPLEX;
+	state->an_complete = !!(gmac_stat & MVNETA_GMAC_AN_COMPLETE);
+	state->link = !!(gmac_stat & MVNETA_GMAC_LINK_UP);
+	state->duplex = !!(gmac_stat & MVNETA_GMAC_FULL_DUPLEX);
 
-			if (phydev->speed == SPEED_1000)
-				val |= MVNETA_GMAC_CONFIG_GMII_SPEED;
-			else if (phydev->speed == SPEED_100)
-				val |= MVNETA_GMAC_CONFIG_MII_SPEED;
+	state->pause = 0;
+	if (gmac_stat & MVNETA_GMAC_RX_FLOW_CTRL_ENABLE)
+		state->pause |= MLO_PAUSE_RX;
+	if (gmac_stat & MVNETA_GMAC_TX_FLOW_CTRL_ENABLE)
+		state->pause |= MLO_PAUSE_TX;
 
-			mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, val);
+	return 1;
+}
 
-			pp->duplex = phydev->duplex;
-			pp->speed  = phydev->speed;
-		}
+static void mvneta_mac_an_restart(struct net_device *ndev)
+{
+	struct mvneta_port *pp = netdev_priv(ndev);
+	u32 gmac_an = mvreg_read(pp, MVNETA_GMAC_AUTONEG_CONFIG);
+
+	mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG,
+		    gmac_an | MVNETA_GMAC_INBAND_RESTART_AN);
+	mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG,
+		    gmac_an & ~MVNETA_GMAC_INBAND_RESTART_AN);
+}
+
+static void mvneta_mac_config(struct net_device *ndev, unsigned int mode,
+	const struct phylink_link_state *state)
+{
+	struct mvneta_port *pp = netdev_priv(ndev);
+	u32 new_ctrl0, gmac_ctrl0 = mvreg_read(pp, MVNETA_GMAC_CTRL_0);
+	u32 new_ctrl2, gmac_ctrl2 = mvreg_read(pp, MVNETA_GMAC_CTRL_2);
+	u32 new_clk, gmac_clk = mvreg_read(pp, MVNETA_GMAC_CLOCK_DIVIDER);
+	u32 new_an, gmac_an = mvreg_read(pp, MVNETA_GMAC_AUTONEG_CONFIG);
+
+	new_ctrl0 = gmac_ctrl0 & ~MVNETA_GMAC0_PORT_1000BASE_X;
+	new_ctrl2 = gmac_ctrl2 & ~(MVNETA_GMAC2_INBAND_AN_ENABLE |
+				   MVNETA_GMAC2_PORT_RESET);
+	new_clk = gmac_clk & ~MVNETA_GMAC_1MS_CLOCK_ENABLE;
+	new_an = gmac_an & ~(MVNETA_GMAC_INBAND_AN_ENABLE |
+			     MVNETA_GMAC_INBAND_RESTART_AN |
+			     MVNETA_GMAC_CONFIG_MII_SPEED |
+			     MVNETA_GMAC_CONFIG_GMII_SPEED |
+			     MVNETA_GMAC_AN_SPEED_EN |
+			     MVNETA_GMAC_ADVERT_SYM_FLOW_CTRL |
+			     MVNETA_GMAC_CONFIG_FLOW_CTRL |
+			     MVNETA_GMAC_AN_FLOW_CTRL_EN |
+			     MVNETA_GMAC_CONFIG_FULL_DUPLEX |
+			     MVNETA_GMAC_AN_DUPLEX_EN);
+
+	/* Even though it might look weird, when we're configured in
+	 * SGMII or QSGMII mode, the RGMII bit needs to be set.
+	 */
+	new_ctrl2 |= MVNETA_GMAC2_PORT_RGMII;
+
+	if (state->interface == PHY_INTERFACE_MODE_QSGMII ||
+	    state->interface == PHY_INTERFACE_MODE_SGMII ||
+	    phy_interface_mode_is_8023z(state->interface))
+		new_ctrl2 |= MVNETA_GMAC2_PCS_ENABLE;
+
+	if (phylink_test(state->advertising, Pause))
+		new_an |= MVNETA_GMAC_ADVERT_SYM_FLOW_CTRL;
+	if (state->pause & MLO_PAUSE_TXRX_MASK)
+		new_an |= MVNETA_GMAC_CONFIG_FLOW_CTRL;
+
+	if (!phylink_autoneg_inband(mode)) {
+		/* Phy or fixed speed */
+		if (state->duplex)
+			new_an |= MVNETA_GMAC_CONFIG_FULL_DUPLEX;
+
+		if (state->speed == SPEED_1000)
+			new_an |= MVNETA_GMAC_CONFIG_GMII_SPEED;
+		else if (state->speed == SPEED_100)
+			new_an |= MVNETA_GMAC_CONFIG_MII_SPEED;
+	} else if (state->interface == PHY_INTERFACE_MODE_SGMII) {
+		/* SGMII mode receives the state from the PHY */
+		new_ctrl2 |= MVNETA_GMAC2_INBAND_AN_ENABLE;
+		new_clk |= MVNETA_GMAC_1MS_CLOCK_ENABLE;
+		new_an = (new_an & ~(MVNETA_GMAC_FORCE_LINK_DOWN |
+				     MVNETA_GMAC_FORCE_LINK_PASS)) |
+			 MVNETA_GMAC_INBAND_AN_ENABLE |
+			 MVNETA_GMAC_AN_SPEED_EN |
+			 MVNETA_GMAC_AN_DUPLEX_EN;
+	} else {
+		/* 802.3z negotiation - only 1000base-X */
+		new_ctrl0 |= MVNETA_GMAC0_PORT_1000BASE_X;
+		new_clk |= MVNETA_GMAC_1MS_CLOCK_ENABLE;
+		new_an = (new_an & ~(MVNETA_GMAC_FORCE_LINK_DOWN |
+				     MVNETA_GMAC_FORCE_LINK_PASS)) |
+			 MVNETA_GMAC_INBAND_AN_ENABLE |
+			 MVNETA_GMAC_CONFIG_GMII_SPEED |
+			 /* The MAC only supports FD mode */
+			 MVNETA_GMAC_CONFIG_FULL_DUPLEX;
+
+		if (state->pause & MLO_PAUSE_AN && state->an_enabled)
+			new_an |= MVNETA_GMAC_AN_FLOW_CTRL_EN;
 	}
 
-	if (phydev->link != pp->link) {
-		if (!phydev->link) {
-			pp->duplex = -1;
-			pp->speed = 0;
-		}
-
-		pp->link = phydev->link;
-		status_change = 1;
+	/* Armada 370 documentation says we can only change the port mode
+	 * and in-band enable when the link is down, so force it down
+	 * while making these changes. We also do this for GMAC_CTRL2. */
+	if ((new_ctrl0 ^ gmac_ctrl0) & MVNETA_GMAC0_PORT_1000BASE_X ||
+	    (new_ctrl2 ^ gmac_ctrl2) & MVNETA_GMAC2_INBAND_AN_ENABLE ||
+	    (new_an  ^ gmac_an) & MVNETA_GMAC_INBAND_AN_ENABLE) {
+		mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG,
+			    (gmac_an & ~MVNETA_GMAC_FORCE_LINK_PASS) |
+			    MVNETA_GMAC_FORCE_LINK_DOWN);
 	}
 
-	if (status_change) {
-		if (phydev->link) {
-			if (!pp->use_inband_status) {
-				u32 val = mvreg_read(pp,
-						  MVNETA_GMAC_AUTONEG_CONFIG);
-				val &= ~MVNETA_GMAC_FORCE_LINK_DOWN;
-				val |= MVNETA_GMAC_FORCE_LINK_PASS;
-				mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG,
-					    val);
-			}
-			mvneta_port_up(pp);
-		} else {
-			if (!pp->use_inband_status) {
-				u32 val = mvreg_read(pp,
-						  MVNETA_GMAC_AUTONEG_CONFIG);
-				val &= ~MVNETA_GMAC_FORCE_LINK_PASS;
-				val |= MVNETA_GMAC_FORCE_LINK_DOWN;
-				mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG,
-					    val);
-			}
-			mvneta_port_down(pp);
-		}
-		phy_print_status(phydev);
+	if (new_ctrl0 != gmac_ctrl0)
+		mvreg_write(pp, MVNETA_GMAC_CTRL_0, new_ctrl0);
+	if (new_ctrl2 != gmac_ctrl2)
+		mvreg_write(pp, MVNETA_GMAC_CTRL_2, new_ctrl2);
+	if (new_clk != gmac_clk)
+		mvreg_write(pp, MVNETA_GMAC_CLOCK_DIVIDER, new_clk);
+	if (new_an != gmac_an)
+		mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, new_an);
+
+	if (gmac_ctrl2 & MVNETA_GMAC2_PORT_RESET) {
+		while ((mvreg_read(pp, MVNETA_GMAC_CTRL_2) &
+			MVNETA_GMAC2_PORT_RESET) != 0)
+			continue;
 	}
 }
 
-static int mvneta_mdio_probe(struct mvneta_port *pp)
+static void mvneta_set_eee(struct mvneta_port *pp, bool enable)
 {
-	struct phy_device *phy_dev;
-	struct ethtool_wolinfo wol = { .cmd = ETHTOOL_GWOL };
+	u32 lpi_ctl1;
 
-	phy_dev = of_phy_connect(pp->dev, pp->phy_node, mvneta_adjust_link, 0,
-				 pp->phy_interface);
-	if (!phy_dev) {
-		netdev_err(pp->dev, "could not find the PHY\n");
-		return -ENODEV;
+	lpi_ctl1 = mvreg_read(pp, MVNETA_LPI_CTRL_1);
+	if (enable)
+		lpi_ctl1 |= MVNETA_LPI_REQUEST_ENABLE;
+	else
+		lpi_ctl1 &= ~MVNETA_LPI_REQUEST_ENABLE;
+	mvreg_write(pp, MVNETA_LPI_CTRL_1, lpi_ctl1);
+}
+
+static void mvneta_mac_link_down(struct net_device *ndev, unsigned int mode)
+{
+	struct mvneta_port *pp = netdev_priv(ndev);
+	u32 val;
+
+	mvneta_port_down(pp);
+
+	if (!phylink_autoneg_inband(mode)) {
+		val = mvreg_read(pp, MVNETA_GMAC_AUTONEG_CONFIG);
+		val &= ~MVNETA_GMAC_FORCE_LINK_PASS;
+		val |= MVNETA_GMAC_FORCE_LINK_DOWN;
+		mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, val);
 	}
 
-	phy_ethtool_get_wol(phy_dev, &wol);
+	pp->eee_active = false;
+	mvneta_set_eee(pp, false);
+}
+
+static void mvneta_mac_link_up(struct net_device *ndev, unsigned int mode,
+			       struct phy_device *phy)
+{
+	struct mvneta_port *pp = netdev_priv(ndev);
+	u32 val;
+
+	if (!phylink_autoneg_inband(mode)) {
+		val = mvreg_read(pp, MVNETA_GMAC_AUTONEG_CONFIG);
+		val &= ~MVNETA_GMAC_FORCE_LINK_DOWN;
+		val |= MVNETA_GMAC_FORCE_LINK_PASS;
+		mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, val);
+	}
+
+	mvneta_port_up(pp);
+
+	if (phy && pp->eee_enabled) {
+		pp->eee_active = phy_init_eee(phy, 0) >= 0;
+		mvneta_set_eee(pp, pp->eee_active && pp->tx_lpi_enabled);
+	}
+}
+
+static const struct phylink_mac_ops mvneta_phylink_ops = {
+	.validate = mvneta_validate,
+	.mac_link_state = mvneta_mac_link_state,
+	.mac_an_restart = mvneta_mac_an_restart,
+	.mac_config = mvneta_mac_config,
+	.mac_link_down = mvneta_mac_link_down,
+	.mac_link_up = mvneta_mac_link_up,
+};
+
+static int mvneta_mdio_probe(struct mvneta_port *pp)
+{
+	struct ethtool_wolinfo wol = { .cmd = ETHTOOL_GWOL };
+	int err = phylink_of_phy_connect(pp->phylink, pp->dn, 0);
+
+	if (err)
+		netdev_err(pp->dev, "could not attach PHY: %d\n", err);
+
+	phylink_ethtool_get_wol(pp->phylink, &wol);
 	device_set_wakeup_capable(&pp->dev->dev, !!wol.supported);
 
-	phy_dev->supported &= PHY_GBIT_FEATURES;
-	phy_dev->advertising = phy_dev->supported;
-
-	pp->link    = 0;
-	pp->duplex  = 0;
-	pp->speed   = 0;
-
-	return 0;
+	return err;
 }
 
 static void mvneta_mdio_remove(struct mvneta_port *pp)
 {
-	struct net_device *ndev = pp->dev;
-
-	phy_disconnect(ndev->phydev);
+	phylink_disconnect_phy(pp->phylink);
 }
 
 /* Electing a CPU must be done in an atomic way: it should be done
@@ -3455,8 +3568,7 @@ static int mvneta_cpu_online(unsigned int cpu, struct hlist_node *node)
 	on_each_cpu(mvneta_percpu_unmask_interrupt, pp, true);
 	mvreg_write(pp, MVNETA_INTR_MISC_MASK,
 		    MVNETA_CAUSE_PHY_STATUS_CHANGE |
-		    MVNETA_CAUSE_LINK_CHANGE |
-		    MVNETA_CAUSE_PSC_SYNC_CHANGE);
+		    MVNETA_CAUSE_LINK_CHANGE);
 	netif_tx_start_all_queues(pp->dev);
 	spin_unlock(&pp->lock);
 	return 0;
@@ -3497,8 +3609,7 @@ static int mvneta_cpu_dead(unsigned int cpu, struct hlist_node *node)
 	on_each_cpu(mvneta_percpu_unmask_interrupt, pp, true);
 	mvreg_write(pp, MVNETA_INTR_MISC_MASK,
 		    MVNETA_CAUSE_PHY_STATUS_CHANGE |
-		    MVNETA_CAUSE_LINK_CHANGE |
-		    MVNETA_CAUSE_PSC_SYNC_CHANGE);
+		    MVNETA_CAUSE_LINK_CHANGE);
 	netif_tx_start_all_queues(pp->dev);
 	return 0;
 }
@@ -3626,10 +3737,9 @@ static int mvneta_stop(struct net_device *dev)
 
 static int mvneta_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
 {
-	if (!dev->phydev)
-		return -ENOTSUPP;
+	struct mvneta_port *pp = netdev_priv(dev);
 
-	return phy_mii_ioctl(dev->phydev, ifr, cmd);
+	return phylink_mii_ioctl(pp->phylink, ifr, cmd);
 }
 
 /* Ethtool methods */
@@ -3640,44 +3750,25 @@ mvneta_ethtool_set_link_ksettings(struct net_device *ndev,
 				  const struct ethtool_link_ksettings *cmd)
 {
 	struct mvneta_port *pp = netdev_priv(ndev);
-	struct phy_device *phydev = ndev->phydev;
 
-	if (!phydev)
-		return -ENODEV;
+	return phylink_ethtool_ksettings_set(pp->phylink, cmd);
+}
 
-	if ((cmd->base.autoneg == AUTONEG_ENABLE) != pp->use_inband_status) {
-		u32 val;
+/* Get link ksettings for ethtools */
+static int
+mvneta_ethtool_get_link_ksettings(struct net_device *ndev,
+				  struct ethtool_link_ksettings *cmd)
+{
+	struct mvneta_port *pp = netdev_priv(ndev);
 
-		mvneta_set_autoneg(pp, cmd->base.autoneg == AUTONEG_ENABLE);
+	return phylink_ethtool_ksettings_get(pp->phylink, cmd);
+}
 
-		if (cmd->base.autoneg == AUTONEG_DISABLE) {
-			val = mvreg_read(pp, MVNETA_GMAC_AUTONEG_CONFIG);
-			val &= ~(MVNETA_GMAC_CONFIG_MII_SPEED |
-				 MVNETA_GMAC_CONFIG_GMII_SPEED |
-				 MVNETA_GMAC_CONFIG_FULL_DUPLEX);
+static int mvneta_ethtool_nway_reset(struct net_device *dev)
+{
+	struct mvneta_port *pp = netdev_priv(dev);
 
-			if (phydev->duplex)
-				val |= MVNETA_GMAC_CONFIG_FULL_DUPLEX;
-
-			if (phydev->speed == SPEED_1000)
-				val |= MVNETA_GMAC_CONFIG_GMII_SPEED;
-			else if (phydev->speed == SPEED_100)
-				val |= MVNETA_GMAC_CONFIG_MII_SPEED;
-
-			mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, val);
-		}
-
-		pp->use_inband_status = (cmd->base.autoneg == AUTONEG_ENABLE);
-		netdev_info(pp->dev, "autoneg status set to %i\n",
-			    pp->use_inband_status);
-
-		if (netif_running(ndev)) {
-			mvneta_port_down(pp);
-			mvneta_port_up(pp);
-		}
-	}
-
-	return phy_ethtool_ksettings_set(ndev->phydev, cmd);
+	return phylink_ethtool_nway_reset(pp->phylink);
 }
 
 /* Set interrupt coalescing for ethtools */
@@ -3769,6 +3860,22 @@ static int mvneta_ethtool_set_ringparam(struct net_device *dev,
 	return 0;
 }
 
+static void mvneta_ethtool_get_pauseparam(struct net_device *dev,
+					  struct ethtool_pauseparam *pause)
+{
+	struct mvneta_port *pp = netdev_priv(dev);
+
+	phylink_ethtool_get_pauseparam(pp->phylink, pause);
+}
+
+static int mvneta_ethtool_set_pauseparam(struct net_device *dev,
+					 struct ethtool_pauseparam *pause)
+{
+	struct mvneta_port *pp = netdev_priv(dev);
+
+	return phylink_ethtool_set_pauseparam(pp->phylink, pause);
+}
+
 static void mvneta_ethtool_get_strings(struct net_device *netdev, u32 sset,
 				       u8 *data)
 {
@@ -3785,26 +3892,35 @@ static void mvneta_ethtool_update_stats(struct mvneta_port *pp)
 {
 	const struct mvneta_statistic *s;
 	void __iomem *base = pp->base;
-	u32 high, low, val;
-	u64 val64;
+	u32 high, low;
+	u64 val;
 	int i;
 
 	for (i = 0, s = mvneta_statistics;
 	     s < mvneta_statistics + ARRAY_SIZE(mvneta_statistics);
 	     s++, i++) {
+		val = 0;
+
 		switch (s->type) {
 		case T_REG_32:
 			val = readl_relaxed(base + s->offset);
-			pp->ethtool_stats[i] += val;
 			break;
 		case T_REG_64:
 			/* Docs say to read low 32-bit then high */
 			low = readl_relaxed(base + s->offset);
 			high = readl_relaxed(base + s->offset + 4);
-			val64 = (u64)high << 32 | low;
-			pp->ethtool_stats[i] += val64;
+			val = (u64)high << 32 | low;
+			break;
+		case T_SW:
+			switch (s->offset) {
+			case ETHTOOL_STAT_EEE_WAKEUP:
+				val = phylink_get_eee_err(pp->phylink);
+				break;
+			}
 			break;
 		}
+
+		pp->ethtool_stats[i] += val;
 	}
 }
 
@@ -3939,28 +4055,81 @@ static int mvneta_ethtool_get_rxfh(struct net_device *dev, u32 *indir, u8 *key,
 static void mvneta_ethtool_get_wol(struct net_device *dev,
 				   struct ethtool_wolinfo *wol)
 {
-	wol->supported = 0;
-	wol->wolopts = 0;
+	struct mvneta_port *pp = netdev_priv(dev);
 
-	if (dev->phydev)
-		phy_ethtool_get_wol(dev->phydev, wol);
+	phylink_ethtool_get_wol(pp->phylink, wol);
 }
 
 static int mvneta_ethtool_set_wol(struct net_device *dev,
 				  struct ethtool_wolinfo *wol)
 {
+	struct mvneta_port *pp = netdev_priv(dev);
 	int ret;
 
-	if (!dev->phydev)
-		return -EOPNOTSUPP;
-
-	ret = phy_ethtool_set_wol(dev->phydev, wol);
+	ret = phylink_ethtool_set_wol(pp->phylink, wol);
 	if (!ret)
 		device_set_wakeup_enable(&dev->dev, !!wol->wolopts);
 
 	return ret;
 }
 
+static int mvneta_ethtool_get_module_info(struct net_device *dev,
+					  struct ethtool_modinfo *modinfo)
+{
+	struct mvneta_port *pp = netdev_priv(dev);
+
+	return phylink_ethtool_get_module_info(pp->phylink, modinfo);
+}
+
+static int mvneta_ethtool_get_module_eeprom(struct net_device *dev,
+					    struct ethtool_eeprom *ee, u8 *buf)
+{
+	struct mvneta_port *pp = netdev_priv(dev);
+
+	return phylink_ethtool_get_module_eeprom(pp->phylink, ee, buf);
+}
+
+static int mvneta_ethtool_get_eee(struct net_device *dev,
+				  struct ethtool_eee *eee)
+{
+	struct mvneta_port *pp = netdev_priv(dev);
+	u32 lpi_ctl0;
+
+	lpi_ctl0 = mvreg_read(pp, MVNETA_LPI_CTRL_0);
+
+	eee->eee_enabled = pp->eee_enabled;
+	eee->eee_active = pp->eee_active;
+	eee->tx_lpi_enabled = pp->tx_lpi_enabled;
+	eee->tx_lpi_timer = lpi_ctl0 >> 8;
+
+	return phylink_ethtool_get_eee(pp->phylink, eee);
+}
+
+static int mvneta_ethtool_set_eee(struct net_device *dev,
+				  struct ethtool_eee *eee)
+{
+	struct mvneta_port *pp = netdev_priv(dev);
+	u32 lpi_ctl0;
+
+	/* The Armada 37x documents do not give limits for this other than
+	 * it being an 8-bit register.
+	 */
+	if (eee->tx_lpi_enabled && eee->tx_lpi_timer > 255)
+		return -EINVAL;
+
+	lpi_ctl0 = mvreg_read(pp, MVNETA_LPI_CTRL_0);
+	lpi_ctl0 &= ~(0xff << 8);
+	lpi_ctl0 |= eee->tx_lpi_timer << 8;
+	mvreg_write(pp, MVNETA_LPI_CTRL_0, lpi_ctl0);
+
+	pp->eee_enabled = eee->eee_enabled;
+	pp->tx_lpi_enabled = eee->tx_lpi_enabled;
+
+	mvneta_set_eee(pp, eee->tx_lpi_enabled && eee->eee_enabled);
+
+	return phylink_ethtool_set_eee(pp->phylink, eee);
+}
+
 static const struct net_device_ops mvneta_netdev_ops = {
 	.ndo_open            = mvneta_open,
 	.ndo_stop            = mvneta_stop,
@@ -3974,13 +4143,15 @@ static const struct net_device_ops mvneta_netdev_ops = {
 };
 
 static const struct ethtool_ops mvneta_eth_tool_ops = {
-	.nway_reset	= phy_ethtool_nway_reset,
+	.nway_reset	= mvneta_ethtool_nway_reset,
 	.get_link       = ethtool_op_get_link,
 	.set_coalesce   = mvneta_ethtool_set_coalesce,
 	.get_coalesce   = mvneta_ethtool_get_coalesce,
 	.get_drvinfo    = mvneta_ethtool_get_drvinfo,
 	.get_ringparam  = mvneta_ethtool_get_ringparam,
 	.set_ringparam	= mvneta_ethtool_set_ringparam,
+	.get_pauseparam	= mvneta_ethtool_get_pauseparam,
+	.set_pauseparam	= mvneta_ethtool_set_pauseparam,
 	.get_strings	= mvneta_ethtool_get_strings,
 	.get_ethtool_stats = mvneta_ethtool_get_stats,
 	.get_sset_count	= mvneta_ethtool_get_sset_count,
@@ -3988,10 +4159,14 @@ static const struct ethtool_ops mvneta_eth_tool_ops = {
 	.get_rxnfc	= mvneta_ethtool_get_rxnfc,
 	.get_rxfh	= mvneta_ethtool_get_rxfh,
 	.set_rxfh	= mvneta_ethtool_set_rxfh,
-	.get_link_ksettings = phy_ethtool_get_link_ksettings,
+	.get_link_ksettings = mvneta_ethtool_get_link_ksettings,
 	.set_link_ksettings = mvneta_ethtool_set_link_ksettings,
 	.get_wol        = mvneta_ethtool_get_wol,
 	.set_wol        = mvneta_ethtool_set_wol,
+	.get_module_info = mvneta_ethtool_get_module_info,
+	.get_module_eeprom = mvneta_ethtool_get_module_eeprom,
+	.get_eee	= mvneta_ethtool_get_eee,
+	.set_eee	= mvneta_ethtool_set_eee,
 };
 
 /* Initialize hw */
@@ -4091,42 +4266,16 @@ static void mvneta_conf_mbus_windows(struct mvneta_port *pp,
 /* Power up the port */
 static int mvneta_port_power_up(struct mvneta_port *pp, int phy_mode)
 {
-	u32 ctrl;
-
 	/* MAC Cause register should be cleared */
 	mvreg_write(pp, MVNETA_UNIT_INTR_CAUSE, 0);
 
-	ctrl = mvreg_read(pp, MVNETA_GMAC_CTRL_2);
-
-	/* Even though it might look weird, when we're configured in
-	 * SGMII or QSGMII mode, the RGMII bit needs to be set.
-	 */
-	switch(phy_mode) {
-	case PHY_INTERFACE_MODE_QSGMII:
+	if (phy_mode == PHY_INTERFACE_MODE_QSGMII)
 		mvreg_write(pp, MVNETA_SERDES_CFG, MVNETA_QSGMII_SERDES_PROTO);
-		ctrl |= MVNETA_GMAC2_PCS_ENABLE | MVNETA_GMAC2_PORT_RGMII;
-		break;
-	case PHY_INTERFACE_MODE_SGMII:
+	else if (phy_mode == PHY_INTERFACE_MODE_SGMII ||
+		 phy_mode == PHY_INTERFACE_MODE_1000BASEX)
 		mvreg_write(pp, MVNETA_SERDES_CFG, MVNETA_SGMII_SERDES_PROTO);
-		ctrl |= MVNETA_GMAC2_PCS_ENABLE | MVNETA_GMAC2_PORT_RGMII;
-		break;
-	case PHY_INTERFACE_MODE_RGMII:
-	case PHY_INTERFACE_MODE_RGMII_ID:
-	case PHY_INTERFACE_MODE_RGMII_RXID:
-	case PHY_INTERFACE_MODE_RGMII_TXID:
-		ctrl |= MVNETA_GMAC2_PORT_RGMII;
-		break;
-	default:
+	else if (!phy_interface_mode_is_rgmii(phy_mode))
 		return -EINVAL;
-	}
-
-	/* Cancel Port Reset */
-	ctrl &= ~MVNETA_GMAC2_PORT_RESET;
-	mvreg_write(pp, MVNETA_GMAC_CTRL_2, ctrl);
-
-	while ((mvreg_read(pp, MVNETA_GMAC_CTRL_2) &
-		MVNETA_GMAC2_PORT_RESET) != 0)
-		continue;
 
 	return 0;
 }
@@ -4136,14 +4285,13 @@ static int mvneta_probe(struct platform_device *pdev)
 {
 	struct resource *res;
 	struct device_node *dn = pdev->dev.of_node;
-	struct device_node *phy_node;
 	struct device_node *bm_node;
 	struct mvneta_port *pp;
 	struct net_device *dev;
+	struct phylink *phylink;
 	const char *dt_mac_addr;
 	char hw_mac_addr[ETH_ALEN];
 	const char *mac_from;
-	const char *managed;
 	int tx_csum_limit;
 	int phy_mode;
 	int err;
@@ -4159,31 +4307,18 @@ static int mvneta_probe(struct platform_device *pdev)
 		goto err_free_netdev;
 	}
 
-	phy_node = of_parse_phandle(dn, "phy", 0);
-	if (!phy_node) {
-		if (!of_phy_is_fixed_link(dn)) {
-			dev_err(&pdev->dev, "no PHY specified\n");
-			err = -ENODEV;
-			goto err_free_irq;
-		}
-
-		err = of_phy_register_fixed_link(dn);
-		if (err < 0) {
-			dev_err(&pdev->dev, "cannot register fixed PHY\n");
-			goto err_free_irq;
-		}
-
-		/* In the case of a fixed PHY, the DT node associated
-		 * to the PHY is the Ethernet MAC DT node.
-		 */
-		phy_node = of_node_get(dn);
-	}
-
 	phy_mode = of_get_phy_mode(dn);
 	if (phy_mode < 0) {
 		dev_err(&pdev->dev, "incorrect phy-mode\n");
 		err = -EINVAL;
-		goto err_put_phy_node;
+		goto err_free_irq;
+	}
+
+	phylink = phylink_create(dev, pdev->dev.fwnode, phy_mode,
+				 &mvneta_phylink_ops);
+	if (IS_ERR(phylink)) {
+		err = PTR_ERR(phylink);
+		goto err_free_irq;
 	}
 
 	dev->tx_queue_len = MVNETA_MAX_TXD;
@@ -4194,12 +4329,9 @@ static int mvneta_probe(struct platform_device *pdev)
 
 	pp = netdev_priv(dev);
 	spin_lock_init(&pp->lock);
-	pp->phy_node = phy_node;
+	pp->phylink = phylink;
 	pp->phy_interface = phy_mode;
-
-	err = of_property_read_string(dn, "managed", &managed);
-	pp->use_inband_status = (err == 0 &&
-				 strcmp(managed, "in-band-status") == 0);
+	pp->dn = dn;
 
 	pp->rxq_def = rxq_def;
 
@@ -4221,7 +4353,7 @@ static int mvneta_probe(struct platform_device *pdev)
 		pp->clk = devm_clk_get(&pdev->dev, NULL);
 	if (IS_ERR(pp->clk)) {
 		err = PTR_ERR(pp->clk);
-		goto err_put_phy_node;
+		goto err_free_phylink;
 	}
 
 	clk_prepare_enable(pp->clk);
@@ -4358,14 +4490,6 @@ static int mvneta_probe(struct platform_device *pdev)
 
 	platform_set_drvdata(pdev, pp->dev);
 
-	if (pp->use_inband_status) {
-		struct phy_device *phy = of_phy_find_device(dn);
-
-		mvneta_fixed_link_update(pp, phy);
-
-		put_device(&phy->mdio.dev);
-	}
-
 	return 0;
 
 err_netdev:
@@ -4382,10 +4506,9 @@ static int mvneta_probe(struct platform_device *pdev)
 err_clk:
 	clk_disable_unprepare(pp->clk_bus);
 	clk_disable_unprepare(pp->clk);
-err_put_phy_node:
-	of_node_put(phy_node);
-	if (of_phy_is_fixed_link(dn))
-		of_phy_deregister_fixed_link(dn);
+err_free_phylink:
+	if (pp->phylink)
+		phylink_destroy(pp->phylink);
 err_free_irq:
 	irq_dispose_mapping(dev->irq);
 err_free_netdev:
@@ -4397,7 +4520,6 @@ static int mvneta_probe(struct platform_device *pdev)
 static int mvneta_remove(struct platform_device *pdev)
 {
 	struct net_device  *dev = platform_get_drvdata(pdev);
-	struct device_node *dn = pdev->dev.of_node;
 	struct mvneta_port *pp = netdev_priv(dev);
 
 	unregister_netdev(dev);
@@ -4405,10 +4527,8 @@ static int mvneta_remove(struct platform_device *pdev)
 	clk_disable_unprepare(pp->clk);
 	free_percpu(pp->ports);
 	free_percpu(pp->stats);
-	if (of_phy_is_fixed_link(dn))
-		of_phy_deregister_fixed_link(dn);
 	irq_dispose_mapping(dev->irq);
-	of_node_put(pp->phy_node);
+	phylink_destroy(pp->phylink);
 	free_netdev(dev);
 
 	if (pp->bm_priv) {
@@ -4426,8 +4546,10 @@ static int mvneta_suspend(struct device *device)
 	struct net_device *dev = dev_get_drvdata(device);
 	struct mvneta_port *pp = netdev_priv(dev);
 
+	rtnl_lock();
 	if (netif_running(dev))
 		mvneta_stop(dev);
+	rtnl_unlock();
 	netif_device_detach(dev);
 	clk_disable_unprepare(pp->clk_bus);
 	clk_disable_unprepare(pp->clk);
@@ -4460,14 +4582,13 @@ static int mvneta_resume(struct device *device)
 		return err;
 	}
 
-	if (pp->use_inband_status)
-		mvneta_fixed_link_update(pp, dev->phydev);
-
 	netif_device_attach(dev);
+	rtnl_lock();
 	if (netif_running(dev)) {
 		mvneta_open(dev);
 		mvneta_set_rx_mode(dev);
 	}
+	rtnl_unlock();
 
 	return 0;
 }
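
The new rtnl_lock()/rtnl_unlock() pairs in suspend/resume are needed
because mvneta_stop() and mvneta_open() now reach into phylink, whose
entry points expect the RTNL to be held. A sketch of that expectation
(phylink internals paraphrased, treat as an assumption):

    void phylink_stop(struct phylink *pl)
    {
    	ASSERT_RTNL();	/* caller must hold the RTNL */
    	/* ... stop the resolver, take the link down ... */
    }
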
diff --git a/drivers/net/ethernet/marvell/mvpp2.c b/drivers/net/ethernet/marvell/mvpp2.c
index 634b2f4..a197607 100644
--- a/drivers/net/ethernet/marvell/mvpp2.c
+++ b/drivers/net/ethernet/marvell/mvpp2.c
@@ -454,11 +454,11 @@
 /* Various constants */
 
 /* Coalescing */
-#define MVPP2_TXDONE_COAL_PKTS_THRESH	15
+#define MVPP2_TXDONE_COAL_PKTS_THRESH	64
 #define MVPP2_TXDONE_HRTIMER_PERIOD_NS	1000000UL
 #define MVPP2_TXDONE_COAL_USEC		1000
 #define MVPP2_RX_COAL_PKTS		32
-#define MVPP2_RX_COAL_USEC		100
+#define MVPP2_RX_COAL_USEC		64
 
 /* The two-byte Marvell header. Either contains a special value used
  * by Marvell switches when a specific hardware mode is enabled (not
@@ -504,10 +504,12 @@
 #define MVPP2_DEFAULT_RXQ		4
 
 /* Max number of Rx descriptors */
-#define MVPP2_MAX_RXD			128
+#define MVPP2_MAX_RXD_MAX		1024
+#define MVPP2_MAX_RXD_DFLT		128
 
 /* Max number of Tx descriptors */
-#define MVPP2_MAX_TXD			1024
+#define MVPP2_MAX_TXD_MAX		2048
+#define MVPP2_MAX_TXD_DFLT		1024
 
 /* Amount of Tx descriptors that can be reserved at once by CPU */
 #define MVPP2_CPU_DESC_CHUNK		64
@@ -5802,6 +5804,7 @@ static int mvpp2_txq_init(struct mvpp2_port *port,
 		txq_pcpu->reserved_num = 0;
 		txq_pcpu->txq_put_index = 0;
 		txq_pcpu->txq_get_index = 0;
+		txq_pcpu->tso_headers = NULL;
 
 		txq_pcpu->stop_threshold = txq->size - MVPP2_MAX_SKB_DESCS;
 		txq_pcpu->wake_threshold = txq_pcpu->stop_threshold / 2;
@@ -5829,10 +5832,13 @@ static void mvpp2_txq_deinit(struct mvpp2_port *port,
 		txq_pcpu = per_cpu_ptr(txq->pcpu, cpu);
 		kfree(txq_pcpu->buffs);
 
-		dma_free_coherent(port->dev->dev.parent,
-				  txq_pcpu->size * TSO_HEADER_SIZE,
-				  txq_pcpu->tso_headers,
-				  txq_pcpu->tso_headers_dma);
+		if (txq_pcpu->tso_headers)
+			dma_free_coherent(port->dev->dev.parent,
+					  txq_pcpu->size * TSO_HEADER_SIZE,
+					  txq_pcpu->tso_headers,
+					  txq_pcpu->tso_headers_dma);
+
+		txq_pcpu->tso_headers = NULL;
 	}
 
 	if (txq->descs)
@@ -6832,13 +6838,13 @@ static int mvpp2_check_ringparam_valid(struct net_device *dev,
 	if (ring->rx_pending == 0 || ring->tx_pending == 0)
 		return -EINVAL;
 
-	if (ring->rx_pending > MVPP2_MAX_RXD)
-		new_rx_pending = MVPP2_MAX_RXD;
+	if (ring->rx_pending > MVPP2_MAX_RXD_MAX)
+		new_rx_pending = MVPP2_MAX_RXD_MAX;
 	else if (!IS_ALIGNED(ring->rx_pending, 16))
 		new_rx_pending = ALIGN(ring->rx_pending, 16);
 
-	if (ring->tx_pending > MVPP2_MAX_TXD)
-		new_tx_pending = MVPP2_MAX_TXD;
+	if (ring->tx_pending > MVPP2_MAX_TXD_MAX)
+		new_tx_pending = MVPP2_MAX_TXD_MAX;
 	else if (!IS_ALIGNED(ring->tx_pending, 32))
 		new_tx_pending = ALIGN(ring->tx_pending, 32);
 
@@ -7318,9 +7324,10 @@ static int mvpp2_ethtool_get_coalesce(struct net_device *dev,
 {
 	struct mvpp2_port *port = netdev_priv(dev);
 
-	c->rx_coalesce_usecs        = port->rxqs[0]->time_coal;
-	c->rx_max_coalesced_frames  = port->rxqs[0]->pkts_coal;
-	c->tx_max_coalesced_frames =  port->txqs[0]->done_pkts_coal;
+	c->rx_coalesce_usecs       = port->rxqs[0]->time_coal;
+	c->rx_max_coalesced_frames = port->rxqs[0]->pkts_coal;
+	c->tx_max_coalesced_frames = port->txqs[0]->done_pkts_coal;
+	c->tx_coalesce_usecs       = port->tx_time_coal;
 	return 0;
 }
 
@@ -7340,8 +7347,8 @@ static void mvpp2_ethtool_get_ringparam(struct net_device *dev,
 {
 	struct mvpp2_port *port = netdev_priv(dev);
 
-	ring->rx_max_pending = MVPP2_MAX_RXD;
-	ring->tx_max_pending = MVPP2_MAX_TXD;
+	ring->rx_max_pending = MVPP2_MAX_RXD_MAX;
+	ring->tx_max_pending = MVPP2_MAX_TXD_MAX;
 	ring->rx_pending = port->rx_ring_size;
 	ring->tx_pending = port->tx_ring_size;
 }
@@ -7788,7 +7795,7 @@ static int mvpp2_port_probe(struct platform_device *pdev,
 		goto err_free_netdev;
 	}
 
-	dev->tx_queue_len = MVPP2_MAX_TXD;
+	dev->tx_queue_len = MVPP2_MAX_TXD_MAX;
 	dev->watchdog_timeo = 5 * HZ;
 	dev->netdev_ops = &mvpp2_netdev_ops;
 	dev->ethtool_ops = &mvpp2_eth_tool_ops;
@@ -7871,8 +7878,8 @@ static int mvpp2_port_probe(struct platform_device *pdev,
 
 	mvpp2_port_copy_mac_addr(dev, priv, port_node, &mac_from);
 
-	port->tx_ring_size = MVPP2_MAX_TXD;
-	port->rx_ring_size = MVPP2_MAX_RXD;
+	port->tx_ring_size = MVPP2_MAX_TXD_DFLT;
+	port->rx_ring_size = MVPP2_MAX_RXD_DFLT;
 	SET_NETDEV_DEV(dev, &pdev->dev);
 
 	err = mvpp2_port_init(port);
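
mvpp2_check_ringparam_valid() keeps its 16/32 alignment rules; only
the upper clamping bounds changed. For example:

    /*
     * ethtool -G <dev> rx 100   =>  ALIGN(100, 16)  == 112
     * ethtool -G <dev> rx 4096  =>  clamped to MVPP2_MAX_RXD_MAX (1024)
     * ethtool -G <dev> tx 1000  =>  ALIGN(1000, 32) == 1024
     */
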
diff --git a/drivers/net/ethernet/marvell/sky2.c b/drivers/net/ethernet/marvell/sky2.c
index 9efe177..9fe8530 100644
--- a/drivers/net/ethernet/marvell/sky2.c
+++ b/drivers/net/ethernet/marvell/sky2.c
@@ -4287,7 +4287,7 @@ static int sky2_vpd_wait(const struct sky2_hw *hw, int cap, u16 busy)
 			dev_err(&hw->pdev->dev, "VPD cycle timed out\n");
 			return -ETIMEDOUT;
 		}
-		mdelay(1);
+		msleep(1);
 	}
 
 	return 0;
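
sky2_vpd_wait() polls in process context, so sleeping is preferable to
busy-waiting. The trade-off in brief:

    mdelay(1);	/* spins: the CPU is burned for the whole millisecond */
    msleep(1);	/* schedules: may round up to a jiffy, harmless here  */
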
diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
index fc67e35..29826dd 100644
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
@@ -1952,14 +1952,16 @@ static int mtk_hw_init(struct mtk_eth *eth)
 	}
 	regmap_write(eth->ethsys, ETHSYS_SYSCFG0, val);
 
-	/* Set GE2 driving and slew rate */
-	regmap_write(eth->pctl, GPIO_DRV_SEL10, 0xa00);
+	if (eth->pctl) {
+		/* Set GE2 driving and slew rate */
+		regmap_write(eth->pctl, GPIO_DRV_SEL10, 0xa00);
 
-	/* set GE2 TDSEL */
-	regmap_write(eth->pctl, GPIO_OD33_CTRL8, 0x5);
+		/* set GE2 TDSEL */
+		regmap_write(eth->pctl, GPIO_OD33_CTRL8, 0x5);
 
-	/* set GE2 TUNE */
-	regmap_write(eth->pctl, GPIO_BIAS_CTRL, 0x0);
+		/* set GE2 TUNE */
+		regmap_write(eth->pctl, GPIO_BIAS_CTRL, 0x0);
+	}
 
 	/* Set linkdown as the default for each GMAC. Its own MCR would be set
 	 * up with the more appropriate value when mtk_phy_link_adjust call is
@@ -2538,11 +2540,13 @@ static int mtk_probe(struct platform_device *pdev)
 		}
 	}
 
-	eth->pctl = syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
-						    "mediatek,pctl");
-	if (IS_ERR(eth->pctl)) {
-		dev_err(&pdev->dev, "no pctl regmap found\n");
-		return PTR_ERR(eth->pctl);
+	if (eth->soc->required_pctl) {
+		eth->pctl = syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
+							    "mediatek,pctl");
+		if (IS_ERR(eth->pctl)) {
+			dev_err(&pdev->dev, "no pctl regmap found\n");
+			return PTR_ERR(eth->pctl);
+		}
 	}
 
 	for (i = 0; i < 3; i++) {
@@ -2668,17 +2672,20 @@ static int mtk_remove(struct platform_device *pdev)
 
 static const struct mtk_soc_data mt2701_data = {
 	.caps = MTK_GMAC1_TRGMII,
-	.required_clks = MT7623_CLKS_BITMAP
+	.required_clks = MT7623_CLKS_BITMAP,
+	.required_pctl = true,
 };
 
 static const struct mtk_soc_data mt7622_data = {
 	.caps = MTK_DUAL_GMAC_SHARED_SGMII | MTK_GMAC1_ESW,
-	.required_clks = MT7622_CLKS_BITMAP
+	.required_clks = MT7622_CLKS_BITMAP,
+	.required_pctl = false,
 };
 
 static const struct mtk_soc_data mt7623_data = {
 	.caps = MTK_GMAC1_TRGMII,
-	.required_clks = MT7623_CLKS_BITMAP
+	.required_clks = MT7623_CLKS_BITMAP,
+	.required_pctl = true,
 };
 
 const struct of_device_id of_mtk_match[] = {
diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.h b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
index a3af466..672b8c3 100644
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.h
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
@@ -573,10 +573,13 @@ struct mtk_rx_ring {
 * @caps			Flags showing the extra capabilities of the SoC
 * @required_clks		Bitmap of the clocks required on the
 *				target SoC
+ * @required_pctl		Whether the SoC requires the extra pin setup
+ *				for the pins used by the GMAC.
  */
 struct mtk_soc_data {
 	u32		caps;
 	u32		required_clks;
+	bool		required_pctl;
 };
 
 /* currently no SoC has more than 2 macs */
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_dcb_nl.c b/drivers/net/ethernet/mellanox/mlx4/en_dcb_nl.c
index 5f41dc9..1a0c3bf8 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_dcb_nl.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_dcb_nl.c
@@ -310,6 +310,7 @@ static int mlx4_en_ets_validate(struct mlx4_en_priv *priv, struct ieee_ets *ets)
 		}
 
 		switch (ets->tc_tsa[i]) {
+		case IEEE_8021QAZ_TSA_VENDOR:
 		case IEEE_8021QAZ_TSA_STRICT:
 			break;
 		case IEEE_8021QAZ_TSA_ETS:
@@ -347,6 +348,10 @@ static int mlx4_en_config_port_scheduler(struct mlx4_en_priv *priv,
 	/* higher TC means higher priority => lower pg */
 	for (i = IEEE_8021QAZ_MAX_TCS - 1; i >= 0; i--) {
 		switch (ets->tc_tsa[i]) {
+		case IEEE_8021QAZ_TSA_VENDOR:
+			pg[i] = MLX4_EN_TC_VENDOR;
+			tc_tx_bw[i] = MLX4_EN_BW_MAX;
+			break;
 		case IEEE_8021QAZ_TSA_STRICT:
 			pg[i] = num_strict++;
 			tc_tx_bw[i] = MLX4_EN_BW_MAX;
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
index bf1f041..ebc1f56 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
@@ -1094,12 +1094,21 @@ static int mlx4_en_set_ringparam(struct net_device *dev,
 	if (param->rx_jumbo_pending || param->rx_mini_pending)
 		return -EINVAL;
 
+	if (param->rx_pending < MLX4_EN_MIN_RX_SIZE) {
+		en_warn(priv, "%s: rx_pending (%d) < min (%d)\n",
+			__func__, param->rx_pending,
+			MLX4_EN_MIN_RX_SIZE);
+		return -EINVAL;
+	}
+	if (param->tx_pending < MLX4_EN_MIN_TX_SIZE) {
+		en_warn(priv, "%s: tx_pending (%d) < min (%lu)\n",
+			__func__, param->tx_pending,
+			MLX4_EN_MIN_TX_SIZE);
+		return -EINVAL;
+	}
+
 	rx_size = roundup_pow_of_two(param->rx_pending);
-	rx_size = max_t(u32, rx_size, MLX4_EN_MIN_RX_SIZE);
-	rx_size = min_t(u32, rx_size, MLX4_EN_MAX_RX_SIZE);
 	tx_size = roundup_pow_of_two(param->tx_pending);
-	tx_size = max_t(u32, tx_size, MLX4_EN_MIN_TX_SIZE);
-	tx_size = min_t(u32, tx_size, MLX4_EN_MAX_TX_SIZE);
 
 	if (rx_size == (priv->port_up ? priv->rx_ring[0]->actual_size :
 					priv->rx_ring[0]->size) &&
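
Ring-size requests below the minimum are now rejected instead of being
silently rounded up; only the power-of-two rounding remains. An
illustration, with the minimum value assumed for the example:

    /*
     * Assuming MLX4_EN_MIN_RX_SIZE == 1024:
     *	ethtool -G <dev> rx 512   =>  -EINVAL (was: silently raised)
     *	ethtool -G <dev> rx 1500  =>  roundup_pow_of_two(1500) == 2048
     */
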
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
index 99051a2..8fc51bc 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
@@ -2172,8 +2172,9 @@ static int mlx4_en_alloc_resources(struct mlx4_en_priv *priv)
 
 		if (mlx4_en_create_rx_ring(priv, &priv->rx_ring[i],
 					   prof->rx_ring_size, priv->stride,
-					   node))
+					   node, i))
 			goto err;
+
 	}
 
 #ifdef CONFIG_RFS_ACCEL
@@ -3336,6 +3337,13 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
 	priv->msg_enable = MLX4_EN_MSG_LEVEL;
 #ifdef CONFIG_MLX4_EN_DCB
 	if (!mlx4_is_slave(priv->mdev->dev)) {
+		u8 prio;
+
+		for (prio = 0; prio < IEEE_8021QAZ_MAX_TCS; ++prio) {
+			priv->ets.prio_tc[prio] = prio;
+			priv->ets.tc_tsa[prio]  = IEEE_8021QAZ_TSA_VENDOR;
+		}
+
 		priv->dcbx_cap = DCB_CAP_DCBX_VER_CEE | DCB_CAP_DCBX_HOST |
 			DCB_CAP_DCBX_VER_IEEE;
 		priv->flags |= MLX4_EN_DCB_ENABLED;
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
index 85e28ef..b4d144e6 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
@@ -262,7 +262,7 @@ void mlx4_en_set_num_rx_rings(struct mlx4_en_dev *mdev)
 
 int mlx4_en_create_rx_ring(struct mlx4_en_priv *priv,
 			   struct mlx4_en_rx_ring **pring,
-			   u32 size, u16 stride, int node)
+			   u32 size, u16 stride, int node, int queue_index)
 {
 	struct mlx4_en_dev *mdev = priv->mdev;
 	struct mlx4_en_rx_ring *ring;
@@ -286,6 +286,9 @@ int mlx4_en_create_rx_ring(struct mlx4_en_priv *priv,
 	ring->log_stride = ffs(ring->stride) - 1;
 	ring->buf_size = ring->size * ring->stride + TXBB_SIZE;
 
+	if (xdp_rxq_info_reg(&ring->xdp_rxq, priv->dev, queue_index) < 0)
+		goto err_ring;
+
 	tmp = size * roundup_pow_of_two(MLX4_EN_MAX_RX_FRAGS *
 					sizeof(struct mlx4_en_rx_alloc));
 	ring->rx_info = vzalloc_node(tmp, node);
@@ -293,7 +296,7 @@ int mlx4_en_create_rx_ring(struct mlx4_en_priv *priv,
 		ring->rx_info = vzalloc(tmp);
 		if (!ring->rx_info) {
 			err = -ENOMEM;
-			goto err_ring;
+			goto err_xdp_info;
 		}
 	}
 
@@ -317,6 +320,8 @@ int mlx4_en_create_rx_ring(struct mlx4_en_priv *priv,
 err_info:
 	vfree(ring->rx_info);
 	ring->rx_info = NULL;
+err_xdp_info:
+	xdp_rxq_info_unreg(&ring->xdp_rxq);
 err_ring:
 	kfree(ring);
 	*pring = NULL;
@@ -440,6 +445,7 @@ void mlx4_en_destroy_rx_ring(struct mlx4_en_priv *priv,
 					lockdep_is_held(&mdev->state_lock));
 	if (old_prog)
 		bpf_prog_put(old_prog);
+	xdp_rxq_info_unreg(&ring->xdp_rxq);
 	mlx4_free_hwq_res(mdev->dev, &ring->wqres, size * stride + TXBB_SIZE);
 	vfree(ring->rx_info);
 	ring->rx_info = NULL;
@@ -617,6 +623,10 @@ static int get_fixed_ipv6_csum(__wsum hw_checksum, struct sk_buff *skb,
 	return 0;
 }
 #endif
+
+/* We reach this function only after checking that at least one of
+ * the (IPv4 | IPv6) bits is set in cqe->status.
+ */
 static int check_csum(struct mlx4_cqe *cqe, struct sk_buff *skb, void *va,
 		      netdev_features_t dev_features)
 {
@@ -632,13 +642,11 @@ static int check_csum(struct mlx4_cqe *cqe, struct sk_buff *skb, void *va,
 		hdr += sizeof(struct vlan_hdr);
 	}
 
-	if (cqe->status & cpu_to_be16(MLX4_CQE_STATUS_IPV4))
-		return get_fixed_ipv4_csum(hw_checksum, skb, hdr);
 #if IS_ENABLED(CONFIG_IPV6)
 	if (cqe->status & cpu_to_be16(MLX4_CQE_STATUS_IPV6))
 		return get_fixed_ipv6_csum(hw_checksum, skb, hdr);
 #endif
-	return 0;
+	return get_fixed_ipv4_csum(hw_checksum, skb, hdr);
 }
 
 int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int budget)
@@ -650,6 +658,7 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
 	int cq_ring = cq->ring;
 	bool doorbell_pending;
 	struct mlx4_cqe *cqe;
+	struct xdp_buff xdp;
 	int polled = 0;
 	int index;
 
@@ -664,6 +673,7 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
 	/* Protect accesses to: ring->xdp_prog, priv->mac_hash list */
 	rcu_read_lock();
 	xdp_prog = rcu_dereference(ring->xdp_prog);
+	xdp.rxq = &ring->xdp_rxq;
 	doorbell_pending = 0;
 
 	/* We assume a 1:1 mapping between CQEs and Rx descriptors, so Rx
@@ -748,7 +758,6 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
 		 * read bytes but not past the end of the frag.
 		 */
 		if (xdp_prog) {
-			struct xdp_buff xdp;
 			dma_addr_t dma;
 			void *orig_data;
 			u32 act;
@@ -814,33 +823,33 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
 		if (likely(dev->features & NETIF_F_RXCSUM)) {
 			if (cqe->status & cpu_to_be16(MLX4_CQE_STATUS_TCP |
 						      MLX4_CQE_STATUS_UDP)) {
-				if ((cqe->status & cpu_to_be16(MLX4_CQE_STATUS_IPOK)) &&
-				    cqe->checksum == cpu_to_be16(0xffff)) {
-					bool l2_tunnel = (dev->hw_enc_features & NETIF_F_RXCSUM) &&
-						(cqe->vlan_my_qpn & cpu_to_be32(MLX4_CQE_L2_TUNNEL));
+				bool l2_tunnel;
 
-					ip_summed = CHECKSUM_UNNECESSARY;
-					hash_type = PKT_HASH_TYPE_L4;
-					if (l2_tunnel)
-						skb->csum_level = 1;
-					ring->csum_ok++;
-				} else {
+				if (!((cqe->status & cpu_to_be16(MLX4_CQE_STATUS_IPOK)) &&
+				      cqe->checksum == cpu_to_be16(0xffff)))
 					goto csum_none;
-				}
+
+				l2_tunnel = (dev->hw_enc_features & NETIF_F_RXCSUM) &&
+					(cqe->vlan_my_qpn & cpu_to_be32(MLX4_CQE_L2_TUNNEL));
+				ip_summed = CHECKSUM_UNNECESSARY;
+				hash_type = PKT_HASH_TYPE_L4;
+				if (l2_tunnel)
+					skb->csum_level = 1;
+				ring->csum_ok++;
 			} else {
-				if (priv->flags & MLX4_EN_FLAG_RX_CSUM_NON_TCP_UDP &&
-				    (cqe->status & cpu_to_be16(MLX4_CQE_STATUS_IPV4 |
-							       MLX4_CQE_STATUS_IPV6))) {
-					if (check_csum(cqe, skb, va, dev->features)) {
-						goto csum_none;
-					} else {
-						ip_summed = CHECKSUM_COMPLETE;
-						hash_type = PKT_HASH_TYPE_L3;
-						ring->csum_complete++;
-					}
-				} else {
+				if (!(priv->flags & MLX4_EN_FLAG_RX_CSUM_NON_TCP_UDP &&
+				      (cqe->status & cpu_to_be16(MLX4_CQE_STATUS_IPV4 |
+#if IS_ENABLED(CONFIG_IPV6)
+								 MLX4_CQE_STATUS_IPV6))))
+#else
+								 0))))
+#endif
 					goto csum_none;
-				}
+				if (check_csum(cqe, skb, va, dev->features))
+					goto csum_none;
+				ip_summed = CHECKSUM_COMPLETE;
+				hash_type = PKT_HASH_TYPE_L3;
+				ring->csum_complete++;
 			}
 		} else {
 csum_none:
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
index 2b72677..f470ae3 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
@@ -46,6 +46,7 @@
 #endif
 #include <linux/cpu_rmap.h>
 #include <linux/ptp_clock_kernel.h>
+#include <net/xdp.h>
 
 #include <linux/mlx4/device.h>
 #include <linux/mlx4/qp.h>
@@ -356,6 +357,7 @@ struct mlx4_en_rx_ring {
 	unsigned long dropped;
 	int hwtstamp_rx_filter;
 	cpumask_var_t affinity_mask;
+	struct xdp_rxq_info xdp_rxq;
 };
 
 struct mlx4_en_cq {
@@ -479,6 +481,7 @@ struct mlx4_en_frag_info {
 #define MLX4_EN_BW_MIN 1
 #define MLX4_EN_BW_MAX 100 /* Utilize 100% of the line */
 
+#define MLX4_EN_TC_VENDOR 0
 #define MLX4_EN_TC_ETS 7
 
 enum dcb_pfc_type {
@@ -719,7 +722,7 @@ void mlx4_en_set_num_rx_rings(struct mlx4_en_dev *mdev);
 void mlx4_en_recover_from_oom(struct mlx4_en_priv *priv);
 int mlx4_en_create_rx_ring(struct mlx4_en_priv *priv,
 			   struct mlx4_en_rx_ring **pring,
-			   u32 size, u16 stride, int node);
+			   u32 size, u16 stride, int node, int queue_index);
 void mlx4_en_destroy_rx_ring(struct mlx4_en_priv *priv,
 			     struct mlx4_en_rx_ring **pring,
 			     u32 size, u16 stride);
diff --git a/drivers/net/ethernet/mellanox/mlx4/mr.c b/drivers/net/ethernet/mellanox/mlx4/mr.c
index c7c0764..2e84f10 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mr.c
+++ b/drivers/net/ethernet/mellanox/mlx4/mr.c
@@ -1103,30 +1103,16 @@ EXPORT_SYMBOL_GPL(mlx4_fmr_enable);
 void mlx4_fmr_unmap(struct mlx4_dev *dev, struct mlx4_fmr *fmr,
 		    u32 *lkey, u32 *rkey)
 {
-	struct mlx4_cmd_mailbox *mailbox;
-	int err;
-
 	if (!fmr->maps)
 		return;
 
+	/* To unmap: it is sufficient to take back ownership from HW */
+	*(u8 *)fmr->mpt = MLX4_MPT_STATUS_SW;
+
+	/* Make sure MPT status is visible */
+	wmb();
+
 	fmr->maps = 0;
-
-	mailbox = mlx4_alloc_cmd_mailbox(dev);
-	if (IS_ERR(mailbox)) {
-		err = PTR_ERR(mailbox);
-		pr_warn("mlx4_ib: mlx4_alloc_cmd_mailbox failed (%d)\n", err);
-		return;
-	}
-
-	err = mlx4_HW2SW_MPT(dev, NULL,
-			     key_to_hw_index(fmr->mr.key) &
-			     (dev->caps.num_mpts - 1));
-	mlx4_free_cmd_mailbox(dev, mailbox);
-	if (err) {
-		pr_warn("mlx4_ib: mlx4_HW2SW_MPT failed (%d)\n", err);
-		return;
-	}
-	fmr->mr.enabled = MLX4_MPT_EN_SW;
 }
 EXPORT_SYMBOL_GPL(mlx4_fmr_unmap);
 
@@ -1136,6 +1122,22 @@ int mlx4_fmr_free(struct mlx4_dev *dev, struct mlx4_fmr *fmr)
 
 	if (fmr->maps)
 		return -EBUSY;
+	if (fmr->mr.enabled == MLX4_MPT_EN_HW) {
+		/* In case the FMR was enabled and unmapped,
+		 * make sure to give ownership of the MPT back to HW
+		 * so the HW2SW_MPT command will succeed.
+		 */
+		*(u8 *)fmr->mpt = MLX4_MPT_STATUS_SW;
+		/* Make sure MPT status is visible before changing MPT fields */
+		wmb();
+		fmr->mpt->length = 0;
+		fmr->mpt->start  = 0;
+		/* Make sure MPT data is visible before changing MPT status */
+		wmb();
+		*(u8 *)fmr->mpt = MLX4_MPT_STATUS_HW;
+		/* Make sure MPT status is visible */
+		wmb();
+	}
 
 	ret = mlx4_mr_free(dev, &fmr->mr);
 	if (ret)
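The unmap/free changes above replace a firmware command with a small
ownership-handoff protocol: the first byte of the MPT entry acts as an
ownership flag, and each wmb() publishes one stage of the handoff before the
next store can become visible. In generic form (hypothetical entry layout,
same ordering as the code above):

	/* 1. take SW ownership so the entry may be edited */
	*(u8 *)entry = STATUS_SW;
	wmb();

	/* 2. edit fields while SW owns the entry */
	entry->length = 0;
	entry->start  = 0;
	wmb();

	/* 3. publish: edits must be visible before HW regains ownership */
	*(u8 *)entry = STATUS_HW;
	wmb();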
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/Makefile b/drivers/net/ethernet/mellanox/mlx5/core/Makefile
index 19b21b4..c805769 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/Makefile
+++ b/drivers/net/ethernet/mellanox/mlx5/core/Makefile
@@ -14,7 +14,7 @@
 		fpga/ipsec.o
 
 mlx5_core-$(CONFIG_MLX5_CORE_EN) += en_main.o en_common.o en_fs.o en_ethtool.o \
-		en_tx.o en_rx.o en_rx_am.o en_txrx.o en_stats.o vxlan.o \
+		en_tx.o en_rx.o en_dim.o en_txrx.o en_stats.o vxlan.o \
 		en_arfs.o en_fs_ethtool.o en_selftest.o
 
 mlx5_core-$(CONFIG_MLX5_MPFS) += lib/mpfs.o
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h
index 543060c..d629da2 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h
@@ -46,6 +46,8 @@
 #include <linux/mlx5/transobj.h>
 #include <linux/rhashtable.h>
 #include <net/switchdev.h>
+#include <net/xdp.h>
+#include <linux/net_dim.h>
 #include "wq.h"
 #include "mlx5_core.h"
 #include "en_stats.h"
@@ -226,12 +228,6 @@ enum mlx5e_priv_flag {
 #define MLX5E_MAX_BW_ALLOC 100 /* Max percentage of BW allocation */
 #endif
 
-struct mlx5e_cq_moder {
-	u16 usec;
-	u16 pkts;
-	u8 cq_period_mode;
-};
-
 struct mlx5e_params {
 	u8  log_sq_size;
 	u8  rq_wq_type;
@@ -242,8 +238,8 @@ struct mlx5e_params {
 	u16 num_channels;
 	u8  num_tc;
 	bool rx_cqe_compress_def;
-	struct mlx5e_cq_moder rx_cq_moderation;
-	struct mlx5e_cq_moder tx_cq_moderation;
+	struct net_dim_cq_moder rx_cq_moderation;
+	struct net_dim_cq_moder tx_cq_moderation;
 	bool lro_en;
 	u32 lro_wqe_sz;
 	u16 tx_max_inline;
@@ -253,7 +249,7 @@ struct mlx5e_params {
 	u32 indirection_rqt[MLX5E_INDIR_RQT_SIZE];
 	bool vlan_strip_disable;
 	bool scatter_fcs_en;
-	bool rx_am_enabled;
+	bool rx_dim_enabled;
 	u32 lro_timeout;
 	u32 pflags;
 	struct bpf_prog *xdp_prog;
@@ -472,32 +468,6 @@ struct mlx5e_mpw_info {
 	u16 skbs_frags[MLX5_MPWRQ_PAGES_PER_WQE];
 };
 
-struct mlx5e_rx_am_stats {
-	int ppms; /* packets per msec */
-	int bpms; /* bytes per msec */
-	int epms; /* events per msec */
-};
-
-struct mlx5e_rx_am_sample {
-	ktime_t	time;
-	u32	pkt_ctr;
-	u32	byte_ctr;
-	u16	event_ctr;
-};
-
-struct mlx5e_rx_am { /* Adaptive Moderation */
-	u8					state;
-	struct mlx5e_rx_am_stats		prev_stats;
-	struct mlx5e_rx_am_sample		start_sample;
-	struct work_struct			work;
-	u8					profile_ix;
-	u8					mode;
-	u8					tune_state;
-	u8					steps_right;
-	u8					steps_left;
-	u8					tired;
-};
-
 /* a single cache unit is capable to serve one napi call (for non-striding rq)
  * or a MPWQE (for striding rq).
  */
@@ -558,7 +528,7 @@ struct mlx5e_rq {
 	unsigned long          state;
 	int                    ix;
 
-	struct mlx5e_rx_am     am; /* Adaptive Moderation */
+	struct net_dim         dim; /* Dynamic Interrupt Moderation */
 
 	/* XDP */
 	struct bpf_prog       *xdp_prog;
@@ -571,6 +541,9 @@ struct mlx5e_rq {
 	u32                    rqn;
 	struct mlx5_core_dev  *mdev;
 	struct mlx5_core_mkey  umr_mkey;
+
+	/* XDP read-mostly */
+	struct xdp_rxq_info    xdp_rxq;
 } ____cacheline_aligned_in_smp;
 
 struct mlx5e_channel {
@@ -655,6 +628,7 @@ struct mlx5e_tc_table {
 	struct rhashtable               ht;
 
 	DECLARE_HASHTABLE(mod_hdr_tbl, 8);
+	DECLARE_HASHTABLE(hairpin_tbl, 8);
 };
 
 struct mlx5e_vlan_table {
@@ -860,10 +834,6 @@ void mlx5e_dealloc_rx_wqe(struct mlx5e_rq *rq, u16 ix);
 void mlx5e_dealloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix);
 void mlx5e_free_rx_mpwqe(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi);
 
-void mlx5e_rx_am(struct mlx5e_rq *rq);
-void mlx5e_rx_am_work(struct work_struct *work);
-struct mlx5e_cq_moder mlx5e_am_get_def_profile(u8 rx_cq_period_mode);
-
 void mlx5e_update_stats(struct mlx5e_priv *priv, bool full);
 
 int mlx5e_create_flow_steering(struct mlx5e_priv *priv);
@@ -1110,4 +1080,5 @@ void mlx5e_build_nic_params(struct mlx5_core_dev *mdev,
 			    struct mlx5e_params *params,
 			    u16 max_channels);
 u8 mlx5e_params_calculate_tx_min_inline(struct mlx5_core_dev *mdev);
+void mlx5e_rx_dim_work(struct work_struct *work);
 #endif /* __MLX5_EN_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_dim.c b/drivers/net/ethernet/mellanox/mlx5/core/en_dim.c
new file mode 100644
index 0000000..602851a
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_dim.c
@@ -0,0 +1,48 @@
+/*
+ * Copyright (c) 2016, Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/net_dim.h>
+#include "en.h"
+
+void mlx5e_rx_dim_work(struct work_struct *work)
+{
+	struct net_dim *dim = container_of(work, struct net_dim,
+					   work);
+	struct mlx5e_rq *rq = container_of(dim, struct mlx5e_rq, dim);
+	struct net_dim_cq_moder cur_profile = net_dim_get_profile(dim->mode,
+								  dim->profile_ix);
+
+	mlx5_core_modify_cq_moderation(rq->mdev, &rq->cq.mcq,
+				       cur_profile.usec, cur_profile.pkts);
+
+	dim->state = NET_DIM_START_MEASURE;
+}
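This handler is the applying half of the generic DIM loop; the sampling half
sits in the napi poll path (see the en_txrx.c hunk later in this diff). A
condensed sketch of the contract between the two, with my_hw_set_moderation()
as a hypothetical stand-in for the device-specific modify call:

	#include <linux/net_dim.h>

	static void my_dim_work(struct work_struct *work)
	{
		struct net_dim *dim = container_of(work, struct net_dim, work);
		struct net_dim_cq_moder m = net_dim_get_profile(dim->mode,
								dim->profile_ix);

		my_hw_set_moderation(m.usec, m.pkts);	/* hypothetical */
		dim->state = NET_DIM_START_MEASURE;	/* restart measuring */
	}

	/* napi poll tail: feed counters to DIM. net_dim() may pick a new
	 * profile_ix and schedule dim->work (i.e. my_dim_work above).
	 */
	static void my_poll_tail(struct net_dim *dim, u16 events,
				 u64 packets, u64 bytes)
	{
		struct net_dim_sample sample;

		net_dim_sample(events, packets, bytes, &sample);
		net_dim(dim, sample);
	}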
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
index 8f05efa..bd5af7f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
@@ -296,7 +296,6 @@ int mlx5e_ethtool_set_ringparam(struct mlx5e_priv *priv,
 	struct mlx5e_channels new_channels = {};
 	u32 rx_pending_wqes;
 	u32 min_rq_size;
-	u32 max_rq_size;
 	u8 log_rq_size;
 	u8 log_sq_size;
 	u32 num_mtts;
@@ -315,8 +314,6 @@ int mlx5e_ethtool_set_ringparam(struct mlx5e_priv *priv,
 
 	min_rq_size = mlx5e_rx_wqes_to_packets(priv, rq_wq_type,
 					       1 << mlx5_min_log_rq_size(rq_wq_type));
-	max_rq_size = mlx5e_rx_wqes_to_packets(priv, rq_wq_type,
-					       1 << mlx5_max_log_rq_size(rq_wq_type));
 	rx_pending_wqes = mlx5e_packets_to_rx_wqes(priv, rq_wq_type,
 						   param->rx_pending);
 
@@ -326,12 +323,6 @@ int mlx5e_ethtool_set_ringparam(struct mlx5e_priv *priv,
 			    min_rq_size);
 		return -EINVAL;
 	}
-	if (param->rx_pending > max_rq_size) {
-		netdev_info(priv->netdev, "%s: rx_pending (%d) > max (%d)\n",
-			    __func__, param->rx_pending,
-			    max_rq_size);
-		return -EINVAL;
-	}
 
 	num_mtts = MLX5E_REQUIRED_MTTS(rx_pending_wqes);
 	if (priv->channels.params.rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ &&
@@ -347,12 +338,6 @@ int mlx5e_ethtool_set_ringparam(struct mlx5e_priv *priv,
 			    1 << MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE);
 		return -EINVAL;
 	}
-	if (param->tx_pending > (1 << MLX5E_PARAMS_MAXIMUM_LOG_SQ_SIZE)) {
-		netdev_info(priv->netdev, "%s: tx_pending (%d) > max (%d)\n",
-			    __func__, param->tx_pending,
-			    1 << MLX5E_PARAMS_MAXIMUM_LOG_SQ_SIZE);
-		return -EINVAL;
-	}
 
 	log_rq_size = order_base_2(rx_pending_wqes);
 	log_sq_size = order_base_2(param->tx_pending);
@@ -480,7 +465,7 @@ int mlx5e_ethtool_get_coalesce(struct mlx5e_priv *priv,
 	coal->rx_max_coalesced_frames = priv->channels.params.rx_cq_moderation.pkts;
 	coal->tx_coalesce_usecs       = priv->channels.params.tx_cq_moderation.usec;
 	coal->tx_max_coalesced_frames = priv->channels.params.tx_cq_moderation.pkts;
-	coal->use_adaptive_rx_coalesce = priv->channels.params.rx_am_enabled;
+	coal->use_adaptive_rx_coalesce = priv->channels.params.rx_dim_enabled;
 
 	return 0;
 }
@@ -534,7 +519,7 @@ int mlx5e_ethtool_set_coalesce(struct mlx5e_priv *priv,
 	new_channels.params.tx_cq_moderation.pkts = coal->tx_max_coalesced_frames;
 	new_channels.params.rx_cq_moderation.usec = coal->rx_coalesce_usecs;
 	new_channels.params.rx_cq_moderation.pkts = coal->rx_max_coalesced_frames;
-	new_channels.params.rx_am_enabled         = !!coal->use_adaptive_rx_coalesce;
+	new_channels.params.rx_dim_enabled        = !!coal->use_adaptive_rx_coalesce;
 
 	if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) {
 		priv->channels.params = new_channels.params;
@@ -542,7 +527,7 @@ int mlx5e_ethtool_set_coalesce(struct mlx5e_priv *priv,
 	}
 	/* we are opened */
 
-	reset = !!coal->use_adaptive_rx_coalesce != priv->channels.params.rx_am_enabled;
+	reset = !!coal->use_adaptive_rx_coalesce != priv->channels.params.rx_dim_enabled;
 	if (!reset) {
 		mlx5e_set_priv_channels_coalesce(priv, coal);
 		priv->channels.params = new_channels.params;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index d9d8227..bbbdb5c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -582,6 +582,10 @@ static int mlx5e_alloc_rq(struct mlx5e_channel *c,
 		goto err_rq_wq_destroy;
 	}
 
+	err = xdp_rxq_info_reg(&rq->xdp_rxq, rq->netdev, rq->ix);
+	if (err < 0)
+		goto err_rq_wq_destroy;
+
 	rq->buff.map_dir = rq->xdp_prog ? DMA_BIDIRECTIONAL : DMA_FROM_DEVICE;
 	rq->buff.headroom = params->rq_headroom;
 
@@ -674,8 +678,17 @@ static int mlx5e_alloc_rq(struct mlx5e_channel *c,
 		wqe->data.lkey = rq->mkey_be;
 	}
 
-	INIT_WORK(&rq->am.work, mlx5e_rx_am_work);
-	rq->am.mode = params->rx_cq_moderation.cq_period_mode;
+	INIT_WORK(&rq->dim.work, mlx5e_rx_dim_work);
+
+	switch (params->rx_cq_moderation.cq_period_mode) {
+	case MLX5_CQ_PERIOD_MODE_START_FROM_CQE:
+		rq->dim.mode = NET_DIM_CQ_PERIOD_MODE_START_FROM_CQE;
+		break;
+	case MLX5_CQ_PERIOD_MODE_START_FROM_EQE:
+	default:
+		rq->dim.mode = NET_DIM_CQ_PERIOD_MODE_START_FROM_EQE;
+	}
+
 	rq->page_cache.head = 0;
 	rq->page_cache.tail = 0;
 
@@ -687,6 +700,7 @@ static int mlx5e_alloc_rq(struct mlx5e_channel *c,
 err_rq_wq_destroy:
 	if (rq->xdp_prog)
 		bpf_prog_put(rq->xdp_prog);
+	xdp_rxq_info_unreg(&rq->xdp_rxq);
 	mlx5_wq_destroy(&rq->wq_ctrl);
 
 	return err;
@@ -699,6 +713,8 @@ static void mlx5e_free_rq(struct mlx5e_rq *rq)
 	if (rq->xdp_prog)
 		bpf_prog_put(rq->xdp_prog);
 
+	xdp_rxq_info_unreg(&rq->xdp_rxq);
+
 	switch (rq->wq_type) {
 	case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
 		mlx5e_rq_free_mpwqe_info(rq);
@@ -919,7 +935,7 @@ static int mlx5e_open_rq(struct mlx5e_channel *c,
 	if (err)
 		goto err_destroy_rq;
 
-	if (params->rx_am_enabled)
+	if (params->rx_dim_enabled)
 		c->rq.state |= BIT(MLX5E_RQ_STATE_AM);
 
 	return 0;
@@ -952,7 +968,7 @@ static void mlx5e_deactivate_rq(struct mlx5e_rq *rq)
 
 static void mlx5e_close_rq(struct mlx5e_rq *rq)
 {
-	cancel_work_sync(&rq->am.work);
+	cancel_work_sync(&rq->dim.work);
 	mlx5e_destroy_rq(rq);
 	mlx5e_free_rx_descs(rq);
 	mlx5e_free_rq(rq);
@@ -1565,7 +1581,7 @@ static void mlx5e_destroy_cq(struct mlx5e_cq *cq)
 }
 
 static int mlx5e_open_cq(struct mlx5e_channel *c,
-			 struct mlx5e_cq_moder moder,
+			 struct net_dim_cq_moder moder,
 			 struct mlx5e_cq_param *param,
 			 struct mlx5e_cq *cq)
 {
@@ -1747,7 +1763,7 @@ static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix,
 			      struct mlx5e_channel_param *cparam,
 			      struct mlx5e_channel **cp)
 {
-	struct mlx5e_cq_moder icocq_moder = {0, 0};
+	struct net_dim_cq_moder icocq_moder = {0, 0};
 	struct net_device *netdev = priv->netdev;
 	int cpu = mlx5e_get_cpu(priv, ix);
 	struct mlx5e_channel *c;
@@ -1999,7 +2015,7 @@ static void mlx5e_build_ico_cq_param(struct mlx5e_priv *priv,
 
 	mlx5e_build_common_cq_param(priv, param);
 
-	param->cq_period_mode = MLX5_CQ_PERIOD_MODE_START_FROM_EQE;
+	param->cq_period_mode = NET_DIM_CQ_PERIOD_MODE_START_FROM_EQE;
 }
 
 static void mlx5e_build_icosq_param(struct mlx5e_priv *priv,
@@ -2766,6 +2782,9 @@ static int mlx5e_alloc_drop_rq(struct mlx5_core_dev *mdev,
 	if (err)
 		return err;
 
+	/* Mark as unused given "Drop-RQ" packets never reach XDP */
+	xdp_rxq_info_unused(&rq->xdp_rxq);
+
 	rq->mdev = mdev;
 
 	return 0;
@@ -4038,9 +4057,18 @@ void mlx5e_set_rx_cq_mode_params(struct mlx5e_params *params, u8 cq_period_mode)
 		params->rx_cq_moderation.usec =
 			MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_USEC_FROM_CQE;
 
-	if (params->rx_am_enabled)
-		params->rx_cq_moderation =
-			mlx5e_am_get_def_profile(cq_period_mode);
+	if (params->rx_dim_enabled) {
+		switch (cq_period_mode) {
+		case MLX5_CQ_PERIOD_MODE_START_FROM_CQE:
+			params->rx_cq_moderation =
+				net_dim_get_def_profile(NET_DIM_CQ_PERIOD_MODE_START_FROM_CQE);
+			break;
+		case MLX5_CQ_PERIOD_MODE_START_FROM_EQE:
+		default:
+			params->rx_cq_moderation =
+				net_dim_get_def_profile(NET_DIM_CQ_PERIOD_MODE_START_FROM_EQE);
+		}
+	}
 
 	MLX5E_SET_PFLAG(params, MLX5E_PFLAG_RX_CQE_BASED_MODER,
 			params->rx_cq_moderation.cq_period_mode ==
@@ -4102,7 +4130,7 @@ void mlx5e_build_nic_params(struct mlx5_core_dev *mdev,
 	cq_period_mode = MLX5_CAP_GEN(mdev, cq_period_start_from_cqe) ?
 			MLX5_CQ_PERIOD_MODE_START_FROM_CQE :
 			MLX5_CQ_PERIOD_MODE_START_FROM_EQE;
-	params->rx_am_enabled = MLX5_CAP_GEN(mdev, cq_moderation);
+	params->rx_dim_enabled = MLX5_CAP_GEN(mdev, cq_moderation);
 	mlx5e_set_rx_cq_mode_params(params, cq_period_mode);
 	mlx5e_set_tx_cq_mode_params(params, cq_period_mode);
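The MLX5_CQ_PERIOD_MODE_* to NET_DIM_CQ_PERIOD_MODE_* switch above duplicates
the one added to mlx5e_alloc_rq() earlier in this file; both could share one
small helper, sketched here under a hypothetical name:

	static u8 mlx5_to_net_dim_cq_period_mode(u8 cq_period_mode)
	{
		/* EQE-based moderation is the default in both namespaces */
		return cq_period_mode == MLX5_CQ_PERIOD_MODE_START_FROM_CQE ?
		       NET_DIM_CQ_PERIOD_MODE_START_FROM_CQE :
		       NET_DIM_CQ_PERIOD_MODE_START_FROM_EQE;
	}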
 
@@ -4307,9 +4335,6 @@ static void mlx5e_nic_cleanup(struct mlx5e_priv *priv)
 {
 	mlx5e_ipsec_cleanup(priv);
 	mlx5e_vxlan_cleanup(priv);
-
-	if (priv->channels.params.xdp_prog)
-		bpf_prog_put(priv->channels.params.xdp_prog);
 }
 
 static int mlx5e_init_nic_rx(struct mlx5e_priv *priv)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
index 2c43606..4d1b0ff 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
@@ -190,6 +190,63 @@ int mlx5e_attr_get(struct net_device *dev, struct switchdev_attr *attr)
 	return 0;
 }
 
+static void mlx5e_sqs2vport_stop(struct mlx5_eswitch *esw,
+				 struct mlx5_eswitch_rep *rep)
+{
+	struct mlx5e_rep_sq *rep_sq, *tmp;
+	struct mlx5e_rep_priv *rpriv;
+
+	if (esw->mode != SRIOV_OFFLOADS)
+		return;
+
+	rpriv = mlx5e_rep_to_rep_priv(rep);
+	list_for_each_entry_safe(rep_sq, tmp, &rpriv->vport_sqs_list, list) {
+		mlx5_eswitch_del_send_to_vport_rule(rep_sq->send_to_vport_rule);
+		list_del(&rep_sq->list);
+		kfree(rep_sq);
+	}
+}
+
+static int mlx5e_sqs2vport_start(struct mlx5_eswitch *esw,
+				 struct mlx5_eswitch_rep *rep,
+				 u16 *sqns_array, int sqns_num)
+{
+	struct mlx5_flow_handle *flow_rule;
+	struct mlx5e_rep_priv *rpriv;
+	struct mlx5e_rep_sq *rep_sq;
+	int err;
+	int i;
+
+	if (esw->mode != SRIOV_OFFLOADS)
+		return 0;
+
+	rpriv = mlx5e_rep_to_rep_priv(rep);
+	for (i = 0; i < sqns_num; i++) {
+		rep_sq = kzalloc(sizeof(*rep_sq), GFP_KERNEL);
+		if (!rep_sq) {
+			err = -ENOMEM;
+			goto out_err;
+		}
+
+		/* Add re-inject rule to the PF/representor sqs */
+		flow_rule = mlx5_eswitch_add_send_to_vport_rule(esw,
+								rep->vport,
+								sqns_array[i]);
+		if (IS_ERR(flow_rule)) {
+			err = PTR_ERR(flow_rule);
+			kfree(rep_sq);
+			goto out_err;
+		}
+		rep_sq->send_to_vport_rule = flow_rule;
+		list_add(&rep_sq->list, &rpriv->vport_sqs_list);
+	}
+	return 0;
+
+out_err:
+	mlx5e_sqs2vport_stop(esw, rep);
+	return err;
+}
+
 int mlx5e_add_sqs_fwd_rules(struct mlx5e_priv *priv)
 {
 	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
@@ -210,7 +267,7 @@ int mlx5e_add_sqs_fwd_rules(struct mlx5e_priv *priv)
 			sqs[num_sqs++] = c->sq[tc].sqn;
 	}
 
-	err = mlx5_eswitch_sqs2vport_start(esw, rep, sqs, num_sqs);
+	err = mlx5e_sqs2vport_start(esw, rep, sqs, num_sqs);
 	kfree(sqs);
 
 out:
@@ -225,7 +282,7 @@ void mlx5e_remove_sqs_fwd_rules(struct mlx5e_priv *priv)
 	struct mlx5e_rep_priv *rpriv = priv->ppriv;
 	struct mlx5_eswitch_rep *rep = rpriv->rep;
 
-	mlx5_eswitch_sqs2vport_stop(esw, rep);
+	mlx5e_sqs2vport_stop(esw, rep);
 }
 
 static void mlx5e_rep_neigh_update_init_interval(struct mlx5e_rep_priv *rpriv)
@@ -238,7 +295,7 @@ static void mlx5e_rep_neigh_update_init_interval(struct mlx5e_rep_priv *rpriv)
 #endif
 	unsigned long ipv4_interval = NEIGH_VAR(&arp_tbl.parms,
 						DELAY_PROBE_TIME);
-	struct net_device *netdev = rpriv->rep->netdev;
+	struct net_device *netdev = rpriv->netdev;
 	struct mlx5e_priv *priv = netdev_priv(netdev);
 
 	rpriv->neigh_update.min_interval = min_t(unsigned long, ipv6_interval, ipv4_interval);
@@ -259,7 +316,7 @@ static void mlx5e_rep_neigh_stats_work(struct work_struct *work)
 {
 	struct mlx5e_rep_priv *rpriv = container_of(work, struct mlx5e_rep_priv,
 						    neigh_update.neigh_stats_work.work);
-	struct net_device *netdev = rpriv->rep->netdev;
+	struct net_device *netdev = rpriv->netdev;
 	struct mlx5e_priv *priv = netdev_priv(netdev);
 	struct mlx5e_neigh_hash_entry *nhe;
 
@@ -355,7 +412,7 @@ static int mlx5e_rep_netevent_event(struct notifier_block *nb,
 	struct mlx5e_rep_priv *rpriv = container_of(nb, struct mlx5e_rep_priv,
 						    neigh_update.netevent_nb);
 	struct mlx5e_neigh_update_table *neigh_update = &rpriv->neigh_update;
-	struct net_device *netdev = rpriv->rep->netdev;
+	struct net_device *netdev = rpriv->netdev;
 	struct mlx5e_priv *priv = netdev_priv(netdev);
 	struct mlx5e_neigh_hash_entry *nhe = NULL;
 	struct mlx5e_neigh m_neigh = {};
@@ -483,7 +540,7 @@ static int mlx5e_rep_neigh_init(struct mlx5e_rep_priv *rpriv)
 static void mlx5e_rep_neigh_cleanup(struct mlx5e_rep_priv *rpriv)
 {
 	struct mlx5e_neigh_update_table *neigh_update = &rpriv->neigh_update;
-	struct mlx5e_priv *priv = netdev_priv(rpriv->rep->netdev);
+	struct mlx5e_priv *priv = netdev_priv(rpriv->netdev);
 
 	unregister_netevent_notifier(&neigh_update->netevent_nb);
 
@@ -827,7 +884,7 @@ static void mlx5e_build_rep_params(struct mlx5_core_dev *mdev,
 	params->rq_wq_type  = MLX5_WQ_TYPE_LINKED_LIST;
 	params->log_rq_size = MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE;
 
-	params->rx_am_enabled = MLX5_CAP_GEN(mdev, cq_moderation);
+	params->rx_dim_enabled = MLX5_CAP_GEN(mdev, cq_moderation);
 	mlx5e_set_rx_cq_mode_params(params, cq_period_mode);
 
 	params->tx_max_inline         = mlx5e_get_max_inline_cap(mdev);
@@ -904,7 +961,7 @@ static int mlx5e_init_rep_rx(struct mlx5e_priv *priv)
 		err = PTR_ERR(flow_rule);
 		goto err_destroy_direct_tirs;
 	}
-	rep->vport_rx_rule = flow_rule;
+	rpriv->vport_rx_rule = flow_rule;
 
 	err = mlx5e_tc_init(priv);
 	if (err)
@@ -913,7 +970,7 @@ static int mlx5e_init_rep_rx(struct mlx5e_priv *priv)
 	return 0;
 
 err_del_flow_rule:
-	mlx5_del_flow_rules(rep->vport_rx_rule);
+	mlx5_del_flow_rules(rpriv->vport_rx_rule);
 err_destroy_direct_tirs:
 	mlx5e_destroy_direct_tirs(priv);
 err_destroy_direct_rqts:
@@ -924,10 +981,9 @@ static int mlx5e_init_rep_rx(struct mlx5e_priv *priv)
 static void mlx5e_cleanup_rep_rx(struct mlx5e_priv *priv)
 {
 	struct mlx5e_rep_priv *rpriv = priv->ppriv;
-	struct mlx5_eswitch_rep *rep = rpriv->rep;
 
 	mlx5e_tc_cleanup(priv);
-	mlx5_del_flow_rules(rep->vport_rx_rule);
+	mlx5_del_flow_rules(rpriv->vport_rx_rule);
 	mlx5e_destroy_direct_tirs(priv);
 	mlx5e_destroy_direct_rqts(priv);
 }
@@ -967,10 +1023,10 @@ static const struct mlx5e_profile mlx5e_rep_profile = {
 /* e-Switch vport representors */
 
 static int
-mlx5e_nic_rep_load(struct mlx5_eswitch *esw, struct mlx5_eswitch_rep *rep)
+mlx5e_nic_rep_load(struct mlx5_core_dev *dev, struct mlx5_eswitch_rep *rep)
 {
-	struct mlx5e_priv *priv = netdev_priv(rep->netdev);
-	struct mlx5e_rep_priv *rpriv = priv->ppriv;
+	struct mlx5e_rep_priv *rpriv = mlx5e_rep_to_rep_priv(rep);
+	struct mlx5e_priv *priv = netdev_priv(rpriv->netdev);
 
 	int err;
 
@@ -992,10 +1048,10 @@ mlx5e_nic_rep_load(struct mlx5_eswitch *esw, struct mlx5_eswitch_rep *rep)
 }
 
 static void
-mlx5e_nic_rep_unload(struct mlx5_eswitch *esw, struct mlx5_eswitch_rep *rep)
+mlx5e_nic_rep_unload(struct mlx5_eswitch_rep *rep)
 {
-	struct mlx5e_priv *priv = netdev_priv(rep->netdev);
-	struct mlx5e_rep_priv *rpriv = priv->ppriv;
+	struct mlx5e_rep_priv *rpriv = mlx5e_rep_to_rep_priv(rep);
+	struct mlx5e_priv *priv = netdev_priv(rpriv->netdev);
 
 	if (test_bit(MLX5E_STATE_OPENED, &priv->state))
 		mlx5e_remove_sqs_fwd_rules(priv);
@@ -1008,8 +1064,9 @@ mlx5e_nic_rep_unload(struct mlx5_eswitch *esw, struct mlx5_eswitch_rep *rep)
 }
 
 static int
-mlx5e_vport_rep_load(struct mlx5_eswitch *esw, struct mlx5_eswitch_rep *rep)
+mlx5e_vport_rep_load(struct mlx5_core_dev *dev, struct mlx5_eswitch_rep *rep)
 {
+	struct mlx5e_rep_priv *uplink_rpriv;
 	struct mlx5e_rep_priv *rpriv;
 	struct net_device *netdev;
 	struct mlx5e_priv *upriv;
@@ -1019,7 +1076,7 @@ mlx5e_vport_rep_load(struct mlx5_eswitch *esw, struct mlx5_eswitch_rep *rep)
 	if (!rpriv)
 		return -ENOMEM;
 
-	netdev = mlx5e_create_netdev(esw->dev, &mlx5e_rep_profile, rpriv);
+	netdev = mlx5e_create_netdev(dev, &mlx5e_rep_profile, rpriv);
 	if (!netdev) {
 		pr_warn("Failed to create representor netdev for vport %d\n",
 			rep->vport);
@@ -1027,8 +1084,10 @@ mlx5e_vport_rep_load(struct mlx5_eswitch *esw, struct mlx5_eswitch_rep *rep)
 		return -EINVAL;
 	}
 
-	rep->netdev = netdev;
+	rpriv->netdev = netdev;
 	rpriv->rep = rep;
+	rep->rep_if[REP_ETH].priv = rpriv;
+	INIT_LIST_HEAD(&rpriv->vport_sqs_list);
 
 	err = mlx5e_attach_netdev(netdev_priv(netdev));
 	if (err) {
@@ -1044,7 +1103,8 @@ mlx5e_vport_rep_load(struct mlx5_eswitch *esw, struct mlx5_eswitch_rep *rep)
 		goto err_detach_netdev;
 	}
 
-	upriv = netdev_priv(mlx5_eswitch_get_uplink_netdev(esw));
+	uplink_rpriv = mlx5_eswitch_get_uplink_priv(dev->priv.eswitch, REP_ETH);
+	upriv = netdev_priv(uplink_rpriv->netdev);
 	err = tc_setup_cb_egdev_register(netdev, mlx5e_setup_tc_block_cb,
 					 upriv);
 	if (err)
@@ -1076,16 +1136,19 @@ mlx5e_vport_rep_load(struct mlx5_eswitch *esw, struct mlx5_eswitch_rep *rep)
 }
 
 static void
-mlx5e_vport_rep_unload(struct mlx5_eswitch *esw, struct mlx5_eswitch_rep *rep)
+mlx5e_vport_rep_unload(struct mlx5_eswitch_rep *rep)
 {
-	struct net_device *netdev = rep->netdev;
+	struct mlx5e_rep_priv *rpriv = mlx5e_rep_to_rep_priv(rep);
+	struct net_device *netdev = rpriv->netdev;
 	struct mlx5e_priv *priv = netdev_priv(netdev);
-	struct mlx5e_rep_priv *rpriv = priv->ppriv;
+	struct mlx5e_rep_priv *uplink_rpriv;
 	void *ppriv = priv->ppriv;
 	struct mlx5e_priv *upriv;
 
-	unregister_netdev(rep->netdev);
-	upriv = netdev_priv(mlx5_eswitch_get_uplink_netdev(esw));
+	unregister_netdev(netdev);
+	uplink_rpriv = mlx5_eswitch_get_uplink_priv(priv->mdev->priv.eswitch,
+						    REP_ETH);
+	upriv = netdev_priv(uplink_rpriv->netdev);
 	tc_setup_cb_egdev_unregister(netdev, mlx5e_setup_tc_block_cb,
 				     upriv);
 	mlx5e_rep_neigh_cleanup(rpriv);
@@ -1100,18 +1163,13 @@ static void mlx5e_rep_register_vf_vports(struct mlx5e_priv *priv)
 	struct mlx5_eswitch *esw   = mdev->priv.eswitch;
 	int total_vfs = MLX5_TOTAL_VPORTS(mdev);
 	int vport;
-	u8 mac[ETH_ALEN];
-
-	mlx5_query_nic_vport_mac_address(mdev, 0, mac);
 
 	for (vport = 1; vport < total_vfs; vport++) {
-		struct mlx5_eswitch_rep rep;
+		struct mlx5_eswitch_rep_if rep_if = {};
 
-		rep.load = mlx5e_vport_rep_load;
-		rep.unload = mlx5e_vport_rep_unload;
-		rep.vport = vport;
-		ether_addr_copy(rep.hw_id, mac);
-		mlx5_eswitch_register_vport_rep(esw, vport, &rep);
+		rep_if.load = mlx5e_vport_rep_load;
+		rep_if.unload = mlx5e_vport_rep_unload;
+		mlx5_eswitch_register_vport_rep(esw, vport, &rep_if, REP_ETH);
 	}
 }
 
@@ -1123,21 +1181,24 @@ static void mlx5e_rep_unregister_vf_vports(struct mlx5e_priv *priv)
 	int vport;
 
 	for (vport = 1; vport < total_vfs; vport++)
-		mlx5_eswitch_unregister_vport_rep(esw, vport);
+		mlx5_eswitch_unregister_vport_rep(esw, vport, REP_ETH);
 }
 
 void mlx5e_register_vport_reps(struct mlx5e_priv *priv)
 {
 	struct mlx5_core_dev *mdev = priv->mdev;
 	struct mlx5_eswitch *esw   = mdev->priv.eswitch;
-	struct mlx5_eswitch_rep rep;
+	struct mlx5_eswitch_rep_if rep_if;
+	struct mlx5e_rep_priv *rpriv;
 
-	mlx5_query_nic_vport_mac_address(mdev, 0, rep.hw_id);
-	rep.load = mlx5e_nic_rep_load;
-	rep.unload = mlx5e_nic_rep_unload;
-	rep.vport = FDB_UPLINK_VPORT;
-	rep.netdev = priv->netdev;
-	mlx5_eswitch_register_vport_rep(esw, 0, &rep); /* UPLINK PF vport*/
+	rpriv = priv->ppriv;
+	rpriv->netdev = priv->netdev;
+
+	rep_if.load = mlx5e_nic_rep_load;
+	rep_if.unload = mlx5e_nic_rep_unload;
+	rep_if.priv = rpriv;
+	INIT_LIST_HEAD(&rpriv->vport_sqs_list);
+	mlx5_eswitch_register_vport_rep(esw, 0, &rep_if, REP_ETH); /* UPLINK PF vport */
 
 	mlx5e_rep_register_vf_vports(priv); /* VFs vports */
 }
@@ -1148,7 +1209,7 @@ void mlx5e_unregister_vport_reps(struct mlx5e_priv *priv)
 	struct mlx5_eswitch *esw   = mdev->priv.eswitch;
 
 	mlx5e_rep_unregister_vf_vports(priv); /* VFs vports */
-	mlx5_eswitch_unregister_vport_rep(esw, 0); /* UPLINK PF*/
+	mlx5_eswitch_unregister_vport_rep(esw, 0, REP_ETH); /* UPLINK PF */
 }
 
 void *mlx5e_alloc_nic_rep_priv(struct mlx5_core_dev *mdev)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.h b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.h
index 5659ed9..b9b481f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.h
@@ -56,8 +56,17 @@ struct mlx5e_neigh_update_table {
 struct mlx5e_rep_priv {
 	struct mlx5_eswitch_rep *rep;
 	struct mlx5e_neigh_update_table neigh_update;
+	struct net_device      *netdev;
+	struct mlx5_flow_handle *vport_rx_rule;
+	struct list_head       vport_sqs_list;
 };
 
+static inline
+struct mlx5e_rep_priv *mlx5e_rep_to_rep_priv(struct mlx5_eswitch_rep *rep)
+{
+	return (struct mlx5e_rep_priv *)rep->rep_if[REP_ETH].priv;
+}
+
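This accessor is the inverse of the assignments made at load time in en_rep.c
above (rep->rep_if[REP_ETH].priv = rpriv): each rep type parks its private
state behind its own rep_if slot and recovers it in later callbacks. A usage
sketch (hypothetical unload callback):

	static void my_rep_unload(struct mlx5_eswitch_rep *rep)
	{
		struct mlx5e_rep_priv *rpriv = mlx5e_rep_to_rep_priv(rep);

		/* state stored at load time is recovered from the rep */
		unregister_netdev(rpriv->netdev);
	}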
 struct mlx5e_neigh {
 	struct net_device *dev;
 	union {
@@ -124,6 +133,11 @@ struct mlx5e_encap_entry {
 	int encap_size;
 };
 
+struct mlx5e_rep_sq {
+	struct mlx5_flow_handle	*send_to_vport_rule;
+	struct list_head	 list;
+};
+
 void *mlx5e_alloc_nic_rep_priv(struct mlx5_core_dev *mdev);
 void mlx5e_register_vport_reps(struct mlx5e_priv *priv);
 void mlx5e_unregister_vport_reps(struct mlx5e_priv *priv);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
index 5b499c7..ff234df 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
@@ -495,8 +495,8 @@ static inline void mlx5e_poll_ico_single_cqe(struct mlx5e_cq *cq,
 	mlx5_cqwq_pop(&cq->wq);
 
 	if (unlikely((cqe->op_own >> 4) != MLX5_CQE_REQ)) {
-		WARN_ONCE(true, "mlx5e: Bad OP in ICOSQ CQE: 0x%x\n",
-			  cqe->op_own);
+		netdev_WARN_ONCE(cq->channel->netdev,
+				 "Bad OP in ICOSQ CQE: 0x%x\n", cqe->op_own);
 		return;
 	}
 
@@ -506,9 +506,8 @@ static inline void mlx5e_poll_ico_single_cqe(struct mlx5e_cq *cq,
 	}
 
 	if (unlikely(icowi->opcode != MLX5_OPCODE_NOP))
-		WARN_ONCE(true,
-			  "mlx5e: Bad OPCODE in ICOSQ WQE info: 0x%x\n",
-			  icowi->opcode);
+		netdev_WARN_ONCE(cq->channel->netdev,
+				 "Bad OPCODE in ICOSQ WQE info: 0x%x\n", icowi->opcode);
 }
 
 static void mlx5e_poll_ico_cq(struct mlx5e_cq *cq, struct mlx5e_rq *rq)
@@ -812,6 +811,7 @@ static inline int mlx5e_xdp_handle(struct mlx5e_rq *rq,
 	xdp_set_data_meta_invalid(&xdp);
 	xdp.data_end = xdp.data + *len;
 	xdp.data_hard_start = va;
+	xdp.rxq = &rq->xdp_rxq;
 
 	act = bpf_prog_run_xdp(prog, &xdp);
 	switch (act) {
@@ -1175,7 +1175,9 @@ static inline void mlx5i_complete_rx_cqe(struct mlx5e_rq *rq,
 					 u32 cqe_bcnt,
 					 struct sk_buff *skb)
 {
+	struct hwtstamp_config *tstamp;
 	struct net_device *netdev;
+	struct mlx5e_priv *priv;
 	char *pseudo_header;
 	u32 qpn;
 	u8 *dgid;
@@ -1194,6 +1196,9 @@ static inline void mlx5i_complete_rx_cqe(struct mlx5e_rq *rq,
 		return;
 	}
 
+	priv = mlx5i_epriv(netdev);
+	tstamp = &priv->tstamp;
+
 	g = (be32_to_cpu(cqe->flags_rqpn) >> 28) & 3;
 	dgid = skb->data + MLX5_IB_GRH_DGID_OFFSET;
 	if ((!g) || dgid[0] != 0xff)
@@ -1214,7 +1219,7 @@ static inline void mlx5i_complete_rx_cqe(struct mlx5e_rq *rq,
 	skb->ip_summed = CHECKSUM_COMPLETE;
 	skb->csum = csum_unfold((__force __sum16)cqe->check_sum);
 
-	if (unlikely(mlx5e_rx_hw_stamp(rq->tstamp)))
+	if (unlikely(mlx5e_rx_hw_stamp(tstamp)))
 		skb_hwtstamps(skb)->hwtstamp =
 				mlx5_timecounter_cyc2time(rq->clock, get_cqe_ts(cqe));
 
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx_am.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx_am.c
deleted file mode 100644
index e401d9d..0000000
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx_am.c
+++ /dev/null
@@ -1,341 +0,0 @@
-/*
- * Copyright (c) 2016, Mellanox Technologies. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses.  You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- *     Redistribution and use in source and binary forms, with or
- *     without modification, are permitted provided that the following
- *     conditions are met:
- *
- *      - Redistributions of source code must retain the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer.
- *
- *      - Redistributions in binary form must reproduce the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer in the documentation and/or other materials
- *        provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#include "en.h"
-
-/* Adaptive moderation profiles */
-#define MLX5E_AM_DEFAULT_RX_CQ_MODERATION_PKTS_FROM_EQE 256
-#define MLX5E_RX_AM_DEF_PROFILE_CQE 1
-#define MLX5E_RX_AM_DEF_PROFILE_EQE 1
-#define MLX5E_PARAMS_AM_NUM_PROFILES 5
-
-/* All profiles sizes must be MLX5E_PARAMS_AM_NUM_PROFILES */
-#define MLX5_AM_EQE_PROFILES { \
-	{1,   MLX5E_AM_DEFAULT_RX_CQ_MODERATION_PKTS_FROM_EQE}, \
-	{8,   MLX5E_AM_DEFAULT_RX_CQ_MODERATION_PKTS_FROM_EQE}, \
-	{64,  MLX5E_AM_DEFAULT_RX_CQ_MODERATION_PKTS_FROM_EQE}, \
-	{128, MLX5E_AM_DEFAULT_RX_CQ_MODERATION_PKTS_FROM_EQE}, \
-	{256, MLX5E_AM_DEFAULT_RX_CQ_MODERATION_PKTS_FROM_EQE}, \
-}
-
-#define MLX5_AM_CQE_PROFILES { \
-	{2,  256},             \
-	{8,  128},             \
-	{16, 64},              \
-	{32, 64},              \
-	{64, 64}               \
-}
-
-static const struct mlx5e_cq_moder
-profile[MLX5_CQ_PERIOD_NUM_MODES][MLX5E_PARAMS_AM_NUM_PROFILES] = {
-	MLX5_AM_EQE_PROFILES,
-	MLX5_AM_CQE_PROFILES,
-};
-
-static inline struct mlx5e_cq_moder mlx5e_am_get_profile(u8 cq_period_mode, int ix)
-{
-	struct mlx5e_cq_moder cq_moder;
-
-	cq_moder = profile[cq_period_mode][ix];
-	cq_moder.cq_period_mode = cq_period_mode;
-	return cq_moder;
-}
-
-struct mlx5e_cq_moder mlx5e_am_get_def_profile(u8 rx_cq_period_mode)
-{
-	int default_profile_ix;
-
-	if (rx_cq_period_mode == MLX5_CQ_PERIOD_MODE_START_FROM_CQE)
-		default_profile_ix = MLX5E_RX_AM_DEF_PROFILE_CQE;
-	else /* MLX5_CQ_PERIOD_MODE_START_FROM_EQE */
-		default_profile_ix = MLX5E_RX_AM_DEF_PROFILE_EQE;
-
-	return mlx5e_am_get_profile(rx_cq_period_mode, default_profile_ix);
-}
-
-/* Adaptive moderation logic */
-enum {
-	MLX5E_AM_START_MEASURE,
-	MLX5E_AM_MEASURE_IN_PROGRESS,
-	MLX5E_AM_APPLY_NEW_PROFILE,
-};
-
-enum {
-	MLX5E_AM_PARKING_ON_TOP,
-	MLX5E_AM_PARKING_TIRED,
-	MLX5E_AM_GOING_RIGHT,
-	MLX5E_AM_GOING_LEFT,
-};
-
-enum {
-	MLX5E_AM_STATS_WORSE,
-	MLX5E_AM_STATS_SAME,
-	MLX5E_AM_STATS_BETTER,
-};
-
-enum {
-	MLX5E_AM_STEPPED,
-	MLX5E_AM_TOO_TIRED,
-	MLX5E_AM_ON_EDGE,
-};
-
-static bool mlx5e_am_on_top(struct mlx5e_rx_am *am)
-{
-	switch (am->tune_state) {
-	case MLX5E_AM_PARKING_ON_TOP:
-	case MLX5E_AM_PARKING_TIRED:
-		return true;
-	case MLX5E_AM_GOING_RIGHT:
-		return (am->steps_left > 1) && (am->steps_right == 1);
-	default: /* MLX5E_AM_GOING_LEFT */
-		return (am->steps_right > 1) && (am->steps_left == 1);
-	}
-}
-
-static void mlx5e_am_turn(struct mlx5e_rx_am *am)
-{
-	switch (am->tune_state) {
-	case MLX5E_AM_PARKING_ON_TOP:
-	case MLX5E_AM_PARKING_TIRED:
-		break;
-	case MLX5E_AM_GOING_RIGHT:
-		am->tune_state = MLX5E_AM_GOING_LEFT;
-		am->steps_left = 0;
-		break;
-	case MLX5E_AM_GOING_LEFT:
-		am->tune_state = MLX5E_AM_GOING_RIGHT;
-		am->steps_right = 0;
-		break;
-	}
-}
-
-static int mlx5e_am_step(struct mlx5e_rx_am *am)
-{
-	if (am->tired == (MLX5E_PARAMS_AM_NUM_PROFILES * 2))
-		return MLX5E_AM_TOO_TIRED;
-
-	switch (am->tune_state) {
-	case MLX5E_AM_PARKING_ON_TOP:
-	case MLX5E_AM_PARKING_TIRED:
-		break;
-	case MLX5E_AM_GOING_RIGHT:
-		if (am->profile_ix == (MLX5E_PARAMS_AM_NUM_PROFILES - 1))
-			return MLX5E_AM_ON_EDGE;
-		am->profile_ix++;
-		am->steps_right++;
-		break;
-	case MLX5E_AM_GOING_LEFT:
-		if (am->profile_ix == 0)
-			return MLX5E_AM_ON_EDGE;
-		am->profile_ix--;
-		am->steps_left++;
-		break;
-	}
-
-	am->tired++;
-	return MLX5E_AM_STEPPED;
-}
-
-static void mlx5e_am_park_on_top(struct mlx5e_rx_am *am)
-{
-	am->steps_right  = 0;
-	am->steps_left   = 0;
-	am->tired        = 0;
-	am->tune_state   = MLX5E_AM_PARKING_ON_TOP;
-}
-
-static void mlx5e_am_park_tired(struct mlx5e_rx_am *am)
-{
-	am->steps_right  = 0;
-	am->steps_left   = 0;
-	am->tune_state   = MLX5E_AM_PARKING_TIRED;
-}
-
-static void mlx5e_am_exit_parking(struct mlx5e_rx_am *am)
-{
-	am->tune_state = am->profile_ix ? MLX5E_AM_GOING_LEFT :
-					  MLX5E_AM_GOING_RIGHT;
-	mlx5e_am_step(am);
-}
-
-#define IS_SIGNIFICANT_DIFF(val, ref) \
-	(((100 * abs((val) - (ref))) / (ref)) > 10) /* more than 10% difference */
-
-static int mlx5e_am_stats_compare(struct mlx5e_rx_am_stats *curr,
-				  struct mlx5e_rx_am_stats *prev)
-{
-	if (!prev->bpms)
-		return curr->bpms ? MLX5E_AM_STATS_BETTER :
-				    MLX5E_AM_STATS_SAME;
-
-	if (IS_SIGNIFICANT_DIFF(curr->bpms, prev->bpms))
-		return (curr->bpms > prev->bpms) ? MLX5E_AM_STATS_BETTER :
-						   MLX5E_AM_STATS_WORSE;
-
-	if (IS_SIGNIFICANT_DIFF(curr->ppms, prev->ppms))
-		return (curr->ppms > prev->ppms) ? MLX5E_AM_STATS_BETTER :
-						   MLX5E_AM_STATS_WORSE;
-
-	if (IS_SIGNIFICANT_DIFF(curr->epms, prev->epms))
-		return (curr->epms < prev->epms) ? MLX5E_AM_STATS_BETTER :
-						   MLX5E_AM_STATS_WORSE;
-
-	return MLX5E_AM_STATS_SAME;
-}
-
-static bool mlx5e_am_decision(struct mlx5e_rx_am_stats *curr_stats,
-			      struct mlx5e_rx_am *am)
-{
-	int prev_state = am->tune_state;
-	int prev_ix = am->profile_ix;
-	int stats_res;
-	int step_res;
-
-	switch (am->tune_state) {
-	case MLX5E_AM_PARKING_ON_TOP:
-		stats_res = mlx5e_am_stats_compare(curr_stats, &am->prev_stats);
-		if (stats_res != MLX5E_AM_STATS_SAME)
-			mlx5e_am_exit_parking(am);
-		break;
-
-	case MLX5E_AM_PARKING_TIRED:
-		am->tired--;
-		if (!am->tired)
-			mlx5e_am_exit_parking(am);
-		break;
-
-	case MLX5E_AM_GOING_RIGHT:
-	case MLX5E_AM_GOING_LEFT:
-		stats_res = mlx5e_am_stats_compare(curr_stats, &am->prev_stats);
-		if (stats_res != MLX5E_AM_STATS_BETTER)
-			mlx5e_am_turn(am);
-
-		if (mlx5e_am_on_top(am)) {
-			mlx5e_am_park_on_top(am);
-			break;
-		}
-
-		step_res = mlx5e_am_step(am);
-		switch (step_res) {
-		case MLX5E_AM_ON_EDGE:
-			mlx5e_am_park_on_top(am);
-			break;
-		case MLX5E_AM_TOO_TIRED:
-			mlx5e_am_park_tired(am);
-			break;
-		}
-
-		break;
-	}
-
-	if ((prev_state     != MLX5E_AM_PARKING_ON_TOP) ||
-	    (am->tune_state != MLX5E_AM_PARKING_ON_TOP))
-		am->prev_stats = *curr_stats;
-
-	return am->profile_ix != prev_ix;
-}
-
-static void mlx5e_am_sample(struct mlx5e_rq *rq,
-			    struct mlx5e_rx_am_sample *s)
-{
-	s->time	     = ktime_get();
-	s->pkt_ctr   = rq->stats.packets;
-	s->byte_ctr  = rq->stats.bytes;
-	s->event_ctr = rq->cq.event_ctr;
-}
-
-#define MLX5E_AM_NEVENTS 64
-#define BITS_PER_TYPE(type) (sizeof(type) * BITS_PER_BYTE)
-#define BIT_GAP(bits, end, start) ((((end) - (start)) + BIT_ULL(bits)) & (BIT_ULL(bits) - 1))
-
-static void mlx5e_am_calc_stats(struct mlx5e_rx_am_sample *start,
-				struct mlx5e_rx_am_sample *end,
-				struct mlx5e_rx_am_stats *curr_stats)
-{
-	/* u32 holds up to 71 minutes, should be enough */
-	u32 delta_us = ktime_us_delta(end->time, start->time);
-	u32 npkts = BIT_GAP(BITS_PER_TYPE(u32), end->pkt_ctr, start->pkt_ctr);
-	u32 nbytes = BIT_GAP(BITS_PER_TYPE(u32), end->byte_ctr,
-			     start->byte_ctr);
-
-	if (!delta_us)
-		return;
-
-	curr_stats->ppms = DIV_ROUND_UP(npkts * USEC_PER_MSEC, delta_us);
-	curr_stats->bpms = DIV_ROUND_UP(nbytes * USEC_PER_MSEC, delta_us);
-	curr_stats->epms = DIV_ROUND_UP(MLX5E_AM_NEVENTS * USEC_PER_MSEC,
-					delta_us);
-}
-
-void mlx5e_rx_am_work(struct work_struct *work)
-{
-	struct mlx5e_rx_am *am = container_of(work, struct mlx5e_rx_am,
-					      work);
-	struct mlx5e_rq *rq = container_of(am, struct mlx5e_rq, am);
-	struct mlx5e_cq_moder cur_profile = profile[am->mode][am->profile_ix];
-
-	mlx5_core_modify_cq_moderation(rq->mdev, &rq->cq.mcq,
-				       cur_profile.usec, cur_profile.pkts);
-
-	am->state = MLX5E_AM_START_MEASURE;
-}
-
-void mlx5e_rx_am(struct mlx5e_rq *rq)
-{
-	struct mlx5e_rx_am *am = &rq->am;
-	struct mlx5e_rx_am_sample end_sample;
-	struct mlx5e_rx_am_stats curr_stats;
-	u16 nevents;
-
-	switch (am->state) {
-	case MLX5E_AM_MEASURE_IN_PROGRESS:
-		nevents = BIT_GAP(BITS_PER_TYPE(u16), rq->cq.event_ctr,
-				  am->start_sample.event_ctr);
-		if (nevents < MLX5E_AM_NEVENTS)
-			break;
-		mlx5e_am_sample(rq, &end_sample);
-		mlx5e_am_calc_stats(&am->start_sample, &end_sample,
-				    &curr_stats);
-		if (mlx5e_am_decision(&curr_stats, am)) {
-			am->state = MLX5E_AM_APPLY_NEW_PROFILE;
-			schedule_work(&am->work);
-			break;
-		}
-		/* fall through */
-	case MLX5E_AM_START_MEASURE:
-		mlx5e_am_sample(rq, &am->start_sample);
-		am->state = MLX5E_AM_MEASURE_IN_PROGRESS;
-		break;
-	case MLX5E_AM_APPLY_NEW_PROFILE:
-		break;
-	}
-}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
index 55979ec..cf528da 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
@@ -56,12 +56,14 @@ struct mlx5_nic_flow_attr {
 	u32 action;
 	u32 flow_tag;
 	u32 mod_hdr_id;
+	u32 hairpin_tirn;
 };
 
 enum {
 	MLX5E_TC_FLOW_ESWITCH	= BIT(0),
 	MLX5E_TC_FLOW_NIC	= BIT(1),
 	MLX5E_TC_FLOW_OFFLOADED	= BIT(2),
+	MLX5E_TC_FLOW_HAIRPIN	= BIT(3),
 };
 
 struct mlx5e_tc_flow {
@@ -71,6 +73,7 @@ struct mlx5e_tc_flow {
 	struct mlx5_flow_handle *rule;
 	struct list_head	encap;   /* flows sharing the same encap ID */
 	struct list_head	mod_hdr; /* flows sharing the same mod hdr ID */
+	struct list_head	hairpin; /* flows sharing the same hairpin */
 	union {
 		struct mlx5_esw_flow_attr esw_attr[0];
 		struct mlx5_nic_flow_attr nic_attr[0];
@@ -93,6 +96,25 @@ enum {
 #define MLX5E_TC_TABLE_NUM_GROUPS 4
 #define MLX5E_TC_TABLE_MAX_GROUP_SIZE (1 << 16)
 
+struct mlx5e_hairpin {
+	struct mlx5_hairpin *pair;
+
+	struct mlx5_core_dev *func_mdev;
+	u32 tdn;
+	u32 tirn;
+};
+
+struct mlx5e_hairpin_entry {
+	/* a node of a hash table which keeps all the hairpin entries */
+	struct hlist_node hairpin_hlist;
+
+	/* flows sharing the same hairpin */
+	struct list_head flows;
+
+	int peer_ifindex;
+	struct mlx5e_hairpin *hp;
+};
+
 struct mod_hdr_key {
 	int num_actions;
 	void *actions;
@@ -222,6 +244,187 @@ static void mlx5e_detach_mod_hdr(struct mlx5e_priv *priv,
 	}
 }
 
+static
+struct mlx5_core_dev *mlx5e_hairpin_get_mdev(struct net *net, int ifindex)
+{
+	struct net_device *netdev;
+	struct mlx5e_priv *priv;
+
+	netdev = __dev_get_by_index(net, ifindex);
+	priv = netdev_priv(netdev);
+	return priv->mdev;
+}
+
+static int mlx5e_hairpin_create_transport(struct mlx5e_hairpin *hp)
+{
+	u32 in[MLX5_ST_SZ_DW(create_tir_in)] = {0};
+	void *tirc;
+	int err;
+
+	err = mlx5_core_alloc_transport_domain(hp->func_mdev, &hp->tdn);
+	if (err)
+		goto alloc_tdn_err;
+
+	tirc = MLX5_ADDR_OF(create_tir_in, in, ctx);
+
+	MLX5_SET(tirc, tirc, disp_type, MLX5_TIRC_DISP_TYPE_DIRECT);
+	MLX5_SET(tirc, tirc, inline_rqn, hp->pair->rqn);
+	MLX5_SET(tirc, tirc, transport_domain, hp->tdn);
+
+	err = mlx5_core_create_tir(hp->func_mdev, in, MLX5_ST_SZ_BYTES(create_tir_in), &hp->tirn);
+	if (err)
+		goto create_tir_err;
+
+	return 0;
+
+create_tir_err:
+	mlx5_core_dealloc_transport_domain(hp->func_mdev, hp->tdn);
+alloc_tdn_err:
+	return err;
+}
+
+static void mlx5e_hairpin_destroy_transport(struct mlx5e_hairpin *hp)
+{
+	mlx5_core_destroy_tir(hp->func_mdev, hp->tirn);
+	mlx5_core_dealloc_transport_domain(hp->func_mdev, hp->tdn);
+}
+
+static struct mlx5e_hairpin *
+mlx5e_hairpin_create(struct mlx5e_priv *priv, struct mlx5_hairpin_params *params,
+		     int peer_ifindex)
+{
+	struct mlx5_core_dev *func_mdev, *peer_mdev;
+	struct mlx5e_hairpin *hp;
+	struct mlx5_hairpin *pair;
+	int err;
+
+	hp = kzalloc(sizeof(*hp), GFP_KERNEL);
+	if (!hp)
+		return ERR_PTR(-ENOMEM);
+
+	func_mdev = priv->mdev;
+	peer_mdev = mlx5e_hairpin_get_mdev(dev_net(priv->netdev), peer_ifindex);
+
+	pair = mlx5_core_hairpin_create(func_mdev, peer_mdev, params);
+	if (IS_ERR(pair)) {
+		err = PTR_ERR(pair);
+		goto create_pair_err;
+	}
+	hp->pair = pair;
+	hp->func_mdev = func_mdev;
+
+	err = mlx5e_hairpin_create_transport(hp);
+	if (err)
+		goto create_transport_err;
+
+	return hp;
+
+create_transport_err:
+	mlx5_core_hairpin_destroy(hp->pair);
+create_pair_err:
+	kfree(hp);
+	return ERR_PTR(err);
+}
+
+static void mlx5e_hairpin_destroy(struct mlx5e_hairpin *hp)
+{
+	mlx5e_hairpin_destroy_transport(hp);
+	mlx5_core_hairpin_destroy(hp->pair);
+	kvfree(hp);
+}
+
+static struct mlx5e_hairpin_entry *mlx5e_hairpin_get(struct mlx5e_priv *priv,
+						     int peer_ifindex)
+{
+	struct mlx5e_hairpin_entry *hpe;
+
+	hash_for_each_possible(priv->fs.tc.hairpin_tbl, hpe,
+			       hairpin_hlist, peer_ifindex) {
+		if (hpe->peer_ifindex == peer_ifindex)
+			return hpe;
+	}
+
+	return NULL;
+}
+
+static int mlx5e_hairpin_flow_add(struct mlx5e_priv *priv,
+				  struct mlx5e_tc_flow *flow,
+				  struct mlx5e_tc_flow_parse_attr *parse_attr)
+{
+	int peer_ifindex = parse_attr->mirred_ifindex;
+	struct mlx5_hairpin_params params;
+	struct mlx5e_hairpin_entry *hpe;
+	struct mlx5e_hairpin *hp;
+	int err;
+
+	if (!MLX5_CAP_GEN(priv->mdev, hairpin)) {
+		netdev_warn(priv->netdev, "hairpin is not supported\n");
+		return -EOPNOTSUPP;
+	}
+
+	hpe = mlx5e_hairpin_get(priv, peer_ifindex);
+	if (hpe)
+		goto attach_flow;
+
+	hpe = kzalloc(sizeof(*hpe), GFP_KERNEL);
+	if (!hpe)
+		return -ENOMEM;
+
+	INIT_LIST_HEAD(&hpe->flows);
+	hpe->peer_ifindex = peer_ifindex;
+
+	params.log_data_size = 15;
+	params.log_data_size = min_t(u8, params.log_data_size,
+				     MLX5_CAP_GEN(priv->mdev, log_max_hairpin_wq_data_sz));
+	params.log_data_size = max_t(u8, params.log_data_size,
+				     MLX5_CAP_GEN(priv->mdev, log_min_hairpin_wq_data_sz));
+	params.q_counter = priv->q_counter;
+
+	hp = mlx5e_hairpin_create(priv, &params, peer_ifindex);
+	if (IS_ERR(hp)) {
+		err = PTR_ERR(hp);
+		goto create_hairpin_err;
+	}
+
+	netdev_dbg(priv->netdev, "add hairpin: tirn %x rqn %x peer %s sqn %x log data size %d\n",
+		   hp->tirn, hp->pair->rqn, hp->pair->peer_mdev->priv.name,
+		   hp->pair->sqn, params.log_data_size);
+
+	hpe->hp = hp;
+	hash_add(priv->fs.tc.hairpin_tbl, &hpe->hairpin_hlist, peer_ifindex);
+
+attach_flow:
+	flow->nic_attr->hairpin_tirn = hpe->hp->tirn;
+	list_add(&flow->hairpin, &hpe->flows);
+	return 0;
+
+create_hairpin_err:
+	kfree(hpe);
+	return err;
+}
+
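The three log_data_size assignments in mlx5e_hairpin_flow_add() above amount
to clamping the default of 15 into the range the device reports; assuming
log_min_hairpin_wq_data_sz <= log_max_hairpin_wq_data_sz (as firmware
guarantees), the same result can be written in one statement:

	params.log_data_size = clamp_t(u8, 15,
				       MLX5_CAP_GEN(priv->mdev, log_min_hairpin_wq_data_sz),
				       MLX5_CAP_GEN(priv->mdev, log_max_hairpin_wq_data_sz));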
+static void mlx5e_hairpin_flow_del(struct mlx5e_priv *priv,
+				   struct mlx5e_tc_flow *flow)
+{
+	struct list_head *next = flow->hairpin.next;
+
+	list_del(&flow->hairpin);
+
+	/* no more hairpin flows for us, release the hairpin pair */
+	if (list_empty(next)) {
+		struct mlx5e_hairpin_entry *hpe;
+
+		hpe = list_entry(next, struct mlx5e_hairpin_entry, flows);
+
+		netdev_dbg(priv->netdev, "del hairpin: peer %s\n",
+			   hpe->hp->pair->peer_mdev->priv.name);
+
+		mlx5e_hairpin_destroy(hpe->hp);
+		hash_del(&hpe->hairpin_hlist);
+		kfree(hpe);
+	}
+}
+
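mlx5e_hairpin_flow_del() above relies on a list-as-refcount idiom: ->next is
saved before list_del(), and when the removed flow was the last one on the
entry's list, that saved pointer is the list head itself, so list_empty() on
it returns true and list_entry() recovers the containing hairpin entry.
Distilled into generic form (hypothetical owner/user naming):

	#include <linux/list.h>
	#include <linux/slab.h>

	struct owner {
		struct list_head users;		/* all users link here */
	};

	static void user_put(struct list_head *user)
	{
		struct list_head *next = user->next;

		list_del(user);
		if (list_empty(next)) {
			/* 'user' was the last node, so 'next' is the head */
			struct owner *o = list_entry(next, struct owner, users);

			kfree(o);
		}
	}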
 static struct mlx5_flow_handle *
 mlx5e_tc_add_nic_flow(struct mlx5e_priv *priv,
 		      struct mlx5e_tc_flow_parse_attr *parse_attr,
@@ -229,7 +432,7 @@ mlx5e_tc_add_nic_flow(struct mlx5e_priv *priv,
 {
 	struct mlx5_nic_flow_attr *attr = flow->nic_attr;
 	struct mlx5_core_dev *dev = priv->mdev;
-	struct mlx5_flow_destination dest = {};
+	struct mlx5_flow_destination dest[2] = {};
 	struct mlx5_flow_act flow_act = {
 		.action = attr->action,
 		.flow_tag = attr->flow_tag,
@@ -238,18 +441,33 @@ mlx5e_tc_add_nic_flow(struct mlx5e_priv *priv,
 	struct mlx5_fc *counter = NULL;
 	struct mlx5_flow_handle *rule;
 	bool table_created = false;
-	int err;
+	int err, dest_ix = 0;
 
 	if (attr->action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
-		dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
-		dest.ft = priv->fs.vlan.ft.t;
-	} else if (attr->action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
-		counter = mlx5_fc_create(dev, true);
-		if (IS_ERR(counter))
-			return ERR_CAST(counter);
+		if (flow->flags & MLX5E_TC_FLOW_HAIRPIN) {
+			err = mlx5e_hairpin_flow_add(priv, flow, parse_attr);
+			if (err) {
+				rule = ERR_PTR(err);
+				goto err_add_hairpin_flow;
+			}
+			dest[dest_ix].type = MLX5_FLOW_DESTINATION_TYPE_TIR;
+			dest[dest_ix].tir_num = attr->hairpin_tirn;
+		} else {
+			dest[dest_ix].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
+			dest[dest_ix].ft = priv->fs.vlan.ft.t;
+		}
+		dest_ix++;
+	}
 
-		dest.type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
-		dest.counter = counter;
+	if (attr->action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
+		counter = mlx5_fc_create(dev, true);
+		if (IS_ERR(counter)) {
+			rule = ERR_CAST(counter);
+			goto err_fc_create;
+		}
+		dest[dest_ix].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
+		dest[dest_ix].counter = counter;
+		dest_ix++;
 	}
 
 	if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR) {
@@ -292,7 +510,7 @@ mlx5e_tc_add_nic_flow(struct mlx5e_priv *priv,
 
 	parse_attr->spec.match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
 	rule = mlx5_add_flow_rules(priv->fs.tc.t, &parse_attr->spec,
-				   &flow_act, &dest, 1);
+				   &flow_act, dest, dest_ix);
 
 	if (IS_ERR(rule))
 		goto err_add_rule;
@@ -309,7 +527,10 @@ mlx5e_tc_add_nic_flow(struct mlx5e_priv *priv,
 		mlx5e_detach_mod_hdr(priv, flow);
 err_create_mod_hdr_id:
 	mlx5_fc_destroy(dev, counter);
-
+err_fc_create:
+	if (flow->flags & MLX5E_TC_FLOW_HAIRPIN)
+		mlx5e_hairpin_flow_del(priv, flow);
+err_add_hairpin_flow:
 	return rule;
 }
 
@@ -330,6 +551,9 @@ static void mlx5e_tc_del_nic_flow(struct mlx5e_priv *priv,
 
 	if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
 		mlx5e_detach_mod_hdr(priv, flow);
+
+	if (flow->flags & MLX5E_TC_FLOW_HAIRPIN)
+		mlx5e_hairpin_flow_del(priv, flow);
 }
 
 static void mlx5e_detach_encap(struct mlx5e_priv *priv,
@@ -617,7 +841,8 @@ static int parse_tunnel_attr(struct mlx5e_priv *priv,
 						  FLOW_DISSECTOR_KEY_ENC_PORTS,
 						  f->mask);
 		struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
-		struct net_device *up_dev = mlx5_eswitch_get_uplink_netdev(esw);
+		struct mlx5e_rep_priv *uplink_rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH);
+		struct net_device *up_dev = uplink_rpriv->netdev;
 		struct mlx5e_priv *up_priv = netdev_priv(up_dev);
 
 		/* Full udp dst port must be given */
@@ -1421,6 +1646,20 @@ static bool actions_match_supported(struct mlx5e_priv *priv,
 	return true;
 }
 
+static bool same_hw_devs(struct mlx5e_priv *priv, struct mlx5e_priv *peer_priv)
+{
+	struct mlx5_core_dev *fmdev, *pmdev;
+	u16 func_id, peer_id;
+
+	fmdev = priv->mdev;
+	pmdev = peer_priv->mdev;
+
+	func_id = (u16)((fmdev->pdev->bus->number << 8) | PCI_SLOT(fmdev->pdev->devfn));
+	peer_id = (u16)((pmdev->pdev->bus->number << 8) | PCI_SLOT(pmdev->pdev->devfn));
+
+	return (func_id == peer_id);
+}
+
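same_hw_devs() above folds the PCI bus number and slot into one id,
deliberately masking off the function number, so PF/VF functions that share a
bus/slot pair compare equal. A worked example with hypothetical devfns:
functions 03:00.0 and 03:00.1 match, while 04:00.0 does not:

	#include <linux/pci.h>

	u16 a = (0x03 << 8) | PCI_SLOT(PCI_DEVFN(0, 0));	/* 0x0300 */
	u16 b = (0x03 << 8) | PCI_SLOT(PCI_DEVFN(0, 1));	/* 0x0300 */
	u16 c = (0x04 << 8) | PCI_SLOT(PCI_DEVFN(0, 0));	/* 0x0400 */

	/* a == b (same HW), a != c (different bus) */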
 static int parse_tc_nic_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
 				struct mlx5e_tc_flow_parse_attr *parse_attr,
 				struct mlx5e_tc_flow *flow)
@@ -1465,6 +1704,23 @@ static int parse_tc_nic_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
 			return -EOPNOTSUPP;
 		}
 
+		if (is_tcf_mirred_egress_redirect(a)) {
+			struct net_device *peer_dev = tcf_mirred_dev(a);
+
+			if (priv->netdev->netdev_ops == peer_dev->netdev_ops &&
+			    same_hw_devs(priv, netdev_priv(peer_dev))) {
+				parse_attr->mirred_ifindex = peer_dev->ifindex;
+				flow->flags |= MLX5E_TC_FLOW_HAIRPIN;
+				attr->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
+						MLX5_FLOW_CONTEXT_ACTION_COUNT;
+			} else {
+				netdev_warn(priv->netdev, "device %s not on same HW, can't offload\n",
+					    peer_dev->name);
+				return -EINVAL;
+			}
+			continue;
+		}
+
 		if (is_tcf_skbedit_mark(a)) {
 			u32 mark = tcf_skbedit_mark(a);
 
@@ -1507,6 +1763,7 @@ static int mlx5e_route_lookup_ipv4(struct mlx5e_priv *priv,
 				   int *out_ttl)
 {
 	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
+	struct mlx5e_rep_priv *uplink_rpriv;
 	struct rtable *rt;
 	struct neighbour *n = NULL;
 
@@ -1520,9 +1777,10 @@ static int mlx5e_route_lookup_ipv4(struct mlx5e_priv *priv,
 #else
 	return -EOPNOTSUPP;
 #endif
+	uplink_rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH);
 	/* if the egress device isn't on the same HW e-switch, we use the uplink */
 	if (!switchdev_port_same_parent_id(priv->netdev, rt->dst.dev))
-		*out_dev = mlx5_eswitch_get_uplink_netdev(esw);
+		*out_dev = uplink_rpriv->netdev;
 	else
 		*out_dev = rt->dst.dev;
 
@@ -1547,6 +1805,7 @@ static int mlx5e_route_lookup_ipv6(struct mlx5e_priv *priv,
 	struct dst_entry *dst;
 
 #if IS_ENABLED(CONFIG_INET) && IS_ENABLED(CONFIG_IPV6)
+	struct mlx5e_rep_priv *uplink_rpriv;
 	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
 	int ret;
 
@@ -1557,9 +1816,10 @@ static int mlx5e_route_lookup_ipv6(struct mlx5e_priv *priv,
 
 	*out_ttl = ip6_dst_hoplimit(dst);
 
+	uplink_rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH);
 	/* if the egress device isn't on the same HW e-switch, we use the uplink */
 	if (!switchdev_port_same_parent_id(priv->netdev, dst->dev))
-		*out_dev = mlx5_eswitch_get_uplink_netdev(esw);
+		*out_dev = uplink_rpriv->netdev;
 	else
 		*out_dev = dst->dev;
 #else
@@ -1859,7 +2119,9 @@ static int mlx5e_attach_encap(struct mlx5e_priv *priv,
 			      struct mlx5e_tc_flow *flow)
 {
 	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
-	struct net_device *up_dev = mlx5_eswitch_get_uplink_netdev(esw);
+	struct mlx5e_rep_priv *uplink_rpriv = mlx5_eswitch_get_uplink_priv(esw,
+									   REP_ETH);
+	struct net_device *up_dev = uplink_rpriv->netdev;
 	unsigned short family = ip_tunnel_info_af(tun_info);
 	struct mlx5e_priv *up_priv = netdev_priv(up_dev);
 	struct mlx5_esw_flow_attr *attr = flow->esw_attr;
@@ -1982,11 +2244,10 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
 		}
 
 		if (is_tcf_mirred_egress_redirect(a)) {
-			int ifindex = tcf_mirred_ifindex(a);
 			struct net_device *out_dev;
 			struct mlx5e_priv *out_priv;
 
-			out_dev = __dev_get_by_index(dev_net(priv->netdev), ifindex);
+			out_dev = tcf_mirred_dev(a);
 
 			if (switchdev_port_same_parent_id(priv->netdev,
 							  out_dev)) {
@@ -1996,7 +2257,7 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
 				rpriv = out_priv->ppriv;
 				attr->out_rep = rpriv->rep;
 			} else if (encap) {
-				parse_attr->mirred_ifindex = ifindex;
+				parse_attr->mirred_ifindex = out_dev->ifindex;
 				parse_attr->tun_info = *info;
 				attr->parse_attr = parse_attr;
 				attr->action |= MLX5_FLOW_CONTEXT_ACTION_ENCAP |
@@ -2182,6 +2443,7 @@ int mlx5e_tc_init(struct mlx5e_priv *priv)
 	struct mlx5e_tc_table *tc = &priv->fs.tc;
 
 	hash_init(tc->mod_hdr_tbl);
+	hash_init(tc->hairpin_tbl);
 
 	tc->ht_params = mlx5e_tc_flow_ht_params;
 	return rhashtable_init(&tc->ht, &tc->ht_params);
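
The hairpin decision above hinges on same_hw_devs(), which treats two
netdevs as peers on the same device when the 16-bit id built from their
PCI bus and slot numbers matches. A minimal userspace sketch of that
comparison (the bus/slot arguments are hypothetical stand-ins for
pdev->bus->number and PCI_SLOT(pdev->devfn)):

	#include <stdbool.h>
	#include <stdint.h>

	/* mirrors (bus << 8) | PCI_SLOT(devfn) from same_hw_devs() */
	static uint16_t pci_func_id(uint8_t bus, uint8_t slot)
	{
		return (uint16_t)((bus << 8) | slot);
	}

	static bool same_hw(uint8_t bus_a, uint8_t slot_a,
			    uint8_t bus_b, uint8_t slot_b)
	{
		return pci_func_id(bus_a, slot_a) == pci_func_id(bus_b, slot_b);
	}

Two functions of the same adapter share a bus/slot pair and compare
equal here; ports of a different adapter do not, so the redirect is
rejected rather than mis-offloaded.
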
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c
index ab92298..f292bb3 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c
@@ -78,8 +78,14 @@ int mlx5e_napi_poll(struct napi_struct *napi, int budget)
 	for (i = 0; i < c->num_tc; i++)
 		mlx5e_cq_arm(&c->sq[i].cq);
 
-	if (MLX5E_TEST_BIT(c->rq.state, MLX5E_RQ_STATE_AM))
-		mlx5e_rx_am(&c->rq);
+	if (MLX5E_TEST_BIT(c->rq.state, MLX5E_RQ_STATE_AM)) {
+		struct net_dim_sample dim_sample;
+		net_dim_sample(c->rq.cq.event_ctr,
+			       c->rq.stats.packets,
+			       c->rq.stats.bytes,
+			       &dim_sample);
+		net_dim(&c->rq.dim, dim_sample);
+	}
 
 	mlx5e_cq_arm(&c->rq.cq);
 	mlx5e_cq_arm(&c->icosq.cq);
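
The poll loop above replaces the driver-private mlx5e_rx_am() with the
generic net_dim library: each NAPI cycle publishes cumulative event,
packet, and byte counters, and net_dim() diffs successive samples to
decide whether to move the interrupt moderation profile. A
kernel-context sketch of the pattern, assuming the net_dim.h API used
above (not runnable standalone):

	struct net_dim_sample sample;

	/* cumulative counters for this ring since creation */
	net_dim_sample(events, packets, bytes, &sample);

	/* compares against the previous sample; may schedule dim->work,
	 * whose handler applies the new moderation profile */
	net_dim(&dim, sample);
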
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
index bbb140f..5ecf2cd 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
@@ -37,6 +37,7 @@
 #include <linux/mlx5/fs.h>
 #include "mlx5_core.h"
 #include "eswitch.h"
+#include "fs_core.h"
 
 #define UPLINK_VPORT 0xFFFF
 
@@ -867,9 +868,10 @@ static int esw_vport_enable_egress_acl(struct mlx5_eswitch *esw,
 	esw_debug(dev, "Create vport[%d] egress ACL log_max_size(%d)\n",
 		  vport->vport, MLX5_CAP_ESW_EGRESS_ACL(dev, log_max_ft_size));
 
-	root_ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_ESW_EGRESS);
+	root_ns = mlx5_get_flow_vport_acl_namespace(dev, MLX5_FLOW_NAMESPACE_ESW_EGRESS,
+						    vport->vport);
 	if (!root_ns) {
-		esw_warn(dev, "Failed to get E-Switch egress flow namespace\n");
+		esw_warn(dev, "Failed to get E-Switch egress flow namespace for vport (%d)\n", vport->vport);
 		return -EOPNOTSUPP;
 	}
 
@@ -984,9 +986,10 @@ static int esw_vport_enable_ingress_acl(struct mlx5_eswitch *esw,
 	esw_debug(dev, "Create vport[%d] ingress ACL log_max_size(%d)\n",
 		  vport->vport, MLX5_CAP_ESW_INGRESS_ACL(dev, log_max_ft_size));
 
-	root_ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_ESW_INGRESS);
+	root_ns = mlx5_get_flow_vport_acl_namespace(dev, MLX5_FLOW_NAMESPACE_ESW_INGRESS,
+						    vport->vport);
 	if (!root_ns) {
-		esw_warn(dev, "Failed to get E-Switch ingress flow namespace\n");
+		esw_warn(dev, "Failed to get E-Switch ingress flow namespace for vport (%d)\n", vport->vport);
 		return -EOPNOTSUPP;
 	}
 
@@ -1121,8 +1124,12 @@ static void esw_vport_disable_ingress_acl(struct mlx5_eswitch *esw,
 static int esw_vport_ingress_config(struct mlx5_eswitch *esw,
 				    struct mlx5_vport *vport)
 {
+	struct mlx5_fc *counter = vport->ingress.drop_counter;
+	struct mlx5_flow_destination drop_ctr_dst = {0};
+	struct mlx5_flow_destination *dst = NULL;
 	struct mlx5_flow_act flow_act = {0};
 	struct mlx5_flow_spec *spec;
+	int dest_num = 0;
 	int err = 0;
 	u8 *smac_v;
 
@@ -1186,9 +1193,18 @@ static int esw_vport_ingress_config(struct mlx5_eswitch *esw,
 
 	memset(spec, 0, sizeof(*spec));
 	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_DROP;
+
+	/* Attach drop flow counter */
+	if (counter) {
+		flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_COUNT;
+		drop_ctr_dst.type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
+		drop_ctr_dst.counter = counter;
+		dst = &drop_ctr_dst;
+		dest_num++;
+	}
 	vport->ingress.drop_rule =
 		mlx5_add_flow_rules(vport->ingress.acl, spec,
-				    &flow_act, NULL, 0);
+				    &flow_act, dst, dest_num);
 	if (IS_ERR(vport->ingress.drop_rule)) {
 		err = PTR_ERR(vport->ingress.drop_rule);
 		esw_warn(esw->dev,
@@ -1208,8 +1224,12 @@ static int esw_vport_ingress_config(struct mlx5_eswitch *esw,
 static int esw_vport_egress_config(struct mlx5_eswitch *esw,
 				   struct mlx5_vport *vport)
 {
+	struct mlx5_fc *counter = vport->egress.drop_counter;
+	struct mlx5_flow_destination drop_ctr_dst = {0};
+	struct mlx5_flow_destination *dst = NULL;
 	struct mlx5_flow_act flow_act = {0};
 	struct mlx5_flow_spec *spec;
+	int dest_num = 0;
 	int err = 0;
 
 	esw_vport_cleanup_egress_rules(esw, vport);
@@ -1260,9 +1280,18 @@ static int esw_vport_egress_config(struct mlx5_eswitch *esw,
 	/* Drop others rule (star rule) */
 	memset(spec, 0, sizeof(*spec));
 	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_DROP;
+
+	/* Attach egress drop flow counter */
+	if (counter) {
+		flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_COUNT;
+		drop_ctr_dst.type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
+		drop_ctr_dst.counter = counter;
+		dst = &drop_ctr_dst;
+		dest_num++;
+	}
 	vport->egress.drop_rule =
 		mlx5_add_flow_rules(vport->egress.acl, spec,
-				    &flow_act, NULL, 0);
+				    &flow_act, dst, dest_num);
 	if (IS_ERR(vport->egress.drop_rule)) {
 		err = PTR_ERR(vport->egress.drop_rule);
 		esw_warn(esw->dev,
@@ -1290,7 +1319,7 @@ static int esw_create_tsar(struct mlx5_eswitch *esw)
 
 	err = mlx5_create_scheduling_element_cmd(dev,
 						 SCHEDULING_HIERARCHY_E_SWITCH,
-						 &tsar_ctx,
+						 tsar_ctx,
 						 &esw->qos.root_tsar_id);
 	if (err) {
 		esw_warn(esw->dev, "E-Switch create TSAR failed (%d)\n", err);
@@ -1333,20 +1362,20 @@ static int esw_vport_enable_qos(struct mlx5_eswitch *esw, int vport_num,
 	if (vport->qos.enabled)
 		return -EEXIST;
 
-	MLX5_SET(scheduling_context, &sched_ctx, element_type,
+	MLX5_SET(scheduling_context, sched_ctx, element_type,
 		 SCHEDULING_CONTEXT_ELEMENT_TYPE_VPORT);
-	vport_elem = MLX5_ADDR_OF(scheduling_context, &sched_ctx,
+	vport_elem = MLX5_ADDR_OF(scheduling_context, sched_ctx,
 				  element_attributes);
 	MLX5_SET(vport_element, vport_elem, vport_number, vport_num);
-	MLX5_SET(scheduling_context, &sched_ctx, parent_element_id,
+	MLX5_SET(scheduling_context, sched_ctx, parent_element_id,
 		 esw->qos.root_tsar_id);
-	MLX5_SET(scheduling_context, &sched_ctx, max_average_bw,
+	MLX5_SET(scheduling_context, sched_ctx, max_average_bw,
 		 initial_max_rate);
-	MLX5_SET(scheduling_context, &sched_ctx, bw_share, initial_bw_share);
+	MLX5_SET(scheduling_context, sched_ctx, bw_share, initial_bw_share);
 
 	err = mlx5_create_scheduling_element_cmd(dev,
 						 SCHEDULING_HIERARCHY_E_SWITCH,
-						 &sched_ctx,
+						 sched_ctx,
 						 &vport->qos.esw_tsar_ix);
 	if (err) {
 		esw_warn(esw->dev, "E-Switch create TSAR vport element failed (vport=%d,err=%d)\n",
@@ -1392,22 +1421,22 @@ static int esw_vport_qos_config(struct mlx5_eswitch *esw, int vport_num,
 	if (!vport->qos.enabled)
 		return -EIO;
 
-	MLX5_SET(scheduling_context, &sched_ctx, element_type,
+	MLX5_SET(scheduling_context, sched_ctx, element_type,
 		 SCHEDULING_CONTEXT_ELEMENT_TYPE_VPORT);
-	vport_elem = MLX5_ADDR_OF(scheduling_context, &sched_ctx,
+	vport_elem = MLX5_ADDR_OF(scheduling_context, sched_ctx,
 				  element_attributes);
 	MLX5_SET(vport_element, vport_elem, vport_number, vport_num);
-	MLX5_SET(scheduling_context, &sched_ctx, parent_element_id,
+	MLX5_SET(scheduling_context, sched_ctx, parent_element_id,
 		 esw->qos.root_tsar_id);
-	MLX5_SET(scheduling_context, &sched_ctx, max_average_bw,
+	MLX5_SET(scheduling_context, sched_ctx, max_average_bw,
 		 max_rate);
-	MLX5_SET(scheduling_context, &sched_ctx, bw_share, bw_share);
+	MLX5_SET(scheduling_context, sched_ctx, bw_share, bw_share);
 	bitmask |= MODIFY_SCHEDULING_ELEMENT_IN_MODIFY_BITMASK_MAX_AVERAGE_BW;
 	bitmask |= MODIFY_SCHEDULING_ELEMENT_IN_MODIFY_BITMASK_BW_SHARE;
 
 	err = mlx5_modify_scheduling_element_cmd(dev,
 						 SCHEDULING_HIERARCHY_E_SWITCH,
-						 &sched_ctx,
+						 sched_ctx,
 						 vport->qos.esw_tsar_ix,
 						 bitmask);
 	if (err) {
@@ -1455,6 +1484,41 @@ static void esw_apply_vport_conf(struct mlx5_eswitch *esw,
 	}
 }
 
+static void esw_vport_create_drop_counters(struct mlx5_vport *vport)
+{
+	struct mlx5_core_dev *dev = vport->dev;
+
+	if (MLX5_CAP_ESW_INGRESS_ACL(dev, flow_counter)) {
+		vport->ingress.drop_counter = mlx5_fc_create(dev, false);
+		if (IS_ERR(vport->ingress.drop_counter)) {
+			esw_warn(dev,
+				 "vport[%d] configure ingress drop rule counter failed\n",
+				 vport->vport);
+			vport->ingress.drop_counter = NULL;
+		}
+	}
+
+	if (MLX5_CAP_ESW_EGRESS_ACL(dev, flow_counter)) {
+		vport->egress.drop_counter = mlx5_fc_create(dev, false);
+		if (IS_ERR(vport->egress.drop_counter)) {
+			esw_warn(dev,
+				 "vport[%d] configure egress drop rule counter failed\n",
+				 vport->vport);
+			vport->egress.drop_counter = NULL;
+		}
+	}
+}
+
+static void esw_vport_destroy_drop_counters(struct mlx5_vport *vport)
+{
+	struct mlx5_core_dev *dev = vport->dev;
+
+	if (vport->ingress.drop_counter)
+		mlx5_fc_destroy(dev, vport->ingress.drop_counter);
+	if (vport->egress.drop_counter)
+		mlx5_fc_destroy(dev, vport->egress.drop_counter);
+}
+
 static void esw_enable_vport(struct mlx5_eswitch *esw, int vport_num,
 			     int enable_events)
 {
@@ -1481,6 +1545,10 @@ static void esw_enable_vport(struct mlx5_eswitch *esw, int vport_num,
 	if (!vport_num)
 		vport->info.trusted = true;
 
+	/* create steering drop counters for ingress and egress ACLs */
+	if (vport_num && esw->mode == SRIOV_LEGACY)
+		esw_vport_create_drop_counters(vport);
+
 	esw_vport_change_handle_locked(vport);
 
 	esw->enabled_vports++;
@@ -1519,6 +1587,7 @@ static void esw_disable_vport(struct mlx5_eswitch *esw, int vport_num)
 					      MLX5_ESW_VPORT_ADMIN_STATE_DOWN);
 		esw_vport_disable_egress_acl(esw, vport);
 		esw_vport_disable_ingress_acl(esw, vport);
+		esw_vport_destroy_drop_counters(vport);
 	}
 	esw->enabled_vports--;
 	mutex_unlock(&esw->state_lock);
@@ -1644,13 +1713,9 @@ int mlx5_eswitch_init(struct mlx5_core_dev *dev)
 		goto abort;
 	}
 
-	esw->offloads.vport_reps =
-		kzalloc(total_vports * sizeof(struct mlx5_eswitch_rep),
-			GFP_KERNEL);
-	if (!esw->offloads.vport_reps) {
-		err = -ENOMEM;
+	err = esw_offloads_init_reps(esw);
+	if (err)
 		goto abort;
-	}
 
 	hash_init(esw->offloads.encap_tbl);
 	hash_init(esw->offloads.mod_hdr_tbl);
@@ -1681,8 +1746,8 @@ int mlx5_eswitch_init(struct mlx5_core_dev *dev)
 abort:
 	if (esw->work_queue)
 		destroy_workqueue(esw->work_queue);
+	esw_offloads_cleanup_reps(esw);
 	kfree(esw->vports);
-	kfree(esw->offloads.vport_reps);
 	kfree(esw);
 	return err;
 }
@@ -1696,7 +1761,7 @@ void mlx5_eswitch_cleanup(struct mlx5_eswitch *esw)
 
 	esw->dev->priv.eswitch = NULL;
 	destroy_workqueue(esw->work_queue);
-	kfree(esw->offloads.vport_reps);
+	esw_offloads_cleanup_reps(esw);
 	kfree(esw->vports);
 	kfree(esw);
 }
@@ -2018,12 +2083,36 @@ int mlx5_eswitch_set_vport_rate(struct mlx5_eswitch *esw, int vport,
 	return err;
 }
 
+static void mlx5_eswitch_query_vport_drop_stats(struct mlx5_core_dev *dev,
+						int vport_idx,
+						struct mlx5_vport_drop_stats *stats)
+{
+	struct mlx5_eswitch *esw = dev->priv.eswitch;
+	struct mlx5_vport *vport = &esw->vports[vport_idx];
+	u64 bytes = 0;
+	u16 idx = 0;
+
+	if (!vport->enabled || esw->mode != SRIOV_LEGACY)
+		return;
+
+	if (vport->egress.drop_counter) {
+		idx = vport->egress.drop_counter->id;
+		mlx5_fc_query(dev, idx, &stats->rx_dropped, &bytes);
+	}
+
+	if (vport->ingress.drop_counter) {
+		idx = vport->ingress.drop_counter->id;
+		mlx5_fc_query(dev, idx, &stats->tx_dropped, &bytes);
+	}
+}
+
 int mlx5_eswitch_get_vport_stats(struct mlx5_eswitch *esw,
 				 int vport,
 				 struct ifla_vf_stats *vf_stats)
 {
 	int outlen = MLX5_ST_SZ_BYTES(query_vport_counter_out);
 	u32 in[MLX5_ST_SZ_DW(query_vport_counter_in)] = {0};
+	struct mlx5_vport_drop_stats stats = {0};
 	int err = 0;
 	u32 *out;
 
@@ -2078,6 +2167,10 @@ int mlx5_eswitch_get_vport_stats(struct mlx5_eswitch *esw,
 	vf_stats->broadcast =
 		MLX5_GET_CTR(out, received_eth_broadcast.packets);
 
+	mlx5_eswitch_query_vport_drop_stats(esw->dev, vport, &stats);
+	vf_stats->rx_dropped = stats.rx_dropped;
+	vf_stats->tx_dropped = stats.tx_dropped;
+
 free_out:
 	kvfree(out);
 	return err;
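
The drop counters added above follow one pattern: when the ACL table
supports flow counters, the catch-all drop rule gains a COUNT action
whose destination is the counter, and the counter is later read back
into the VF statistics. A kernel-context sketch of the attach side,
mirroring the ingress and egress paths in this hunk:

	struct mlx5_flow_destination dst = {0};
	struct mlx5_flow_act act = {0};
	struct mlx5_flow_handle *rule;

	act.action = MLX5_FLOW_CONTEXT_ACTION_DROP;
	if (counter) {			/* from mlx5_fc_create(dev, false) */
		act.action |= MLX5_FLOW_CONTEXT_ACTION_COUNT;
		dst.type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
		dst.counter = counter;
	}
	rule = mlx5_add_flow_rules(acl, spec, &act,
				   counter ? &dst : NULL, counter ? 1 : 0);

Reading it back is a single call, as in
mlx5_eswitch_query_vport_drop_stats() above:

	mlx5_fc_query(dev, counter->id, &packets, &bytes);
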
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
index 565c8b7..2fa0370 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
@@ -45,6 +45,11 @@ enum {
 	SRIOV_OFFLOADS
 };
 
+enum {
+	REP_ETH,
+	NUM_REP_TYPES,
+};
+
 #ifdef CONFIG_MLX5_ESWITCH
 
 #define MLX5_MAX_UC_PER_VPORT(dev) \
@@ -68,6 +73,7 @@ struct vport_ingress {
 	struct mlx5_flow_group *drop_grp;
 	struct mlx5_flow_handle  *allow_rule;
 	struct mlx5_flow_handle  *drop_rule;
+	struct mlx5_fc           *drop_counter;
 };
 
 struct vport_egress {
@@ -76,6 +82,12 @@ struct vport_egress {
 	struct mlx5_flow_group *drop_grp;
 	struct mlx5_flow_handle  *allowed_vlan;
 	struct mlx5_flow_handle  *drop_rule;
+	struct mlx5_fc           *drop_counter;
+};
+
+struct mlx5_vport_drop_stats {
+	u64 rx_dropped;
+	u64 tx_dropped;
 };
 
 struct mlx5_vport_info {
@@ -133,25 +145,21 @@ struct mlx5_eswitch_fdb {
 	};
 };
 
-struct mlx5_esw_sq {
-	struct mlx5_flow_handle	*send_to_vport_rule;
-	struct list_head	 list;
+struct mlx5_eswitch_rep;
+struct mlx5_eswitch_rep_if {
+	int		       (*load)(struct mlx5_core_dev *dev,
+				       struct mlx5_eswitch_rep *rep);
+	void		       (*unload)(struct mlx5_eswitch_rep *rep);
+	void			*priv;
+	bool		       valid;
 };
 
 struct mlx5_eswitch_rep {
-	int		       (*load)(struct mlx5_eswitch *esw,
-				       struct mlx5_eswitch_rep *rep);
-	void		       (*unload)(struct mlx5_eswitch *esw,
-					 struct mlx5_eswitch_rep *rep);
+	struct mlx5_eswitch_rep_if rep_if[NUM_REP_TYPES];
 	u16		       vport;
 	u8		       hw_id[ETH_ALEN];
-	struct net_device      *netdev;
-
-	struct mlx5_flow_handle *vport_rx_rule;
-	struct list_head       vport_sqs_list;
 	u16		       vlan;
 	u32		       vlan_refcount;
-	bool		       valid;
 };
 
 struct mlx5_esw_offload {
@@ -197,6 +205,8 @@ struct mlx5_eswitch {
 
 void esw_offloads_cleanup(struct mlx5_eswitch *esw, int nvports);
 int esw_offloads_init(struct mlx5_eswitch *esw, int nvports);
+void esw_offloads_cleanup_reps(struct mlx5_eswitch *esw);
+int esw_offloads_init_reps(struct mlx5_eswitch *esw);
 
 /* E-Switch API */
 int mlx5_eswitch_init(struct mlx5_core_dev *dev);
@@ -221,6 +231,10 @@ int mlx5_eswitch_get_vport_config(struct mlx5_eswitch *esw,
 int mlx5_eswitch_get_vport_stats(struct mlx5_eswitch *esw,
 				 int vport,
 				 struct ifla_vf_stats *vf_stats);
+struct mlx5_flow_handle *
+mlx5_eswitch_add_send_to_vport_rule(struct mlx5_eswitch *esw, int vport,
+				    u32 sqn);
+void mlx5_eswitch_del_send_to_vport_rule(struct mlx5_flow_handle *rule);
 
 struct mlx5_flow_spec;
 struct mlx5_esw_flow_attr;
@@ -257,12 +271,6 @@ struct mlx5_esw_flow_attr {
 	struct mlx5e_tc_flow_parse_attr *parse_attr;
 };
 
-int mlx5_eswitch_sqs2vport_start(struct mlx5_eswitch *esw,
-				 struct mlx5_eswitch_rep *rep,
-				 u16 *sqns_array, int sqns_num);
-void mlx5_eswitch_sqs2vport_stop(struct mlx5_eswitch *esw,
-				 struct mlx5_eswitch_rep *rep);
-
 int mlx5_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode);
 int mlx5_devlink_eswitch_mode_get(struct devlink *devlink, u16 *mode);
 int mlx5_devlink_eswitch_inline_mode_set(struct devlink *devlink, u8 mode);
@@ -272,10 +280,12 @@ int mlx5_devlink_eswitch_encap_mode_set(struct devlink *devlink, u8 encap);
 int mlx5_devlink_eswitch_encap_mode_get(struct devlink *devlink, u8 *encap);
 void mlx5_eswitch_register_vport_rep(struct mlx5_eswitch *esw,
 				     int vport_index,
-				     struct mlx5_eswitch_rep *rep);
+				     struct mlx5_eswitch_rep_if *rep_if,
+				     u8 rep_type);
 void mlx5_eswitch_unregister_vport_rep(struct mlx5_eswitch *esw,
-				       int vport_index);
-struct net_device *mlx5_eswitch_get_uplink_netdev(struct mlx5_eswitch *esw);
+				       int vport_index,
+				       u8 rep_type);
+void *mlx5_eswitch_get_uplink_priv(struct mlx5_eswitch *esw, u8 rep_type);
 
 int mlx5_eswitch_add_vlan_action(struct mlx5_eswitch *esw,
 				 struct mlx5_esw_flow_attr *attr);
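
With the header changes above, representor registration becomes per
(vport, rep type): a consumer fills a struct mlx5_eswitch_rep_if with
load/unload callbacks and an opaque priv, then registers it for one rep
type. A kernel-context sketch (my_rep_load, my_rep_unload, and my_priv
are hypothetical consumer-side names):

	struct mlx5_eswitch_rep_if rep_if = {
		.load   = my_rep_load,		/* called when offloads start */
		.unload = my_rep_unload,	/* called on teardown */
		.priv   = my_priv,
	};

	mlx5_eswitch_register_vport_rep(esw, vport_index, &rep_if, REP_ETH);
	/* ... */
	mlx5_eswitch_unregister_vport_rep(esw, vport_index, REP_ETH);

The uplink consumer's priv is what mlx5_eswitch_get_uplink_priv()
returns, replacing the old netdev-returning helper.
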
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
index 1143d80..99f583a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
@@ -130,7 +130,7 @@ static int esw_set_global_vlan_pop(struct mlx5_eswitch *esw, u8 val)
 	esw_debug(esw->dev, "%s applying global %s policy\n", __func__, val ? "pop" : "none");
 	for (vf_vport = 1; vf_vport < esw->enabled_vports; vf_vport++) {
 		rep = &esw->offloads.vport_reps[vf_vport];
-		if (!rep->valid)
+		if (!rep->rep_if[REP_ETH].valid)
 			continue;
 
 		err = __mlx5_eswitch_set_vport_vlan(esw, rep->vport, 0, 0, val);
@@ -302,7 +302,7 @@ int mlx5_eswitch_del_vlan_action(struct mlx5_eswitch *esw,
 	return err;
 }
 
-static struct mlx5_flow_handle *
+struct mlx5_flow_handle *
 mlx5_eswitch_add_send_to_vport_rule(struct mlx5_eswitch *esw, int vport, u32 sqn)
 {
 	struct mlx5_flow_act flow_act = {0};
@@ -339,57 +339,9 @@ mlx5_eswitch_add_send_to_vport_rule(struct mlx5_eswitch *esw, int vport, u32 sqn
 	return flow_rule;
 }
 
-void mlx5_eswitch_sqs2vport_stop(struct mlx5_eswitch *esw,
-				 struct mlx5_eswitch_rep *rep)
+void mlx5_eswitch_del_send_to_vport_rule(struct mlx5_flow_handle *rule)
 {
-	struct mlx5_esw_sq *esw_sq, *tmp;
-
-	if (esw->mode != SRIOV_OFFLOADS)
-		return;
-
-	list_for_each_entry_safe(esw_sq, tmp, &rep->vport_sqs_list, list) {
-		mlx5_del_flow_rules(esw_sq->send_to_vport_rule);
-		list_del(&esw_sq->list);
-		kfree(esw_sq);
-	}
-}
-
-int mlx5_eswitch_sqs2vport_start(struct mlx5_eswitch *esw,
-				 struct mlx5_eswitch_rep *rep,
-				 u16 *sqns_array, int sqns_num)
-{
-	struct mlx5_flow_handle *flow_rule;
-	struct mlx5_esw_sq *esw_sq;
-	int err;
-	int i;
-
-	if (esw->mode != SRIOV_OFFLOADS)
-		return 0;
-
-	for (i = 0; i < sqns_num; i++) {
-		esw_sq = kzalloc(sizeof(*esw_sq), GFP_KERNEL);
-		if (!esw_sq) {
-			err = -ENOMEM;
-			goto out_err;
-		}
-
-		/* Add re-inject rule to the PF/representor sqs */
-		flow_rule = mlx5_eswitch_add_send_to_vport_rule(esw,
-								rep->vport,
-								sqns_array[i]);
-		if (IS_ERR(flow_rule)) {
-			err = PTR_ERR(flow_rule);
-			kfree(esw_sq);
-			goto out_err;
-		}
-		esw_sq->send_to_vport_rule = flow_rule;
-		list_add(&esw_sq->list, &rep->vport_sqs_list);
-	}
-	return 0;
-
-out_err:
-	mlx5_eswitch_sqs2vport_stop(esw, rep);
-	return err;
+	mlx5_del_flow_rules(rule);
 }
 
 static int esw_add_fdb_miss_rule(struct mlx5_eswitch *esw)
@@ -732,10 +684,109 @@ static int esw_offloads_start(struct mlx5_eswitch *esw)
 	return err;
 }
 
-int esw_offloads_init(struct mlx5_eswitch *esw, int nvports)
+void esw_offloads_cleanup_reps(struct mlx5_eswitch *esw)
+{
+	kfree(esw->offloads.vport_reps);
+}
+
+int esw_offloads_init_reps(struct mlx5_eswitch *esw)
+{
+	int total_vfs = MLX5_TOTAL_VPORTS(esw->dev);
+	struct mlx5_core_dev *dev = esw->dev;
+	struct mlx5_esw_offload *offloads;
+	struct mlx5_eswitch_rep *rep;
+	u8 hw_id[ETH_ALEN];
+	int vport;
+
+	esw->offloads.vport_reps = kcalloc(total_vfs,
+					   sizeof(struct mlx5_eswitch_rep),
+					   GFP_KERNEL);
+	if (!esw->offloads.vport_reps)
+		return -ENOMEM;
+
+	offloads = &esw->offloads;
+	mlx5_query_nic_vport_mac_address(dev, 0, hw_id);
+
+	for (vport = 0; vport < total_vfs; vport++) {
+		rep = &offloads->vport_reps[vport];
+
+		rep->vport = vport;
+		ether_addr_copy(rep->hw_id, hw_id);
+	}
+
+	offloads->vport_reps[0].vport = FDB_UPLINK_VPORT;
+
+	return 0;
+}
+
+static void esw_offloads_unload_reps_type(struct mlx5_eswitch *esw, int nvports,
+					  u8 rep_type)
 {
 	struct mlx5_eswitch_rep *rep;
 	int vport;
+
+	for (vport = nvports - 1; vport >= 0; vport--) {
+		rep = &esw->offloads.vport_reps[vport];
+		if (!rep->rep_if[rep_type].valid)
+			continue;
+
+		rep->rep_if[rep_type].unload(rep);
+	}
+}
+
+static void esw_offloads_unload_reps(struct mlx5_eswitch *esw, int nvports)
+{
+	u8 rep_type = NUM_REP_TYPES;
+
+	while (rep_type-- > 0)
+		esw_offloads_unload_reps_type(esw, nvports, rep_type);
+}
+
+static int esw_offloads_load_reps_type(struct mlx5_eswitch *esw, int nvports,
+				       u8 rep_type)
+{
+	struct mlx5_eswitch_rep *rep;
+	int vport;
+	int err;
+
+	for (vport = 0; vport < nvports; vport++) {
+		rep = &esw->offloads.vport_reps[vport];
+		if (!rep->rep_if[rep_type].valid)
+			continue;
+
+		err = rep->rep_if[rep_type].load(esw->dev, rep);
+		if (err)
+			goto err_reps;
+	}
+
+	return 0;
+
+err_reps:
+	esw_offloads_unload_reps_type(esw, vport, rep_type);
+	return err;
+}
+
+static int esw_offloads_load_reps(struct mlx5_eswitch *esw, int nvports)
+{
+	u8 rep_type = 0;
+	int err;
+
+	for (rep_type = 0; rep_type < NUM_REP_TYPES; rep_type++) {
+		err = esw_offloads_load_reps_type(esw, nvports, rep_type);
+		if (err)
+			goto err_reps;
+	}
+
+	return err;
+
+err_reps:
+	while (rep_type-- > 0)
+		esw_offloads_unload_reps_type(esw, nvports, rep_type);
+	return err;
+}
+
+int esw_offloads_init(struct mlx5_eswitch *esw, int nvports)
+{
 	int err;
 
 	/* disable PF RoCE so missed packets don't go through RoCE steering */
@@ -755,25 +806,13 @@ int esw_offloads_init(struct mlx5_eswitch *esw, int nvports)
 	if (err)
 		goto create_fg_err;
 
-	for (vport = 0; vport < nvports; vport++) {
-		rep = &esw->offloads.vport_reps[vport];
-		if (!rep->valid)
-			continue;
-
-		err = rep->load(esw, rep);
-		if (err)
-			goto err_reps;
-	}
+	err = esw_offloads_load_reps(esw, nvports);
+	if (err)
+		goto err_reps;
 
 	return 0;
 
 err_reps:
-	for (vport--; vport >= 0; vport--) {
-		rep = &esw->offloads.vport_reps[vport];
-		if (!rep->valid)
-			continue;
-		rep->unload(esw, rep);
-	}
 	esw_destroy_vport_rx_group(esw);
 
 create_fg_err:
@@ -814,16 +853,7 @@ static int esw_offloads_stop(struct mlx5_eswitch *esw)
 
 void esw_offloads_cleanup(struct mlx5_eswitch *esw, int nvports)
 {
-	struct mlx5_eswitch_rep *rep;
-	int vport;
-
-	for (vport = nvports - 1; vport >= 0; vport--) {
-		rep = &esw->offloads.vport_reps[vport];
-		if (!rep->valid)
-			continue;
-		rep->unload(esw, rep);
-	}
-
+	esw_offloads_unload_reps(esw, nvports);
 	esw_destroy_vport_rx_group(esw);
 	esw_destroy_offloads_table(esw);
 	esw_destroy_offloads_fdb_tables(esw);
@@ -1120,27 +1150,23 @@ int mlx5_devlink_eswitch_encap_mode_get(struct devlink *devlink, u8 *encap)
 
 void mlx5_eswitch_register_vport_rep(struct mlx5_eswitch *esw,
 				     int vport_index,
-				     struct mlx5_eswitch_rep *__rep)
+				     struct mlx5_eswitch_rep_if *__rep_if,
+				     u8 rep_type)
 {
 	struct mlx5_esw_offload *offloads = &esw->offloads;
-	struct mlx5_eswitch_rep *rep;
+	struct mlx5_eswitch_rep_if *rep_if;
 
-	rep = &offloads->vport_reps[vport_index];
+	rep_if = &offloads->vport_reps[vport_index].rep_if[rep_type];
 
-	memset(rep, 0, sizeof(*rep));
+	rep_if->load   = __rep_if->load;
+	rep_if->unload = __rep_if->unload;
+	rep_if->priv = __rep_if->priv;
 
-	rep->load   = __rep->load;
-	rep->unload = __rep->unload;
-	rep->vport  = __rep->vport;
-	rep->netdev = __rep->netdev;
-	ether_addr_copy(rep->hw_id, __rep->hw_id);
-
-	INIT_LIST_HEAD(&rep->vport_sqs_list);
-	rep->valid = true;
+	rep_if->valid = true;
 }
 
 void mlx5_eswitch_unregister_vport_rep(struct mlx5_eswitch *esw,
-				       int vport_index)
+				       int vport_index, u8 rep_type)
 {
 	struct mlx5_esw_offload *offloads = &esw->offloads;
 	struct mlx5_eswitch_rep *rep;
@@ -1148,17 +1174,17 @@ void mlx5_eswitch_unregister_vport_rep(struct mlx5_eswitch *esw,
 	rep = &offloads->vport_reps[vport_index];
 
 	if (esw->mode == SRIOV_OFFLOADS && esw->vports[vport_index].enabled)
-		rep->unload(esw, rep);
+		rep->rep_if[rep_type].unload(rep);
 
-	rep->valid = false;
+	rep->rep_if[rep_type].valid = false;
 }
 
-struct net_device *mlx5_eswitch_get_uplink_netdev(struct mlx5_eswitch *esw)
+void *mlx5_eswitch_get_uplink_priv(struct mlx5_eswitch *esw, u8 rep_type)
 {
 #define UPLINK_REP_INDEX 0
 	struct mlx5_esw_offload *offloads = &esw->offloads;
 	struct mlx5_eswitch_rep *rep;
 
 	rep = &offloads->vport_reps[UPLINK_REP_INDEX];
-	return rep->netdev;
+	return rep->rep_if[rep_type].priv;
 }
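
esw_offloads_load_reps() above loads one rep type across all vports
before moving to the next type, and both failure paths unwind in exact
reverse order, so a partially loaded type is torn down before any
earlier type. A small runnable userspace sketch of the same unwind
idiom:

	#include <stdio.h>

	#define NUM_TYPES 2

	static int load_type(int t)    { printf("load %d\n", t); return 0; }
	static void unload_type(int t) { printf("unload %d\n", t); }

	static int load_all(void)
	{
		int t, err;

		for (t = 0; t < NUM_TYPES; t++) {
			err = load_type(t);
			if (err)
				goto unwind;
		}
		return 0;

	unwind:
		while (t-- > 0)	/* tear down only what was loaded */
			unload_type(t);
		return err;
	}
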
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
index dfaad9e..cc4f6ab 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
@@ -2026,16 +2026,6 @@ struct mlx5_flow_namespace *mlx5_get_flow_namespace(struct mlx5_core_dev *dev,
 			return &steering->fdb_root_ns->ns;
 		else
 			return NULL;
-	case MLX5_FLOW_NAMESPACE_ESW_EGRESS:
-		if (steering->esw_egress_root_ns)
-			return &steering->esw_egress_root_ns->ns;
-		else
-			return NULL;
-	case MLX5_FLOW_NAMESPACE_ESW_INGRESS:
-		if (steering->esw_ingress_root_ns)
-			return &steering->esw_ingress_root_ns->ns;
-		else
-			return NULL;
 	case MLX5_FLOW_NAMESPACE_SNIFFER_RX:
 		if (steering->sniffer_rx_root_ns)
 			return &steering->sniffer_rx_root_ns->ns;
@@ -2066,6 +2056,33 @@ struct mlx5_flow_namespace *mlx5_get_flow_namespace(struct mlx5_core_dev *dev,
 }
 EXPORT_SYMBOL(mlx5_get_flow_namespace);
 
+struct mlx5_flow_namespace *mlx5_get_flow_vport_acl_namespace(struct mlx5_core_dev *dev,
+							      enum mlx5_flow_namespace_type type,
+							      int vport)
+{
+	struct mlx5_flow_steering *steering = dev->priv.steering;
+
+	if (!steering || vport >= MLX5_TOTAL_VPORTS(dev))
+		return NULL;
+
+	switch (type) {
+	case MLX5_FLOW_NAMESPACE_ESW_EGRESS:
+		if (steering->esw_egress_root_ns &&
+		    steering->esw_egress_root_ns[vport])
+			return &steering->esw_egress_root_ns[vport]->ns;
+		else
+			return NULL;
+	case MLX5_FLOW_NAMESPACE_ESW_INGRESS:
+		if (steering->esw_ingress_root_ns &&
+		    steering->esw_ingress_root_ns[vport])
+			return &steering->esw_ingress_root_ns[vport]->ns;
+		else
+			return NULL;
+	default:
+		return NULL;
+	}
+}
+
 static struct fs_prio *fs_create_prio(struct mlx5_flow_namespace *ns,
 				      unsigned int prio, int num_levels)
 {
@@ -2343,13 +2360,41 @@ static void cleanup_root_ns(struct mlx5_flow_root_namespace *root_ns)
 	clean_tree(&root_ns->ns.node);
 }
 
+static void cleanup_egress_acls_root_ns(struct mlx5_core_dev *dev)
+{
+	struct mlx5_flow_steering *steering = dev->priv.steering;
+	int i;
+
+	if (!steering->esw_egress_root_ns)
+		return;
+
+	for (i = 0; i < MLX5_TOTAL_VPORTS(dev); i++)
+		cleanup_root_ns(steering->esw_egress_root_ns[i]);
+
+	kfree(steering->esw_egress_root_ns);
+}
+
+static void cleanup_ingress_acls_root_ns(struct mlx5_core_dev *dev)
+{
+	struct mlx5_flow_steering *steering = dev->priv.steering;
+	int i;
+
+	if (!steering->esw_ingress_root_ns)
+		return;
+
+	for (i = 0; i < MLX5_TOTAL_VPORTS(dev); i++)
+		cleanup_root_ns(steering->esw_ingress_root_ns[i]);
+
+	kfree(steering->esw_ingress_root_ns);
+}
+
 void mlx5_cleanup_fs(struct mlx5_core_dev *dev)
 {
 	struct mlx5_flow_steering *steering = dev->priv.steering;
 
 	cleanup_root_ns(steering->root_ns);
-	cleanup_root_ns(steering->esw_egress_root_ns);
-	cleanup_root_ns(steering->esw_ingress_root_ns);
+	cleanup_egress_acls_root_ns(dev);
+	cleanup_ingress_acls_root_ns(dev);
 	cleanup_root_ns(steering->fdb_root_ns);
 	cleanup_root_ns(steering->sniffer_rx_root_ns);
 	cleanup_root_ns(steering->sniffer_tx_root_ns);
@@ -2418,34 +2463,86 @@ static int init_fdb_root_ns(struct mlx5_flow_steering *steering)
 	return PTR_ERR(prio);
 }
 
-static int init_ingress_acl_root_ns(struct mlx5_flow_steering *steering)
+static int init_egress_acl_root_ns(struct mlx5_flow_steering *steering, int vport)
 {
 	struct fs_prio *prio;
 
-	steering->esw_egress_root_ns = create_root_ns(steering, FS_FT_ESW_EGRESS_ACL);
-	if (!steering->esw_egress_root_ns)
+	steering->esw_egress_root_ns[vport] = create_root_ns(steering, FS_FT_ESW_EGRESS_ACL);
+	if (!steering->esw_egress_root_ns[vport])
 		return -ENOMEM;
 
 	/* create 1 prio */
-	prio = fs_create_prio(&steering->esw_egress_root_ns->ns, 0,
-			      MLX5_TOTAL_VPORTS(steering->dev));
+	prio = fs_create_prio(&steering->esw_egress_root_ns[vport]->ns, 0, 1);
 	return PTR_ERR_OR_ZERO(prio);
 }
 
-static int init_egress_acl_root_ns(struct mlx5_flow_steering *steering)
+static int init_ingress_acl_root_ns(struct mlx5_flow_steering *steering, int vport)
 {
 	struct fs_prio *prio;
 
-	steering->esw_ingress_root_ns = create_root_ns(steering, FS_FT_ESW_INGRESS_ACL);
-	if (!steering->esw_ingress_root_ns)
+	steering->esw_ingress_root_ns[vport] = create_root_ns(steering, FS_FT_ESW_INGRESS_ACL);
+	if (!steering->esw_ingress_root_ns[vport])
 		return -ENOMEM;
 
 	/* create 1 prio */
-	prio = fs_create_prio(&steering->esw_ingress_root_ns->ns, 0,
-			      MLX5_TOTAL_VPORTS(steering->dev));
+	prio = fs_create_prio(&steering->esw_ingress_root_ns[vport]->ns, 0, 1);
 	return PTR_ERR_OR_ZERO(prio);
 }
 
+static int init_egress_acls_root_ns(struct mlx5_core_dev *dev)
+{
+	struct mlx5_flow_steering *steering = dev->priv.steering;
+	int err;
+	int i;
+
+	steering->esw_egress_root_ns = kcalloc(MLX5_TOTAL_VPORTS(dev),
+					       sizeof(*steering->esw_egress_root_ns),
+					       GFP_KERNEL);
+	if (!steering->esw_egress_root_ns)
+		return -ENOMEM;
+
+	for (i = 0; i < MLX5_TOTAL_VPORTS(dev); i++) {
+		err = init_egress_acl_root_ns(steering, i);
+		if (err)
+			goto cleanup_root_ns;
+	}
+
+	return 0;
+
+cleanup_root_ns:
+	for (i--; i >= 0; i--)
+		cleanup_root_ns(steering->esw_egress_root_ns[i]);
+	kfree(steering->esw_egress_root_ns);
+	return err;
+}
+
+static int init_ingress_acls_root_ns(struct mlx5_core_dev *dev)
+{
+	struct mlx5_flow_steering *steering = dev->priv.steering;
+	int err;
+	int i;
+
+	steering->esw_ingress_root_ns = kcalloc(MLX5_TOTAL_VPORTS(dev),
+						sizeof(*steering->esw_ingress_root_ns),
+						GFP_KERNEL);
+	if (!steering->esw_ingress_root_ns)
+		return -ENOMEM;
+
+	for (i = 0; i < MLX5_TOTAL_VPORTS(dev); i++) {
+		err = init_ingress_acl_root_ns(steering, i);
+		if (err)
+			goto cleanup_root_ns;
+	}
+
+	return 0;
+
+cleanup_root_ns:
+	for (i--; i >= 0; i--)
+		cleanup_root_ns(steering->esw_ingress_root_ns[i]);
+	kfree(steering->esw_ingress_root_ns);
+	return err;
+}
+
 int mlx5_init_fs(struct mlx5_core_dev *dev)
 {
 	struct mlx5_flow_steering *steering;
@@ -2488,12 +2585,12 @@ int mlx5_init_fs(struct mlx5_core_dev *dev)
 				goto err;
 		}
 		if (MLX5_CAP_ESW_EGRESS_ACL(dev, ft_support)) {
-			err = init_egress_acl_root_ns(steering);
+			err = init_egress_acls_root_ns(dev);
 			if (err)
 				goto err;
 		}
 		if (MLX5_CAP_ESW_INGRESS_ACL(dev, ft_support)) {
-			err = init_ingress_acl_root_ns(steering);
+			err = init_ingress_acls_root_ns(dev);
 			if (err)
 				goto err;
 		}
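
The ACL namespaces are now arrays indexed by vport, so callers ask for a
specific vport's namespace through the new helper instead of the generic
mlx5_get_flow_namespace(). A kernel-context sketch of the lookup as done
by the eswitch ACL setup earlier in this patch:

	struct mlx5_flow_namespace *ns;

	ns = mlx5_get_flow_vport_acl_namespace(dev,
					       MLX5_FLOW_NAMESPACE_ESW_EGRESS,
					       vport_num);
	if (!ns)	/* no steering support, or vport out of range */
		return -EOPNOTSUPP;
	/* create the vport's egress ACL table on ns */
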
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h
index 397d24a..0526270 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h
@@ -71,8 +71,8 @@ struct mlx5_flow_steering {
 	struct kmem_cache               *ftes_cache;
 	struct mlx5_flow_root_namespace *root_ns;
 	struct mlx5_flow_root_namespace *fdb_root_ns;
-	struct mlx5_flow_root_namespace *esw_egress_root_ns;
-	struct mlx5_flow_root_namespace *esw_ingress_root_ns;
+	struct mlx5_flow_root_namespace **esw_egress_root_ns;
+	struct mlx5_flow_root_namespace **esw_ingress_root_ns;
 	struct mlx5_flow_root_namespace	*sniffer_tx_root_ns;
 	struct mlx5_flow_root_namespace	*sniffer_rx_root_ns;
 };
@@ -233,6 +233,8 @@ void mlx5_fc_queue_stats_work(struct mlx5_core_dev *dev,
 			      unsigned long delay);
 void mlx5_fc_update_sampling_interval(struct mlx5_core_dev *dev,
 				      unsigned long interval);
+int mlx5_fc_query(struct mlx5_core_dev *dev, u16 id,
+		  u64 *packets, u64 *bytes);
 
 int mlx5_init_fs(struct mlx5_core_dev *dev);
 void mlx5_cleanup_fs(struct mlx5_core_dev *dev);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c
index 89d1f865..b7ab929d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c
@@ -312,6 +312,12 @@ void mlx5_cleanup_fc_stats(struct mlx5_core_dev *dev)
 	}
 }
 
+int mlx5_fc_query(struct mlx5_core_dev *dev, u16 id,
+		  u64 *packets, u64 *bytes)
+{
+	return mlx5_cmd_fc_query(dev, id, packets, bytes);
+}
+
 void mlx5_fc_query_cached(struct mlx5_fc *counter,
 			  u64 *bytes, u64 *packets, u64 *lastuse)
 {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ethtool.c
index 6f338a9..90cb50f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ethtool.c
@@ -254,4 +254,5 @@ const struct ethtool_ops mlx5i_ethtool_ops = {
 const struct ethtool_ops mlx5i_pkey_ethtool_ops = {
 	.get_drvinfo        = mlx5i_get_drvinfo,
 	.get_link           = ethtool_op_get_link,
+	.get_ts_info        = mlx5i_get_ts_info,
 };
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c
index 8812d72..3b2363e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c
@@ -41,7 +41,6 @@
 static int mlx5i_open(struct net_device *netdev);
 static int mlx5i_close(struct net_device *netdev);
 static int mlx5i_change_mtu(struct net_device *netdev, int new_mtu);
-static int mlx5i_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd);
 
 static const struct net_device_ops mlx5i_netdev_ops = {
 	.ndo_open                = mlx5i_open,
@@ -396,7 +395,7 @@ int mlx5i_dev_init(struct net_device *dev)
 	return 0;
 }
 
-static int mlx5i_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
+int mlx5i_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
 {
 	struct mlx5e_priv *priv = mlx5i_epriv(dev);
 
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.h b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.h
index 4900802..6d9053b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.h
@@ -76,9 +76,10 @@ int mlx5i_pkey_del_qpn(struct net_device *netdev, u32 qpn);
 /* Get the net-device corresponding to the given underlay QPN */
 struct net_device *mlx5i_pkey_get_netdev(struct net_device *netdev, u32 qpn);
 
-/* Shared ndo functionts */
+/* Shared ndo functions */
 int mlx5i_dev_init(struct net_device *dev);
 void mlx5i_dev_cleanup(struct net_device *dev);
+int mlx5i_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd);
 
 /* Parent profile functions */
 void mlx5i_init(struct mlx5_core_dev *mdev,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib_vlan.c b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib_vlan.c
index 531b02c..b69e9d8 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib_vlan.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib_vlan.c
@@ -140,6 +140,7 @@ static int mlx5i_pkey_close(struct net_device *netdev);
 static int mlx5i_pkey_dev_init(struct net_device *dev);
 static void mlx5i_pkey_dev_cleanup(struct net_device *netdev);
 static int mlx5i_pkey_change_mtu(struct net_device *netdev, int new_mtu);
+static int mlx5i_pkey_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd);
 
 static const struct net_device_ops mlx5i_pkey_netdev_ops = {
 	.ndo_open                = mlx5i_pkey_open,
@@ -147,6 +148,7 @@ static const struct net_device_ops mlx5i_pkey_netdev_ops = {
 	.ndo_init                = mlx5i_pkey_dev_init,
 	.ndo_uninit              = mlx5i_pkey_dev_cleanup,
 	.ndo_change_mtu          = mlx5i_pkey_change_mtu,
+	.ndo_do_ioctl            = mlx5i_pkey_ioctl,
 };
 
 /* Child NDOs */
@@ -174,6 +176,11 @@ static int mlx5i_pkey_dev_init(struct net_device *dev)
 	return mlx5i_dev_init(dev);
 }
 
+static int mlx5i_pkey_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
+{
+	return mlx5i_ioctl(dev, ifr, cmd);
+}
+
 static void mlx5i_pkey_dev_cleanup(struct net_device *netdev)
 {
 	return mlx5i_dev_cleanup(netdev);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/transobj.c b/drivers/net/ethernet/mellanox/mlx5/core/transobj.c
index 5e128d7..a09ebba 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/transobj.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/transobj.c
@@ -398,3 +398,187 @@ void mlx5_core_destroy_rqt(struct mlx5_core_dev *dev, u32 rqtn)
 	mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
 }
 EXPORT_SYMBOL(mlx5_core_destroy_rqt);
+
+static int mlx5_hairpin_create_rq(struct mlx5_core_dev *mdev,
+				  struct mlx5_hairpin_params *params, u32 *rqn)
+{
+	u32 in[MLX5_ST_SZ_DW(create_rq_in)] = {0};
+	void *rqc, *wq;
+
+	rqc = MLX5_ADDR_OF(create_rq_in, in, ctx);
+	wq  = MLX5_ADDR_OF(rqc, rqc, wq);
+
+	MLX5_SET(rqc, rqc, hairpin, 1);
+	MLX5_SET(rqc, rqc, state, MLX5_RQC_STATE_RST);
+	MLX5_SET(rqc, rqc, counter_set_id, params->q_counter);
+
+	MLX5_SET(wq, wq, log_hairpin_data_sz, params->log_data_size);
+
+	return mlx5_core_create_rq(mdev, in, MLX5_ST_SZ_BYTES(create_rq_in), rqn);
+}
+
+static int mlx5_hairpin_create_sq(struct mlx5_core_dev *mdev,
+				  struct mlx5_hairpin_params *params, u32 *sqn)
+{
+	u32 in[MLX5_ST_SZ_DW(create_sq_in)] = {0};
+	void *sqc, *wq;
+
+	sqc = MLX5_ADDR_OF(create_sq_in, in, ctx);
+	wq  = MLX5_ADDR_OF(sqc, sqc, wq);
+
+	MLX5_SET(sqc, sqc, hairpin, 1);
+	MLX5_SET(sqc, sqc, state, MLX5_SQC_STATE_RST);
+
+	MLX5_SET(wq, wq, log_hairpin_data_sz, params->log_data_size);
+
+	return mlx5_core_create_sq(mdev, in, MLX5_ST_SZ_BYTES(create_sq_in), sqn);
+}
+
+static int mlx5_hairpin_create_queues(struct mlx5_hairpin *hp,
+				      struct mlx5_hairpin_params *params)
+{
+	int err;
+
+	err = mlx5_hairpin_create_rq(hp->func_mdev, params, &hp->rqn);
+	if (err)
+		goto out_err_rq;
+
+	err = mlx5_hairpin_create_sq(hp->peer_mdev, params, &hp->sqn);
+	if (err)
+		goto out_err_sq;
+
+	return 0;
+
+out_err_sq:
+	mlx5_core_destroy_rq(hp->func_mdev, hp->rqn);
+out_err_rq:
+	return err;
+}
+
+static void mlx5_hairpin_destroy_queues(struct mlx5_hairpin *hp)
+{
+	mlx5_core_destroy_rq(hp->func_mdev, hp->rqn);
+	mlx5_core_destroy_sq(hp->peer_mdev, hp->sqn);
+}
+
+static int mlx5_hairpin_modify_rq(struct mlx5_core_dev *func_mdev, u32 rqn,
+				  int curr_state, int next_state,
+				  u16 peer_vhca, u32 peer_sq)
+{
+	u32 in[MLX5_ST_SZ_DW(modify_rq_in)] = {0};
+	void *rqc;
+
+	rqc = MLX5_ADDR_OF(modify_rq_in, in, ctx);
+
+	if (next_state == MLX5_RQC_STATE_RDY) {
+		MLX5_SET(rqc, rqc, hairpin_peer_sq, peer_sq);
+		MLX5_SET(rqc, rqc, hairpin_peer_vhca, peer_vhca);
+	}
+
+	MLX5_SET(modify_rq_in, in, rq_state, curr_state);
+	MLX5_SET(rqc, rqc, state, next_state);
+
+	return mlx5_core_modify_rq(func_mdev, rqn,
+				   in, MLX5_ST_SZ_BYTES(modify_rq_in));
+}
+
+static int mlx5_hairpin_modify_sq(struct mlx5_core_dev *peer_mdev, u32 sqn,
+				  int curr_state, int next_state,
+				  u16 peer_vhca, u32 peer_rq)
+{
+	u32 in[MLX5_ST_SZ_DW(modify_sq_in)] = {0};
+	void *sqc;
+
+	sqc = MLX5_ADDR_OF(modify_sq_in, in, ctx);
+
+	if (next_state == MLX5_SQC_STATE_RDY) {
+		MLX5_SET(sqc, sqc, hairpin_peer_rq, peer_rq);
+		MLX5_SET(sqc, sqc, hairpin_peer_vhca, peer_vhca);
+	}
+
+	MLX5_SET(modify_sq_in, in, sq_state, curr_state);
+	MLX5_SET(sqc, sqc, state, next_state);
+
+	return mlx5_core_modify_sq(peer_mdev, sqn,
+				   in, MLX5_ST_SZ_BYTES(modify_sq_in));
+}
+
+static int mlx5_hairpin_pair_queues(struct mlx5_hairpin *hp)
+{
+	int err;
+
+	/* set peer SQ */
+	err = mlx5_hairpin_modify_sq(hp->peer_mdev, hp->sqn,
+				     MLX5_SQC_STATE_RST, MLX5_SQC_STATE_RDY,
+				     MLX5_CAP_GEN(hp->func_mdev, vhca_id), hp->rqn);
+	if (err)
+		goto err_modify_sq;
+
+	/* set func RQ */
+	err = mlx5_hairpin_modify_rq(hp->func_mdev, hp->rqn,
+				     MLX5_RQC_STATE_RST, MLX5_RQC_STATE_RDY,
+				     MLX5_CAP_GEN(hp->peer_mdev, vhca_id), hp->sqn);
+
+	if (err)
+		goto err_modify_rq;
+
+	return 0;
+
+err_modify_rq:
+	mlx5_hairpin_modify_sq(hp->peer_mdev, hp->sqn, MLX5_SQC_STATE_RDY,
+			       MLX5_SQC_STATE_RST, 0, 0);
+err_modify_sq:
+	return err;
+}
+
+static void mlx5_hairpin_unpair_queues(struct mlx5_hairpin *hp)
+{
+	/* unset func RQ */
+	mlx5_hairpin_modify_rq(hp->func_mdev, hp->rqn, MLX5_RQC_STATE_RDY,
+			       MLX5_RQC_STATE_RST, 0, 0);
+
+	/* unset peer SQ */
+	mlx5_hairpin_modify_sq(hp->peer_mdev, hp->sqn, MLX5_SQC_STATE_RDY,
+			       MLX5_SQC_STATE_RST, 0, 0);
+}
+
+struct mlx5_hairpin *
+mlx5_core_hairpin_create(struct mlx5_core_dev *func_mdev,
+			 struct mlx5_core_dev *peer_mdev,
+			 struct mlx5_hairpin_params *params)
+{
+	struct mlx5_hairpin *hp;
+	int size, err;
+
+	size = sizeof(*hp);
+	hp = kzalloc(size, GFP_KERNEL);
+	if (!hp)
+		return ERR_PTR(-ENOMEM);
+
+	hp->func_mdev = func_mdev;
+	hp->peer_mdev = peer_mdev;
+
+	/* alloc and pair func --> peer hairpin */
+	err = mlx5_hairpin_create_queues(hp, params);
+	if (err)
+		goto err_create_queues;
+
+	err = mlx5_hairpin_pair_queues(hp);
+	if (err)
+		goto err_pair_queues;
+
+	return hp;
+
+err_pair_queues:
+	mlx5_hairpin_destroy_queues(hp);
+err_create_queues:
+	kfree(hp);
+	return ERR_PTR(err);
+}
+
+void mlx5_core_hairpin_destroy(struct mlx5_hairpin *hp)
+{
+	mlx5_hairpin_unpair_queues(hp);
+	mlx5_hairpin_destroy_queues(hp);
+	kfree(hp);
+}
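
The new hairpin object pairs an RQ on one function with an SQ on a peer
function so traffic can loop between them entirely in hardware: creation
allocates both queues and moves them RST -> RDY with each side's peer
vhca id and queue number filled in, and teardown reverses both steps. A
kernel-context sketch of the consumer side (the log_data_size and
q_counter values are hypothetical; callers fill them from their own
configuration):

	struct mlx5_hairpin_params params = {
		.log_data_size = 16,
		.q_counter     = q_counter_id,
	};
	struct mlx5_hairpin *hp;

	hp = mlx5_core_hairpin_create(func_mdev, peer_mdev, &params);
	if (IS_ERR(hp))
		return PTR_ERR(hp);
	/* ... steer flows into hp->rqn and out of hp->sqn ... */
	mlx5_core_hairpin_destroy(hp);
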
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
index c3837ca..54c7d92 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
@@ -358,7 +358,7 @@ static int mlxsw_sp_fw_rev_validate(struct mlxsw_sp *mlxsw_sp)
 	if (mlxsw_sp_fw_rev_ge(rev, &mlxsw_sp_supported_fw_rev))
 		return 0;
 
-	dev_info(mlxsw_sp->bus_info->dev, "The firmware version %d.%d.%d out of data\n",
+	dev_info(mlxsw_sp->bus_info->dev, "The firmware version %d.%d.%d is out of date\n",
 		 rev->major, rev->minor, rev->subminor);
 	dev_info(mlxsw_sp->bus_info->dev, "Upgrading firmware using file %s\n",
 		 MLXSW_SP_FW_FILENAME);
@@ -1571,14 +1571,11 @@ mlxsw_sp_port_add_cls_matchall_mirror(struct mlxsw_sp_port *mlxsw_sp_port,
 				      const struct tc_action *a,
 				      bool ingress)
 {
-	struct net *net = dev_net(mlxsw_sp_port->dev);
 	enum mlxsw_sp_span_type span_type;
 	struct mlxsw_sp_port *to_port;
 	struct net_device *to_dev;
-	int ifindex;
 
-	ifindex = tcf_mirred_ifindex(a);
-	to_dev = __dev_get_by_index(net, ifindex);
+	to_dev = tcf_mirred_dev(a);
 	if (!to_dev) {
 		netdev_err(mlxsw_sp_port->dev, "Could not find requested device\n");
 		return -EINVAL;
@@ -1838,6 +1835,54 @@ static int mlxsw_sp_setup_tc(struct net_device *dev, enum tc_setup_type type,
 	}
 }
 
+static int mlxsw_sp_feature_hw_tc(struct net_device *dev, bool enable)
+{
+	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
+
+	if (!enable && (mlxsw_sp_port->acl_rule_count ||
+			!list_empty(&mlxsw_sp_port->mall_tc_list))) {
+		netdev_err(dev, "Active offloaded tc filters, can't turn hw_tc_offload off\n");
+		return -EINVAL;
+	}
+	return 0;
+}
+
+typedef int (*mlxsw_sp_feature_handler)(struct net_device *dev, bool enable);
+
+static int mlxsw_sp_handle_feature(struct net_device *dev,
+				   netdev_features_t wanted_features,
+				   netdev_features_t feature,
+				   mlxsw_sp_feature_handler feature_handler)
+{
+	netdev_features_t changes = wanted_features ^ dev->features;
+	bool enable = !!(wanted_features & feature);
+	int err;
+
+	if (!(changes & feature))
+		return 0;
+
+	err = feature_handler(dev, enable);
+	if (err) {
+		netdev_err(dev, "%s feature %pNF failed, err %d\n",
+			   enable ? "Enable" : "Disable", &feature, err);
+		return err;
+	}
+
+	if (enable)
+		dev->features |= feature;
+	else
+		dev->features &= ~feature;
+
+	return 0;
+}
+
+static int mlxsw_sp_set_features(struct net_device *dev,
+				 netdev_features_t features)
+{
+	return mlxsw_sp_handle_feature(dev, features, NETIF_F_HW_TC,
+				       mlxsw_sp_feature_hw_tc);
+}
+
 static const struct net_device_ops mlxsw_sp_port_netdev_ops = {
 	.ndo_open		= mlxsw_sp_port_open,
 	.ndo_stop		= mlxsw_sp_port_stop,
@@ -1852,6 +1897,7 @@ static const struct net_device_ops mlxsw_sp_port_netdev_ops = {
 	.ndo_vlan_rx_add_vid	= mlxsw_sp_port_add_vid,
 	.ndo_vlan_rx_kill_vid	= mlxsw_sp_port_kill_vid,
 	.ndo_get_phys_port_name	= mlxsw_sp_port_get_phys_port_name,
+	.ndo_set_features	= mlxsw_sp_set_features,
 };
 
 static void mlxsw_sp_port_get_drvinfo(struct net_device *dev,
@@ -3039,6 +3085,13 @@ static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port,
 		goto err_port_fids_init;
 	}
 
+	err = mlxsw_sp_tc_qdisc_init(mlxsw_sp_port);
+	if (err) {
+		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize TC qdiscs\n",
+			mlxsw_sp_port->local_port);
+		goto err_port_qdiscs_init;
+	}
+
 	mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_get(mlxsw_sp_port, 1);
 	if (IS_ERR(mlxsw_sp_port_vlan)) {
 		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to create VID 1\n",
@@ -3067,6 +3120,8 @@ static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port,
 	mlxsw_sp_port_switchdev_fini(mlxsw_sp_port);
 	mlxsw_sp_port_vlan_put(mlxsw_sp_port_vlan);
 err_port_vlan_get:
+	mlxsw_sp_tc_qdisc_fini(mlxsw_sp_port);
+err_port_qdiscs_init:
 	mlxsw_sp_port_fids_fini(mlxsw_sp_port);
 err_port_fids_init:
 	mlxsw_sp_port_dcb_fini(mlxsw_sp_port);
@@ -3102,6 +3157,7 @@ static void mlxsw_sp_port_remove(struct mlxsw_sp *mlxsw_sp, u8 local_port)
 	mlxsw_sp->ports[local_port] = NULL;
 	mlxsw_sp_port_switchdev_fini(mlxsw_sp_port);
 	mlxsw_sp_port_vlan_flush(mlxsw_sp_port);
+	mlxsw_sp_tc_qdisc_fini(mlxsw_sp_port);
 	mlxsw_sp_port_fids_fini(mlxsw_sp_port);
 	mlxsw_sp_port_dcb_fini(mlxsw_sp_port);
 	mlxsw_sp_port_swid_set(mlxsw_sp_port, MLXSW_PORT_SWID_DISABLED_PORT);
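
mlxsw_sp_handle_feature() above factors out the ndo_set_features
boilerplate: a per-feature handler runs only when that feature bit
actually changes, and dev->features is updated centrally on success.
Supporting another toggle is one more chained call; a sketch with a
hypothetical second handler:

	static int mlxsw_sp_set_features(struct net_device *dev,
					 netdev_features_t features)
	{
		int err;

		err = mlxsw_sp_handle_feature(dev, features, NETIF_F_HW_TC,
					      mlxsw_sp_feature_hw_tc);
		if (err)
			return err;

		/* hypothetical handler for a second feature bit */
		return mlxsw_sp_handle_feature(dev, features, NETIF_F_LOOPBACK,
					       mlxsw_sp_feature_loopback);
	}
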
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
index 05ce1be..b6f475e 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
@@ -204,29 +204,6 @@ struct mlxsw_sp_port_vlan {
 	struct list_head bridge_vlan_node;
 };
 
-enum mlxsw_sp_qdisc_type {
-	MLXSW_SP_QDISC_NO_QDISC,
-	MLXSW_SP_QDISC_RED,
-};
-
-struct mlxsw_sp_qdisc {
-	u32 handle;
-	enum mlxsw_sp_qdisc_type type;
-	struct red_stats xstats_base;
-	union {
-		struct {
-			u64 tail_drop_base;
-			u64 ecn_base;
-			u64 wred_drop_base;
-		} red;
-	} xstats;
-
-	u64 tx_bytes;
-	u64 tx_packets;
-	u64 drops;
-	u64 overlimits;
-};
-
 /* No need for an internal lock; at worst we miss a single periodic iteration */
 struct mlxsw_sp_port_xstats {
 	u64 ecn;
@@ -269,7 +246,8 @@ struct mlxsw_sp_port {
 	} periodic_hw_stats;
 	struct mlxsw_sp_port_sample *sample;
 	struct list_head vlans_list;
-	struct mlxsw_sp_qdisc root_qdisc;
+	struct mlxsw_sp_qdisc *root_qdisc;
+	unsigned int acl_rule_count;
 };
 
 static inline bool
@@ -583,6 +561,8 @@ int mlxsw_sp_flower_stats(struct mlxsw_sp_port *mlxsw_sp_port, bool ingress,
 			  struct tc_cls_flower_offload *f);
 
 /* spectrum_qdisc.c */
+int mlxsw_sp_tc_qdisc_init(struct mlxsw_sp_port *mlxsw_sp_port);
+void mlxsw_sp_tc_qdisc_fini(struct mlxsw_sp_port *mlxsw_sp_port);
 int mlxsw_sp_setup_tc_red(struct mlxsw_sp_port *mlxsw_sp_port,
 			  struct tc_red_qopt_offload *p);
 
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c
index 2f0e578..42e8a36 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c
@@ -92,7 +92,6 @@ static int mlxsw_sp_flower_parse_actions(struct mlxsw_sp *mlxsw_sp,
 			if (err)
 				return err;
 		} else if (is_tcf_mirred_egress_redirect(a)) {
-			int ifindex = tcf_mirred_ifindex(a);
 			struct net_device *out_dev;
 			struct mlxsw_sp_fid *fid;
 			u16 fid_index;
@@ -104,7 +103,7 @@ static int mlxsw_sp_flower_parse_actions(struct mlxsw_sp *mlxsw_sp,
 			if (err)
 				return err;
 
-			out_dev = __dev_get_by_index(dev_net(dev), ifindex);
+			out_dev = tcf_mirred_dev(a);
 			if (out_dev == dev)
 				out_dev = NULL;
 
@@ -424,6 +423,7 @@ int mlxsw_sp_flower_replace(struct mlxsw_sp_port *mlxsw_sp_port, bool ingress,
 		goto err_rule_add;
 
 	mlxsw_sp_acl_ruleset_put(mlxsw_sp, ruleset);
+	mlxsw_sp_port->acl_rule_count++;
 	return 0;
 
 err_rule_add:
@@ -455,6 +455,7 @@ void mlxsw_sp_flower_destroy(struct mlxsw_sp_port *mlxsw_sp_port, bool ingress,
 	}
 
 	mlxsw_sp_acl_ruleset_put(mlxsw_sp, ruleset);
+	mlxsw_sp_port->acl_rule_count--;
 }
 
 int mlxsw_sp_flower_stats(struct mlxsw_sp_port *mlxsw_sp_port, bool ingress,
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_qdisc.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_qdisc.c
index b5397da..971f689 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_qdisc.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_qdisc.c
@@ -41,6 +41,138 @@
 #include "spectrum.h"
 #include "reg.h"
 
+enum mlxsw_sp_qdisc_type {
+	MLXSW_SP_QDISC_NO_QDISC,
+	MLXSW_SP_QDISC_RED,
+};
+
+struct mlxsw_sp_qdisc_ops {
+	enum mlxsw_sp_qdisc_type type;
+	int (*check_params)(struct mlxsw_sp_port *mlxsw_sp_port,
+			    struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
+			    void *params);
+	int (*replace)(struct mlxsw_sp_port *mlxsw_sp_port,
+		       struct mlxsw_sp_qdisc *mlxsw_sp_qdisc, void *params);
+	int (*destroy)(struct mlxsw_sp_port *mlxsw_sp_port,
+		       struct mlxsw_sp_qdisc *mlxsw_sp_qdisc);
+	int (*get_stats)(struct mlxsw_sp_port *mlxsw_sp_port,
+			 struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
+			 struct tc_qopt_offload_stats *stats_ptr);
+	int (*get_xstats)(struct mlxsw_sp_port *mlxsw_sp_port,
+			  struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
+			  void *xstats_ptr);
+	void (*clean_stats)(struct mlxsw_sp_port *mlxsw_sp_port,
+			    struct mlxsw_sp_qdisc *mlxsw_sp_qdisc);
+};
+
+struct mlxsw_sp_qdisc {
+	u32 handle;
+	u8 tclass_num;
+	union {
+		struct red_stats red;
+	} xstats_base;
+	struct mlxsw_sp_qdisc_stats {
+		u64 tx_bytes;
+		u64 tx_packets;
+		u64 drops;
+		u64 overlimits;
+	} stats_base;
+
+	struct mlxsw_sp_qdisc_ops *ops;
+};
+
+static bool
+mlxsw_sp_qdisc_compare(struct mlxsw_sp_qdisc *mlxsw_sp_qdisc, u32 handle,
+		       enum mlxsw_sp_qdisc_type type)
+{
+	return mlxsw_sp_qdisc && mlxsw_sp_qdisc->ops &&
+	       mlxsw_sp_qdisc->ops->type == type &&
+	       mlxsw_sp_qdisc->handle == handle;
+}
+
+static int
+mlxsw_sp_qdisc_destroy(struct mlxsw_sp_port *mlxsw_sp_port,
+		       struct mlxsw_sp_qdisc *mlxsw_sp_qdisc)
+{
+	int err = 0;
+
+	if (!mlxsw_sp_qdisc)
+		return 0;
+
+	if (mlxsw_sp_qdisc->ops && mlxsw_sp_qdisc->ops->destroy)
+		err = mlxsw_sp_qdisc->ops->destroy(mlxsw_sp_port,
+						   mlxsw_sp_qdisc);
+
+	mlxsw_sp_qdisc->handle = TC_H_UNSPEC;
+	mlxsw_sp_qdisc->ops = NULL;
+	return err;
+}
+
+static int
+mlxsw_sp_qdisc_replace(struct mlxsw_sp_port *mlxsw_sp_port, u32 handle,
+		       struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
+		       struct mlxsw_sp_qdisc_ops *ops, void *params)
+{
+	int err;
+
+	if (mlxsw_sp_qdisc->ops && mlxsw_sp_qdisc->ops->type != ops->type)
+		/* In case this location contained a different qdisc of the
+		 * same type we can override the old qdisc configuration.
+		 * Otherwise, we need to remove the old qdisc before setting the
+		 * new one.
+		 */
+		mlxsw_sp_qdisc_destroy(mlxsw_sp_port, mlxsw_sp_qdisc);
+	err = ops->check_params(mlxsw_sp_port, mlxsw_sp_qdisc, params);
+	if (err)
+		goto err_bad_param;
+
+	err = ops->replace(mlxsw_sp_port, mlxsw_sp_qdisc, params);
+	if (err)
+		goto err_config;
+
+	if (mlxsw_sp_qdisc->handle != handle) {
+		mlxsw_sp_qdisc->ops = ops;
+		if (ops->clean_stats)
+			ops->clean_stats(mlxsw_sp_port, mlxsw_sp_qdisc);
+	}
+
+	mlxsw_sp_qdisc->handle = handle;
+	return 0;
+
+err_bad_param:
+err_config:
+	mlxsw_sp_qdisc_destroy(mlxsw_sp_port, mlxsw_sp_qdisc);
+	return err;
+}
+
+static int
+mlxsw_sp_qdisc_get_stats(struct mlxsw_sp_port *mlxsw_sp_port,
+			 struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
+			 struct tc_qopt_offload_stats *stats_ptr)
+{
+	if (mlxsw_sp_qdisc && mlxsw_sp_qdisc->ops &&
+	    mlxsw_sp_qdisc->ops->get_stats)
+		return mlxsw_sp_qdisc->ops->get_stats(mlxsw_sp_port,
+						      mlxsw_sp_qdisc,
+						      stats_ptr);
+
+	return -EOPNOTSUPP;
+}
+
+static int
+mlxsw_sp_qdisc_get_xstats(struct mlxsw_sp_port *mlxsw_sp_port,
+			  struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
+			  void *xstats_ptr)
+{
+	if (mlxsw_sp_qdisc && mlxsw_sp_qdisc->ops &&
+	    mlxsw_sp_qdisc->ops->get_xstats)
+		return mlxsw_sp_qdisc->ops->get_xstats(mlxsw_sp_port,
+						      mlxsw_sp_qdisc,
+						      xstats_ptr);
+
+	return -EOPNOTSUPP;
+}
+
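+/* The qdisc state now hides behind struct mlxsw_sp_qdisc_ops, so
+ * replace/destroy and the stats getters above dispatch generically and
+ * a new offloaded qdisc type only has to supply one ops instance. A
+ * sketch of wiring up a hypothetical type (all foo_* names are
+ * illustrative):
+ *
+ *	static struct mlxsw_sp_qdisc_ops mlxsw_sp_qdisc_ops_foo = {
+ *		.type		= MLXSW_SP_QDISC_FOO,
+ *		.check_params	= mlxsw_sp_qdisc_foo_check_params,
+ *		.replace	= mlxsw_sp_qdisc_foo_replace,
+ *		.destroy	= mlxsw_sp_qdisc_foo_destroy,
+ *		.get_stats	= mlxsw_sp_qdisc_foo_get_stats,
+ *		.clean_stats	= mlxsw_sp_qdisc_foo_clean_stats,
+ *	};
+ *
+ *	// from the tc offload entry point:
+ *	return mlxsw_sp_qdisc_replace(mlxsw_sp_port, handle,
+ *				      mlxsw_sp_port->root_qdisc,
+ *				      &mlxsw_sp_qdisc_ops_foo, params);
+ */
+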
 static int
 mlxsw_sp_tclass_congestion_enable(struct mlxsw_sp_port *mlxsw_sp_port,
 				  int tclass_num, u32 min, u32 max,
@@ -80,80 +212,76 @@ mlxsw_sp_tclass_congestion_disable(struct mlxsw_sp_port *mlxsw_sp_port,
 }
 
 static void
-mlxsw_sp_setup_tc_qdisc_clean_stats(struct mlxsw_sp_port *mlxsw_sp_port,
-				    struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
-				    int tclass_num)
+mlxsw_sp_setup_tc_qdisc_red_clean_stats(struct mlxsw_sp_port *mlxsw_sp_port,
+					struct mlxsw_sp_qdisc *mlxsw_sp_qdisc)
 {
-	struct red_stats *xstats_base = &mlxsw_sp_qdisc->xstats_base;
+	u8 tclass_num = mlxsw_sp_qdisc->tclass_num;
+	struct mlxsw_sp_qdisc_stats *stats_base;
 	struct mlxsw_sp_port_xstats *xstats;
 	struct rtnl_link_stats64 *stats;
+	struct red_stats *red_base;
 
 	xstats = &mlxsw_sp_port->periodic_hw_stats.xstats;
 	stats = &mlxsw_sp_port->periodic_hw_stats.stats;
+	stats_base = &mlxsw_sp_qdisc->stats_base;
+	red_base = &mlxsw_sp_qdisc->xstats_base.red;
 
-	mlxsw_sp_qdisc->tx_packets = stats->tx_packets;
-	mlxsw_sp_qdisc->tx_bytes = stats->tx_bytes;
+	stats_base->tx_packets = stats->tx_packets;
+	stats_base->tx_bytes = stats->tx_bytes;
 
-	switch (mlxsw_sp_qdisc->type) {
-	case MLXSW_SP_QDISC_RED:
-		xstats_base->prob_mark = xstats->ecn;
-		xstats_base->prob_drop = xstats->wred_drop[tclass_num];
-		xstats_base->pdrop = xstats->tail_drop[tclass_num];
+	red_base->prob_mark = xstats->ecn;
+	red_base->prob_drop = xstats->wred_drop[tclass_num];
+	red_base->pdrop = xstats->tail_drop[tclass_num];
 
-		mlxsw_sp_qdisc->overlimits = xstats_base->prob_drop +
-					     xstats_base->prob_mark;
-		mlxsw_sp_qdisc->drops = xstats_base->prob_drop +
-					xstats_base->pdrop;
-		break;
-	default:
-		break;
-	}
+	stats_base->overlimits = red_base->prob_drop + red_base->prob_mark;
+	stats_base->drops = red_base->prob_drop + red_base->pdrop;
 }
 
 static int
-mlxsw_sp_qdisc_red_destroy(struct mlxsw_sp_port *mlxsw_sp_port, u32 handle,
-			   struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
-			   int tclass_num)
+mlxsw_sp_qdisc_red_destroy(struct mlxsw_sp_port *mlxsw_sp_port,
+			   struct mlxsw_sp_qdisc *mlxsw_sp_qdisc)
 {
-	int err;
-
-	if (mlxsw_sp_qdisc->handle != handle)
-		return 0;
-
-	err = mlxsw_sp_tclass_congestion_disable(mlxsw_sp_port, tclass_num);
-	mlxsw_sp_qdisc->handle = TC_H_UNSPEC;
-	mlxsw_sp_qdisc->type = MLXSW_SP_QDISC_NO_QDISC;
-
-	return err;
+	return mlxsw_sp_tclass_congestion_disable(mlxsw_sp_port,
+						  mlxsw_sp_qdisc->tclass_num);
 }
 
 static int
-mlxsw_sp_qdisc_red_replace(struct mlxsw_sp_port *mlxsw_sp_port, u32 handle,
-			   struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
-			   int tclass_num,
-			   struct tc_red_qopt_offload_params *p)
+mlxsw_sp_qdisc_red_check_params(struct mlxsw_sp_port *mlxsw_sp_port,
+				struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
+				void *params)
 {
 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
-	u32 min, max;
-	u64 prob;
-	int err = 0;
+	struct tc_red_qopt_offload_params *p = params;
 
 	if (p->min > p->max) {
 		dev_err(mlxsw_sp->bus_info->dev,
 			"spectrum: RED: min %u is bigger then max %u\n", p->min,
 			p->max);
-		goto err_bad_param;
+		return -EINVAL;
 	}
 	if (p->max > MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_BUFFER_SIZE)) {
 		dev_err(mlxsw_sp->bus_info->dev,
 			"spectrum: RED: max value %u is too big\n", p->max);
-		goto err_bad_param;
+		return -EINVAL;
 	}
 	if (p->min == 0 || p->max == 0) {
 		dev_err(mlxsw_sp->bus_info->dev,
 			"spectrum: RED: 0 value is illegal for min and max\n");
-		goto err_bad_param;
+		return -EINVAL;
 	}
+	return 0;
+}
+
+static int
+mlxsw_sp_qdisc_red_replace(struct mlxsw_sp_port *mlxsw_sp_port,
+			   struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
+			   void *params)
+{
+	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+	struct tc_red_qopt_offload_params *p = params;
+	u8 tclass_num = mlxsw_sp_qdisc->tclass_num;
+	u32 min, max;
+	u64 prob;
 
 	/* calculate probability in percentage */
 	prob = p->probability;
@@ -162,116 +290,132 @@ mlxsw_sp_qdisc_red_replace(struct mlxsw_sp_port *mlxsw_sp_port, u32 handle,
 	prob = DIV_ROUND_UP(prob, 1 << 16);
 	min = mlxsw_sp_bytes_cells(mlxsw_sp, p->min);
 	max = mlxsw_sp_bytes_cells(mlxsw_sp, p->max);
-	err = mlxsw_sp_tclass_congestion_enable(mlxsw_sp_port, tclass_num, min,
-						max, prob, p->is_ecn);
-	if (err)
-		goto err_config;
-
-	mlxsw_sp_qdisc->type = MLXSW_SP_QDISC_RED;
-	if (mlxsw_sp_qdisc->handle != handle)
-		mlxsw_sp_setup_tc_qdisc_clean_stats(mlxsw_sp_port,
-						    mlxsw_sp_qdisc,
-						    tclass_num);
-
-	mlxsw_sp_qdisc->handle = handle;
-	return 0;
-
-err_bad_param:
-	err = -EINVAL;
-err_config:
-	mlxsw_sp_qdisc_red_destroy(mlxsw_sp_port, mlxsw_sp_qdisc->handle,
-				   mlxsw_sp_qdisc, tclass_num);
-	return err;
+	return mlxsw_sp_tclass_congestion_enable(mlxsw_sp_port, tclass_num, min,
+						 max, prob, p->is_ecn);
 }
 
 static int
-mlxsw_sp_qdisc_get_red_xstats(struct mlxsw_sp_port *mlxsw_sp_port, u32 handle,
+mlxsw_sp_qdisc_get_red_xstats(struct mlxsw_sp_port *mlxsw_sp_port,
 			      struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
-			      int tclass_num, struct red_stats *res)
+			      void *xstats_ptr)
 {
-	struct red_stats *xstats_base = &mlxsw_sp_qdisc->xstats_base;
+	struct red_stats *xstats_base = &mlxsw_sp_qdisc->xstats_base.red;
+	u8 tclass_num = mlxsw_sp_qdisc->tclass_num;
 	struct mlxsw_sp_port_xstats *xstats;
-
-	if (mlxsw_sp_qdisc->handle != handle ||
-	    mlxsw_sp_qdisc->type != MLXSW_SP_QDISC_RED)
-		return -EOPNOTSUPP;
+	struct red_stats *res = xstats_ptr;
+	int early_drops, marks, pdrops;
 
 	xstats = &mlxsw_sp_port->periodic_hw_stats.xstats;
 
-	res->prob_drop = xstats->wred_drop[tclass_num] - xstats_base->prob_drop;
-	res->prob_mark = xstats->ecn - xstats_base->prob_mark;
-	res->pdrop = xstats->tail_drop[tclass_num] - xstats_base->pdrop;
+	early_drops = xstats->wred_drop[tclass_num] - xstats_base->prob_drop;
+	marks = xstats->ecn - xstats_base->prob_mark;
+	pdrops = xstats->tail_drop[tclass_num] - xstats_base->pdrop;
+
+	res->pdrop += pdrops;
+	res->prob_drop += early_drops;
+	res->prob_mark += marks;
+
+	xstats_base->pdrop += pdrops;
+	xstats_base->prob_drop += early_drops;
+	xstats_base->prob_mark += marks;
 	return 0;
 }
 
 static int
-mlxsw_sp_qdisc_get_red_stats(struct mlxsw_sp_port *mlxsw_sp_port, u32 handle,
+mlxsw_sp_qdisc_get_red_stats(struct mlxsw_sp_port *mlxsw_sp_port,
 			     struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
-			     int tclass_num,
-			     struct tc_red_qopt_offload_stats *res)
+			     struct tc_qopt_offload_stats *stats_ptr)
 {
 	u64 tx_bytes, tx_packets, overlimits, drops;
+	u8 tclass_num = mlxsw_sp_qdisc->tclass_num;
+	struct mlxsw_sp_qdisc_stats *stats_base;
 	struct mlxsw_sp_port_xstats *xstats;
 	struct rtnl_link_stats64 *stats;
 
-	if (mlxsw_sp_qdisc->handle != handle ||
-	    mlxsw_sp_qdisc->type != MLXSW_SP_QDISC_RED)
-		return -EOPNOTSUPP;
-
 	xstats = &mlxsw_sp_port->periodic_hw_stats.xstats;
 	stats = &mlxsw_sp_port->periodic_hw_stats.stats;
+	stats_base = &mlxsw_sp_qdisc->stats_base;
 
-	tx_bytes = stats->tx_bytes - mlxsw_sp_qdisc->tx_bytes;
-	tx_packets = stats->tx_packets - mlxsw_sp_qdisc->tx_packets;
+	tx_bytes = stats->tx_bytes - stats_base->tx_bytes;
+	tx_packets = stats->tx_packets - stats_base->tx_packets;
 	overlimits = xstats->wred_drop[tclass_num] + xstats->ecn -
-		     mlxsw_sp_qdisc->overlimits;
+		     stats_base->overlimits;
 	drops = xstats->wred_drop[tclass_num] + xstats->tail_drop[tclass_num] -
-		mlxsw_sp_qdisc->drops;
+		stats_base->drops;
 
-	_bstats_update(res->bstats, tx_bytes, tx_packets);
-	res->qstats->overlimits += overlimits;
-	res->qstats->drops += drops;
-	res->qstats->backlog += mlxsw_sp_cells_bytes(mlxsw_sp_port->mlxsw_sp,
-						xstats->backlog[tclass_num]);
+	_bstats_update(stats_ptr->bstats, tx_bytes, tx_packets);
+	stats_ptr->qstats->overlimits += overlimits;
+	stats_ptr->qstats->drops += drops;
+	stats_ptr->qstats->backlog +=
+			mlxsw_sp_cells_bytes(mlxsw_sp_port->mlxsw_sp,
+					     xstats->backlog[tclass_num]);
 
-	mlxsw_sp_qdisc->drops +=  drops;
-	mlxsw_sp_qdisc->overlimits += overlimits;
-	mlxsw_sp_qdisc->tx_bytes += tx_bytes;
-	mlxsw_sp_qdisc->tx_packets += tx_packets;
+	stats_base->drops +=  drops;
+	stats_base->overlimits += overlimits;
+	stats_base->tx_bytes += tx_bytes;
+	stats_base->tx_packets += tx_packets;
 	return 0;
 }
 
 #define MLXSW_SP_PORT_DEFAULT_TCLASS 0
 
+static struct mlxsw_sp_qdisc_ops mlxsw_sp_qdisc_ops_red = {
+	.type = MLXSW_SP_QDISC_RED,
+	.check_params = mlxsw_sp_qdisc_red_check_params,
+	.replace = mlxsw_sp_qdisc_red_replace,
+	.destroy = mlxsw_sp_qdisc_red_destroy,
+	.get_stats = mlxsw_sp_qdisc_get_red_stats,
+	.get_xstats = mlxsw_sp_qdisc_get_red_xstats,
+	.clean_stats = mlxsw_sp_setup_tc_qdisc_red_clean_stats,
+};
+
 int mlxsw_sp_setup_tc_red(struct mlxsw_sp_port *mlxsw_sp_port,
 			  struct tc_red_qopt_offload *p)
 {
 	struct mlxsw_sp_qdisc *mlxsw_sp_qdisc;
-	int tclass_num;
 
 	if (p->parent != TC_H_ROOT)
 		return -EOPNOTSUPP;
 
-	mlxsw_sp_qdisc = &mlxsw_sp_port->root_qdisc;
-	tclass_num = MLXSW_SP_PORT_DEFAULT_TCLASS;
+	mlxsw_sp_qdisc = mlxsw_sp_port->root_qdisc;
+
+	if (p->command == TC_RED_REPLACE)
+		return mlxsw_sp_qdisc_replace(mlxsw_sp_port, p->handle,
+					      mlxsw_sp_qdisc,
+					      &mlxsw_sp_qdisc_ops_red,
+					      &p->set);
+
+	if (!mlxsw_sp_qdisc_compare(mlxsw_sp_qdisc, p->handle,
+				    MLXSW_SP_QDISC_RED))
+		return -EOPNOTSUPP;
 
 	switch (p->command) {
-	case TC_RED_REPLACE:
-		return mlxsw_sp_qdisc_red_replace(mlxsw_sp_port, p->handle,
-						  mlxsw_sp_qdisc, tclass_num,
-						  &p->set);
 	case TC_RED_DESTROY:
-		return mlxsw_sp_qdisc_red_destroy(mlxsw_sp_port, p->handle,
-						  mlxsw_sp_qdisc, tclass_num);
+		return mlxsw_sp_qdisc_destroy(mlxsw_sp_port, mlxsw_sp_qdisc);
 	case TC_RED_XSTATS:
-		return mlxsw_sp_qdisc_get_red_xstats(mlxsw_sp_port, p->handle,
-						     mlxsw_sp_qdisc, tclass_num,
-						     p->xstats);
+		return mlxsw_sp_qdisc_get_xstats(mlxsw_sp_port, mlxsw_sp_qdisc,
+						 p->xstats);
 	case TC_RED_STATS:
-		return mlxsw_sp_qdisc_get_red_stats(mlxsw_sp_port, p->handle,
-						    mlxsw_sp_qdisc, tclass_num,
-						    &p->stats);
+		return mlxsw_sp_qdisc_get_stats(mlxsw_sp_port, mlxsw_sp_qdisc,
+						&p->stats);
 	default:
 		return -EOPNOTSUPP;
 	}
 }
+
+int mlxsw_sp_tc_qdisc_init(struct mlxsw_sp_port *mlxsw_sp_port)
+{
+	mlxsw_sp_port->root_qdisc = kzalloc(sizeof(*mlxsw_sp_port->root_qdisc),
+					    GFP_KERNEL);
+	if (!mlxsw_sp_port->root_qdisc)
+		return -ENOMEM;
+
+	mlxsw_sp_port->root_qdisc->tclass_num = MLXSW_SP_PORT_DEFAULT_TCLASS;
+
+	return 0;
+}
+
+void mlxsw_sp_tc_qdisc_fini(struct mlxsw_sp_port *mlxsw_sp_port)
+{
+	kfree(mlxsw_sp_port->root_qdisc);
+}
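
The mlxsw hunks above replace the RED-specific plumbing with a generic ops table (check_params/replace/destroy/get_stats/get_xstats/clean_stats) dispatched from mlxsw_sp_qdisc_replace(). Below is a minimal sketch of that two-phase replace flow, using hypothetical stand-in types rather than the driver's real structures: validate first with no side effects, then commit, and destroy on any failure so the offloaded state is never left half-configured.

struct fake_qdisc;

struct fake_qdisc_ops {
	int (*check_params)(struct fake_qdisc *q, void *params);
	int (*replace)(struct fake_qdisc *q, void *params);
	int (*destroy)(struct fake_qdisc *q);
};

struct fake_qdisc {
	unsigned int handle;
	const struct fake_qdisc_ops *ops;
};

static int fake_qdisc_replace(struct fake_qdisc *q, unsigned int handle,
			      const struct fake_qdisc_ops *ops, void *params)
{
	int err;

	/* Phase 1: validation only, no hardware side effects yet. */
	err = ops->check_params(q, params);
	if (err)
		goto err_out;

	/* Phase 2: commit the configuration to hardware. */
	err = ops->replace(q, params);
	if (err)
		goto err_out;

	q->ops = ops;
	q->handle = handle;
	return 0;

err_out:
	/* Mirror mlxsw_sp_qdisc_replace(): tear down on failure so
	 * software state never diverges from hardware.
	 */
	if (q->ops && q->ops->destroy)
		q->ops->destroy(q);
	q->ops = NULL;
	return err;
}
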
diff --git a/drivers/net/ethernet/micrel/ks8851.c b/drivers/net/ethernet/micrel/ks8851.c
index 2fe96f1..bd6e901 100644
--- a/drivers/net/ethernet/micrel/ks8851.c
+++ b/drivers/net/ethernet/micrel/ks8851.c
@@ -28,6 +28,7 @@
 #include <linux/spi/spi.h>
 #include <linux/gpio.h>
 #include <linux/of_gpio.h>
+#include <linux/of_net.h>
 
 #include "ks8851.h"
 
@@ -407,15 +408,23 @@ static void ks8851_read_mac_addr(struct net_device *dev)
  * @ks: The device structure
  *
  * Get or create the initial mac address for the device and then set that
- * into the station address register. If there is an EEPROM present, then
+ * into the station address register. A mac address supplied in the device
+ * tree takes precedence. Otherwise, if there is an EEPROM present, then
  * we try that. If no valid mac address is found we use eth_random_addr()
  * to create a new one.
  */
 static void ks8851_init_mac(struct ks8851_net *ks)
 {
 	struct net_device *dev = ks->netdev;
+	const u8 *mac_addr;
 
-	/* first, try reading what we've got already */
+	mac_addr = of_get_mac_address(ks->spidev->dev.of_node);
+	if (mac_addr) {
+		memcpy(dev->dev_addr, mac_addr, ETH_ALEN);
+		ks8851_write_mac_addr(dev);
+		return;
+	}
+
 	if (ks->rc_ccr & CCR_EEPROM) {
 		ks8851_read_mac_addr(dev);
 		if (is_valid_ether_addr(dev->dev_addr))
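
The ks8851 hunk above establishes a strict precedence for the initial MAC address: device tree first, then EEPROM, then a random fallback. A compact illustration of that ordering follows; the helpers are hypothetical stand-ins for of_get_mac_address(), the EEPROM read, and eth_random_addr().

#include <stdbool.h>
#include <string.h>

#define ETH_ALEN 6

/* Hypothetical stand-ins: each returns true and fills @mac when its
 * source provides a usable address.
 */
static bool dt_mac(unsigned char *mac) { (void)mac; return false; }
static bool eeprom_mac(unsigned char *mac) { (void)mac; return false; }
static void random_mac(unsigned char *mac) { memset(mac, 0xaa, ETH_ALEN); }

static void init_mac(unsigned char addr[ETH_ALEN])
{
	if (dt_mac(addr))	/* device tree wins over everything else */
		return;
	if (eeprom_mac(addr))	/* then a fitted, valid EEPROM */
		return;
	random_mac(addr);	/* last resort: a random address */
}
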
diff --git a/drivers/net/ethernet/netronome/nfp/Makefile b/drivers/net/ethernet/netronome/nfp/Makefile
index 24c4408..6e5ef98 100644
--- a/drivers/net/ethernet/netronome/nfp/Makefile
+++ b/drivers/net/ethernet/netronome/nfp/Makefile
@@ -22,6 +22,7 @@
 	    nfp_hwmon.o \
 	    nfp_main.o \
 	    nfp_net_common.o \
+	    nfp_net_debugdump.o \
 	    nfp_net_ethtool.o \
 	    nfp_net_main.o \
 	    nfp_net_repr.o \
diff --git a/drivers/net/ethernet/netronome/nfp/bpf/fw.h b/drivers/net/ethernet/netronome/nfp/bpf/fw.h
new file mode 100644
index 0000000..7206aa1
--- /dev/null
+++ b/drivers/net/ethernet/netronome/nfp/bpf/fw.h
@@ -0,0 +1,54 @@
+/*
+ * Copyright (C) 2017 Netronome Systems, Inc.
+ *
+ * This software is dual licensed under the GNU General License Version 2,
+ * June 1991 as shown in the file COPYING in the top-level directory of this
+ * source tree or the BSD 2-Clause License provided below.  You have the
+ * option to license this software under the complete terms of either license.
+ *
+ * The BSD 2-Clause License:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      1. Redistributions of source code must retain the above
+ *         copyright notice, this list of conditions and the following
+ *         disclaimer.
+ *
+ *      2. Redistributions in binary form must reproduce the above
+ *         copyright notice, this list of conditions and the following
+ *         disclaimer in the documentation and/or other materials
+ *         provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef NFP_BPF_FW_H
+#define NFP_BPF_FW_H 1
+
+#include <linux/bitops.h>
+#include <linux/types.h>
+
+enum bpf_cap_tlv_type {
+	NFP_BPF_CAP_TYPE_ADJUST_HEAD	= 2,
+};
+
+struct nfp_bpf_cap_tlv_adjust_head {
+	__le32 flags;
+	__le32 off_min;
+	__le32 off_max;
+	__le32 guaranteed_sub;
+	__le32 guaranteed_add;
+};
+
+#define NFP_BPF_ADJUST_HEAD_NO_META	BIT(0)
+
+#endif
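
The capability area that fw.h describes is consumed as a sequence of TLVs — a 32-bit type, a 32-bit length, then the value — which nfp_bpf_parse_capabilities() walks later in this patch. A simplified host-side sketch of such a walk, assuming the fields have already been copied out of device memory in host byte order (the real code reads via readl() from a mapped CPP area):

#include <stdint.h>
#include <string.h>

/* Simplified TLV walk: u32 type, u32 length, then `length` value bytes.
 * Returns -1 on a truncated TLV or trailing garbage, 0 otherwise.
 */
static int walk_caps(const uint8_t *buf, size_t size)
{
	size_t off = 0;

	while (off + 8 <= size) {
		uint32_t type, length;

		memcpy(&type, buf + off, sizeof(type));
		memcpy(&length, buf + off + 4, sizeof(length));
		off += 8;

		if (length > size - off)
			return -1;	/* value runs past the buffer */

		/* Value occupies buf[off] .. buf[off + length - 1];
		 * dispatch on `type` here, skipping unknown types as
		 * the driver does.
		 */
		(void)type;
		off += length;
	}

	return off == size ? 0 : -1;
}
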
diff --git a/drivers/net/ethernet/netronome/nfp/bpf/jit.c b/drivers/net/ethernet/netronome/nfp/bpf/jit.c
index 995e954..47c5224 100644
--- a/drivers/net/ethernet/netronome/nfp/bpf/jit.c
+++ b/drivers/net/ethernet/netronome/nfp/bpf/jit.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2016 Netronome Systems, Inc.
+ * Copyright (C) 2016-2017 Netronome Systems, Inc.
  *
  * This software is dual licensed under the GNU General License Version 2,
  * June 1991 as shown in the file COPYING in the top-level directory of this
@@ -33,6 +33,7 @@
 
 #define pr_fmt(fmt)	"NFP net bpf: " fmt
 
+#include <linux/bug.h>
 #include <linux/kernel.h>
 #include <linux/bpf.h>
 #include <linux/filter.h>
@@ -66,12 +67,6 @@
 	     next2 = nfp_meta_next(next))
 
 static bool
-nfp_meta_has_next(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
-{
-	return meta->l.next != &nfp_prog->insns;
-}
-
-static bool
 nfp_meta_has_prev(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
 {
 	return meta->l.prev != &nfp_prog->insns;
@@ -90,19 +85,25 @@ static void nfp_prog_push(struct nfp_prog *nfp_prog, u64 insn)
 
 static unsigned int nfp_prog_current_offset(struct nfp_prog *nfp_prog)
 {
-	return nfp_prog->start_off + nfp_prog->prog_len;
+	return nfp_prog->prog_len;
 }
 
-static unsigned int
-nfp_prog_offset_to_index(struct nfp_prog *nfp_prog, unsigned int offset)
+static bool
+nfp_prog_confirm_current_offset(struct nfp_prog *nfp_prog, unsigned int off)
 {
-	return offset - nfp_prog->start_off;
+	/* If there is a recorded error, we may have dropped instructions;
+	 * that doesn't have to be due to a translator bug, and the translation
+	 * will fail anyway, so just return OK.
+	 */
+	if (nfp_prog->error)
+		return true;
+	return !WARN_ON_ONCE(nfp_prog_current_offset(nfp_prog) != off);
 }
 
 /* --- Emitters --- */
 static void
 __emit_cmd(struct nfp_prog *nfp_prog, enum cmd_tgt_map op,
-	   u8 mode, u8 xfer, u8 areg, u8 breg, u8 size, bool sync)
+	   u8 mode, u8 xfer, u8 areg, u8 breg, u8 size, bool sync, bool indir)
 {
 	enum cmd_ctx_swap ctx;
 	u64 insn;
@@ -120,14 +121,15 @@ __emit_cmd(struct nfp_prog *nfp_prog, enum cmd_tgt_map op,
 		FIELD_PREP(OP_CMD_CNT, size) |
 		FIELD_PREP(OP_CMD_SIG, sync) |
 		FIELD_PREP(OP_CMD_TGT_CMD, cmd_tgt_act[op].tgt_cmd) |
+		FIELD_PREP(OP_CMD_INDIR, indir) |
 		FIELD_PREP(OP_CMD_MODE, mode);
 
 	nfp_prog_push(nfp_prog, insn);
 }
 
 static void
-emit_cmd(struct nfp_prog *nfp_prog, enum cmd_tgt_map op,
-	 u8 mode, u8 xfer, swreg lreg, swreg rreg, u8 size, bool sync)
+emit_cmd_any(struct nfp_prog *nfp_prog, enum cmd_tgt_map op, u8 mode, u8 xfer,
+	     swreg lreg, swreg rreg, u8 size, bool sync, bool indir)
 {
 	struct nfp_insn_re_regs reg;
 	int err;
@@ -148,7 +150,22 @@ emit_cmd(struct nfp_prog *nfp_prog, enum cmd_tgt_map op,
 		return;
 	}
 
-	__emit_cmd(nfp_prog, op, mode, xfer, reg.areg, reg.breg, size, sync);
+	__emit_cmd(nfp_prog, op, mode, xfer, reg.areg, reg.breg, size, sync,
+		   indir);
+}
+
+static void
+emit_cmd(struct nfp_prog *nfp_prog, enum cmd_tgt_map op, u8 mode, u8 xfer,
+	 swreg lreg, swreg rreg, u8 size, bool sync)
+{
+	emit_cmd_any(nfp_prog, op, mode, xfer, lreg, rreg, size, sync, false);
+}
+
+static void
+emit_cmd_indir(struct nfp_prog *nfp_prog, enum cmd_tgt_map op, u8 mode, u8 xfer,
+	       swreg lreg, swreg rreg, u8 size, bool sync)
+{
+	emit_cmd_any(nfp_prog, op, mode, xfer, lreg, rreg, size, sync, true);
 }
 
 static void
@@ -172,22 +189,28 @@ __emit_br(struct nfp_prog *nfp_prog, enum br_mask mask, enum br_ev_pip ev_pip,
 	nfp_prog_push(nfp_prog, insn);
 }
 
-static void emit_br_def(struct nfp_prog *nfp_prog, u16 addr, u8 defer)
+static void
+emit_br_relo(struct nfp_prog *nfp_prog, enum br_mask mask, u16 addr, u8 defer,
+	     enum nfp_relo_type relo)
 {
-	if (defer > 2) {
+	if (mask == BR_UNC && defer > 2) {
 		pr_err("BUG: branch defer out of bounds %d\n", defer);
 		nfp_prog->error = -EFAULT;
 		return;
 	}
-	__emit_br(nfp_prog, BR_UNC, BR_EV_PIP_UNCOND, BR_CSS_NONE, addr, defer);
+
+	__emit_br(nfp_prog, mask,
+		  mask != BR_UNC ? BR_EV_PIP_COND : BR_EV_PIP_UNCOND,
+		  BR_CSS_NONE, addr, defer);
+
+	nfp_prog->prog[nfp_prog->prog_len - 1] |=
+		FIELD_PREP(OP_RELO_TYPE, relo);
 }
 
 static void
 emit_br(struct nfp_prog *nfp_prog, enum br_mask mask, u16 addr, u8 defer)
 {
-	__emit_br(nfp_prog, mask,
-		  mask != BR_UNC ? BR_EV_PIP_COND : BR_EV_PIP_UNCOND,
-		  BR_CSS_NONE, addr, defer);
+	emit_br_relo(nfp_prog, mask, addr, defer, RELO_BR_REL);
 }
 
 static void
@@ -230,9 +253,11 @@ emit_immed(struct nfp_prog *nfp_prog, swreg dst, u16 imm,
 		return;
 	}
 
-	__emit_immed(nfp_prog, reg.areg, reg.breg, imm >> 8, width,
-		     invert, shift, reg.wr_both,
-		     reg.dst_lmextn, reg.src_lmextn);
+	/* Use reg.dst when destination is No-Dest. */
+	__emit_immed(nfp_prog,
+		     swreg_type(dst) == NN_REG_NONE ? reg.dst : reg.areg,
+		     reg.breg, imm >> 8, width, invert, shift,
+		     reg.wr_both, reg.dst_lmextn, reg.src_lmextn);
 }
 
 static void
@@ -490,16 +515,6 @@ static void wrp_nops(struct nfp_prog *nfp_prog, unsigned int count)
 		emit_nop(nfp_prog);
 }
 
-static void
-wrp_br_special(struct nfp_prog *nfp_prog, enum br_mask mask,
-	       enum br_special special)
-{
-	emit_br(nfp_prog, mask, 0, 0);
-
-	nfp_prog->prog[nfp_prog->prog_len - 1] |=
-		FIELD_PREP(OP_BR_SPECIAL, special);
-}
-
 static void wrp_mov(struct nfp_prog *nfp_prog, swreg dst, swreg src)
 {
 	emit_alu(nfp_prog, dst, reg_none(), ALU_OP_NONE, src);
@@ -510,6 +525,147 @@ static void wrp_reg_mov(struct nfp_prog *nfp_prog, u16 dst, u16 src)
 	wrp_mov(nfp_prog, reg_both(dst), reg_b(src));
 }
 
+/* wrp_reg_subpart() - load @field_len bytes from @offset of @src, write the
+ * result to @dst from the low end.
+ */
+static void
+wrp_reg_subpart(struct nfp_prog *nfp_prog, swreg dst, swreg src, u8 field_len,
+		u8 offset)
+{
+	enum shf_sc sc = offset ? SHF_SC_R_SHF : SHF_SC_NONE;
+	u8 mask = (1 << field_len) - 1;
+
+	emit_ld_field_any(nfp_prog, dst, mask, src, sc, offset * 8, true);
+}
+
+/* NFP has a Command Push Pull bus which supports bulk memory operations. */
+static int nfp_cpp_memcpy(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
+{
+	bool descending_seq = meta->ldst_gather_len < 0;
+	s16 len = abs(meta->ldst_gather_len);
+	swreg src_base, off;
+	unsigned int i;
+	u8 xfer_num;
+
+	off = re_load_imm_any(nfp_prog, meta->insn.off, imm_b(nfp_prog));
+	src_base = reg_a(meta->insn.src_reg * 2);
+	xfer_num = round_up(len, 4) / 4;
+
+	/* Set up PREV_ALU fields to override the memory read length. */
+	if (len > 32)
+		wrp_immed(nfp_prog, reg_none(),
+			  CMD_OVE_LEN | FIELD_PREP(CMD_OV_LEN, xfer_num - 1));
+
+	/* Memory read from source addr into transfer-in registers. */
+	emit_cmd_any(nfp_prog, CMD_TGT_READ32_SWAP, CMD_MODE_32b, 0, src_base,
+		     off, xfer_num - 1, true, len > 32);
+
+	/* Move from transfer-in to transfer-out. */
+	for (i = 0; i < xfer_num; i++)
+		wrp_mov(nfp_prog, reg_xfer(i), reg_xfer(i));
+
+	off = re_load_imm_any(nfp_prog, meta->paired_st->off, imm_b(nfp_prog));
+
+	if (len <= 8) {
+		/* Use single direct_ref write8. */
+		emit_cmd(nfp_prog, CMD_TGT_WRITE8_SWAP, CMD_MODE_32b, 0,
+			 reg_a(meta->paired_st->dst_reg * 2), off, len - 1,
+			 true);
+	} else if (len <= 32 && IS_ALIGNED(len, 4)) {
+		/* Use single direct_ref write32. */
+		emit_cmd(nfp_prog, CMD_TGT_WRITE32_SWAP, CMD_MODE_32b, 0,
+			 reg_a(meta->paired_st->dst_reg * 2), off, xfer_num - 1,
+			 true);
+	} else if (len <= 32) {
+		/* Use single indirect_ref write8. */
+		wrp_immed(nfp_prog, reg_none(),
+			  CMD_OVE_LEN | FIELD_PREP(CMD_OV_LEN, len - 1));
+		emit_cmd_indir(nfp_prog, CMD_TGT_WRITE8_SWAP, CMD_MODE_32b, 0,
+			       reg_a(meta->paired_st->dst_reg * 2), off,
+			       len - 1, true);
+	} else if (IS_ALIGNED(len, 4)) {
+		/* Use single indirect_ref write32. */
+		wrp_immed(nfp_prog, reg_none(),
+			  CMD_OVE_LEN | FIELD_PREP(CMD_OV_LEN, xfer_num - 1));
+		emit_cmd_indir(nfp_prog, CMD_TGT_WRITE32_SWAP, CMD_MODE_32b, 0,
+			       reg_a(meta->paired_st->dst_reg * 2), off,
+			       xfer_num - 1, true);
+	} else if (len <= 40) {
+		/* Use one direct_ref write32 to write the first 32 bytes, then
+		 * another direct_ref write8 to write the remaining bytes.
+		 */
+		emit_cmd(nfp_prog, CMD_TGT_WRITE32_SWAP, CMD_MODE_32b, 0,
+			 reg_a(meta->paired_st->dst_reg * 2), off, 7,
+			 true);
+
+		off = re_load_imm_any(nfp_prog, meta->paired_st->off + 32,
+				      imm_b(nfp_prog));
+		emit_cmd(nfp_prog, CMD_TGT_WRITE8_SWAP, CMD_MODE_32b, 8,
+			 reg_a(meta->paired_st->dst_reg * 2), off, len - 33,
+			 true);
+	} else {
+		/* Use one indirect_ref write32 to write the 4-byte-aligned length,
+		 * then another direct_ref write8 to write the remaining bytes.
+		 */
+		u8 new_off;
+
+		wrp_immed(nfp_prog, reg_none(),
+			  CMD_OVE_LEN | FIELD_PREP(CMD_OV_LEN, xfer_num - 2));
+		emit_cmd_indir(nfp_prog, CMD_TGT_WRITE32_SWAP, CMD_MODE_32b, 0,
+			       reg_a(meta->paired_st->dst_reg * 2), off,
+			       xfer_num - 2, true);
+		new_off = meta->paired_st->off + (xfer_num - 1) * 4;
+		off = re_load_imm_any(nfp_prog, new_off, imm_b(nfp_prog));
+		emit_cmd(nfp_prog, CMD_TGT_WRITE8_SWAP, CMD_MODE_32b,
+			 xfer_num - 1, reg_a(meta->paired_st->dst_reg * 2), off,
+			 (len & 0x3) - 1, true);
+	}
+
+	/* TODO: The following extra load is to make sure the data flow is
+	 *  identical before and after the memory copy optimization.
+	 *
+	 *  The load destination register is not guaranteed to be dead, so we
+	 *  need to make sure it is loaded with the same value as before
+	 *  this transformation.
+	 *
+	 *  These extra loads could be removed once we have accurate register
+	 *  usage information.
+	 */
+	if (descending_seq)
+		xfer_num = 0;
+	else if (BPF_SIZE(meta->insn.code) != BPF_DW)
+		xfer_num = xfer_num - 1;
+	else
+		xfer_num = xfer_num - 2;
+
+	switch (BPF_SIZE(meta->insn.code)) {
+	case BPF_B:
+		wrp_reg_subpart(nfp_prog, reg_both(meta->insn.dst_reg * 2),
+				reg_xfer(xfer_num), 1,
+				IS_ALIGNED(len, 4) ? 3 : (len & 3) - 1);
+		break;
+	case BPF_H:
+		wrp_reg_subpart(nfp_prog, reg_both(meta->insn.dst_reg * 2),
+				reg_xfer(xfer_num), 2, (len & 3) ^ 2);
+		break;
+	case BPF_W:
+		wrp_mov(nfp_prog, reg_both(meta->insn.dst_reg * 2),
+			reg_xfer(0));
+		break;
+	case BPF_DW:
+		wrp_mov(nfp_prog, reg_both(meta->insn.dst_reg * 2),
+			reg_xfer(xfer_num));
+		wrp_mov(nfp_prog, reg_both(meta->insn.dst_reg * 2 + 1),
+			reg_xfer(xfer_num + 1));
+		break;
+	}
+
+	if (BPF_SIZE(meta->insn.code) != BPF_DW)
+		wrp_immed(nfp_prog, reg_both(meta->insn.dst_reg * 2 + 1), 0);
+
+	return 0;
+}
+
 static int
 data_ld(struct nfp_prog *nfp_prog, swreg offset, u8 dst_gpr, int size)
 {
@@ -583,7 +739,7 @@ construct_data_ind_ld(struct nfp_prog *nfp_prog, u16 offset, u16 src, u8 size)
 		 imm_a(nfp_prog), ALU_OP_ADD, reg_imm(size));
 	emit_alu(nfp_prog, reg_none(),
 		 plen_reg(nfp_prog), ALU_OP_SUB, imm_a(nfp_prog));
-	wrp_br_special(nfp_prog, BR_BLO, OP_BR_GO_ABORT);
+	emit_br_relo(nfp_prog, BR_BLO, BR_OFF_RELO, 0, RELO_BR_GO_ABORT);
 
 	/* Load data */
 	return data_ld(nfp_prog, imm_b(nfp_prog), 0, size);
@@ -596,7 +752,7 @@ static int construct_data_ld(struct nfp_prog *nfp_prog, u16 offset, u8 size)
 	/* Check packet length */
 	tmp_reg = ur_load_imm_any(nfp_prog, offset + size, imm_a(nfp_prog));
 	emit_alu(nfp_prog, reg_none(), plen_reg(nfp_prog), ALU_OP_SUB, tmp_reg);
-	wrp_br_special(nfp_prog, BR_BLO, OP_BR_GO_ABORT);
+	emit_br_relo(nfp_prog, BR_BLO, BR_OFF_RELO, 0, RELO_BR_GO_ABORT);
 
 	/* Load data */
 	tmp_reg = re_load_imm_any(nfp_prog, offset, imm_b(nfp_prog));
@@ -975,9 +1131,6 @@ wrp_test_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
 {
 	const struct bpf_insn *insn = &meta->insn;
 
-	if (insn->off < 0) /* TODO */
-		return -EOPNOTSUPP;
-
 	wrp_test_reg_one(nfp_prog, insn->dst_reg * 2, alu_op,
 			 insn->src_reg * 2, br_mask, insn->off);
 	wrp_test_reg_one(nfp_prog, insn->dst_reg * 2 + 1, alu_op,
@@ -995,9 +1148,6 @@ wrp_cmp_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
 	u8 reg = insn->dst_reg * 2;
 	swreg tmp_reg;
 
-	if (insn->off < 0) /* TODO */
-		return -EOPNOTSUPP;
-
 	tmp_reg = ur_load_imm_any(nfp_prog, imm & ~0U, imm_b(nfp_prog));
 	if (!swap)
 		emit_alu(nfp_prog, reg_none(), reg_a(reg), ALU_OP_SUB, tmp_reg);
@@ -1027,9 +1177,6 @@ wrp_cmp_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
 	areg = insn->dst_reg * 2;
 	breg = insn->src_reg * 2;
 
-	if (insn->off < 0) /* TODO */
-		return -EOPNOTSUPP;
-
 	if (swap) {
 		areg ^= breg;
 		breg ^= areg;
@@ -1052,6 +1199,86 @@ static void wrp_end32(struct nfp_prog *nfp_prog, swreg reg_in, u8 gpr_out)
 		      SHF_SC_R_ROT, 16);
 }
 
+static int adjust_head(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
+{
+	swreg tmp = imm_a(nfp_prog), tmp_len = imm_b(nfp_prog);
+	struct nfp_bpf_cap_adjust_head *adjust_head;
+	u32 ret_einval, end;
+
+	adjust_head = &nfp_prog->bpf->adjust_head;
+
+	/* Optimized version - 5 vs 14 cycles */
+	if (nfp_prog->adjust_head_location != UINT_MAX) {
+		if (WARN_ON_ONCE(nfp_prog->adjust_head_location != meta->n))
+			return -EINVAL;
+
+		emit_alu(nfp_prog, pptr_reg(nfp_prog),
+			 reg_a(2 * 2), ALU_OP_ADD, pptr_reg(nfp_prog));
+		emit_alu(nfp_prog, plen_reg(nfp_prog),
+			 plen_reg(nfp_prog), ALU_OP_SUB, reg_a(2 * 2));
+		emit_alu(nfp_prog, pv_len(nfp_prog),
+			 pv_len(nfp_prog), ALU_OP_SUB, reg_a(2 * 2));
+
+		wrp_immed(nfp_prog, reg_both(0), 0);
+		wrp_immed(nfp_prog, reg_both(1), 0);
+
+		/* TODO: when adjust head is guaranteed to succeed we can
+		 * also eliminate the following if (r0 == 0) branch.
+		 */
+
+		return 0;
+	}
+
+	ret_einval = nfp_prog_current_offset(nfp_prog) + 14;
+	end = ret_einval + 2;
+
+	/* We need to use a temp because offset is just a part of the pkt ptr */
+	emit_alu(nfp_prog, tmp,
+		 reg_a(2 * 2), ALU_OP_ADD_2B, pptr_reg(nfp_prog));
+
+	/* Validate result will fit within FW datapath constraints */
+	emit_alu(nfp_prog, reg_none(),
+		 tmp, ALU_OP_SUB, reg_imm(adjust_head->off_min));
+	emit_br(nfp_prog, BR_BLO, ret_einval, 0);
+	emit_alu(nfp_prog, reg_none(),
+		 reg_imm(adjust_head->off_max), ALU_OP_SUB, tmp);
+	emit_br(nfp_prog, BR_BLO, ret_einval, 0);
+
+	/* Validate the length is at least ETH_HLEN */
+	emit_alu(nfp_prog, tmp_len,
+		 plen_reg(nfp_prog), ALU_OP_SUB, reg_a(2 * 2));
+	emit_alu(nfp_prog, reg_none(),
+		 tmp_len, ALU_OP_SUB, reg_imm(ETH_HLEN));
+	emit_br(nfp_prog, BR_BMI, ret_einval, 0);
+
+	/* Load the ret code */
+	wrp_immed(nfp_prog, reg_both(0), 0);
+	wrp_immed(nfp_prog, reg_both(1), 0);
+
+	/* Modify the packet metadata */
+	emit_ld_field(nfp_prog, pptr_reg(nfp_prog), 0x3, tmp, SHF_SC_NONE, 0);
+
+	/* Skip over the -EINVAL ret code (defer 2) */
+	emit_br(nfp_prog, BR_UNC, end, 2);
+
+	emit_alu(nfp_prog, plen_reg(nfp_prog),
+		 plen_reg(nfp_prog), ALU_OP_SUB, reg_a(2 * 2));
+	emit_alu(nfp_prog, pv_len(nfp_prog),
+		 pv_len(nfp_prog), ALU_OP_SUB, reg_a(2 * 2));
+
+	/* return -EINVAL target */
+	if (!nfp_prog_confirm_current_offset(nfp_prog, ret_einval))
+		return -EINVAL;
+
+	wrp_immed(nfp_prog, reg_both(0), -22);
+	wrp_immed(nfp_prog, reg_both(1), ~0);
+
+	if (!nfp_prog_confirm_current_offset(nfp_prog, end))
+		return -EINVAL;
+
+	return 0;
+}
+
 /* --- Callbacks --- */
 static int mov_reg64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
 {
@@ -1494,6 +1721,9 @@ static int
 mem_ldx(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
 	unsigned int size)
 {
+	if (meta->ldst_gather_len)
+		return nfp_cpp_memcpy(nfp_prog, meta);
+
 	if (meta->ptr.type == PTR_TO_CTX) {
 		if (nfp_prog->type == BPF_PROG_TYPE_XDP)
 			return mem_ldx_xdp(nfp_prog, meta, size);
@@ -1630,8 +1860,6 @@ static int mem_stx8(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
 
 static int jump(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
 {
-	if (meta->insn.off < 0) /* TODO */
-		return -EOPNOTSUPP;
 	emit_br(nfp_prog, BR_UNC, meta->insn.off, 0);
 
 	return 0;
@@ -1646,9 +1874,6 @@ static int jeq_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
 	or1 = reg_a(insn->dst_reg * 2);
 	or2 = reg_b(insn->dst_reg * 2 + 1);
 
-	if (insn->off < 0) /* TODO */
-		return -EOPNOTSUPP;
-
 	if (imm & ~0U) {
 		tmp_reg = ur_load_imm_any(nfp_prog, imm & ~0U, imm_b(nfp_prog));
 		emit_alu(nfp_prog, imm_a(nfp_prog),
@@ -1689,15 +1914,32 @@ static int jle_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
 	return wrp_cmp_imm(nfp_prog, meta, BR_BHS, true);
 }
 
+static int jsgt_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
+{
+	return wrp_cmp_imm(nfp_prog, meta, BR_BLT, true);
+}
+
+static int jsge_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
+{
+	return wrp_cmp_imm(nfp_prog, meta, BR_BGE, false);
+}
+
+static int jslt_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
+{
+	return wrp_cmp_imm(nfp_prog, meta, BR_BLT, false);
+}
+
+static int jsle_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
+{
+	return wrp_cmp_imm(nfp_prog, meta, BR_BGE, true);
+}
+
 static int jset_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
 {
 	const struct bpf_insn *insn = &meta->insn;
 	u64 imm = insn->imm; /* sign extend */
 	swreg tmp_reg;
 
-	if (insn->off < 0) /* TODO */
-		return -EOPNOTSUPP;
-
 	if (!imm) {
 		meta->skip = true;
 		return 0;
@@ -1726,9 +1968,6 @@ static int jne_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
 	u64 imm = insn->imm; /* sign extend */
 	swreg tmp_reg;
 
-	if (insn->off < 0) /* TODO */
-		return -EOPNOTSUPP;
-
 	if (!imm) {
 		emit_alu(nfp_prog, reg_none(), reg_a(insn->dst_reg * 2),
 			 ALU_OP_OR, reg_b(insn->dst_reg * 2 + 1));
@@ -1753,9 +1992,6 @@ static int jeq_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
 {
 	const struct bpf_insn *insn = &meta->insn;
 
-	if (insn->off < 0) /* TODO */
-		return -EOPNOTSUPP;
-
 	emit_alu(nfp_prog, imm_a(nfp_prog), reg_a(insn->dst_reg * 2),
 		 ALU_OP_XOR, reg_b(insn->src_reg * 2));
 	emit_alu(nfp_prog, imm_b(nfp_prog), reg_a(insn->dst_reg * 2 + 1),
@@ -1787,6 +2023,26 @@ static int jle_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
 	return wrp_cmp_reg(nfp_prog, meta, BR_BHS, true);
 }
 
+static int jsgt_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
+{
+	return wrp_cmp_reg(nfp_prog, meta, BR_BLT, true);
+}
+
+static int jsge_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
+{
+	return wrp_cmp_reg(nfp_prog, meta, BR_BGE, false);
+}
+
+static int jslt_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
+{
+	return wrp_cmp_reg(nfp_prog, meta, BR_BLT, false);
+}
+
+static int jsle_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
+{
+	return wrp_cmp_reg(nfp_prog, meta, BR_BGE, true);
+}
+
 static int jset_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
 {
 	return wrp_test_reg(nfp_prog, meta, ALU_OP_AND, BR_BNE);
@@ -1797,9 +2053,20 @@ static int jne_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
 	return wrp_test_reg(nfp_prog, meta, ALU_OP_XOR, BR_BNE);
 }
 
+static int call(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
+{
+	switch (meta->insn.imm) {
+	case BPF_FUNC_xdp_adjust_head:
+		return adjust_head(nfp_prog, meta);
+	default:
+		WARN_ONCE(1, "verifier allowed unsupported function\n");
+		return -EOPNOTSUPP;
+	}
+}
+
 static int goto_out(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
 {
-	wrp_br_special(nfp_prog, BR_UNC, OP_BR_GO_OUT);
+	emit_br_relo(nfp_prog, BR_UNC, BR_OFF_RELO, 0, RELO_BR_GO_OUT);
 
 	return 0;
 }
@@ -1860,6 +2127,10 @@ static const instr_cb_t instr_cb[256] = {
 	[BPF_JMP | BPF_JGE | BPF_K] =	jge_imm,
 	[BPF_JMP | BPF_JLT | BPF_K] =	jlt_imm,
 	[BPF_JMP | BPF_JLE | BPF_K] =	jle_imm,
+	[BPF_JMP | BPF_JSGT | BPF_K] =  jsgt_imm,
+	[BPF_JMP | BPF_JSGE | BPF_K] =  jsge_imm,
+	[BPF_JMP | BPF_JSLT | BPF_K] =  jslt_imm,
+	[BPF_JMP | BPF_JSLE | BPF_K] =  jsle_imm,
 	[BPF_JMP | BPF_JSET | BPF_K] =	jset_imm,
 	[BPF_JMP | BPF_JNE | BPF_K] =	jne_imm,
 	[BPF_JMP | BPF_JEQ | BPF_X] =	jeq_reg,
@@ -1867,99 +2138,64 @@ static const instr_cb_t instr_cb[256] = {
 	[BPF_JMP | BPF_JGE | BPF_X] =	jge_reg,
 	[BPF_JMP | BPF_JLT | BPF_X] =	jlt_reg,
 	[BPF_JMP | BPF_JLE | BPF_X] =	jle_reg,
+	[BPF_JMP | BPF_JSGT | BPF_X] =  jsgt_reg,
+	[BPF_JMP | BPF_JSGE | BPF_X] =  jsge_reg,
+	[BPF_JMP | BPF_JSLT | BPF_X] =  jslt_reg,
+	[BPF_JMP | BPF_JSLE | BPF_X] =  jsle_reg,
 	[BPF_JMP | BPF_JSET | BPF_X] =	jset_reg,
 	[BPF_JMP | BPF_JNE | BPF_X] =	jne_reg,
+	[BPF_JMP | BPF_CALL] =		call,
 	[BPF_JMP | BPF_EXIT] =		goto_out,
 };
 
-/* --- Misc code --- */
-static void br_set_offset(u64 *instr, u16 offset)
-{
-	u16 addr_lo, addr_hi;
-
-	addr_lo = offset & (OP_BR_ADDR_LO >> __bf_shf(OP_BR_ADDR_LO));
-	addr_hi = offset != addr_lo;
-	*instr &= ~(OP_BR_ADDR_HI | OP_BR_ADDR_LO);
-	*instr |= FIELD_PREP(OP_BR_ADDR_HI, addr_hi);
-	*instr |= FIELD_PREP(OP_BR_ADDR_LO, addr_lo);
-}
-
 /* --- Assembler logic --- */
 static int nfp_fixup_branches(struct nfp_prog *nfp_prog)
 {
-	struct nfp_insn_meta *meta, *next;
-	u32 off, br_idx;
-	u32 idx;
+	struct nfp_insn_meta *meta, *jmp_dst;
+	u32 idx, br_idx;
 
-	nfp_for_each_insn_walk2(nfp_prog, meta, next) {
+	list_for_each_entry(meta, &nfp_prog->insns, l) {
 		if (meta->skip)
 			continue;
+		if (meta->insn.code == (BPF_JMP | BPF_CALL))
+			continue;
 		if (BPF_CLASS(meta->insn.code) != BPF_JMP)
 			continue;
 
-		br_idx = nfp_prog_offset_to_index(nfp_prog, next->off) - 1;
+		if (list_is_last(&meta->l, &nfp_prog->insns))
+			br_idx = nfp_prog->last_bpf_off;
+		else
+			br_idx = list_next_entry(meta, l)->off - 1;
+
 		if (!nfp_is_br(nfp_prog->prog[br_idx])) {
 			pr_err("Fixup found block not ending in branch %d %02x %016llx!!\n",
 			       br_idx, meta->insn.code, nfp_prog->prog[br_idx]);
 			return -ELOOP;
 		}
 		/* Leave special branches for later */
-		if (FIELD_GET(OP_BR_SPECIAL, nfp_prog->prog[br_idx]))
+		if (FIELD_GET(OP_RELO_TYPE, nfp_prog->prog[br_idx]) !=
+		    RELO_BR_REL)
 			continue;
 
-		/* Find the target offset in assembler realm */
-		off = meta->insn.off;
-		if (!off) {
-			pr_err("Fixup found zero offset!!\n");
+		if (!meta->jmp_dst) {
+			pr_err("Non-exit jump doesn't have destination info recorded!!\n");
 			return -ELOOP;
 		}
 
-		while (off && nfp_meta_has_next(nfp_prog, next)) {
-			next = nfp_meta_next(next);
-			off--;
-		}
-		if (off) {
-			pr_err("Fixup found too large jump!! %d\n", off);
-			return -ELOOP;
-		}
+		jmp_dst = meta->jmp_dst;
 
-		if (next->skip) {
+		if (jmp_dst->skip) {
 			pr_err("Branch landing on removed instruction!!\n");
 			return -ELOOP;
 		}
 
-		for (idx = nfp_prog_offset_to_index(nfp_prog, meta->off);
-		     idx <= br_idx; idx++) {
+		for (idx = meta->off; idx <= br_idx; idx++) {
 			if (!nfp_is_br(nfp_prog->prog[idx]))
 				continue;
-			br_set_offset(&nfp_prog->prog[idx], next->off);
+			br_set_offset(&nfp_prog->prog[idx], jmp_dst->off);
 		}
 	}
 
-	/* Fixup 'goto out's separately, they can be scattered around */
-	for (br_idx = 0; br_idx < nfp_prog->prog_len; br_idx++) {
-		enum br_special special;
-
-		if ((nfp_prog->prog[br_idx] & OP_BR_BASE_MASK) != OP_BR_BASE)
-			continue;
-
-		special = FIELD_GET(OP_BR_SPECIAL, nfp_prog->prog[br_idx]);
-		switch (special) {
-		case OP_BR_NORMAL:
-			break;
-		case OP_BR_GO_OUT:
-			br_set_offset(&nfp_prog->prog[br_idx],
-				      nfp_prog->tgt_out);
-			break;
-		case OP_BR_GO_ABORT:
-			br_set_offset(&nfp_prog->prog[br_idx],
-				      nfp_prog->tgt_abort);
-			break;
-		}
-
-		nfp_prog->prog[br_idx] &= ~OP_BR_SPECIAL;
-	}
-
 	return 0;
 }
 
@@ -1987,7 +2223,7 @@ static void nfp_outro_tc_da(struct nfp_prog *nfp_prog)
 	/* Target for aborts */
 	nfp_prog->tgt_abort = nfp_prog_current_offset(nfp_prog);
 
-	emit_br_def(nfp_prog, nfp_prog->tgt_done, 2);
+	emit_br_relo(nfp_prog, BR_UNC, BR_OFF_RELO, 2, RELO_BR_NEXT_PKT);
 
 	wrp_mov(nfp_prog, reg_a(0), NFP_BPF_ABI_FLAGS);
 	emit_ld_field(nfp_prog, reg_a(0), 0xc, reg_imm(0x11), SHF_SC_L_SHF, 16);
@@ -2014,7 +2250,7 @@ static void nfp_outro_tc_da(struct nfp_prog *nfp_prog)
 	emit_shf(nfp_prog, reg_b(2),
 		 reg_imm(0xf), SHF_OP_AND, reg_b(3), SHF_SC_R_SHF, 0);
 
-	emit_br_def(nfp_prog, nfp_prog->tgt_done, 2);
+	emit_br_relo(nfp_prog, BR_UNC, BR_OFF_RELO, 2, RELO_BR_NEXT_PKT);
 
 	emit_shf(nfp_prog, reg_b(2),
 		 reg_a(2), SHF_OP_OR, reg_b(2), SHF_SC_L_SHF, 4);
@@ -2033,7 +2269,7 @@ static void nfp_outro_xdp(struct nfp_prog *nfp_prog)
 	/* Target for aborts */
 	nfp_prog->tgt_abort = nfp_prog_current_offset(nfp_prog);
 
-	emit_br_def(nfp_prog, nfp_prog->tgt_done, 2);
+	emit_br_relo(nfp_prog, BR_UNC, BR_OFF_RELO, 2, RELO_BR_NEXT_PKT);
 
 	wrp_mov(nfp_prog, reg_a(0), NFP_BPF_ABI_FLAGS);
 	emit_ld_field(nfp_prog, reg_a(0), 0xc, reg_imm(0x82), SHF_SC_L_SHF, 16);
@@ -2054,7 +2290,7 @@ static void nfp_outro_xdp(struct nfp_prog *nfp_prog)
 	emit_shf(nfp_prog, reg_b(2),
 		 reg_imm(0xff), SHF_OP_AND, reg_b(2), SHF_SC_R_SHF, 0);
 
-	emit_br_def(nfp_prog, nfp_prog->tgt_done, 2);
+	emit_br_relo(nfp_prog, BR_UNC, BR_OFF_RELO, 2, RELO_BR_NEXT_PKT);
 
 	wrp_mov(nfp_prog, reg_a(0), NFP_BPF_ABI_FLAGS);
 	emit_ld_field(nfp_prog, reg_a(0), 0xc, reg_b(2), SHF_SC_L_SHF, 16);
@@ -2105,6 +2341,8 @@ static int nfp_translate(struct nfp_prog *nfp_prog)
 		nfp_prog->n_translated++;
 	}
 
+	nfp_prog->last_bpf_off = nfp_prog_current_offset(nfp_prog) - 1;
+
 	nfp_outro(nfp_prog);
 	if (nfp_prog->error)
 		return nfp_prog->error;
@@ -2173,6 +2411,9 @@ static void nfp_bpf_opt_ld_mask(struct nfp_prog *nfp_prog)
 		if (next.src_reg || next.dst_reg)
 			continue;
 
+		if (meta2->flags & FLAG_INSN_IS_JUMP_DST)
+			continue;
+
 		meta2->skip = true;
 	}
 }
@@ -2209,40 +2450,294 @@ static void nfp_bpf_opt_ld_shift(struct nfp_prog *nfp_prog)
 		if (next1.imm != 0x20 || next2.imm != 0x20)
 			continue;
 
+		if (meta2->flags & FLAG_INSN_IS_JUMP_DST ||
+		    meta3->flags & FLAG_INSN_IS_JUMP_DST)
+			continue;
+
 		meta2->skip = true;
 		meta3->skip = true;
 	}
 }
 
+/* A load/store pair that forms a memory copy should look like the following:
+ *
+ *   ld_width R, [addr_src + offset_src]
+ *   st_width [addr_dest + offset_dest], R
+ *
+ * The destination register of the load and the source register of the store
+ * should be the same, and the load and store should operate at the same
+ * width. If either addr_src or addr_dest is the stack pointer, we don't do
+ * the CPP optimization, as the stack is modelled by registers on the NFP.
+ */
+static bool
+curr_pair_is_memcpy(struct nfp_insn_meta *ld_meta,
+		    struct nfp_insn_meta *st_meta)
+{
+	struct bpf_insn *ld = &ld_meta->insn;
+	struct bpf_insn *st = &st_meta->insn;
+
+	if (!is_mbpf_load(ld_meta) || !is_mbpf_store(st_meta))
+		return false;
+
+	if (ld_meta->ptr.type != PTR_TO_PACKET)
+		return false;
+
+	if (st_meta->ptr.type != PTR_TO_PACKET)
+		return false;
+
+	if (BPF_SIZE(ld->code) != BPF_SIZE(st->code))
+		return false;
+
+	if (ld->dst_reg != st->src_reg)
+		return false;
+
+	/* There is jump to the store insn in this pair. */
+	if (st_meta->flags & FLAG_INSN_IS_JUMP_DST)
+		return false;
+
+	return true;
+}
+
+/* Currently, we only support chaining load/store pairs if:
+ *
+ *  - Their address base registers are the same.
+ *  - Their address offsets are in the same order.
+ *  - They operate at the same memory width.
+ *  - There is no jump into the middle of them.
+ */
+static bool
+curr_pair_chain_with_previous(struct nfp_insn_meta *ld_meta,
+			      struct nfp_insn_meta *st_meta,
+			      struct bpf_insn *prev_ld,
+			      struct bpf_insn *prev_st)
+{
+	u8 prev_size, curr_size, prev_ld_base, prev_st_base, prev_ld_dst;
+	struct bpf_insn *ld = &ld_meta->insn;
+	struct bpf_insn *st = &st_meta->insn;
+	s16 prev_ld_off, prev_st_off;
+
+	/* This pair is the start pair. */
+	if (!prev_ld)
+		return true;
+
+	prev_size = BPF_LDST_BYTES(prev_ld);
+	curr_size = BPF_LDST_BYTES(ld);
+	prev_ld_base = prev_ld->src_reg;
+	prev_st_base = prev_st->dst_reg;
+	prev_ld_dst = prev_ld->dst_reg;
+	prev_ld_off = prev_ld->off;
+	prev_st_off = prev_st->off;
+
+	if (ld->dst_reg != prev_ld_dst)
+		return false;
+
+	if (ld->src_reg != prev_ld_base || st->dst_reg != prev_st_base)
+		return false;
+
+	if (curr_size != prev_size)
+		return false;
+
+	/* There is jump to the head of this pair. */
+	if (ld_meta->flags & FLAG_INSN_IS_JUMP_DST)
+		return false;
+
+	/* Both in ascending order. */
+	if (prev_ld_off + prev_size == ld->off &&
+	    prev_st_off + prev_size == st->off)
+		return true;
+
+	/* Both in descending order. */
+	if (ld->off + curr_size == prev_ld_off &&
+	    st->off + curr_size == prev_st_off)
+		return true;
+
+	return false;
+}
+
+/* Return TRUE if a cross memory access happens, i.e. the store area
+ * overlaps the load area such that a later load might read the value
+ * from a previous store. In that case we can't treat the sequence as
+ * a memory copy.
+ */
+static bool
+cross_mem_access(struct bpf_insn *ld, struct nfp_insn_meta *head_ld_meta,
+		 struct nfp_insn_meta *head_st_meta)
+{
+	s16 head_ld_off, head_st_off, ld_off;
+
+	/* Different pointer types do not overlap. */
+	if (head_ld_meta->ptr.type != head_st_meta->ptr.type)
+		return false;
+
+	/* Load and store are both PTR_TO_PACKET; check ID info. */
+	if (head_ld_meta->ptr.id != head_st_meta->ptr.id)
+		return true;
+
+	/* Canonicalize the offsets. Turn all of them against the original
+	 * base register.
+	 */
+	head_ld_off = head_ld_meta->insn.off + head_ld_meta->ptr.off;
+	head_st_off = head_st_meta->insn.off + head_st_meta->ptr.off;
+	ld_off = ld->off + head_ld_meta->ptr.off;
+
+	/* Ascending order cross. */
+	if (ld_off > head_ld_off &&
+	    head_ld_off < head_st_off && ld_off >= head_st_off)
+		return true;
+
+	/* Descending order cross. */
+	if (ld_off < head_ld_off &&
+	    head_ld_off > head_st_off && ld_off <= head_st_off)
+		return true;
+
+	return false;
+}
+
+/* This pass tries to identify the following instruction sequences.
+ *
+ *   load R, [regA + offA]
+ *   store [regB + offB], R
+ *   load R, [regA + offA + const_imm_A]
+ *   store [regB + offB + const_imm_A], R
+ *   load R, [regA + offA + 2 * const_imm_A]
+ *   store [regB + offB + 2 * const_imm_A], R
+ *   ...
+ *
+ * The above sequence is typically generated by the compiler when lowering
+ * memcpy. The NFP prefers using CPP instructions to accelerate it.
+ */
+static void nfp_bpf_opt_ldst_gather(struct nfp_prog *nfp_prog)
+{
+	struct nfp_insn_meta *head_ld_meta = NULL;
+	struct nfp_insn_meta *head_st_meta = NULL;
+	struct nfp_insn_meta *meta1, *meta2;
+	struct bpf_insn *prev_ld = NULL;
+	struct bpf_insn *prev_st = NULL;
+	u8 count = 0;
+
+	nfp_for_each_insn_walk2(nfp_prog, meta1, meta2) {
+		struct bpf_insn *ld = &meta1->insn;
+		struct bpf_insn *st = &meta2->insn;
+
+		/* Reset record status if any of the following is true:
+		 *   - The current insn pair is not load/store.
+		 *   - The load/store pair doesn't chain with previous one.
+		 *   - The chained load/store pair crossed with previous pair.
+		 *   - The chained load/store pairs have a total memory copy
+		 *     size beyond 128 bytes, which is the maximum length a
+		 *     single NFP CPP command can transfer.
+		 */
+		if (!curr_pair_is_memcpy(meta1, meta2) ||
+		    !curr_pair_chain_with_previous(meta1, meta2, prev_ld,
+						   prev_st) ||
+		    (head_ld_meta && (cross_mem_access(ld, head_ld_meta,
+						       head_st_meta) ||
+				      head_ld_meta->ldst_gather_len >= 128))) {
+			if (!count)
+				continue;
+
+			if (count > 1) {
+				s16 prev_ld_off = prev_ld->off;
+				s16 prev_st_off = prev_st->off;
+				s16 head_ld_off = head_ld_meta->insn.off;
+
+				if (prev_ld_off < head_ld_off) {
+					head_ld_meta->insn.off = prev_ld_off;
+					head_st_meta->insn.off = prev_st_off;
+					head_ld_meta->ldst_gather_len =
+						-head_ld_meta->ldst_gather_len;
+				}
+
+				head_ld_meta->paired_st = &head_st_meta->insn;
+				head_st_meta->skip = true;
+			} else {
+				head_ld_meta->ldst_gather_len = 0;
+			}
+
+			/* If the chain is ended by a load/store pair then this
+			 * could serve as the new head of the next chain.
+			 */
+			if (curr_pair_is_memcpy(meta1, meta2)) {
+				head_ld_meta = meta1;
+				head_st_meta = meta2;
+				head_ld_meta->ldst_gather_len =
+					BPF_LDST_BYTES(ld);
+				meta1 = nfp_meta_next(meta1);
+				meta2 = nfp_meta_next(meta2);
+				prev_ld = ld;
+				prev_st = st;
+				count = 1;
+			} else {
+				head_ld_meta = NULL;
+				head_st_meta = NULL;
+				prev_ld = NULL;
+				prev_st = NULL;
+				count = 0;
+			}
+
+			continue;
+		}
+
+		if (!head_ld_meta) {
+			head_ld_meta = meta1;
+			head_st_meta = meta2;
+		} else {
+			meta1->skip = true;
+			meta2->skip = true;
+		}
+
+		head_ld_meta->ldst_gather_len += BPF_LDST_BYTES(ld);
+		meta1 = nfp_meta_next(meta1);
+		meta2 = nfp_meta_next(meta2);
+		prev_ld = ld;
+		prev_st = st;
+		count++;
+	}
+}
+
 static int nfp_bpf_optimize(struct nfp_prog *nfp_prog)
 {
 	nfp_bpf_opt_reg_init(nfp_prog);
 
 	nfp_bpf_opt_ld_mask(nfp_prog);
 	nfp_bpf_opt_ld_shift(nfp_prog);
+	nfp_bpf_opt_ldst_gather(nfp_prog);
 
 	return 0;
 }
 
-static int nfp_bpf_ustore_calc(struct nfp_prog *nfp_prog, __le64 *ustore)
+static int nfp_bpf_ustore_calc(u64 *prog, unsigned int len)
 {
+	__le64 *ustore = (__force __le64 *)prog;
 	int i;
 
-	for (i = 0; i < nfp_prog->prog_len; i++) {
+	for (i = 0; i < len; i++) {
 		int err;
 
-		err = nfp_ustore_check_valid_no_ecc(nfp_prog->prog[i]);
+		err = nfp_ustore_check_valid_no_ecc(prog[i]);
 		if (err)
 			return err;
 
-		nfp_prog->prog[i] = nfp_ustore_calc_ecc_insn(nfp_prog->prog[i]);
-
-		ustore[i] = cpu_to_le64(nfp_prog->prog[i]);
+		ustore[i] = cpu_to_le64(nfp_ustore_calc_ecc_insn(prog[i]));
 	}
 
 	return 0;
 }
 
+static void nfp_bpf_prog_trim(struct nfp_prog *nfp_prog)
+{
+	void *prog;
+
+	prog = kvmalloc_array(nfp_prog->prog_len, sizeof(u64), GFP_KERNEL);
+	if (!prog)
+		return;
+
+	nfp_prog->__prog_alloc_len = nfp_prog->prog_len * sizeof(u64);
+	memcpy(prog, nfp_prog->prog, nfp_prog->__prog_alloc_len);
+	kvfree(nfp_prog->prog);
+	nfp_prog->prog = prog;
+}
+
 int nfp_bpf_jit(struct nfp_prog *nfp_prog)
 {
 	int ret;
@@ -2258,5 +2753,78 @@ int nfp_bpf_jit(struct nfp_prog *nfp_prog)
 		return -EINVAL;
 	}
 
-	return nfp_bpf_ustore_calc(nfp_prog, (__force __le64 *)nfp_prog->prog);
+	nfp_bpf_prog_trim(nfp_prog);
+
+	return ret;
+}
+
+void nfp_bpf_jit_prepare(struct nfp_prog *nfp_prog, unsigned int cnt)
+{
+	struct nfp_insn_meta *meta;
+
+	/* Another pass to record jump information. */
+	list_for_each_entry(meta, &nfp_prog->insns, l) {
+		u64 code = meta->insn.code;
+
+		if (BPF_CLASS(code) == BPF_JMP && BPF_OP(code) != BPF_EXIT &&
+		    BPF_OP(code) != BPF_CALL) {
+			struct nfp_insn_meta *dst_meta;
+			unsigned short dst_indx;
+
+			dst_indx = meta->n + 1 + meta->insn.off;
+			dst_meta = nfp_bpf_goto_meta(nfp_prog, meta, dst_indx,
+						     cnt);
+
+			meta->jmp_dst = dst_meta;
+			dst_meta->flags |= FLAG_INSN_IS_JUMP_DST;
+		}
+	}
+}
+
+void *nfp_bpf_relo_for_vnic(struct nfp_prog *nfp_prog, struct nfp_bpf_vnic *bv)
+{
+	unsigned int i;
+	u64 *prog;
+	int err;
+
+	prog = kmemdup(nfp_prog->prog, nfp_prog->prog_len * sizeof(u64),
+		       GFP_KERNEL);
+	if (!prog)
+		return ERR_PTR(-ENOMEM);
+
+	for (i = 0; i < nfp_prog->prog_len; i++) {
+		enum nfp_relo_type special;
+
+		special = FIELD_GET(OP_RELO_TYPE, prog[i]);
+		switch (special) {
+		case RELO_NONE:
+			continue;
+		case RELO_BR_REL:
+			br_add_offset(&prog[i], bv->start_off);
+			break;
+		case RELO_BR_GO_OUT:
+			br_set_offset(&prog[i],
+				      nfp_prog->tgt_out + bv->start_off);
+			break;
+		case RELO_BR_GO_ABORT:
+			br_set_offset(&prog[i],
+				      nfp_prog->tgt_abort + bv->start_off);
+			break;
+		case RELO_BR_NEXT_PKT:
+			br_set_offset(&prog[i], bv->tgt_done);
+			break;
+		}
+
+		prog[i] &= ~OP_RELO_TYPE;
+	}
+
+	err = nfp_bpf_ustore_calc(prog, nfp_prog->prog_len);
+	if (err)
+		goto err_free_prog;
+
+	return prog;
+
+err_free_prog:
+	kfree(prog);
+	return ERR_PTR(err);
 }
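
The jit.c changes above replace the old OP_BR_SPECIAL fixup with a relocation scheme: the top byte of every 64-bit instruction carries an nfp_relo_type at emit time, and nfp_bpf_relo_for_vnic() resolves and clears it per vNIC just before upload. A simplified model of that round trip, using plain shifts where the driver uses FIELD_PREP()/FIELD_GET() and the br_*_offset() helpers:

#include <stdint.h>

#define OP_RELO_TYPE	0xff00000000000000ULL
#define RELO_SHIFT	56

enum relo_type { RELO_NONE, RELO_BR_REL, RELO_BR_GO_OUT,
		 RELO_BR_GO_ABORT, RELO_BR_NEXT_PKT };

/* Emit time: stash the relocation type in the scratch byte. */
static uint64_t mark_relo(uint64_t insn, enum relo_type t)
{
	return (insn & ~OP_RELO_TYPE) | ((uint64_t)t << RELO_SHIFT);
}

/* Load time: branch-target patching is elided here; the point is that
 * the type is recovered from the scratch byte and the byte is cleared
 * before the instruction is handed to hardware.
 */
static uint64_t resolve_relo(uint64_t insn)
{
	enum relo_type t;

	t = (enum relo_type)((insn & OP_RELO_TYPE) >> RELO_SHIFT);
	switch (t) {
	case RELO_NONE:
		return insn;		/* nothing recorded */
	default:
		break;			/* patch the branch target here */
	}
	return insn & ~OP_RELO_TYPE;	/* never leak the scratch byte */
}
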
diff --git a/drivers/net/ethernet/netronome/nfp/bpf/main.c b/drivers/net/ethernet/netronome/nfp/bpf/main.c
index 13190aa..e8cfe30 100644
--- a/drivers/net/ethernet/netronome/nfp/bpf/main.c
+++ b/drivers/net/ethernet/netronome/nfp/bpf/main.c
@@ -34,10 +34,12 @@
 #include <net/pkt_cls.h>
 
 #include "../nfpcore/nfp_cpp.h"
+#include "../nfpcore/nfp_nffw.h"
 #include "../nfp_app.h"
 #include "../nfp_main.h"
 #include "../nfp_net.h"
 #include "../nfp_port.h"
+#include "fw.h"
 #include "main.h"
 
 static bool nfp_net_ebpf_capable(struct nfp_net *nn)
@@ -85,16 +87,21 @@ static const char *nfp_bpf_extra_cap(struct nfp_app *app, struct nfp_net *nn)
 static int
 nfp_bpf_vnic_alloc(struct nfp_app *app, struct nfp_net *nn, unsigned int id)
 {
+	struct nfp_bpf_vnic *bv;
 	int err;
 
-	nn->app_priv = kzalloc(sizeof(struct nfp_bpf_vnic), GFP_KERNEL);
-	if (!nn->app_priv)
+	bv = kzalloc(sizeof(*bv), GFP_KERNEL);
+	if (!bv)
 		return -ENOMEM;
+	nn->app_priv = bv;
 
 	err = nfp_app_nic_vnic_alloc(app, nn, id);
 	if (err)
 		goto err_free_priv;
 
+	bv->start_off = nn_readw(nn, NFP_NET_CFG_BPF_START);
+	bv->tgt_done = nn_readw(nn, NFP_NET_CFG_BPF_DONE);
+
 	return 0;
 err_free_priv:
 	kfree(nn->app_priv);
@@ -105,8 +112,6 @@ static void nfp_bpf_vnic_free(struct nfp_app *app, struct nfp_net *nn)
 {
 	struct nfp_bpf_vnic *bv = nn->app_priv;
 
-	if (nn->dp.bpf_offload_xdp)
-		nfp_bpf_xdp_offload(app, nn, NULL);
 	WARN_ON(bv->tc_prog);
 	kfree(bv);
 }
@@ -191,13 +196,148 @@ static int nfp_bpf_setup_tc(struct nfp_app *app, struct net_device *netdev,
 
 static bool nfp_bpf_tc_busy(struct nfp_app *app, struct nfp_net *nn)
 {
-	return nn->dp.ctrl & NFP_NET_CFG_CTRL_BPF;
+	struct nfp_bpf_vnic *bv = nn->app_priv;
+
+	return !!bv->tc_prog;
+}
+
+static int
+nfp_bpf_change_mtu(struct nfp_app *app, struct net_device *netdev, int new_mtu)
+{
+	struct nfp_net *nn = netdev_priv(netdev);
+	unsigned int max_mtu;
+
+	if (~nn->dp.ctrl & NFP_NET_CFG_CTRL_BPF)
+		return 0;
+
+	max_mtu = nn_readb(nn, NFP_NET_CFG_BPF_INL_MTU) * 64 - 32;
+	if (new_mtu > max_mtu) {
+		nn_info(nn, "BPF offload active, MTU over %u not supported\n",
+			max_mtu);
+		return -EBUSY;
+	}
+	return 0;
+}
+
+static int
+nfp_bpf_parse_cap_adjust_head(struct nfp_app_bpf *bpf, void __iomem *value,
+			      u32 length)
+{
+	struct nfp_bpf_cap_tlv_adjust_head __iomem *cap = value;
+	struct nfp_cpp *cpp = bpf->app->pf->cpp;
+
+	if (length < sizeof(*cap)) {
+		nfp_err(cpp, "truncated adjust_head TLV: %d\n", length);
+		return -EINVAL;
+	}
+
+	bpf->adjust_head.flags = readl(&cap->flags);
+	bpf->adjust_head.off_min = readl(&cap->off_min);
+	bpf->adjust_head.off_max = readl(&cap->off_max);
+	bpf->adjust_head.guaranteed_sub = readl(&cap->guaranteed_sub);
+	bpf->adjust_head.guaranteed_add = readl(&cap->guaranteed_add);
+
+	if (bpf->adjust_head.off_min > bpf->adjust_head.off_max) {
+		nfp_err(cpp, "invalid adjust_head TLV: min > max\n");
+		return -EINVAL;
+	}
+	if (!FIELD_FIT(UR_REG_IMM_MAX, bpf->adjust_head.off_min) ||
+	    !FIELD_FIT(UR_REG_IMM_MAX, bpf->adjust_head.off_max)) {
+		nfp_warn(cpp, "disabling adjust_head - driver expects min/max to fit in as immediates\n");
+		memset(&bpf->adjust_head, 0, sizeof(bpf->adjust_head));
+		return 0;
+	}
+
+	return 0;
+}
+
+static int nfp_bpf_parse_capabilities(struct nfp_app *app)
+{
+	struct nfp_cpp *cpp = app->pf->cpp;
+	struct nfp_cpp_area *area;
+	u8 __iomem *mem, *start;
+
+	mem = nfp_rtsym_map(app->pf->rtbl, "_abi_bpf_capabilities", "bpf.cap",
+			    8, &area);
+	if (IS_ERR(mem))
+		return PTR_ERR(mem) == -ENOENT ? 0 : PTR_ERR(mem);
+
+	start = mem;
+	while (mem - start + 8 < nfp_cpp_area_size(area)) {
+		u8 __iomem *value;
+		u32 type, length;
+
+		type = readl(mem);
+		length = readl(mem + 4);
+		value = mem + 8;
+
+		mem += 8 + length;
+		if (mem - start > nfp_cpp_area_size(area))
+			goto err_release_free;
+
+		switch (type) {
+		case NFP_BPF_CAP_TYPE_ADJUST_HEAD:
+			if (nfp_bpf_parse_cap_adjust_head(app->priv, value,
+							  length))
+				goto err_release_free;
+			break;
+		default:
+			nfp_dbg(cpp, "unknown BPF capability: %d\n", type);
+			break;
+		}
+	}
+	if (mem - start != nfp_cpp_area_size(area)) {
+		nfp_err(cpp, "BPF capabilities left after parsing, parsed:%zd total length:%zu\n",
+			mem - start, nfp_cpp_area_size(area));
+		goto err_release_free;
+	}
+
+	nfp_cpp_area_release_free(area);
+
+	return 0;
+
+err_release_free:
+	nfp_err(cpp, "invalid BPF capabilities at offset:%zd\n", mem - start);
+	nfp_cpp_area_release_free(area);
+	return -EINVAL;
+}
+
+static int nfp_bpf_init(struct nfp_app *app)
+{
+	struct nfp_app_bpf *bpf;
+	int err;
+
+	bpf = kzalloc(sizeof(*bpf), GFP_KERNEL);
+	if (!bpf)
+		return -ENOMEM;
+	bpf->app = app;
+	app->priv = bpf;
+
+	err = nfp_bpf_parse_capabilities(app);
+	if (err)
+		goto err_free_bpf;
+
+	return 0;
+
+err_free_bpf:
+	kfree(bpf);
+	return err;
+}
+
+static void nfp_bpf_clean(struct nfp_app *app)
+{
+	kfree(app->priv);
 }
 
 const struct nfp_app_type app_bpf = {
 	.id		= NFP_APP_BPF_NIC,
 	.name		= "ebpf",
 
+	.init		= nfp_bpf_init,
+	.clean		= nfp_bpf_clean,
+
+	.change_mtu	= nfp_bpf_change_mtu,
+
 	.extra_cap	= nfp_bpf_extra_cap,
 
 	.vnic_alloc	= nfp_bpf_vnic_alloc,
@@ -205,9 +345,6 @@ const struct nfp_app_type app_bpf = {
 
 	.setup_tc	= nfp_bpf_setup_tc,
 	.tc_busy	= nfp_bpf_tc_busy,
+	.bpf		= nfp_ndo_bpf,
 	.xdp_offload	= nfp_bpf_xdp_offload,
-
-	.bpf_verifier_prep	= nfp_bpf_verifier_prep,
-	.bpf_translate		= nfp_bpf_translate,
-	.bpf_destroy		= nfp_bpf_destroy,
 };
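
A worked example of the MTU cap enforced in nfp_bpf_change_mtu() above: NFP_NET_CFG_BPF_INL_MTU counts 64-byte units and 32 bytes are reserved, so a register value of 48, for instance, limits the MTU to 48 * 64 - 32 = 3040 bytes. A one-line helper capturing that arithmetic:

static unsigned int bpf_offload_max_mtu(unsigned int inl_mtu_reg)
{
	/* 64-byte units reported by firmware, minus 32 reserved bytes */
	return inl_mtu_reg * 64 - 32;
}
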
diff --git a/drivers/net/ethernet/netronome/nfp/bpf/main.h b/drivers/net/ethernet/netronome/nfp/bpf/main.h
index 57b6043..66381af 100644
--- a/drivers/net/ethernet/netronome/nfp/bpf/main.h
+++ b/drivers/net/ethernet/netronome/nfp/bpf/main.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2016 Netronome Systems, Inc.
+ * Copyright (C) 2016-2017 Netronome Systems, Inc.
  *
  * This software is dual licensed under the GNU General License Version 2,
  * June 1991 as shown in the file COPYING in the top-level directory of this
@@ -42,17 +42,28 @@
 
 #include "../nfp_asm.h"
 
-/* For branch fixup logic use up-most byte of branch instruction as scratch
+/* For relocation logic, use the uppermost byte of the branch instruction as scratch
  * area.  Remember to clear this before sending instructions to HW!
  */
-#define OP_BR_SPECIAL	0xff00000000000000ULL
+#define OP_RELO_TYPE	0xff00000000000000ULL
 
-enum br_special {
-	OP_BR_NORMAL = 0,
-	OP_BR_GO_OUT,
-	OP_BR_GO_ABORT,
+enum nfp_relo_type {
+	RELO_NONE = 0,
+	/* standard internal jumps */
+	RELO_BR_REL,
+	/* internal jumps to parts of the outro */
+	RELO_BR_GO_OUT,
+	RELO_BR_GO_ABORT,
+	/* external jumps to fixed addresses */
+	RELO_BR_NEXT_PKT,
 };
 
+/* To make absolute relocated branches (branches other than RELO_BR_REL)
+ * distinguishable in user space dumps from normal jumps, add a large offset
+ * to them.
+ */
+#define BR_OFF_RELO		15000
+
 enum static_regs {
 	STATIC_REG_IMM		= 21, /* Bank AB */
 	STATIC_REG_STACK	= 22, /* Bank A */
@@ -78,6 +89,29 @@ enum pkt_vec {
 #define NFP_BPF_ABI_FLAGS	reg_imm(0)
 #define   NFP_BPF_ABI_FLAG_MARK	1
 
+/**
+ * struct nfp_app_bpf - bpf app priv structure
+ * @app:		backpointer to the app
+ *
+ * @adjust_head:	adjust head capability
+ * @flags:		extra flags for adjust head
+ * @off_min:		minimal packet offset within buffer required
+ * @off_max:		maximum packet offset within buffer required
+ * @guaranteed_sub:	amount of negative adjustment guaranteed possible
+ * @guaranteed_add:	amount of positive adjustment guaranteed possible
+ */
+struct nfp_app_bpf {
+	struct nfp_app *app;
+
+	struct nfp_bpf_cap_adjust_head {
+		u32 flags;
+		int off_min;
+		int off_max;
+		int guaranteed_sub;
+		int guaranteed_add;
+	} adjust_head;
+};
+
 struct nfp_prog;
 struct nfp_insn_meta;
 typedef int (*instr_cb_t)(struct nfp_prog *, struct nfp_insn_meta *);
@@ -89,23 +123,39 @@ typedef int (*instr_cb_t)(struct nfp_prog *, struct nfp_insn_meta *);
 #define nfp_meta_next(meta)	list_next_entry(meta, l)
 #define nfp_meta_prev(meta)	list_prev_entry(meta, l)
 
+#define FLAG_INSN_IS_JUMP_DST	BIT(0)
+
 /**
  * struct nfp_insn_meta - BPF instruction wrapper
  * @insn: BPF instruction
  * @ptr: pointer type for memory operations
+ * @ldst_gather_len: memcpy length gathered from load/store sequence
+ * @paired_st: the paired store insn at the head of the sequence
+ * @arg2: arg2 for call instructions
  * @ptr_not_const: pointer is not always constant
+ * @jmp_dst: destination info for jump instructions
  * @off: index of first generated machine instruction (in nfp_prog.prog)
  * @n: eBPF instruction number
+ * @flags: eBPF instruction extra optimization flags
  * @skip: skip this instruction (optimized out)
  * @double_cb: callback for second part of the instruction
  * @l: link on nfp_prog->insns list
  */
 struct nfp_insn_meta {
 	struct bpf_insn insn;
-	struct bpf_reg_state ptr;
-	bool ptr_not_const;
+	union {
+		struct {
+			struct bpf_reg_state ptr;
+			struct bpf_insn *paired_st;
+			s16 ldst_gather_len;
+			bool ptr_not_const;
+		};
+		struct nfp_insn_meta *jmp_dst;
+		struct bpf_reg_state arg2;
+	};
 	unsigned int off;
 	unsigned short n;
+	unsigned short flags;
 	bool skip;
 	instr_cb_t double_cb;
 
@@ -134,23 +184,36 @@ static inline u8 mbpf_mode(const struct nfp_insn_meta *meta)
 	return BPF_MODE(meta->insn.code);
 }
 
+static inline bool is_mbpf_load(const struct nfp_insn_meta *meta)
+{
+	return (meta->insn.code & ~BPF_SIZE_MASK) == (BPF_LDX | BPF_MEM);
+}
+
+static inline bool is_mbpf_store(const struct nfp_insn_meta *meta)
+{
+	return (meta->insn.code & ~BPF_SIZE_MASK) == (BPF_STX | BPF_MEM);
+}
+
 /**
  * struct nfp_prog - nfp BPF program
+ * @bpf: backpointer to the bpf app priv structure
  * @prog: machine code
  * @prog_len: number of valid instructions in @prog array
  * @__prog_alloc_len: alloc size of @prog array
  * @verifier_meta: temporary storage for verifier's insn meta
  * @type: BPF program type
- * @start_off: address of the first instruction in the memory
+ * @last_bpf_off: address of the last instruction translated from BPF
  * @tgt_out: jump target for normal exit
  * @tgt_abort: jump target for abort (e.g. access outside of packet buffer)
- * @tgt_done: jump target to get the next packet
  * @n_translated: number of successfully translated instructions (for errors)
  * @error: error code if something went wrong
  * @stack_depth: max stack depth from the verifier
+ * @adjust_head_location:	insn number of the single adjust head call, if any
  * @insns: list of BPF instruction wrappers (struct nfp_insn_meta)
  */
 struct nfp_prog {
+	struct nfp_app_bpf *bpf;
+
 	u64 *prog;
 	unsigned int prog_len;
 	unsigned int __prog_alloc_len;
@@ -159,15 +222,15 @@ struct nfp_prog {
 
 	enum bpf_prog_type type;
 
-	unsigned int start_off;
+	unsigned int last_bpf_off;
 	unsigned int tgt_out;
 	unsigned int tgt_abort;
-	unsigned int tgt_done;
 
 	unsigned int n_translated;
 	int error;
 
 	unsigned int stack_depth;
+	unsigned int adjust_head_location;
 
 	struct list_head insns;
 };
@@ -175,26 +238,32 @@ struct nfp_prog {
 /**
  * struct nfp_bpf_vnic - per-vNIC BPF priv structure
  * @tc_prog:	currently loaded cls_bpf program
+ * @start_off:	address of the first instruction in the memory
+ * @tgt_done:	jump target to get the next packet
  */
 struct nfp_bpf_vnic {
 	struct bpf_prog *tc_prog;
+	unsigned int start_off;
+	unsigned int tgt_done;
 };
 
+void nfp_bpf_jit_prepare(struct nfp_prog *nfp_prog, unsigned int cnt);
 int nfp_bpf_jit(struct nfp_prog *prog);
 
-extern const struct bpf_ext_analyzer_ops nfp_bpf_analyzer_ops;
+extern const struct bpf_prog_offload_ops nfp_bpf_analyzer_ops;
 
 struct netdev_bpf;
 struct nfp_app;
 struct nfp_net;
 
+int nfp_ndo_bpf(struct nfp_app *app, struct nfp_net *nn,
+		struct netdev_bpf *bpf);
 int nfp_net_bpf_offload(struct nfp_net *nn, struct bpf_prog *prog,
 			bool old_prog);
 
-int nfp_bpf_verifier_prep(struct nfp_app *app, struct nfp_net *nn,
-			  struct netdev_bpf *bpf);
-int nfp_bpf_translate(struct nfp_app *app, struct nfp_net *nn,
-		      struct bpf_prog *prog);
-int nfp_bpf_destroy(struct nfp_app *app, struct nfp_net *nn,
-		    struct bpf_prog *prog);
+struct nfp_insn_meta *
+nfp_bpf_goto_meta(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
+		  unsigned int insn_idx, unsigned int n_insns);
+
+void *nfp_bpf_relo_for_vnic(struct nfp_prog *nfp_prog, struct nfp_bpf_vnic *bv);
 #endif
diff --git a/drivers/net/ethernet/netronome/nfp/bpf/offload.c b/drivers/net/ethernet/netronome/nfp/bpf/offload.c
index bc879ae..320b225 100644
--- a/drivers/net/ethernet/netronome/nfp/bpf/offload.c
+++ b/drivers/net/ethernet/netronome/nfp/bpf/offload.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2016 Netronome Systems, Inc.
+ * Copyright (C) 2016-2017 Netronome Systems, Inc.
  *
  * This software is dual licensed under the GNU General License Version 2,
  * June 1991 as shown in the file COPYING in the top-level directory of this
@@ -42,12 +42,14 @@
 #include <linux/jiffies.h>
 #include <linux/timer.h>
 #include <linux/list.h>
+#include <linux/mm.h>
 
 #include <net/pkt_cls.h>
 #include <net/tc_act/tc_gact.h>
 #include <net/tc_act/tc_mirred.h>
 
 #include "main.h"
+#include "../nfp_app.h"
 #include "../nfp_net_ctrl.h"
 #include "../nfp_net.h"
 
@@ -55,11 +57,10 @@ static int
 nfp_prog_prepare(struct nfp_prog *nfp_prog, const struct bpf_insn *prog,
 		 unsigned int cnt)
 {
+	struct nfp_insn_meta *meta;
 	unsigned int i;
 
 	for (i = 0; i < cnt; i++) {
-		struct nfp_insn_meta *meta;
-
 		meta = kzalloc(sizeof(*meta), GFP_KERNEL);
 		if (!meta)
 			return -ENOMEM;
@@ -70,6 +71,8 @@ nfp_prog_prepare(struct nfp_prog *nfp_prog, const struct bpf_insn *prog,
 		list_add_tail(&meta->l, &nfp_prog->insns);
 	}
 
+	nfp_bpf_jit_prepare(nfp_prog, cnt);
+
 	return 0;
 }
 
@@ -84,8 +87,9 @@ static void nfp_prog_free(struct nfp_prog *nfp_prog)
 	kfree(nfp_prog);
 }
 
-int nfp_bpf_verifier_prep(struct nfp_app *app, struct nfp_net *nn,
-			  struct netdev_bpf *bpf)
+static int
+nfp_bpf_verifier_prep(struct nfp_app *app, struct nfp_net *nn,
+		      struct netdev_bpf *bpf)
 {
 	struct bpf_prog *prog = bpf->verifier.prog;
 	struct nfp_prog *nfp_prog;
@@ -98,6 +102,7 @@ int nfp_bpf_verifier_prep(struct nfp_app *app, struct nfp_net *nn,
 
 	INIT_LIST_HEAD(&nfp_prog->insns);
 	nfp_prog->type = prog->type;
+	nfp_prog->bpf = app->priv;
 
 	ret = nfp_prog_prepare(nfp_prog, prog->insnsi, prog->len);
 	if (ret)
@@ -114,8 +119,7 @@ int nfp_bpf_verifier_prep(struct nfp_app *app, struct nfp_net *nn,
 	return ret;
 }
 
-int nfp_bpf_translate(struct nfp_app *app, struct nfp_net *nn,
-		      struct bpf_prog *prog)
+static int nfp_bpf_translate(struct nfp_net *nn, struct bpf_prog *prog)
 {
 	struct nfp_prog *nfp_prog = prog->aux->offload->dev_priv;
 	unsigned int stack_size;
@@ -127,37 +131,48 @@ int nfp_bpf_translate(struct nfp_app *app, struct nfp_net *nn,
 			prog->aux->stack_depth, stack_size);
 		return -EOPNOTSUPP;
 	}
-
-	nfp_prog->stack_depth = prog->aux->stack_depth;
-	nfp_prog->start_off = nn_readw(nn, NFP_NET_CFG_BPF_START);
-	nfp_prog->tgt_done = nn_readw(nn, NFP_NET_CFG_BPF_DONE);
+	nfp_prog->stack_depth = round_up(prog->aux->stack_depth, 4);
 
 	max_instr = nn_readw(nn, NFP_NET_CFG_BPF_MAX_LEN);
 	nfp_prog->__prog_alloc_len = max_instr * sizeof(u64);
 
-	nfp_prog->prog = kmalloc(nfp_prog->__prog_alloc_len, GFP_KERNEL);
+	nfp_prog->prog = kvmalloc(nfp_prog->__prog_alloc_len, GFP_KERNEL);
 	if (!nfp_prog->prog)
 		return -ENOMEM;
 
 	return nfp_bpf_jit(nfp_prog);
 }
 
-int nfp_bpf_destroy(struct nfp_app *app, struct nfp_net *nn,
-		    struct bpf_prog *prog)
+static int nfp_bpf_destroy(struct nfp_net *nn, struct bpf_prog *prog)
 {
 	struct nfp_prog *nfp_prog = prog->aux->offload->dev_priv;
 
-	kfree(nfp_prog->prog);
+	kvfree(nfp_prog->prog);
 	nfp_prog_free(nfp_prog);
 
 	return 0;
 }
 
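+/* Single entry point for the BPF offload ndo commands; dispatch on
+ * bpf->command so the individual handlers can stay static.
+ */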
+int nfp_ndo_bpf(struct nfp_app *app, struct nfp_net *nn, struct netdev_bpf *bpf)
+{
+	switch (bpf->command) {
+	case BPF_OFFLOAD_VERIFIER_PREP:
+		return nfp_bpf_verifier_prep(app, nn, bpf);
+	case BPF_OFFLOAD_TRANSLATE:
+		return nfp_bpf_translate(nn, bpf->offload.prog);
+	case BPF_OFFLOAD_DESTROY:
+		return nfp_bpf_destroy(nn, bpf->offload.prog);
+	default:
+		return -EINVAL;
+	}
+}
+
 static int nfp_net_bpf_load(struct nfp_net *nn, struct bpf_prog *prog)
 {
 	struct nfp_prog *nfp_prog = prog->aux->offload->dev_priv;
 	unsigned int max_mtu;
 	dma_addr_t dma_addr;
+	void *img;
 	int err;
 
 	max_mtu = nn_readb(nn, NFP_NET_CFG_BPF_INL_MTU) * 64 - 32;
@@ -166,11 +181,17 @@ static int nfp_net_bpf_load(struct nfp_net *nn, struct bpf_prog *prog)
 		return -EOPNOTSUPP;
 	}
 
-	dma_addr = dma_map_single(nn->dp.dev, nfp_prog->prog,
+	img = nfp_bpf_relo_for_vnic(nfp_prog, nn->app_priv);
+	if (IS_ERR(img))
+		return PTR_ERR(img);
+
+	dma_addr = dma_map_single(nn->dp.dev, img,
 				  nfp_prog->prog_len * sizeof(u64),
 				  DMA_TO_DEVICE);
-	if (dma_mapping_error(nn->dp.dev, dma_addr))
+	if (dma_mapping_error(nn->dp.dev, dma_addr)) {
+		kfree(img);
 		return -ENOMEM;
+	}
 
 	nn_writew(nn, NFP_NET_CFG_BPF_SIZE, nfp_prog->prog_len);
 	nn_writeq(nn, NFP_NET_CFG_BPF_ADDR, dma_addr);
@@ -182,6 +203,7 @@ static int nfp_net_bpf_load(struct nfp_net *nn, struct bpf_prog *prog)
 
 	dma_unmap_single(nn->dp.dev, dma_addr, nfp_prog->prog_len * sizeof(u64),
 			 DMA_TO_DEVICE);
+	kfree(img);
 
 	return err;
 }
diff --git a/drivers/net/ethernet/netronome/nfp/bpf/verifier.c b/drivers/net/ethernet/netronome/nfp/bpf/verifier.c
index 8d43491..7890d95 100644
--- a/drivers/net/ethernet/netronome/nfp/bpf/verifier.c
+++ b/drivers/net/ethernet/netronome/nfp/bpf/verifier.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2016 Netronome Systems, Inc.
+ * Copyright (C) 2016-2017 Netronome Systems, Inc.
  *
  * This software is dual licensed under the GNU General License Version 2,
  * June 1991 as shown in the file COPYING in the top-level directory of this
@@ -31,16 +31,18 @@
  * SOFTWARE.
  */
 
-#define pr_fmt(fmt)	"NFP net bpf: " fmt
-
 #include <linux/bpf.h>
 #include <linux/bpf_verifier.h>
 #include <linux/kernel.h>
 #include <linux/pkt_cls.h>
 
+#include "fw.h"
 #include "main.h"
 
-static struct nfp_insn_meta *
+#define pr_vlog(env, fmt, ...)	\
+	bpf_verifier_log_write(env, "[nfp] " fmt, ##__VA_ARGS__)
+
+struct nfp_insn_meta *
 nfp_bpf_goto_meta(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
 		  unsigned int insn_idx, unsigned int n_insns)
 {
@@ -68,6 +70,73 @@ nfp_bpf_goto_meta(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
 	return meta;
 }
 
+static void
+nfp_record_adjust_head(struct nfp_app_bpf *bpf, struct nfp_prog *nfp_prog,
+		       struct nfp_insn_meta *meta,
+		       const struct bpf_reg_state *reg2)
+{
+	unsigned int location =	UINT_MAX;
+	int imm;
+
+	/* The datapath can usually guarantee how much adjust head can be
+	 * done without any checks.  Optimize the simple case where there is
+	 * only one adjust head by a constant.
+	 */
+	if (reg2->type != SCALAR_VALUE || !tnum_is_const(reg2->var_off))
+		goto exit_set_location;
+	imm = reg2->var_off.value;
+	/* Translator will skip all checks, so we must guarantee min pkt len */
+	if (imm > ETH_ZLEN - ETH_HLEN)
+		goto exit_set_location;
+	if (imm > (int)bpf->adjust_head.guaranteed_add ||
+	    imm < -bpf->adjust_head.guaranteed_sub)
+		goto exit_set_location;
+
+	if (nfp_prog->adjust_head_location) {
+		/* Only one call per program allowed */
+		if (nfp_prog->adjust_head_location != meta->n)
+			goto exit_set_location;
+
+		if (meta->arg2.var_off.value != imm)
+			goto exit_set_location;
+	}
+
+	location = meta->n;
+exit_set_location:
+	nfp_prog->adjust_head_location = location;
+}
+
+static int
+nfp_bpf_check_call(struct nfp_prog *nfp_prog, struct bpf_verifier_env *env,
+		   struct nfp_insn_meta *meta)
+{
+	const struct bpf_reg_state *reg2 = cur_regs(env) + BPF_REG_2;
+	struct nfp_app_bpf *bpf = nfp_prog->bpf;
+	u32 func_id = meta->insn.imm;
+
+	switch (func_id) {
+	case BPF_FUNC_xdp_adjust_head:
+		if (!bpf->adjust_head.off_max) {
+			pr_vlog(env, "adjust_head not supported by FW\n");
+			return -EOPNOTSUPP;
+		}
+		if (!(bpf->adjust_head.flags & NFP_BPF_ADJUST_HEAD_NO_META)) {
+			pr_vlog(env, "adjust_head: FW requires shifting metadata, not supported by the driver\n");
+			return -EOPNOTSUPP;
+		}
+
+		nfp_record_adjust_head(bpf, nfp_prog, meta, reg2);
+		break;
+	default:
+		pr_vlog(env, "unsupported function id: %d\n", func_id);
+		return -EOPNOTSUPP;
+	}
+
+	meta->arg2 = *reg2;
+
+	return 0;
+}
+
 static int
 nfp_bpf_check_exit(struct nfp_prog *nfp_prog,
 		   struct bpf_verifier_env *env)
@@ -82,7 +151,7 @@ nfp_bpf_check_exit(struct nfp_prog *nfp_prog,
 		char tn_buf[48];
 
 		tnum_strn(tn_buf, sizeof(tn_buf), reg0->var_off);
-		pr_info("unsupported exit state: %d, var_off: %s\n",
+		pr_vlog(env, "unsupported exit state: %d, var_off: %s\n",
 			reg0->type, tn_buf);
 		return -EINVAL;
 	}
@@ -92,7 +161,7 @@ nfp_bpf_check_exit(struct nfp_prog *nfp_prog,
 	    imm <= TC_ACT_REDIRECT &&
 	    imm != TC_ACT_SHOT && imm != TC_ACT_STOLEN &&
 	    imm != TC_ACT_QUEUED) {
-		pr_info("unsupported exit state: %d, imm: %llx\n",
+		pr_vlog(env, "unsupported exit state: %d, imm: %llx\n",
 			reg0->type, imm);
 		return -EINVAL;
 	}
@@ -103,12 +172,13 @@ nfp_bpf_check_exit(struct nfp_prog *nfp_prog,
 static int
 nfp_bpf_check_stack_access(struct nfp_prog *nfp_prog,
 			   struct nfp_insn_meta *meta,
-			   const struct bpf_reg_state *reg)
+			   const struct bpf_reg_state *reg,
+			   struct bpf_verifier_env *env)
 {
 	s32 old_off, new_off;
 
 	if (!tnum_is_const(reg->var_off)) {
-		pr_info("variable ptr stack access\n");
+		pr_vlog(env, "variable ptr stack access\n");
 		return -EINVAL;
 	}
 
@@ -126,7 +196,7 @@ nfp_bpf_check_stack_access(struct nfp_prog *nfp_prog,
 	if (old_off % 4 == new_off % 4)
 		return 0;
 
-	pr_info("stack access changed location was:%d is:%d\n",
+	pr_vlog(env, "stack access changed location was:%d is:%d\n",
 		old_off, new_off);
 	return -EINVAL;
 }
@@ -141,18 +211,18 @@ nfp_bpf_check_ptr(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
 	if (reg->type != PTR_TO_CTX &&
 	    reg->type != PTR_TO_STACK &&
 	    reg->type != PTR_TO_PACKET) {
-		pr_info("unsupported ptr type: %d\n", reg->type);
+		pr_vlog(env, "unsupported ptr type: %d\n", reg->type);
 		return -EINVAL;
 	}
 
 	if (reg->type == PTR_TO_STACK) {
-		err = nfp_bpf_check_stack_access(nfp_prog, meta, reg);
+		err = nfp_bpf_check_stack_access(nfp_prog, meta, reg, env);
 		if (err)
 			return err;
 	}
 
 	if (meta->ptr.type != NOT_INIT && meta->ptr.type != reg->type) {
-		pr_info("ptr type changed for instruction %d -> %d\n",
+		pr_vlog(env, "ptr type changed for instruction %d -> %d\n",
 			meta->ptr.type, reg->type);
 		return -EINVAL;
 	}
@@ -173,23 +243,25 @@ nfp_verify_insn(struct bpf_verifier_env *env, int insn_idx, int prev_insn_idx)
 
 	if (meta->insn.src_reg >= MAX_BPF_REG ||
 	    meta->insn.dst_reg >= MAX_BPF_REG) {
-		pr_err("program uses extended registers - jit hardening?\n");
+		pr_vlog(env, "program uses extended registers - jit hardening?\n");
 		return -EINVAL;
 	}
 
+	if (meta->insn.code == (BPF_JMP | BPF_CALL))
+		return nfp_bpf_check_call(nfp_prog, env, meta);
 	if (meta->insn.code == (BPF_JMP | BPF_EXIT))
 		return nfp_bpf_check_exit(nfp_prog, env);
 
-	if ((meta->insn.code & ~BPF_SIZE_MASK) == (BPF_LDX | BPF_MEM))
+	if (is_mbpf_load(meta))
 		return nfp_bpf_check_ptr(nfp_prog, meta, env,
 					 meta->insn.src_reg);
-	if ((meta->insn.code & ~BPF_SIZE_MASK) == (BPF_STX | BPF_MEM))
+	if (is_mbpf_store(meta))
 		return nfp_bpf_check_ptr(nfp_prog, meta, env,
 					 meta->insn.dst_reg);
 
 	return 0;
 }
 
-const struct bpf_ext_analyzer_ops nfp_bpf_analyzer_ops = {
+const struct bpf_prog_offload_ops nfp_bpf_analyzer_ops = {
 	.insn_hook = nfp_verify_insn,
 };
diff --git a/drivers/net/ethernet/netronome/nfp/flower/action.c b/drivers/net/ethernet/netronome/nfp/flower/action.c
index c1c595f..b3567a5 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/action.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/action.c
@@ -81,6 +81,9 @@ static bool nfp_fl_netdev_is_tunnel_type(struct net_device *out_dev,
 	if (!strcmp(out_dev->rtnl_link_ops->kind, "vxlan"))
 		return tun_type == NFP_FL_TUNNEL_VXLAN;
 
+	if (!strcmp(out_dev->rtnl_link_ops->kind, "geneve"))
+		return tun_type == NFP_FL_TUNNEL_GENEVE;
+
 	return false;
 }
 
@@ -93,13 +96,11 @@ nfp_fl_output(struct nfp_fl_output *output, const struct tc_action *action,
 	size_t act_size = sizeof(struct nfp_fl_output);
 	struct net_device *out_dev;
 	u16 tmp_flags;
-	int ifindex;
 
 	output->head.jump_id = NFP_FL_ACTION_OPCODE_OUTPUT;
 	output->head.len_lw = act_size >> NFP_FL_LW_SIZ;
 
-	ifindex = tcf_mirred_ifindex(action);
-	out_dev = __dev_get_by_index(dev_net(in_dev), ifindex);
+	out_dev = tcf_mirred_dev(action);
 	if (!out_dev)
 		return -EOPNOTSUPP;
 
@@ -138,11 +139,23 @@ nfp_fl_output(struct nfp_fl_output *output, const struct tc_action *action,
 	return 0;
 }
 
-static bool nfp_fl_supported_tun_port(const struct tc_action *action)
+static enum nfp_flower_tun_type
+nfp_fl_get_tun_from_act_l4_port(struct nfp_app *app,
+				const struct tc_action *action)
 {
 	struct ip_tunnel_info *tun = tcf_tunnel_info(action);
+	struct nfp_flower_priv *priv = app->priv;
 
-	return tun->key.tp_dst == htons(NFP_FL_VXLAN_PORT);
+	switch (tun->key.tp_dst) {
+	case htons(NFP_FL_VXLAN_PORT):
+		return NFP_FL_TUNNEL_VXLAN;
+	case htons(NFP_FL_GENEVE_PORT):
+		if (priv->flower_ext_feats & NFP_FL_FEATS_GENEVE)
+			return NFP_FL_TUNNEL_GENEVE;
+		/* FALLTHROUGH */
+	default:
+		return NFP_FL_TUNNEL_NONE;
+	}
 }
 
 static struct nfp_fl_pre_tunnel *nfp_fl_pre_tunnel(char *act_data, int act_len)
@@ -167,38 +180,33 @@ static struct nfp_fl_pre_tunnel *nfp_fl_pre_tunnel(char *act_data, int act_len)
 }
 
 static int
-nfp_fl_set_vxlan(struct nfp_fl_set_vxlan *set_vxlan,
-		 const struct tc_action *action,
-		 struct nfp_fl_pre_tunnel *pre_tun)
+nfp_fl_set_ipv4_udp_tun(struct nfp_fl_set_ipv4_udp_tun *set_tun,
+			const struct tc_action *action,
+			struct nfp_fl_pre_tunnel *pre_tun,
+			enum nfp_flower_tun_type tun_type)
 {
-	struct ip_tunnel_info *vxlan = tcf_tunnel_info(action);
-	size_t act_size = sizeof(struct nfp_fl_set_vxlan);
-	u32 tmp_set_vxlan_type_index = 0;
+	size_t act_size = sizeof(struct nfp_fl_set_ipv4_udp_tun);
+	struct ip_tunnel_info *ip_tun = tcf_tunnel_info(action);
+	u32 tmp_set_ip_tun_type_index = 0;
 	/* Currently support one pre-tunnel so index is always 0. */
 	int pretun_idx = 0;
 
-	if (vxlan->options_len) {
-		/* Do not support options e.g. vxlan gpe. */
+	if (ip_tun->options_len)
 		return -EOPNOTSUPP;
-	}
 
-	set_vxlan->head.jump_id = NFP_FL_ACTION_OPCODE_SET_IPV4_TUNNEL;
-	set_vxlan->head.len_lw = act_size >> NFP_FL_LW_SIZ;
+	set_tun->head.jump_id = NFP_FL_ACTION_OPCODE_SET_IPV4_TUNNEL;
+	set_tun->head.len_lw = act_size >> NFP_FL_LW_SIZ;
 
 	/* Set tunnel type and pre-tunnel index. */
-	tmp_set_vxlan_type_index |=
-		FIELD_PREP(NFP_FL_IPV4_TUNNEL_TYPE, NFP_FL_TUNNEL_VXLAN) |
+	tmp_set_ip_tun_type_index |=
+		FIELD_PREP(NFP_FL_IPV4_TUNNEL_TYPE, tun_type) |
 		FIELD_PREP(NFP_FL_IPV4_PRE_TUN_INDEX, pretun_idx);
 
-	set_vxlan->tun_type_index = cpu_to_be32(tmp_set_vxlan_type_index);
-
-	set_vxlan->tun_id = vxlan->key.tun_id;
-	set_vxlan->tun_flags = vxlan->key.tun_flags;
-	set_vxlan->ipv4_ttl = vxlan->key.ttl;
-	set_vxlan->ipv4_tos = vxlan->key.tos;
+	set_tun->tun_type_index = cpu_to_be32(tmp_set_ip_tun_type_index);
+	set_tun->tun_id = ip_tun->key.tun_id;
 
 	/* Complete pre_tunnel action. */
-	pre_tun->ipv4_dst = vxlan->key.u.ipv4.dst;
+	pre_tun->ipv4_dst = ip_tun->key.u.ipv4.dst;
 
 	return 0;
 }
@@ -435,8 +443,8 @@ nfp_flower_loop_action(const struct tc_action *a,
 		       struct net_device *netdev,
 		       enum nfp_flower_tun_type *tun_type, int *tun_out_cnt)
 {
+	struct nfp_fl_set_ipv4_udp_tun *set_tun;
 	struct nfp_fl_pre_tunnel *pre_tun;
-	struct nfp_fl_set_vxlan *s_vxl;
 	struct nfp_fl_push_vlan *psh_v;
 	struct nfp_fl_pop_vlan *pop_v;
 	struct nfp_fl_output *output;
@@ -484,26 +492,29 @@ nfp_flower_loop_action(const struct tc_action *a,
 
 		nfp_fl_push_vlan(psh_v, a);
 		*a_len += sizeof(struct nfp_fl_push_vlan);
-	} else if (is_tcf_tunnel_set(a) && nfp_fl_supported_tun_port(a)) {
+	} else if (is_tcf_tunnel_set(a)) {
+		struct nfp_repr *repr = netdev_priv(netdev);
+		*tun_type = nfp_fl_get_tun_from_act_l4_port(repr->app, a);
+		if (*tun_type == NFP_FL_TUNNEL_NONE)
+			return -EOPNOTSUPP;
+
 		/* Pre-tunnel action is required for tunnel encap.
 		 * This checks for next hop entries on NFP.
 		 * If none, the packet falls back before applying other actions.
 		 */
 		if (*a_len + sizeof(struct nfp_fl_pre_tunnel) +
-		    sizeof(struct nfp_fl_set_vxlan) > NFP_FL_MAX_A_SIZ)
+		    sizeof(struct nfp_fl_set_ipv4_udp_tun) > NFP_FL_MAX_A_SIZ)
 			return -EOPNOTSUPP;
 
-		*tun_type = NFP_FL_TUNNEL_VXLAN;
 		pre_tun = nfp_fl_pre_tunnel(nfp_fl->action_data, *a_len);
 		nfp_fl->meta.shortcut = cpu_to_be32(NFP_FL_SC_ACT_NULL);
 		*a_len += sizeof(struct nfp_fl_pre_tunnel);
 
-		s_vxl = (struct nfp_fl_set_vxlan *)&nfp_fl->action_data[*a_len];
-		err = nfp_fl_set_vxlan(s_vxl, a, pre_tun);
+		set_tun = (void *)&nfp_fl->action_data[*a_len];
+		err = nfp_fl_set_ipv4_udp_tun(set_tun, a, pre_tun, *tun_type);
 		if (err)
 			return err;
-
-		*a_len += sizeof(struct nfp_fl_set_vxlan);
+		*a_len += sizeof(struct nfp_fl_set_ipv4_udp_tun);
 	} else if (is_tcf_tunnel_release(a)) {
 		/* Tunnel decap is handled by default so accept action. */
 		return 0;
diff --git a/drivers/net/ethernet/netronome/nfp/flower/cmsg.c b/drivers/net/ethernet/netronome/nfp/flower/cmsg.c
index e98bb9c..615314d 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/cmsg.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/cmsg.c
@@ -125,6 +125,27 @@ int nfp_flower_cmsg_portmod(struct nfp_repr *repr, bool carrier_ok)
 	return 0;
 }
 
+int nfp_flower_cmsg_portreify(struct nfp_repr *repr, bool exists)
+{
+	struct nfp_flower_cmsg_portreify *msg;
+	struct sk_buff *skb;
+
+	skb = nfp_flower_cmsg_alloc(repr->app, sizeof(*msg),
+				    NFP_FLOWER_CMSG_TYPE_PORT_REIFY,
+				    GFP_KERNEL);
+	if (!skb)
+		return -ENOMEM;
+
+	msg = nfp_flower_cmsg_get_data(skb);
+	msg->portnum = cpu_to_be32(repr->dst->u.port_info.port_id);
+	msg->reserved = 0;
+	msg->info = cpu_to_be16(exists);
+
+	nfp_ctrl_tx(repr->app->ctrl, skb);
+
+	return 0;
+}
+
 static void
 nfp_flower_cmsg_portmod_rx(struct nfp_app *app, struct sk_buff *skb)
 {
@@ -161,6 +182,28 @@ nfp_flower_cmsg_portmod_rx(struct nfp_app *app, struct sk_buff *skb)
 }
 
 static void
+nfp_flower_cmsg_portreify_rx(struct nfp_app *app, struct sk_buff *skb)
+{
+	struct nfp_flower_priv *priv = app->priv;
+	struct nfp_flower_cmsg_portreify *msg;
+	bool exists;
+
+	msg = nfp_flower_cmsg_get_data(skb);
+
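+	/* only count acks which match a registered representor */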
+	rcu_read_lock();
+	exists = !!nfp_app_repr_get(app, be32_to_cpu(msg->portnum));
+	rcu_read_unlock();
+	if (!exists) {
+		nfp_flower_cmsg_warn(app, "ctrl msg for unknown port 0x%08x\n",
+				     be32_to_cpu(msg->portnum));
+		return;
+	}
+
+	atomic_inc(&priv->reify_replies);
+	wake_up_interruptible(&priv->reify_wait_queue);
+}
+
+static void
 nfp_flower_cmsg_process_one_rx(struct nfp_app *app, struct sk_buff *skb)
 {
 	struct nfp_flower_cmsg_hdr *cmsg_hdr;
@@ -176,6 +219,9 @@ nfp_flower_cmsg_process_one_rx(struct nfp_app *app, struct sk_buff *skb)
 
 	type = cmsg_hdr->type;
 	switch (type) {
+	case NFP_FLOWER_CMSG_TYPE_PORT_REIFY:
+		nfp_flower_cmsg_portreify_rx(app, skb);
+		break;
 	case NFP_FLOWER_CMSG_TYPE_PORT_MOD:
 		nfp_flower_cmsg_portmod_rx(app, skb);
 		break;
diff --git a/drivers/net/ethernet/netronome/nfp/flower/cmsg.h b/drivers/net/ethernet/netronome/nfp/flower/cmsg.h
index 6607074..adfe474 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/cmsg.h
+++ b/drivers/net/ethernet/netronome/nfp/flower/cmsg.h
@@ -41,7 +41,7 @@
 #include "../nfp_app.h"
 #include "../nfpcore/nfp_cpp.h"
 
-#define NFP_FLOWER_LAYER_META		BIT(0)
+#define NFP_FLOWER_LAYER_EXT_META	BIT(0)
 #define NFP_FLOWER_LAYER_PORT		BIT(1)
 #define NFP_FLOWER_LAYER_MAC		BIT(2)
 #define NFP_FLOWER_LAYER_TP		BIT(3)
@@ -50,8 +50,7 @@
 #define NFP_FLOWER_LAYER_CT		BIT(6)
 #define NFP_FLOWER_LAYER_VXLAN		BIT(7)
 
-#define NFP_FLOWER_LAYER_ETHER		BIT(3)
-#define NFP_FLOWER_LAYER_ARP		BIT(4)
+#define NFP_FLOWER_LAYER2_GENEVE	BIT(5)
 
 #define NFP_FLOWER_MASK_VLAN_PRIO	GENMASK(15, 13)
 #define NFP_FLOWER_MASK_VLAN_CFI	BIT(12)
@@ -108,6 +107,7 @@
 enum nfp_flower_tun_type {
 	NFP_FL_TUNNEL_NONE =	0,
 	NFP_FL_TUNNEL_VXLAN =	2,
+	NFP_FL_TUNNEL_GENEVE =	4,
 };
 
 struct nfp_fl_act_head {
@@ -165,20 +165,6 @@ struct nfp_fl_pop_vlan {
 	__be16 reserved;
 };
 
-/* Metadata without L2 (1W/4B)
- * ----------------------------------------------------------------
- *    3                   2                   1
- *  1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0
- * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
- * |  key_layers   |    mask_id    |           reserved            |
- * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
- */
-struct nfp_flower_meta_one {
-	u8 nfp_flow_key_layer;
-	u8 mask_id;
-	u16 reserved;
-};
-
 struct nfp_fl_pre_tunnel {
 	struct nfp_fl_act_head head;
 	__be16 reserved;
@@ -187,16 +173,13 @@ struct nfp_fl_pre_tunnel {
 	__be32 extra[3];
 };
 
-struct nfp_fl_set_vxlan {
+struct nfp_fl_set_ipv4_udp_tun {
 	struct nfp_fl_act_head head;
 	__be16 reserved;
-	__be64 tun_id;
+	__be64 tun_id __packed;
 	__be32 tun_type_index;
-	__be16 tun_flags;
-	u8 ipv4_ttl;
-	u8 ipv4_tos;
-	__be32 extra[2];
-} __packed;
+	__be32 extra[3];
+};
 
 /* Metadata with L2 (1W/4B)
  * ----------------------------------------------------------------
@@ -209,12 +192,24 @@ struct nfp_fl_set_vxlan {
  *                           NOTE: |             TCI               |
  *                                 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
  */
-struct nfp_flower_meta_two {
+struct nfp_flower_meta_tci {
 	u8 nfp_flow_key_layer;
 	u8 mask_id;
 	__be16 tci;
 };
 
+/* Extended metadata for additional key_layers (1W/4B)
+ * ----------------------------------------------------------------
+ *    3                   2                   1
+ *  1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * |                      nfp_flow_key_layer2                      |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ */
+struct nfp_flower_ext_meta {
+	__be32 nfp_flow_key_layer2;
+};
+
 /* Port details (1W/4B)
  * ----------------------------------------------------------------
  *    3                   2                   1
@@ -313,7 +308,7 @@ struct nfp_flower_ipv6 {
 	struct in6_addr ipv6_dst;
 };
 
-/* Flow Frame VXLAN --> Tunnel details (4W/16B)
+/* Flow Frame IPv4 UDP TUNNEL --> Tunnel details (4W/16B)
  * -----------------------------------------------------------------
  *    3                   2                   1
  *  1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0
@@ -322,22 +317,17 @@ struct nfp_flower_ipv6 {
  * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
  * |                         ipv4_addr_dst                         |
  * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
- * |           tun_flags           |       tos     |       ttl     |
+ * |                            Reserved                           |
  * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
- * |   gpe_flags   |            Reserved           | Next Protocol |
+ * |                            Reserved                           |
  * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
  * |                     VNI                       |   Reserved    |
  * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
  */
-struct nfp_flower_vxlan {
+struct nfp_flower_ipv4_udp_tun {
 	__be32 ip_src;
 	__be32 ip_dst;
-	__be16 tun_flags;
-	u8 tos;
-	u8 ttl;
-	u8 gpe_flags;
-	u8 reserved[2];
-	u8 nxt_proto;
+	__be32 reserved[2];
 	__be32 tun_id;
 };
 
@@ -360,6 +350,7 @@ struct nfp_flower_cmsg_hdr {
 enum nfp_flower_cmsg_type_port {
 	NFP_FLOWER_CMSG_TYPE_FLOW_ADD =		0,
 	NFP_FLOWER_CMSG_TYPE_FLOW_DEL =		2,
+	NFP_FLOWER_CMSG_TYPE_PORT_REIFY =	6,
 	NFP_FLOWER_CMSG_TYPE_MAC_REPR =		7,
 	NFP_FLOWER_CMSG_TYPE_PORT_MOD =		8,
 	NFP_FLOWER_CMSG_TYPE_NO_NEIGH =		10,
@@ -396,6 +387,15 @@ struct nfp_flower_cmsg_portmod {
 
 #define NFP_FLOWER_CMSG_PORTMOD_INFO_LINK	BIT(0)
 
+/* NFP_FLOWER_CMSG_TYPE_PORT_REIFY */
+struct nfp_flower_cmsg_portreify {
+	__be32 portnum;
+	u16 reserved;
+	__be16 info;
+};
+
+#define NFP_FLOWER_CMSG_PORTREIFY_INFO_EXIST	BIT(0)
+
 enum nfp_flower_cmsg_port_type {
 	NFP_FLOWER_CMSG_PORT_TYPE_UNSPEC =	0x0,
 	NFP_FLOWER_CMSG_PORT_TYPE_PHYS_PORT =	0x1,
@@ -454,6 +454,7 @@ nfp_flower_cmsg_mac_repr_add(struct sk_buff *skb, unsigned int idx,
 			     unsigned int nbi, unsigned int nbi_port,
 			     unsigned int phys_port);
 int nfp_flower_cmsg_portmod(struct nfp_repr *repr, bool carrier_ok);
+int nfp_flower_cmsg_portreify(struct nfp_repr *repr, bool exists);
 void nfp_flower_cmsg_process_rx(struct work_struct *work);
 void nfp_flower_cmsg_rx(struct nfp_app *app, struct sk_buff *skb);
 struct sk_buff *
diff --git a/drivers/net/ethernet/netronome/nfp/flower/main.c b/drivers/net/ethernet/netronome/nfp/flower/main.c
index 8fcc90c..67c4068 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/main.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/main.c
@@ -32,6 +32,7 @@
  */
 
 #include <linux/etherdevice.h>
+#include <linux/lockdep.h>
 #include <linux/pci.h>
 #include <linux/skbuff.h>
 #include <linux/vmalloc.h>
@@ -102,6 +103,52 @@ nfp_flower_repr_get(struct nfp_app *app, u32 port_id)
 }
 
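+/* Tell FW about the existence (or removal) of all representors of @type.
+ * Returns the number of notifications sent or a negative error code.
+ */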
 static int
+nfp_flower_reprs_reify(struct nfp_app *app, enum nfp_repr_type type,
+		       bool exists)
+{
+	struct nfp_reprs *reprs;
+	int i, err, count = 0;
+
+	reprs = rcu_dereference_protected(app->reprs[type],
+					  lockdep_is_held(&app->pf->lock));
+	if (!reprs)
+		return 0;
+
+	for (i = 0; i < reprs->num_reprs; i++)
+		if (reprs->reprs[i]) {
+			struct nfp_repr *repr = netdev_priv(reprs->reprs[i]);
+
+			err = nfp_flower_cmsg_portreify(repr, exists);
+			if (err)
+				return err;
+			count++;
+		}
+
+	return count;
+}
+
+static int
+nfp_flower_wait_repr_reify(struct nfp_app *app, atomic_t *replies, int tot_repl)
+{
+	struct nfp_flower_priv *priv = app->priv;
+	int err;
+
+	if (!tot_repl)
+		return 0;
+
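+	/* Replies are counted by the cmsg RX handler; allow FW up to 10ms
+	 * to acknowledge every request sent.
+	 */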
+	lockdep_assert_held(&app->pf->lock);
+	err = wait_event_interruptible_timeout(priv->reify_wait_queue,
+					       atomic_read(replies) >= tot_repl,
+					       msecs_to_jiffies(10));
+	if (err <= 0) {
+		nfp_warn(app->cpp, "Not all reprs responded to reify\n");
+		return -EIO;
+	}
+
+	return 0;
+}
+
+static int
 nfp_flower_repr_netdev_open(struct nfp_app *app, struct nfp_repr *repr)
 {
 	int err;
@@ -110,7 +157,6 @@ nfp_flower_repr_netdev_open(struct nfp_app *app, struct nfp_repr *repr)
 	if (err)
 		return err;
 
-	netif_carrier_on(repr->netdev);
 	netif_tx_wake_all_queues(repr->netdev);
 
 	return 0;
@@ -119,7 +165,6 @@ nfp_flower_repr_netdev_open(struct nfp_app *app, struct nfp_repr *repr)
 static int
 nfp_flower_repr_netdev_stop(struct nfp_app *app, struct nfp_repr *repr)
 {
-	netif_carrier_off(repr->netdev);
 	netif_tx_disable(repr->netdev);
 
 	return nfp_flower_cmsg_portmod(repr, false);
@@ -140,6 +185,24 @@ nfp_flower_repr_netdev_clean(struct nfp_app *app, struct net_device *netdev)
 				     netdev_priv(netdev));
 }
 
+static void
+nfp_flower_repr_netdev_preclean(struct nfp_app *app, struct net_device *netdev)
+{
+	struct nfp_repr *repr = netdev_priv(netdev);
+	struct nfp_flower_priv *priv = app->priv;
+	atomic_t *replies = &priv->reify_replies;
+	int err;
+
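+	/* Notify FW this representor is going away and wait for the single
+	 * ack before the app's reference to it is dropped.
+	 */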
+	atomic_set(replies, 0);
+	err = nfp_flower_cmsg_portreify(repr, false);
+	if (err) {
+		nfp_warn(app->cpp, "Failed to notify firmware about repr destruction\n");
+		return;
+	}
+
+	nfp_flower_wait_repr_reify(app, replies, 1);
+}
+
 static void nfp_flower_sriov_disable(struct nfp_app *app)
 {
 	struct nfp_flower_priv *priv = app->priv;
@@ -157,10 +220,11 @@ nfp_flower_spawn_vnic_reprs(struct nfp_app *app,
 {
 	u8 nfp_pcie = nfp_cppcore_pcie_unit(app->pf->cpp);
 	struct nfp_flower_priv *priv = app->priv;
+	atomic_t *replies = &priv->reify_replies;
 	enum nfp_port_type port_type;
 	struct nfp_reprs *reprs;
+	int i, err, reify_cnt;
 	const u8 queue = 0;
-	int i, err;
 
 	port_type = repr_type == NFP_REPR_TYPE_PF ? NFP_PORT_PF_PORT :
 						    NFP_PORT_VF_PORT;
@@ -211,7 +275,21 @@ nfp_flower_spawn_vnic_reprs(struct nfp_app *app,
 
 	nfp_app_reprs_set(app, repr_type, reprs);
 
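+	/* As with the MAC representors, reify only after nfp_app_reprs_set()
+	 * so that FW replies are not dropped.
+	 */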
+	atomic_set(replies, 0);
+	reify_cnt = nfp_flower_reprs_reify(app, repr_type, true);
+	if (reify_cnt < 0) {
+		err = reify_cnt;
+		nfp_warn(app->cpp, "Failed to notify firmware about repr creation\n");
+		goto err_reprs_remove;
+	}
+
+	err = nfp_flower_wait_repr_reify(app, replies, reify_cnt);
+	if (err)
+		goto err_reprs_remove;
+
 	return 0;
+err_reprs_remove:
+	reprs = nfp_app_reprs_set(app, repr_type, NULL);
 err_reprs_clean:
 	nfp_reprs_clean_and_free(reprs);
 	return err;
@@ -233,10 +311,11 @@ static int
 nfp_flower_spawn_phy_reprs(struct nfp_app *app, struct nfp_flower_priv *priv)
 {
 	struct nfp_eth_table *eth_tbl = app->pf->eth_tbl;
+	atomic_t *replies = &priv->reify_replies;
 	struct sk_buff *ctrl_skb;
 	struct nfp_reprs *reprs;
+	int err, reify_cnt;
 	unsigned int i;
-	int err;
 
 	ctrl_skb = nfp_flower_cmsg_mac_repr_start(app, eth_tbl->count);
 	if (!ctrl_skb)
@@ -293,16 +372,30 @@ nfp_flower_spawn_phy_reprs(struct nfp_app *app, struct nfp_flower_priv *priv)
 
 	nfp_app_reprs_set(app, NFP_REPR_TYPE_PHYS_PORT, reprs);
 
-	/* The MAC_REPR control message should be sent after the MAC
+	/* The REIFY/MAC_REPR control messages should be sent after the MAC
 	 * representors are registered using nfp_app_reprs_set().  This is
 	 * because the firmware may respond with control messages for the
 	 * MAC representors, f.e. to provide the driver with information
 	 * about their state, and without registration the driver will drop
 	 * any such messages.
 	 */
+	atomic_set(replies, 0);
+	reify_cnt = nfp_flower_reprs_reify(app, NFP_REPR_TYPE_PHYS_PORT, true);
+	if (reify_cnt < 0) {
+		err = reify_cnt;
+		nfp_warn(app->cpp, "Failed to notify firmware about repr creation\n");
+		goto err_reprs_remove;
+	}
+
+	err = nfp_flower_wait_repr_reify(app, replies, reify_cnt);
+	if (err)
+		goto err_reprs_remove;
+
 	nfp_ctrl_tx(app->ctrl, ctrl_skb);
 
 	return 0;
+err_reprs_remove:
+	reprs = nfp_app_reprs_set(app, NFP_REPR_TYPE_PHYS_PORT, NULL);
 err_reprs_clean:
 	nfp_reprs_clean_and_free(reprs);
 err_free_ctrl_skb:
@@ -381,7 +474,7 @@ static int nfp_flower_init(struct nfp_app *app)
 {
 	const struct nfp_pf *pf = app->pf;
 	struct nfp_flower_priv *app_priv;
-	u64 version;
+	u64 version, features;
 	int err;
 
 	if (!pf->eth_tbl) {
@@ -419,11 +512,20 @@ static int nfp_flower_init(struct nfp_app *app)
 	app_priv->app = app;
 	skb_queue_head_init(&app_priv->cmsg_skbs);
 	INIT_WORK(&app_priv->cmsg_work, nfp_flower_cmsg_process_rx);
+	init_waitqueue_head(&app_priv->reify_wait_queue);
 
 	err = nfp_flower_metadata_init(app);
 	if (err)
 		goto err_free_app_priv;
 
+	/* Extract the extra features supported by the firmware. */
+	features = nfp_rtsym_read_le(app->pf->rtbl,
+				     "_abi_flower_extra_features", &err);
+	if (err)
+		app_priv->flower_ext_feats = 0;
+	else
+		app_priv->flower_ext_feats = features;
+
 	return 0;
 
 err_free_app_priv:
@@ -468,6 +570,7 @@ const struct nfp_app_type app_flower = {
 	.vnic_clean	= nfp_flower_vnic_clean,
 
 	.repr_init	= nfp_flower_repr_netdev_init,
+	.repr_preclean	= nfp_flower_repr_netdev_preclean,
 	.repr_clean	= nfp_flower_repr_netdev_clean,
 
 	.repr_open	= nfp_flower_repr_netdev_open,
diff --git a/drivers/net/ethernet/netronome/nfp/flower/main.h b/drivers/net/ethernet/netronome/nfp/flower/main.h
index e6b26c5..332ff0f 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/main.h
+++ b/drivers/net/ethernet/netronome/nfp/flower/main.h
@@ -34,6 +34,8 @@
 #ifndef __NFP_FLOWER_H__
 #define __NFP_FLOWER_H__ 1
 
+#include "cmsg.h"
+
 #include <linux/circ_buf.h>
 #include <linux/hashtable.h>
 #include <linux/time64.h>
@@ -58,6 +60,10 @@ struct nfp_app;
 #define NFP_FL_MASK_ID_LOCATION		1
 
 #define NFP_FL_VXLAN_PORT		4789
+#define NFP_FL_GENEVE_PORT		6081
+
+/* Extra features bitmap. */
+#define NFP_FL_FEATS_GENEVE		BIT(0)
 
 struct nfp_fl_mask_id {
 	struct circ_buf mask_id_free_list;
@@ -77,6 +83,7 @@ struct nfp_fl_stats_id {
  * @nn:			Pointer to vNIC
  * @mask_id_seed:	Seed used for mask hash table
  * @flower_version:	HW version of flower
+ * @flower_ext_feats:	Bitmap of extra features the HW supports
  * @stats_ids:		List of free stats ids
  * @mask_ids:		List of free mask ids
  * @mask_table:		Hash table used to store masks
@@ -95,12 +102,16 @@ struct nfp_fl_stats_id {
  * @nfp_mac_off_count:	Number of MACs in address list
  * @nfp_tun_mac_nb:	Notifier to monitor link state
  * @nfp_tun_neigh_nb:	Notifier to monitor neighbour state
+ * @reify_replies:	atomically stores the number of replies received
+ *			from firmware for repr reify
+ * @reify_wait_queue:	wait queue for repr reify response counting
  */
 struct nfp_flower_priv {
 	struct nfp_app *app;
 	struct nfp_net *nn;
 	u32 mask_id_seed;
 	u64 flower_version;
+	u64 flower_ext_feats;
 	struct nfp_fl_stats_id stats_ids;
 	struct nfp_fl_mask_id mask_ids;
 	DECLARE_HASHTABLE(mask_table, NFP_FLOWER_MASK_HASH_BITS);
@@ -119,6 +130,8 @@ struct nfp_flower_priv {
 	int nfp_mac_off_count;
 	struct notifier_block nfp_tun_mac_nb;
 	struct notifier_block nfp_tun_neigh_nb;
+	atomic_t reify_replies;
+	wait_queue_head_t reify_wait_queue;
 };
 
 struct nfp_fl_key_ls {
@@ -172,7 +185,8 @@ int nfp_flower_setup_tc(struct nfp_app *app, struct net_device *netdev,
 int nfp_flower_compile_flow_match(struct tc_cls_flower_offload *flow,
 				  struct nfp_fl_key_ls *key_ls,
 				  struct net_device *netdev,
-				  struct nfp_fl_payload *nfp_flow);
+				  struct nfp_fl_payload *nfp_flow,
+				  enum nfp_flower_tun_type tun_type);
 int nfp_flower_compile_action(struct tc_cls_flower_offload *flow,
 			      struct net_device *netdev,
 			      struct nfp_fl_payload *nfp_flow);
diff --git a/drivers/net/ethernet/netronome/nfp/flower/match.c b/drivers/net/ethernet/netronome/nfp/flower/match.c
index 60614d4..37c2eca 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/match.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/match.c
@@ -38,7 +38,7 @@
 #include "main.h"
 
 static void
-nfp_flower_compile_meta_tci(struct nfp_flower_meta_two *frame,
+nfp_flower_compile_meta_tci(struct nfp_flower_meta_tci *frame,
 			    struct tc_cls_flower_offload *flow, u8 key_type,
 			    bool mask_version)
 {
@@ -46,7 +46,7 @@ nfp_flower_compile_meta_tci(struct nfp_flower_meta_two *frame,
 	struct flow_dissector_key_vlan *flow_vlan;
 	u16 tmp_tci;
 
-	memset(frame, 0, sizeof(struct nfp_flower_meta_two));
+	memset(frame, 0, sizeof(struct nfp_flower_meta_tci));
 	/* Populate the metadata frame. */
 	frame->nfp_flow_key_layer = key_type;
 	frame->mask_id = ~0;
@@ -68,11 +68,9 @@ nfp_flower_compile_meta_tci(struct nfp_flower_meta_two *frame,
 }
 
 static void
-nfp_flower_compile_meta(struct nfp_flower_meta_one *frame, u8 key_type)
+nfp_flower_compile_ext_meta(struct nfp_flower_ext_meta *frame, u32 key_ext)
 {
-	frame->nfp_flow_key_layer = key_type;
-	frame->mask_id = 0;
-	frame->reserved = 0;
+	frame->nfp_flow_key_layer2 = cpu_to_be32(key_ext);
 }
 
 static int
@@ -224,16 +222,15 @@ nfp_flower_compile_ipv6(struct nfp_flower_ipv6 *frame,
 }
 
 static void
-nfp_flower_compile_vxlan(struct nfp_flower_vxlan *frame,
-			 struct tc_cls_flower_offload *flow,
-			 bool mask_version, __be32 *tun_dst)
+nfp_flower_compile_ipv4_udp_tun(struct nfp_flower_ipv4_udp_tun *frame,
+				struct tc_cls_flower_offload *flow,
+				bool mask_version)
 {
 	struct fl_flow_key *target = mask_version ? flow->mask : flow->key;
-	struct flow_dissector_key_ipv4_addrs *vxlan_ips;
+	struct flow_dissector_key_ipv4_addrs *tun_ips;
 	struct flow_dissector_key_keyid *vni;
 
-	/* Wildcard TOS/TTL/GPE_FLAGS/NXT_PROTO for now. */
-	memset(frame, 0, sizeof(struct nfp_flower_vxlan));
+	memset(frame, 0, sizeof(struct nfp_flower_ipv4_udp_tun));
 
 	if (dissector_uses_key(flow->dissector,
 			       FLOW_DISSECTOR_KEY_ENC_KEYID)) {
@@ -248,79 +245,67 @@ nfp_flower_compile_vxlan(struct nfp_flower_vxlan *frame,
 
 	if (dissector_uses_key(flow->dissector,
 			       FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS)) {
-		vxlan_ips =
+		tun_ips =
 		   skb_flow_dissector_target(flow->dissector,
 					     FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS,
 					     target);
-		frame->ip_src = vxlan_ips->src;
-		frame->ip_dst = vxlan_ips->dst;
-		*tun_dst = vxlan_ips->dst;
+		frame->ip_src = tun_ips->src;
+		frame->ip_dst = tun_ips->dst;
 	}
 }
 
 int nfp_flower_compile_flow_match(struct tc_cls_flower_offload *flow,
 				  struct nfp_fl_key_ls *key_ls,
 				  struct net_device *netdev,
-				  struct nfp_fl_payload *nfp_flow)
+				  struct nfp_fl_payload *nfp_flow,
+				  enum nfp_flower_tun_type tun_type)
 {
-	enum nfp_flower_tun_type tun_type = NFP_FL_TUNNEL_NONE;
-	__be32 tun_dst, tun_dst_mask = 0;
 	struct nfp_repr *netdev_repr;
 	int err;
 	u8 *ext;
 	u8 *msk;
 
-	if (key_ls->key_layer & NFP_FLOWER_LAYER_VXLAN)
-		tun_type = NFP_FL_TUNNEL_VXLAN;
-
 	memset(nfp_flow->unmasked_data, 0, key_ls->key_size);
 	memset(nfp_flow->mask_data, 0, key_ls->key_size);
 
 	ext = nfp_flow->unmasked_data;
 	msk = nfp_flow->mask_data;
-	if (NFP_FLOWER_LAYER_PORT & key_ls->key_layer) {
-		/* Populate Exact Metadata. */
-		nfp_flower_compile_meta_tci((struct nfp_flower_meta_two *)ext,
-					    flow, key_ls->key_layer, false);
-		/* Populate Mask Metadata. */
-		nfp_flower_compile_meta_tci((struct nfp_flower_meta_two *)msk,
-					    flow, key_ls->key_layer, true);
-		ext += sizeof(struct nfp_flower_meta_two);
-		msk += sizeof(struct nfp_flower_meta_two);
 
-		/* Populate Exact Port data. */
-		err = nfp_flower_compile_port((struct nfp_flower_in_port *)ext,
-					      nfp_repr_get_port_id(netdev),
-					      false, tun_type);
-		if (err)
-			return err;
+	/* Populate Exact Metadata. */
+	nfp_flower_compile_meta_tci((struct nfp_flower_meta_tci *)ext,
+				    flow, key_ls->key_layer, false);
+	/* Populate Mask Metadata. */
+	nfp_flower_compile_meta_tci((struct nfp_flower_meta_tci *)msk,
+				    flow, key_ls->key_layer, true);
+	ext += sizeof(struct nfp_flower_meta_tci);
+	msk += sizeof(struct nfp_flower_meta_tci);
 
-		/* Populate Mask Port Data. */
-		err = nfp_flower_compile_port((struct nfp_flower_in_port *)msk,
-					      nfp_repr_get_port_id(netdev),
-					      true, tun_type);
-		if (err)
-			return err;
-
-		ext += sizeof(struct nfp_flower_in_port);
-		msk += sizeof(struct nfp_flower_in_port);
-	} else {
-		/* Populate Exact Metadata. */
-		nfp_flower_compile_meta((struct nfp_flower_meta_one *)ext,
-					key_ls->key_layer);
-		/* Populate Mask Metadata. */
-		nfp_flower_compile_meta((struct nfp_flower_meta_one *)msk,
-					key_ls->key_layer);
-		ext += sizeof(struct nfp_flower_meta_one);
-		msk += sizeof(struct nfp_flower_meta_one);
+	/* Populate Extended Metadata if Required. */
+	if (NFP_FLOWER_LAYER_EXT_META & key_ls->key_layer) {
+		nfp_flower_compile_ext_meta((struct nfp_flower_ext_meta *)ext,
+					    key_ls->key_layer_two);
+		nfp_flower_compile_ext_meta((struct nfp_flower_ext_meta *)msk,
+					    key_ls->key_layer_two);
+		ext += sizeof(struct nfp_flower_ext_meta);
+		msk += sizeof(struct nfp_flower_ext_meta);
 	}
 
-	if (NFP_FLOWER_LAYER_META & key_ls->key_layer) {
-		/* Additional Metadata Fields.
-		 * Currently unsupported.
-		 */
-		return -EOPNOTSUPP;
-	}
+	/* Populate Exact Port data. */
+	err = nfp_flower_compile_port((struct nfp_flower_in_port *)ext,
+				      nfp_repr_get_port_id(netdev),
+				      false, tun_type);
+	if (err)
+		return err;
+
+	/* Populate Mask Port Data. */
+	err = nfp_flower_compile_port((struct nfp_flower_in_port *)msk,
+				      nfp_repr_get_port_id(netdev),
+				      true, tun_type);
+	if (err)
+		return err;
+
+	ext += sizeof(struct nfp_flower_in_port);
+	msk += sizeof(struct nfp_flower_in_port);
 
 	if (NFP_FLOWER_LAYER_MAC & key_ls->key_layer) {
 		/* Populate Exact MAC Data. */
@@ -366,15 +351,17 @@ int nfp_flower_compile_flow_match(struct tc_cls_flower_offload *flow,
 		msk += sizeof(struct nfp_flower_ipv6);
 	}
 
-	if (key_ls->key_layer & NFP_FLOWER_LAYER_VXLAN) {
+	if (key_ls->key_layer & NFP_FLOWER_LAYER_VXLAN ||
+	    key_ls->key_layer_two & NFP_FLOWER_LAYER2_GENEVE) {
+		__be32 tun_dst;
+
 		/* Populate Exact VXLAN Data. */
-		nfp_flower_compile_vxlan((struct nfp_flower_vxlan *)ext,
-					 flow, false, &tun_dst);
+		nfp_flower_compile_ipv4_udp_tun((void *)ext, flow, false);
 		/* Populate Mask VXLAN Data. */
-		nfp_flower_compile_vxlan((struct nfp_flower_vxlan *)msk,
-					 flow, true, &tun_dst_mask);
-		ext += sizeof(struct nfp_flower_vxlan);
-		msk += sizeof(struct nfp_flower_vxlan);
+		nfp_flower_compile_ipv4_udp_tun((void *)msk, flow, true);
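+		/* Save the exact-match tunnel dst IP before advancing, it is
+		 * needed below to configure the tunnel end point MAC.
+		 */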
+		tun_dst = ((struct nfp_flower_ipv4_udp_tun *)ext)->ip_dst;
+		ext += sizeof(struct nfp_flower_ipv4_udp_tun);
+		msk += sizeof(struct nfp_flower_ipv4_udp_tun);
 
 		/* Configure tunnel end point MAC. */
 		if (nfp_netdev_is_nfp_repr(netdev)) {
diff --git a/drivers/net/ethernet/netronome/nfp/flower/offload.c b/drivers/net/ethernet/netronome/nfp/flower/offload.c
index 553f94f5..837134a 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/offload.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/offload.c
@@ -130,12 +130,15 @@ static bool nfp_flower_check_higher_than_mac(struct tc_cls_flower_offload *f)
 }
 
 static int
-nfp_flower_calculate_key_layers(struct nfp_fl_key_ls *ret_key_ls,
+nfp_flower_calculate_key_layers(struct nfp_app *app,
+				struct nfp_fl_key_ls *ret_key_ls,
 				struct tc_cls_flower_offload *flow,
-				bool egress)
+				bool egress,
+				enum nfp_flower_tun_type *tun_type)
 {
 	struct flow_dissector_key_basic *mask_basic = NULL;
 	struct flow_dissector_key_basic *key_basic = NULL;
+	struct nfp_flower_priv *priv = app->priv;
 	u32 key_layer_two;
 	u8 key_layer;
 	int key_size;
@@ -150,10 +153,15 @@ nfp_flower_calculate_key_layers(struct nfp_fl_key_ls *ret_key_ls,
 		return -EOPNOTSUPP;
 
 	key_layer_two = 0;
-	key_layer = NFP_FLOWER_LAYER_PORT | NFP_FLOWER_LAYER_MAC;
-	key_size = sizeof(struct nfp_flower_meta_one) +
-		   sizeof(struct nfp_flower_in_port) +
-		   sizeof(struct nfp_flower_mac_mpls);
+	key_layer = NFP_FLOWER_LAYER_PORT;
+	key_size = sizeof(struct nfp_flower_meta_tci) +
+		   sizeof(struct nfp_flower_in_port);
+
+	if (dissector_uses_key(flow->dissector, FLOW_DISSECTOR_KEY_ETH_ADDRS) ||
+	    dissector_uses_key(flow->dissector, FLOW_DISSECTOR_KEY_MPLS)) {
+		key_layer |= NFP_FLOWER_LAYER_MAC;
+		key_size += sizeof(struct nfp_flower_mac_mpls);
+	}
 
 	if (dissector_uses_key(flow->dissector,
 			       FLOW_DISSECTOR_KEY_ENC_CONTROL)) {
@@ -192,12 +200,27 @@ nfp_flower_calculate_key_layers(struct nfp_fl_key_ls *ret_key_ls,
 						  FLOW_DISSECTOR_KEY_ENC_PORTS,
 						  flow->key);
 
-		if (mask_enc_ports->dst != cpu_to_be16(~0) ||
-		    enc_ports->dst != htons(NFP_FL_VXLAN_PORT))
+		if (mask_enc_ports->dst != cpu_to_be16(~0))
 			return -EOPNOTSUPP;
 
-		key_layer |= NFP_FLOWER_LAYER_VXLAN;
-		key_size += sizeof(struct nfp_flower_vxlan);
+		switch (enc_ports->dst) {
+		case htons(NFP_FL_VXLAN_PORT):
+			*tun_type = NFP_FL_TUNNEL_VXLAN;
+			key_layer |= NFP_FLOWER_LAYER_VXLAN;
+			key_size += sizeof(struct nfp_flower_ipv4_udp_tun);
+			break;
+		case htons(NFP_FL_GENEVE_PORT):
+			if (!(priv->flower_ext_feats & NFP_FL_FEATS_GENEVE))
+				return -EOPNOTSUPP;
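+			/* GENEVE is flagged in key_layer_two, so the
+			 * extended metadata word is needed as well.
+			 */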
+			*tun_type = NFP_FL_TUNNEL_GENEVE;
+			key_layer |= NFP_FLOWER_LAYER_EXT_META;
+			key_size += sizeof(struct nfp_flower_ext_meta);
+			key_layer_two |= NFP_FLOWER_LAYER2_GENEVE;
+			key_size += sizeof(struct nfp_flower_ipv4_udp_tun);
+			break;
+		default:
+			return -EOPNOTSUPP;
+		}
 	} else if (egress) {
 		/* Reject non tunnel matches offloaded to egress repr. */
 		return -EOPNOTSUPP;
@@ -325,6 +348,7 @@ static int
 nfp_flower_add_offload(struct nfp_app *app, struct net_device *netdev,
 		       struct tc_cls_flower_offload *flow, bool egress)
 {
+	enum nfp_flower_tun_type tun_type = NFP_FL_TUNNEL_NONE;
 	struct nfp_flower_priv *priv = app->priv;
 	struct nfp_fl_payload *flow_pay;
 	struct nfp_fl_key_ls *key_layer;
@@ -334,7 +358,8 @@ nfp_flower_add_offload(struct nfp_app *app, struct net_device *netdev,
 	if (!key_layer)
 		return -ENOMEM;
 
-	err = nfp_flower_calculate_key_layers(key_layer, flow, egress);
+	err = nfp_flower_calculate_key_layers(app, key_layer, flow, egress,
+					      &tun_type);
 	if (err)
 		goto err_free_key_ls;
 
@@ -344,7 +369,8 @@ nfp_flower_add_offload(struct nfp_app *app, struct net_device *netdev,
 		goto err_free_key_ls;
 	}
 
-	err = nfp_flower_compile_flow_match(flow, key_layer, netdev, flow_pay);
+	err = nfp_flower_compile_flow_match(flow, key_layer, netdev, flow_pay,
+					    tun_type);
 	if (err)
 		goto err_destroy_flow;
 
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_app.h b/drivers/net/ethernet/netronome/nfp/nfp_app.h
index 0e5e030..32ff46a 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_app.h
+++ b/drivers/net/ethernet/netronome/nfp/nfp_app.h
@@ -77,18 +77,20 @@ extern const struct nfp_app_type app_flower;
  * @vnic_init:	vNIC netdev was registered
  * @vnic_clean:	vNIC netdev about to be unregistered
  * @repr_init:	representor about to be registered
+ * @repr_preclean:	representor about to be unregistered, executed before
+ *			the app reference to it is removed
  * @repr_clean:	representor about to be unregistered
  * @repr_open:	representor netdev open callback
  * @repr_stop:	representor netdev stop callback
+ * @change_mtu:	MTU change on a netdev has been requested (veto-only, change
+ *		is not guaranteed to be committed)
  * @start:	start application logic
  * @stop:	stop application logic
  * @ctrl_msg_rx:    control message handler
  * @setup_tc:	setup TC ndo
  * @tc_busy:	TC HW offload busy (rules loaded)
+ * @bpf:	BPF ndo offload-related calls
  * @xdp_offload:    offload an XDP program
- * @bpf_verifier_prep:	verifier prep for dev-specific BPF programs
- * @bpf_translate:	translate call for dev-specific BPF programs
- * @bpf_destroy:	destroy for dev-specific BPF programs
  * @eswitch_mode_get:    get SR-IOV eswitch mode
  * @sriov_enable: app-specific sriov initialisation
  * @sriov_disable: app-specific sriov clean-up
@@ -112,11 +114,15 @@ struct nfp_app_type {
 	void (*vnic_clean)(struct nfp_app *app, struct nfp_net *nn);
 
 	int (*repr_init)(struct nfp_app *app, struct net_device *netdev);
+	void (*repr_preclean)(struct nfp_app *app, struct net_device *netdev);
 	void (*repr_clean)(struct nfp_app *app, struct net_device *netdev);
 
 	int (*repr_open)(struct nfp_app *app, struct nfp_repr *repr);
 	int (*repr_stop)(struct nfp_app *app, struct nfp_repr *repr);
 
+	int (*change_mtu)(struct nfp_app *app, struct net_device *netdev,
+			  int new_mtu);
+
 	int (*start)(struct nfp_app *app);
 	void (*stop)(struct nfp_app *app);
 
@@ -125,14 +131,10 @@ struct nfp_app_type {
 	int (*setup_tc)(struct nfp_app *app, struct net_device *netdev,
 			enum tc_setup_type type, void *type_data);
 	bool (*tc_busy)(struct nfp_app *app, struct nfp_net *nn);
+	int (*bpf)(struct nfp_app *app, struct nfp_net *nn,
+		   struct netdev_bpf *xdp);
 	int (*xdp_offload)(struct nfp_app *app, struct nfp_net *nn,
 			   struct bpf_prog *prog);
-	int (*bpf_verifier_prep)(struct nfp_app *app, struct nfp_net *nn,
-				 struct netdev_bpf *bpf);
-	int (*bpf_translate)(struct nfp_app *app, struct nfp_net *nn,
-			     struct bpf_prog *prog);
-	int (*bpf_destroy)(struct nfp_app *app, struct nfp_net *nn,
-			   struct bpf_prog *prog);
 
 	int (*sriov_enable)(struct nfp_app *app, int num_vfs);
 	void (*sriov_disable)(struct nfp_app *app);
@@ -226,12 +228,27 @@ nfp_app_repr_init(struct nfp_app *app, struct net_device *netdev)
 }
 
 static inline void
+nfp_app_repr_preclean(struct nfp_app *app, struct net_device *netdev)
+{
+	if (app->type->repr_preclean)
+		app->type->repr_preclean(app, netdev);
+}
+
+static inline void
 nfp_app_repr_clean(struct nfp_app *app, struct net_device *netdev)
 {
 	if (app->type->repr_clean)
 		app->type->repr_clean(app, netdev);
 }
 
+static inline int
+nfp_app_change_mtu(struct nfp_app *app, struct net_device *netdev, int new_mtu)
+{
+	if (!app || !app->type->change_mtu)
+		return 0;
+	return app->type->change_mtu(app, netdev, new_mtu);
+}
+
 static inline int nfp_app_start(struct nfp_app *app, struct nfp_net *ctrl)
 {
 	app->ctrl = ctrl;
@@ -293,6 +310,14 @@ static inline int nfp_app_setup_tc(struct nfp_app *app,
 	return app->type->setup_tc(app, netdev, type, type_data);
 }
 
+static inline int nfp_app_bpf(struct nfp_app *app, struct nfp_net *nn,
+			      struct netdev_bpf *bpf)
+{
+	if (!app || !app->type->bpf)
+		return -EINVAL;
+	return app->type->bpf(app, nn, bpf);
+}
+
 static inline int nfp_app_xdp_offload(struct nfp_app *app, struct nfp_net *nn,
 				      struct bpf_prog *prog)
 {
@@ -301,33 +326,6 @@ static inline int nfp_app_xdp_offload(struct nfp_app *app, struct nfp_net *nn,
 	return app->type->xdp_offload(app, nn, prog);
 }
 
-static inline int
-nfp_app_bpf_verifier_prep(struct nfp_app *app, struct nfp_net *nn,
-			  struct netdev_bpf *bpf)
-{
-	if (!app || !app->type->bpf_verifier_prep)
-		return -EOPNOTSUPP;
-	return app->type->bpf_verifier_prep(app, nn, bpf);
-}
-
-static inline int
-nfp_app_bpf_translate(struct nfp_app *app, struct nfp_net *nn,
-		      struct bpf_prog *prog)
-{
-	if (!app || !app->type->bpf_translate)
-		return -EOPNOTSUPP;
-	return app->type->bpf_translate(app, nn, prog);
-}
-
-static inline int
-nfp_app_bpf_destroy(struct nfp_app *app, struct nfp_net *nn,
-		    struct bpf_prog *prog)
-{
-	if (!app || !app->type->bpf_destroy)
-		return -EOPNOTSUPP;
-	return app->type->bpf_destroy(app, nn, prog);
-}
-
 static inline bool nfp_app_ctrl_tx(struct nfp_app *app, struct sk_buff *skb)
 {
 	trace_devlink_hwmsg(priv_to_devlink(app->pf), false, 0,
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_asm.c b/drivers/net/ethernet/netronome/nfp/nfp_asm.c
index 830f6de..9ee3a3f 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_asm.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_asm.c
@@ -41,6 +41,7 @@
 
 const struct cmd_tgt_act cmd_tgt_act[__CMD_TGT_MAP_SIZE] = {
 	[CMD_TGT_WRITE8_SWAP] =		{ 0x02, 0x42 },
+	[CMD_TGT_WRITE32_SWAP] =	{ 0x02, 0x5f },
 	[CMD_TGT_READ8] =		{ 0x01, 0x43 },
 	[CMD_TGT_READ32] =		{ 0x00, 0x5c },
 	[CMD_TGT_READ32_LE] =		{ 0x01, 0x5c },
@@ -49,6 +50,36 @@ const struct cmd_tgt_act cmd_tgt_act[__CMD_TGT_MAP_SIZE] = {
 	[CMD_TGT_READ_SWAP_LE] =	{ 0x03, 0x40 },
 };
 
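+/* Branch target addresses are split across the OP_BR_ADDR_HI and
+ * OP_BR_ADDR_LO instruction fields; the helpers below extract, set and
+ * adjust the combined offset.
+ */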
+u16 br_get_offset(u64 instr)
+{
+	u16 addr_lo, addr_hi;
+
+	addr_lo = FIELD_GET(OP_BR_ADDR_LO, instr);
+	addr_hi = FIELD_GET(OP_BR_ADDR_HI, instr);
+
+	return (addr_hi * ((OP_BR_ADDR_LO >> __bf_shf(OP_BR_ADDR_LO)) + 1)) |
+		addr_lo;
+}
+
+void br_set_offset(u64 *instr, u16 offset)
+{
+	u16 addr_lo, addr_hi;
+
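+	/* addr_hi is used as a single flag here: set iff the offset does not
+	 * fit in OP_BR_ADDR_LO alone, doubling the reachable range (see
+	 * br_get_offset()).
+	 */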
+	addr_lo = offset & (OP_BR_ADDR_LO >> __bf_shf(OP_BR_ADDR_LO));
+	addr_hi = offset != addr_lo;
+	*instr &= ~(OP_BR_ADDR_HI | OP_BR_ADDR_LO);
+	*instr |= FIELD_PREP(OP_BR_ADDR_HI, addr_hi);
+	*instr |= FIELD_PREP(OP_BR_ADDR_LO, addr_lo);
+}
+
+void br_add_offset(u64 *instr, u16 offset)
+{
+	u16 addr;
+
+	addr = br_get_offset(*instr);
+	br_set_offset(instr, addr + offset);
+}
+
 static u16 nfp_swreg_to_unreg(swreg reg, bool is_dst)
 {
 	bool lm_id, lm_dec = false;
@@ -120,7 +151,8 @@ int swreg_to_unrestricted(swreg dst, swreg lreg, swreg rreg,
 	reg->dst = nfp_swreg_to_unreg(dst, true);
 
 	/* Decode source operands */
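+	/* Identical source operand types (other than NN_REG_NONE) cannot be
+	 * encoded, each instruction takes one A and one B operand.
+	 */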
-	if (swreg_type(lreg) == swreg_type(rreg))
+	if (swreg_type(lreg) == swreg_type(rreg) &&
+	    swreg_type(lreg) != NN_REG_NONE)
 		return -EFAULT;
 
 	if (swreg_type(lreg) == NN_REG_GPR_B ||
@@ -200,7 +232,8 @@ int swreg_to_restricted(swreg dst, swreg lreg, swreg rreg,
 	reg->dst = nfp_swreg_to_rereg(dst, true, false, NULL);
 
 	/* Decode source operands */
-	if (swreg_type(lreg) == swreg_type(rreg))
+	if (swreg_type(lreg) == swreg_type(rreg) &&
+	    swreg_type(lreg) != NN_REG_NONE)
 		return -EFAULT;
 
 	if (swreg_type(lreg) == NN_REG_GPR_B ||
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_asm.h b/drivers/net/ethernet/netronome/nfp/nfp_asm.h
index 74d0c11..20e51cb 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_asm.h
+++ b/drivers/net/ethernet/netronome/nfp/nfp_asm.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2016 Netronome Systems, Inc.
+ * Copyright (C) 2016-2017 Netronome Systems, Inc.
  *
  * This software is dual licensed under the GNU General License Version 2,
  * June 1991 as shown in the file COPYING in the top-level directory of this
@@ -77,9 +77,11 @@
 enum br_mask {
 	BR_BEQ = 0x00,
 	BR_BNE = 0x01,
+	BR_BMI = 0x02,
 	BR_BHS = 0x04,
 	BR_BLO = 0x05,
 	BR_BGE = 0x08,
+	BR_BLT = 0x09,
 	BR_UNC = 0x18,
 };
 
@@ -92,6 +94,10 @@ enum br_ctx_signal_state {
 	BR_CSS_NONE = 2,
 };
 
+u16 br_get_offset(u64 instr);
+void br_set_offset(u64 *instr, u16 offset);
+void br_add_offset(u64 *instr, u16 offset);
+
 #define OP_BBYTE_BASE		0x0c800000000ULL
 #define OP_BB_A_SRC		0x000000000ffULL
 #define OP_BB_BYTE		0x00000000300ULL
@@ -175,6 +181,7 @@ enum alu_op {
 	ALU_OP_NONE	= 0x00,
 	ALU_OP_ADD	= 0x01,
 	ALU_OP_NOT	= 0x04,
+	ALU_OP_ADD_2B	= 0x05,
 	ALU_OP_AND	= 0x08,
 	ALU_OP_SUB_C	= 0x0d,
 	ALU_OP_ADD_C	= 0x11,
@@ -209,6 +216,7 @@ enum alu_dst_ab {
 #define OP_CMD_CNT		0x0000e000000ULL
 #define OP_CMD_SIG		0x000f0000000ULL
 #define OP_CMD_TGT_CMD		0x07f00000000ULL
+#define OP_CMD_INDIR		0x20000000000ULL
 #define OP_CMD_MODE	       0x1c0000000000ULL
 
 struct cmd_tgt_act {
@@ -219,6 +227,7 @@ struct cmd_tgt_act {
 enum cmd_tgt_map {
 	CMD_TGT_READ8,
 	CMD_TGT_WRITE8_SWAP,
+	CMD_TGT_WRITE32_SWAP,
 	CMD_TGT_READ32,
 	CMD_TGT_READ32_LE,
 	CMD_TGT_READ32_SWAP,
@@ -240,6 +249,9 @@ enum cmd_ctx_swap {
 	CMD_CTX_NO_SWAP = 3,
 };
 
+#define CMD_OVE_LEN	BIT(7)
+#define CMD_OV_LEN	GENMASK(12, 8)
+
 #define OP_LCSR_BASE		0x0fc00000000ULL
 #define OP_LCSR_A_SRC		0x000000003ffULL
 #define OP_LCSR_B_SRC		0x000000ffc00ULL
@@ -257,6 +269,7 @@ enum lcsr_wr_src {
 #define OP_CARB_BASE		0x0e000000000ULL
 #define OP_CARB_OR		0x00000010000ULL
 
+#define NFP_CSR_CTX_PTR		0x20
 #define NFP_CSR_ACT_LM_ADDR0	0x64
 #define NFP_CSR_ACT_LM_ADDR1	0x6c
 #define NFP_CSR_ACT_LM_ADDR2	0x94
@@ -377,4 +390,13 @@ int swreg_to_restricted(swreg dst, swreg lreg, swreg rreg,
 int nfp_ustore_check_valid_no_ecc(u64 insn);
 u64 nfp_ustore_calc_ecc_insn(u64 insn);
 
+#define NFP_IND_ME_REFL_WR_SIG_INIT	3
+#define NFP_IND_ME_CTX_PTR_BASE_MASK	GENMASK(9, 0)
+#define NFP_IND_NUM_CONTEXTS		8
+
+static inline u32 nfp_get_ind_csr_ctx_ptr_offs(u32 read_offset)
+{
+	return (read_offset & ~NFP_IND_ME_CTX_PTR_BASE_MASK) | NFP_CSR_CTX_PTR;
+}
+
 #endif
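
nfp_get_ind_csr_ctx_ptr_offs() above keeps the upper bits of a CSR read offset (the ME's CSR window base) and substitutes the low 10 bits with the CSRCtxPtr register offset 0x20. A tiny self-checking sketch, using a made-up read offset:

	#include <assert.h>
	#include <stdint.h>

	#define CTX_PTR_BASE_MASK	0x3ffU	/* GENMASK(9, 0) */
	#define CSR_CTX_PTR		0x20U

	/* replace the low 10 bits of the offset, keeping the window base */
	static uint32_t ind_csr_ctx_ptr_offs(uint32_t read_offset)
	{
		return (read_offset & ~CTX_PTR_BASE_MASK) | CSR_CTX_PTR;
	}

	int main(void)
	{
		/* 0x3c58 is a made-up offset: base 0x3c00 plus register 0x58 */
		assert(ind_csr_ctx_ptr_offs(0x3c58) == 0x3c20);
		return 0;
	}
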
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_main.c b/drivers/net/ethernet/netronome/nfp/nfp_main.c
index 35eaccb..0953fa8 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_main.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_main.c
@@ -45,6 +45,7 @@
 #include <linux/pci.h>
 #include <linux/firmware.h>
 #include <linux/vermagic.h>
+#include <linux/vmalloc.h>
 #include <net/devlink.h>
 
 #include "nfpcore/nfp.h"
@@ -509,6 +510,9 @@ static int nfp_pci_probe(struct pci_dev *pdev,
 	pf->mip = nfp_mip_open(pf->cpp);
 	pf->rtbl = __nfp_rtsym_table_read(pf->cpp, pf->mip);
 
+	pf->dump_flag = NFP_DUMP_NSP_DIAG;
+	pf->dumpspec = nfp_net_dump_load_dumpspec(pf->cpp, pf->rtbl);
+
 	err = nfp_pcie_sriov_read_nfd_limit(pf);
 	if (err)
 		goto err_fw_unload;
@@ -544,6 +548,7 @@ static int nfp_pci_probe(struct pci_dev *pdev,
 		nfp_fw_unload(pf);
 	kfree(pf->eth_tbl);
 	kfree(pf->nspi);
+	vfree(pf->dumpspec);
 err_devlink_unreg:
 	devlink_unregister(devlink);
 err_hwinfo_free:
@@ -579,6 +584,7 @@ static void nfp_pci_remove(struct pci_dev *pdev)
 
 	devlink_unregister(devlink);
 
+	vfree(pf->dumpspec);
 	kfree(pf->rtbl);
 	nfp_mip_close(pf->mip);
 	if (pf->fw_loaded)
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_main.h b/drivers/net/ethernet/netronome/nfp/nfp_main.h
index be0ee59..add46e2 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_main.h
+++ b/drivers/net/ethernet/netronome/nfp/nfp_main.h
@@ -39,6 +39,7 @@
 #ifndef NFP_MAIN_H
 #define NFP_MAIN_H
 
+#include <linux/ethtool.h>
 #include <linux/list.h>
 #include <linux/types.h>
 #include <linux/msi.h>
@@ -62,6 +63,17 @@ struct nfp_port;
 struct nfp_rtsym_table;
 
 /**
+ * struct nfp_dumpspec - NFP FW dump specification structure
+ * @size:	Size of the data
+ * @data:	Sequence of TLVs, each being an instruction to dump some data
+ *		from FW
+ */
+struct nfp_dumpspec {
+	u32 size;
+	u8 data[0];
+};
+
+/**
  * struct nfp_pf - NFP PF-specific device structure
  * @pdev:		Backpointer to PCI device
  * @cpp:		Pointer to the CPP handle
@@ -83,6 +95,9 @@ struct nfp_rtsym_table;
  * @mip:		MIP handle
  * @rtbl:		RTsym table
  * @hwinfo:		HWInfo table
+ * @dumpspec:		Debug dump specification
+ * @dump_flag:		Store dump flag between set_dump and get_dump_flag
+ * @dump_len:		Store dump length between set_dump and get_dump_flag
  * @eth_tbl:		NSP ETH table
  * @nspi:		NSP identification info
  * @hwmon_dev:		pointer to hwmon device
@@ -124,6 +139,9 @@ struct nfp_pf {
 	const struct nfp_mip *mip;
 	struct nfp_rtsym_table *rtbl;
 	struct nfp_hwinfo *hwinfo;
+	struct nfp_dumpspec *dumpspec;
+	u32 dump_flag;
+	u32 dump_len;
 	struct nfp_eth_table *eth_tbl;
 	struct nfp_nsp_identify *nspi;
 
@@ -157,4 +175,15 @@ void nfp_net_get_mac_addr(struct nfp_pf *pf, struct nfp_port *port);
 
 bool nfp_ctrl_tx(struct nfp_net *nn, struct sk_buff *skb);
 
+enum nfp_dump_diag {
+	NFP_DUMP_NSP_DIAG = 0,
+};
+
+struct nfp_dumpspec *
+nfp_net_dump_load_dumpspec(struct nfp_cpp *cpp, struct nfp_rtsym_table *rtbl);
+s64 nfp_net_dump_calculate_size(struct nfp_pf *pf, struct nfp_dumpspec *spec,
+				u32 flag);
+int nfp_net_dump_populate_buffer(struct nfp_pf *pf, struct nfp_dumpspec *spec,
+				 struct ethtool_dump *dump_param, void *dest);
+
 #endif /* NFP_MAIN_H */
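
struct nfp_dumpspec above ends in a zero-length array, so the size header and the TLV bytes share one contiguous allocation. A minimal user-space model of that layout under hypothetical names (the kernel side uses vmalloc() rather than malloc(), since the buffer can reach tens of kilobytes):

	#include <stdint.h>
	#include <stdlib.h>
	#include <string.h>

	struct dumpspec {
		uint32_t size;
		uint8_t data[];		/* flexible array member, like data[0] above */
	};

	static struct dumpspec *dumpspec_alloc(const uint8_t *tlvs, uint32_t size)
	{
		struct dumpspec *spec = malloc(sizeof(*spec) + size);

		if (!spec)
			return NULL;
		spec->size = size;
		memcpy(spec->data, tlvs, size);
		return spec;
	}

	int main(void)
	{
		static const uint8_t raw[8] = { 0, 0, 0, 6, 0, 0, 0, 0 };
		struct dumpspec *spec = dumpspec_alloc(raw, sizeof(raw));

		free(spec);
		return !spec;
	}
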
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net.h b/drivers/net/ethernet/netronome/nfp/nfp_net.h
index 7f9857c..0e564cf 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net.h
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net.h
@@ -47,6 +47,7 @@
 #include <linux/netdevice.h>
 #include <linux/pci.h>
 #include <linux/io-64-nonatomic-hi-lo.h>
+#include <net/xdp.h>
 
 #include "nfp_net_ctrl.h"
 
@@ -350,6 +351,7 @@ struct nfp_net_rx_buf {
  * @rxds:       Virtual address of FL/RX ring in host memory
  * @dma:        DMA address of the FL/RX ring
  * @size:       Size, in bytes, of the FL/RX ring (needed to free)
+ * @xdp_rxq:    RX-ring info avail for XDP
  */
 struct nfp_net_rx_ring {
 	struct nfp_net_r_vector *r_vec;
@@ -361,13 +363,14 @@ struct nfp_net_rx_ring {
 	u32 idx;
 
 	int fl_qcidx;
+	unsigned int size;
 	u8 __iomem *qcp_fl;
 
 	struct nfp_net_rx_buf *rxbufs;
 	struct nfp_net_rx_desc *rxds;
 
 	dma_addr_t dma;
-	unsigned int size;
+	struct xdp_rxq_info xdp_rxq;
 } ____cacheline_aligned;
 
 /**
@@ -548,6 +551,8 @@ struct nfp_net_dp {
  * @max_r_vecs:		Number of allocated interrupt vectors for RX/TX
  * @max_tx_rings:       Maximum number of TX rings supported by the Firmware
  * @max_rx_rings:       Maximum number of RX rings supported by the Firmware
+ * @stride_rx:		Queue controller RX queue spacing
+ * @stride_tx:		Queue controller TX queue spacing
  * @r_vecs:             Pre-allocated array of ring vectors
  * @irq_entries:        Pre-allocated array of MSI-X entries
  * @lsc_handler:        Handler for Link State Change interrupt
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
index 99b0487..07e0587 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
@@ -1610,11 +1610,13 @@ static int nfp_net_rx(struct nfp_net_rx_ring *rx_ring, int budget)
 	unsigned int true_bufsz;
 	struct sk_buff *skb;
 	int pkts_polled = 0;
+	struct xdp_buff xdp;
 	int idx;
 
 	rcu_read_lock();
 	xdp_prog = READ_ONCE(dp->xdp_prog);
 	true_bufsz = xdp_prog ? PAGE_SIZE : dp->fl_bufsz;
+	xdp.rxq = &rx_ring->xdp_rxq;
 	tx_ring = r_vec->xdp_ring;
 
 	while (pkts_polled < budget) {
@@ -1705,7 +1707,6 @@ static int nfp_net_rx(struct nfp_net_rx_ring *rx_ring, int budget)
 				  dp->bpf_offload_xdp) && !meta.portid) {
 			void *orig_data = rxbuf->frag + pkt_off;
 			unsigned int dma_off;
-			struct xdp_buff xdp;
 			int act;
 
 			xdp.data_hard_start = rxbuf->frag + NFP_NET_RX_BUF_HEADROOM;
@@ -2254,6 +2255,8 @@ static void nfp_net_rx_ring_free(struct nfp_net_rx_ring *rx_ring)
 	struct nfp_net_r_vector *r_vec = rx_ring->r_vec;
 	struct nfp_net_dp *dp = &r_vec->nfp_net->dp;
 
+	if (dp->netdev)
+		xdp_rxq_info_unreg(&rx_ring->xdp_rxq);
 	kfree(rx_ring->rxbufs);
 
 	if (rx_ring->rxds)
@@ -2277,7 +2280,14 @@ static void nfp_net_rx_ring_free(struct nfp_net_rx_ring *rx_ring)
 static int
 nfp_net_rx_ring_alloc(struct nfp_net_dp *dp, struct nfp_net_rx_ring *rx_ring)
 {
-	int sz;
+	int sz, err;
+
+	if (dp->netdev) {
+		err = xdp_rxq_info_reg(&rx_ring->xdp_rxq, dp->netdev,
+				       rx_ring->idx);
+		if (err < 0)
+			return err;
+	}
 
 	rx_ring->cnt = dp->rxd_cnt;
 	rx_ring->size = sizeof(*rx_ring->rxds) * rx_ring->cnt;
@@ -2852,6 +2862,11 @@ static void nfp_net_set_rx_mode(struct net_device *netdev)
 
 	new_ctrl = nn->dp.ctrl;
 
+	if (!netdev_mc_empty(netdev) || netdev->flags & IFF_ALLMULTI)
+		new_ctrl |= nn->cap & NFP_NET_CFG_CTRL_L2MC;
+	else
+		new_ctrl &= ~NFP_NET_CFG_CTRL_L2MC;
+
 	if (netdev->flags & IFF_PROMISC) {
 		if (nn->cap & NFP_NET_CFG_CTRL_PROMISC)
 			new_ctrl |= NFP_NET_CFG_CTRL_PROMISC;
@@ -3036,6 +3051,11 @@ static int nfp_net_change_mtu(struct net_device *netdev, int new_mtu)
 {
 	struct nfp_net *nn = netdev_priv(netdev);
 	struct nfp_net_dp *dp;
+	int err;
+
+	err = nfp_app_change_mtu(nn->app, netdev, new_mtu);
+	if (err)
+		return err;
 
 	dp = nfp_net_clone_dp(nn);
 	if (!dp)
@@ -3394,17 +3414,10 @@ static int nfp_net_xdp(struct net_device *netdev, struct netdev_bpf *xdp)
 		if (nn->dp.bpf_offload_xdp)
 			xdp->prog_attached = XDP_ATTACHED_HW;
 		xdp->prog_id = nn->xdp_prog ? nn->xdp_prog->aux->id : 0;
+		xdp->prog_flags = nn->xdp_prog ? nn->xdp_flags : 0;
 		return 0;
-	case BPF_OFFLOAD_VERIFIER_PREP:
-		return nfp_app_bpf_verifier_prep(nn->app, nn, xdp);
-	case BPF_OFFLOAD_TRANSLATE:
-		return nfp_app_bpf_translate(nn->app, nn,
-					     xdp->offload.prog);
-	case BPF_OFFLOAD_DESTROY:
-		return nfp_app_bpf_destroy(nn->app, nn,
-					   xdp->offload.prog);
 	default:
-		return -EINVAL;
+		return nfp_app_bpf(nn->app, nn, xdp);
 	}
 }
 
@@ -3563,9 +3576,6 @@ struct nfp_net *nfp_net_alloc(struct pci_dev *pdev, bool needs_netdev,
  */
 void nfp_net_free(struct nfp_net *nn)
 {
-	if (nn->xdp_prog)
-		bpf_prog_put(nn->xdp_prog);
-
 	if (nn->dp.netdev)
 		free_netdev(nn->dp.netdev);
 	else
@@ -3791,8 +3801,6 @@ int nfp_net_init(struct nfp_net *nn)
 	/* Allow L2 Broadcast and Multicast through by default, if supported */
 	if (nn->cap & NFP_NET_CFG_CTRL_L2BC)
 		nn->dp.ctrl |= NFP_NET_CFG_CTRL_L2BC;
-	if (nn->cap & NFP_NET_CFG_CTRL_L2MC)
-		nn->dp.ctrl |= NFP_NET_CFG_CTRL_L2MC;
 
 	/* Allow IRQ moderation, if supported */
 	if (nn->cap & NFP_NET_CFG_CTRL_IRQMOD) {
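
The ring paths above now bracket each RX ring's lifetime with XDP ring-info registration, letting the xdp_buff take its rxq pointer from the ring rather than from an on-stack struct. A hedged sketch of the general driver pattern, with assumed my_* names and the three-argument xdp_rxq_info_reg() signature this merge uses (later kernels extended it):

	#include <linux/netdevice.h>
	#include <net/xdp.h>

	struct my_rx_ring {
		struct xdp_rxq_info xdp_rxq;
		/* ... descriptors, buffers, counters ... */
	};

	static int my_ring_alloc(struct net_device *dev, struct my_rx_ring *ring,
				 u32 queue_index)
	{
		int err;

		/* Register before buffers can reach XDP; pairs with unreg on free. */
		err = xdp_rxq_info_reg(&ring->xdp_rxq, dev, queue_index);
		if (err < 0)
			return err;

		/* ... allocate descriptor memory; call xdp_rxq_info_unreg() on
		 * any later failure so the registration never leaks ...
		 */
		return 0;
	}

	static void my_ring_free(struct my_rx_ring *ring)
	{
		/* ... free descriptor memory ... */
		xdp_rxq_info_unreg(&ring->xdp_rxq);
	}
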
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h b/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h
index 782d452..25c3600 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h
@@ -91,23 +91,24 @@
 #define NFP_NET_RSS_IPV6_EX_UDP         9
 
 /**
- * @NFP_NET_TXR_MAX:         Maximum number of TX rings
- * @NFP_NET_RXR_MAX:         Maximum number of RX rings
+ * Ring counts
+ * %NFP_NET_TXR_MAX:         Maximum number of TX rings
+ * %NFP_NET_RXR_MAX:         Maximum number of RX rings
  */
 #define NFP_NET_TXR_MAX                 64
 #define NFP_NET_RXR_MAX                 64
 
 /**
  * Read/Write config words (0x0000 - 0x002c)
- * @NFP_NET_CFG_CTRL:        Global control
- * @NFP_NET_CFG_UPDATE:      Indicate which fields are updated
- * @NFP_NET_CFG_TXRS_ENABLE: Bitmask of enabled TX rings
- * @NFP_NET_CFG_RXRS_ENABLE: Bitmask of enabled RX rings
- * @NFP_NET_CFG_MTU:         Set MTU size
- * @NFP_NET_CFG_FLBUFSZ:     Set freelist buffer size (must be larger than MTU)
- * @NFP_NET_CFG_EXN:         MSI-X table entry for exceptions
- * @NFP_NET_CFG_LSC:         MSI-X table entry for link state changes
- * @NFP_NET_CFG_MACADDR:     MAC address
+ * %NFP_NET_CFG_CTRL:        Global control
+ * %NFP_NET_CFG_UPDATE:      Indicate which fields are updated
+ * %NFP_NET_CFG_TXRS_ENABLE: Bitmask of enabled TX rings
+ * %NFP_NET_CFG_RXRS_ENABLE: Bitmask of enabled RX rings
+ * %NFP_NET_CFG_MTU:         Set MTU size
+ * %NFP_NET_CFG_FLBUFSZ:     Set freelist buffer size (must be larger than MTU)
+ * %NFP_NET_CFG_EXN:         MSI-X table entry for exceptions
+ * %NFP_NET_CFG_LSC:         MSI-X table entry for link state changes
+ * %NFP_NET_CFG_MACADDR:     MAC address
  *
  * TODO:
  * - define Error details in UPDATE
@@ -176,14 +177,14 @@
 
 /**
  * Read-only words (0x0030 - 0x0050):
- * @NFP_NET_CFG_VERSION:     Firmware version number
- * @NFP_NET_CFG_STS:         Status
- * @NFP_NET_CFG_CAP:         Capabilities (same bits as @NFP_NET_CFG_CTRL)
- * @NFP_NET_CFG_MAX_TXRINGS: Maximum number of TX rings
- * @NFP_NET_CFG_MAX_RXRINGS: Maximum number of RX rings
- * @NFP_NET_CFG_MAX_MTU:     Maximum support MTU
- * @NFP_NET_CFG_START_TXQ:   Start Queue Control Queue to use for TX (PF only)
- * @NFP_NET_CFG_START_RXQ:   Start Queue Control Queue to use for RX (PF only)
+ * %NFP_NET_CFG_VERSION:     Firmware version number
+ * %NFP_NET_CFG_STS:         Status
+ * %NFP_NET_CFG_CAP:         Capabilities (same bits as %NFP_NET_CFG_CTRL)
+ * %NFP_NET_CFG_MAX_TXRINGS: Maximum number of TX rings
+ * %NFP_NET_CFG_MAX_RXRINGS: Maximum number of RX rings
+ * %NFP_NET_CFG_MAX_MTU:     Maximum supported MTU
+ * %NFP_NET_CFG_START_TXQ:   Start Queue Control Queue to use for TX (PF only)
+ * %NFP_NET_CFG_START_RXQ:   Start Queue Control Queue to use for RX (PF only)
  *
  * TODO:
  * - define more STS bits
@@ -228,31 +229,31 @@
 
 /**
  * RSS capabilities
- * @NFP_NET_CFG_RSS_CAP_HFUNC:	supported hash functions (same bits as
- *				@NFP_NET_CFG_RSS_HFUNC)
+ * %NFP_NET_CFG_RSS_CAP_HFUNC:	supported hash functions (same bits as
+ *				%NFP_NET_CFG_RSS_HFUNC)
  */
 #define NFP_NET_CFG_RSS_CAP		0x0054
 #define   NFP_NET_CFG_RSS_CAP_HFUNC	  0xff000000
 
 /**
  * VXLAN/UDP encap configuration
- * @NFP_NET_CFG_VXLAN_PORT:	Base address of table of tunnels' UDP dst ports
- * @NFP_NET_CFG_VXLAN_SZ:	Size of the UDP port table in bytes
+ * %NFP_NET_CFG_VXLAN_PORT:	Base address of table of tunnels' UDP dst ports
+ * %NFP_NET_CFG_VXLAN_SZ:	Size of the UDP port table in bytes
  */
 #define NFP_NET_CFG_VXLAN_PORT		0x0060
 #define NFP_NET_CFG_VXLAN_SZ		  0x0008
 
 /**
  * BPF section
- * @NFP_NET_CFG_BPF_ABI:	BPF ABI version
- * @NFP_NET_CFG_BPF_CAP:	BPF capabilities
- * @NFP_NET_CFG_BPF_MAX_LEN:	Maximum size of JITed BPF code in bytes
- * @NFP_NET_CFG_BPF_START:	Offset at which BPF will be loaded
- * @NFP_NET_CFG_BPF_DONE:	Offset to jump to on exit
- * @NFP_NET_CFG_BPF_STACK_SZ:	Total size of stack area in 64B chunks
- * @NFP_NET_CFG_BPF_INL_MTU:	Packet data split offset in 64B chunks
- * @NFP_NET_CFG_BPF_SIZE:	Size of the JITed BPF code in instructions
- * @NFP_NET_CFG_BPF_ADDR:	DMA address of the buffer with JITed BPF code
+ * %NFP_NET_CFG_BPF_ABI:	BPF ABI version
+ * %NFP_NET_CFG_BPF_CAP:	BPF capabilities
+ * %NFP_NET_CFG_BPF_MAX_LEN:	Maximum size of JITed BPF code in bytes
+ * %NFP_NET_CFG_BPF_START:	Offset at which BPF will be loaded
+ * %NFP_NET_CFG_BPF_DONE:	Offset to jump to on exit
+ * %NFP_NET_CFG_BPF_STACK_SZ:	Total size of stack area in 64B chunks
+ * %NFP_NET_CFG_BPF_INL_MTU:	Packet data split offset in 64B chunks
+ * %NFP_NET_CFG_BPF_SIZE:	Size of the JITed BPF code in instructions
+ * %NFP_NET_CFG_BPF_ADDR:	DMA address of the buffer with JITed BPF code
  */
 #define NFP_NET_CFG_BPF_ABI		0x0080
 #define   NFP_NET_BPF_ABI		2
@@ -278,9 +279,9 @@
 /**
  * RSS configuration (0x0100 - 0x01ac):
  * Used only when NFP_NET_CFG_CTRL_RSS is enabled
- * @NFP_NET_CFG_RSS_CFG:     RSS configuration word
- * @NFP_NET_CFG_RSS_KEY:     RSS "secret" key
- * @NFP_NET_CFG_RSS_ITBL:    RSS indirection table
+ * %NFP_NET_CFG_RSS_CFG:     RSS configuration word
+ * %NFP_NET_CFG_RSS_KEY:     RSS "secret" key
+ * %NFP_NET_CFG_RSS_ITBL:    RSS indirection table
  */
 #define NFP_NET_CFG_RSS_BASE            0x0100
 #define NFP_NET_CFG_RSS_CTRL            NFP_NET_CFG_RSS_BASE
@@ -305,13 +306,13 @@
 
 /**
  * TX ring configuration (0x200 - 0x800)
- * @NFP_NET_CFG_TXR_BASE:    Base offset for TX ring configuration
- * @NFP_NET_CFG_TXR_ADDR:    Per TX ring DMA address (8B entries)
- * @NFP_NET_CFG_TXR_WB_ADDR: Per TX ring write back DMA address (8B entries)
- * @NFP_NET_CFG_TXR_SZ:      Per TX ring ring size (1B entries)
- * @NFP_NET_CFG_TXR_VEC:     Per TX ring MSI-X table entry (1B entries)
- * @NFP_NET_CFG_TXR_PRIO:    Per TX ring priority (1B entries)
- * @NFP_NET_CFG_TXR_IRQ_MOD: Per TX ring interrupt moderation packet
+ * %NFP_NET_CFG_TXR_BASE:    Base offset for TX ring configuration
+ * %NFP_NET_CFG_TXR_ADDR:    Per TX ring DMA address (8B entries)
+ * %NFP_NET_CFG_TXR_WB_ADDR: Per TX ring write back DMA address (8B entries)
+ * %NFP_NET_CFG_TXR_SZ:      Per TX ring ring size (1B entries)
+ * %NFP_NET_CFG_TXR_VEC:     Per TX ring MSI-X table entry (1B entries)
+ * %NFP_NET_CFG_TXR_PRIO:    Per TX ring priority (1B entries)
+ * %NFP_NET_CFG_TXR_IRQ_MOD: Per TX ring interrupt moderation packet
  */
 #define NFP_NET_CFG_TXR_BASE            0x0200
 #define NFP_NET_CFG_TXR_ADDR(_x)        (NFP_NET_CFG_TXR_BASE + ((_x) * 0x8))
@@ -325,12 +326,12 @@
 
 /**
  * RX ring configuration (0x0800 - 0x0c00)
- * @NFP_NET_CFG_RXR_BASE:    Base offset for RX ring configuration
- * @NFP_NET_CFG_RXR_ADDR:    Per RX ring DMA address (8B entries)
- * @NFP_NET_CFG_RXR_SZ:      Per RX ring ring size (1B entries)
- * @NFP_NET_CFG_RXR_VEC:     Per RX ring MSI-X table entry (1B entries)
- * @NFP_NET_CFG_RXR_PRIO:    Per RX ring priority (1B entries)
- * @NFP_NET_CFG_RXR_IRQ_MOD: Per RX ring interrupt moderation (4B entries)
+ * %NFP_NET_CFG_RXR_BASE:    Base offset for RX ring configuration
+ * %NFP_NET_CFG_RXR_ADDR:    Per RX ring DMA address (8B entries)
+ * %NFP_NET_CFG_RXR_SZ:      Per RX ring ring size (1B entries)
+ * %NFP_NET_CFG_RXR_VEC:     Per RX ring MSI-X table entry (1B entries)
+ * %NFP_NET_CFG_RXR_PRIO:    Per RX ring priority (1B entries)
+ * %NFP_NET_CFG_RXR_IRQ_MOD: Per RX ring interrupt moderation (4B entries)
  */
 #define NFP_NET_CFG_RXR_BASE            0x0800
 #define NFP_NET_CFG_RXR_ADDR(_x)        (NFP_NET_CFG_RXR_BASE + ((_x) * 0x8))
@@ -343,7 +344,7 @@
 /**
  * Interrupt Control/Cause registers (0x0c00 - 0x0d00)
  * These registers are only used when MSI-X auto-masking is not
- * enabled (@NFP_NET_CFG_CTRL_MSIXAUTO not set).  The array is index
+ * enabled (%NFP_NET_CFG_CTRL_MSIXAUTO not set).  The array is indexed
 * by MSI-X entry and each entry is 1B in size.  If an entry is zero, the
  * corresponding entry is enabled.  If the FW generates an interrupt,
  * it writes a cause into the corresponding field.  This also masks
@@ -393,8 +394,8 @@
 /**
  * Per ring stats (0x1000 - 0x1800)
  * options, 64bit per entry
- * @NFP_NET_CFG_TXR_STATS:   TX ring statistics (Packet and Byte count)
- * @NFP_NET_CFG_RXR_STATS:   RX ring statistics (Packet and Byte count)
+ * %NFP_NET_CFG_TXR_STATS:   TX ring statistics (Packet and Byte count)
+ * %NFP_NET_CFG_RXR_STATS:   RX ring statistics (Packet and Byte count)
  */
 #define NFP_NET_CFG_TXR_STATS_BASE      0x1000
 #define NFP_NET_CFG_TXR_STATS(_x)       (NFP_NET_CFG_TXR_STATS_BASE + \
@@ -418,10 +419,10 @@
 
 /**
  * VLAN filtering using general use mailbox
- * @NFP_NET_CFG_VLAN_FILTER:		Base address of VLAN filter mailbox
- * @NFP_NET_CFG_VLAN_FILTER_VID:	VLAN ID to filter
- * @NFP_NET_CFG_VLAN_FILTER_PROTO:	VLAN proto to filter
- * @NFP_NET_CFG_VXLAN_SZ:		Size of the VLAN filter mailbox in bytes
+ * %NFP_NET_CFG_VLAN_FILTER:		Base address of VLAN filter mailbox
+ * %NFP_NET_CFG_VLAN_FILTER_VID:	VLAN ID to filter
+ * %NFP_NET_CFG_VLAN_FILTER_PROTO:	VLAN proto to filter
+ * %NFP_NET_CFG_VXLAN_SZ:		Size of the VLAN filter mailbox in bytes
  */
 #define NFP_NET_CFG_VLAN_FILTER		NFP_NET_CFG_MBOX_VAL
 #define  NFP_NET_CFG_VLAN_FILTER_VID	NFP_NET_CFG_VLAN_FILTER
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_debugdump.c b/drivers/net/ethernet/netronome/nfp/nfp_net_debugdump.c
new file mode 100644
index 0000000..173646e
--- /dev/null
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_debugdump.c
@@ -0,0 +1,808 @@
+/*
+ * Copyright (C) 2017 Netronome Systems, Inc.
+ *
+ * This software is dual licensed under the GNU General License Version 2,
+ * June 1991 as shown in the file COPYING in the top-level directory of this
+ * source tree or the BSD 2-Clause License provided below.  You have the
+ * option to license this software under the complete terms of either license.
+ *
+ * The BSD 2-Clause License:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      1. Redistributions of source code must retain the above
+ *         copyright notice, this list of conditions and the following
+ *         disclaimer.
+ *
+ *      2. Redistributions in binary form must reproduce the above
+ *         copyright notice, this list of conditions and the following
+ *         disclaimer in the documentation and/or other materials
+ *         provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/ethtool.h>
+#include <linux/vmalloc.h>
+
+#include "nfp_asm.h"
+#include "nfp_main.h"
+#include "nfpcore/nfp.h"
+#include "nfpcore/nfp_nffw.h"
+#include "nfpcore/nfp6000/nfp6000.h"
+
+#define NFP_DUMP_SPEC_RTSYM	"_abi_dump_spec"
+
+#define ALIGN8(x)	ALIGN(x, 8)
+
+enum nfp_dumpspec_type {
+	NFP_DUMPSPEC_TYPE_CPP_CSR = 0,
+	NFP_DUMPSPEC_TYPE_XPB_CSR = 1,
+	NFP_DUMPSPEC_TYPE_ME_CSR = 2,
+	NFP_DUMPSPEC_TYPE_INDIRECT_ME_CSR = 3,
+	NFP_DUMPSPEC_TYPE_RTSYM = 4,
+	NFP_DUMPSPEC_TYPE_HWINFO = 5,
+	NFP_DUMPSPEC_TYPE_FWNAME = 6,
+	NFP_DUMPSPEC_TYPE_HWINFO_FIELD = 7,
+	NFP_DUMPSPEC_TYPE_PROLOG = 10000,
+	NFP_DUMPSPEC_TYPE_ERROR = 10001,
+};
+
+/* The following structs must be carefully aligned so that they can be used to
+ * interpret the binary dumpspec and populate the dump data in a deterministic
+ * way.
+ */
+
+/* generic type plus length */
+struct nfp_dump_tl {
+	__be32 type;
+	__be32 length;	/* chunk length to follow, aligned to 8 bytes */
+	char data[0];
+};
+
+/* NFP CPP parameters */
+struct nfp_dumpspec_cpp_isl_id {
+	u8 target;
+	u8 action;
+	u8 token;
+	u8 island;
+};
+
+struct nfp_dump_common_cpp {
+	struct nfp_dumpspec_cpp_isl_id cpp_id;
+	__be32 offset;		/* address to start dump */
+	__be32 dump_length;	/* total bytes to dump, aligned to reg size */
+};
+
+/* CSR dumpables */
+struct nfp_dumpspec_csr {
+	struct nfp_dump_tl tl;
+	struct nfp_dump_common_cpp cpp;
+	__be32 register_width;	/* in bits */
+};
+
+struct nfp_dumpspec_rtsym {
+	struct nfp_dump_tl tl;
+	char rtsym[0];
+};
+
+/* header for register dumpable */
+struct nfp_dump_csr {
+	struct nfp_dump_tl tl;
+	struct nfp_dump_common_cpp cpp;
+	__be32 register_width;	/* in bits */
+	__be32 error;		/* error code encountered while reading */
+	__be32 error_offset;	/* offset being read when error occurred */
+};
+
+struct nfp_dump_rtsym {
+	struct nfp_dump_tl tl;
+	struct nfp_dump_common_cpp cpp;
+	__be32 error;		/* error code encountered while reading */
+	u8 padded_name_length;	/* pad so data starts at 8 byte boundary */
+	char rtsym[0];
+	/* after padded_name_length, there is dump_length data */
+};
+
+struct nfp_dump_prolog {
+	struct nfp_dump_tl tl;
+	__be32 dump_level;
+};
+
+struct nfp_dump_error {
+	struct nfp_dump_tl tl;
+	__be32 error;
+	char padding[4];
+	char spec[0];
+};
+
+/* to track state through debug size calculation TLV traversal */
+struct nfp_level_size {
+	__be32 requested_level;	/* input */
+	u32 total_size;		/* output */
+};
+
+/* to track state during debug dump creation TLV traversal */
+struct nfp_dump_state {
+	__be32 requested_level;	/* input param */
+	u32 dumped_size;	/* adds up to size of dumped data */
+	u32 buf_size;		/* size of the buffer pointed to by p */
+	void *p;		/* current position in the dump buffer */
+};
+
+typedef int (*nfp_tlv_visit)(struct nfp_pf *pf, struct nfp_dump_tl *tl,
+			     void *param);
+
+static int
+nfp_traverse_tlvs(struct nfp_pf *pf, void *data, u32 data_length, void *param,
+		  nfp_tlv_visit tlv_visit)
+{
+	long long remaining = data_length;
+	struct nfp_dump_tl *tl;
+	u32 total_tlv_size;
+	void *p = data;
+	int err;
+
+	while (remaining >= sizeof(*tl)) {
+		tl = p;
+		if (!tl->type && !tl->length)
+			break;
+
+		if (be32_to_cpu(tl->length) > remaining - sizeof(*tl))
+			return -EINVAL;
+
+		total_tlv_size = sizeof(*tl) + be32_to_cpu(tl->length);
+
+		/* Spec TLVs should be aligned to 4 bytes. */
+		if (total_tlv_size % 4 != 0)
+			return -EINVAL;
+
+		p += total_tlv_size;
+		remaining -= total_tlv_size;
+		err = tlv_visit(pf, tl, param);
+		if (err)
+			return err;
+	}
+
+	return 0;
+}
+
+static u32 nfp_get_numeric_cpp_id(struct nfp_dumpspec_cpp_isl_id *cpp_id)
+{
+	return NFP_CPP_ISLAND_ID(cpp_id->target, cpp_id->action, cpp_id->token,
+				 cpp_id->island);
+}
+
+struct nfp_dumpspec *
+nfp_net_dump_load_dumpspec(struct nfp_cpp *cpp, struct nfp_rtsym_table *rtbl)
+{
+	const struct nfp_rtsym *specsym;
+	struct nfp_dumpspec *dumpspec;
+	int bytes_read;
+	u32 cpp_id;
+
+	specsym = nfp_rtsym_lookup(rtbl, NFP_DUMP_SPEC_RTSYM);
+	if (!specsym)
+		return NULL;
+
+	/* expected size of this buffer is in the order of tens of kilobytes */
+	dumpspec = vmalloc(sizeof(*dumpspec) + specsym->size);
+	if (!dumpspec)
+		return NULL;
+
+	dumpspec->size = specsym->size;
+
+	cpp_id = NFP_CPP_ISLAND_ID(specsym->target, NFP_CPP_ACTION_RW, 0,
+				   specsym->domain);
+
+	bytes_read = nfp_cpp_read(cpp, cpp_id, specsym->addr, dumpspec->data,
+				  specsym->size);
+	if (bytes_read != specsym->size) {
+		vfree(dumpspec);
+		nfp_warn(cpp, "Debug dump specification read failed.\n");
+		return NULL;
+	}
+
+	return dumpspec;
+}
+
+static int nfp_dump_error_tlv_size(struct nfp_dump_tl *spec)
+{
+	return ALIGN8(sizeof(struct nfp_dump_error) + sizeof(*spec) +
+		      be32_to_cpu(spec->length));
+}
+
+static int nfp_calc_fwname_tlv_size(struct nfp_pf *pf)
+{
+	u32 fwname_len = strlen(nfp_mip_name(pf->mip));
+
+	return sizeof(struct nfp_dump_tl) + ALIGN8(fwname_len + 1);
+}
+
+static int nfp_calc_hwinfo_field_sz(struct nfp_pf *pf, struct nfp_dump_tl *spec)
+{
+	u32 tl_len, key_len;
+	const char *value;
+
+	tl_len = be32_to_cpu(spec->length);
+	key_len = strnlen(spec->data, tl_len);
+	if (key_len == tl_len)
+		return nfp_dump_error_tlv_size(spec);
+
+	value = nfp_hwinfo_lookup(pf->hwinfo, spec->data);
+	if (!value)
+		return nfp_dump_error_tlv_size(spec);
+
+	return sizeof(struct nfp_dump_tl) + ALIGN8(key_len + strlen(value) + 2);
+}
+
+static bool nfp_csr_spec_valid(struct nfp_dumpspec_csr *spec_csr)
+{
+	u32 required_read_sz = sizeof(*spec_csr) - sizeof(spec_csr->tl);
+	u32 available_sz = be32_to_cpu(spec_csr->tl.length);
+	u32 reg_width;
+
+	if (available_sz < required_read_sz)
+		return false;
+
+	reg_width = be32_to_cpu(spec_csr->register_width);
+
+	return reg_width == 32 || reg_width == 64;
+}
+
+static int
+nfp_calc_rtsym_dump_sz(struct nfp_pf *pf, struct nfp_dump_tl *spec)
+{
+	struct nfp_rtsym_table *rtbl = pf->rtbl;
+	struct nfp_dumpspec_rtsym *spec_rtsym;
+	const struct nfp_rtsym *sym;
+	u32 tl_len, key_len;
+	u32 size;
+
+	spec_rtsym = (struct nfp_dumpspec_rtsym *)spec;
+	tl_len = be32_to_cpu(spec->length);
+	key_len = strnlen(spec_rtsym->rtsym, tl_len);
+	if (key_len == tl_len)
+		return nfp_dump_error_tlv_size(spec);
+
+	sym = nfp_rtsym_lookup(rtbl, spec_rtsym->rtsym);
+	if (!sym)
+		return nfp_dump_error_tlv_size(spec);
+
+	if (sym->type == NFP_RTSYM_TYPE_ABS)
+		size = sizeof(sym->addr);
+	else
+		size = sym->size;
+
+	return ALIGN8(offsetof(struct nfp_dump_rtsym, rtsym) + key_len + 1) +
+	       ALIGN8(size);
+}
+
+static int
+nfp_add_tlv_size(struct nfp_pf *pf, struct nfp_dump_tl *tl, void *param)
+{
+	struct nfp_dumpspec_csr *spec_csr;
+	u32 *size = param;
+	u32 hwinfo_size;
+
+	switch (be32_to_cpu(tl->type)) {
+	case NFP_DUMPSPEC_TYPE_FWNAME:
+		*size += nfp_calc_fwname_tlv_size(pf);
+		break;
+	case NFP_DUMPSPEC_TYPE_CPP_CSR:
+	case NFP_DUMPSPEC_TYPE_XPB_CSR:
+	case NFP_DUMPSPEC_TYPE_ME_CSR:
+		spec_csr = (struct nfp_dumpspec_csr *)tl;
+		if (!nfp_csr_spec_valid(spec_csr))
+			*size += nfp_dump_error_tlv_size(tl);
+		else
+			*size += ALIGN8(sizeof(struct nfp_dump_csr)) +
+				 ALIGN8(be32_to_cpu(spec_csr->cpp.dump_length));
+		break;
+	case NFP_DUMPSPEC_TYPE_INDIRECT_ME_CSR:
+		spec_csr = (struct nfp_dumpspec_csr *)tl;
+		if (!nfp_csr_spec_valid(spec_csr))
+			*size += nfp_dump_error_tlv_size(tl);
+		else
+			*size += ALIGN8(sizeof(struct nfp_dump_csr)) +
+				 ALIGN8(be32_to_cpu(spec_csr->cpp.dump_length) *
+					NFP_IND_NUM_CONTEXTS);
+		break;
+	case NFP_DUMPSPEC_TYPE_RTSYM:
+		*size += nfp_calc_rtsym_dump_sz(pf, tl);
+		break;
+	case NFP_DUMPSPEC_TYPE_HWINFO:
+		hwinfo_size = nfp_hwinfo_get_packed_str_size(pf->hwinfo);
+		*size += sizeof(struct nfp_dump_tl) + ALIGN8(hwinfo_size);
+		break;
+	case NFP_DUMPSPEC_TYPE_HWINFO_FIELD:
+		*size += nfp_calc_hwinfo_field_sz(pf, tl);
+		break;
+	default:
+		*size += nfp_dump_error_tlv_size(tl);
+		break;
+	}
+
+	return 0;
+}
+
+static int
+nfp_calc_specific_level_size(struct nfp_pf *pf, struct nfp_dump_tl *dump_level,
+			     void *param)
+{
+	struct nfp_level_size *lev_sz = param;
+
+	if (dump_level->type != lev_sz->requested_level)
+		return 0;
+
+	return nfp_traverse_tlvs(pf, dump_level->data,
+				 be32_to_cpu(dump_level->length),
+				 &lev_sz->total_size, nfp_add_tlv_size);
+}
+
+s64 nfp_net_dump_calculate_size(struct nfp_pf *pf, struct nfp_dumpspec *spec,
+				u32 flag)
+{
+	struct nfp_level_size lev_sz;
+	int err;
+
+	lev_sz.requested_level = cpu_to_be32(flag);
+	lev_sz.total_size = ALIGN8(sizeof(struct nfp_dump_prolog));
+
+	err = nfp_traverse_tlvs(pf, spec->data, spec->size, &lev_sz,
+				nfp_calc_specific_level_size);
+	if (err)
+		return err;
+
+	return lev_sz.total_size;
+}
+
+static int nfp_add_tlv(u32 type, u32 total_tlv_sz, struct nfp_dump_state *dump)
+{
+	struct nfp_dump_tl *tl = dump->p;
+
+	if (total_tlv_sz > dump->buf_size)
+		return -ENOSPC;
+
+	if (dump->buf_size - total_tlv_sz < dump->dumped_size)
+		return -ENOSPC;
+
+	tl->type = cpu_to_be32(type);
+	tl->length = cpu_to_be32(total_tlv_sz - sizeof(*tl));
+
+	dump->dumped_size += total_tlv_sz;
+	dump->p += total_tlv_sz;
+
+	return 0;
+}
+
+static int
+nfp_dump_error_tlv(struct nfp_dump_tl *spec, int error,
+		   struct nfp_dump_state *dump)
+{
+	struct nfp_dump_error *dump_header = dump->p;
+	u32 total_spec_size, total_size;
+	int err;
+
+	total_spec_size = sizeof(*spec) + be32_to_cpu(spec->length);
+	total_size = ALIGN8(sizeof(*dump_header) + total_spec_size);
+
+	err = nfp_add_tlv(NFP_DUMPSPEC_TYPE_ERROR, total_size, dump);
+	if (err)
+		return err;
+
+	dump_header->error = cpu_to_be32(error);
+	memcpy(dump_header->spec, spec, total_spec_size);
+
+	return 0;
+}
+
+static int nfp_dump_fwname(struct nfp_pf *pf, struct nfp_dump_state *dump)
+{
+	struct nfp_dump_tl *dump_header = dump->p;
+	u32 fwname_len, total_size;
+	const char *fwname;
+	int err;
+
+	fwname = nfp_mip_name(pf->mip);
+	fwname_len = strlen(fwname);
+	total_size = sizeof(*dump_header) + ALIGN8(fwname_len + 1);
+
+	err = nfp_add_tlv(NFP_DUMPSPEC_TYPE_FWNAME, total_size, dump);
+	if (err)
+		return err;
+
+	memcpy(dump_header->data, fwname, fwname_len);
+
+	return 0;
+}
+
+static int
+nfp_dump_hwinfo(struct nfp_pf *pf, struct nfp_dump_tl *spec,
+		struct nfp_dump_state *dump)
+{
+	struct nfp_dump_tl *dump_header = dump->p;
+	u32 hwinfo_size, total_size;
+	char *hwinfo;
+	int err;
+
+	hwinfo = nfp_hwinfo_get_packed_strings(pf->hwinfo);
+	hwinfo_size = nfp_hwinfo_get_packed_str_size(pf->hwinfo);
+	total_size = sizeof(*dump_header) + ALIGN8(hwinfo_size);
+
+	err = nfp_add_tlv(NFP_DUMPSPEC_TYPE_HWINFO, total_size, dump);
+	if (err)
+		return err;
+
+	memcpy(dump_header->data, hwinfo, hwinfo_size);
+
+	return 0;
+}
+
+static int nfp_dump_hwinfo_field(struct nfp_pf *pf, struct nfp_dump_tl *spec,
+				 struct nfp_dump_state *dump)
+{
+	struct nfp_dump_tl *dump_header = dump->p;
+	u32 tl_len, key_len, val_len;
+	const char *key, *value;
+	u32 total_size;
+	int err;
+
+	tl_len = be32_to_cpu(spec->length);
+	key_len = strnlen(spec->data, tl_len);
+	if (key_len == tl_len)
+		return nfp_dump_error_tlv(spec, -EINVAL, dump);
+
+	key = spec->data;
+	value = nfp_hwinfo_lookup(pf->hwinfo, key);
+	if (!value)
+		return nfp_dump_error_tlv(spec, -ENOENT, dump);
+
+	val_len = strlen(value);
+	total_size = sizeof(*dump_header) + ALIGN8(key_len + val_len + 2);
+	err = nfp_add_tlv(NFP_DUMPSPEC_TYPE_HWINFO_FIELD, total_size, dump);
+	if (err)
+		return err;
+
+	memcpy(dump_header->data, key, key_len + 1);
+	memcpy(dump_header->data + key_len + 1, value, val_len + 1);
+
+	return 0;
+}
+
+static bool is_xpb_read(struct nfp_dumpspec_cpp_isl_id *cpp_id)
+{
+	return cpp_id->target == NFP_CPP_TARGET_ISLAND_XPB &&
+	       cpp_id->action == 0 && cpp_id->token == 0;
+}
+
+static int
+nfp_dump_csr_range(struct nfp_pf *pf, struct nfp_dumpspec_csr *spec_csr,
+		   struct nfp_dump_state *dump)
+{
+	struct nfp_dump_csr *dump_header = dump->p;
+	u32 reg_sz, header_size, total_size;
+	u32 cpp_rd_addr, max_rd_addr;
+	int bytes_read;
+	void *dest;
+	u32 cpp_id;
+	int err;
+
+	if (!nfp_csr_spec_valid(spec_csr))
+		return nfp_dump_error_tlv(&spec_csr->tl, -EINVAL, dump);
+
+	reg_sz = be32_to_cpu(spec_csr->register_width) / BITS_PER_BYTE;
+	header_size = ALIGN8(sizeof(*dump_header));
+	total_size = header_size +
+		     ALIGN8(be32_to_cpu(spec_csr->cpp.dump_length));
+	dest = dump->p + header_size;
+
+	err = nfp_add_tlv(be32_to_cpu(spec_csr->tl.type), total_size, dump);
+	if (err)
+		return err;
+
+	dump_header->cpp = spec_csr->cpp;
+	dump_header->register_width = spec_csr->register_width;
+
+	cpp_id = nfp_get_numeric_cpp_id(&spec_csr->cpp.cpp_id);
+	cpp_rd_addr = be32_to_cpu(spec_csr->cpp.offset);
+	max_rd_addr = cpp_rd_addr + be32_to_cpu(spec_csr->cpp.dump_length);
+
+	while (cpp_rd_addr < max_rd_addr) {
+		if (is_xpb_read(&spec_csr->cpp.cpp_id))
+			bytes_read = nfp_xpb_readl(pf->cpp, cpp_rd_addr,
+						   (u32 *)dest);
+		else
+			bytes_read = nfp_cpp_read(pf->cpp, cpp_id, cpp_rd_addr,
+						  dest, reg_sz);
+		if (bytes_read != reg_sz) {
+			if (bytes_read >= 0)
+				bytes_read = -EIO;
+			dump_header->error = cpu_to_be32(bytes_read);
+			dump_header->error_offset = cpu_to_be32(cpp_rd_addr);
+			break;
+		}
+		cpp_rd_addr += reg_sz;
+		dest += reg_sz;
+	}
+
+	return 0;
+}
+
+/* Write context to CSRCtxPtr, then read from it. Then the value can be read
+ * from IndCtxStatus.
+ */
+static int
+nfp_read_indirect_csr(struct nfp_cpp *cpp,
+		      struct nfp_dumpspec_cpp_isl_id cpp_params, u32 offset,
+		      u32 reg_sz, u32 context, void *dest)
+{
+	u32 csr_ctx_ptr_offs;
+	u32 cpp_id;
+	int result;
+
+	csr_ctx_ptr_offs = nfp_get_ind_csr_ctx_ptr_offs(offset);
+	cpp_id = NFP_CPP_ISLAND_ID(cpp_params.target,
+				   NFP_IND_ME_REFL_WR_SIG_INIT,
+				   cpp_params.token, cpp_params.island);
+	result = nfp_cpp_writel(cpp, cpp_id, csr_ctx_ptr_offs, context);
+	if (result != sizeof(context))
+		return result < 0 ? result : -EIO;
+
+	cpp_id = nfp_get_numeric_cpp_id(&cpp_params);
+	result = nfp_cpp_read(cpp, cpp_id, csr_ctx_ptr_offs, dest, reg_sz);
+	if (result != reg_sz)
+		return result < 0 ? result : -EIO;
+
+	result = nfp_cpp_read(cpp, cpp_id, offset, dest, reg_sz);
+	if (result != reg_sz)
+		return result < 0 ? result : -EIO;
+
+	return 0;
+}
+
+static int
+nfp_read_all_indirect_csr_ctx(struct nfp_cpp *cpp,
+			      struct nfp_dumpspec_csr *spec_csr, u32 address,
+			      u32 reg_sz, void *dest)
+{
+	u32 ctx;
+	int err;
+
+	for (ctx = 0; ctx < NFP_IND_NUM_CONTEXTS; ctx++) {
+		err = nfp_read_indirect_csr(cpp, spec_csr->cpp.cpp_id, address,
+					    reg_sz, ctx, dest + ctx * reg_sz);
+		if (err)
+			return err;
+	}
+
+	return 0;
+}
+
+static int
+nfp_dump_indirect_csr_range(struct nfp_pf *pf,
+			    struct nfp_dumpspec_csr *spec_csr,
+			    struct nfp_dump_state *dump)
+{
+	struct nfp_dump_csr *dump_header = dump->p;
+	u32 reg_sz, header_size, total_size;
+	u32 cpp_rd_addr, max_rd_addr;
+	u32 reg_data_length;
+	void *dest;
+	int err;
+
+	if (!nfp_csr_spec_valid(spec_csr))
+		return nfp_dump_error_tlv(&spec_csr->tl, -EINVAL, dump);
+
+	reg_sz = be32_to_cpu(spec_csr->register_width) / BITS_PER_BYTE;
+	header_size = ALIGN8(sizeof(*dump_header));
+	reg_data_length = be32_to_cpu(spec_csr->cpp.dump_length) *
+			  NFP_IND_NUM_CONTEXTS;
+	total_size = header_size + ALIGN8(reg_data_length);
+	dest = dump->p + header_size;
+
+	err = nfp_add_tlv(be32_to_cpu(spec_csr->tl.type), total_size, dump);
+	if (err)
+		return err;
+
+	dump_header->cpp = spec_csr->cpp;
+	dump_header->register_width = spec_csr->register_width;
+
+	cpp_rd_addr = be32_to_cpu(spec_csr->cpp.offset);
+	max_rd_addr = cpp_rd_addr + be32_to_cpu(spec_csr->cpp.dump_length);
+	while (cpp_rd_addr < max_rd_addr) {
+		err = nfp_read_all_indirect_csr_ctx(pf->cpp, spec_csr,
+						    cpp_rd_addr, reg_sz, dest);
+		if (err) {
+			dump_header->error = cpu_to_be32(err);
+			dump_header->error_offset = cpu_to_be32(cpp_rd_addr);
+			break;
+		}
+		cpp_rd_addr += reg_sz;
+		dest += reg_sz * NFP_IND_NUM_CONTEXTS;
+	}
+
+	return 0;
+}
+
+static int
+nfp_dump_single_rtsym(struct nfp_pf *pf, struct nfp_dumpspec_rtsym *spec,
+		      struct nfp_dump_state *dump)
+{
+	struct nfp_dump_rtsym *dump_header = dump->p;
+	struct nfp_dumpspec_cpp_isl_id cpp_params;
+	struct nfp_rtsym_table *rtbl = pf->rtbl;
+	const struct nfp_rtsym *sym;
+	u32 header_size, total_size;
+	u32 tl_len, key_len;
+	int bytes_read;
+	u32 cpp_id;
+	void *dest;
+	int err;
+
+	tl_len = be32_to_cpu(spec->tl.length);
+	key_len = strnlen(spec->rtsym, tl_len);
+	if (key_len == tl_len)
+		return nfp_dump_error_tlv(&spec->tl, -EINVAL, dump);
+
+	sym = nfp_rtsym_lookup(rtbl, spec->rtsym);
+	if (!sym)
+		return nfp_dump_error_tlv(&spec->tl, -ENOENT, dump);
+
+	header_size =
+		ALIGN8(offsetof(struct nfp_dump_rtsym, rtsym) + key_len + 1);
+	total_size = header_size + ALIGN8(sym->size);
+	dest = dump->p + header_size;
+
+	err = nfp_add_tlv(be32_to_cpu(spec->tl.type), total_size, dump);
+	if (err)
+		return err;
+
+	dump_header->padded_name_length =
+		header_size - offsetof(struct nfp_dump_rtsym, rtsym);
+	memcpy(dump_header->rtsym, spec->rtsym, key_len + 1);
+
+	if (sym->type == NFP_RTSYM_TYPE_ABS) {
+		dump_header->cpp.dump_length = cpu_to_be32(sizeof(sym->addr));
+		*(u64 *)dest = sym->addr;
+	} else {
+		cpp_params.target = sym->target;
+		cpp_params.action = NFP_CPP_ACTION_RW;
+		cpp_params.token  = 0;
+		cpp_params.island = sym->domain;
+		cpp_id = nfp_get_numeric_cpp_id(&cpp_params);
+		dump_header->cpp.cpp_id = cpp_params;
+		dump_header->cpp.offset = cpu_to_be32(sym->addr);
+		dump_header->cpp.dump_length = cpu_to_be32(sym->size);
+		bytes_read = nfp_cpp_read(pf->cpp, cpp_id, sym->addr, dest,
+					  sym->size);
+		if (bytes_read != sym->size) {
+			if (bytes_read >= 0)
+				bytes_read = -EIO;
+			dump_header->error = cpu_to_be32(bytes_read);
+		}
+	}
+
+	return 0;
+}
+
+static int
+nfp_dump_for_tlv(struct nfp_pf *pf, struct nfp_dump_tl *tl, void *param)
+{
+	struct nfp_dumpspec_rtsym *spec_rtsym;
+	struct nfp_dump_state *dump = param;
+	struct nfp_dumpspec_csr *spec_csr;
+	int err;
+
+	switch (be32_to_cpu(tl->type)) {
+	case NFP_DUMPSPEC_TYPE_FWNAME:
+		err = nfp_dump_fwname(pf, dump);
+		if (err)
+			return err;
+		break;
+	case NFP_DUMPSPEC_TYPE_CPP_CSR:
+	case NFP_DUMPSPEC_TYPE_XPB_CSR:
+	case NFP_DUMPSPEC_TYPE_ME_CSR:
+		spec_csr = (struct nfp_dumpspec_csr *)tl;
+		err = nfp_dump_csr_range(pf, spec_csr, dump);
+		if (err)
+			return err;
+		break;
+	case NFP_DUMPSPEC_TYPE_INDIRECT_ME_CSR:
+		spec_csr = (struct nfp_dumpspec_csr *)tl;
+		err = nfp_dump_indirect_csr_range(pf, spec_csr, dump);
+		if (err)
+			return err;
+		break;
+	case NFP_DUMPSPEC_TYPE_RTSYM:
+		spec_rtsym = (struct nfp_dumpspec_rtsym *)tl;
+		err = nfp_dump_single_rtsym(pf, spec_rtsym, dump);
+		if (err)
+			return err;
+		break;
+	case NFP_DUMPSPEC_TYPE_HWINFO:
+		err = nfp_dump_hwinfo(pf, tl, dump);
+		if (err)
+			return err;
+		break;
+	case NFP_DUMPSPEC_TYPE_HWINFO_FIELD:
+		err = nfp_dump_hwinfo_field(pf, tl, dump);
+		if (err)
+			return err;
+		break;
+	default:
+		err = nfp_dump_error_tlv(tl, -EOPNOTSUPP, dump);
+		if (err)
+			return err;
+	}
+
+	return 0;
+}
+
+static int
+nfp_dump_specific_level(struct nfp_pf *pf, struct nfp_dump_tl *dump_level,
+			void *param)
+{
+	struct nfp_dump_state *dump = param;
+
+	if (dump_level->type != dump->requested_level)
+		return 0;
+
+	return nfp_traverse_tlvs(pf, dump_level->data,
+				 be32_to_cpu(dump_level->length), dump,
+				 nfp_dump_for_tlv);
+}
+
+static int nfp_dump_populate_prolog(struct nfp_dump_state *dump)
+{
+	struct nfp_dump_prolog *prolog = dump->p;
+	u32 total_size;
+	int err;
+
+	total_size = ALIGN8(sizeof(*prolog));
+
+	err = nfp_add_tlv(NFP_DUMPSPEC_TYPE_PROLOG, total_size, dump);
+	if (err)
+		return err;
+
+	prolog->dump_level = dump->requested_level;
+
+	return 0;
+}
+
+int nfp_net_dump_populate_buffer(struct nfp_pf *pf, struct nfp_dumpspec *spec,
+				 struct ethtool_dump *dump_param, void *dest)
+{
+	struct nfp_dump_state dump;
+	int err;
+
+	dump.requested_level = cpu_to_be32(dump_param->flag);
+	dump.dumped_size = 0;
+	dump.p = dest;
+	dump.buf_size = dump_param->len;
+
+	err = nfp_dump_populate_prolog(&dump);
+	if (err)
+		return err;
+
+	err = nfp_traverse_tlvs(pf, spec->data, spec->size, &dump,
+				nfp_dump_specific_level);
+	if (err)
+		return err;
+
+	/* Set size of actual dump, to trigger warning if different from
+	 * calculated size.
+	 */
+	dump_param->len = dump.dumped_size;
+
+	return 0;
+}
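
nfp_traverse_tlvs() above is a defensive TLV walker: it stops at an all-zero header, rejects chunks whose declared length overruns the buffer, requires 4-byte alignment, and hands each chunk to a visitor callback. The following self-contained user-space model mirrors that loop with a simplified layout and a made-up test buffer:

	#include <arpa/inet.h>	/* ntohl() */
	#include <stdint.h>
	#include <stdio.h>
	#include <string.h>

	struct tl {
		uint32_t type;		/* big-endian on the wire */
		uint32_t length;	/* payload bytes following the header */
	};

	typedef int (*tlv_visit)(const struct tl *tl, const uint8_t *payload);

	static int traverse_tlvs(const uint8_t *data, size_t len, tlv_visit visit)
	{
		size_t off = 0;

		while (len - off >= sizeof(struct tl)) {
			struct tl tl;
			uint32_t payload_len, total;

			memcpy(&tl, data + off, sizeof(tl));
			if (!tl.type && !tl.length)
				break;		/* all-zero header terminates */

			payload_len = ntohl(tl.length);
			if (payload_len > len - off - sizeof(tl))
				return -1;	/* truncated chunk */

			total = sizeof(tl) + payload_len;
			if (total % 4)		/* spec TLVs are 4-byte aligned */
				return -1;

			if (visit(&tl, data + off + sizeof(tl)))
				return -1;
			off += total;
		}
		return 0;
	}

	static int print_tlv(const struct tl *tl, const uint8_t *payload)
	{
		printf("type %u, %u payload bytes\n", ntohl(tl->type),
		       ntohl(tl->length));
		return 0;
	}

	int main(void)
	{
		/* one TLV: type 6 (FWNAME in the spec above), 4-byte payload */
		static const uint8_t buf[] = { 0, 0, 0, 6,  0, 0, 0, 4,
					       'f', 'w', '0', 0 };
		return traverse_tlvs(buf, sizeof(buf), print_tlv);
	}
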
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c b/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c
index 2801ecd..00b8c64 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c
@@ -47,18 +47,16 @@
 #include <linux/interrupt.h>
 #include <linux/pci.h>
 #include <linux/ethtool.h>
+#include <linux/firmware.h>
 
 #include "nfpcore/nfp.h"
 #include "nfpcore/nfp_nsp.h"
 #include "nfp_app.h"
+#include "nfp_main.h"
 #include "nfp_net_ctrl.h"
 #include "nfp_net.h"
 #include "nfp_port.h"
 
-enum nfp_dump_diag {
-	NFP_DUMP_NSP_DIAG = 0,
-};
-
 struct nfp_et_stat {
 	char name[ETH_GSTRING_LEN];
 	int off;
@@ -1066,15 +1064,34 @@ nfp_dump_nsp_diag(struct nfp_app *app, struct ethtool_dump *dump, void *buffer)
 	return ret;
 }
 
+/* Set the dump flag/level. Calculate the dump length for flag > 0 only (new TLV
+ * based dumps), since flag 0 (default) calculates the length in
+ * nfp_app_get_dump_flag(), and we need to support triggering a level 0 dump
+ * without setting the flag first, for backward compatibility.
+ */
 static int nfp_app_set_dump(struct net_device *netdev, struct ethtool_dump *val)
 {
 	struct nfp_app *app = nfp_app_from_netdev(netdev);
+	s64 len;
 
 	if (!app)
 		return -EOPNOTSUPP;
 
-	if (val->flag != NFP_DUMP_NSP_DIAG)
-		return -EINVAL;
+	if (val->flag == NFP_DUMP_NSP_DIAG) {
+		app->pf->dump_flag = val->flag;
+		return 0;
+	}
+
+	if (!app->pf->dumpspec)
+		return -EOPNOTSUPP;
+
+	len = nfp_net_dump_calculate_size(app->pf, app->pf->dumpspec,
+					  val->flag);
+	if (len < 0)
+		return len;
+
+	app->pf->dump_flag = val->flag;
+	app->pf->dump_len = len;
 
 	return 0;
 }
@@ -1082,14 +1099,37 @@ static int nfp_app_set_dump(struct net_device *netdev, struct ethtool_dump *val)
 static int
 nfp_app_get_dump_flag(struct net_device *netdev, struct ethtool_dump *dump)
 {
-	return nfp_dump_nsp_diag(nfp_app_from_netdev(netdev), dump, NULL);
+	struct nfp_app *app = nfp_app_from_netdev(netdev);
+
+	if (!app)
+		return -EOPNOTSUPP;
+
+	if (app->pf->dump_flag == NFP_DUMP_NSP_DIAG)
+		return nfp_dump_nsp_diag(app, dump, NULL);
+
+	dump->flag = app->pf->dump_flag;
+	dump->len = app->pf->dump_len;
+
+	return 0;
 }
 
 static int
 nfp_app_get_dump_data(struct net_device *netdev, struct ethtool_dump *dump,
 		      void *buffer)
 {
-	return nfp_dump_nsp_diag(nfp_app_from_netdev(netdev), dump, buffer);
+	struct nfp_app *app = nfp_app_from_netdev(netdev);
+
+	if (!app)
+		return -EOPNOTSUPP;
+
+	if (app->pf->dump_flag == NFP_DUMP_NSP_DIAG)
+		return nfp_dump_nsp_diag(app, dump, buffer);
+
+	dump->flag = app->pf->dump_flag;
+	dump->len = app->pf->dump_len;
+
+	return nfp_net_dump_populate_buffer(app->pf, app->pf->dumpspec, dump,
+					    buffer);
 }
 
 static int nfp_net_set_coalesce(struct net_device *netdev,
@@ -1230,6 +1270,57 @@ static int nfp_net_set_channels(struct net_device *netdev,
 	return nfp_net_set_num_rings(nn, total_rx, total_tx);
 }
 
+static int
+nfp_net_flash_device(struct net_device *netdev, struct ethtool_flash *flash)
+{
+	const struct firmware *fw;
+	struct nfp_app *app;
+	struct nfp_nsp *nsp;
+	struct device *dev;
+	int err;
+
+	if (flash->region != ETHTOOL_FLASH_ALL_REGIONS)
+		return -EOPNOTSUPP;
+
+	app = nfp_app_from_netdev(netdev);
+	if (!app)
+		return -EOPNOTSUPP;
+
+	dev = &app->pdev->dev;
+
+	nsp = nfp_nsp_open(app->cpp);
+	if (IS_ERR(nsp)) {
+		err = PTR_ERR(nsp);
+		dev_err(dev, "Failed to access the NSP: %d\n", err);
+		return err;
+	}
+
+	err = request_firmware_direct(&fw, flash->data, dev);
+	if (err)
+		goto exit_close_nsp;
+
+	dev_info(dev, "Please be patient while writing flash image: %s\n",
+		 flash->data);
+	dev_hold(netdev);
+	rtnl_unlock();
+
+	err = nfp_nsp_write_flash(nsp, fw);
+	if (err < 0) {
+		dev_err(dev, "Flash write failed: %d\n", err);
+		goto exit_rtnl_lock;
+	}
+	dev_info(dev, "Finished writing flash image\n");
+
+exit_rtnl_lock:
+	rtnl_lock();
+	dev_put(netdev);
+	release_firmware(fw);
+
+exit_close_nsp:
+	nfp_nsp_close(nsp);
+	return err;
+}
+
 static const struct ethtool_ops nfp_net_ethtool_ops = {
 	.get_drvinfo		= nfp_net_get_drvinfo,
 	.get_link		= ethtool_op_get_link,
@@ -1240,6 +1331,7 @@ static const struct ethtool_ops nfp_net_ethtool_ops = {
 	.get_sset_count		= nfp_net_get_sset_count,
 	.get_rxnfc		= nfp_net_get_rxnfc,
 	.set_rxnfc		= nfp_net_set_rxnfc,
+	.flash_device		= nfp_net_flash_device,
 	.get_rxfh_indir_size	= nfp_net_get_rxfh_indir_size,
 	.get_rxfh_key_size	= nfp_net_get_rxfh_key_size,
 	.get_rxfh		= nfp_net_get_rxfh,
@@ -1265,6 +1357,7 @@ const struct ethtool_ops nfp_port_ethtool_ops = {
 	.get_strings		= nfp_port_get_strings,
 	.get_ethtool_stats	= nfp_port_get_stats,
 	.get_sset_count		= nfp_port_get_sset_count,
+	.flash_device		= nfp_net_flash_device,
 	.set_dump		= nfp_app_set_dump,
 	.get_dump_flag		= nfp_app_get_dump_flag,
 	.get_dump_data		= nfp_app_get_dump_data,
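
The dump callbacks above are driven from user space through the ETHTOOL_SET_DUMP, ETHTOOL_GET_DUMP_FLAG and ETHTOOL_GET_DUMP_DATA ioctls. A hedged sketch of that sequence ("eth0" and level 1 are arbitrary); `ethtool -W eth0 1` followed by `ethtool -w eth0 data dump.bin` exercises the same path from the shell:

	#include <linux/ethtool.h>
	#include <linux/sockios.h>
	#include <net/if.h>
	#include <stdio.h>
	#include <stdlib.h>
	#include <string.h>
	#include <sys/ioctl.h>
	#include <sys/socket.h>
	#include <unistd.h>

	int main(void)
	{
		struct ethtool_dump flag = { .cmd = ETHTOOL_SET_DUMP, .flag = 1 };
		struct ethtool_dump *data;
		struct ifreq ifr = {0};
		int fd, err;

		fd = socket(AF_INET, SOCK_DGRAM, 0);
		if (fd < 0)
			return 1;
		strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
		ifr.ifr_data = (void *)&flag;

		if (ioctl(fd, SIOCETHTOOL, &ifr))	/* set dump level */
			goto err_close;

		flag.cmd = ETHTOOL_GET_DUMP_FLAG;	/* read back flag + len */
		if (ioctl(fd, SIOCETHTOOL, &ifr))
			goto err_close;

		data = calloc(1, sizeof(*data) + flag.len);
		if (!data)
			goto err_close;
		data->cmd = ETHTOOL_GET_DUMP_DATA;
		data->len = flag.len;
		ifr.ifr_data = (void *)data;
		err = ioctl(fd, SIOCETHTOOL, &ifr);	/* fetch the dump */
		if (!err)
			printf("dumped %u bytes at level %u\n", data->len,
			       flag.flag);
		free(data);
		close(fd);
		return err ? 1 : 0;

	err_close:
		close(fd);
		return 1;
	}
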
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c b/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c
index 78b36c6..317f87c 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c
@@ -186,6 +186,13 @@ nfp_repr_get_offload_stats(int attr_id, const struct net_device *dev,
 	return -EINVAL;
 }
 
+static int nfp_repr_change_mtu(struct net_device *netdev, int new_mtu)
+{
+	struct nfp_repr *repr = netdev_priv(netdev);
+
+	return nfp_app_change_mtu(repr->app, netdev, new_mtu);
+}
+
 static netdev_tx_t nfp_repr_xmit(struct sk_buff *skb, struct net_device *netdev)
 {
 	struct nfp_repr *repr = netdev_priv(netdev);
@@ -240,6 +247,7 @@ const struct net_device_ops nfp_repr_netdev_ops = {
 	.ndo_open		= nfp_repr_open,
 	.ndo_stop		= nfp_repr_stop,
 	.ndo_start_xmit		= nfp_repr_xmit,
+	.ndo_change_mtu		= nfp_repr_change_mtu,
 	.ndo_get_stats64	= nfp_repr_get_stats64,
 	.ndo_has_offload_stats	= nfp_repr_has_offload_stats,
 	.ndo_get_offload_stats	= nfp_repr_get_offload_stats,
@@ -336,6 +344,8 @@ struct net_device *nfp_repr_alloc(struct nfp_app *app)
 	if (!netdev)
 		return NULL;
 
+	netif_carrier_off(netdev);
+
 	repr = netdev_priv(netdev);
 	repr->netdev = netdev;
 	repr->app = app;
@@ -375,11 +385,22 @@ nfp_reprs_clean_and_free_by_type(struct nfp_app *app,
 				 enum nfp_repr_type type)
 {
 	struct nfp_reprs *reprs;
+	int i;
 
-	reprs = nfp_app_reprs_set(app, type, NULL);
+	reprs = rcu_dereference_protected(app->reprs[type],
+					  lockdep_is_held(&app->pf->lock));
 	if (!reprs)
 		return;
 
+	/* Preclean must happen before we remove the reprs reference from the
+	 * app below.
+	 */
+	for (i = 0; i < reprs->num_reprs; i++)
+		if (reprs->reprs[i])
+			nfp_app_repr_preclean(app, reprs->reprs[i]);
+
+	reprs = nfp_app_reprs_set(app, type, NULL);
+
 	synchronize_rcu();
 	nfp_reprs_clean_and_free(reprs);
 }
@@ -418,8 +439,10 @@ int nfp_reprs_resync_phys_ports(struct nfp_app *app)
 			continue;
 
 		repr = netdev_priv(old_reprs->reprs[i]);
-		if (repr->port->type == NFP_PORT_INVALID)
+		if (repr->port->type == NFP_PORT_INVALID) {
+			nfp_app_repr_preclean(app, old_reprs->reprs[i]);
 			continue;
+		}
 
 		reprs->reprs[i] = old_reprs->reprs[i];
 	}
@@ -436,7 +459,6 @@ int nfp_reprs_resync_phys_ports(struct nfp_app *app)
 		if (repr->port->type != NFP_PORT_INVALID)
 			continue;
 
-		nfp_app_repr_stop(app, repr);
 		nfp_repr_clean(repr);
 	}
 
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_repr.h b/drivers/net/ethernet/netronome/nfp/nfp_net_repr.h
index 5d4d897..cbc7bad 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_repr.h
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_repr.h
@@ -89,6 +89,7 @@ struct nfp_repr {
  * @NFP_REPR_TYPE_PHYS_PORT:	external NIC port
  * @NFP_REPR_TYPE_PF:		physical function
  * @NFP_REPR_TYPE_VF:		virtual function
+ * @__NFP_REPR_TYPE_MAX:	number of representor types
  */
 enum nfp_repr_type {
 	NFP_REPR_TYPE_PHYS_PORT,
diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp.h b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp.h
index 3ce51f0..ced62d1 100644
--- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp.h
+++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp.h
@@ -49,6 +49,8 @@
 struct nfp_hwinfo;
 struct nfp_hwinfo *nfp_hwinfo_read(struct nfp_cpp *cpp);
 const char *nfp_hwinfo_lookup(struct nfp_hwinfo *hwinfo, const char *lookup);
+char *nfp_hwinfo_get_packed_strings(struct nfp_hwinfo *hwinfo);
+u32 nfp_hwinfo_get_packed_str_size(struct nfp_hwinfo *hwinfo);
 
 /* Implemented in nfp_nsp.c, low level functions */
 
diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cpp.h b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cpp.h
index 5798adc..c8f2c06 100644
--- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cpp.h
+++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cpp.h
@@ -242,6 +242,7 @@ int nfp_cpp_area_read(struct nfp_cpp_area *area, unsigned long offset,
 		      void *buffer, size_t length);
 int nfp_cpp_area_write(struct nfp_cpp_area *area, unsigned long offset,
 		       const void *buffer, size_t length);
+size_t nfp_cpp_area_size(struct nfp_cpp_area *area);
 const char *nfp_cpp_area_name(struct nfp_cpp_area *cpp_area);
 void *nfp_cpp_area_priv(struct nfp_cpp_area *cpp_area);
 struct nfp_cpp *nfp_cpp_area_cpp(struct nfp_cpp_area *cpp_area);
diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cppcore.c b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cppcore.c
index 04dd575..2826247 100644
--- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cppcore.c
+++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cppcore.c
@@ -372,8 +372,7 @@ nfp_cpp_area_alloc(struct nfp_cpp *cpp, u32 dest,
  * that it can be accessed directly.
  *
  * NOTE: @address and @size must be 32-bit aligned values.
- *
- * NOTE: The area must also be 'released' when the structure is freed.
+ * The area must also be 'released' when the structure is freed.
  *
  * Return: NFP CPP Area handle, or NULL
  */
@@ -536,8 +535,7 @@ void nfp_cpp_area_release_free(struct nfp_cpp_area *area)
  * Read data from indicated CPP region.
  *
  * NOTE: @offset and @length must be 32-bit aligned values.
- *
- * NOTE: Area must have been locked down with an 'acquire'.
+ * Area must have been locked down with an 'acquire'.
  *
  * Return: length of io, or -ERRNO
  */
@@ -558,8 +556,7 @@ int nfp_cpp_area_read(struct nfp_cpp_area *area,
  * Write data to indicated CPP region.
  *
  * NOTE: @offset and @length must be 32-bit aligned values.
- *
- * NOTE: Area must have been locked down with an 'acquire'.
+ * Area must have been locked down with an 'acquire'.
  *
  * Return: length of io, or -ERRNO
  */
@@ -571,6 +568,17 @@ int nfp_cpp_area_write(struct nfp_cpp_area *area,
 }
 
 /**
+ * nfp_cpp_area_size() - return size of a CPP area
+ * @cpp_area:	CPP area handle
+ *
+ * Return: Size of the area
+ */
+size_t nfp_cpp_area_size(struct nfp_cpp_area *cpp_area)
+{
+	return cpp_area->size;
+}
+
+/**
  * nfp_cpp_area_name() - return name of a CPP area
  * @cpp_area:	CPP area handle
  *
diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_hwinfo.c b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_hwinfo.c
index 4f24aff..063a9a6 100644
--- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_hwinfo.c
+++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_hwinfo.c
@@ -302,3 +302,13 @@ const char *nfp_hwinfo_lookup(struct nfp_hwinfo *hwinfo, const char *lookup)
 
 	return NULL;
 }
+
+char *nfp_hwinfo_get_packed_strings(struct nfp_hwinfo *hwinfo)
+{
+	return hwinfo->data;
+}
+
+u32 nfp_hwinfo_get_packed_str_size(struct nfp_hwinfo *hwinfo)
+{
+	return le32_to_cpu(hwinfo->size) - sizeof(u32);
+}
diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.c b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.c
index 14a6d1b..39abac6 100644
--- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.c
+++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.c
@@ -51,6 +51,9 @@
 #include "nfp_cpp.h"
 #include "nfp_nsp.h"
 
+#define NFP_NSP_TIMEOUT_DEFAULT	30
+#define NFP_NSP_TIMEOUT_BOOT	30
+
 /* Offsets relative to the CSR base */
 #define NSP_STATUS		0x00
 #define   NSP_STATUS_MAGIC	GENMASK_ULL(63, 48)
@@ -93,6 +96,7 @@ enum nfp_nsp_cmd {
 	SPCODE_FW_LOAD		= 6, /* Load fw from buffer, len in option */
 	SPCODE_ETH_RESCAN	= 7, /* Rescan ETHs, write ETH_TABLE to buf */
 	SPCODE_ETH_CONTROL	= 8, /* Update media config from buffer */
+	SPCODE_NSP_WRITE_FLASH	= 11, /* Load and flash image from buffer */
 	SPCODE_NSP_SENSORS	= 12, /* Read NSP sensor(s) */
 	SPCODE_NSP_IDENTIFY	= 13, /* Read NSP version */
 };
@@ -260,10 +264,10 @@ u16 nfp_nsp_get_abi_ver_minor(struct nfp_nsp *state)
 }
 
 static int
-nfp_nsp_wait_reg(struct nfp_cpp *cpp, u64 *reg,
-		 u32 nsp_cpp, u64 addr, u64 mask, u64 val)
+nfp_nsp_wait_reg(struct nfp_cpp *cpp, u64 *reg, u32 nsp_cpp, u64 addr,
+		 u64 mask, u64 val, u32 timeout_sec)
 {
-	const unsigned long wait_until = jiffies + 30 * HZ;
+	const unsigned long wait_until = jiffies + timeout_sec * HZ;
 	int err;
 
 	for (;;) {
@@ -285,12 +289,13 @@ nfp_nsp_wait_reg(struct nfp_cpp *cpp, u64 *reg,
 }
 
 /**
- * nfp_nsp_command() - Execute a command on the NFP Service Processor
+ * __nfp_nsp_command() - Execute a command on the NFP Service Processor
  * @state:	NFP SP state
  * @code:	NFP SP Command Code
  * @option:	NFP SP Command Argument
  * @buff_cpp:	NFP SP Buffer CPP Address info
  * @buff_addr:	NFP SP Buffer Host address
+ * @timeout_sec:	Timeout value to wait for completion in seconds
  *
  * Return: 0 for success with no result
  *
@@ -300,10 +305,11 @@ nfp_nsp_wait_reg(struct nfp_cpp *cpp, u64 *reg,
  *	-ENODEV if the NSP is not a supported model
  *	-EBUSY if the NSP is stuck
  *	-EINTR if interrupted while waiting for completion
- *	-ETIMEDOUT if the NSP took longer than 30 seconds to complete
+ *	-ETIMEDOUT if the NSP took longer than @timeout_sec seconds to complete
  */
-static int nfp_nsp_command(struct nfp_nsp *state, u16 code, u32 option,
-			   u32 buff_cpp, u64 buff_addr)
+static int
+__nfp_nsp_command(struct nfp_nsp *state, u16 code, u32 option, u32 buff_cpp,
+		  u64 buff_addr, u32 timeout_sec)
 {
 	u64 reg, ret_val, nsp_base, nsp_buffer, nsp_status, nsp_command;
 	struct nfp_cpp *cpp = state->cpp;
@@ -341,8 +347,8 @@ static int nfp_nsp_command(struct nfp_nsp *state, u16 code, u32 option,
 		return err;
 
 	/* Wait for NSP_COMMAND_START to go to 0 */
-	err = nfp_nsp_wait_reg(cpp, &reg,
-			       nsp_cpp, nsp_command, NSP_COMMAND_START, 0);
+	err = nfp_nsp_wait_reg(cpp, &reg, nsp_cpp, nsp_command,
+			       NSP_COMMAND_START, 0, NFP_NSP_TIMEOUT_DEFAULT);
 	if (err) {
 		nfp_err(cpp, "Error %d waiting for code 0x%04x to start\n",
 			err, code);
@@ -350,8 +356,8 @@ static int nfp_nsp_command(struct nfp_nsp *state, u16 code, u32 option,
 	}
 
 	/* Wait for NSP_STATUS_BUSY to go to 0 */
-	err = nfp_nsp_wait_reg(cpp, &reg,
-			       nsp_cpp, nsp_status, NSP_STATUS_BUSY, 0);
+	err = nfp_nsp_wait_reg(cpp, &reg, nsp_cpp, nsp_status, NSP_STATUS_BUSY,
+			       0, timeout_sec);
 	if (err) {
 		nfp_err(cpp, "Error %d waiting for code 0x%04x to complete\n",
 			err, code);
@@ -374,9 +380,18 @@ static int nfp_nsp_command(struct nfp_nsp *state, u16 code, u32 option,
 	return ret_val;
 }
 
-static int nfp_nsp_command_buf(struct nfp_nsp *nsp, u16 code, u32 option,
-			       const void *in_buf, unsigned int in_size,
-			       void *out_buf, unsigned int out_size)
+static int
+nfp_nsp_command(struct nfp_nsp *state, u16 code, u32 option, u32 buff_cpp,
+		u64 buff_addr)
+{
+	return __nfp_nsp_command(state, code, option, buff_cpp, buff_addr,
+				 NFP_NSP_TIMEOUT_DEFAULT);
+}
+
+static int
+__nfp_nsp_command_buf(struct nfp_nsp *nsp, u16 code, u32 option,
+		      const void *in_buf, unsigned int in_size, void *out_buf,
+		      unsigned int out_size, u32 timeout_sec)
 {
 	struct nfp_cpp *cpp = nsp->cpp;
 	unsigned int max_size;
@@ -429,7 +444,8 @@ static int nfp_nsp_command_buf(struct nfp_nsp *nsp, u16 code, u32 option,
 			return err;
 	}
 
-	ret = nfp_nsp_command(nsp, code, option, cpp_id, cpp_buf);
+	ret = __nfp_nsp_command(nsp, code, option, cpp_id, cpp_buf,
+				timeout_sec);
 	if (ret < 0)
 		return ret;
 
@@ -442,12 +458,23 @@ static int nfp_nsp_command_buf(struct nfp_nsp *nsp, u16 code, u32 option,
 	return ret;
 }
 
+static int
+nfp_nsp_command_buf(struct nfp_nsp *nsp, u16 code, u32 option,
+		    const void *in_buf, unsigned int in_size, void *out_buf,
+		    unsigned int out_size)
+{
+	return __nfp_nsp_command_buf(nsp, code, option, in_buf, in_size,
+				     out_buf, out_size,
+				     NFP_NSP_TIMEOUT_DEFAULT);
+}
+
 int nfp_nsp_wait(struct nfp_nsp *state)
 {
-	const unsigned long wait_until = jiffies + 30 * HZ;
+	const unsigned long wait_until = jiffies + NFP_NSP_TIMEOUT_BOOT * HZ;
 	int err;
 
-	nfp_dbg(state->cpp, "Waiting for NSP to respond (30 sec max).\n");
+	nfp_dbg(state->cpp, "Waiting for NSP to respond (%u sec max).\n",
+		NFP_NSP_TIMEOUT_BOOT);
 
 	for (;;) {
 		const unsigned long start_time = jiffies;
@@ -488,6 +515,17 @@ int nfp_nsp_load_fw(struct nfp_nsp *state, const struct firmware *fw)
 				   fw->size, NULL, 0);
 }
 
+int nfp_nsp_write_flash(struct nfp_nsp *state, const struct firmware *fw)
+{
+	/* The flash time is specified to take a maximum of 70s, so we add an
+	 * additional factor to this spec time.
+	 */
+	u32 timeout_sec = 2.5 * 70;
+
+	return __nfp_nsp_command_buf(state, SPCODE_NSP_WRITE_FLASH, fw->size,
+				     fw->data, fw->size, NULL, 0, timeout_sec);
+}
+
 int nfp_nsp_read_eth_table(struct nfp_nsp *state, void *buf, unsigned int size)
 {
 	return nfp_nsp_command_buf(state, SPCODE_ETH_RESCAN, size, NULL, 0,
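
The nfp_nsp.c hunks above turn the hard-coded 30 second wait into a
timeout_sec parameter threaded through __nfp_nsp_command() and
__nfp_nsp_command_buf(), with thin wrappers keeping the old default for
existing callers. Note that the "2.5 * 70" initializer is a constant
expression the compiler folds to 175 at build time, so no floating-point
math is executed at runtime. A minimal sketch of the same jiffies-based
polling pattern (read_reg() is a hypothetical register accessor, not a
driver symbol):

	static int wait_reg_sketch(u64 *reg, u64 mask, u64 val, u32 timeout_sec)
	{
		const unsigned long wait_until = jiffies + timeout_sec * HZ;

		for (;;) {
			*reg = read_reg();	/* hypothetical accessor */
			if ((*reg & mask) == val)
				return 0;	/* condition met */

			/* Sleep between polls; bail out if interrupted. */
			if (msleep_interruptible(25))
				return -ERESTARTSYS;

			if (time_after(jiffies, wait_until))
				return -ETIMEDOUT;
		}
	}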
diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.h b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.h
index 650ca1a..e983c9d 100644
--- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.h
+++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.h
@@ -48,6 +48,7 @@ u16 nfp_nsp_get_abi_ver_minor(struct nfp_nsp *state);
 int nfp_nsp_wait(struct nfp_nsp *state);
 int nfp_nsp_device_soft_reset(struct nfp_nsp *state);
 int nfp_nsp_load_fw(struct nfp_nsp *state, const struct firmware *fw);
+int nfp_nsp_write_flash(struct nfp_nsp *state, const struct firmware *fw);
 int nfp_nsp_mac_reinit(struct nfp_nsp *state);
 
 static inline bool nfp_nsp_has_mac_reinit(struct nfp_nsp *state)
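
A hypothetical caller of the new export (not part of this patch; pf, dev
and the firmware name are illustrative) would pair it with
request_firmware() and the existing NSP open/close API:

	const struct firmware *fw;
	struct nfp_nsp *nsp;
	int err;

	err = request_firmware(&fw, "flash.bin", dev);
	if (err)
		return err;

	nsp = nfp_nsp_open(pf->cpp);
	if (IS_ERR(nsp)) {
		release_firmware(fw);
		return PTR_ERR(nsp);
	}

	err = nfp_nsp_write_flash(nsp, fw);	/* may take minutes */

	nfp_nsp_close(nsp);
	release_firmware(fw);
	return err;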
diff --git a/drivers/net/ethernet/nvidia/forcedeth.c b/drivers/net/ethernet/nvidia/forcedeth.c
index 481876b..21e15cb 100644
--- a/drivers/net/ethernet/nvidia/forcedeth.c
+++ b/drivers/net/ethernet/nvidia/forcedeth.c
@@ -793,7 +793,7 @@ struct fe_priv {
 	/* rx specific fields.
 	 * Locking: Within irq handler or disable_irq+spin_lock(&np->lock);
 	 */
-	union ring_type get_rx, put_rx, first_rx, last_rx;
+	union ring_type get_rx, put_rx, last_rx;
 	struct nv_skb_map *get_rx_ctx, *put_rx_ctx;
 	struct nv_skb_map *first_rx_ctx, *last_rx_ctx;
 	struct nv_skb_map *rx_skb;
@@ -822,9 +822,9 @@ struct fe_priv {
 	/*
 	 * tx specific fields.
 	 */
-	union ring_type get_tx, put_tx, first_tx, last_tx;
+	union ring_type get_tx, put_tx, last_tx;
 	struct nv_skb_map *get_tx_ctx, *put_tx_ctx;
-	struct nv_skb_map *first_tx_ctx, *last_tx_ctx;
+	struct nv_skb_map *last_tx_ctx;
 	struct nv_skb_map *tx_skb;
 
 	union ring_type tx_ring;
@@ -1812,12 +1812,12 @@ static int nv_alloc_rx(struct net_device *dev)
 	struct ring_desc *less_rx;
 
 	less_rx = np->get_rx.orig;
-	if (less_rx-- == np->first_rx.orig)
+	if (less_rx-- == np->rx_ring.orig)
 		less_rx = np->last_rx.orig;
 
 	while (np->put_rx.orig != less_rx) {
 		struct sk_buff *skb = netdev_alloc_skb(dev, np->rx_buf_sz + NV_RX_ALLOC_PAD);
-		if (skb) {
+		if (likely(skb)) {
 			np->put_rx_ctx->skb = skb;
 			np->put_rx_ctx->dma = dma_map_single(&np->pci_dev->dev,
 							     skb->data,
@@ -1833,7 +1833,7 @@ static int nv_alloc_rx(struct net_device *dev)
 			wmb();
 			np->put_rx.orig->flaglen = cpu_to_le32(np->rx_buf_sz | NV_RX_AVAIL);
 			if (unlikely(np->put_rx.orig++ == np->last_rx.orig))
-				np->put_rx.orig = np->first_rx.orig;
+				np->put_rx.orig = np->rx_ring.orig;
 			if (unlikely(np->put_rx_ctx++ == np->last_rx_ctx))
 				np->put_rx_ctx = np->first_rx_ctx;
 		} else {
@@ -1853,12 +1853,12 @@ static int nv_alloc_rx_optimized(struct net_device *dev)
 	struct ring_desc_ex *less_rx;
 
 	less_rx = np->get_rx.ex;
-	if (less_rx-- == np->first_rx.ex)
+	if (less_rx-- == np->rx_ring.ex)
 		less_rx = np->last_rx.ex;
 
 	while (np->put_rx.ex != less_rx) {
 		struct sk_buff *skb = netdev_alloc_skb(dev, np->rx_buf_sz + NV_RX_ALLOC_PAD);
-		if (skb) {
+		if (likely(skb)) {
 			np->put_rx_ctx->skb = skb;
 			np->put_rx_ctx->dma = dma_map_single(&np->pci_dev->dev,
 							     skb->data,
@@ -1875,7 +1875,7 @@ static int nv_alloc_rx_optimized(struct net_device *dev)
 			wmb();
 			np->put_rx.ex->flaglen = cpu_to_le32(np->rx_buf_sz | NV_RX2_AVAIL);
 			if (unlikely(np->put_rx.ex++ == np->last_rx.ex))
-				np->put_rx.ex = np->first_rx.ex;
+				np->put_rx.ex = np->rx_ring.ex;
 			if (unlikely(np->put_rx_ctx++ == np->last_rx_ctx))
 				np->put_rx_ctx = np->first_rx_ctx;
 		} else {
@@ -1903,7 +1903,8 @@ static void nv_init_rx(struct net_device *dev)
 	struct fe_priv *np = netdev_priv(dev);
 	int i;
 
-	np->get_rx = np->put_rx = np->first_rx = np->rx_ring;
+	np->get_rx = np->rx_ring;
+	np->put_rx = np->rx_ring;
 
 	if (!nv_optimized(np))
 		np->last_rx.orig = &np->rx_ring.orig[np->rx_ring_size-1];
@@ -1932,13 +1933,15 @@ static void nv_init_tx(struct net_device *dev)
 	struct fe_priv *np = netdev_priv(dev);
 	int i;
 
-	np->get_tx = np->put_tx = np->first_tx = np->tx_ring;
+	np->get_tx = np->tx_ring;
+	np->put_tx = np->tx_ring;
 
 	if (!nv_optimized(np))
 		np->last_tx.orig = &np->tx_ring.orig[np->tx_ring_size-1];
 	else
 		np->last_tx.ex = &np->tx_ring.ex[np->tx_ring_size-1];
-	np->get_tx_ctx = np->put_tx_ctx = np->first_tx_ctx = np->tx_skb;
+	np->get_tx_ctx = np->tx_skb;
+	np->put_tx_ctx = np->tx_skb;
 	np->last_tx_ctx = &np->tx_skb[np->tx_ring_size-1];
 	netdev_reset_queue(np->dev);
 	np->tx_pkts_in_progress = 0;
@@ -2248,9 +2251,9 @@ static netdev_tx_t nv_start_xmit(struct sk_buff *skb, struct net_device *dev)
 		offset += bcnt;
 		size -= bcnt;
 		if (unlikely(put_tx++ == np->last_tx.orig))
-			put_tx = np->first_tx.orig;
+			put_tx = np->tx_ring.orig;
 		if (unlikely(np->put_tx_ctx++ == np->last_tx_ctx))
-			np->put_tx_ctx = np->first_tx_ctx;
+			np->put_tx_ctx = np->tx_skb;
 	} while (size);
 
 	/* setup the fragments */
@@ -2276,7 +2279,7 @@ static netdev_tx_t nv_start_xmit(struct sk_buff *skb, struct net_device *dev)
 				do {
 					nv_unmap_txskb(np, start_tx_ctx);
 					if (unlikely(tmp_tx_ctx++ == np->last_tx_ctx))
-						tmp_tx_ctx = np->first_tx_ctx;
+						tmp_tx_ctx = np->tx_skb;
 				} while (tmp_tx_ctx != np->put_tx_ctx);
 				dev_kfree_skb_any(skb);
 				np->put_tx_ctx = start_tx_ctx;
@@ -2294,18 +2297,18 @@ static netdev_tx_t nv_start_xmit(struct sk_buff *skb, struct net_device *dev)
 			offset += bcnt;
 			frag_size -= bcnt;
 			if (unlikely(put_tx++ == np->last_tx.orig))
-				put_tx = np->first_tx.orig;
+				put_tx = np->tx_ring.orig;
 			if (unlikely(np->put_tx_ctx++ == np->last_tx_ctx))
-				np->put_tx_ctx = np->first_tx_ctx;
+				np->put_tx_ctx = np->tx_skb;
 		} while (frag_size);
 	}
 
-	if (unlikely(put_tx == np->first_tx.orig))
+	if (unlikely(put_tx == np->tx_ring.orig))
 		prev_tx = np->last_tx.orig;
 	else
 		prev_tx = put_tx - 1;
 
-	if (unlikely(np->put_tx_ctx == np->first_tx_ctx))
+	if (unlikely(np->put_tx_ctx == np->tx_skb))
 		prev_tx_ctx = np->last_tx_ctx;
 	else
 		prev_tx_ctx = np->put_tx_ctx - 1;
@@ -2406,9 +2409,9 @@ static netdev_tx_t nv_start_xmit_optimized(struct sk_buff *skb,
 		offset += bcnt;
 		size -= bcnt;
 		if (unlikely(put_tx++ == np->last_tx.ex))
-			put_tx = np->first_tx.ex;
+			put_tx = np->tx_ring.ex;
 		if (unlikely(np->put_tx_ctx++ == np->last_tx_ctx))
-			np->put_tx_ctx = np->first_tx_ctx;
+			np->put_tx_ctx = np->tx_skb;
 	} while (size);
 
 	/* setup the fragments */
@@ -2434,7 +2437,7 @@ static netdev_tx_t nv_start_xmit_optimized(struct sk_buff *skb,
 				do {
 					nv_unmap_txskb(np, start_tx_ctx);
 					if (unlikely(tmp_tx_ctx++ == np->last_tx_ctx))
-						tmp_tx_ctx = np->first_tx_ctx;
+						tmp_tx_ctx = np->tx_skb;
 				} while (tmp_tx_ctx != np->put_tx_ctx);
 				dev_kfree_skb_any(skb);
 				np->put_tx_ctx = start_tx_ctx;
@@ -2452,18 +2455,18 @@ static netdev_tx_t nv_start_xmit_optimized(struct sk_buff *skb,
 			offset += bcnt;
 			frag_size -= bcnt;
 			if (unlikely(put_tx++ == np->last_tx.ex))
-				put_tx = np->first_tx.ex;
+				put_tx = np->tx_ring.ex;
 			if (unlikely(np->put_tx_ctx++ == np->last_tx_ctx))
-				np->put_tx_ctx = np->first_tx_ctx;
+				np->put_tx_ctx = np->tx_skb;
 		} while (frag_size);
 	}
 
-	if (unlikely(put_tx == np->first_tx.ex))
+	if (unlikely(put_tx == np->tx_ring.ex))
 		prev_tx = np->last_tx.ex;
 	else
 		prev_tx = put_tx - 1;
 
-	if (unlikely(np->put_tx_ctx == np->first_tx_ctx))
+	if (unlikely(np->put_tx_ctx == np->tx_skb))
 		prev_tx_ctx = np->last_tx_ctx;
 	else
 		prev_tx_ctx = np->put_tx_ctx - 1;
@@ -2563,7 +2566,7 @@ static int nv_tx_done(struct net_device *dev, int limit)
 
 		if (np->desc_ver == DESC_VER_1) {
 			if (flags & NV_TX_LASTPACKET) {
-				if (flags & NV_TX_ERROR) {
+				if (unlikely(flags & NV_TX_ERROR)) {
 					if ((flags & NV_TX_RETRYERROR)
 					    && !(flags & NV_TX_RETRYCOUNT_MASK))
 						nv_legacybackoff_reseed(dev);
@@ -2580,7 +2583,7 @@ static int nv_tx_done(struct net_device *dev, int limit)
 			}
 		} else {
 			if (flags & NV_TX2_LASTPACKET) {
-				if (flags & NV_TX2_ERROR) {
+				if (unlikely(flags & NV_TX2_ERROR)) {
 					if ((flags & NV_TX2_RETRYERROR)
 					    && !(flags & NV_TX2_RETRYCOUNT_MASK))
 						nv_legacybackoff_reseed(dev);
@@ -2597,9 +2600,9 @@ static int nv_tx_done(struct net_device *dev, int limit)
 			}
 		}
 		if (unlikely(np->get_tx.orig++ == np->last_tx.orig))
-			np->get_tx.orig = np->first_tx.orig;
+			np->get_tx.orig = np->tx_ring.orig;
 		if (unlikely(np->get_tx_ctx++ == np->last_tx_ctx))
-			np->get_tx_ctx = np->first_tx_ctx;
+			np->get_tx_ctx = np->tx_skb;
 	}
 
 	netdev_completed_queue(np->dev, tx_work, bytes_compl);
@@ -2626,7 +2629,7 @@ static int nv_tx_done_optimized(struct net_device *dev, int limit)
 		nv_unmap_txskb(np, np->get_tx_ctx);
 
 		if (flags & NV_TX2_LASTPACKET) {
-			if (flags & NV_TX2_ERROR) {
+			if (unlikely(flags & NV_TX2_ERROR)) {
 				if ((flags & NV_TX2_RETRYERROR)
 				    && !(flags & NV_TX2_RETRYCOUNT_MASK)) {
 					if (np->driver_data & DEV_HAS_GEAR_MODE)
@@ -2651,9 +2654,9 @@ static int nv_tx_done_optimized(struct net_device *dev, int limit)
 		}
 
 		if (unlikely(np->get_tx.ex++ == np->last_tx.ex))
-			np->get_tx.ex = np->first_tx.ex;
+			np->get_tx.ex = np->tx_ring.ex;
 		if (unlikely(np->get_tx_ctx++ == np->last_tx_ctx))
-			np->get_tx_ctx = np->first_tx_ctx;
+			np->get_tx_ctx = np->tx_skb;
 	}
 
 	netdev_completed_queue(np->dev, tx_work, bytes_cleaned);
@@ -2909,7 +2912,7 @@ static int nv_rx_process(struct net_device *dev, int limit)
 		u64_stats_update_end(&np->swstats_rx_syncp);
 next_pkt:
 		if (unlikely(np->get_rx.orig++ == np->last_rx.orig))
-			np->get_rx.orig = np->first_rx.orig;
+			np->get_rx.orig = np->rx_ring.orig;
 		if (unlikely(np->get_rx_ctx++ == np->last_rx_ctx))
 			np->get_rx_ctx = np->first_rx_ctx;
 
@@ -2998,7 +3001,7 @@ static int nv_rx_process_optimized(struct net_device *dev, int limit)
 		}
 next_pkt:
 		if (unlikely(np->get_rx.ex++ == np->last_rx.ex))
-			np->get_rx.ex = np->first_rx.ex;
+			np->get_rx.ex = np->rx_ring.ex;
 		if (unlikely(np->get_rx_ctx++ == np->last_rx_ctx))
 			np->get_rx_ctx = np->first_rx_ctx;
 
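The forcedeth changes drop the cached first_rx/first_tx cursors (and
first_tx_ctx): the "first" element is simply the ring base, so the wrap
tests compare against last_* and reset to np->rx_ring, np->tx_ring or
np->tx_skb directly. A simplified sketch of the wrap idiom, with types
reduced from the driver's union ring_type:

	struct ring_desc { u32 flaglen; };	/* simplified descriptor */

	struct ring {
		struct ring_desc *base;	/* was first_rx/first_tx */
		struct ring_desc *last;	/* &base[ring_size - 1] */
		struct ring_desc *put;	/* producer cursor */
	};

	static void ring_advance(struct ring *r)
	{
		/* Post-increment, then wrap back to the ring base. */
		if (unlikely(r->put++ == r->last))
			r->put = r->base;
	}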
diff --git a/drivers/net/ethernet/pasemi/pasemi_mac.c b/drivers/net/ethernet/pasemi/pasemi_mac.c
index c9a55b7..07a2eb3 100644
--- a/drivers/net/ethernet/pasemi/pasemi_mac.c
+++ b/drivers/net/ethernet/pasemi/pasemi_mac.c
@@ -212,9 +212,7 @@ static int pasemi_get_mac_addr(struct pasemi_mac *mac)
 		return -ENOENT;
 	}
 
-	if (sscanf(maddr, "%hhx:%hhx:%hhx:%hhx:%hhx:%hhx",
-		   &addr[0], &addr[1], &addr[2], &addr[3], &addr[4], &addr[5])
-	    != ETH_ALEN) {
+	if (!mac_pton(maddr, addr)) {
 		dev_warn(&pdev->dev,
 			 "can't parse mac address, not configuring\n");
 		return -EINVAL;
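
mac_pton() (lib/net_utils.c) parses the canonical xx:xx:xx:xx:xx:xx form
into a 6-byte array and returns false on malformed input; unlike the
open-coded sscanf(), whose %hhx conversions would also accept
single-digit octets, it insists on two hex digits per byte. Usage sketch
with a made-up address:

	u8 addr[ETH_ALEN];

	if (!mac_pton("00:11:22:33:44:55", addr))
		return -EINVAL;	/* not a valid MAC string */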
diff --git a/drivers/net/ethernet/qlogic/Kconfig b/drivers/net/ethernet/qlogic/Kconfig
index 26ddf09..0ee2490 100644
--- a/drivers/net/ethernet/qlogic/Kconfig
+++ b/drivers/net/ethernet/qlogic/Kconfig
@@ -85,6 +85,7 @@
 	tristate "QLogic QED 25/40/100Gb core driver"
 	depends on PCI
 	select ZLIB_INFLATE
+	select CRC8
 	---help---
 	  This enables the support for ...
 
diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.c
index 0a66389..1cd39c9 100644
--- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.c
+++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.c
@@ -2502,12 +2502,10 @@ netxen_collect_minidump(struct netxen_adapter *adapter)
 {
 	int ret = 0;
 	struct netxen_minidump_template_hdr *hdr;
-	struct timespec val;
 	hdr = (struct netxen_minidump_template_hdr *)
 				adapter->mdump.md_template;
 	hdr->driver_capture_mask = adapter->mdump.md_capture_mask;
-	jiffies_to_timespec(jiffies, &val);
-	hdr->driver_timestamp = (u32) val.tv_sec;
+	hdr->driver_timestamp = ktime_get_seconds();
 	hdr->driver_info_word2 = adapter->fw_version;
 	hdr->driver_info_word3 = NXRD32(adapter, CRB_DRIVER_VERSION);
 	ret = netxen_parse_md_template(adapter);
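
The netxen change is y2038 cleanup: jiffies_to_timespec() went through
the deprecated struct timespec, whose tv_sec is only 32 bits wide on
32-bit architectures, while ktime_get_seconds() returns a time64_t of
monotonic seconds directly. Both approximate seconds since boot, so
driver_timestamp keeps its meaning; only the assignment into the
header's 32-bit field truncates:

	/* Sketch of the equivalent of the old jiffies/timespec dance: */
	hdr->driver_timestamp = (u32)ktime_get_seconds();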
diff --git a/drivers/net/ethernet/qlogic/qed/qed.h b/drivers/net/ethernet/qlogic/qed/qed.h
index 91003bc..6948855 100644
--- a/drivers/net/ethernet/qlogic/qed/qed.h
+++ b/drivers/net/ethernet/qlogic/qed/qed.h
@@ -52,10 +52,10 @@
 
 extern const struct qed_common_ops qed_common_ops_pass;
 
-#define QED_MAJOR_VERSION               8
-#define QED_MINOR_VERSION               10
-#define QED_REVISION_VERSION            11
-#define QED_ENGINEERING_VERSION 21
+#define QED_MAJOR_VERSION		8
+#define QED_MINOR_VERSION		33
+#define QED_REVISION_VERSION		0
+#define QED_ENGINEERING_VERSION		20
 
 #define QED_VERSION						 \
 	((QED_MAJOR_VERSION << 24) | (QED_MINOR_VERSION << 16) | \
@@ -778,8 +778,8 @@ static inline u8 qed_concrete_to_sw_fid(struct qed_dev *cdev,
 	return sw_fid;
 }
 
-#define PURE_LB_TC 8
-#define PKT_LB_TC 9
+#define PKT_LB_TC	9
+#define MAX_NUM_VOQS_E4	20
 
 int qed_configure_vport_wfq(struct qed_dev *cdev, u16 vp_id, u32 rate);
 void qed_configure_vp_wfq_on_link_change(struct qed_dev *cdev,
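
The version bump (8.10.11.21 -> 8.33.0.20) feeds the existing
QED_VERSION macro, which packs one byte per field into a u32. Worked out
for the new numbers (the revision and engineering fields fill bits 15:0
per the macro's continuation):

	/*
	 * QED_VERSION = (8 << 24) | (33 << 16) | (0 << 8) | 20
	 *             = 0x08000000 | 0x00210000 | 0x0 | 0x14
	 *             = 0x08210014
	 */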
diff --git a/drivers/net/ethernet/qlogic/qed/qed_cxt.c b/drivers/net/ethernet/qlogic/qed/qed_cxt.c
index afd07ad..6f546e8 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_cxt.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_cxt.c
@@ -86,22 +86,22 @@
 
 /* connection context union */
 union conn_context {
-	struct core_conn_context core_ctx;
-	struct eth_conn_context eth_ctx;
-	struct iscsi_conn_context iscsi_ctx;
-	struct fcoe_conn_context fcoe_ctx;
-	struct roce_conn_context roce_ctx;
+	struct e4_core_conn_context core_ctx;
+	struct e4_eth_conn_context eth_ctx;
+	struct e4_iscsi_conn_context iscsi_ctx;
+	struct e4_fcoe_conn_context fcoe_ctx;
+	struct e4_roce_conn_context roce_ctx;
 };
 
 /* TYPE-0 task context - iSCSI, FCOE */
 union type0_task_context {
-	struct iscsi_task_context iscsi_ctx;
-	struct fcoe_task_context fcoe_ctx;
+	struct e4_iscsi_task_context iscsi_ctx;
+	struct e4_fcoe_task_context fcoe_ctx;
 };
 
 /* TYPE-1 task context - ROCE */
 union type1_task_context {
-	struct rdma_task_context roce_ctx;
+	struct e4_rdma_task_context roce_ctx;
 };
 
 struct src_ent {
@@ -109,8 +109,8 @@ struct src_ent {
 	u64 next;
 };
 
-#define CDUT_SEG_ALIGNMET 3	/* in 4k chunks */
-#define CDUT_SEG_ALIGNMET_IN_BYTES (1 << (CDUT_SEG_ALIGNMET + 12))
+#define CDUT_SEG_ALIGNMET		3 /* in 4k chunks */
+#define CDUT_SEG_ALIGNMET_IN_BYTES	BIT(CDUT_SEG_ALIGNMET + 12)
 
 #define CONN_CXT_SIZE(p_hwfn) \
 	ALIGNED_TYPE_SIZE(union conn_context, p_hwfn)
@@ -742,7 +742,7 @@ int qed_cxt_cfg_ilt_compute(struct qed_hwfn *p_hwfn, u32 *line_count)
 	p_blk = qed_cxt_set_blk(&p_cli->pf_blks[0]);
 
 	qed_cxt_qm_iids(p_hwfn, &qm_iids);
-	total = qed_qm_pf_mem_size(p_hwfn->rel_pf_id, qm_iids.cids,
+	total = qed_qm_pf_mem_size(qm_iids.cids,
 				   qm_iids.vf_cids, qm_iids.tids,
 				   p_hwfn->qm_info.num_pqs,
 				   p_hwfn->qm_info.num_vf_pqs);
@@ -1055,11 +1055,10 @@ static int qed_ilt_blk_alloc(struct qed_hwfn *p_hwfn,
 		u32 size;
 
 		size = min_t(u32, sz_left, p_blk->real_size_in_page);
-		p_virt = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
-					    size, &p_phys, GFP_KERNEL);
+		p_virt = dma_zalloc_coherent(&p_hwfn->cdev->pdev->dev, size,
+					     &p_phys, GFP_KERNEL);
 		if (!p_virt)
 			return -ENOMEM;
-		memset(p_virt, 0, size);
 
 		ilt_shadow[line].p_phys = p_phys;
 		ilt_shadow[line].p_virt = p_virt;
@@ -1496,20 +1495,24 @@ static void qed_cdu_init_pf(struct qed_hwfn *p_hwfn)
 	}
 }
 
-void qed_qm_init_pf(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
+void qed_qm_init_pf(struct qed_hwfn *p_hwfn,
+		    struct qed_ptt *p_ptt, bool is_pf_loading)
 {
-	struct qed_qm_pf_rt_init_params params;
 	struct qed_qm_info *qm_info = &p_hwfn->qm_info;
+	struct qed_qm_pf_rt_init_params params;
+	struct qed_mcp_link_state *p_link;
 	struct qed_qm_iids iids;
 
 	memset(&iids, 0, sizeof(iids));
 	qed_cxt_qm_iids(p_hwfn, &iids);
 
+	p_link = &QED_LEADING_HWFN(p_hwfn->cdev)->mcp_info->link_output;
+
 	memset(&params, 0, sizeof(params));
 	params.port_id = p_hwfn->port_id;
 	params.pf_id = p_hwfn->rel_pf_id;
 	params.max_phys_tcs_per_port = qm_info->max_phys_tcs_per_port;
-	params.is_first_pf = p_hwfn->first_on_engine;
+	params.is_pf_loading = is_pf_loading;
 	params.num_pf_cids = iids.cids;
 	params.num_vf_cids = iids.vf_cids;
 	params.num_tids = iids.tids;
@@ -1520,6 +1523,7 @@ void qed_qm_init_pf(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
 	params.num_vports = qm_info->num_vports;
 	params.pf_wfq = qm_info->pf_wfq;
 	params.pf_rl = qm_info->pf_rl;
+	params.link_speed = p_link->speed;
 	params.pq_params = qm_info->qm_pq_params;
 	params.vport_params = qm_info->qm_vport_params;
 
@@ -1883,7 +1887,7 @@ void qed_cxt_hw_init_common(struct qed_hwfn *p_hwfn)
 
 void qed_cxt_hw_init_pf(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
 {
-	qed_qm_init_pf(p_hwfn, p_ptt);
+	qed_qm_init_pf(p_hwfn, p_ptt, true);
 	qed_cm_init_pf(p_hwfn);
 	qed_dq_init_pf(p_hwfn);
 	qed_cdu_init_pf(p_hwfn);
@@ -2303,14 +2307,13 @@ qed_cxt_dynamic_ilt_alloc(struct qed_hwfn *p_hwfn,
 		goto out0;
 	}
 
-	p_virt = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
-				    p_blk->real_size_in_page,
-				    &p_phys, GFP_KERNEL);
+	p_virt = dma_zalloc_coherent(&p_hwfn->cdev->pdev->dev,
+				     p_blk->real_size_in_page, &p_phys,
+				     GFP_KERNEL);
 	if (!p_virt) {
 		rc = -ENOMEM;
 		goto out1;
 	}
-	memset(p_virt, 0, p_blk->real_size_in_page);
 
 	/* configuration of refTagMask to 0xF is required for RoCE DIF MR only,
 	 * to compensate for a HW bug, but it is configured even if DIF is not
@@ -2326,7 +2329,7 @@ qed_cxt_dynamic_ilt_alloc(struct qed_hwfn *p_hwfn,
 		for (elem_i = 0; elem_i < elems_per_p; elem_i++) {
 			elem = (union type1_task_context *)elem_start;
 			SET_FIELD(elem->roce_ctx.tdif_context.flags1,
-				  TDIF_TASK_CONTEXT_REFTAGMASK, 0xf);
+				  TDIF_TASK_CONTEXT_REF_TAG_MASK, 0xf);
 			elem_start += TYPE1_TASK_CXT_SIZE(p_hwfn);
 		}
 	}
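
Two hunks above collapse the dma_alloc_coherent() + memset() pair into
dma_zalloc_coherent(), which hands back an already zeroed buffer.
(Likewise, the new CDUT_SEG_ALIGNMET_IN_BYTES spelling is value-equal:
BIT(3 + 12) = 1 << 15 = 32768 bytes, i.e. 8 chunks of 4 KiB.) The
allocation pattern, sketched with the driver's names:

	/* Zeroing DMA allocation: no follow-up memset() required. */
	p_virt = dma_zalloc_coherent(&p_hwfn->cdev->pdev->dev, size,
				     &p_phys, GFP_KERNEL);
	if (!p_virt)
		return -ENOMEM;
	/* p_virt already contains all zeroes here. */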
diff --git a/drivers/net/ethernet/qlogic/qed/qed_cxt.h b/drivers/net/ethernet/qlogic/qed/qed_cxt.h
index 1783634..a4e9586 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_cxt.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_cxt.h
@@ -169,8 +169,10 @@ void qed_cxt_hw_init_pf(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);
  *
  * @param p_hwfn
  * @param p_ptt
+ * @param is_pf_loading
  */
-void qed_qm_init_pf(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);
+void qed_qm_init_pf(struct qed_hwfn *p_hwfn,
+		    struct qed_ptt *p_ptt, bool is_pf_loading);
 
 /**
  * @brief Reconfigures QM pf on the fly
diff --git a/drivers/net/ethernet/qlogic/qed/qed_dcbx.c b/drivers/net/ethernet/qlogic/qed/qed_dcbx.c
index fe7c1f2..449777f 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_dcbx.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_dcbx.c
@@ -954,9 +954,7 @@ void qed_dcbx_set_pf_update_params(struct qed_dcbx_results *p_src,
 				   struct pf_update_ramrod_data *p_dest)
 {
 	struct protocol_dcb_data *p_dcb_data;
-	bool update_flag = false;
-
-	p_dest->pf_id = p_src->pf_id;
+	u8 update_flag;
 
 	update_flag = p_src->arr[DCBX_PROTOCOL_FCOE].update;
 	p_dest->update_fcoe_dcb_data_mode = update_flag;
diff --git a/drivers/net/ethernet/qlogic/qed/qed_debug.c b/drivers/net/ethernet/qlogic/qed/qed_debug.c
index 03c3cf7..f2633ec 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_debug.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_debug.c
@@ -21,25 +21,26 @@ enum mem_groups {
 	MEM_GROUP_DMAE_MEM,
 	MEM_GROUP_CM_MEM,
 	MEM_GROUP_QM_MEM,
-	MEM_GROUP_TM_MEM,
+	MEM_GROUP_DORQ_MEM,
 	MEM_GROUP_BRB_RAM,
 	MEM_GROUP_BRB_MEM,
 	MEM_GROUP_PRS_MEM,
-	MEM_GROUP_SDM_MEM,
 	MEM_GROUP_IOR,
-	MEM_GROUP_RAM,
 	MEM_GROUP_BTB_RAM,
-	MEM_GROUP_RDIF_CTX,
-	MEM_GROUP_TDIF_CTX,
-	MEM_GROUP_CFC_MEM,
 	MEM_GROUP_CONN_CFC_MEM,
 	MEM_GROUP_TASK_CFC_MEM,
 	MEM_GROUP_CAU_PI,
 	MEM_GROUP_CAU_MEM,
 	MEM_GROUP_PXP_ILT,
+	MEM_GROUP_TM_MEM,
+	MEM_GROUP_SDM_MEM,
 	MEM_GROUP_PBUF,
+	MEM_GROUP_RAM,
 	MEM_GROUP_MULD_MEM,
 	MEM_GROUP_BTB_MEM,
+	MEM_GROUP_RDIF_CTX,
+	MEM_GROUP_TDIF_CTX,
+	MEM_GROUP_CFC_MEM,
 	MEM_GROUP_IGU_MEM,
 	MEM_GROUP_IGU_MSIX,
 	MEM_GROUP_CAU_SB,
@@ -54,25 +55,26 @@ static const char * const s_mem_group_names[] = {
 	"DMAE_MEM",
 	"CM_MEM",
 	"QM_MEM",
-	"TM_MEM",
+	"DORQ_MEM",
 	"BRB_RAM",
 	"BRB_MEM",
 	"PRS_MEM",
-	"SDM_MEM",
 	"IOR",
-	"RAM",
 	"BTB_RAM",
-	"RDIF_CTX",
-	"TDIF_CTX",
-	"CFC_MEM",
 	"CONN_CFC_MEM",
 	"TASK_CFC_MEM",
 	"CAU_PI",
 	"CAU_MEM",
 	"PXP_ILT",
+	"TM_MEM",
+	"SDM_MEM",
 	"PBUF",
+	"RAM",
 	"MULD_MEM",
 	"BTB_MEM",
+	"RDIF_CTX",
+	"TDIF_CTX",
+	"CFC_MEM",
 	"IGU_MEM",
 	"IGU_MSIX",
 	"CAU_SB",
@@ -92,11 +94,6 @@ static u32 cond7(const u32 *r, const u32 *imm)
 	return ((r[0] >> imm[0]) & imm[1]) != imm[2];
 }
 
-static u32 cond14(const u32 *r, const u32 *imm)
-{
-	return (r[0] != imm[0]) && (((r[1] >> imm[1]) & imm[2]) == imm[3]);
-}
-
 static u32 cond6(const u32 *r, const u32 *imm)
 {
 	return (r[0] & imm[0]) != imm[1];
@@ -174,7 +171,6 @@ static u32(*cond_arr[]) (const u32 *r, const u32 *imm) = {
 	cond11,
 	cond12,
 	cond13,
-	cond14,
 };
 
 /******************************* Data Types **********************************/
@@ -203,6 +199,8 @@ struct chip_defs {
 struct platform_defs {
 	const char *name;
 	u32 delay_factor;
+	u32 dmae_thresh;
+	u32 log_thresh;
 };
 
 /* Storm constant definitions.
@@ -234,7 +232,7 @@ struct storm_defs {
 /* Block constant definitions */
 struct block_defs {
 	const char *name;
-	bool has_dbg_bus[MAX_CHIP_IDS];
+	bool exists[MAX_CHIP_IDS];
 	bool associated_to_storm;
 
 	/* Valid only if associated_to_storm is true */
@@ -258,8 +256,8 @@ struct block_defs {
 /* Reset register definitions */
 struct reset_reg_defs {
 	u32 addr;
-	u32 unreset_val;
 	bool exists[MAX_CHIP_IDS];
+	u32 unreset_val[MAX_CHIP_IDS];
 };
 
 struct grc_param_defs {
@@ -276,8 +274,8 @@ struct rss_mem_defs {
 	const char *mem_name;
 	const char *type_name;
 	u32 addr;
+	u32 entry_width;
 	u32 num_entries[MAX_CHIP_IDS];
-	u32 entry_width[MAX_CHIP_IDS];
 };
 
 struct vfc_ram_defs {
@@ -294,7 +292,9 @@ struct big_ram_defs {
 	enum dbg_grc_params grc_param;
 	u32 addr_reg_addr;
 	u32 data_reg_addr;
-	u32 num_of_blocks[MAX_CHIP_IDS];
+	u32 is_256b_reg_addr;
+	u32 is_256b_bit_offset[MAX_CHIP_IDS];
+	u32 ram_size[MAX_CHIP_IDS]; /* In dwords */
 };
 
 struct phy_defs {
@@ -358,20 +358,14 @@ struct phy_defs {
 			(arr)[i] = qed_rd(dev, ptt, addr); \
 	} while (0)
 
-#ifndef DWORDS_TO_BYTES
 #define DWORDS_TO_BYTES(dwords)		((dwords) * BYTES_IN_DWORD)
-#endif
-#ifndef BYTES_TO_DWORDS
 #define BYTES_TO_DWORDS(bytes)		((bytes) / BYTES_IN_DWORD)
-#endif
 
-/* extra lines include a signature line + optional latency events line */
-#ifndef NUM_DBG_LINES
+/* Extra lines include a signature line + optional latency events line */
 #define NUM_EXTRA_DBG_LINES(block_desc) \
 	(1 + ((block_desc)->has_latency_events ? 1 : 0))
 #define NUM_DBG_LINES(block_desc) \
 	((block_desc)->num_of_lines + NUM_EXTRA_DBG_LINES(block_desc))
-#endif
 
 #define RAM_LINES_TO_DWORDS(lines)	((lines) * 2)
 #define RAM_LINES_TO_BYTES(lines) \
@@ -424,9 +418,6 @@ struct phy_defs {
 #define NUM_RSS_MEM_TYPES		5
 
 #define NUM_BIG_RAM_TYPES		3
-#define BIG_RAM_BLOCK_SIZE_BYTES	128
-#define BIG_RAM_BLOCK_SIZE_DWORDS \
-	BYTES_TO_DWORDS(BIG_RAM_BLOCK_SIZE_BYTES)
 
 #define NUM_PHY_TBUS_ADDRESSES		2048
 #define PHY_DUMP_SIZE_DWORDS		(NUM_PHY_TBUS_ADDRESSES / 2)
@@ -441,23 +432,17 @@ struct phy_defs {
 
 #define FW_IMG_MAIN			1
 
-#ifndef REG_FIFO_ELEMENT_DWORDS
 #define REG_FIFO_ELEMENT_DWORDS		2
-#endif
 #define REG_FIFO_DEPTH_ELEMENTS		32
 #define REG_FIFO_DEPTH_DWORDS \
 	(REG_FIFO_ELEMENT_DWORDS * REG_FIFO_DEPTH_ELEMENTS)
 
-#ifndef IGU_FIFO_ELEMENT_DWORDS
 #define IGU_FIFO_ELEMENT_DWORDS		4
-#endif
 #define IGU_FIFO_DEPTH_ELEMENTS		64
 #define IGU_FIFO_DEPTH_DWORDS \
 	(IGU_FIFO_ELEMENT_DWORDS * IGU_FIFO_DEPTH_ELEMENTS)
 
-#ifndef PROTECTION_OVERRIDE_ELEMENT_DWORDS
 #define PROTECTION_OVERRIDE_ELEMENT_DWORDS	2
-#endif
 #define PROTECTION_OVERRIDE_DEPTH_ELEMENTS	20
 #define PROTECTION_OVERRIDE_DEPTH_DWORDS \
 	(PROTECTION_OVERRIDE_DEPTH_ELEMENTS * \
@@ -491,6 +476,11 @@ static struct chip_defs s_chip_defs[MAX_CHIP_IDS] = {
 	  {{MAX_NUM_PORTS_K2, MAX_NUM_PFS_K2, MAX_NUM_VFS_K2},
 	   {0, 0, 0},
 	   {0, 0, 0},
+	   {0, 0, 0} } },
+	{ "reserved",
+	   {{0, 0, 0},
+	   {0, 0, 0},
+	   {0, 0, 0},
 	   {0, 0, 0} } }
 };
 
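The widened initializers throughout qed_debug.c follow from the per-chip
tables being indexed by the chip id enum, which grows from two entries
(BB, K2) to three with E5; every {x, y} array gains a third slot and
s_chip_defs gains the "reserved" row. Illustrative shape (enumerator
names are assumptions, not quoted from the header):

	enum chip_ids { CHIP_BB, CHIP_K2, CHIP_E5, MAX_CHIP_IDS };

	/* e.g. block_defs.exists[] now carries one flag per chip */
	static const bool exists[MAX_CHIP_IDS] = { true, true, true };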
@@ -498,7 +488,8 @@ static struct chip_defs s_chip_defs[MAX_CHIP_IDS] = {
 static struct storm_defs s_storm_defs[] = {
 	/* Tstorm */
 	{'T', BLOCK_TSEM,
-	 {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT}, true,
+	 {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT,
+	  DBG_BUS_CLIENT_RBCT}, true,
 	 TSEM_REG_FAST_MEMORY,
 	 TSEM_REG_DBG_FRAME_MODE_BB_K2, TSEM_REG_SLOW_DBG_ACTIVE_BB_K2,
 	 TSEM_REG_SLOW_DBG_MODE_BB_K2, TSEM_REG_DBG_MODE1_CFG_BB_K2,
@@ -511,7 +502,8 @@ static struct storm_defs s_storm_defs[] = {
 
 	/* Mstorm */
 	{'M', BLOCK_MSEM,
-	 {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM}, false,
+	 {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM,
+	  DBG_BUS_CLIENT_RBCM}, false,
 	 MSEM_REG_FAST_MEMORY,
 	 MSEM_REG_DBG_FRAME_MODE_BB_K2, MSEM_REG_SLOW_DBG_ACTIVE_BB_K2,
 	 MSEM_REG_SLOW_DBG_MODE_BB_K2, MSEM_REG_DBG_MODE1_CFG_BB_K2,
@@ -524,7 +516,8 @@ static struct storm_defs s_storm_defs[] = {
 
 	/* Ustorm */
 	{'U', BLOCK_USEM,
-	 {DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU}, false,
+	 {DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU,
+	  DBG_BUS_CLIENT_RBCU}, false,
 	 USEM_REG_FAST_MEMORY,
 	 USEM_REG_DBG_FRAME_MODE_BB_K2, USEM_REG_SLOW_DBG_ACTIVE_BB_K2,
 	 USEM_REG_SLOW_DBG_MODE_BB_K2, USEM_REG_DBG_MODE1_CFG_BB_K2,
@@ -537,7 +530,8 @@ static struct storm_defs s_storm_defs[] = {
 
 	/* Xstorm */
 	{'X', BLOCK_XSEM,
-	 {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX}, false,
+	 {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX,
+	  DBG_BUS_CLIENT_RBCX}, false,
 	 XSEM_REG_FAST_MEMORY,
 	 XSEM_REG_DBG_FRAME_MODE_BB_K2, XSEM_REG_SLOW_DBG_ACTIVE_BB_K2,
 	 XSEM_REG_SLOW_DBG_MODE_BB_K2, XSEM_REG_DBG_MODE1_CFG_BB_K2,
@@ -550,7 +544,8 @@ static struct storm_defs s_storm_defs[] = {
 
 	/* Ystorm */
 	{'Y', BLOCK_YSEM,
-	 {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCY}, false,
+	 {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCY,
+	  DBG_BUS_CLIENT_RBCY}, false,
 	 YSEM_REG_FAST_MEMORY,
 	 YSEM_REG_DBG_FRAME_MODE_BB_K2, YSEM_REG_SLOW_DBG_ACTIVE_BB_K2,
 	 YSEM_REG_SLOW_DBG_MODE_BB_K2, YSEM_REG_DBG_MODE1_CFG_BB_K2,
@@ -563,7 +558,8 @@ static struct storm_defs s_storm_defs[] = {
 
 	/* Pstorm */
 	{'P', BLOCK_PSEM,
-	 {DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS}, true,
+	 {DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS,
+	  DBG_BUS_CLIENT_RBCS}, true,
 	 PSEM_REG_FAST_MEMORY,
 	 PSEM_REG_DBG_FRAME_MODE_BB_K2, PSEM_REG_SLOW_DBG_ACTIVE_BB_K2,
 	 PSEM_REG_SLOW_DBG_MODE_BB_K2, PSEM_REG_DBG_MODE1_CFG_BB_K2,
@@ -579,8 +575,8 @@ static struct storm_defs s_storm_defs[] = {
 
 static struct block_defs block_grc_defs = {
 	"grc",
-	{true, true}, false, 0,
-	{DBG_BUS_CLIENT_RBCN, DBG_BUS_CLIENT_RBCN},
+	{true, true, true}, false, 0,
+	{DBG_BUS_CLIENT_RBCN, DBG_BUS_CLIENT_RBCN, DBG_BUS_CLIENT_RBCN},
 	GRC_REG_DBG_SELECT, GRC_REG_DBG_DWORD_ENABLE,
 	GRC_REG_DBG_SHIFT, GRC_REG_DBG_FORCE_VALID,
 	GRC_REG_DBG_FORCE_FRAME,
@@ -588,30 +584,30 @@ static struct block_defs block_grc_defs = {
 };
 
 static struct block_defs block_miscs_defs = {
-	"miscs", {false, false}, false, 0,
-	{MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
+	"miscs", {true, true, true}, false, 0,
+	{MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
 	0, 0, 0, 0, 0,
 	false, false, MAX_DBG_RESET_REGS, 0
 };
 
 static struct block_defs block_misc_defs = {
-	"misc", {false, false}, false, 0,
-	{MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
+	"misc", {true, true, true}, false, 0,
+	{MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
 	0, 0, 0, 0, 0,
 	false, false, MAX_DBG_RESET_REGS, 0
 };
 
 static struct block_defs block_dbu_defs = {
-	"dbu", {false, false}, false, 0,
-	{MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
+	"dbu", {true, true, true}, false, 0,
+	{MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
 	0, 0, 0, 0, 0,
 	false, false, MAX_DBG_RESET_REGS, 0
 };
 
 static struct block_defs block_pglue_b_defs = {
 	"pglue_b",
-	{true, true}, false, 0,
-	{DBG_BUS_CLIENT_RBCH, DBG_BUS_CLIENT_RBCH},
+	{true, true, true}, false, 0,
+	{DBG_BUS_CLIENT_RBCH, DBG_BUS_CLIENT_RBCH, DBG_BUS_CLIENT_RBCH},
 	PGLUE_B_REG_DBG_SELECT, PGLUE_B_REG_DBG_DWORD_ENABLE,
 	PGLUE_B_REG_DBG_SHIFT, PGLUE_B_REG_DBG_FORCE_VALID,
 	PGLUE_B_REG_DBG_FORCE_FRAME,
@@ -620,25 +616,26 @@ static struct block_defs block_pglue_b_defs = {
 
 static struct block_defs block_cnig_defs = {
 	"cnig",
-	{false, true}, false, 0,
-	{MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCW},
-	CNIG_REG_DBG_SELECT_K2, CNIG_REG_DBG_DWORD_ENABLE_K2,
-	CNIG_REG_DBG_SHIFT_K2, CNIG_REG_DBG_FORCE_VALID_K2,
-	CNIG_REG_DBG_FORCE_FRAME_K2,
+	{true, true, true}, false, 0,
+	{MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCW,
+	 DBG_BUS_CLIENT_RBCW},
+	CNIG_REG_DBG_SELECT_K2_E5, CNIG_REG_DBG_DWORD_ENABLE_K2_E5,
+	CNIG_REG_DBG_SHIFT_K2_E5, CNIG_REG_DBG_FORCE_VALID_K2_E5,
+	CNIG_REG_DBG_FORCE_FRAME_K2_E5,
 	true, false, DBG_RESET_REG_MISCS_PL_HV, 0
 };
 
 static struct block_defs block_cpmu_defs = {
-	"cpmu", {false, false}, false, 0,
-	{MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
+	"cpmu", {true, true, true}, false, 0,
+	{MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
 	0, 0, 0, 0, 0,
 	true, false, DBG_RESET_REG_MISCS_PL_HV, 8
 };
 
 static struct block_defs block_ncsi_defs = {
 	"ncsi",
-	{true, true}, false, 0,
-	{DBG_BUS_CLIENT_RBCZ, DBG_BUS_CLIENT_RBCZ},
+	{true, true, true}, false, 0,
+	{DBG_BUS_CLIENT_RBCZ, DBG_BUS_CLIENT_RBCZ, DBG_BUS_CLIENT_RBCZ},
 	NCSI_REG_DBG_SELECT, NCSI_REG_DBG_DWORD_ENABLE,
 	NCSI_REG_DBG_SHIFT, NCSI_REG_DBG_FORCE_VALID,
 	NCSI_REG_DBG_FORCE_FRAME,
@@ -646,16 +643,16 @@ static struct block_defs block_ncsi_defs = {
 };
 
 static struct block_defs block_opte_defs = {
-	"opte", {false, false}, false, 0,
-	{MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
+	"opte", {true, true, false}, false, 0,
+	{MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
 	0, 0, 0, 0, 0,
 	true, false, DBG_RESET_REG_MISCS_PL_HV, 4
 };
 
 static struct block_defs block_bmb_defs = {
 	"bmb",
-	{true, true}, false, 0,
-	{DBG_BUS_CLIENT_RBCZ, DBG_BUS_CLIENT_RBCB},
+	{true, true, true}, false, 0,
+	{DBG_BUS_CLIENT_RBCZ, DBG_BUS_CLIENT_RBCB, DBG_BUS_CLIENT_RBCB},
 	BMB_REG_DBG_SELECT, BMB_REG_DBG_DWORD_ENABLE,
 	BMB_REG_DBG_SHIFT, BMB_REG_DBG_FORCE_VALID,
 	BMB_REG_DBG_FORCE_FRAME,
@@ -664,27 +661,28 @@ static struct block_defs block_bmb_defs = {
 
 static struct block_defs block_pcie_defs = {
 	"pcie",
-	{false, true}, false, 0,
-	{MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCH},
-	PCIE_REG_DBG_COMMON_SELECT_K2,
-	PCIE_REG_DBG_COMMON_DWORD_ENABLE_K2,
-	PCIE_REG_DBG_COMMON_SHIFT_K2,
-	PCIE_REG_DBG_COMMON_FORCE_VALID_K2,
-	PCIE_REG_DBG_COMMON_FORCE_FRAME_K2,
+	{true, true, true}, false, 0,
+	{MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCH,
+	 DBG_BUS_CLIENT_RBCH},
+	PCIE_REG_DBG_COMMON_SELECT_K2_E5,
+	PCIE_REG_DBG_COMMON_DWORD_ENABLE_K2_E5,
+	PCIE_REG_DBG_COMMON_SHIFT_K2_E5,
+	PCIE_REG_DBG_COMMON_FORCE_VALID_K2_E5,
+	PCIE_REG_DBG_COMMON_FORCE_FRAME_K2_E5,
 	false, false, MAX_DBG_RESET_REGS, 0
 };
 
 static struct block_defs block_mcp_defs = {
-	"mcp", {false, false}, false, 0,
-	{MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
+	"mcp", {true, true, true}, false, 0,
+	{MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
 	0, 0, 0, 0, 0,
 	false, false, MAX_DBG_RESET_REGS, 0
 };
 
 static struct block_defs block_mcp2_defs = {
 	"mcp2",
-	{true, true}, false, 0,
-	{DBG_BUS_CLIENT_RBCZ, DBG_BUS_CLIENT_RBCZ},
+	{true, true, true}, false, 0,
+	{DBG_BUS_CLIENT_RBCZ, DBG_BUS_CLIENT_RBCZ, DBG_BUS_CLIENT_RBCZ},
 	MCP2_REG_DBG_SELECT, MCP2_REG_DBG_DWORD_ENABLE,
 	MCP2_REG_DBG_SHIFT, MCP2_REG_DBG_FORCE_VALID,
 	MCP2_REG_DBG_FORCE_FRAME,
@@ -693,8 +691,8 @@ static struct block_defs block_mcp2_defs = {
 
 static struct block_defs block_pswhst_defs = {
 	"pswhst",
-	{true, true}, false, 0,
-	{DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
+	{true, true, true}, false, 0,
+	{DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
 	PSWHST_REG_DBG_SELECT, PSWHST_REG_DBG_DWORD_ENABLE,
 	PSWHST_REG_DBG_SHIFT, PSWHST_REG_DBG_FORCE_VALID,
 	PSWHST_REG_DBG_FORCE_FRAME,
@@ -703,8 +701,8 @@ static struct block_defs block_pswhst_defs = {
 
 static struct block_defs block_pswhst2_defs = {
 	"pswhst2",
-	{true, true}, false, 0,
-	{DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
+	{true, true, true}, false, 0,
+	{DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
 	PSWHST2_REG_DBG_SELECT, PSWHST2_REG_DBG_DWORD_ENABLE,
 	PSWHST2_REG_DBG_SHIFT, PSWHST2_REG_DBG_FORCE_VALID,
 	PSWHST2_REG_DBG_FORCE_FRAME,
@@ -713,8 +711,8 @@ static struct block_defs block_pswhst2_defs = {
 
 static struct block_defs block_pswrd_defs = {
 	"pswrd",
-	{true, true}, false, 0,
-	{DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
+	{true, true, true}, false, 0,
+	{DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
 	PSWRD_REG_DBG_SELECT, PSWRD_REG_DBG_DWORD_ENABLE,
 	PSWRD_REG_DBG_SHIFT, PSWRD_REG_DBG_FORCE_VALID,
 	PSWRD_REG_DBG_FORCE_FRAME,
@@ -723,8 +721,8 @@ static struct block_defs block_pswrd_defs = {
 
 static struct block_defs block_pswrd2_defs = {
 	"pswrd2",
-	{true, true}, false, 0,
-	{DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
+	{true, true, true}, false, 0,
+	{DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
 	PSWRD2_REG_DBG_SELECT, PSWRD2_REG_DBG_DWORD_ENABLE,
 	PSWRD2_REG_DBG_SHIFT, PSWRD2_REG_DBG_FORCE_VALID,
 	PSWRD2_REG_DBG_FORCE_FRAME,
@@ -733,8 +731,8 @@ static struct block_defs block_pswrd2_defs = {
 
 static struct block_defs block_pswwr_defs = {
 	"pswwr",
-	{true, true}, false, 0,
-	{DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
+	{true, true, true}, false, 0,
+	{DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
 	PSWWR_REG_DBG_SELECT, PSWWR_REG_DBG_DWORD_ENABLE,
 	PSWWR_REG_DBG_SHIFT, PSWWR_REG_DBG_FORCE_VALID,
 	PSWWR_REG_DBG_FORCE_FRAME,
@@ -742,16 +740,16 @@ static struct block_defs block_pswwr_defs = {
 };
 
 static struct block_defs block_pswwr2_defs = {
-	"pswwr2", {false, false}, false, 0,
-	{MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
+	"pswwr2", {true, true, true}, false, 0,
+	{MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
 	0, 0, 0, 0, 0,
 	true, false, DBG_RESET_REG_MISC_PL_HV, 3
 };
 
 static struct block_defs block_pswrq_defs = {
 	"pswrq",
-	{true, true}, false, 0,
-	{DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
+	{true, true, true}, false, 0,
+	{DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
 	PSWRQ_REG_DBG_SELECT, PSWRQ_REG_DBG_DWORD_ENABLE,
 	PSWRQ_REG_DBG_SHIFT, PSWRQ_REG_DBG_FORCE_VALID,
 	PSWRQ_REG_DBG_FORCE_FRAME,
@@ -760,8 +758,8 @@ static struct block_defs block_pswrq_defs = {
 
 static struct block_defs block_pswrq2_defs = {
 	"pswrq2",
-	{true, true}, false, 0,
-	{DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
+	{true, true, true}, false, 0,
+	{DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
 	PSWRQ2_REG_DBG_SELECT, PSWRQ2_REG_DBG_DWORD_ENABLE,
 	PSWRQ2_REG_DBG_SHIFT, PSWRQ2_REG_DBG_FORCE_VALID,
 	PSWRQ2_REG_DBG_FORCE_FRAME,
@@ -770,18 +768,19 @@ static struct block_defs block_pswrq2_defs = {
 
 static struct block_defs block_pglcs_defs = {
 	"pglcs",
-	{false, true}, false, 0,
-	{MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCH},
-	PGLCS_REG_DBG_SELECT_K2, PGLCS_REG_DBG_DWORD_ENABLE_K2,
-	PGLCS_REG_DBG_SHIFT_K2, PGLCS_REG_DBG_FORCE_VALID_K2,
-	PGLCS_REG_DBG_FORCE_FRAME_K2,
+	{true, true, true}, false, 0,
+	{MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCH,
+	 DBG_BUS_CLIENT_RBCH},
+	PGLCS_REG_DBG_SELECT_K2_E5, PGLCS_REG_DBG_DWORD_ENABLE_K2_E5,
+	PGLCS_REG_DBG_SHIFT_K2_E5, PGLCS_REG_DBG_FORCE_VALID_K2_E5,
+	PGLCS_REG_DBG_FORCE_FRAME_K2_E5,
 	true, false, DBG_RESET_REG_MISCS_PL_HV, 2
 };
 
 static struct block_defs block_ptu_defs = {
 	"ptu",
-	{true, true}, false, 0,
-	{DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
+	{true, true, true}, false, 0,
+	{DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
 	PTU_REG_DBG_SELECT, PTU_REG_DBG_DWORD_ENABLE,
 	PTU_REG_DBG_SHIFT, PTU_REG_DBG_FORCE_VALID,
 	PTU_REG_DBG_FORCE_FRAME,
@@ -790,8 +789,8 @@ static struct block_defs block_ptu_defs = {
 
 static struct block_defs block_dmae_defs = {
 	"dmae",
-	{true, true}, false, 0,
-	{DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
+	{true, true, true}, false, 0,
+	{DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
 	DMAE_REG_DBG_SELECT, DMAE_REG_DBG_DWORD_ENABLE,
 	DMAE_REG_DBG_SHIFT, DMAE_REG_DBG_FORCE_VALID,
 	DMAE_REG_DBG_FORCE_FRAME,
@@ -800,8 +799,8 @@ static struct block_defs block_dmae_defs = {
 
 static struct block_defs block_tcm_defs = {
 	"tcm",
-	{true, true}, true, DBG_TSTORM_ID,
-	{DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT},
+	{true, true, true}, true, DBG_TSTORM_ID,
+	{DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT},
 	TCM_REG_DBG_SELECT, TCM_REG_DBG_DWORD_ENABLE,
 	TCM_REG_DBG_SHIFT, TCM_REG_DBG_FORCE_VALID,
 	TCM_REG_DBG_FORCE_FRAME,
@@ -810,8 +809,8 @@ static struct block_defs block_tcm_defs = {
 
 static struct block_defs block_mcm_defs = {
 	"mcm",
-	{true, true}, true, DBG_MSTORM_ID,
-	{DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM},
+	{true, true, true}, true, DBG_MSTORM_ID,
+	{DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM, DBG_BUS_CLIENT_RBCM},
 	MCM_REG_DBG_SELECT, MCM_REG_DBG_DWORD_ENABLE,
 	MCM_REG_DBG_SHIFT, MCM_REG_DBG_FORCE_VALID,
 	MCM_REG_DBG_FORCE_FRAME,
@@ -820,8 +819,8 @@ static struct block_defs block_mcm_defs = {
 
 static struct block_defs block_ucm_defs = {
 	"ucm",
-	{true, true}, true, DBG_USTORM_ID,
-	{DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU},
+	{true, true, true}, true, DBG_USTORM_ID,
+	{DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU},
 	UCM_REG_DBG_SELECT, UCM_REG_DBG_DWORD_ENABLE,
 	UCM_REG_DBG_SHIFT, UCM_REG_DBG_FORCE_VALID,
 	UCM_REG_DBG_FORCE_FRAME,
@@ -830,8 +829,8 @@ static struct block_defs block_ucm_defs = {
 
 static struct block_defs block_xcm_defs = {
 	"xcm",
-	{true, true}, true, DBG_XSTORM_ID,
-	{DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX},
+	{true, true, true}, true, DBG_XSTORM_ID,
+	{DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX},
 	XCM_REG_DBG_SELECT, XCM_REG_DBG_DWORD_ENABLE,
 	XCM_REG_DBG_SHIFT, XCM_REG_DBG_FORCE_VALID,
 	XCM_REG_DBG_FORCE_FRAME,
@@ -840,8 +839,8 @@ static struct block_defs block_xcm_defs = {
 
 static struct block_defs block_ycm_defs = {
 	"ycm",
-	{true, true}, true, DBG_YSTORM_ID,
-	{DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCY},
+	{true, true, true}, true, DBG_YSTORM_ID,
+	{DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCY, DBG_BUS_CLIENT_RBCY},
 	YCM_REG_DBG_SELECT, YCM_REG_DBG_DWORD_ENABLE,
 	YCM_REG_DBG_SHIFT, YCM_REG_DBG_FORCE_VALID,
 	YCM_REG_DBG_FORCE_FRAME,
@@ -850,8 +849,8 @@ static struct block_defs block_ycm_defs = {
 
 static struct block_defs block_pcm_defs = {
 	"pcm",
-	{true, true}, true, DBG_PSTORM_ID,
-	{DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS},
+	{true, true, true}, true, DBG_PSTORM_ID,
+	{DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS},
 	PCM_REG_DBG_SELECT, PCM_REG_DBG_DWORD_ENABLE,
 	PCM_REG_DBG_SHIFT, PCM_REG_DBG_FORCE_VALID,
 	PCM_REG_DBG_FORCE_FRAME,
@@ -860,8 +859,8 @@ static struct block_defs block_pcm_defs = {
 
 static struct block_defs block_qm_defs = {
 	"qm",
-	{true, true}, false, 0,
-	{DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCQ},
+	{true, true, true}, false, 0,
+	{DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCQ, DBG_BUS_CLIENT_RBCQ},
 	QM_REG_DBG_SELECT, QM_REG_DBG_DWORD_ENABLE,
 	QM_REG_DBG_SHIFT, QM_REG_DBG_FORCE_VALID,
 	QM_REG_DBG_FORCE_FRAME,
@@ -870,8 +869,8 @@ static struct block_defs block_qm_defs = {
 
 static struct block_defs block_tm_defs = {
 	"tm",
-	{true, true}, false, 0,
-	{DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS},
+	{true, true, true}, false, 0,
+	{DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS},
 	TM_REG_DBG_SELECT, TM_REG_DBG_DWORD_ENABLE,
 	TM_REG_DBG_SHIFT, TM_REG_DBG_FORCE_VALID,
 	TM_REG_DBG_FORCE_FRAME,
@@ -880,8 +879,8 @@ static struct block_defs block_tm_defs = {
 
 static struct block_defs block_dorq_defs = {
 	"dorq",
-	{true, true}, false, 0,
-	{DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCY},
+	{true, true, true}, false, 0,
+	{DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCY, DBG_BUS_CLIENT_RBCY},
 	DORQ_REG_DBG_SELECT, DORQ_REG_DBG_DWORD_ENABLE,
 	DORQ_REG_DBG_SHIFT, DORQ_REG_DBG_FORCE_VALID,
 	DORQ_REG_DBG_FORCE_FRAME,
@@ -890,8 +889,8 @@ static struct block_defs block_dorq_defs = {
 
 static struct block_defs block_brb_defs = {
 	"brb",
-	{true, true}, false, 0,
-	{DBG_BUS_CLIENT_RBCR, DBG_BUS_CLIENT_RBCR},
+	{true, true, true}, false, 0,
+	{DBG_BUS_CLIENT_RBCR, DBG_BUS_CLIENT_RBCR, DBG_BUS_CLIENT_RBCR},
 	BRB_REG_DBG_SELECT, BRB_REG_DBG_DWORD_ENABLE,
 	BRB_REG_DBG_SHIFT, BRB_REG_DBG_FORCE_VALID,
 	BRB_REG_DBG_FORCE_FRAME,
@@ -900,8 +899,8 @@ static struct block_defs block_brb_defs = {
 
 static struct block_defs block_src_defs = {
 	"src",
-	{true, true}, false, 0,
-	{DBG_BUS_CLIENT_RBCF, DBG_BUS_CLIENT_RBCF},
+	{true, true, true}, false, 0,
+	{DBG_BUS_CLIENT_RBCF, DBG_BUS_CLIENT_RBCF, DBG_BUS_CLIENT_RBCF},
 	SRC_REG_DBG_SELECT, SRC_REG_DBG_DWORD_ENABLE,
 	SRC_REG_DBG_SHIFT, SRC_REG_DBG_FORCE_VALID,
 	SRC_REG_DBG_FORCE_FRAME,
@@ -910,8 +909,8 @@ static struct block_defs block_src_defs = {
 
 static struct block_defs block_prs_defs = {
 	"prs",
-	{true, true}, false, 0,
-	{DBG_BUS_CLIENT_RBCR, DBG_BUS_CLIENT_RBCR},
+	{true, true, true}, false, 0,
+	{DBG_BUS_CLIENT_RBCR, DBG_BUS_CLIENT_RBCR, DBG_BUS_CLIENT_RBCR},
 	PRS_REG_DBG_SELECT, PRS_REG_DBG_DWORD_ENABLE,
 	PRS_REG_DBG_SHIFT, PRS_REG_DBG_FORCE_VALID,
 	PRS_REG_DBG_FORCE_FRAME,
@@ -920,8 +919,8 @@ static struct block_defs block_prs_defs = {
 
 static struct block_defs block_tsdm_defs = {
 	"tsdm",
-	{true, true}, true, DBG_TSTORM_ID,
-	{DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT},
+	{true, true, true}, true, DBG_TSTORM_ID,
+	{DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT},
 	TSDM_REG_DBG_SELECT, TSDM_REG_DBG_DWORD_ENABLE,
 	TSDM_REG_DBG_SHIFT, TSDM_REG_DBG_FORCE_VALID,
 	TSDM_REG_DBG_FORCE_FRAME,
@@ -930,8 +929,8 @@ static struct block_defs block_tsdm_defs = {
 
 static struct block_defs block_msdm_defs = {
 	"msdm",
-	{true, true}, true, DBG_MSTORM_ID,
-	{DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM},
+	{true, true, true}, true, DBG_MSTORM_ID,
+	{DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM, DBG_BUS_CLIENT_RBCM},
 	MSDM_REG_DBG_SELECT, MSDM_REG_DBG_DWORD_ENABLE,
 	MSDM_REG_DBG_SHIFT, MSDM_REG_DBG_FORCE_VALID,
 	MSDM_REG_DBG_FORCE_FRAME,
@@ -940,8 +939,8 @@ static struct block_defs block_msdm_defs = {
 
 static struct block_defs block_usdm_defs = {
 	"usdm",
-	{true, true}, true, DBG_USTORM_ID,
-	{DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU},
+	{true, true, true}, true, DBG_USTORM_ID,
+	{DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU},
 	USDM_REG_DBG_SELECT, USDM_REG_DBG_DWORD_ENABLE,
 	USDM_REG_DBG_SHIFT, USDM_REG_DBG_FORCE_VALID,
 	USDM_REG_DBG_FORCE_FRAME,
@@ -950,8 +949,8 @@ static struct block_defs block_usdm_defs = {
 
 static struct block_defs block_xsdm_defs = {
 	"xsdm",
-	{true, true}, true, DBG_XSTORM_ID,
-	{DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX},
+	{true, true, true}, true, DBG_XSTORM_ID,
+	{DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX},
 	XSDM_REG_DBG_SELECT, XSDM_REG_DBG_DWORD_ENABLE,
 	XSDM_REG_DBG_SHIFT, XSDM_REG_DBG_FORCE_VALID,
 	XSDM_REG_DBG_FORCE_FRAME,
@@ -960,8 +959,8 @@ static struct block_defs block_xsdm_defs = {
 
 static struct block_defs block_ysdm_defs = {
 	"ysdm",
-	{true, true}, true, DBG_YSTORM_ID,
-	{DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCY},
+	{true, true, true}, true, DBG_YSTORM_ID,
+	{DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCY, DBG_BUS_CLIENT_RBCY},
 	YSDM_REG_DBG_SELECT, YSDM_REG_DBG_DWORD_ENABLE,
 	YSDM_REG_DBG_SHIFT, YSDM_REG_DBG_FORCE_VALID,
 	YSDM_REG_DBG_FORCE_FRAME,
@@ -970,8 +969,8 @@ static struct block_defs block_ysdm_defs = {
 
 static struct block_defs block_psdm_defs = {
 	"psdm",
-	{true, true}, true, DBG_PSTORM_ID,
-	{DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS},
+	{true, true, true}, true, DBG_PSTORM_ID,
+	{DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS},
 	PSDM_REG_DBG_SELECT, PSDM_REG_DBG_DWORD_ENABLE,
 	PSDM_REG_DBG_SHIFT, PSDM_REG_DBG_FORCE_VALID,
 	PSDM_REG_DBG_FORCE_FRAME,
@@ -980,8 +979,8 @@ static struct block_defs block_psdm_defs = {
 
 static struct block_defs block_tsem_defs = {
 	"tsem",
-	{true, true}, true, DBG_TSTORM_ID,
-	{DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT},
+	{true, true, true}, true, DBG_TSTORM_ID,
+	{DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT},
 	TSEM_REG_DBG_SELECT, TSEM_REG_DBG_DWORD_ENABLE,
 	TSEM_REG_DBG_SHIFT, TSEM_REG_DBG_FORCE_VALID,
 	TSEM_REG_DBG_FORCE_FRAME,
@@ -990,8 +989,8 @@ static struct block_defs block_tsem_defs = {
 
 static struct block_defs block_msem_defs = {
 	"msem",
-	{true, true}, true, DBG_MSTORM_ID,
-	{DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM},
+	{true, true, true}, true, DBG_MSTORM_ID,
+	{DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM, DBG_BUS_CLIENT_RBCM},
 	MSEM_REG_DBG_SELECT, MSEM_REG_DBG_DWORD_ENABLE,
 	MSEM_REG_DBG_SHIFT, MSEM_REG_DBG_FORCE_VALID,
 	MSEM_REG_DBG_FORCE_FRAME,
@@ -1000,8 +999,8 @@ static struct block_defs block_msem_defs = {
 
 static struct block_defs block_usem_defs = {
 	"usem",
-	{true, true}, true, DBG_USTORM_ID,
-	{DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU},
+	{true, true, true}, true, DBG_USTORM_ID,
+	{DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU},
 	USEM_REG_DBG_SELECT, USEM_REG_DBG_DWORD_ENABLE,
 	USEM_REG_DBG_SHIFT, USEM_REG_DBG_FORCE_VALID,
 	USEM_REG_DBG_FORCE_FRAME,
@@ -1010,8 +1009,8 @@ static struct block_defs block_usem_defs = {
 
 static struct block_defs block_xsem_defs = {
 	"xsem",
-	{true, true}, true, DBG_XSTORM_ID,
-	{DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX},
+	{true, true, true}, true, DBG_XSTORM_ID,
+	{DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX},
 	XSEM_REG_DBG_SELECT, XSEM_REG_DBG_DWORD_ENABLE,
 	XSEM_REG_DBG_SHIFT, XSEM_REG_DBG_FORCE_VALID,
 	XSEM_REG_DBG_FORCE_FRAME,
@@ -1020,8 +1019,8 @@ static struct block_defs block_xsem_defs = {
 
 static struct block_defs block_ysem_defs = {
 	"ysem",
-	{true, true}, true, DBG_YSTORM_ID,
-	{DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCY},
+	{true, true, true}, true, DBG_YSTORM_ID,
+	{DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCY, DBG_BUS_CLIENT_RBCY},
 	YSEM_REG_DBG_SELECT, YSEM_REG_DBG_DWORD_ENABLE,
 	YSEM_REG_DBG_SHIFT, YSEM_REG_DBG_FORCE_VALID,
 	YSEM_REG_DBG_FORCE_FRAME,
@@ -1030,8 +1029,8 @@ static struct block_defs block_ysem_defs = {
 
 static struct block_defs block_psem_defs = {
 	"psem",
-	{true, true}, true, DBG_PSTORM_ID,
-	{DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS},
+	{true, true, true}, true, DBG_PSTORM_ID,
+	{DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS},
 	PSEM_REG_DBG_SELECT, PSEM_REG_DBG_DWORD_ENABLE,
 	PSEM_REG_DBG_SHIFT, PSEM_REG_DBG_FORCE_VALID,
 	PSEM_REG_DBG_FORCE_FRAME,
@@ -1040,8 +1039,8 @@ static struct block_defs block_psem_defs = {
 
 static struct block_defs block_rss_defs = {
 	"rss",
-	{true, true}, false, 0,
-	{DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT},
+	{true, true, true}, false, 0,
+	{DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT},
 	RSS_REG_DBG_SELECT, RSS_REG_DBG_DWORD_ENABLE,
 	RSS_REG_DBG_SHIFT, RSS_REG_DBG_FORCE_VALID,
 	RSS_REG_DBG_FORCE_FRAME,
@@ -1050,8 +1049,8 @@ static struct block_defs block_rss_defs = {
 
 static struct block_defs block_tmld_defs = {
 	"tmld",
-	{true, true}, false, 0,
-	{DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM},
+	{true, true, true}, false, 0,
+	{DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM, DBG_BUS_CLIENT_RBCM},
 	TMLD_REG_DBG_SELECT, TMLD_REG_DBG_DWORD_ENABLE,
 	TMLD_REG_DBG_SHIFT, TMLD_REG_DBG_FORCE_VALID,
 	TMLD_REG_DBG_FORCE_FRAME,
@@ -1060,8 +1059,8 @@ static struct block_defs block_tmld_defs = {
 
 static struct block_defs block_muld_defs = {
 	"muld",
-	{true, true}, false, 0,
-	{DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU},
+	{true, true, true}, false, 0,
+	{DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU},
 	MULD_REG_DBG_SELECT, MULD_REG_DBG_DWORD_ENABLE,
 	MULD_REG_DBG_SHIFT, MULD_REG_DBG_FORCE_VALID,
 	MULD_REG_DBG_FORCE_FRAME,
@@ -1070,8 +1069,9 @@ static struct block_defs block_muld_defs = {
 
 static struct block_defs block_yuld_defs = {
 	"yuld",
-	{true, true}, false, 0,
-	{DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU},
+	{true, true, false}, false, 0,
+	{DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU,
+	 MAX_DBG_BUS_CLIENTS},
 	YULD_REG_DBG_SELECT_BB_K2, YULD_REG_DBG_DWORD_ENABLE_BB_K2,
 	YULD_REG_DBG_SHIFT_BB_K2, YULD_REG_DBG_FORCE_VALID_BB_K2,
 	YULD_REG_DBG_FORCE_FRAME_BB_K2,
@@ -1081,18 +1081,40 @@ static struct block_defs block_yuld_defs = {
 
 static struct block_defs block_xyld_defs = {
 	"xyld",
-	{true, true}, false, 0,
-	{DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX},
+	{true, true, true}, false, 0,
+	{DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX},
 	XYLD_REG_DBG_SELECT, XYLD_REG_DBG_DWORD_ENABLE,
 	XYLD_REG_DBG_SHIFT, XYLD_REG_DBG_FORCE_VALID,
 	XYLD_REG_DBG_FORCE_FRAME,
 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 12
 };
 
+static struct block_defs block_ptld_defs = {
+	"ptld",
+	{false, false, true}, false, 0,
+	{MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCT},
+	PTLD_REG_DBG_SELECT_E5, PTLD_REG_DBG_DWORD_ENABLE_E5,
+	PTLD_REG_DBG_SHIFT_E5, PTLD_REG_DBG_FORCE_VALID_E5,
+	PTLD_REG_DBG_FORCE_FRAME_E5,
+	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2,
+	28
+};
+
+static struct block_defs block_ypld_defs = {
+	"ypld",
+	{false, false, true}, false, 0,
+	{MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCS},
+	YPLD_REG_DBG_SELECT_E5, YPLD_REG_DBG_DWORD_ENABLE_E5,
+	YPLD_REG_DBG_SHIFT_E5, YPLD_REG_DBG_FORCE_VALID_E5,
+	YPLD_REG_DBG_FORCE_FRAME_E5,
+	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2,
+	27
+};
+
 static struct block_defs block_prm_defs = {
 	"prm",
-	{true, true}, false, 0,
-	{DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM},
+	{true, true, true}, false, 0,
+	{DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM, DBG_BUS_CLIENT_RBCM},
 	PRM_REG_DBG_SELECT, PRM_REG_DBG_DWORD_ENABLE,
 	PRM_REG_DBG_SHIFT, PRM_REG_DBG_FORCE_VALID,
 	PRM_REG_DBG_FORCE_FRAME,
@@ -1101,8 +1123,8 @@ static struct block_defs block_prm_defs = {
 
 static struct block_defs block_pbf_pb1_defs = {
 	"pbf_pb1",
-	{true, true}, false, 0,
-	{DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCV},
+	{true, true, true}, false, 0,
+	{DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCV, DBG_BUS_CLIENT_RBCV},
 	PBF_PB1_REG_DBG_SELECT, PBF_PB1_REG_DBG_DWORD_ENABLE,
 	PBF_PB1_REG_DBG_SHIFT, PBF_PB1_REG_DBG_FORCE_VALID,
 	PBF_PB1_REG_DBG_FORCE_FRAME,
@@ -1112,8 +1134,8 @@ static struct block_defs block_pbf_pb1_defs = {
 
 static struct block_defs block_pbf_pb2_defs = {
 	"pbf_pb2",
-	{true, true}, false, 0,
-	{DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCV},
+	{true, true, true}, false, 0,
+	{DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCV, DBG_BUS_CLIENT_RBCV},
 	PBF_PB2_REG_DBG_SELECT, PBF_PB2_REG_DBG_DWORD_ENABLE,
 	PBF_PB2_REG_DBG_SHIFT, PBF_PB2_REG_DBG_FORCE_VALID,
 	PBF_PB2_REG_DBG_FORCE_FRAME,
@@ -1123,8 +1145,8 @@ static struct block_defs block_pbf_pb2_defs = {
 
 static struct block_defs block_rpb_defs = {
 	"rpb",
-	{true, true}, false, 0,
-	{DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM},
+	{true, true, true}, false, 0,
+	{DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM, DBG_BUS_CLIENT_RBCM},
 	RPB_REG_DBG_SELECT, RPB_REG_DBG_DWORD_ENABLE,
 	RPB_REG_DBG_SHIFT, RPB_REG_DBG_FORCE_VALID,
 	RPB_REG_DBG_FORCE_FRAME,
@@ -1133,8 +1155,8 @@ static struct block_defs block_rpb_defs = {
 
 static struct block_defs block_btb_defs = {
 	"btb",
-	{true, true}, false, 0,
-	{DBG_BUS_CLIENT_RBCR, DBG_BUS_CLIENT_RBCV},
+	{true, true, true}, false, 0,
+	{DBG_BUS_CLIENT_RBCR, DBG_BUS_CLIENT_RBCV, DBG_BUS_CLIENT_RBCV},
 	BTB_REG_DBG_SELECT, BTB_REG_DBG_DWORD_ENABLE,
 	BTB_REG_DBG_SHIFT, BTB_REG_DBG_FORCE_VALID,
 	BTB_REG_DBG_FORCE_FRAME,
@@ -1143,8 +1165,8 @@ static struct block_defs block_btb_defs = {
 
 static struct block_defs block_pbf_defs = {
 	"pbf",
-	{true, true}, false, 0,
-	{DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCV},
+	{true, true, true}, false, 0,
+	{DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCV, DBG_BUS_CLIENT_RBCV},
 	PBF_REG_DBG_SELECT, PBF_REG_DBG_DWORD_ENABLE,
 	PBF_REG_DBG_SHIFT, PBF_REG_DBG_FORCE_VALID,
 	PBF_REG_DBG_FORCE_FRAME,
@@ -1153,8 +1175,8 @@ static struct block_defs block_pbf_defs = {
 
 static struct block_defs block_rdif_defs = {
 	"rdif",
-	{true, true}, false, 0,
-	{DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM},
+	{true, true, true}, false, 0,
+	{DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM, DBG_BUS_CLIENT_RBCM},
 	RDIF_REG_DBG_SELECT, RDIF_REG_DBG_DWORD_ENABLE,
 	RDIF_REG_DBG_SHIFT, RDIF_REG_DBG_FORCE_VALID,
 	RDIF_REG_DBG_FORCE_FRAME,
@@ -1163,8 +1185,8 @@ static struct block_defs block_rdif_defs = {
 
 static struct block_defs block_tdif_defs = {
 	"tdif",
-	{true, true}, false, 0,
-	{DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS},
+	{true, true, true}, false, 0,
+	{DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS},
 	TDIF_REG_DBG_SELECT, TDIF_REG_DBG_DWORD_ENABLE,
 	TDIF_REG_DBG_SHIFT, TDIF_REG_DBG_FORCE_VALID,
 	TDIF_REG_DBG_FORCE_FRAME,
@@ -1173,8 +1195,8 @@ static struct block_defs block_tdif_defs = {
 
 static struct block_defs block_cdu_defs = {
 	"cdu",
-	{true, true}, false, 0,
-	{DBG_BUS_CLIENT_RBCF, DBG_BUS_CLIENT_RBCF},
+	{true, true, true}, false, 0,
+	{DBG_BUS_CLIENT_RBCF, DBG_BUS_CLIENT_RBCF, DBG_BUS_CLIENT_RBCF},
 	CDU_REG_DBG_SELECT, CDU_REG_DBG_DWORD_ENABLE,
 	CDU_REG_DBG_SHIFT, CDU_REG_DBG_FORCE_VALID,
 	CDU_REG_DBG_FORCE_FRAME,
@@ -1183,8 +1205,8 @@ static struct block_defs block_cdu_defs = {
 
 static struct block_defs block_ccfc_defs = {
 	"ccfc",
-	{true, true}, false, 0,
-	{DBG_BUS_CLIENT_RBCF, DBG_BUS_CLIENT_RBCF},
+	{true, true, true}, false, 0,
+	{DBG_BUS_CLIENT_RBCF, DBG_BUS_CLIENT_RBCF, DBG_BUS_CLIENT_RBCF},
 	CCFC_REG_DBG_SELECT, CCFC_REG_DBG_DWORD_ENABLE,
 	CCFC_REG_DBG_SHIFT, CCFC_REG_DBG_FORCE_VALID,
 	CCFC_REG_DBG_FORCE_FRAME,
@@ -1193,8 +1215,8 @@ static struct block_defs block_ccfc_defs = {
 
 static struct block_defs block_tcfc_defs = {
 	"tcfc",
-	{true, true}, false, 0,
-	{DBG_BUS_CLIENT_RBCF, DBG_BUS_CLIENT_RBCF},
+	{true, true, true}, false, 0,
+	{DBG_BUS_CLIENT_RBCF, DBG_BUS_CLIENT_RBCF, DBG_BUS_CLIENT_RBCF},
 	TCFC_REG_DBG_SELECT, TCFC_REG_DBG_DWORD_ENABLE,
 	TCFC_REG_DBG_SHIFT, TCFC_REG_DBG_FORCE_VALID,
 	TCFC_REG_DBG_FORCE_FRAME,
@@ -1203,8 +1225,8 @@ static struct block_defs block_tcfc_defs = {
 
 static struct block_defs block_igu_defs = {
 	"igu",
-	{true, true}, false, 0,
-	{DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
+	{true, true, true}, false, 0,
+	{DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
 	IGU_REG_DBG_SELECT, IGU_REG_DBG_DWORD_ENABLE,
 	IGU_REG_DBG_SHIFT, IGU_REG_DBG_FORCE_VALID,
 	IGU_REG_DBG_FORCE_FRAME,
@@ -1213,42 +1235,79 @@ static struct block_defs block_igu_defs = {
 
 static struct block_defs block_cau_defs = {
 	"cau",
-	{true, true}, false, 0,
-	{DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
+	{true, true, true}, false, 0,
+	{DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
 	CAU_REG_DBG_SELECT, CAU_REG_DBG_DWORD_ENABLE,
 	CAU_REG_DBG_SHIFT, CAU_REG_DBG_FORCE_VALID,
 	CAU_REG_DBG_FORCE_FRAME,
 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 19
 };
 
+static struct block_defs block_rgfs_defs = {
+	"rgfs", {false, false, true}, false, 0,
+	{MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
+	0, 0, 0, 0, 0,
+	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 29
+};
+
+static struct block_defs block_rgsrc_defs = {
+	"rgsrc",
+	{false, false, true}, false, 0,
+	{MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCH},
+	RGSRC_REG_DBG_SELECT_E5, RGSRC_REG_DBG_DWORD_ENABLE_E5,
+	RGSRC_REG_DBG_SHIFT_E5, RGSRC_REG_DBG_FORCE_VALID_E5,
+	RGSRC_REG_DBG_FORCE_FRAME_E5,
+	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1,
+	30
+};
+
+static struct block_defs block_tgfs_defs = {
+	"tgfs", {false, false, true}, false, 0,
+	{MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
+	0, 0, 0, 0, 0,
+	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 30
+};
+
+static struct block_defs block_tgsrc_defs = {
+	"tgsrc",
+	{false, false, true}, false, 0,
+	{MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCV},
+	TGSRC_REG_DBG_SELECT_E5, TGSRC_REG_DBG_DWORD_ENABLE_E5,
+	TGSRC_REG_DBG_SHIFT_E5, TGSRC_REG_DBG_FORCE_VALID_E5,
+	TGSRC_REG_DBG_FORCE_FRAME_E5,
+	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1,
+	31
+};
+
 static struct block_defs block_umac_defs = {
 	"umac",
-	{false, true}, false, 0,
-	{MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCZ},
-	UMAC_REG_DBG_SELECT_K2, UMAC_REG_DBG_DWORD_ENABLE_K2,
-	UMAC_REG_DBG_SHIFT_K2, UMAC_REG_DBG_FORCE_VALID_K2,
-	UMAC_REG_DBG_FORCE_FRAME_K2,
+	{true, true, true}, false, 0,
+	{MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCZ,
+	 DBG_BUS_CLIENT_RBCZ},
+	UMAC_REG_DBG_SELECT_K2_E5, UMAC_REG_DBG_DWORD_ENABLE_K2_E5,
+	UMAC_REG_DBG_SHIFT_K2_E5, UMAC_REG_DBG_FORCE_VALID_K2_E5,
+	UMAC_REG_DBG_FORCE_FRAME_K2_E5,
 	true, false, DBG_RESET_REG_MISCS_PL_HV, 6
 };
 
 static struct block_defs block_xmac_defs = {
-	"xmac", {false, false}, false, 0,
-	{MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
+	"xmac", {true, false, false}, false, 0,
+	{MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
 	0, 0, 0, 0, 0,
 	false, false, MAX_DBG_RESET_REGS, 0
 };
 
 static struct block_defs block_dbg_defs = {
-	"dbg", {false, false}, false, 0,
-	{MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
+	"dbg", {true, true, true}, false, 0,
+	{MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
 	0, 0, 0, 0, 0,
 	true, true, DBG_RESET_REG_MISC_PL_PDA_VAUX, 3
 };
 
 static struct block_defs block_nig_defs = {
 	"nig",
-	{true, true}, false, 0,
-	{DBG_BUS_CLIENT_RBCN, DBG_BUS_CLIENT_RBCN},
+	{true, true, true}, false, 0,
+	{DBG_BUS_CLIENT_RBCN, DBG_BUS_CLIENT_RBCN, DBG_BUS_CLIENT_RBCN},
 	NIG_REG_DBG_SELECT, NIG_REG_DBG_DWORD_ENABLE,
 	NIG_REG_DBG_SHIFT, NIG_REG_DBG_FORCE_VALID,
 	NIG_REG_DBG_FORCE_FRAME,
@@ -1257,139 +1316,106 @@ static struct block_defs block_nig_defs = {
 
 static struct block_defs block_wol_defs = {
 	"wol",
-	{false, true}, false, 0,
-	{MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCZ},
-	WOL_REG_DBG_SELECT_K2, WOL_REG_DBG_DWORD_ENABLE_K2,
-	WOL_REG_DBG_SHIFT_K2, WOL_REG_DBG_FORCE_VALID_K2,
-	WOL_REG_DBG_FORCE_FRAME_K2,
+	{false, true, true}, false, 0,
+	{MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCZ, DBG_BUS_CLIENT_RBCZ},
+	WOL_REG_DBG_SELECT_K2_E5, WOL_REG_DBG_DWORD_ENABLE_K2_E5,
+	WOL_REG_DBG_SHIFT_K2_E5, WOL_REG_DBG_FORCE_VALID_K2_E5,
+	WOL_REG_DBG_FORCE_FRAME_K2_E5,
 	true, true, DBG_RESET_REG_MISC_PL_PDA_VAUX, 7
 };
 
 static struct block_defs block_bmbn_defs = {
 	"bmbn",
-	{false, true}, false, 0,
-	{MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCB},
-	BMBN_REG_DBG_SELECT_K2, BMBN_REG_DBG_DWORD_ENABLE_K2,
-	BMBN_REG_DBG_SHIFT_K2, BMBN_REG_DBG_FORCE_VALID_K2,
-	BMBN_REG_DBG_FORCE_FRAME_K2,
+	{false, true, true}, false, 0,
+	{MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCB,
+	 DBG_BUS_CLIENT_RBCB},
+	BMBN_REG_DBG_SELECT_K2_E5, BMBN_REG_DBG_DWORD_ENABLE_K2_E5,
+	BMBN_REG_DBG_SHIFT_K2_E5, BMBN_REG_DBG_FORCE_VALID_K2_E5,
+	BMBN_REG_DBG_FORCE_FRAME_K2_E5,
 	false, false, MAX_DBG_RESET_REGS, 0
 };
 
 static struct block_defs block_ipc_defs = {
-	"ipc", {false, false}, false, 0,
-	{MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
+	"ipc", {true, true, true}, false, 0,
+	{MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
 	0, 0, 0, 0, 0,
 	true, false, DBG_RESET_REG_MISCS_PL_UA, 8
 };
 
 static struct block_defs block_nwm_defs = {
 	"nwm",
-	{false, true}, false, 0,
-	{MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCW},
-	NWM_REG_DBG_SELECT_K2, NWM_REG_DBG_DWORD_ENABLE_K2,
-	NWM_REG_DBG_SHIFT_K2, NWM_REG_DBG_FORCE_VALID_K2,
-	NWM_REG_DBG_FORCE_FRAME_K2,
+	{false, true, true}, false, 0,
+	{MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCW, DBG_BUS_CLIENT_RBCW},
+	NWM_REG_DBG_SELECT_K2_E5, NWM_REG_DBG_DWORD_ENABLE_K2_E5,
+	NWM_REG_DBG_SHIFT_K2_E5, NWM_REG_DBG_FORCE_VALID_K2_E5,
+	NWM_REG_DBG_FORCE_FRAME_K2_E5,
 	true, false, DBG_RESET_REG_MISCS_PL_HV_2, 0
 };
 
 static struct block_defs block_nws_defs = {
 	"nws",
-	{false, true}, false, 0,
-	{MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCW},
-	NWS_REG_DBG_SELECT_K2, NWS_REG_DBG_DWORD_ENABLE_K2,
-	NWS_REG_DBG_SHIFT_K2, NWS_REG_DBG_FORCE_VALID_K2,
-	NWS_REG_DBG_FORCE_FRAME_K2,
+	{false, true, true}, false, 0,
+	{MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCW, DBG_BUS_CLIENT_RBCW},
+	NWS_REG_DBG_SELECT_K2_E5, NWS_REG_DBG_DWORD_ENABLE_K2_E5,
+	NWS_REG_DBG_SHIFT_K2_E5, NWS_REG_DBG_FORCE_VALID_K2_E5,
+	NWS_REG_DBG_FORCE_FRAME_K2_E5,
 	true, false, DBG_RESET_REG_MISCS_PL_HV, 12
 };
 
 static struct block_defs block_ms_defs = {
 	"ms",
-	{false, true}, false, 0,
-	{MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCZ},
-	MS_REG_DBG_SELECT_K2, MS_REG_DBG_DWORD_ENABLE_K2,
-	MS_REG_DBG_SHIFT_K2, MS_REG_DBG_FORCE_VALID_K2,
-	MS_REG_DBG_FORCE_FRAME_K2,
+	{false, true, true}, false, 0,
+	{MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCZ, DBG_BUS_CLIENT_RBCZ},
+	MS_REG_DBG_SELECT_K2_E5, MS_REG_DBG_DWORD_ENABLE_K2_E5,
+	MS_REG_DBG_SHIFT_K2_E5, MS_REG_DBG_FORCE_VALID_K2_E5,
+	MS_REG_DBG_FORCE_FRAME_K2_E5,
 	true, false, DBG_RESET_REG_MISCS_PL_HV, 13
 };
 
 static struct block_defs block_phy_pcie_defs = {
 	"phy_pcie",
-	{false, true}, false, 0,
-	{MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCH},
-	PCIE_REG_DBG_COMMON_SELECT_K2,
-	PCIE_REG_DBG_COMMON_DWORD_ENABLE_K2,
-	PCIE_REG_DBG_COMMON_SHIFT_K2,
-	PCIE_REG_DBG_COMMON_FORCE_VALID_K2,
-	PCIE_REG_DBG_COMMON_FORCE_FRAME_K2,
+	{false, true, true}, false, 0,
+	{MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCH,
+	 DBG_BUS_CLIENT_RBCH},
+	PCIE_REG_DBG_COMMON_SELECT_K2_E5,
+	PCIE_REG_DBG_COMMON_DWORD_ENABLE_K2_E5,
+	PCIE_REG_DBG_COMMON_SHIFT_K2_E5,
+	PCIE_REG_DBG_COMMON_FORCE_VALID_K2_E5,
+	PCIE_REG_DBG_COMMON_FORCE_FRAME_K2_E5,
 	false, false, MAX_DBG_RESET_REGS, 0
 };
 
 static struct block_defs block_led_defs = {
-	"led", {false, false}, false, 0,
-	{MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
+	"led", {false, true, true}, false, 0,
+	{MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
 	0, 0, 0, 0, 0,
 	true, false, DBG_RESET_REG_MISCS_PL_HV, 14
 };
 
 static struct block_defs block_avs_wrap_defs = {
-	"avs_wrap", {false, false}, false, 0,
-	{MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
+	"avs_wrap", {false, true, false}, false, 0,
+	{MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
 	0, 0, 0, 0, 0,
 	true, false, DBG_RESET_REG_MISCS_PL_UA, 11
 };
 
-static struct block_defs block_rgfs_defs = {
-	"rgfs", {false, false}, false, 0,
-	{MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
-	0, 0, 0, 0, 0,
-	false, false, MAX_DBG_RESET_REGS, 0
-};
-
-static struct block_defs block_rgsrc_defs = {
-	"rgsrc", {false, false}, false, 0,
-	{MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
-	0, 0, 0, 0, 0,
-	false, false, MAX_DBG_RESET_REGS, 0
-};
-
-static struct block_defs block_tgfs_defs = {
-	"tgfs", {false, false}, false, 0,
-	{MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
-	0, 0, 0, 0, 0,
-	false, false, MAX_DBG_RESET_REGS, 0
-};
-
-static struct block_defs block_tgsrc_defs = {
-	"tgsrc", {false, false}, false, 0,
-	{MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
-	0, 0, 0, 0, 0,
-	false, false, MAX_DBG_RESET_REGS, 0
-};
-
-static struct block_defs block_ptld_defs = {
-	"ptld", {false, false}, false, 0,
-	{MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
-	0, 0, 0, 0, 0,
-	false, false, MAX_DBG_RESET_REGS, 0
-};
-
-static struct block_defs block_ypld_defs = {
-	"ypld", {false, false}, false, 0,
-	{MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
+static struct block_defs block_pxpreqbus_defs = {
+	"pxpreqbus", {false, false, false}, false, 0,
+	{MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
 	0, 0, 0, 0, 0,
 	false, false, MAX_DBG_RESET_REGS, 0
 };
 
 static struct block_defs block_misc_aeu_defs = {
-	"misc_aeu", {false, false}, false, 0,
-	{MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
+	"misc_aeu", {true, true, true}, false, 0,
+	{MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
 	0, 0, 0, 0, 0,
 	false, false, MAX_DBG_RESET_REGS, 0
 };
 
 static struct block_defs block_bar0_map_defs = {
-	"bar0_map", {false, false}, false, 0,
-	{MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
+	"bar0_map", {true, true, true}, false, 0,
+	{MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
 	0, 0, 0, 0, 0,
 	false, false, MAX_DBG_RESET_REGS, 0
 };
@@ -1480,164 +1506,160 @@ static struct block_defs *s_block_defs[MAX_BLOCK_ID] = {
 	&block_phy_pcie_defs,
 	&block_led_defs,
 	&block_avs_wrap_defs,
+	&block_pxpreqbus_defs,
 	&block_misc_aeu_defs,
 	&block_bar0_map_defs,
 };
 
 static struct platform_defs s_platform_defs[] = {
-	{"asic", 1},
-	{"reserved", 0},
-	{"reserved2", 0},
-	{"reserved3", 0}
+	{"asic", 1, 256, 32768},
+	{"reserved", 0, 0, 0},
+	{"reserved2", 0, 0, 0},
+	{"reserved3", 0, 0, 0}
 };
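The two new columns here are read back later in this diff as .dmae_thresh and .log_thresh (see the reworked qed_grc_dump_addr_range() below). A sketch of the implied layout; only the last two field names are taken from this diff, the rest are assumptions:

struct platform_defs {
	const char *name;
	u32 delay_factor;	/* assumed pre-existing field */
	u32 dmae_thresh;	/* min read length (dwords) to prefer DMAE */
	u32 log_thresh;		/* dwords read between progress log prints */
};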
 
 static struct grc_param_defs s_grc_param_defs[] = {
 	/* DBG_GRC_PARAM_DUMP_TSTORM */
-	{{1, 1}, 0, 1, false, 1, 1},
+	{{1, 1, 1}, 0, 1, false, 1, 1},
 
 	/* DBG_GRC_PARAM_DUMP_MSTORM */
-	{{1, 1}, 0, 1, false, 1, 1},
+	{{1, 1, 1}, 0, 1, false, 1, 1},
 
 	/* DBG_GRC_PARAM_DUMP_USTORM */
-	{{1, 1}, 0, 1, false, 1, 1},
+	{{1, 1, 1}, 0, 1, false, 1, 1},
 
 	/* DBG_GRC_PARAM_DUMP_XSTORM */
-	{{1, 1}, 0, 1, false, 1, 1},
+	{{1, 1, 1}, 0, 1, false, 1, 1},
 
 	/* DBG_GRC_PARAM_DUMP_YSTORM */
-	{{1, 1}, 0, 1, false, 1, 1},
+	{{1, 1, 1}, 0, 1, false, 1, 1},
 
 	/* DBG_GRC_PARAM_DUMP_PSTORM */
-	{{1, 1}, 0, 1, false, 1, 1},
+	{{1, 1, 1}, 0, 1, false, 1, 1},
 
 	/* DBG_GRC_PARAM_DUMP_REGS */
-	{{1, 1}, 0, 1, false, 0, 1},
+	{{1, 1, 1}, 0, 1, false, 0, 1},
 
 	/* DBG_GRC_PARAM_DUMP_RAM */
-	{{1, 1}, 0, 1, false, 0, 1},
+	{{1, 1, 1}, 0, 1, false, 0, 1},
 
 	/* DBG_GRC_PARAM_DUMP_PBUF */
-	{{1, 1}, 0, 1, false, 0, 1},
+	{{1, 1, 1}, 0, 1, false, 0, 1},
 
 	/* DBG_GRC_PARAM_DUMP_IOR */
-	{{0, 0}, 0, 1, false, 0, 1},
+	{{0, 0, 0}, 0, 1, false, 0, 1},
 
 	/* DBG_GRC_PARAM_DUMP_VFC */
-	{{0, 0}, 0, 1, false, 0, 1},
+	{{0, 0, 0}, 0, 1, false, 0, 1},
 
 	/* DBG_GRC_PARAM_DUMP_CM_CTX */
-	{{1, 1}, 0, 1, false, 0, 1},
+	{{1, 1, 1}, 0, 1, false, 0, 1},
 
 	/* DBG_GRC_PARAM_DUMP_ILT */
-	{{1, 1}, 0, 1, false, 0, 1},
+	{{1, 1, 1}, 0, 1, false, 0, 1},
 
 	/* DBG_GRC_PARAM_DUMP_RSS */
-	{{1, 1}, 0, 1, false, 0, 1},
+	{{1, 1, 1}, 0, 1, false, 0, 1},
 
 	/* DBG_GRC_PARAM_DUMP_CAU */
-	{{1, 1}, 0, 1, false, 0, 1},
+	{{1, 1, 1}, 0, 1, false, 0, 1},
 
 	/* DBG_GRC_PARAM_DUMP_QM */
-	{{1, 1}, 0, 1, false, 0, 1},
+	{{1, 1, 1}, 0, 1, false, 0, 1},
 
 	/* DBG_GRC_PARAM_DUMP_MCP */
-	{{1, 1}, 0, 1, false, 0, 1},
+	{{1, 1, 1}, 0, 1, false, 0, 1},
 
 	/* DBG_GRC_PARAM_RESERVED */
-	{{1, 1}, 0, 1, false, 0, 1},
+	{{1, 1, 1}, 0, 1, false, 0, 1},
 
 	/* DBG_GRC_PARAM_DUMP_CFC */
-	{{1, 1}, 0, 1, false, 0, 1},
+	{{1, 1, 1}, 0, 1, false, 0, 1},
 
 	/* DBG_GRC_PARAM_DUMP_IGU */
-	{{1, 1}, 0, 1, false, 0, 1},
+	{{1, 1, 1}, 0, 1, false, 0, 1},
 
 	/* DBG_GRC_PARAM_DUMP_BRB */
-	{{0, 0}, 0, 1, false, 0, 1},
+	{{0, 0, 0}, 0, 1, false, 0, 1},
 
 	/* DBG_GRC_PARAM_DUMP_BTB */
-	{{0, 0}, 0, 1, false, 0, 1},
+	{{0, 0, 0}, 0, 1, false, 0, 1},
 
 	/* DBG_GRC_PARAM_DUMP_BMB */
-	{{0, 0}, 0, 1, false, 0, 1},
+	{{0, 0, 0}, 0, 1, false, 0, 1},
 
 	/* DBG_GRC_PARAM_DUMP_NIG */
-	{{1, 1}, 0, 1, false, 0, 1},
+	{{1, 1, 1}, 0, 1, false, 0, 1},
 
 	/* DBG_GRC_PARAM_DUMP_MULD */
-	{{1, 1}, 0, 1, false, 0, 1},
+	{{1, 1, 1}, 0, 1, false, 0, 1},
 
 	/* DBG_GRC_PARAM_DUMP_PRS */
-	{{1, 1}, 0, 1, false, 0, 1},
+	{{1, 1, 1}, 0, 1, false, 0, 1},
 
 	/* DBG_GRC_PARAM_DUMP_DMAE */
-	{{1, 1}, 0, 1, false, 0, 1},
+	{{1, 1, 1}, 0, 1, false, 0, 1},
 
 	/* DBG_GRC_PARAM_DUMP_TM */
-	{{1, 1}, 0, 1, false, 0, 1},
+	{{1, 1, 1}, 0, 1, false, 0, 1},
 
 	/* DBG_GRC_PARAM_DUMP_SDM */
-	{{1, 1}, 0, 1, false, 0, 1},
+	{{1, 1, 1}, 0, 1, false, 0, 1},
 
 	/* DBG_GRC_PARAM_DUMP_DIF */
-	{{1, 1}, 0, 1, false, 0, 1},
+	{{1, 1, 1}, 0, 1, false, 0, 1},
 
 	/* DBG_GRC_PARAM_DUMP_STATIC */
-	{{1, 1}, 0, 1, false, 0, 1},
+	{{1, 1, 1}, 0, 1, false, 0, 1},
 
 	/* DBG_GRC_PARAM_UNSTALL */
-	{{0, 0}, 0, 1, false, 0, 0},
+	{{0, 0, 0}, 0, 1, false, 0, 0},
 
 	/* DBG_GRC_PARAM_NUM_LCIDS */
-	{{MAX_LCIDS, MAX_LCIDS}, 1, MAX_LCIDS, false, MAX_LCIDS,
+	{{MAX_LCIDS, MAX_LCIDS, MAX_LCIDS}, 1, MAX_LCIDS, false, MAX_LCIDS,
 	 MAX_LCIDS},
 
 	/* DBG_GRC_PARAM_NUM_LTIDS */
-	{{MAX_LTIDS, MAX_LTIDS}, 1, MAX_LTIDS, false, MAX_LTIDS,
+	{{MAX_LTIDS, MAX_LTIDS, MAX_LTIDS}, 1, MAX_LTIDS, false, MAX_LTIDS,
 	 MAX_LTIDS},
 
 	/* DBG_GRC_PARAM_EXCLUDE_ALL */
-	{{0, 0}, 0, 1, true, 0, 0},
+	{{0, 0, 0}, 0, 1, true, 0, 0},
 
 	/* DBG_GRC_PARAM_CRASH */
-	{{0, 0}, 0, 1, true, 0, 0},
+	{{0, 0, 0}, 0, 1, true, 0, 0},
 
 	/* DBG_GRC_PARAM_PARITY_SAFE */
-	{{0, 0}, 0, 1, false, 1, 0},
+	{{0, 0, 0}, 0, 1, false, 1, 0},
 
 	/* DBG_GRC_PARAM_DUMP_CM */
-	{{1, 1}, 0, 1, false, 0, 1},
+	{{1, 1, 1}, 0, 1, false, 0, 1},
 
 	/* DBG_GRC_PARAM_DUMP_PHY */
-	{{1, 1}, 0, 1, false, 0, 1},
+	{{1, 1, 1}, 0, 1, false, 0, 1},
 
 	/* DBG_GRC_PARAM_NO_MCP */
-	{{0, 0}, 0, 1, false, 0, 0},
+	{{0, 0, 0}, 0, 1, false, 0, 0},
 
 	/* DBG_GRC_PARAM_NO_FW_VER */
-	{{0, 0}, 0, 1, false, 0, 0}
+	{{0, 0, 0}, 0, 1, false, 0, 0}
 };
 
 static struct rss_mem_defs s_rss_mem_defs[] = {
-	{ "rss_mem_cid", "rss_cid", 0,
-	  {256, 320},
-	  {32, 32} },
+	{ "rss_mem_cid", "rss_cid", 0, 32,
+	  {256, 320, 512} },
 
-	{ "rss_mem_key_msb", "rss_key", 1024,
-	  {128, 208},
-	  {256, 256} },
+	{ "rss_mem_key_msb", "rss_key", 1024, 256,
+	  {128, 208, 257} },
 
-	{ "rss_mem_key_lsb", "rss_key", 2048,
-	  {128, 208},
-	  {64, 64} },
+	{ "rss_mem_key_lsb", "rss_key", 2048, 64,
+	  {128, 208, 257} },
 
-	{ "rss_mem_info", "rss_info", 3072,
-	  {128, 208},
-	  {16, 16} },
+	{ "rss_mem_info", "rss_info", 3072, 16,
+	  {128, 208, 256} },
 
-	{ "rss_mem_ind", "rss_ind", 4096,
-	  {16384, 26624},
-	  {16, 16} }
+	{ "rss_mem_ind", "rss_ind", 4096, 16,
+	  {16384, 26624, 32768} }
 };
 
 static struct vfc_ram_defs s_vfc_ram_defs[] = {
@@ -1650,72 +1672,75 @@ static struct vfc_ram_defs s_vfc_ram_defs[] = {
 static struct big_ram_defs s_big_ram_defs[] = {
 	{ "BRB", MEM_GROUP_BRB_MEM, MEM_GROUP_BRB_RAM, DBG_GRC_PARAM_DUMP_BRB,
 	  BRB_REG_BIG_RAM_ADDRESS, BRB_REG_BIG_RAM_DATA,
-	  {4800, 5632} },
+	  MISC_REG_BLOCK_256B_EN, {0, 0, 0},
+	  {153600, 180224, 282624} },
 
 	{ "BTB", MEM_GROUP_BTB_MEM, MEM_GROUP_BTB_RAM, DBG_GRC_PARAM_DUMP_BTB,
 	  BTB_REG_BIG_RAM_ADDRESS, BTB_REG_BIG_RAM_DATA,
-	  {2880, 3680} },
+	  MISC_REG_BLOCK_256B_EN, {0, 1, 1},
+	  {92160, 117760, 168960} },
 
 	{ "BMB", MEM_GROUP_BMB_MEM, MEM_GROUP_BMB_RAM, DBG_GRC_PARAM_DUMP_BMB,
 	  BMB_REG_BIG_RAM_ADDRESS, BMB_REG_BIG_RAM_DATA,
-	  {1152, 1152} }
+	  MISCS_REG_BLOCK_256B_EN, {0, 0, 0},
+	  {36864, 36864, 36864} }
 };
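The big-RAM table now carries a 256B-mode register, a per-chip bit offset, and a per-chip size in dwords, all consumed by qed_grc_dump_big_ram() below. A hedged sketch of the matching descriptor; field names not visible elsewhere in this diff are assumptions:

struct big_ram_defs {
	const char *instance_name;
	enum mem_groups mem_group_id;
	enum mem_groups ram_mem_group_id;
	enum dbg_grc_params grc_param;
	u32 addr_reg_addr;
	u32 data_reg_addr;
	u32 is_256b_reg_addr;		/* e.g. MISC_REG_BLOCK_256B_EN */
	u32 is_256b_bit_offset[MAX_CHIP_IDS];
	u32 ram_size[MAX_CHIP_IDS];	/* in dwords */
};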
 
 static struct reset_reg_defs s_reset_regs_defs[] = {
 	/* DBG_RESET_REG_MISCS_PL_UA */
-	{ MISCS_REG_RESET_PL_UA, 0x0,
-	  {true, true} },
+	{ MISCS_REG_RESET_PL_UA,
+	  {true, true, true}, {0x0, 0x0, 0x0} },
 
 	/* DBG_RESET_REG_MISCS_PL_HV */
-	{ MISCS_REG_RESET_PL_HV, 0x0,
-	  {true, true} },
+	{ MISCS_REG_RESET_PL_HV,
+	  {true, true, true}, {0x0, 0x400, 0x600} },
 
 	/* DBG_RESET_REG_MISCS_PL_HV_2 */
-	{ MISCS_REG_RESET_PL_HV_2_K2, 0x0,
-	  {false, true} },
+	{ MISCS_REG_RESET_PL_HV_2_K2_E5,
+	  {false, true, true}, {0x0, 0x0, 0x0} },
 
 	/* DBG_RESET_REG_MISC_PL_UA */
-	{ MISC_REG_RESET_PL_UA, 0x0,
-	  {true, true} },
+	{ MISC_REG_RESET_PL_UA,
+	  {true, true, true}, {0x0, 0x0, 0x0} },
 
 	/* DBG_RESET_REG_MISC_PL_HV */
-	{ MISC_REG_RESET_PL_HV, 0x0,
-	  {true, true} },
+	{ MISC_REG_RESET_PL_HV,
+	  {true, true, true}, {0x0, 0x0, 0x0} },
 
 	/* DBG_RESET_REG_MISC_PL_PDA_VMAIN_1 */
-	{ MISC_REG_RESET_PL_PDA_VMAIN_1, 0x4404040,
-	  {true, true} },
+	{ MISC_REG_RESET_PL_PDA_VMAIN_1,
+	  {true, true, true}, {0x4404040, 0x4404040, 0x404040} },
 
 	/* DBG_RESET_REG_MISC_PL_PDA_VMAIN_2 */
-	{ MISC_REG_RESET_PL_PDA_VMAIN_2, 0x7c00007,
-	  {true, true} },
+	{ MISC_REG_RESET_PL_PDA_VMAIN_2,
+	  {true, true, true}, {0x7, 0x7c00007, 0x5c08007} },
 
 	/* DBG_RESET_REG_MISC_PL_PDA_VAUX */
-	{ MISC_REG_RESET_PL_PDA_VAUX, 0x2,
-	  {true, true} },
+	{ MISC_REG_RESET_PL_PDA_VAUX,
+	  {true, true, true}, {0x2, 0x2, 0x2} },
 };
 
 static struct phy_defs s_phy_defs[] = {
 	{"nw_phy", NWS_REG_NWS_CMU_K2,
-	 PHY_NW_IP_REG_PHY0_TOP_TBUS_ADDR_7_0_K2,
-	 PHY_NW_IP_REG_PHY0_TOP_TBUS_ADDR_15_8_K2,
-	 PHY_NW_IP_REG_PHY0_TOP_TBUS_DATA_7_0_K2,
-	 PHY_NW_IP_REG_PHY0_TOP_TBUS_DATA_11_8_K2},
-	{"sgmii_phy", MS_REG_MS_CMU_K2,
-	 PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X132_K2,
-	 PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X133_K2,
-	 PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X130_K2,
-	 PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X131_K2},
-	{"pcie_phy0", PHY_PCIE_REG_PHY0_K2,
-	 PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X132_K2,
-	 PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X133_K2,
-	 PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X130_K2,
-	 PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X131_K2},
-	{"pcie_phy1", PHY_PCIE_REG_PHY1_K2,
-	 PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X132_K2,
-	 PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X133_K2,
-	 PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X130_K2,
-	 PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X131_K2},
+	 PHY_NW_IP_REG_PHY0_TOP_TBUS_ADDR_7_0_K2_E5,
+	 PHY_NW_IP_REG_PHY0_TOP_TBUS_ADDR_15_8_K2_E5,
+	 PHY_NW_IP_REG_PHY0_TOP_TBUS_DATA_7_0_K2_E5,
+	 PHY_NW_IP_REG_PHY0_TOP_TBUS_DATA_11_8_K2_E5},
+	{"sgmii_phy", MS_REG_MS_CMU_K2_E5,
+	 PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X132_K2_E5,
+	 PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X133_K2_E5,
+	 PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X130_K2_E5,
+	 PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X131_K2_E5},
+	{"pcie_phy0", PHY_PCIE_REG_PHY0_K2_E5,
+	 PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X132_K2_E5,
+	 PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X133_K2_E5,
+	 PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X130_K2_E5,
+	 PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X131_K2_E5},
+	{"pcie_phy1", PHY_PCIE_REG_PHY1_K2_E5,
+	 PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X132_K2_E5,
+	 PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X133_K2_E5,
+	 PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X130_K2_E5,
+	 PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X131_K2_E5},
 };
 
 /**************************** Private Functions ******************************/
@@ -1774,7 +1799,9 @@ static enum dbg_status qed_dbg_dev_init(struct qed_hwfn *p_hwfn,
 	/* Initializes the GRC parameters */
 	qed_dbg_grc_init_params(p_hwfn);
 
-	dev_data->initialized = true;
+	dev_data->use_dmae = true;
+	dev_data->num_regs_read = 0;
+	dev_data->initialized = 1;
 
 	return DBG_STATUS_OK;
 }
@@ -1807,7 +1834,7 @@ static void qed_read_fw_info(struct qed_hwfn *p_hwfn,
 	 * The address is located in the last line of the Storm RAM.
 	 */
 	addr = storm->sem_fast_mem_addr + SEM_FAST_REG_INT_RAM +
-	       DWORDS_TO_BYTES(SEM_FAST_REG_INT_RAM_SIZE) -
+	       DWORDS_TO_BYTES(SEM_FAST_REG_INT_RAM_SIZE_BB_K2) -
 	       sizeof(fw_info_location);
 	dest = (u32 *)&fw_info_location;
 
@@ -2071,8 +2098,7 @@ static u32 qed_dump_common_global_params(struct qed_hwfn *p_hwfn,
 /* Writes the "last" section (including CRC) to the specified buffer at the
  * given offset. Returns the dumped size in dwords.
  */
-static u32 qed_dump_last_section(struct qed_hwfn *p_hwfn,
-				 u32 *dump_buf, u32 offset, bool dump)
+static u32 qed_dump_last_section(u32 *dump_buf, u32 offset, bool dump)
 {
 	u32 start_offset = offset;
 
@@ -2235,7 +2261,8 @@ static bool qed_grc_is_mem_included(struct qed_hwfn *p_hwfn,
 	case MEM_GROUP_CFC_MEM:
 	case MEM_GROUP_CONN_CFC_MEM:
 	case MEM_GROUP_TASK_CFC_MEM:
-		return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_CFC);
+		return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_CFC) ||
+		       qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_CM_CTX);
 	case MEM_GROUP_IGU_MEM:
 	case MEM_GROUP_IGU_MSIX:
 		return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_IGU);
@@ -2293,7 +2320,8 @@ static void qed_grc_unreset_blocks(struct qed_hwfn *p_hwfn,
 	for (block_id = 0; block_id < MAX_BLOCK_ID; block_id++) {
 		struct block_defs *block = s_block_defs[block_id];
 
-		if (block->has_reset_bit && block->unreset)
+		if (block->exists[dev_data->chip_id] && block->has_reset_bit &&
+		    block->unreset)
 			reg_val[block->reset_reg] |=
 			    BIT(block->reset_bit_offset);
 	}
@@ -2303,7 +2331,8 @@ static void qed_grc_unreset_blocks(struct qed_hwfn *p_hwfn,
 		if (!s_reset_regs_defs[i].exists[dev_data->chip_id])
 			continue;
 
-		reg_val[i] |= s_reset_regs_defs[i].unreset_val;
+		reg_val[i] |=
+			s_reset_regs_defs[i].unreset_val[dev_data->chip_id];
 
 		if (reg_val[i])
 			qed_wr(p_hwfn,
@@ -2413,6 +2442,18 @@ static u32 qed_grc_dump_regs_hdr(u32 *dump_buf,
 	return offset;
 }
 
+/* Reads the specified registers into the specified buffer.
+ * The addr and len arguments are specified in dwords.
+ */
+void qed_read_regs(struct qed_hwfn *p_hwfn,
+		   struct qed_ptt *p_ptt, u32 *buf, u32 addr, u32 len)
+{
+	u32 i;
+
+	for (i = 0; i < len; i++)
+		buf[i] = qed_rd(p_hwfn, p_ptt, DWORDS_TO_BYTES(addr + i));
+}
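For reference while reading the dword-addressed helpers: the conversion macros used throughout boil down to the following (BYTES_TO_DWORDS is visible verbatim later in this diff, where a duplicate definition is dropped; the other two are its standard counterparts):

#define BYTES_IN_DWORD			4
#define DWORDS_TO_BYTES(dwords)		((dwords) * BYTES_IN_DWORD)
#define BYTES_TO_DWORDS(bytes)		((bytes) / BYTES_IN_DWORD)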
+
 /* Dumps the GRC registers in the specified address range.
  * Returns the dumped size in dwords.
  * The addr and len arguments are specified in dwords.
@@ -2422,15 +2463,39 @@ static u32 qed_grc_dump_addr_range(struct qed_hwfn *p_hwfn,
 				   u32 *dump_buf,
 				   bool dump, u32 addr, u32 len, bool wide_bus)
 {
-	u32 byte_addr = DWORDS_TO_BYTES(addr), offset = 0, i;
+	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
 
 	if (!dump)
 		return len;
 
-	for (i = 0; i < len; i++, byte_addr += BYTES_IN_DWORD, offset++)
-		*(dump_buf + offset) = qed_rd(p_hwfn, p_ptt, byte_addr);
+	/* Print log if needed */
+	dev_data->num_regs_read += len;
+	if (dev_data->num_regs_read >=
+	    s_platform_defs[dev_data->platform_id].log_thresh) {
+		DP_VERBOSE(p_hwfn,
+			   QED_MSG_DEBUG,
+			   "Dumping %d registers...\n",
+			   dev_data->num_regs_read);
+		dev_data->num_regs_read = 0;
+	}
 
-	return offset;
+	/* Try reading using DMAE */
+	if (dev_data->use_dmae &&
+	    (len >= s_platform_defs[dev_data->platform_id].dmae_thresh ||
+	     wide_bus)) {
+		if (!qed_dmae_grc2host(p_hwfn, p_ptt, DWORDS_TO_BYTES(addr),
+				       (u64)(uintptr_t)(dump_buf), len, 0))
+			return len;
+		dev_data->use_dmae = 0;
+		DP_VERBOSE(p_hwfn,
+			   QED_MSG_DEBUG,
+			   "Failed reading from chip using DMAE, using GRC instead\n");
+	}
+
+	/* Read registers */
+	qed_read_regs(p_hwfn, p_ptt, dump_buf, addr, len);
+
+	return len;
 }
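The control flow above is a "try the fast path once, latch the fallback" pattern: DMAE is attempted for long (>= dmae_thresh dwords) or wide-bus reads, and the first failure permanently switches the session to plain GRC reads. A self-contained sketch of the same idea, with hypothetical stand-ins for qed_dmae_grc2host() and qed_read_regs():

#include <stdbool.h>
#include <stdint.h>

/* Hypothetical stand-ins: fast_read() may fail (like a DMAE transaction),
 * slow_read() cannot (like dword-by-dword GRC reads). Bodies are dummies.
 */
static int fast_read(uint32_t *buf, uint32_t addr, uint32_t len)
{
	(void)buf; (void)addr; (void)len;
	return -1;			/* pretend the DMAE transaction failed */
}

static void slow_read(uint32_t *buf, uint32_t addr, uint32_t len)
{
	while (len--)
		*buf++ = addr++;	/* dummy register contents */
}

static bool use_fast = true;

/* Same policy as qed_grc_dump_addr_range(): prefer the fast path for long
 * or wide-bus reads, and latch the fallback on the first failure.
 */
static uint32_t read_range(uint32_t *buf, uint32_t addr, uint32_t len,
			   uint32_t fast_thresh, bool wide_bus)
{
	if (use_fast && (len >= fast_thresh || wide_bus)) {
		if (!fast_read(buf, addr, len))
			return len;
		use_fast = false;	/* don't retry the fast path */
	}
	slow_read(buf, addr, len);
	return len;
}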
 
 /* Dumps GRC registers sequence header. Returns the dumped size in dwords.
@@ -2630,9 +2695,6 @@ static u32 qed_grc_dump_registers(struct qed_hwfn *p_hwfn,
 	chip = &s_chip_defs[dev_data->chip_id];
 	chip_platform = &chip->per_platform[dev_data->platform_id];
 
-	if (dump)
-		DP_VERBOSE(p_hwfn, QED_MSG_DEBUG, "Dumping registers...\n");
-
 	while (input_offset <
 	       s_dbg_arrays[BIN_BUF_DBG_DUMP_REG].size_in_dwords) {
 		const struct dbg_dump_split_hdr *split_hdr;
@@ -2966,22 +3028,12 @@ static u32 qed_grc_dump_mem_hdr(struct qed_hwfn *p_hwfn,
 
 		offset += qed_dump_str_param(dump_buf + offset,
 					     dump, "name", buf);
-		if (dump)
-			DP_VERBOSE(p_hwfn,
-				   QED_MSG_DEBUG,
-				   "Dumping %d registers from %s...\n",
-				   len, buf);
 	} else {
 		/* Dump address */
 		u32 addr_in_bytes = DWORDS_TO_BYTES(addr);
 
 		offset += qed_dump_num_param(dump_buf + offset,
 					     dump, "addr", addr_in_bytes);
-		if (dump && len > 64)
-			DP_VERBOSE(p_hwfn,
-				   QED_MSG_DEBUG,
-				   "Dumping %d registers from address 0x%x...\n",
-				   len, addr_in_bytes);
 	}
 
 	/* Dump len */
@@ -3530,17 +3582,16 @@ static u32 qed_grc_dump_rss(struct qed_hwfn *p_hwfn,
 	u8 rss_mem_id;
 
 	for (rss_mem_id = 0; rss_mem_id < NUM_RSS_MEM_TYPES; rss_mem_id++) {
-		u32 rss_addr, num_entries, entry_width, total_dwords, i;
+		u32 rss_addr, num_entries, total_dwords;
 		struct rss_mem_defs *rss_defs;
-		u32 addr, size;
+		u32 addr, num_dwords_to_read;
 		bool packed;
 
 		rss_defs = &s_rss_mem_defs[rss_mem_id];
 		rss_addr = rss_defs->addr;
 		num_entries = rss_defs->num_entries[dev_data->chip_id];
-		entry_width = rss_defs->entry_width[dev_data->chip_id];
-		total_dwords = (num_entries * entry_width) / 32;
-		packed = (entry_width == 16);
+		total_dwords = (num_entries * rss_defs->entry_width) / 32;
+		packed = (rss_defs->entry_width == 16);
 
 		offset += qed_grc_dump_mem_hdr(p_hwfn,
 					       dump_buf + offset,
@@ -3548,7 +3599,7 @@ static u32 qed_grc_dump_rss(struct qed_hwfn *p_hwfn,
 					       rss_defs->mem_name,
 					       0,
 					       total_dwords,
-					       entry_width,
+					       rss_defs->entry_width,
 					       packed,
 					       rss_defs->type_name, false, 0);
 
@@ -3559,16 +3610,20 @@ static u32 qed_grc_dump_rss(struct qed_hwfn *p_hwfn,
 		}
 
 		addr = BYTES_TO_DWORDS(RSS_REG_RSS_RAM_DATA);
-		size = RSS_REG_RSS_RAM_DATA_SIZE;
-		for (i = 0; i < total_dwords; i += size, rss_addr++) {
+		while (total_dwords) {
+			num_dwords_to_read = min_t(u32,
+						   RSS_REG_RSS_RAM_DATA_SIZE,
+						   total_dwords);
 			qed_wr(p_hwfn, p_ptt, RSS_REG_RSS_RAM_ADDR, rss_addr);
 			offset += qed_grc_dump_addr_range(p_hwfn,
 							  p_ptt,
 							  dump_buf + offset,
 							  dump,
 							  addr,
-							  size,
+							  num_dwords_to_read,
 							  false);
+			total_dwords -= num_dwords_to_read;
+			rss_addr++;
 		}
 	}
 
@@ -3581,14 +3636,18 @@ static u32 qed_grc_dump_big_ram(struct qed_hwfn *p_hwfn,
 				u32 *dump_buf, bool dump, u8 big_ram_id)
 {
 	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
-	u32 total_blocks, ram_size, offset = 0, i;
+	u32 block_size, ram_size, offset = 0, reg_val, i;
 	char mem_name[12] = "???_BIG_RAM";
 	char type_name[8] = "???_RAM";
 	struct big_ram_defs *big_ram;
 
 	big_ram = &s_big_ram_defs[big_ram_id];
-	total_blocks = big_ram->num_of_blocks[dev_data->chip_id];
-	ram_size = total_blocks * BIG_RAM_BLOCK_SIZE_DWORDS;
+	ram_size = big_ram->ram_size[dev_data->chip_id];
+
+	reg_val = qed_rd(p_hwfn, p_ptt, big_ram->is_256b_reg_addr);
+	block_size = reg_val &
+		     BIT(big_ram->is_256b_bit_offset[dev_data->chip_id]) ? 256
+									 : 128;
 
 	strncpy(type_name, big_ram->instance_name,
 		strlen(big_ram->instance_name));
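A precedence note on the block_size assignment above: bitwise '&' binds tighter than '?:', so the multi-line expression is equivalent to this fully parenthesized form:

	block_size = (reg_val &
		      BIT(big_ram->is_256b_bit_offset[dev_data->chip_id])) ?
		     256 : 128;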
@@ -3602,7 +3661,7 @@ static u32 qed_grc_dump_big_ram(struct qed_hwfn *p_hwfn,
 				       mem_name,
 				       0,
 				       ram_size,
-				       BIG_RAM_BLOCK_SIZE_BYTES * 8,
+				       block_size * 8,
 				       false, type_name, false, 0);
 
 	/* Read and dump Big RAM data */
@@ -3610,12 +3669,13 @@ static u32 qed_grc_dump_big_ram(struct qed_hwfn *p_hwfn,
 		return offset + ram_size;
 
 	/* Dump Big RAM */
-	for (i = 0; i < total_blocks / 2; i++) {
+	for (i = 0; i < DIV_ROUND_UP(ram_size, BRB_REG_BIG_RAM_DATA_SIZE);
+	     i++) {
 		u32 addr, len;
 
 		qed_wr(p_hwfn, p_ptt, big_ram->addr_reg_addr, i);
 		addr = BYTES_TO_DWORDS(big_ram->data_reg_addr);
-		len = 2 * BIG_RAM_BLOCK_SIZE_DWORDS;
+		len = BRB_REG_BIG_RAM_DATA_SIZE;
 		offset += qed_grc_dump_addr_range(p_hwfn,
 						  p_ptt,
 						  dump_buf + offset,
@@ -3649,7 +3709,7 @@ static u32 qed_grc_dump_mcp(struct qed_hwfn *p_hwfn,
 				   dump,
 				   NULL,
 				   BYTES_TO_DWORDS(MCP_REG_SCRATCH),
-				   MCP_REG_SCRATCH_SIZE,
+				   MCP_REG_SCRATCH_SIZE_BB_K2,
 				   false, 0, false, "MCP", false, 0);
 
 	/* Dump MCP cpu_reg_file */
@@ -3710,7 +3770,6 @@ static u32 qed_grc_dump_phy(struct qed_hwfn *p_hwfn,
 			       phy_defs->tbus_data_lo_addr;
 		data_hi_addr = phy_defs->base_addr +
 			       phy_defs->tbus_data_hi_addr;
-		bytes_buf = (u8 *)(dump_buf + offset);
 
 		if (snprintf(mem_name, sizeof(mem_name), "tbus_%s",
 			     phy_defs->phy_name) < 0)
@@ -3730,6 +3789,7 @@ static u32 qed_grc_dump_phy(struct qed_hwfn *p_hwfn,
 			continue;
 		}
 
+		bytes_buf = (u8 *)(dump_buf + offset);
 		for (tbus_hi_offset = 0;
 		     tbus_hi_offset < (NUM_PHY_TBUS_ADDRESSES >> 8);
 		     tbus_hi_offset++) {
@@ -3778,19 +3838,17 @@ static u32 qed_grc_dump_static_debug(struct qed_hwfn *p_hwfn,
 	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
 	u32 block_id, line_id, offset = 0;
 
-	/* Skip static debug if a debug bus recording is in progress */
-	if (qed_rd(p_hwfn, p_ptt, DBG_REG_DBG_BLOCK_ON))
+	/* Don't dump static debug if a debug bus recording is in progress */
+	if (dump && qed_rd(p_hwfn, p_ptt, DBG_REG_DBG_BLOCK_ON))
 		return 0;
 
 	if (dump) {
-		DP_VERBOSE(p_hwfn,
-			   QED_MSG_DEBUG, "Dumping static debug data...\n");
-
 		/* Disable all blocks debug output */
 		for (block_id = 0; block_id < MAX_BLOCK_ID; block_id++) {
 			struct block_defs *block = s_block_defs[block_id];
 
-			if (block->has_dbg_bus[dev_data->chip_id])
+			if (block->dbg_client_id[dev_data->chip_id] !=
+			    MAX_DBG_BUS_CLIENTS)
 				qed_wr(p_hwfn, p_ptt, block->dbg_enable_addr,
 				       0);
 		}
@@ -3811,12 +3869,12 @@ static u32 qed_grc_dump_static_debug(struct qed_hwfn *p_hwfn,
 		u32 block_dwords, addr, len;
 		u8 dbg_client_id;
 
-		if (!block->has_dbg_bus[dev_data->chip_id])
+		if (block->dbg_client_id[dev_data->chip_id] ==
+		    MAX_DBG_BUS_CLIENTS)
 			continue;
 
-		block_desc =
-			get_dbg_bus_block_desc(p_hwfn,
-					       (enum block_id)block_id);
+		block_desc = get_dbg_bus_block_desc(p_hwfn,
+						    (enum block_id)block_id);
 		block_dwords = NUM_DBG_LINES(block_desc) *
 			       STATIC_DEBUG_LINE_DWORDS;
 
@@ -4044,7 +4102,7 @@ static enum dbg_status qed_grc_dump(struct qed_hwfn *p_hwfn,
 						    dump_buf + offset, dump);
 
 	/* Dump last section */
-	offset += qed_dump_last_section(p_hwfn, dump_buf, offset, dump);
+	offset += qed_dump_last_section(dump_buf, offset, dump);
 
 	if (dump) {
 		/* Unstall storms */
@@ -4253,30 +4311,33 @@ qed_idle_chk_dump_rule_entries(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
 		if (!check_rule && dump)
 			continue;
 
+		if (!dump) {
+			u32 entry_dump_size =
+				qed_idle_chk_dump_failure(p_hwfn,
+							  p_ptt,
+							  dump_buf + offset,
+							  false,
+							  rule->rule_id,
+							  rule,
+							  0,
+							  NULL);
+
+			offset += num_reg_entries * entry_dump_size;
+			(*num_failing_rules) += num_reg_entries;
+			continue;
+		}
+
 		/* Go over all register entries (number of entries is the same
 		 * for all condition registers).
 		 */
 		for (entry_id = 0; entry_id < num_reg_entries; entry_id++) {
 			u32 next_reg_offset = 0;
 
-			if (!dump) {
-				offset += qed_idle_chk_dump_failure(p_hwfn,
-							p_ptt,
-							dump_buf + offset,
-							false,
-							rule->rule_id,
-							rule,
-							entry_id,
-							NULL);
-				(*num_failing_rules)++;
-				break;
-			}
-
 			/* Read current entry of all condition registers */
 			for (reg_id = 0; reg_id < rule->num_cond_regs;
 			     reg_id++) {
 				const struct dbg_idle_chk_cond_reg *reg =
-				    &cond_regs[reg_id];
+					&cond_regs[reg_id];
 				u32 padded_entry_size, addr;
 				bool wide_bus;
 
@@ -4291,9 +4352,9 @@ qed_idle_chk_dump_rule_entries(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
 				if (reg->num_entries > 1 ||
 				    reg->start_entry > 0) {
 					padded_entry_size =
-					    reg->entry_size > 1 ?
-					    roundup_pow_of_two(reg->entry_size)
-					    : 1;
+					   reg->entry_size > 1 ?
+					   roundup_pow_of_two(reg->entry_size) :
+					   1;
 					addr += (reg->start_entry + entry_id) *
 						padded_entry_size;
 				}
@@ -4329,7 +4390,6 @@ qed_idle_chk_dump_rule_entries(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
 							entry_id,
 							cond_reg_values);
 				(*num_failing_rules)++;
-				break;
 			}
 		}
 	}
@@ -4402,7 +4462,7 @@ static u32 qed_idle_chk_dump(struct qed_hwfn *p_hwfn,
 				   dump, "num_rules", num_failing_rules);
 
 	/* Dump last section */
-	offset += qed_dump_last_section(p_hwfn, dump_buf, offset, dump);
+	offset += qed_dump_last_section(dump_buf, offset, dump);
 
 	return offset;
 }
@@ -4474,7 +4534,7 @@ static enum dbg_status qed_nvram_read(struct qed_hwfn *p_hwfn,
 				       (nvram_offset_bytes +
 					read_offset) |
 				       (bytes_to_copy <<
-					DRV_MB_PARAM_NVM_LEN_SHIFT),
+					DRV_MB_PARAM_NVM_LEN_OFFSET),
 				       &ret_mcp_resp, &ret_mcp_param,
 				       &ret_read_size,
 				       (u32 *)((u8 *)ret_buf + read_offset)))
@@ -4701,7 +4761,7 @@ static enum dbg_status qed_mcp_trace_dump(struct qed_hwfn *p_hwfn,
 		offset += trace_meta_size_dwords;
 
 	/* Dump last section */
-	offset += qed_dump_last_section(p_hwfn, dump_buf, offset, dump);
+	offset += qed_dump_last_section(dump_buf, offset, dump);
 
 	*num_dumped_dwords = offset;
 
@@ -4717,7 +4777,7 @@ static enum dbg_status qed_reg_fifo_dump(struct qed_hwfn *p_hwfn,
 					 u32 *dump_buf,
 					 bool dump, u32 *num_dumped_dwords)
 {
-	u32 dwords_read, size_param_offset, offset = 0;
+	u32 dwords_read, size_param_offset, offset = 0, addr, len;
 	bool fifo_has_data;
 
 	*num_dumped_dwords = 0;
@@ -4753,14 +4813,18 @@ static enum dbg_status qed_reg_fifo_dump(struct qed_hwfn *p_hwfn,
 	 * buffer size since more entries could be added to the buffer as we are
 	 * emptying it.
 	 */
+	addr = BYTES_TO_DWORDS(GRC_REG_TRACE_FIFO);
+	len = REG_FIFO_ELEMENT_DWORDS;
 	for (dwords_read = 0;
 	     fifo_has_data && dwords_read < REG_FIFO_DEPTH_DWORDS;
-	     dwords_read += REG_FIFO_ELEMENT_DWORDS, offset +=
-	     REG_FIFO_ELEMENT_DWORDS) {
-		if (qed_dmae_grc2host(p_hwfn, p_ptt, GRC_REG_TRACE_FIFO,
-				      (u64)(uintptr_t)(&dump_buf[offset]),
-				      REG_FIFO_ELEMENT_DWORDS, 0))
-			return DBG_STATUS_DMAE_FAILED;
+	     dwords_read += REG_FIFO_ELEMENT_DWORDS) {
+		offset += qed_grc_dump_addr_range(p_hwfn,
+						  p_ptt,
+						  dump_buf + offset,
+						  true,
+						  addr,
+						  len,
+						  true);
 		fifo_has_data = qed_rd(p_hwfn, p_ptt,
 				       GRC_REG_TRACE_FIFO_VALID_DATA) > 0;
 	}
@@ -4769,7 +4833,7 @@ static enum dbg_status qed_reg_fifo_dump(struct qed_hwfn *p_hwfn,
 			   dwords_read);
 out:
 	/* Dump last section */
-	offset += qed_dump_last_section(p_hwfn, dump_buf, offset, dump);
+	offset += qed_dump_last_section(dump_buf, offset, dump);
 
 	*num_dumped_dwords = offset;
 
@@ -4782,7 +4846,7 @@ static enum dbg_status qed_igu_fifo_dump(struct qed_hwfn *p_hwfn,
 					 u32 *dump_buf,
 					 bool dump, u32 *num_dumped_dwords)
 {
-	u32 dwords_read, size_param_offset, offset = 0;
+	u32 dwords_read, size_param_offset, offset = 0, addr, len;
 	bool fifo_has_data;
 
 	*num_dumped_dwords = 0;
@@ -4818,16 +4882,19 @@ static enum dbg_status qed_igu_fifo_dump(struct qed_hwfn *p_hwfn,
 	 * buffer size since more entries could be added to the buffer as we are
 	 * emptying it.
 	 */
+	addr = BYTES_TO_DWORDS(IGU_REG_ERROR_HANDLING_MEMORY);
+	len = IGU_FIFO_ELEMENT_DWORDS;
 	for (dwords_read = 0;
 	     fifo_has_data && dwords_read < IGU_FIFO_DEPTH_DWORDS;
-	     dwords_read += IGU_FIFO_ELEMENT_DWORDS, offset +=
-	     IGU_FIFO_ELEMENT_DWORDS) {
-		if (qed_dmae_grc2host(p_hwfn, p_ptt,
-				      IGU_REG_ERROR_HANDLING_MEMORY,
-				      (u64)(uintptr_t)(&dump_buf[offset]),
-				      IGU_FIFO_ELEMENT_DWORDS, 0))
-			return DBG_STATUS_DMAE_FAILED;
-		fifo_has_data =	qed_rd(p_hwfn, p_ptt,
+	     dwords_read += IGU_FIFO_ELEMENT_DWORDS) {
+		offset += qed_grc_dump_addr_range(p_hwfn,
+						  p_ptt,
+						  dump_buf + offset,
+						  true,
+						  addr,
+						  len,
+						  true);
+		fifo_has_data = qed_rd(p_hwfn, p_ptt,
 				       IGU_REG_ERROR_HANDLING_DATA_VALID) > 0;
 	}
 
@@ -4835,7 +4902,7 @@ static enum dbg_status qed_igu_fifo_dump(struct qed_hwfn *p_hwfn,
 			   dwords_read);
 out:
 	/* Dump last section */
-	offset += qed_dump_last_section(p_hwfn, dump_buf, offset, dump);
+	offset += qed_dump_last_section(dump_buf, offset, dump);
 
 	*num_dumped_dwords = offset;
 
@@ -4849,7 +4916,7 @@ static enum dbg_status qed_protection_override_dump(struct qed_hwfn *p_hwfn,
 						    bool dump,
 						    u32 *num_dumped_dwords)
 {
-	u32 size_param_offset, override_window_dwords, offset = 0;
+	u32 size_param_offset, override_window_dwords, offset = 0, addr;
 
 	*num_dumped_dwords = 0;
 
@@ -4875,20 +4942,21 @@ static enum dbg_status qed_protection_override_dump(struct qed_hwfn *p_hwfn,
 
 	/* Add override window info to buffer */
 	override_window_dwords =
-		qed_rd(p_hwfn, p_ptt,
-		       GRC_REG_NUMBER_VALID_OVERRIDE_WINDOW) *
-		       PROTECTION_OVERRIDE_ELEMENT_DWORDS;
-	if (qed_dmae_grc2host(p_hwfn, p_ptt,
-			      GRC_REG_PROTECTION_OVERRIDE_WINDOW,
-			      (u64)(uintptr_t)(dump_buf + offset),
-			      override_window_dwords, 0))
-		return DBG_STATUS_DMAE_FAILED;
-	offset += override_window_dwords;
+		qed_rd(p_hwfn, p_ptt, GRC_REG_NUMBER_VALID_OVERRIDE_WINDOW) *
+		PROTECTION_OVERRIDE_ELEMENT_DWORDS;
+	addr = BYTES_TO_DWORDS(GRC_REG_PROTECTION_OVERRIDE_WINDOW);
+	offset += qed_grc_dump_addr_range(p_hwfn,
+					  p_ptt,
+					  dump_buf + offset,
+					  true,
+					  addr,
+					  override_window_dwords,
+					  true);
 	qed_dump_num_param(dump_buf + size_param_offset, dump, "size",
 			   override_window_dwords);
 out:
 	/* Dump last section */
-	offset += qed_dump_last_section(p_hwfn, dump_buf, offset, dump);
+	offset += qed_dump_last_section(dump_buf, offset, dump);
 
 	*num_dumped_dwords = offset;
 
@@ -4952,9 +5020,9 @@ static u32 qed_fw_asserts_dump(struct qed_hwfn *p_hwfn,
 		next_list_idx_addr = fw_asserts_section_addr +
 			DWORDS_TO_BYTES(asserts->list_next_index_dword_offset);
 		next_list_idx = qed_rd(p_hwfn, p_ptt, next_list_idx_addr);
-		last_list_idx = (next_list_idx > 0
-				 ? next_list_idx
-				 : asserts->list_num_elements) - 1;
+		last_list_idx = (next_list_idx > 0 ?
+				 next_list_idx :
+				 asserts->list_num_elements) - 1;
 		addr = BYTES_TO_DWORDS(fw_asserts_section_addr) +
 		       asserts->list_dword_offset +
 		       last_list_idx * asserts->list_element_dword_size;
@@ -4967,7 +5035,7 @@ static u32 qed_fw_asserts_dump(struct qed_hwfn *p_hwfn,
 	}
 
 	/* Dump last section */
-	offset += qed_dump_last_section(p_hwfn, dump_buf, offset, dump);
+	offset += qed_dump_last_section(dump_buf, offset, dump);
 
 	return offset;
 }
@@ -5596,10 +5664,6 @@ struct igu_fifo_addr_data {
 
 #define PROTECTION_OVERRIDE_ELEMENT_ADDR_FACTOR	4
 
-/********************************* Macros ************************************/
-
-#define BYTES_TO_DWORDS(bytes)			((bytes) / BYTES_IN_DWORD)
-
 /***************************** Constant Arrays *******************************/
 
 struct user_dbg_array {
@@ -5698,6 +5762,7 @@ static struct block_info s_block_info_arr[] = {
 	{"phy_pcie", BLOCK_PHY_PCIE},
 	{"led", BLOCK_LED},
 	{"avs_wrap", BLOCK_AVS_WRAP},
+	{"pxpreqbus", BLOCK_PXPREQBUS},
 	{"misc_aeu", BLOCK_MISC_AEU},
 	{"bar0_map", BLOCK_BAR0_MAP}
 };
@@ -5830,8 +5895,8 @@ static const char * const s_status_str[] = {
 	/* DBG_STATUS_MCP_COULD_NOT_RESUME */
 	"Failed to resume MCP after halt",
 
-	/* DBG_STATUS_DMAE_FAILED */
-	"DMAE transaction failed",
+	/* DBG_STATUS_RESERVED2 */
+	"Reserved debug status - shouldn't be returned",
 
 	/* DBG_STATUS_SEMI_FIFO_NOT_EMPTY */
 	"Failed to empty SEMI sync FIFO",
@@ -6109,6 +6174,7 @@ static u32 qed_read_param(u32 *dump_buf,
 	if (*(char_buf + offset++)) {
 		/* String param */
 		*param_str_val = char_buf + offset;
+		*param_num_val = 0;
 		offset += strlen(*param_str_val) + 1;
 		if (offset & 0x3)
 			offset += (4 - (offset & 0x3));
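The final two lines of that hunk round offset up to the next dword boundary. Equivalent spellings, for readers scanning the parser:

	/* Either form computes the same rounding as the if/+= pair above: */
	offset = ALIGN(offset, 4);	/* the kernel's ALIGN() helper */
	/* or, open-coded and branch-free: */
	offset = (offset + 3) & ~0x3;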
@@ -6177,8 +6243,7 @@ static u32 qed_print_section_params(u32 *dump_buf,
 /* Parses the idle check rules and returns the number of characters printed.
  * In case of parsing error, returns 0.
  */
-static u32 qed_parse_idle_chk_dump_rules(struct qed_hwfn *p_hwfn,
-					 u32 *dump_buf,
+static u32 qed_parse_idle_chk_dump_rules(u32 *dump_buf,
 					 u32 *dump_buf_end,
 					 u32 num_rules,
 					 bool print_fw_idle_chk,
@@ -6322,8 +6387,7 @@ static u32 qed_parse_idle_chk_dump_rules(struct qed_hwfn *p_hwfn,
  * parsed_results_bytes.
  * The parsing status is returned.
  */
-static enum dbg_status qed_parse_idle_chk_dump(struct qed_hwfn *p_hwfn,
-					       u32 *dump_buf,
+static enum dbg_status qed_parse_idle_chk_dump(u32 *dump_buf,
 					       u32 num_dumped_dwords,
 					       char *results_buf,
 					       u32 *parsed_results_bytes,
@@ -6375,13 +6439,16 @@ static enum dbg_status qed_parse_idle_chk_dump(struct qed_hwfn *p_hwfn,
 					    results_offset),
 			    "FW_IDLE_CHECK:\n");
 		rules_print_size =
-			qed_parse_idle_chk_dump_rules(p_hwfn, dump_buf,
-						      dump_buf_end, num_rules,
+			qed_parse_idle_chk_dump_rules(dump_buf,
+						      dump_buf_end,
+						      num_rules,
 						      true,
 						      results_buf ?
 						      results_buf +
-						      results_offset : NULL,
-						      num_errors, num_warnings);
+						      results_offset :
+						      NULL,
+						      num_errors,
+						      num_warnings);
 		results_offset += rules_print_size;
 		if (!rules_print_size)
 			return DBG_STATUS_IDLE_CHK_PARSE_FAILED;
@@ -6392,13 +6459,16 @@ static enum dbg_status qed_parse_idle_chk_dump(struct qed_hwfn *p_hwfn,
 					    results_offset),
 			    "\nLSI_IDLE_CHECK:\n");
 		rules_print_size =
-			qed_parse_idle_chk_dump_rules(p_hwfn, dump_buf,
-						      dump_buf_end, num_rules,
+			qed_parse_idle_chk_dump_rules(dump_buf,
+						      dump_buf_end,
+						      num_rules,
 						      false,
 						      results_buf ?
 						      results_buf +
-						      results_offset : NULL,
-						      num_errors, num_warnings);
+						      results_offset :
+						      NULL,
+						      num_errors,
+						      num_warnings);
 		results_offset += rules_print_size;
 		if (!rules_print_size)
 			return DBG_STATUS_IDLE_CHK_PARSE_FAILED;
@@ -6537,7 +6607,6 @@ static enum dbg_status qed_mcp_trace_alloc_meta(struct qed_hwfn *p_hwfn,
  */
 static enum dbg_status qed_parse_mcp_trace_dump(struct qed_hwfn *p_hwfn,
 						u32 *dump_buf,
-						u32 num_dumped_dwords,
 						char *results_buf,
 						u32 *parsed_results_bytes)
 {
@@ -6725,9 +6794,7 @@ static enum dbg_status qed_parse_mcp_trace_dump(struct qed_hwfn *p_hwfn,
  * parsed_results_bytes.
  * The parsing status is returned.
  */
-static enum dbg_status qed_parse_reg_fifo_dump(struct qed_hwfn *p_hwfn,
-					       u32 *dump_buf,
-					       u32 num_dumped_dwords,
+static enum dbg_status qed_parse_reg_fifo_dump(u32 *dump_buf,
 					       char *results_buf,
 					       u32 *parsed_results_bytes)
 {
@@ -6834,8 +6901,7 @@ static enum dbg_status qed_parse_reg_fifo_dump(struct qed_hwfn *p_hwfn,
 static enum dbg_status qed_parse_igu_fifo_element(struct igu_fifo_element
 						  *element, char
 						  *results_buf,
-						  u32 *results_offset,
-						  u32 *parsed_results_bytes)
+						  u32 *results_offset)
 {
 	const struct igu_fifo_addr_data *found_addr = NULL;
 	u8 source, err_type, i, is_cleanup;
@@ -6933,9 +6999,9 @@ static enum dbg_status qed_parse_igu_fifo_element(struct igu_fifo_element
 				"cmd_type: prod/cons update, prod/cons: 0x%x, update_flag: %s, en_dis_int_for_sb : %s, segment : %s, timer_mask = %d, ",
 				prod_cons,
 				update_flag ? "update" : "nop",
-				en_dis_int_for_sb
-				? (en_dis_int_for_sb == 1 ? "disable" : "nop")
-				: "enable",
+				en_dis_int_for_sb ?
+				(en_dis_int_for_sb == 1 ? "disable" : "nop") :
+				"enable",
 				segment ? "attn" : "regular",
 				timer_mask);
 		}
@@ -6969,9 +7035,7 @@ static enum dbg_status qed_parse_igu_fifo_element(struct igu_fifo_element
  * parsed_results_bytes.
  * The parsing status is returned.
  */
-static enum dbg_status qed_parse_igu_fifo_dump(struct qed_hwfn *p_hwfn,
-					       u32 *dump_buf,
-					       u32 num_dumped_dwords,
+static enum dbg_status qed_parse_igu_fifo_dump(u32 *dump_buf,
 					       char *results_buf,
 					       u32 *parsed_results_bytes)
 {
@@ -7011,8 +7075,7 @@ static enum dbg_status qed_parse_igu_fifo_dump(struct qed_hwfn *p_hwfn,
 	for (i = 0; i < num_elements; i++) {
 		status = qed_parse_igu_fifo_element(&elements[i],
 						    results_buf,
-						    &results_offset,
-						    parsed_results_bytes);
+						    &results_offset);
 		if (status != DBG_STATUS_OK)
 			return status;
 	}
@@ -7028,9 +7091,7 @@ static enum dbg_status qed_parse_igu_fifo_dump(struct qed_hwfn *p_hwfn,
 }
 
 static enum dbg_status
-qed_parse_protection_override_dump(struct qed_hwfn *p_hwfn,
-				   u32 *dump_buf,
-				   u32 num_dumped_dwords,
+qed_parse_protection_override_dump(u32 *dump_buf,
 				   char *results_buf,
 				   u32 *parsed_results_bytes)
 {
@@ -7105,9 +7166,7 @@ qed_parse_protection_override_dump(struct qed_hwfn *p_hwfn,
  * parsed_results_bytes.
  * The parsing status is returned.
  */
-static enum dbg_status qed_parse_fw_asserts_dump(struct qed_hwfn *p_hwfn,
-						 u32 *dump_buf,
-						 u32 num_dumped_dwords,
+static enum dbg_status qed_parse_fw_asserts_dump(u32 *dump_buf,
 						 char *results_buf,
 						 u32 *parsed_results_bytes)
 {
@@ -7209,8 +7268,7 @@ enum dbg_status qed_get_idle_chk_results_buf_size(struct qed_hwfn *p_hwfn,
 {
 	u32 num_errors, num_warnings;
 
-	return qed_parse_idle_chk_dump(p_hwfn,
-				       dump_buf,
+	return qed_parse_idle_chk_dump(dump_buf,
 				       num_dumped_dwords,
 				       NULL,
 				       results_buf_size,
@@ -7221,12 +7279,12 @@ enum dbg_status qed_print_idle_chk_results(struct qed_hwfn *p_hwfn,
 					   u32 *dump_buf,
 					   u32 num_dumped_dwords,
 					   char *results_buf,
-					   u32 *num_errors, u32 *num_warnings)
+					   u32 *num_errors,
+					   u32 *num_warnings)
 {
 	u32 parsed_buf_size;
 
-	return qed_parse_idle_chk_dump(p_hwfn,
-				       dump_buf,
+	return qed_parse_idle_chk_dump(dump_buf,
 				       num_dumped_dwords,
 				       results_buf,
 				       &parsed_buf_size,
@@ -7245,9 +7303,7 @@ enum dbg_status qed_get_mcp_trace_results_buf_size(struct qed_hwfn *p_hwfn,
 						   u32 *results_buf_size)
 {
 	return qed_parse_mcp_trace_dump(p_hwfn,
-					dump_buf,
-					num_dumped_dwords,
-					NULL, results_buf_size);
+					dump_buf, NULL, results_buf_size);
 }
 
 enum dbg_status qed_print_mcp_trace_results(struct qed_hwfn *p_hwfn,
@@ -7259,7 +7315,6 @@ enum dbg_status qed_print_mcp_trace_results(struct qed_hwfn *p_hwfn,
 
 	return qed_parse_mcp_trace_dump(p_hwfn,
 					dump_buf,
-					num_dumped_dwords,
 					results_buf, &parsed_buf_size);
 }
 
@@ -7268,10 +7323,7 @@ enum dbg_status qed_get_reg_fifo_results_buf_size(struct qed_hwfn *p_hwfn,
 						  u32 num_dumped_dwords,
 						  u32 *results_buf_size)
 {
-	return qed_parse_reg_fifo_dump(p_hwfn,
-				       dump_buf,
-				       num_dumped_dwords,
-				       NULL, results_buf_size);
+	return qed_parse_reg_fifo_dump(dump_buf, NULL, results_buf_size);
 }
 
 enum dbg_status qed_print_reg_fifo_results(struct qed_hwfn *p_hwfn,
@@ -7281,10 +7333,7 @@ enum dbg_status qed_print_reg_fifo_results(struct qed_hwfn *p_hwfn,
 {
 	u32 parsed_buf_size;
 
-	return qed_parse_reg_fifo_dump(p_hwfn,
-				       dump_buf,
-				       num_dumped_dwords,
-				       results_buf, &parsed_buf_size);
+	return qed_parse_reg_fifo_dump(dump_buf, results_buf, &parsed_buf_size);
 }
 
 enum dbg_status qed_get_igu_fifo_results_buf_size(struct qed_hwfn *p_hwfn,
@@ -7292,10 +7341,7 @@ enum dbg_status qed_get_igu_fifo_results_buf_size(struct qed_hwfn *p_hwfn,
 						  u32 num_dumped_dwords,
 						  u32 *results_buf_size)
 {
-	return qed_parse_igu_fifo_dump(p_hwfn,
-				       dump_buf,
-				       num_dumped_dwords,
-				       NULL, results_buf_size);
+	return qed_parse_igu_fifo_dump(dump_buf, NULL, results_buf_size);
 }
 
 enum dbg_status qed_print_igu_fifo_results(struct qed_hwfn *p_hwfn,
@@ -7305,10 +7351,7 @@ enum dbg_status qed_print_igu_fifo_results(struct qed_hwfn *p_hwfn,
 {
 	u32 parsed_buf_size;
 
-	return qed_parse_igu_fifo_dump(p_hwfn,
-				       dump_buf,
-				       num_dumped_dwords,
-				       results_buf, &parsed_buf_size);
+	return qed_parse_igu_fifo_dump(dump_buf, results_buf, &parsed_buf_size);
 }
 
 enum dbg_status
@@ -7317,9 +7360,7 @@ qed_get_protection_override_results_buf_size(struct qed_hwfn *p_hwfn,
 					     u32 num_dumped_dwords,
 					     u32 *results_buf_size)
 {
-	return qed_parse_protection_override_dump(p_hwfn,
-						  dump_buf,
-						  num_dumped_dwords,
+	return qed_parse_protection_override_dump(dump_buf,
 						  NULL, results_buf_size);
 }
 
@@ -7330,9 +7371,7 @@ enum dbg_status qed_print_protection_override_results(struct qed_hwfn *p_hwfn,
 {
 	u32 parsed_buf_size;
 
-	return qed_parse_protection_override_dump(p_hwfn,
-						  dump_buf,
-						  num_dumped_dwords,
+	return qed_parse_protection_override_dump(dump_buf,
 						  results_buf,
 						  &parsed_buf_size);
 }
@@ -7342,10 +7381,7 @@ enum dbg_status qed_get_fw_asserts_results_buf_size(struct qed_hwfn *p_hwfn,
 						    u32 num_dumped_dwords,
 						    u32 *results_buf_size)
 {
-	return qed_parse_fw_asserts_dump(p_hwfn,
-					 dump_buf,
-					 num_dumped_dwords,
-					 NULL, results_buf_size);
+	return qed_parse_fw_asserts_dump(dump_buf, NULL, results_buf_size);
 }
 
 enum dbg_status qed_print_fw_asserts_results(struct qed_hwfn *p_hwfn,
@@ -7355,9 +7391,7 @@ enum dbg_status qed_print_fw_asserts_results(struct qed_hwfn *p_hwfn,
 {
 	u32 parsed_buf_size;
 
-	return qed_parse_fw_asserts_dump(p_hwfn,
-					 dump_buf,
-					 num_dumped_dwords,
+	return qed_parse_fw_asserts_dump(dump_buf,
 					 results_buf, &parsed_buf_size);
 }
 
@@ -7386,30 +7420,30 @@ enum dbg_status qed_dbg_parse_attn(struct qed_hwfn *p_hwfn,
 
 	/* Go over registers with a non-zero attention status */
 	for (i = 0; i < num_regs; i++) {
+		struct dbg_attn_bit_mapping *bit_mapping;
 		struct dbg_attn_reg_result *reg_result;
-		struct dbg_attn_bit_mapping *mapping;
 		u8 num_reg_attn, bit_idx = 0;
 
 		reg_result = &results->reg_results[i];
 		num_reg_attn = GET_FIELD(reg_result->data,
 					 DBG_ATTN_REG_RESULT_NUM_REG_ATTN);
 		block_attn = &s_user_dbg_arrays[BIN_BUF_DBG_ATTN_INDEXES];
-		mapping = &((struct dbg_attn_bit_mapping *)
-			    block_attn->ptr)[reg_result->block_attn_offset];
+		bit_mapping = &((struct dbg_attn_bit_mapping *)
+				block_attn->ptr)[reg_result->block_attn_offset];
 
 		pstrings = &s_user_dbg_arrays[BIN_BUF_DBG_PARSING_STRINGS];
 
 		/* Go over attention status bits */
 		for (j = 0; j < num_reg_attn; j++) {
-			u16 attn_idx_val = GET_FIELD(mapping[j].data,
+			u16 attn_idx_val = GET_FIELD(bit_mapping[j].data,
 						     DBG_ATTN_BIT_MAPPING_VAL);
 			const char *attn_name, *attn_type_str, *masked_str;
-			u32 name_offset, sts_addr;
+			u32 attn_name_offset, sts_addr;
 
 			/* Check if bit mask should be advanced (due to unused
 			 * bits).
 			 */
-			if (GET_FIELD(mapping[j].data,
+			if (GET_FIELD(bit_mapping[j].data,
 				      DBG_ATTN_BIT_MAPPING_IS_UNUSED_BIT_CNT)) {
 				bit_idx += (u8)attn_idx_val;
 				continue;
@@ -7422,9 +7456,10 @@ enum dbg_status qed_dbg_parse_attn(struct qed_hwfn *p_hwfn,
 			}
 
 			/* Find attention name */
-			name_offset = block_attn_name_offsets[attn_idx_val];
+			attn_name_offset =
+				block_attn_name_offsets[attn_idx_val];
 			attn_name = &((const char *)
-				      pstrings->ptr)[name_offset];
+				      pstrings->ptr)[attn_name_offset];
 			attn_type_str = attn_type == ATTN_TYPE_INTERRUPT ?
 					"Interrupt" : "Parity";
 			masked_str = reg_result->mask_val & BIT(bit_idx) ?
diff --git a/drivers/net/ethernet/qlogic/qed/qed_dev.c b/drivers/net/ethernet/qlogic/qed/qed_dev.c
index 58a689f..553a6d1 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_dev.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_dev.c
@@ -758,7 +758,7 @@ static void qed_init_qm_info(struct qed_hwfn *p_hwfn)
 /* This function reconfigures the QM pf on the fly.
  * For this purpose we:
  * 1. reconfigure the QM database
- * 2. set new values to runtime arrat
+ * 2. set new values to runtime array
  * 3. send an sdm_qm_cmd through the rbc interface to stop the QM
  * 4. activate init tool in QM_PF stage
  * 5. send an sdm_qm_cmd through rbc interface to release the QM
@@ -784,7 +784,7 @@ int qed_qm_reconf(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
 	qed_init_clear_rt_data(p_hwfn);
 
 	/* prepare QM portion of runtime array */
-	qed_qm_init_pf(p_hwfn, p_ptt);
+	qed_qm_init_pf(p_hwfn, p_ptt, false);
 
 	/* activate init tool on runtime array */
 	rc = qed_init_run(p_hwfn, p_ptt, PHASE_QM_PF, p_hwfn->rel_pf_id,
@@ -1515,7 +1515,7 @@ static int qed_hw_init_pf(struct qed_hwfn *p_hwfn,
 			     NIG_REG_LLH_FUNC_TAGMAC_CLS_TYPE_RT_OFFSET, 1);
 	}
 
-	/* Protocl Configuration  */
+	/* Protocol Configuration */
 	STORE_RT_REG(p_hwfn, PRS_REG_SEARCH_TCP_RT_OFFSET,
 		     (p_hwfn->hw_info.personality == QED_PCI_ISCSI) ? 1 : 0);
 	STORE_RT_REG(p_hwfn, PRS_REG_SEARCH_FCOE_RT_OFFSET,
@@ -1527,6 +1527,11 @@ static int qed_hw_init_pf(struct qed_hwfn *p_hwfn,
 	if (rc)
 		return rc;
 
+	/* Sanity check before the PF init sequence that uses DMAE */
+	rc = qed_dmae_sanity(p_hwfn, p_ptt, "pf_phase");
+	if (rc)
+		return rc;
+
 	/* PF Init sequence */
 	rc = qed_init_run(p_hwfn, p_ptt, PHASE_PF, rel_pf_id, hw_mode);
 	if (rc)
@@ -2192,7 +2197,7 @@ qed_hw_set_soft_resc_size(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
 			/* No need for a case for QED_CMDQS_CQS since
 			 * CNQ/CMDQS are the same resource.
 			 */
-			resc_max_val = NUM_OF_CMDQS_CQS;
+			resc_max_val = NUM_OF_GLOBAL_QUEUES;
 			break;
 		case QED_RDMA_STATS_QUEUE:
 			resc_max_val = b_ah ? RDMA_NUM_STATISTIC_COUNTERS_K2
@@ -2267,7 +2272,7 @@ int qed_hw_get_dflt_resc(struct qed_hwfn *p_hwfn,
 	case QED_RDMA_CNQ_RAM:
 	case QED_CMDQS_CQS:
 		/* CNQ/CMDQS are the same resource */
-		*p_resc_num = NUM_OF_CMDQS_CQS / num_funcs;
+		*p_resc_num = NUM_OF_GLOBAL_QUEUES / num_funcs;
 		break;
 	case QED_RDMA_STATS_QUEUE:
 		*p_resc_num = (b_ah ? RDMA_NUM_STATISTIC_COUNTERS_K2 :
diff --git a/drivers/net/ethernet/qlogic/qed/qed_fcoe.c b/drivers/net/ethernet/qlogic/qed/qed_fcoe.c
index df195c0..2dc9b31 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_fcoe.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_fcoe.c
@@ -115,7 +115,7 @@ qed_sp_fcoe_func_start(struct qed_hwfn *p_hwfn,
 	struct qed_fcoe_pf_params *fcoe_pf_params = NULL;
 	struct fcoe_init_ramrod_params *p_ramrod = NULL;
 	struct fcoe_init_func_ramrod_data *p_data;
-	struct fcoe_conn_context *p_cxt = NULL;
+	struct e4_fcoe_conn_context *p_cxt = NULL;
 	struct qed_spq_entry *p_ent = NULL;
 	struct qed_sp_init_data init_data;
 	struct qed_cxt_info cxt_info;
@@ -167,7 +167,7 @@ qed_sp_fcoe_func_start(struct qed_hwfn *p_hwfn,
 	}
 	p_cxt = cxt_info.p_cxt;
 	SET_FIELD(p_cxt->tstorm_ag_context.flags3,
-		  TSTORM_FCOE_CONN_AG_CTX_DUMMY_TIMER_CF_EN, 1);
+		  E4_TSTORM_FCOE_CONN_AG_CTX_DUMMY_TIMER_CF_EN, 1);
 
 	fcoe_pf_params->dummy_icid = (u16)dummy_cid;
 
@@ -568,7 +568,7 @@ int qed_fcoe_alloc(struct qed_hwfn *p_hwfn)
 
 void qed_fcoe_setup(struct qed_hwfn *p_hwfn)
 {
-	struct fcoe_task_context *p_task_ctx = NULL;
+	struct e4_fcoe_task_context *p_task_ctx = NULL;
 	int rc;
 	u32 i;
 
@@ -580,13 +580,13 @@ void qed_fcoe_setup(struct qed_hwfn *p_hwfn)
 		if (rc)
 			continue;
 
-		memset(p_task_ctx, 0, sizeof(struct fcoe_task_context));
+		memset(p_task_ctx, 0, sizeof(struct e4_fcoe_task_context));
 		SET_FIELD(p_task_ctx->timer_context.logical_client_0,
 			  TIMERS_CONTEXT_VALIDLC0, 1);
 		SET_FIELD(p_task_ctx->timer_context.logical_client_1,
 			  TIMERS_CONTEXT_VALIDLC1, 1);
 		SET_FIELD(p_task_ctx->tstorm_ag_context.flags0,
-			  TSTORM_FCOE_TASK_AG_CTX_CONNECTION_TYPE, 1);
+			  E4_TSTORM_FCOE_TASK_AG_CTX_CONNECTION_TYPE, 1);
 	}
 }
 
diff --git a/drivers/net/ethernet/qlogic/qed/qed_hsi.h b/drivers/net/ethernet/qlogic/qed/qed_hsi.h
index 3427fe70..de873d7 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_hsi.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_hsi.h
@@ -54,7 +54,7 @@
 struct qed_hwfn;
 struct qed_ptt;
 
-/* opcodes for the event ring */
+/* Opcodes for the event ring */
 enum common_event_opcode {
 	COMMON_EVENT_PF_START,
 	COMMON_EVENT_PF_STOP,
@@ -82,6 +82,300 @@ enum common_ramrod_cmd_id {
 	MAX_COMMON_RAMROD_CMD_ID
 };
 
+/* How ll2 should deal with packets upon errors */
+enum core_error_handle {
+	LL2_DROP_PACKET,
+	LL2_DO_NOTHING,
+	LL2_ASSERT,
+	MAX_CORE_ERROR_HANDLE
+};
+
+/* Opcodes for the event ring */
+enum core_event_opcode {
+	CORE_EVENT_TX_QUEUE_START,
+	CORE_EVENT_TX_QUEUE_STOP,
+	CORE_EVENT_RX_QUEUE_START,
+	CORE_EVENT_RX_QUEUE_STOP,
+	CORE_EVENT_RX_QUEUE_FLUSH,
+	CORE_EVENT_TX_QUEUE_UPDATE,
+	MAX_CORE_EVENT_OPCODE
+};
+
+/* The L4 pseudo checksum mode for Core */
+enum core_l4_pseudo_checksum_mode {
+	CORE_L4_PSEUDO_CSUM_CORRECT_LENGTH,
+	CORE_L4_PSEUDO_CSUM_ZERO_LENGTH,
+	MAX_CORE_L4_PSEUDO_CHECKSUM_MODE
+};
+
+/* Light-L2 per-port statistics */
+struct core_ll2_port_stats {
+	struct regpair gsi_invalid_hdr;
+	struct regpair gsi_invalid_pkt_length;
+	struct regpair gsi_unsupported_pkt_typ;
+	struct regpair gsi_crcchksm_error;
+};
+
+/* Ethernet TX Per Queue Stats */
+struct core_ll2_pstorm_per_queue_stat {
+	struct regpair sent_ucast_bytes;
+	struct regpair sent_mcast_bytes;
+	struct regpair sent_bcast_bytes;
+	struct regpair sent_ucast_pkts;
+	struct regpair sent_mcast_pkts;
+	struct regpair sent_bcast_pkts;
+};
+
+/* Light-L2 RX Producers in Tstorm RAM */
+struct core_ll2_rx_prod {
+	__le16 bd_prod;
+	__le16 cqe_prod;
+	__le32 reserved;
+};
+
+struct core_ll2_tstorm_per_queue_stat {
+	struct regpair packet_too_big_discard;
+	struct regpair no_buff_discard;
+};
+
+struct core_ll2_ustorm_per_queue_stat {
+	struct regpair rcv_ucast_bytes;
+	struct regpair rcv_mcast_bytes;
+	struct regpair rcv_bcast_bytes;
+	struct regpair rcv_ucast_pkts;
+	struct regpair rcv_mcast_pkts;
+	struct regpair rcv_bcast_pkts;
+};
+
+/* Core Ramrod Command IDs (light L2) */
+enum core_ramrod_cmd_id {
+	CORE_RAMROD_UNUSED,
+	CORE_RAMROD_RX_QUEUE_START,
+	CORE_RAMROD_TX_QUEUE_START,
+	CORE_RAMROD_RX_QUEUE_STOP,
+	CORE_RAMROD_TX_QUEUE_STOP,
+	CORE_RAMROD_RX_QUEUE_FLUSH,
+	CORE_RAMROD_TX_QUEUE_UPDATE,
+	MAX_CORE_RAMROD_CMD_ID
+};
+
+/* RoCE flavor for Light L2 */
+enum core_roce_flavor_type {
+	CORE_ROCE,
+	CORE_RROCE,
+	MAX_CORE_ROCE_FLAVOR_TYPE
+};
+
+/* Specifies how LL2 should deal with packet errors: packet_too_big and
+ * no_buff.
+ */
+struct core_rx_action_on_error {
+	u8 error_type;
+#define CORE_RX_ACTION_ON_ERROR_PACKET_TOO_BIG_MASK	0x3
+#define CORE_RX_ACTION_ON_ERROR_PACKET_TOO_BIG_SHIFT	0
+#define CORE_RX_ACTION_ON_ERROR_NO_BUFF_MASK		0x3
+#define CORE_RX_ACTION_ON_ERROR_NO_BUFF_SHIFT		2
+#define CORE_RX_ACTION_ON_ERROR_RESERVED_MASK		0xF
+#define CORE_RX_ACTION_ON_ERROR_RESERVED_SHIFT		4
+};
+
+/* Core RX BD for Light L2 */
+struct core_rx_bd {
+	struct regpair addr;
+	__le16 reserved[4];
+};
+
+/* Core RX CM offload BD for Light L2 */
+struct core_rx_bd_with_buff_len {
+	struct regpair addr;
+	__le16 buff_length;
+	__le16 reserved[3];
+};
+
+/* Core RX CM offload BD for Light L2 */
+union core_rx_bd_union {
+	struct core_rx_bd rx_bd;
+	struct core_rx_bd_with_buff_len rx_bd_with_len;
+};
+
+/* Opaque Data for Light L2 RX CQE */
+struct core_rx_cqe_opaque_data {
+	__le32 data[2];
+};
+
+/* Core RX CQE Type for Light L2 */
+enum core_rx_cqe_type {
+	CORE_RX_CQE_ILLEGAL_TYPE,
+	CORE_RX_CQE_TYPE_REGULAR,
+	CORE_RX_CQE_TYPE_GSI_OFFLOAD,
+	CORE_RX_CQE_TYPE_SLOW_PATH,
+	MAX_CORE_RX_CQE_TYPE
+};
+
+/* Core RX CQE for Light L2 */
+struct core_rx_fast_path_cqe {
+	u8 type;
+	u8 placement_offset;
+	struct parsing_and_err_flags parse_flags;
+	__le16 packet_length;
+	__le16 vlan;
+	struct core_rx_cqe_opaque_data opaque_data;
+	struct parsing_err_flags err_flags;
+	__le16 reserved0;
+	__le32 reserved1[3];
+};
+
+/* Core Rx CM offload CQE */
+struct core_rx_gsi_offload_cqe {
+	u8 type;
+	u8 data_length_error;
+	struct parsing_and_err_flags parse_flags;
+	__le16 data_length;
+	__le16 vlan;
+	__le32 src_mac_addrhi;
+	__le16 src_mac_addrlo;
+	__le16 qp_id;
+	__le32 src_qp;
+	__le32 reserved[3];
+};
+
+/* Core RX CQE for Light L2 */
+struct core_rx_slow_path_cqe {
+	u8 type;
+	u8 ramrod_cmd_id;
+	__le16 echo;
+	struct core_rx_cqe_opaque_data opaque_data;
+	__le32 reserved1[5];
+};
+
+/* Core RX CQE union for Light L2 */
+union core_rx_cqe_union {
+	struct core_rx_fast_path_cqe rx_cqe_fp;
+	struct core_rx_gsi_offload_cqe rx_cqe_gsi;
+	struct core_rx_slow_path_cqe rx_cqe_sp;
+};
+
+/* Ramrod data for rx queue start ramrod */
+struct core_rx_start_ramrod_data {
+	struct regpair bd_base;
+	struct regpair cqe_pbl_addr;
+	__le16 mtu;
+	__le16 sb_id;
+	u8 sb_index;
+	u8 complete_cqe_flg;
+	u8 complete_event_flg;
+	u8 drop_ttl0_flg;
+	__le16 num_of_pbl_pages;
+	u8 inner_vlan_stripping_en;
+	u8 report_outer_vlan;
+	u8 queue_id;
+	u8 main_func_queue;
+	u8 mf_si_bcast_accept_all;
+	u8 mf_si_mcast_accept_all;
+	struct core_rx_action_on_error action_on_error;
+	u8 gsi_offload_flag;
+	u8 reserved[6];
+};
+
+/* Ramrod data for rx queue stop ramrod */
+struct core_rx_stop_ramrod_data {
+	u8 complete_cqe_flg;
+	u8 complete_event_flg;
+	u8 queue_id;
+	u8 reserved1;
+	__le16 reserved2[2];
+};
+
+/* Flags for Core TX BD */
+struct core_tx_bd_data {
+	__le16 as_bitfield;
+#define CORE_TX_BD_DATA_FORCE_VLAN_MODE_MASK		0x1
+#define CORE_TX_BD_DATA_FORCE_VLAN_MODE_SHIFT		0
+#define CORE_TX_BD_DATA_VLAN_INSERTION_MASK		0x1
+#define CORE_TX_BD_DATA_VLAN_INSERTION_SHIFT		1
+#define CORE_TX_BD_DATA_START_BD_MASK			0x1
+#define CORE_TX_BD_DATA_START_BD_SHIFT			2
+#define CORE_TX_BD_DATA_IP_CSUM_MASK			0x1
+#define CORE_TX_BD_DATA_IP_CSUM_SHIFT			3
+#define CORE_TX_BD_DATA_L4_CSUM_MASK			0x1
+#define CORE_TX_BD_DATA_L4_CSUM_SHIFT			4
+#define CORE_TX_BD_DATA_IPV6_EXT_MASK			0x1
+#define CORE_TX_BD_DATA_IPV6_EXT_SHIFT			5
+#define CORE_TX_BD_DATA_L4_PROTOCOL_MASK		0x1
+#define CORE_TX_BD_DATA_L4_PROTOCOL_SHIFT		6
+#define CORE_TX_BD_DATA_L4_PSEUDO_CSUM_MODE_MASK	0x1
+#define CORE_TX_BD_DATA_L4_PSEUDO_CSUM_MODE_SHIFT	7
+#define CORE_TX_BD_DATA_NBDS_MASK			0xF
+#define CORE_TX_BD_DATA_NBDS_SHIFT			8
+#define CORE_TX_BD_DATA_ROCE_FLAV_MASK			0x1
+#define CORE_TX_BD_DATA_ROCE_FLAV_SHIFT			12
+#define CORE_TX_BD_DATA_IP_LEN_MASK			0x1
+#define CORE_TX_BD_DATA_IP_LEN_SHIFT			13
+#define CORE_TX_BD_DATA_DISABLE_STAG_INSERTION_MASK	0x1
+#define CORE_TX_BD_DATA_DISABLE_STAG_INSERTION_SHIFT	14
+#define CORE_TX_BD_DATA_RESERVED0_MASK			0x1
+#define CORE_TX_BD_DATA_RESERVED0_SHIFT			15
+};
+
+/* Core TX BD for Light L2 */
+struct core_tx_bd {
+	struct regpair addr;
+	__le16 nbytes;
+	__le16 nw_vlan_or_lb_echo;
+	struct core_tx_bd_data bd_data;
+	__le16 bitfield1;
+#define CORE_TX_BD_L4_HDR_OFFSET_W_MASK		0x3FFF
+#define CORE_TX_BD_L4_HDR_OFFSET_W_SHIFT	0
+#define CORE_TX_BD_TX_DST_MASK			0x3
+#define CORE_TX_BD_TX_DST_SHIFT			14
+};
+
+/* Light L2 TX Destination */
+enum core_tx_dest {
+	CORE_TX_DEST_NW,
+	CORE_TX_DEST_LB,
+	CORE_TX_DEST_RESERVED,
+	CORE_TX_DEST_DROP,
+	MAX_CORE_TX_DEST
+};
+
+/* Ramrod data for tx queue start ramrod */
+struct core_tx_start_ramrod_data {
+	struct regpair pbl_base_addr;
+	__le16 mtu;
+	__le16 sb_id;
+	u8 sb_index;
+	u8 stats_en;
+	u8 stats_id;
+	u8 conn_type;
+	__le16 pbl_size;
+	__le16 qm_pq_id;
+	u8 gsi_offload_flag;
+	u8 resrved[3];
+};
+
+/* Ramrod data for tx queue stop ramrod */
+struct core_tx_stop_ramrod_data {
+	__le32 reserved0[2];
+};
+
+/* Ramrod data for tx queue update ramrod */
+struct core_tx_update_ramrod_data {
+	u8 update_qm_pq_id_flg;
+	u8 reserved0;
+	__le16 qm_pq_id;
+	__le32 reserved1[1];
+};
+
+/* Enum flag for what type of dcb data to update */
+enum dcb_dscp_update_mode {
+	DONT_UPDATE_DCB_DSCP,
+	UPDATE_DCB,
+	UPDATE_DSCP,
+	UPDATE_DCB_DSCP,
+	MAX_DCB_DSCP_UPDATE_MODE
+};
+
 /* The core storm context for the Ystorm */
 struct ystorm_core_conn_st_ctx {
 	__le32 reserved[4];
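
The new core_tx_bd_data above packs all per-BD TX flags into one 16-bit
field via the same mask/shift pairs. A hedged sketch of composing
as_bitfield for a hypothetical two-BD packet with L4 checksum offload
(host byte order for clarity; the driver itself stores the result as
__le16, so a real caller would wrap it in cpu_to_le16()):

	#include <stdint.h>
	#include <stdio.h>

	#define CORE_TX_BD_DATA_START_BD_MASK	0x1
	#define CORE_TX_BD_DATA_START_BD_SHIFT	2
	#define CORE_TX_BD_DATA_L4_CSUM_MASK	0x1
	#define CORE_TX_BD_DATA_L4_CSUM_SHIFT	4
	#define CORE_TX_BD_DATA_NBDS_MASK	0xF
	#define CORE_TX_BD_DATA_NBDS_SHIFT	8

	int main(void)
	{
		uint16_t bd_data = 0;

		/* First BD of a 2-BD packet, L4 checksum offload requested */
		bd_data |= (1 & CORE_TX_BD_DATA_START_BD_MASK) << CORE_TX_BD_DATA_START_BD_SHIFT;
		bd_data |= (1 & CORE_TX_BD_DATA_L4_CSUM_MASK) << CORE_TX_BD_DATA_L4_CSUM_SHIFT;
		bd_data |= (2 & CORE_TX_BD_DATA_NBDS_MASK) << CORE_TX_BD_DATA_NBDS_SHIFT;

		printf("as_bitfield = 0x%04x\n", bd_data);	/* prints 0x0214 */
		return 0;
	}
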
@@ -102,216 +396,216 @@ struct xstorm_core_conn_st_ctx {
 	__le32 reserved0[55];
 };
 
-struct xstorm_core_conn_ag_ctx {
+struct e4_xstorm_core_conn_ag_ctx {
 	u8 reserved0;
-	u8 core_state;
+	u8 state;
 	u8 flags0;
-#define XSTORM_CORE_CONN_AG_CTX_EXIST_IN_QM0_MASK	0x1
-#define XSTORM_CORE_CONN_AG_CTX_EXIST_IN_QM0_SHIFT	0
-#define XSTORM_CORE_CONN_AG_CTX_RESERVED1_MASK		0x1
-#define XSTORM_CORE_CONN_AG_CTX_RESERVED1_SHIFT		1
-#define XSTORM_CORE_CONN_AG_CTX_RESERVED2_MASK		0x1
-#define XSTORM_CORE_CONN_AG_CTX_RESERVED2_SHIFT		2
-#define XSTORM_CORE_CONN_AG_CTX_EXIST_IN_QM3_MASK	0x1
-#define XSTORM_CORE_CONN_AG_CTX_EXIST_IN_QM3_SHIFT	3
-#define XSTORM_CORE_CONN_AG_CTX_RESERVED3_MASK		0x1
-#define XSTORM_CORE_CONN_AG_CTX_RESERVED3_SHIFT		4
-#define XSTORM_CORE_CONN_AG_CTX_RESERVED4_MASK		0x1
-#define XSTORM_CORE_CONN_AG_CTX_RESERVED4_SHIFT		5
-#define XSTORM_CORE_CONN_AG_CTX_RESERVED5_MASK		0x1
-#define XSTORM_CORE_CONN_AG_CTX_RESERVED5_SHIFT		6
-#define XSTORM_CORE_CONN_AG_CTX_RESERVED6_MASK		0x1
-#define XSTORM_CORE_CONN_AG_CTX_RESERVED6_SHIFT		7
+#define E4_XSTORM_CORE_CONN_AG_CTX_EXIST_IN_QM0_MASK	0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_EXIST_IN_QM0_SHIFT	0
+#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED1_MASK	0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED1_SHIFT	1
+#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED2_MASK	0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED2_SHIFT	2
+#define E4_XSTORM_CORE_CONN_AG_CTX_EXIST_IN_QM3_MASK	0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_EXIST_IN_QM3_SHIFT	3
+#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED3_MASK	0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED3_SHIFT	4
+#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED4_MASK	0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED4_SHIFT	5
+#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED5_MASK	0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED5_SHIFT	6
+#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED6_MASK	0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED6_SHIFT	7
 	u8 flags1;
-#define XSTORM_CORE_CONN_AG_CTX_RESERVED7_MASK		0x1
-#define XSTORM_CORE_CONN_AG_CTX_RESERVED7_SHIFT		0
-#define XSTORM_CORE_CONN_AG_CTX_RESERVED8_MASK		0x1
-#define XSTORM_CORE_CONN_AG_CTX_RESERVED8_SHIFT		1
-#define XSTORM_CORE_CONN_AG_CTX_RESERVED9_MASK		0x1
-#define XSTORM_CORE_CONN_AG_CTX_RESERVED9_SHIFT		2
-#define XSTORM_CORE_CONN_AG_CTX_BIT11_MASK		0x1
-#define XSTORM_CORE_CONN_AG_CTX_BIT11_SHIFT		3
-#define XSTORM_CORE_CONN_AG_CTX_BIT12_MASK		0x1
-#define XSTORM_CORE_CONN_AG_CTX_BIT12_SHIFT		4
-#define XSTORM_CORE_CONN_AG_CTX_BIT13_MASK		0x1
-#define XSTORM_CORE_CONN_AG_CTX_BIT13_SHIFT		5
-#define XSTORM_CORE_CONN_AG_CTX_TX_RULE_ACTIVE_MASK	0x1
-#define XSTORM_CORE_CONN_AG_CTX_TX_RULE_ACTIVE_SHIFT	6
-#define XSTORM_CORE_CONN_AG_CTX_DQ_CF_ACTIVE_MASK	0x1
-#define XSTORM_CORE_CONN_AG_CTX_DQ_CF_ACTIVE_SHIFT	7
+#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED7_MASK	0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED7_SHIFT	0
+#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED8_MASK	0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED8_SHIFT	1
+#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED9_MASK	0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED9_SHIFT	2
+#define E4_XSTORM_CORE_CONN_AG_CTX_BIT11_MASK		0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_BIT11_SHIFT		3
+#define E4_XSTORM_CORE_CONN_AG_CTX_BIT12_MASK		0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_BIT12_SHIFT		4
+#define E4_XSTORM_CORE_CONN_AG_CTX_BIT13_MASK		0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_BIT13_SHIFT		5
+#define E4_XSTORM_CORE_CONN_AG_CTX_TX_RULE_ACTIVE_MASK	0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_TX_RULE_ACTIVE_SHIFT	6
+#define E4_XSTORM_CORE_CONN_AG_CTX_DQ_CF_ACTIVE_MASK	0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_DQ_CF_ACTIVE_SHIFT	7
 	u8 flags2;
-#define XSTORM_CORE_CONN_AG_CTX_CF0_MASK	0x3
-#define XSTORM_CORE_CONN_AG_CTX_CF0_SHIFT	0
-#define XSTORM_CORE_CONN_AG_CTX_CF1_MASK	0x3
-#define XSTORM_CORE_CONN_AG_CTX_CF1_SHIFT	2
-#define XSTORM_CORE_CONN_AG_CTX_CF2_MASK	0x3
-#define XSTORM_CORE_CONN_AG_CTX_CF2_SHIFT	4
-#define XSTORM_CORE_CONN_AG_CTX_CF3_MASK	0x3
-#define XSTORM_CORE_CONN_AG_CTX_CF3_SHIFT	6
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF0_MASK	0x3
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF0_SHIFT	0
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF1_MASK	0x3
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF1_SHIFT	2
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF2_MASK	0x3
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF2_SHIFT	4
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF3_MASK	0x3
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF3_SHIFT	6
 	u8 flags3;
-#define XSTORM_CORE_CONN_AG_CTX_CF4_MASK	0x3
-#define XSTORM_CORE_CONN_AG_CTX_CF4_SHIFT	0
-#define XSTORM_CORE_CONN_AG_CTX_CF5_MASK	0x3
-#define XSTORM_CORE_CONN_AG_CTX_CF5_SHIFT	2
-#define XSTORM_CORE_CONN_AG_CTX_CF6_MASK	0x3
-#define XSTORM_CORE_CONN_AG_CTX_CF6_SHIFT	4
-#define XSTORM_CORE_CONN_AG_CTX_CF7_MASK	0x3
-#define XSTORM_CORE_CONN_AG_CTX_CF7_SHIFT	6
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF4_MASK	0x3
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF4_SHIFT	0
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF5_MASK	0x3
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF5_SHIFT	2
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF6_MASK	0x3
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF6_SHIFT	4
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF7_MASK	0x3
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF7_SHIFT	6
 	u8 flags4;
-#define XSTORM_CORE_CONN_AG_CTX_CF8_MASK	0x3
-#define XSTORM_CORE_CONN_AG_CTX_CF8_SHIFT	0
-#define XSTORM_CORE_CONN_AG_CTX_CF9_MASK	0x3
-#define XSTORM_CORE_CONN_AG_CTX_CF9_SHIFT	2
-#define XSTORM_CORE_CONN_AG_CTX_CF10_MASK	0x3
-#define XSTORM_CORE_CONN_AG_CTX_CF10_SHIFT	4
-#define XSTORM_CORE_CONN_AG_CTX_CF11_MASK	0x3
-#define XSTORM_CORE_CONN_AG_CTX_CF11_SHIFT	6
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF8_MASK	0x3
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF8_SHIFT	0
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF9_MASK	0x3
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF9_SHIFT	2
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF10_MASK	0x3
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF10_SHIFT	4
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF11_MASK	0x3
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF11_SHIFT	6
 	u8 flags5;
-#define XSTORM_CORE_CONN_AG_CTX_CF12_MASK	0x3
-#define XSTORM_CORE_CONN_AG_CTX_CF12_SHIFT	0
-#define XSTORM_CORE_CONN_AG_CTX_CF13_MASK	0x3
-#define XSTORM_CORE_CONN_AG_CTX_CF13_SHIFT	2
-#define XSTORM_CORE_CONN_AG_CTX_CF14_MASK	0x3
-#define XSTORM_CORE_CONN_AG_CTX_CF14_SHIFT	4
-#define XSTORM_CORE_CONN_AG_CTX_CF15_MASK	0x3
-#define XSTORM_CORE_CONN_AG_CTX_CF15_SHIFT	6
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF12_MASK	0x3
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF12_SHIFT	0
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF13_MASK	0x3
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF13_SHIFT	2
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF14_MASK	0x3
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF14_SHIFT	4
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF15_MASK	0x3
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF15_SHIFT	6
 	u8 flags6;
-#define XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_MASK	0x3
-#define XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_SHIFT	0
-#define XSTORM_CORE_CONN_AG_CTX_CF17_MASK		0x3
-#define XSTORM_CORE_CONN_AG_CTX_CF17_SHIFT		2
-#define XSTORM_CORE_CONN_AG_CTX_DQ_CF_MASK		0x3
-#define XSTORM_CORE_CONN_AG_CTX_DQ_CF_SHIFT		4
-#define XSTORM_CORE_CONN_AG_CTX_TERMINATE_CF_MASK	0x3
-#define XSTORM_CORE_CONN_AG_CTX_TERMINATE_CF_SHIFT	6
+#define E4_XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_MASK	0x3
+#define E4_XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_SHIFT	0
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF17_MASK			0x3
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF17_SHIFT			2
+#define E4_XSTORM_CORE_CONN_AG_CTX_DQ_CF_MASK			0x3
+#define E4_XSTORM_CORE_CONN_AG_CTX_DQ_CF_SHIFT			4
+#define E4_XSTORM_CORE_CONN_AG_CTX_TERMINATE_CF_MASK		0x3
+#define E4_XSTORM_CORE_CONN_AG_CTX_TERMINATE_CF_SHIFT		6
 	u8 flags7;
-#define XSTORM_CORE_CONN_AG_CTX_FLUSH_Q0_MASK		0x3
-#define XSTORM_CORE_CONN_AG_CTX_FLUSH_Q0_SHIFT		0
-#define XSTORM_CORE_CONN_AG_CTX_RESERVED10_MASK		0x3
-#define XSTORM_CORE_CONN_AG_CTX_RESERVED10_SHIFT	2
-#define XSTORM_CORE_CONN_AG_CTX_SLOW_PATH_MASK		0x3
-#define XSTORM_CORE_CONN_AG_CTX_SLOW_PATH_SHIFT		4
-#define XSTORM_CORE_CONN_AG_CTX_CF0EN_MASK		0x1
-#define XSTORM_CORE_CONN_AG_CTX_CF0EN_SHIFT		6
-#define XSTORM_CORE_CONN_AG_CTX_CF1EN_MASK		0x1
-#define XSTORM_CORE_CONN_AG_CTX_CF1EN_SHIFT		7
+#define E4_XSTORM_CORE_CONN_AG_CTX_FLUSH_Q0_MASK	0x3
+#define E4_XSTORM_CORE_CONN_AG_CTX_FLUSH_Q0_SHIFT	0
+#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED10_MASK	0x3
+#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED10_SHIFT	2
+#define E4_XSTORM_CORE_CONN_AG_CTX_SLOW_PATH_MASK	0x3
+#define E4_XSTORM_CORE_CONN_AG_CTX_SLOW_PATH_SHIFT	4
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF0EN_MASK		0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF0EN_SHIFT		6
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF1EN_MASK		0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF1EN_SHIFT		7
 	u8 flags8;
-#define XSTORM_CORE_CONN_AG_CTX_CF2EN_MASK	0x1
-#define XSTORM_CORE_CONN_AG_CTX_CF2EN_SHIFT	0
-#define XSTORM_CORE_CONN_AG_CTX_CF3EN_MASK	0x1
-#define XSTORM_CORE_CONN_AG_CTX_CF3EN_SHIFT	1
-#define XSTORM_CORE_CONN_AG_CTX_CF4EN_MASK	0x1
-#define XSTORM_CORE_CONN_AG_CTX_CF4EN_SHIFT	2
-#define XSTORM_CORE_CONN_AG_CTX_CF5EN_MASK	0x1
-#define XSTORM_CORE_CONN_AG_CTX_CF5EN_SHIFT	3
-#define XSTORM_CORE_CONN_AG_CTX_CF6EN_MASK	0x1
-#define XSTORM_CORE_CONN_AG_CTX_CF6EN_SHIFT	4
-#define XSTORM_CORE_CONN_AG_CTX_CF7EN_MASK	0x1
-#define XSTORM_CORE_CONN_AG_CTX_CF7EN_SHIFT	5
-#define XSTORM_CORE_CONN_AG_CTX_CF8EN_MASK	0x1
-#define XSTORM_CORE_CONN_AG_CTX_CF8EN_SHIFT	6
-#define XSTORM_CORE_CONN_AG_CTX_CF9EN_MASK	0x1
-#define XSTORM_CORE_CONN_AG_CTX_CF9EN_SHIFT	7
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF2EN_MASK	0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF2EN_SHIFT	0
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF3EN_MASK	0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF3EN_SHIFT	1
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF4EN_MASK	0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF4EN_SHIFT	2
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF5EN_MASK	0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF5EN_SHIFT	3
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF6EN_MASK	0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF6EN_SHIFT	4
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF7EN_MASK	0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF7EN_SHIFT	5
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF8EN_MASK	0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF8EN_SHIFT	6
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF9EN_MASK	0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF9EN_SHIFT	7
 	u8 flags9;
-#define XSTORM_CORE_CONN_AG_CTX_CF10EN_MASK			0x1
-#define XSTORM_CORE_CONN_AG_CTX_CF10EN_SHIFT			0
-#define XSTORM_CORE_CONN_AG_CTX_CF11EN_MASK			0x1
-#define XSTORM_CORE_CONN_AG_CTX_CF11EN_SHIFT			1
-#define XSTORM_CORE_CONN_AG_CTX_CF12EN_MASK			0x1
-#define XSTORM_CORE_CONN_AG_CTX_CF12EN_SHIFT			2
-#define XSTORM_CORE_CONN_AG_CTX_CF13EN_MASK			0x1
-#define XSTORM_CORE_CONN_AG_CTX_CF13EN_SHIFT			3
-#define XSTORM_CORE_CONN_AG_CTX_CF14EN_MASK			0x1
-#define XSTORM_CORE_CONN_AG_CTX_CF14EN_SHIFT			4
-#define XSTORM_CORE_CONN_AG_CTX_CF15EN_MASK			0x1
-#define XSTORM_CORE_CONN_AG_CTX_CF15EN_SHIFT			5
-#define XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_EN_MASK	0x1
-#define XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_EN_SHIFT	6
-#define XSTORM_CORE_CONN_AG_CTX_CF17EN_MASK			0x1
-#define XSTORM_CORE_CONN_AG_CTX_CF17EN_SHIFT			7
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF10EN_MASK			0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF10EN_SHIFT			0
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF11EN_MASK			0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF11EN_SHIFT			1
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF12EN_MASK			0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF12EN_SHIFT			2
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF13EN_MASK			0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF13EN_SHIFT			3
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF14EN_MASK			0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF14EN_SHIFT			4
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF15EN_MASK			0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF15EN_SHIFT			5
+#define E4_XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_EN_MASK	0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_EN_SHIFT	6
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF17EN_MASK			0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF17EN_SHIFT			7
 	u8 flags10;
-#define XSTORM_CORE_CONN_AG_CTX_DQ_CF_EN_MASK		0x1
-#define XSTORM_CORE_CONN_AG_CTX_DQ_CF_EN_SHIFT		0
-#define XSTORM_CORE_CONN_AG_CTX_TERMINATE_CF_EN_MASK	0x1
-#define XSTORM_CORE_CONN_AG_CTX_TERMINATE_CF_EN_SHIFT	1
-#define XSTORM_CORE_CONN_AG_CTX_FLUSH_Q0_EN_MASK	0x1
-#define XSTORM_CORE_CONN_AG_CTX_FLUSH_Q0_EN_SHIFT	2
-#define XSTORM_CORE_CONN_AG_CTX_RESERVED11_MASK		0x1
-#define XSTORM_CORE_CONN_AG_CTX_RESERVED11_SHIFT	3
-#define XSTORM_CORE_CONN_AG_CTX_SLOW_PATH_EN_MASK	0x1
-#define XSTORM_CORE_CONN_AG_CTX_SLOW_PATH_EN_SHIFT	4
-#define XSTORM_CORE_CONN_AG_CTX_CF23EN_MASK		0x1
-#define XSTORM_CORE_CONN_AG_CTX_CF23EN_SHIFT		5
-#define XSTORM_CORE_CONN_AG_CTX_RESERVED12_MASK		0x1
-#define XSTORM_CORE_CONN_AG_CTX_RESERVED12_SHIFT	6
-#define XSTORM_CORE_CONN_AG_CTX_RESERVED13_MASK		0x1
-#define XSTORM_CORE_CONN_AG_CTX_RESERVED13_SHIFT	7
+#define E4_XSTORM_CORE_CONN_AG_CTX_DQ_CF_EN_MASK		0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_DQ_CF_EN_SHIFT		0
+#define E4_XSTORM_CORE_CONN_AG_CTX_TERMINATE_CF_EN_MASK		0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_TERMINATE_CF_EN_SHIFT	1
+#define E4_XSTORM_CORE_CONN_AG_CTX_FLUSH_Q0_EN_MASK		0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_FLUSH_Q0_EN_SHIFT		2
+#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED11_MASK		0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED11_SHIFT		3
+#define E4_XSTORM_CORE_CONN_AG_CTX_SLOW_PATH_EN_MASK		0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_SLOW_PATH_EN_SHIFT		4
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF23EN_MASK			0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF23EN_SHIFT			5
+#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED12_MASK		0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED12_SHIFT		6
+#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED13_MASK		0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED13_SHIFT		7
 	u8 flags11;
-#define XSTORM_CORE_CONN_AG_CTX_RESERVED14_MASK		0x1
-#define XSTORM_CORE_CONN_AG_CTX_RESERVED14_SHIFT	0
-#define XSTORM_CORE_CONN_AG_CTX_RESERVED15_MASK		0x1
-#define XSTORM_CORE_CONN_AG_CTX_RESERVED15_SHIFT	1
-#define XSTORM_CORE_CONN_AG_CTX_TX_DEC_RULE_EN_MASK	0x1
-#define XSTORM_CORE_CONN_AG_CTX_TX_DEC_RULE_EN_SHIFT	2
-#define XSTORM_CORE_CONN_AG_CTX_RULE5EN_MASK		0x1
-#define XSTORM_CORE_CONN_AG_CTX_RULE5EN_SHIFT		3
-#define XSTORM_CORE_CONN_AG_CTX_RULE6EN_MASK		0x1
-#define XSTORM_CORE_CONN_AG_CTX_RULE6EN_SHIFT		4
-#define XSTORM_CORE_CONN_AG_CTX_RULE7EN_MASK		0x1
-#define XSTORM_CORE_CONN_AG_CTX_RULE7EN_SHIFT		5
-#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED1_MASK	0x1
-#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED1_SHIFT	6
-#define XSTORM_CORE_CONN_AG_CTX_RULE9EN_MASK		0x1
-#define XSTORM_CORE_CONN_AG_CTX_RULE9EN_SHIFT		7
+#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED14_MASK	0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED14_SHIFT	0
+#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED15_MASK	0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED15_SHIFT	1
+#define E4_XSTORM_CORE_CONN_AG_CTX_TX_DEC_RULE_EN_MASK	0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_TX_DEC_RULE_EN_SHIFT	2
+#define E4_XSTORM_CORE_CONN_AG_CTX_RULE5EN_MASK		0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_RULE5EN_SHIFT	3
+#define E4_XSTORM_CORE_CONN_AG_CTX_RULE6EN_MASK		0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_RULE6EN_SHIFT	4
+#define E4_XSTORM_CORE_CONN_AG_CTX_RULE7EN_MASK		0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_RULE7EN_SHIFT	5
+#define E4_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED1_MASK	0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED1_SHIFT	6
+#define E4_XSTORM_CORE_CONN_AG_CTX_RULE9EN_MASK		0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_RULE9EN_SHIFT	7
 	u8 flags12;
-#define XSTORM_CORE_CONN_AG_CTX_RULE10EN_MASK		0x1
-#define XSTORM_CORE_CONN_AG_CTX_RULE10EN_SHIFT		0
-#define XSTORM_CORE_CONN_AG_CTX_RULE11EN_MASK		0x1
-#define XSTORM_CORE_CONN_AG_CTX_RULE11EN_SHIFT		1
-#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED2_MASK	0x1
-#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED2_SHIFT	2
-#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED3_MASK	0x1
-#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED3_SHIFT	3
-#define XSTORM_CORE_CONN_AG_CTX_RULE14EN_MASK		0x1
-#define XSTORM_CORE_CONN_AG_CTX_RULE14EN_SHIFT		4
-#define XSTORM_CORE_CONN_AG_CTX_RULE15EN_MASK		0x1
-#define XSTORM_CORE_CONN_AG_CTX_RULE15EN_SHIFT		5
-#define XSTORM_CORE_CONN_AG_CTX_RULE16EN_MASK		0x1
-#define XSTORM_CORE_CONN_AG_CTX_RULE16EN_SHIFT		6
-#define XSTORM_CORE_CONN_AG_CTX_RULE17EN_MASK		0x1
-#define XSTORM_CORE_CONN_AG_CTX_RULE17EN_SHIFT		7
+#define E4_XSTORM_CORE_CONN_AG_CTX_RULE10EN_MASK	0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_RULE10EN_SHIFT	0
+#define E4_XSTORM_CORE_CONN_AG_CTX_RULE11EN_MASK	0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_RULE11EN_SHIFT	1
+#define E4_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED2_MASK	0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED2_SHIFT	2
+#define E4_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED3_MASK	0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED3_SHIFT	3
+#define E4_XSTORM_CORE_CONN_AG_CTX_RULE14EN_MASK	0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_RULE14EN_SHIFT	4
+#define E4_XSTORM_CORE_CONN_AG_CTX_RULE15EN_MASK	0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_RULE15EN_SHIFT	5
+#define E4_XSTORM_CORE_CONN_AG_CTX_RULE16EN_MASK	0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_RULE16EN_SHIFT	6
+#define E4_XSTORM_CORE_CONN_AG_CTX_RULE17EN_MASK	0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_RULE17EN_SHIFT	7
 	u8 flags13;
-#define XSTORM_CORE_CONN_AG_CTX_RULE18EN_MASK		0x1
-#define XSTORM_CORE_CONN_AG_CTX_RULE18EN_SHIFT		0
-#define XSTORM_CORE_CONN_AG_CTX_RULE19EN_MASK		0x1
-#define XSTORM_CORE_CONN_AG_CTX_RULE19EN_SHIFT		1
-#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED4_MASK	0x1
-#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED4_SHIFT	2
-#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED5_MASK	0x1
-#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED5_SHIFT	3
-#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED6_MASK	0x1
-#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED6_SHIFT	4
-#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED7_MASK	0x1
-#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED7_SHIFT	5
-#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED8_MASK	0x1
-#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED8_SHIFT	6
-#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED9_MASK	0x1
-#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED9_SHIFT	7
+#define E4_XSTORM_CORE_CONN_AG_CTX_RULE18EN_MASK	0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_RULE18EN_SHIFT	0
+#define E4_XSTORM_CORE_CONN_AG_CTX_RULE19EN_MASK	0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_RULE19EN_SHIFT	1
+#define E4_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED4_MASK	0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED4_SHIFT	2
+#define E4_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED5_MASK	0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED5_SHIFT	3
+#define E4_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED6_MASK	0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED6_SHIFT	4
+#define E4_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED7_MASK	0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED7_SHIFT	5
+#define E4_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED8_MASK	0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED8_SHIFT	6
+#define E4_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED9_MASK	0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED9_SHIFT	7
 	u8 flags14;
-#define XSTORM_CORE_CONN_AG_CTX_BIT16_MASK	0x1
-#define XSTORM_CORE_CONN_AG_CTX_BIT16_SHIFT	0
-#define XSTORM_CORE_CONN_AG_CTX_BIT17_MASK	0x1
-#define XSTORM_CORE_CONN_AG_CTX_BIT17_SHIFT	1
-#define XSTORM_CORE_CONN_AG_CTX_BIT18_MASK	0x1
-#define XSTORM_CORE_CONN_AG_CTX_BIT18_SHIFT	2
-#define XSTORM_CORE_CONN_AG_CTX_BIT19_MASK	0x1
-#define XSTORM_CORE_CONN_AG_CTX_BIT19_SHIFT	3
-#define XSTORM_CORE_CONN_AG_CTX_BIT20_MASK	0x1
-#define XSTORM_CORE_CONN_AG_CTX_BIT20_SHIFT	4
-#define XSTORM_CORE_CONN_AG_CTX_BIT21_MASK	0x1
-#define XSTORM_CORE_CONN_AG_CTX_BIT21_SHIFT	5
-#define XSTORM_CORE_CONN_AG_CTX_CF23_MASK	0x3
-#define XSTORM_CORE_CONN_AG_CTX_CF23_SHIFT	6
+#define E4_XSTORM_CORE_CONN_AG_CTX_BIT16_MASK	0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_BIT16_SHIFT	0
+#define E4_XSTORM_CORE_CONN_AG_CTX_BIT17_MASK	0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_BIT17_SHIFT	1
+#define E4_XSTORM_CORE_CONN_AG_CTX_BIT18_MASK	0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_BIT18_SHIFT	2
+#define E4_XSTORM_CORE_CONN_AG_CTX_BIT19_MASK	0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_BIT19_SHIFT	3
+#define E4_XSTORM_CORE_CONN_AG_CTX_BIT20_MASK	0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_BIT20_SHIFT	4
+#define E4_XSTORM_CORE_CONN_AG_CTX_BIT21_MASK	0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_BIT21_SHIFT	5
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF23_MASK	0x3
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF23_SHIFT	6
 	u8 byte2;
 	__le16 physical_q0;
 	__le16 consolid_prod;
@@ -365,89 +659,89 @@ struct xstorm_core_conn_ag_ctx {
 	__le16 word15;
 };
 
-struct tstorm_core_conn_ag_ctx {
+struct e4_tstorm_core_conn_ag_ctx {
 	u8 byte0;
 	u8 byte1;
 	u8 flags0;
-#define TSTORM_CORE_CONN_AG_CTX_BIT0_MASK     0x1	/* exist_in_qm0 */
-#define TSTORM_CORE_CONN_AG_CTX_BIT0_SHIFT    0
-#define TSTORM_CORE_CONN_AG_CTX_BIT1_MASK     0x1	/* exist_in_qm1 */
-#define TSTORM_CORE_CONN_AG_CTX_BIT1_SHIFT    1
-#define TSTORM_CORE_CONN_AG_CTX_BIT2_MASK     0x1	/* bit2 */
-#define TSTORM_CORE_CONN_AG_CTX_BIT2_SHIFT    2
-#define TSTORM_CORE_CONN_AG_CTX_BIT3_MASK     0x1	/* bit3 */
-#define TSTORM_CORE_CONN_AG_CTX_BIT3_SHIFT    3
-#define TSTORM_CORE_CONN_AG_CTX_BIT4_MASK     0x1	/* bit4 */
-#define TSTORM_CORE_CONN_AG_CTX_BIT4_SHIFT    4
-#define TSTORM_CORE_CONN_AG_CTX_BIT5_MASK     0x1	/* bit5 */
-#define TSTORM_CORE_CONN_AG_CTX_BIT5_SHIFT    5
-#define TSTORM_CORE_CONN_AG_CTX_CF0_MASK      0x3	/* timer0cf */
-#define TSTORM_CORE_CONN_AG_CTX_CF0_SHIFT     6
+#define E4_TSTORM_CORE_CONN_AG_CTX_BIT0_MASK	0x1
+#define E4_TSTORM_CORE_CONN_AG_CTX_BIT0_SHIFT	0
+#define E4_TSTORM_CORE_CONN_AG_CTX_BIT1_MASK	0x1
+#define E4_TSTORM_CORE_CONN_AG_CTX_BIT1_SHIFT	1
+#define E4_TSTORM_CORE_CONN_AG_CTX_BIT2_MASK	0x1
+#define E4_TSTORM_CORE_CONN_AG_CTX_BIT2_SHIFT	2
+#define E4_TSTORM_CORE_CONN_AG_CTX_BIT3_MASK	0x1
+#define E4_TSTORM_CORE_CONN_AG_CTX_BIT3_SHIFT	3
+#define E4_TSTORM_CORE_CONN_AG_CTX_BIT4_MASK	0x1
+#define E4_TSTORM_CORE_CONN_AG_CTX_BIT4_SHIFT	4
+#define E4_TSTORM_CORE_CONN_AG_CTX_BIT5_MASK	0x1
+#define E4_TSTORM_CORE_CONN_AG_CTX_BIT5_SHIFT	5
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF0_MASK	0x3
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF0_SHIFT	6
 	u8 flags1;
-#define TSTORM_CORE_CONN_AG_CTX_CF1_MASK      0x3	/* timer1cf */
-#define TSTORM_CORE_CONN_AG_CTX_CF1_SHIFT     0
-#define TSTORM_CORE_CONN_AG_CTX_CF2_MASK      0x3	/* timer2cf */
-#define TSTORM_CORE_CONN_AG_CTX_CF2_SHIFT     2
-#define TSTORM_CORE_CONN_AG_CTX_CF3_MASK      0x3	/* timer_stop_all */
-#define TSTORM_CORE_CONN_AG_CTX_CF3_SHIFT     4
-#define TSTORM_CORE_CONN_AG_CTX_CF4_MASK      0x3	/* cf4 */
-#define TSTORM_CORE_CONN_AG_CTX_CF4_SHIFT     6
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF1_MASK	0x3
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF1_SHIFT	0
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF2_MASK	0x3
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF2_SHIFT	2
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF3_MASK	0x3
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF3_SHIFT	4
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF4_MASK	0x3
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF4_SHIFT	6
 	u8 flags2;
-#define TSTORM_CORE_CONN_AG_CTX_CF5_MASK      0x3	/* cf5 */
-#define TSTORM_CORE_CONN_AG_CTX_CF5_SHIFT     0
-#define TSTORM_CORE_CONN_AG_CTX_CF6_MASK      0x3	/* cf6 */
-#define TSTORM_CORE_CONN_AG_CTX_CF6_SHIFT     2
-#define TSTORM_CORE_CONN_AG_CTX_CF7_MASK      0x3	/* cf7 */
-#define TSTORM_CORE_CONN_AG_CTX_CF7_SHIFT     4
-#define TSTORM_CORE_CONN_AG_CTX_CF8_MASK      0x3	/* cf8 */
-#define TSTORM_CORE_CONN_AG_CTX_CF8_SHIFT     6
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF5_MASK	0x3
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF5_SHIFT	0
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF6_MASK	0x3
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF6_SHIFT	2
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF7_MASK	0x3
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF7_SHIFT	4
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF8_MASK	0x3
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF8_SHIFT	6
 	u8 flags3;
-#define TSTORM_CORE_CONN_AG_CTX_CF9_MASK      0x3	/* cf9 */
-#define TSTORM_CORE_CONN_AG_CTX_CF9_SHIFT     0
-#define TSTORM_CORE_CONN_AG_CTX_CF10_MASK     0x3	/* cf10 */
-#define TSTORM_CORE_CONN_AG_CTX_CF10_SHIFT    2
-#define TSTORM_CORE_CONN_AG_CTX_CF0EN_MASK    0x1	/* cf0en */
-#define TSTORM_CORE_CONN_AG_CTX_CF0EN_SHIFT   4
-#define TSTORM_CORE_CONN_AG_CTX_CF1EN_MASK    0x1	/* cf1en */
-#define TSTORM_CORE_CONN_AG_CTX_CF1EN_SHIFT   5
-#define TSTORM_CORE_CONN_AG_CTX_CF2EN_MASK    0x1	/* cf2en */
-#define TSTORM_CORE_CONN_AG_CTX_CF2EN_SHIFT   6
-#define TSTORM_CORE_CONN_AG_CTX_CF3EN_MASK    0x1	/* cf3en */
-#define TSTORM_CORE_CONN_AG_CTX_CF3EN_SHIFT   7
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF9_MASK	0x3
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF9_SHIFT	0
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF10_MASK	0x3
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF10_SHIFT	2
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF0EN_MASK	0x1
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF0EN_SHIFT	4
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF1EN_MASK	0x1
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF1EN_SHIFT	5
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF2EN_MASK	0x1
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF2EN_SHIFT	6
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF3EN_MASK	0x1
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF3EN_SHIFT	7
 	u8 flags4;
-#define TSTORM_CORE_CONN_AG_CTX_CF4EN_MASK    0x1	/* cf4en */
-#define TSTORM_CORE_CONN_AG_CTX_CF4EN_SHIFT   0
-#define TSTORM_CORE_CONN_AG_CTX_CF5EN_MASK    0x1	/* cf5en */
-#define TSTORM_CORE_CONN_AG_CTX_CF5EN_SHIFT   1
-#define TSTORM_CORE_CONN_AG_CTX_CF6EN_MASK    0x1	/* cf6en */
-#define TSTORM_CORE_CONN_AG_CTX_CF6EN_SHIFT   2
-#define TSTORM_CORE_CONN_AG_CTX_CF7EN_MASK    0x1	/* cf7en */
-#define TSTORM_CORE_CONN_AG_CTX_CF7EN_SHIFT   3
-#define TSTORM_CORE_CONN_AG_CTX_CF8EN_MASK    0x1	/* cf8en */
-#define TSTORM_CORE_CONN_AG_CTX_CF8EN_SHIFT   4
-#define TSTORM_CORE_CONN_AG_CTX_CF9EN_MASK    0x1	/* cf9en */
-#define TSTORM_CORE_CONN_AG_CTX_CF9EN_SHIFT   5
-#define TSTORM_CORE_CONN_AG_CTX_CF10EN_MASK   0x1	/* cf10en */
-#define TSTORM_CORE_CONN_AG_CTX_CF10EN_SHIFT  6
-#define TSTORM_CORE_CONN_AG_CTX_RULE0EN_MASK  0x1	/* rule0en */
-#define TSTORM_CORE_CONN_AG_CTX_RULE0EN_SHIFT 7
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF4EN_MASK		0x1
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF4EN_SHIFT		0
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF5EN_MASK		0x1
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF5EN_SHIFT		1
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF6EN_MASK		0x1
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF6EN_SHIFT		2
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF7EN_MASK		0x1
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF7EN_SHIFT		3
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF8EN_MASK		0x1
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF8EN_SHIFT		4
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF9EN_MASK		0x1
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF9EN_SHIFT		5
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF10EN_MASK		0x1
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF10EN_SHIFT		6
+#define E4_TSTORM_CORE_CONN_AG_CTX_RULE0EN_MASK		0x1
+#define E4_TSTORM_CORE_CONN_AG_CTX_RULE0EN_SHIFT	7
 	u8 flags5;
-#define TSTORM_CORE_CONN_AG_CTX_RULE1EN_MASK  0x1	/* rule1en */
-#define TSTORM_CORE_CONN_AG_CTX_RULE1EN_SHIFT 0
-#define TSTORM_CORE_CONN_AG_CTX_RULE2EN_MASK  0x1	/* rule2en */
-#define TSTORM_CORE_CONN_AG_CTX_RULE2EN_SHIFT 1
-#define TSTORM_CORE_CONN_AG_CTX_RULE3EN_MASK  0x1	/* rule3en */
-#define TSTORM_CORE_CONN_AG_CTX_RULE3EN_SHIFT 2
-#define TSTORM_CORE_CONN_AG_CTX_RULE4EN_MASK  0x1	/* rule4en */
-#define TSTORM_CORE_CONN_AG_CTX_RULE4EN_SHIFT 3
-#define TSTORM_CORE_CONN_AG_CTX_RULE5EN_MASK  0x1	/* rule5en */
-#define TSTORM_CORE_CONN_AG_CTX_RULE5EN_SHIFT 4
-#define TSTORM_CORE_CONN_AG_CTX_RULE6EN_MASK  0x1	/* rule6en */
-#define TSTORM_CORE_CONN_AG_CTX_RULE6EN_SHIFT 5
-#define TSTORM_CORE_CONN_AG_CTX_RULE7EN_MASK  0x1	/* rule7en */
-#define TSTORM_CORE_CONN_AG_CTX_RULE7EN_SHIFT 6
-#define TSTORM_CORE_CONN_AG_CTX_RULE8EN_MASK  0x1	/* rule8en */
-#define TSTORM_CORE_CONN_AG_CTX_RULE8EN_SHIFT 7
+#define E4_TSTORM_CORE_CONN_AG_CTX_RULE1EN_MASK		0x1
+#define E4_TSTORM_CORE_CONN_AG_CTX_RULE1EN_SHIFT	0
+#define E4_TSTORM_CORE_CONN_AG_CTX_RULE2EN_MASK		0x1
+#define E4_TSTORM_CORE_CONN_AG_CTX_RULE2EN_SHIFT	1
+#define E4_TSTORM_CORE_CONN_AG_CTX_RULE3EN_MASK		0x1
+#define E4_TSTORM_CORE_CONN_AG_CTX_RULE3EN_SHIFT	2
+#define E4_TSTORM_CORE_CONN_AG_CTX_RULE4EN_MASK		0x1
+#define E4_TSTORM_CORE_CONN_AG_CTX_RULE4EN_SHIFT	3
+#define E4_TSTORM_CORE_CONN_AG_CTX_RULE5EN_MASK		0x1
+#define E4_TSTORM_CORE_CONN_AG_CTX_RULE5EN_SHIFT	4
+#define E4_TSTORM_CORE_CONN_AG_CTX_RULE6EN_MASK		0x1
+#define E4_TSTORM_CORE_CONN_AG_CTX_RULE6EN_SHIFT	5
+#define E4_TSTORM_CORE_CONN_AG_CTX_RULE7EN_MASK		0x1
+#define E4_TSTORM_CORE_CONN_AG_CTX_RULE7EN_SHIFT	6
+#define E4_TSTORM_CORE_CONN_AG_CTX_RULE8EN_MASK		0x1
+#define E4_TSTORM_CORE_CONN_AG_CTX_RULE8EN_SHIFT	7
 	__le32 reg0;
 	__le32 reg1;
 	__le32 reg2;
@@ -469,63 +763,63 @@ struct tstorm_core_conn_ag_ctx {
 	__le32 reg10;
 };
 
-struct ustorm_core_conn_ag_ctx {
+struct e4_ustorm_core_conn_ag_ctx {
 	u8 reserved;
 	u8 byte1;
 	u8 flags0;
-#define USTORM_CORE_CONN_AG_CTX_BIT0_MASK	0x1
-#define USTORM_CORE_CONN_AG_CTX_BIT0_SHIFT	0
-#define USTORM_CORE_CONN_AG_CTX_BIT1_MASK	0x1
-#define USTORM_CORE_CONN_AG_CTX_BIT1_SHIFT	1
-#define USTORM_CORE_CONN_AG_CTX_CF0_MASK	0x3
-#define USTORM_CORE_CONN_AG_CTX_CF0_SHIFT	2
-#define USTORM_CORE_CONN_AG_CTX_CF1_MASK	0x3
-#define USTORM_CORE_CONN_AG_CTX_CF1_SHIFT	4
-#define USTORM_CORE_CONN_AG_CTX_CF2_MASK	0x3
-#define USTORM_CORE_CONN_AG_CTX_CF2_SHIFT	6
+#define E4_USTORM_CORE_CONN_AG_CTX_BIT0_MASK	0x1
+#define E4_USTORM_CORE_CONN_AG_CTX_BIT0_SHIFT	0
+#define E4_USTORM_CORE_CONN_AG_CTX_BIT1_MASK	0x1
+#define E4_USTORM_CORE_CONN_AG_CTX_BIT1_SHIFT	1
+#define E4_USTORM_CORE_CONN_AG_CTX_CF0_MASK	0x3
+#define E4_USTORM_CORE_CONN_AG_CTX_CF0_SHIFT	2
+#define E4_USTORM_CORE_CONN_AG_CTX_CF1_MASK	0x3
+#define E4_USTORM_CORE_CONN_AG_CTX_CF1_SHIFT	4
+#define E4_USTORM_CORE_CONN_AG_CTX_CF2_MASK	0x3
+#define E4_USTORM_CORE_CONN_AG_CTX_CF2_SHIFT	6
 	u8 flags1;
-#define USTORM_CORE_CONN_AG_CTX_CF3_MASK	0x3
-#define USTORM_CORE_CONN_AG_CTX_CF3_SHIFT	0
-#define USTORM_CORE_CONN_AG_CTX_CF4_MASK	0x3
-#define USTORM_CORE_CONN_AG_CTX_CF4_SHIFT	2
-#define USTORM_CORE_CONN_AG_CTX_CF5_MASK	0x3
-#define USTORM_CORE_CONN_AG_CTX_CF5_SHIFT	4
-#define USTORM_CORE_CONN_AG_CTX_CF6_MASK	0x3
-#define USTORM_CORE_CONN_AG_CTX_CF6_SHIFT	6
+#define E4_USTORM_CORE_CONN_AG_CTX_CF3_MASK	0x3
+#define E4_USTORM_CORE_CONN_AG_CTX_CF3_SHIFT	0
+#define E4_USTORM_CORE_CONN_AG_CTX_CF4_MASK	0x3
+#define E4_USTORM_CORE_CONN_AG_CTX_CF4_SHIFT	2
+#define E4_USTORM_CORE_CONN_AG_CTX_CF5_MASK	0x3
+#define E4_USTORM_CORE_CONN_AG_CTX_CF5_SHIFT	4
+#define E4_USTORM_CORE_CONN_AG_CTX_CF6_MASK	0x3
+#define E4_USTORM_CORE_CONN_AG_CTX_CF6_SHIFT	6
 	u8 flags2;
-#define USTORM_CORE_CONN_AG_CTX_CF0EN_MASK	0x1
-#define USTORM_CORE_CONN_AG_CTX_CF0EN_SHIFT	0
-#define USTORM_CORE_CONN_AG_CTX_CF1EN_MASK	0x1
-#define USTORM_CORE_CONN_AG_CTX_CF1EN_SHIFT	1
-#define USTORM_CORE_CONN_AG_CTX_CF2EN_MASK	0x1
-#define USTORM_CORE_CONN_AG_CTX_CF2EN_SHIFT	2
-#define USTORM_CORE_CONN_AG_CTX_CF3EN_MASK	0x1
-#define USTORM_CORE_CONN_AG_CTX_CF3EN_SHIFT	3
-#define USTORM_CORE_CONN_AG_CTX_CF4EN_MASK	0x1
-#define USTORM_CORE_CONN_AG_CTX_CF4EN_SHIFT	4
-#define USTORM_CORE_CONN_AG_CTX_CF5EN_MASK	0x1
-#define USTORM_CORE_CONN_AG_CTX_CF5EN_SHIFT	5
-#define USTORM_CORE_CONN_AG_CTX_CF6EN_MASK	0x1
-#define USTORM_CORE_CONN_AG_CTX_CF6EN_SHIFT	6
-#define USTORM_CORE_CONN_AG_CTX_RULE0EN_MASK	0x1
-#define USTORM_CORE_CONN_AG_CTX_RULE0EN_SHIFT	7
+#define E4_USTORM_CORE_CONN_AG_CTX_CF0EN_MASK		0x1
+#define E4_USTORM_CORE_CONN_AG_CTX_CF0EN_SHIFT		0
+#define E4_USTORM_CORE_CONN_AG_CTX_CF1EN_MASK		0x1
+#define E4_USTORM_CORE_CONN_AG_CTX_CF1EN_SHIFT		1
+#define E4_USTORM_CORE_CONN_AG_CTX_CF2EN_MASK		0x1
+#define E4_USTORM_CORE_CONN_AG_CTX_CF2EN_SHIFT		2
+#define E4_USTORM_CORE_CONN_AG_CTX_CF3EN_MASK		0x1
+#define E4_USTORM_CORE_CONN_AG_CTX_CF3EN_SHIFT		3
+#define E4_USTORM_CORE_CONN_AG_CTX_CF4EN_MASK		0x1
+#define E4_USTORM_CORE_CONN_AG_CTX_CF4EN_SHIFT		4
+#define E4_USTORM_CORE_CONN_AG_CTX_CF5EN_MASK		0x1
+#define E4_USTORM_CORE_CONN_AG_CTX_CF5EN_SHIFT		5
+#define E4_USTORM_CORE_CONN_AG_CTX_CF6EN_MASK		0x1
+#define E4_USTORM_CORE_CONN_AG_CTX_CF6EN_SHIFT		6
+#define E4_USTORM_CORE_CONN_AG_CTX_RULE0EN_MASK		0x1
+#define E4_USTORM_CORE_CONN_AG_CTX_RULE0EN_SHIFT	7
 	u8 flags3;
-#define USTORM_CORE_CONN_AG_CTX_RULE1EN_MASK	0x1
-#define USTORM_CORE_CONN_AG_CTX_RULE1EN_SHIFT	0
-#define USTORM_CORE_CONN_AG_CTX_RULE2EN_MASK	0x1
-#define USTORM_CORE_CONN_AG_CTX_RULE2EN_SHIFT	1
-#define USTORM_CORE_CONN_AG_CTX_RULE3EN_MASK	0x1
-#define USTORM_CORE_CONN_AG_CTX_RULE3EN_SHIFT	2
-#define USTORM_CORE_CONN_AG_CTX_RULE4EN_MASK	0x1
-#define USTORM_CORE_CONN_AG_CTX_RULE4EN_SHIFT	3
-#define USTORM_CORE_CONN_AG_CTX_RULE5EN_MASK	0x1
-#define USTORM_CORE_CONN_AG_CTX_RULE5EN_SHIFT	4
-#define USTORM_CORE_CONN_AG_CTX_RULE6EN_MASK	0x1
-#define USTORM_CORE_CONN_AG_CTX_RULE6EN_SHIFT	5
-#define USTORM_CORE_CONN_AG_CTX_RULE7EN_MASK	0x1
-#define USTORM_CORE_CONN_AG_CTX_RULE7EN_SHIFT	6
-#define USTORM_CORE_CONN_AG_CTX_RULE8EN_MASK	0x1
-#define USTORM_CORE_CONN_AG_CTX_RULE8EN_SHIFT	7
+#define E4_USTORM_CORE_CONN_AG_CTX_RULE1EN_MASK		0x1
+#define E4_USTORM_CORE_CONN_AG_CTX_RULE1EN_SHIFT	0
+#define E4_USTORM_CORE_CONN_AG_CTX_RULE2EN_MASK		0x1
+#define E4_USTORM_CORE_CONN_AG_CTX_RULE2EN_SHIFT	1
+#define E4_USTORM_CORE_CONN_AG_CTX_RULE3EN_MASK		0x1
+#define E4_USTORM_CORE_CONN_AG_CTX_RULE3EN_SHIFT	2
+#define E4_USTORM_CORE_CONN_AG_CTX_RULE4EN_MASK		0x1
+#define E4_USTORM_CORE_CONN_AG_CTX_RULE4EN_SHIFT	3
+#define E4_USTORM_CORE_CONN_AG_CTX_RULE5EN_MASK		0x1
+#define E4_USTORM_CORE_CONN_AG_CTX_RULE5EN_SHIFT	4
+#define E4_USTORM_CORE_CONN_AG_CTX_RULE6EN_MASK		0x1
+#define E4_USTORM_CORE_CONN_AG_CTX_RULE6EN_SHIFT	5
+#define E4_USTORM_CORE_CONN_AG_CTX_RULE7EN_MASK		0x1
+#define E4_USTORM_CORE_CONN_AG_CTX_RULE7EN_SHIFT	6
+#define E4_USTORM_CORE_CONN_AG_CTX_RULE8EN_MASK		0x1
+#define E4_USTORM_CORE_CONN_AG_CTX_RULE8EN_SHIFT	7
 	u8 byte2;
 	u8 byte3;
 	__le16 word0;
@@ -549,272 +843,20 @@ struct ustorm_core_conn_st_ctx {
 };
 
 /* core connection context */
-struct core_conn_context {
+struct e4_core_conn_context {
 	struct ystorm_core_conn_st_ctx ystorm_st_context;
 	struct regpair ystorm_st_padding[2];
 	struct pstorm_core_conn_st_ctx pstorm_st_context;
 	struct regpair pstorm_st_padding[2];
 	struct xstorm_core_conn_st_ctx xstorm_st_context;
-	struct xstorm_core_conn_ag_ctx xstorm_ag_context;
-	struct tstorm_core_conn_ag_ctx tstorm_ag_context;
-	struct ustorm_core_conn_ag_ctx ustorm_ag_context;
+	struct e4_xstorm_core_conn_ag_ctx xstorm_ag_context;
+	struct e4_tstorm_core_conn_ag_ctx tstorm_ag_context;
+	struct e4_ustorm_core_conn_ag_ctx ustorm_ag_context;
 	struct mstorm_core_conn_st_ctx mstorm_st_context;
 	struct ustorm_core_conn_st_ctx ustorm_st_context;
 	struct regpair ustorm_st_padding[2];
 };
 
-enum core_error_handle {
-	LL2_DROP_PACKET,
-	LL2_DO_NOTHING,
-	LL2_ASSERT,
-	MAX_CORE_ERROR_HANDLE
-};
-
-enum core_event_opcode {
-	CORE_EVENT_TX_QUEUE_START,
-	CORE_EVENT_TX_QUEUE_STOP,
-	CORE_EVENT_RX_QUEUE_START,
-	CORE_EVENT_RX_QUEUE_STOP,
-	CORE_EVENT_RX_QUEUE_FLUSH,
-	MAX_CORE_EVENT_OPCODE
-};
-
-enum core_l4_pseudo_checksum_mode {
-	CORE_L4_PSEUDO_CSUM_CORRECT_LENGTH,
-	CORE_L4_PSEUDO_CSUM_ZERO_LENGTH,
-	MAX_CORE_L4_PSEUDO_CHECKSUM_MODE
-};
-
-struct core_ll2_port_stats {
-	struct regpair gsi_invalid_hdr;
-	struct regpair gsi_invalid_pkt_length;
-	struct regpair gsi_unsupported_pkt_typ;
-	struct regpair gsi_crcchksm_error;
-};
-
-struct core_ll2_pstorm_per_queue_stat {
-	struct regpair sent_ucast_bytes;
-	struct regpair sent_mcast_bytes;
-	struct regpair sent_bcast_bytes;
-	struct regpair sent_ucast_pkts;
-	struct regpair sent_mcast_pkts;
-	struct regpair sent_bcast_pkts;
-};
-
-struct core_ll2_rx_prod {
-	__le16 bd_prod;
-	__le16 cqe_prod;
-	__le32 reserved;
-};
-
-struct core_ll2_tstorm_per_queue_stat {
-	struct regpair packet_too_big_discard;
-	struct regpair no_buff_discard;
-};
-
-struct core_ll2_ustorm_per_queue_stat {
-	struct regpair rcv_ucast_bytes;
-	struct regpair rcv_mcast_bytes;
-	struct regpair rcv_bcast_bytes;
-	struct regpair rcv_ucast_pkts;
-	struct regpair rcv_mcast_pkts;
-	struct regpair rcv_bcast_pkts;
-};
-
-enum core_ramrod_cmd_id {
-	CORE_RAMROD_UNUSED,
-	CORE_RAMROD_RX_QUEUE_START,
-	CORE_RAMROD_TX_QUEUE_START,
-	CORE_RAMROD_RX_QUEUE_STOP,
-	CORE_RAMROD_TX_QUEUE_STOP,
-	CORE_RAMROD_RX_QUEUE_FLUSH,
-	MAX_CORE_RAMROD_CMD_ID
-};
-
-enum core_roce_flavor_type {
-	CORE_ROCE,
-	CORE_RROCE,
-	MAX_CORE_ROCE_FLAVOR_TYPE
-};
-
-struct core_rx_action_on_error {
-	u8 error_type;
-#define CORE_RX_ACTION_ON_ERROR_PACKET_TOO_BIG_MASK	0x3
-#define CORE_RX_ACTION_ON_ERROR_PACKET_TOO_BIG_SHIFT 0
-#define CORE_RX_ACTION_ON_ERROR_NO_BUFF_MASK	0x3
-#define CORE_RX_ACTION_ON_ERROR_NO_BUFF_SHIFT	2
-#define CORE_RX_ACTION_ON_ERROR_RESERVED_MASK	0xF
-#define CORE_RX_ACTION_ON_ERROR_RESERVED_SHIFT	4
-};
-
-struct core_rx_bd {
-	struct regpair addr;
-	__le16 reserved[4];
-};
-
-struct core_rx_bd_with_buff_len {
-	struct regpair addr;
-	__le16 buff_length;
-	__le16 reserved[3];
-};
-
-union core_rx_bd_union {
-	struct core_rx_bd rx_bd;
-	struct core_rx_bd_with_buff_len rx_bd_with_len;
-};
-
-struct core_rx_cqe_opaque_data {
-	__le32 data[2];
-};
-
-enum core_rx_cqe_type {
-	CORE_RX_CQE_ILLIGAL_TYPE,
-	CORE_RX_CQE_TYPE_REGULAR,
-	CORE_RX_CQE_TYPE_GSI_OFFLOAD,
-	CORE_RX_CQE_TYPE_SLOW_PATH,
-	MAX_CORE_RX_CQE_TYPE
-};
-
-struct core_rx_fast_path_cqe {
-	u8 type;
-	u8 placement_offset;
-	struct parsing_and_err_flags parse_flags;
-	__le16 packet_length;
-	__le16 vlan;
-	struct core_rx_cqe_opaque_data opaque_data;
-	struct parsing_err_flags err_flags;
-	__le16 reserved0;
-	__le32 reserved1[3];
-};
-
-struct core_rx_gsi_offload_cqe {
-	u8 type;
-	u8 data_length_error;
-	struct parsing_and_err_flags parse_flags;
-	__le16 data_length;
-	__le16 vlan;
-	__le32 src_mac_addrhi;
-	__le16 src_mac_addrlo;
-	__le16 qp_id;
-	__le32 gid_dst[4];
-};
-
-struct core_rx_slow_path_cqe {
-	u8 type;
-	u8 ramrod_cmd_id;
-	__le16 echo;
-	struct core_rx_cqe_opaque_data opaque_data;
-	__le32 reserved1[5];
-};
-
-union core_rx_cqe_union {
-	struct core_rx_fast_path_cqe rx_cqe_fp;
-	struct core_rx_gsi_offload_cqe rx_cqe_gsi;
-	struct core_rx_slow_path_cqe rx_cqe_sp;
-};
-
-struct core_rx_start_ramrod_data {
-	struct regpair bd_base;
-	struct regpair cqe_pbl_addr;
-	__le16 mtu;
-	__le16 sb_id;
-	u8 sb_index;
-	u8 complete_cqe_flg;
-	u8 complete_event_flg;
-	u8 drop_ttl0_flg;
-	__le16 num_of_pbl_pages;
-	u8 inner_vlan_removal_en;
-	u8 queue_id;
-	u8 main_func_queue;
-	u8 mf_si_bcast_accept_all;
-	u8 mf_si_mcast_accept_all;
-	struct core_rx_action_on_error action_on_error;
-	u8 gsi_offload_flag;
-	u8 reserved[7];
-};
-
-struct core_rx_stop_ramrod_data {
-	u8 complete_cqe_flg;
-	u8 complete_event_flg;
-	u8 queue_id;
-	u8 reserved1;
-	__le16 reserved2[2];
-};
-
-struct core_tx_bd_data {
-	__le16 as_bitfield;
-#define CORE_TX_BD_DATA_FORCE_VLAN_MODE_MASK	0x1
-#define CORE_TX_BD_DATA_FORCE_VLAN_MODE_SHIFT     0
-#define CORE_TX_BD_DATA_VLAN_INSERTION_MASK	0x1
-#define CORE_TX_BD_DATA_VLAN_INSERTION_SHIFT      1
-#define CORE_TX_BD_DATA_START_BD_MASK	0x1
-#define CORE_TX_BD_DATA_START_BD_SHIFT            2
-#define CORE_TX_BD_DATA_IP_CSUM_MASK	0x1
-#define CORE_TX_BD_DATA_IP_CSUM_SHIFT             3
-#define CORE_TX_BD_DATA_L4_CSUM_MASK	0x1
-#define CORE_TX_BD_DATA_L4_CSUM_SHIFT             4
-#define CORE_TX_BD_DATA_IPV6_EXT_MASK	0x1
-#define CORE_TX_BD_DATA_IPV6_EXT_SHIFT            5
-#define CORE_TX_BD_DATA_L4_PROTOCOL_MASK	0x1
-#define CORE_TX_BD_DATA_L4_PROTOCOL_SHIFT         6
-#define CORE_TX_BD_DATA_L4_PSEUDO_CSUM_MODE_MASK	0x1
-#define CORE_TX_BD_DATA_L4_PSEUDO_CSUM_MODE_SHIFT 7
-#define CORE_TX_BD_DATA_NBDS_MASK	0xF
-#define CORE_TX_BD_DATA_NBDS_SHIFT                8
-#define CORE_TX_BD_DATA_ROCE_FLAV_MASK	0x1
-#define CORE_TX_BD_DATA_ROCE_FLAV_SHIFT           12
-#define CORE_TX_BD_DATA_IP_LEN_MASK	0x1
-#define CORE_TX_BD_DATA_IP_LEN_SHIFT              13
-#define CORE_TX_BD_DATA_RESERVED0_MASK            0x3
-#define CORE_TX_BD_DATA_RESERVED0_SHIFT           14
-};
-
-struct core_tx_bd {
-	struct regpair addr;
-	__le16 nbytes;
-	__le16 nw_vlan_or_lb_echo;
-	struct core_tx_bd_data bd_data;
-	__le16 bitfield1;
-#define CORE_TX_BD_L4_HDR_OFFSET_W_MASK	0x3FFF
-#define CORE_TX_BD_L4_HDR_OFFSET_W_SHIFT 0
-#define CORE_TX_BD_TX_DST_MASK		0x3
-#define CORE_TX_BD_TX_DST_SHIFT		14
-};
-
-enum core_tx_dest {
-	CORE_TX_DEST_NW,
-	CORE_TX_DEST_LB,
-	CORE_TX_DEST_RESERVED,
-	CORE_TX_DEST_DROP,
-	MAX_CORE_TX_DEST
-};
-
-struct core_tx_start_ramrod_data {
-	struct regpair pbl_base_addr;
-	__le16 mtu;
-	__le16 sb_id;
-	u8 sb_index;
-	u8 stats_en;
-	u8 stats_id;
-	u8 conn_type;
-	__le16 pbl_size;
-	__le16 qm_pq_id;
-	u8 gsi_offload_flag;
-	u8 resrved[3];
-};
-
-struct core_tx_stop_ramrod_data {
-	__le32 reserved0[2];
-};
-
-enum dcb_dscp_update_mode {
-	DONT_UPDATE_DCB_DSCP,
-	UPDATE_DCB,
-	UPDATE_DSCP,
-	UPDATE_DCB_DSCP,
-	MAX_DCB_DSCP_UPDATE_MODE
-};
-
 struct eth_mstorm_per_pf_stat {
 	struct regpair gre_discard_pkts;
 	struct regpair vxlan_discard_pkts;
@@ -896,6 +938,50 @@ struct eth_ustorm_per_queue_stat {
 	struct regpair rcv_bcast_pkts;
 };
 
+/* Event Ring VF-PF Channel data */
+struct vf_pf_channel_eqe_data {
+	struct regpair msg_addr;
+};
+
+/* Event Ring malicious VF data */
+struct malicious_vf_eqe_data {
+	u8 vf_id;
+	u8 err_id;
+	__le16 reserved[3];
+};
+
+/* Event Ring initial cleanup data */
+struct initial_cleanup_eqe_data {
+	u8 vf_id;
+	u8 reserved[7];
+};
+
+/* Event Data Union */
+union event_ring_data {
+	u8 bytes[8];
+	struct vf_pf_channel_eqe_data vf_pf_channel;
+	struct iscsi_eqe_data iscsi_info;
+	struct iscsi_connect_done_results iscsi_conn_done_info;
+	union rdma_eqe_data rdma_data;
+	struct malicious_vf_eqe_data malicious_vf;
+	struct initial_cleanup_eqe_data vf_init_cleanup;
+};
+
+/* Event Ring Entry */
+struct event_ring_entry {
+	u8 protocol_id;
+	u8 opcode;
+	__le16 reserved0;
+	__le16 echo;
+	u8 fw_return_code;
+	u8 flags;
+#define EVENT_RING_ENTRY_ASYNC_MASK		0x1
+#define EVENT_RING_ENTRY_ASYNC_SHIFT		0
+#define EVENT_RING_ENTRY_RESERVED1_MASK		0x7F
+#define EVENT_RING_ENTRY_RESERVED1_SHIFT	1
+	union event_ring_data data;
+};
+
 /* Event Ring Next Page Address */
 struct event_ring_next_addr {
 	struct regpair addr;
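
The event_ring_entry added above is what slowpath completion handling
consumes: the ASYNC flag in 'flags' distinguishes asynchronous firmware
events from ramrod completions that carry back an echo cookie. A
simplified host-order sketch of that dispatch (the real struct uses
__le16 fields and the union event_ring_data payload):

	#include <stdint.h>
	#include <stdio.h>

	#define EVENT_RING_ENTRY_ASYNC_MASK	0x1
	#define EVENT_RING_ENTRY_ASYNC_SHIFT	0

	struct eqe_sketch {
		uint8_t protocol_id;
		uint8_t opcode;
		uint16_t echo;		/* cookie echoed back from the ramrod */
		uint8_t fw_return_code;
		uint8_t flags;
		uint8_t data[8];	/* stands in for union event_ring_data */
	};

	static void handle_eqe(const struct eqe_sketch *eqe)
	{
		unsigned int async = (eqe->flags >> EVENT_RING_ENTRY_ASYNC_SHIFT) &
				     EVENT_RING_ENTRY_ASYNC_MASK;

		if (async)
			printf("async event: proto %u opcode %u\n",
			       eqe->protocol_id, eqe->opcode);
		else
			printf("ramrod completion: echo %u rc %u\n",
			       eqe->echo, eqe->fw_return_code);
	}

	int main(void)
	{
		struct eqe_sketch eqe = { .opcode = 1, .echo = 42 };

		handle_eqe(&eqe);
		return 0;
	}
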
@@ -908,12 +994,21 @@ union event_ring_element {
 	struct event_ring_next_addr next_addr;
 };
 
+/* Port flow control mode */
 enum fw_flow_ctrl_mode {
 	flow_ctrl_pause,
 	flow_ctrl_pfc,
 	MAX_FW_FLOW_CTRL_MODE
 };
 
+/* GFT profile type */
+enum gft_profile_type {
+	GFT_PROFILE_TYPE_4_TUPLE,
+	GFT_PROFILE_TYPE_L4_DST_PORT,
+	GFT_PROFILE_TYPE_IP_DST_PORT,
+	MAX_GFT_PROFILE_TYPE
+};
+
 /* Major and Minor hsi Versions */
 struct hsi_fp_ver_struct {
 	u8 minor_ver_arr[2];
@@ -921,14 +1016,14 @@ struct hsi_fp_ver_struct {
 };
 
 enum iwarp_ll2_tx_queues {
-	IWARP_LL2_IN_ORDER_TX_QUEUE =			1,
+	IWARP_LL2_IN_ORDER_TX_QUEUE = 1,
 	IWARP_LL2_ALIGNED_TX_QUEUE,
 	IWARP_LL2_ALIGNED_RIGHT_TRIMMED_TX_QUEUE,
 	IWARP_LL2_ERROR,
 	MAX_IWARP_LL2_TX_QUEUES
 };
 
-/* Mstorm non-triggering VF zone */
+/* Malicious VF error ID */
 enum malicious_vf_error_id {
 	MALICIOUS_VF_NO_ERROR,
 	VF_PF_CHANNEL_NOT_READY,
@@ -951,9 +1046,11 @@ enum malicious_vf_error_id {
 	ETH_TUNN_IPV6_EXT_NBD_ERR,
 	ETH_CONTROL_PACKET_VIOLATION,
 	ETH_ANTI_SPOOFING_ERR,
+	ETH_PACKET_SIZE_TOO_LARGE,
 	MAX_MALICIOUS_VF_ERROR_ID
 };
 
+/* Mstorm non-triggering VF zone */
 struct mstorm_non_trigger_vf_zone {
 	struct eth_mstorm_per_queue_stat eth_queue_stat;
 	struct eth_rx_prod_data eth_rx_queue_producers[ETH_MAX_NUM_RX_QUEUES_PER_VF_QUAD];
@@ -962,7 +1059,21 @@ struct mstorm_non_trigger_vf_zone {
 /* Mstorm VF zone */
 struct mstorm_vf_zone {
 	struct mstorm_non_trigger_vf_zone non_trigger;
+};
 
+/* VLAN header including TPID and TCI fields */
+struct vlan_header {
+	__le16 tpid;
+	__le16 tci;
+};
+
+/* Outer tag configuration */
+struct outer_tag_config_struct {
+	u8 enable_stag_pri_change;
+	u8 pri_map_valid;
+	u8 reserved[2];
+	struct vlan_header outer_tag;
+	u8 inner_to_outer_pri_map[8];
 };
 
 /* personality per PF */
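
The outer_tag_config_struct above replaces the old flat outer_tag and
pri_map fields of pf_start_ramrod_data (see the hunk further below). A
hedged sketch of filling it, using a host-order stand-in; the 0x8100
TPID and identity priority map are illustrative defaults, not values
mandated by the firmware interface:

	#include <stdint.h>
	#include <stdio.h>

	struct vlan_header_sketch {
		uint16_t tpid;
		uint16_t tci;
	};

	struct outer_tag_config_sketch {
		uint8_t enable_stag_pri_change;
		uint8_t pri_map_valid;
		struct vlan_header_sketch outer_tag;
		uint8_t inner_to_outer_pri_map[8];
	};

	int main(void)
	{
		struct outer_tag_config_sketch cfg = { 0 };
		int i;

		cfg.outer_tag.tpid = 0x8100;	/* 802.1Q TPID */
		cfg.pri_map_valid = 1;
		for (i = 0; i < 8; i++)		/* identity inner->outer PCP map */
			cfg.inner_to_outer_pri_map[i] = (uint8_t)i;

		printf("tpid 0x%04x, pri0 -> %u\n",
		       cfg.outer_tag.tpid, cfg.inner_to_outer_pri_map[0]);
		return 0;
	}
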
@@ -974,7 +1085,7 @@ enum personality_type {
 	PERSONALITY_RDMA,
 	PERSONALITY_CORE,
 	PERSONALITY_ETH,
-	PERSONALITY_RESERVED4,
+	PERSONALITY_RESERVED,
 	MAX_PERSONALITY_TYPE
 };
 
@@ -997,7 +1108,6 @@ struct pf_start_ramrod_data {
 	struct regpair event_ring_pbl_addr;
 	struct regpair consolid_q_pbl_addr;
 	struct pf_start_tunnel_config tunnel_config;
-	__le32 reserved;
 	__le16 event_ring_sb_id;
 	u8 base_vf_id;
 	u8 num_vfs;
@@ -1011,21 +1121,22 @@ struct pf_start_ramrod_data {
 	u8 mf_mode;
 	u8 integ_phase;
 	u8 allow_npar_tx_switching;
-	u8 inner_to_outer_pri_map[8];
-	u8 pri_map_valid;
-	__le32 outer_tag;
+	u8 reserved0;
 	struct hsi_fp_ver_struct hsi_fp_ver;
+	struct outer_tag_config_struct outer_tag_config;
 };
 
+/* Per-protocol DCB data for the PF update ramrod */
 struct protocol_dcb_data {
 	u8 dcb_enable_flag;
-	u8 reserved_a;
+	u8 dscp_enable_flag;
 	u8 dcb_priority;
 	u8 dcb_tc;
-	u8 reserved_b;
+	u8 dscp_val;
 	u8 reserved0;
 };
 
+/* Update tunnel configuration */
 struct pf_update_tunnel_config {
 	u8 update_rx_pf_clss;
 	u8 update_rx_def_ucast_clss;
@@ -1042,8 +1153,8 @@ struct pf_update_tunnel_config {
 	__le16 reserved;
 };
 
+/* Data for PF update ramrod */
 struct pf_update_ramrod_data {
-	u8 pf_id;
 	u8 update_eth_dcb_data_mode;
 	u8 update_fcoe_dcb_data_mode;
 	u8 update_iscsi_dcb_data_mode;
@@ -1051,6 +1162,7 @@ struct pf_update_ramrod_data {
 	u8 update_rroce_dcb_data_mode;
 	u8 update_iwarp_dcb_data_mode;
 	u8 update_mf_vlan_flag;
+	u8 update_enable_stag_pri_change;
 	struct protocol_dcb_data eth_dcb_data;
 	struct protocol_dcb_data fcoe_dcb_data;
 	struct protocol_dcb_data iscsi_dcb_data;
@@ -1058,7 +1170,8 @@ struct pf_update_ramrod_data {
 	struct protocol_dcb_data rroce_dcb_data;
 	struct protocol_dcb_data iwarp_dcb_data;
 	__le16 mf_vlan;
-	__le16 reserved;
+	u8 enable_stag_pri_change;
+	u8 reserved;
 	struct pf_update_tunnel_config tunnel_config;
 };
 
@@ -1079,11 +1192,13 @@ enum protocol_version_array_key {
 	MAX_PROTOCOL_VERSION_ARRAY_KEY
 };
 
+/* RDMA TX Stats */
 struct rdma_sent_stats {
 	struct regpair sent_bytes;
 	struct regpair sent_pkts;
 };
 
+/* Pstorm non-triggering VF zone */
 struct pstorm_non_trigger_vf_zone {
 	struct eth_pstorm_per_queue_stat eth_queue_stat;
 	struct rdma_sent_stats rdma_stats;
@@ -1103,11 +1218,34 @@ struct ramrod_header {
 	__le16 echo;
 };
 
+/* RDMA RX Stats */
 struct rdma_rcv_stats {
 	struct regpair rcv_bytes;
 	struct regpair rcv_pkts;
 };
 
+/* Data for the QCN/DCQCN rate limiter update ramrod */
+struct rl_update_ramrod_data {
+	u8 qcn_update_param_flg;
+	u8 dcqcn_update_param_flg;
+	u8 rl_init_flg;
+	u8 rl_start_flg;
+	u8 rl_stop_flg;
+	u8 rl_id_first;
+	u8 rl_id_last;
+	u8 rl_dc_qcn_flg;
+	__le32 rl_bc_rate;
+	__le16 rl_max_rate;
+	__le16 rl_r_ai;
+	__le16 rl_r_hai;
+	__le16 dcqcn_g;
+	__le32 dcqcn_k_us;
+	__le32 dcqcn_timeuot_us;
+	__le32 qcn_timeuot_us;
+	__le32 reserved[2];
+};
+
+/* Slowpath Element (SPQE) */
 struct slow_path_element {
 	struct ramrod_header hdr;
 	struct regpair data_ptr;
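
rl_update_ramrod_data above carries QCN/DCQCN rate limiter parameters to
the firmware. A sketch of populating it for a hypothetical DCQCN update,
again with a host-order stand-in and purely illustrative numbers:

	#include <stdint.h>
	#include <stdio.h>

	struct rl_update_sketch {
		uint8_t dcqcn_update_param_flg;
		uint8_t rl_start_flg;
		uint8_t rl_id_first;
		uint8_t rl_id_last;
		uint32_t rl_bc_rate;	/* byte-counter base rate */
		uint16_t rl_max_rate;	/* rate cap */
		uint16_t rl_r_ai;	/* additive-increase step */
		uint32_t dcqcn_k_us;	/* rate-update interval */
	};

	int main(void)
	{
		struct rl_update_sketch rl = {
			.dcqcn_update_param_flg	= 1,
			.rl_start_flg		= 1,
			.rl_id_first		= 0,
			.rl_id_last		= 15,	/* limiters 0..15 */
			.rl_max_rate		= 25000,
			.rl_r_ai		= 5,
			.dcqcn_k_us		= 55,
		};

		printf("updating RLs %u-%u, max rate %u\n",
		       rl.rl_id_first, rl.rl_id_last, rl.rl_max_rate);
		return 0;
	}
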
@@ -1130,11 +1268,12 @@ struct tstorm_per_port_stat {
 	struct regpair roce_irregular_pkt;
 	struct regpair iwarp_irregular_pkt;
 	struct regpair eth_irregular_pkt;
-	struct regpair reserved1;
+	struct regpair toe_irregular_pkt;
 	struct regpair preroce_irregular_pkt;
 	struct regpair eth_gre_tunn_filter_discard;
 	struct regpair eth_vxlan_tunn_filter_discard;
 	struct regpair eth_geneve_tunn_filter_discard;
+	struct regpair eth_gft_drop_pkt;
 };
 
 /* Tstorm VF zone */
@@ -1197,6 +1336,7 @@ struct vf_stop_ramrod_data {
 	__le32 reserved2;
 };
 
+/* VF zone size mode */
 enum vf_zone_size_mode {
 	VF_ZONE_SIZE_MODE_DEFAULT,
 	VF_ZONE_SIZE_MODE_DOUBLE,
@@ -1204,6 +1344,7 @@ enum vf_zone_size_mode {
 	MAX_VF_ZONE_SIZE_MODE
 };
 
+/* Attention status block */
 struct atten_status_block {
 	__le32 atten_bits;
 	__le32 atten_ack;
@@ -1212,12 +1353,6 @@ struct atten_status_block {
 	__le32 reserved1;
 };
 
-enum command_type_bit {
-	IGU_COMMAND_TYPE_NOP = 0,
-	IGU_COMMAND_TYPE_SET = 1,
-	MAX_COMMAND_TYPE_BIT
-};
-
 /* DMAE command */
 struct dmae_cmd {
 	__le32 opcode;
@@ -1327,74 +1462,74 @@ enum dmae_cmd_src_enum {
 	MAX_DMAE_CMD_SRC_ENUM
 };
 
-struct mstorm_core_conn_ag_ctx {
+struct e4_mstorm_core_conn_ag_ctx {
 	u8 byte0;
 	u8 byte1;
 	u8 flags0;
-#define MSTORM_CORE_CONN_AG_CTX_BIT0_MASK	0x1
-#define MSTORM_CORE_CONN_AG_CTX_BIT0_SHIFT	0
-#define MSTORM_CORE_CONN_AG_CTX_BIT1_MASK	0x1
-#define MSTORM_CORE_CONN_AG_CTX_BIT1_SHIFT	1
-#define MSTORM_CORE_CONN_AG_CTX_CF0_MASK	0x3
-#define MSTORM_CORE_CONN_AG_CTX_CF0_SHIFT	2
-#define MSTORM_CORE_CONN_AG_CTX_CF1_MASK	0x3
-#define MSTORM_CORE_CONN_AG_CTX_CF1_SHIFT	4
-#define MSTORM_CORE_CONN_AG_CTX_CF2_MASK	0x3
-#define MSTORM_CORE_CONN_AG_CTX_CF2_SHIFT	6
+#define E4_MSTORM_CORE_CONN_AG_CTX_BIT0_MASK	0x1
+#define E4_MSTORM_CORE_CONN_AG_CTX_BIT0_SHIFT	0
+#define E4_MSTORM_CORE_CONN_AG_CTX_BIT1_MASK	0x1
+#define E4_MSTORM_CORE_CONN_AG_CTX_BIT1_SHIFT	1
+#define E4_MSTORM_CORE_CONN_AG_CTX_CF0_MASK	0x3
+#define E4_MSTORM_CORE_CONN_AG_CTX_CF0_SHIFT	2
+#define E4_MSTORM_CORE_CONN_AG_CTX_CF1_MASK	0x3
+#define E4_MSTORM_CORE_CONN_AG_CTX_CF1_SHIFT	4
+#define E4_MSTORM_CORE_CONN_AG_CTX_CF2_MASK	0x3
+#define E4_MSTORM_CORE_CONN_AG_CTX_CF2_SHIFT	6
 	u8 flags1;
-#define MSTORM_CORE_CONN_AG_CTX_CF0EN_MASK	0x1
-#define MSTORM_CORE_CONN_AG_CTX_CF0EN_SHIFT	0
-#define MSTORM_CORE_CONN_AG_CTX_CF1EN_MASK	0x1
-#define MSTORM_CORE_CONN_AG_CTX_CF1EN_SHIFT	1
-#define MSTORM_CORE_CONN_AG_CTX_CF2EN_MASK	0x1
-#define MSTORM_CORE_CONN_AG_CTX_CF2EN_SHIFT	2
-#define MSTORM_CORE_CONN_AG_CTX_RULE0EN_MASK	0x1
-#define MSTORM_CORE_CONN_AG_CTX_RULE0EN_SHIFT	3
-#define MSTORM_CORE_CONN_AG_CTX_RULE1EN_MASK	0x1
-#define MSTORM_CORE_CONN_AG_CTX_RULE1EN_SHIFT	4
-#define MSTORM_CORE_CONN_AG_CTX_RULE2EN_MASK	0x1
-#define MSTORM_CORE_CONN_AG_CTX_RULE2EN_SHIFT	5
-#define MSTORM_CORE_CONN_AG_CTX_RULE3EN_MASK	0x1
-#define MSTORM_CORE_CONN_AG_CTX_RULE3EN_SHIFT	6
-#define MSTORM_CORE_CONN_AG_CTX_RULE4EN_MASK	0x1
-#define MSTORM_CORE_CONN_AG_CTX_RULE4EN_SHIFT	7
+#define E4_MSTORM_CORE_CONN_AG_CTX_CF0EN_MASK		0x1
+#define E4_MSTORM_CORE_CONN_AG_CTX_CF0EN_SHIFT		0
+#define E4_MSTORM_CORE_CONN_AG_CTX_CF1EN_MASK		0x1
+#define E4_MSTORM_CORE_CONN_AG_CTX_CF1EN_SHIFT		1
+#define E4_MSTORM_CORE_CONN_AG_CTX_CF2EN_MASK		0x1
+#define E4_MSTORM_CORE_CONN_AG_CTX_CF2EN_SHIFT		2
+#define E4_MSTORM_CORE_CONN_AG_CTX_RULE0EN_MASK		0x1
+#define E4_MSTORM_CORE_CONN_AG_CTX_RULE0EN_SHIFT	3
+#define E4_MSTORM_CORE_CONN_AG_CTX_RULE1EN_MASK		0x1
+#define E4_MSTORM_CORE_CONN_AG_CTX_RULE1EN_SHIFT	4
+#define E4_MSTORM_CORE_CONN_AG_CTX_RULE2EN_MASK		0x1
+#define E4_MSTORM_CORE_CONN_AG_CTX_RULE2EN_SHIFT	5
+#define E4_MSTORM_CORE_CONN_AG_CTX_RULE3EN_MASK		0x1
+#define E4_MSTORM_CORE_CONN_AG_CTX_RULE3EN_SHIFT	6
+#define E4_MSTORM_CORE_CONN_AG_CTX_RULE4EN_MASK		0x1
+#define E4_MSTORM_CORE_CONN_AG_CTX_RULE4EN_SHIFT	7
 	__le16 word0;
 	__le16 word1;
 	__le32 reg0;
 	__le32 reg1;
 };
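+
+/* Illustrative sketch (not part of this change): the MASK/SHIFT pairs above
+ * are meant to be used through the generic GET_FIELD()/SET_FIELD() helpers
+ * from common_hsi.h rather than through open-coded shifts, e.g.:
+ *
+ *	u8 flags0 = 0;
+ *
+ *	SET_FIELD(flags0, E4_MSTORM_CORE_CONN_AG_CTX_CF0, 2);
+ *	if (GET_FIELD(flags0, E4_MSTORM_CORE_CONN_AG_CTX_BIT0))
+ *		...
+ */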
 
-struct ystorm_core_conn_ag_ctx {
+struct e4_ystorm_core_conn_ag_ctx {
 	u8 byte0;
 	u8 byte1;
 	u8 flags0;
-#define YSTORM_CORE_CONN_AG_CTX_BIT0_MASK	0x1
-#define YSTORM_CORE_CONN_AG_CTX_BIT0_SHIFT	0
-#define YSTORM_CORE_CONN_AG_CTX_BIT1_MASK	0x1
-#define YSTORM_CORE_CONN_AG_CTX_BIT1_SHIFT	1
-#define YSTORM_CORE_CONN_AG_CTX_CF0_MASK	0x3
-#define YSTORM_CORE_CONN_AG_CTX_CF0_SHIFT	2
-#define YSTORM_CORE_CONN_AG_CTX_CF1_MASK	0x3
-#define YSTORM_CORE_CONN_AG_CTX_CF1_SHIFT	4
-#define YSTORM_CORE_CONN_AG_CTX_CF2_MASK	0x3
-#define YSTORM_CORE_CONN_AG_CTX_CF2_SHIFT	6
+#define E4_YSTORM_CORE_CONN_AG_CTX_BIT0_MASK	0x1
+#define E4_YSTORM_CORE_CONN_AG_CTX_BIT0_SHIFT	0
+#define E4_YSTORM_CORE_CONN_AG_CTX_BIT1_MASK	0x1
+#define E4_YSTORM_CORE_CONN_AG_CTX_BIT1_SHIFT	1
+#define E4_YSTORM_CORE_CONN_AG_CTX_CF0_MASK	0x3
+#define E4_YSTORM_CORE_CONN_AG_CTX_CF0_SHIFT	2
+#define E4_YSTORM_CORE_CONN_AG_CTX_CF1_MASK	0x3
+#define E4_YSTORM_CORE_CONN_AG_CTX_CF1_SHIFT	4
+#define E4_YSTORM_CORE_CONN_AG_CTX_CF2_MASK	0x3
+#define E4_YSTORM_CORE_CONN_AG_CTX_CF2_SHIFT	6
 	u8 flags1;
-#define YSTORM_CORE_CONN_AG_CTX_CF0EN_MASK	0x1
-#define YSTORM_CORE_CONN_AG_CTX_CF0EN_SHIFT	0
-#define YSTORM_CORE_CONN_AG_CTX_CF1EN_MASK	0x1
-#define YSTORM_CORE_CONN_AG_CTX_CF1EN_SHIFT	1
-#define YSTORM_CORE_CONN_AG_CTX_CF2EN_MASK	0x1
-#define YSTORM_CORE_CONN_AG_CTX_CF2EN_SHIFT	2
-#define YSTORM_CORE_CONN_AG_CTX_RULE0EN_MASK	0x1
-#define YSTORM_CORE_CONN_AG_CTX_RULE0EN_SHIFT	3
-#define YSTORM_CORE_CONN_AG_CTX_RULE1EN_MASK	0x1
-#define YSTORM_CORE_CONN_AG_CTX_RULE1EN_SHIFT	4
-#define YSTORM_CORE_CONN_AG_CTX_RULE2EN_MASK	0x1
-#define YSTORM_CORE_CONN_AG_CTX_RULE2EN_SHIFT	5
-#define YSTORM_CORE_CONN_AG_CTX_RULE3EN_MASK	0x1
-#define YSTORM_CORE_CONN_AG_CTX_RULE3EN_SHIFT	6
-#define YSTORM_CORE_CONN_AG_CTX_RULE4EN_MASK	0x1
-#define YSTORM_CORE_CONN_AG_CTX_RULE4EN_SHIFT	7
+#define E4_YSTORM_CORE_CONN_AG_CTX_CF0EN_MASK		0x1
+#define E4_YSTORM_CORE_CONN_AG_CTX_CF0EN_SHIFT		0
+#define E4_YSTORM_CORE_CONN_AG_CTX_CF1EN_MASK		0x1
+#define E4_YSTORM_CORE_CONN_AG_CTX_CF1EN_SHIFT		1
+#define E4_YSTORM_CORE_CONN_AG_CTX_CF2EN_MASK		0x1
+#define E4_YSTORM_CORE_CONN_AG_CTX_CF2EN_SHIFT		2
+#define E4_YSTORM_CORE_CONN_AG_CTX_RULE0EN_MASK		0x1
+#define E4_YSTORM_CORE_CONN_AG_CTX_RULE0EN_SHIFT	3
+#define E4_YSTORM_CORE_CONN_AG_CTX_RULE1EN_MASK		0x1
+#define E4_YSTORM_CORE_CONN_AG_CTX_RULE1EN_SHIFT	4
+#define E4_YSTORM_CORE_CONN_AG_CTX_RULE2EN_MASK		0x1
+#define E4_YSTORM_CORE_CONN_AG_CTX_RULE2EN_SHIFT	5
+#define E4_YSTORM_CORE_CONN_AG_CTX_RULE3EN_MASK		0x1
+#define E4_YSTORM_CORE_CONN_AG_CTX_RULE3EN_SHIFT	6
+#define E4_YSTORM_CORE_CONN_AG_CTX_RULE4EN_MASK		0x1
+#define E4_YSTORM_CORE_CONN_AG_CTX_RULE4EN_SHIFT	7
 	u8 byte2;
 	u8 byte3;
 	__le16 word0;
@@ -1545,22 +1680,22 @@ struct qm_rf_opportunistic_mask {
 };
 
 /* QM hardware structure of QM map memory */
-struct qm_rf_pq_map {
+struct qm_rf_pq_map_e4 {
 	__le32 reg;
-#define QM_RF_PQ_MAP_PQ_VALID_MASK		0x1
-#define QM_RF_PQ_MAP_PQ_VALID_SHIFT		0
-#define QM_RF_PQ_MAP_RL_ID_MASK			0xFF
-#define QM_RF_PQ_MAP_RL_ID_SHIFT		1
-#define QM_RF_PQ_MAP_VP_PQ_ID_MASK		0x1FF
-#define QM_RF_PQ_MAP_VP_PQ_ID_SHIFT		9
-#define QM_RF_PQ_MAP_VOQ_MASK			0x1F
-#define QM_RF_PQ_MAP_VOQ_SHIFT			18
-#define QM_RF_PQ_MAP_WRR_WEIGHT_GROUP_MASK	0x3
-#define QM_RF_PQ_MAP_WRR_WEIGHT_GROUP_SHIFT	23
-#define QM_RF_PQ_MAP_RL_VALID_MASK		0x1
-#define QM_RF_PQ_MAP_RL_VALID_SHIFT		25
-#define QM_RF_PQ_MAP_RESERVED_MASK		0x3F
-#define QM_RF_PQ_MAP_RESERVED_SHIFT		26
+#define QM_RF_PQ_MAP_E4_PQ_VALID_MASK		0x1
+#define QM_RF_PQ_MAP_E4_PQ_VALID_SHIFT		0
+#define QM_RF_PQ_MAP_E4_RL_ID_MASK		0xFF
+#define QM_RF_PQ_MAP_E4_RL_ID_SHIFT		1
+#define QM_RF_PQ_MAP_E4_VP_PQ_ID_MASK		0x1FF
+#define QM_RF_PQ_MAP_E4_VP_PQ_ID_SHIFT		9
+#define QM_RF_PQ_MAP_E4_VOQ_MASK		0x1F
+#define QM_RF_PQ_MAP_E4_VOQ_SHIFT		18
+#define QM_RF_PQ_MAP_E4_WRR_WEIGHT_GROUP_MASK	0x3
+#define QM_RF_PQ_MAP_E4_WRR_WEIGHT_GROUP_SHIFT	23
+#define QM_RF_PQ_MAP_E4_RL_VALID_MASK		0x1
+#define QM_RF_PQ_MAP_E4_RL_VALID_SHIFT		25
+#define QM_RF_PQ_MAP_E4_RESERVED_MASK		0x3F
+#define QM_RF_PQ_MAP_E4_RESERVED_SHIFT		26
 };
 
 /* Completion params for aggregated interrupt completion */
@@ -1643,8 +1778,8 @@ enum block_addr {
 	GRCBASE_MULD = 0x4e0000,
 	GRCBASE_YULD = 0x4c8000,
 	GRCBASE_XYLD = 0x4c0000,
-	GRCBASE_PTLD = 0x590000,
-	GRCBASE_YPLD = 0x5b0000,
+	GRCBASE_PTLD = 0x5a0000,
+	GRCBASE_YPLD = 0x5c0000,
 	GRCBASE_PRM = 0x230000,
 	GRCBASE_PBF_PB1 = 0xda0000,
 	GRCBASE_PBF_PB2 = 0xda4000,
@@ -1675,6 +1810,7 @@ enum block_addr {
 	GRCBASE_PHY_PCIE = 0x620000,
 	GRCBASE_LED = 0x6b8000,
 	GRCBASE_AVS_WRAP = 0x6b0000,
+	GRCBASE_PXPREQBUS = 0x56000,
 	GRCBASE_MISC_AEU = 0x8000,
 	GRCBASE_BAR0_MAP = 0x1c00000,
 	MAX_BLOCK_ADDR
@@ -1766,6 +1902,7 @@ enum block_id {
 	BLOCK_PHY_PCIE,
 	BLOCK_LED,
 	BLOCK_AVS_WRAP,
+	BLOCK_PXPREQBUS,
 	BLOCK_MISC_AEU,
 	BLOCK_BAR0_MAP,
 	MAX_BLOCK_ID
@@ -1841,7 +1978,7 @@ struct dbg_attn_block_result {
 	struct dbg_attn_reg_result reg_results[15];
 };
 
-/* mode header */
+/* Mode header */
 struct dbg_mode_hdr {
 	__le16 data;
 #define DBG_MODE_HDR_EVAL_MODE_MASK		0x1
@@ -1863,80 +2000,83 @@ struct dbg_attn_reg {
 	__le32 mask_address;
 };
 
-/* attention types */
+/* Attention types */
 enum dbg_attn_type {
 	ATTN_TYPE_INTERRUPT,
 	ATTN_TYPE_PARITY,
 	MAX_DBG_ATTN_TYPE
 };
 
+/* Debug Bus block data */
 struct dbg_bus_block {
 	u8 num_of_lines;
 	u8 has_latency_events;
 	__le16 lines_offset;
 };
 
+/* Debug Bus block user data */
 struct dbg_bus_block_user_data {
 	u8 num_of_lines;
 	u8 has_latency_events;
 	__le16 names_offset;
 };
 
+/* Block Debug line data */
 struct dbg_bus_line {
 	u8 data;
-#define DBG_BUS_LINE_NUM_OF_GROUPS_MASK  0xF
-#define DBG_BUS_LINE_NUM_OF_GROUPS_SHIFT 0
-#define DBG_BUS_LINE_IS_256B_MASK        0x1
-#define DBG_BUS_LINE_IS_256B_SHIFT       4
-#define DBG_BUS_LINE_RESERVED_MASK       0x7
-#define DBG_BUS_LINE_RESERVED_SHIFT      5
+#define DBG_BUS_LINE_NUM_OF_GROUPS_MASK		0xF
+#define DBG_BUS_LINE_NUM_OF_GROUPS_SHIFT	0
+#define DBG_BUS_LINE_IS_256B_MASK		0x1
+#define DBG_BUS_LINE_IS_256B_SHIFT		4
+#define DBG_BUS_LINE_RESERVED_MASK		0x7
+#define DBG_BUS_LINE_RESERVED_SHIFT		5
 	u8 group_sizes;
 };
 
-/* condition header for registers dump */
+/* Condition header for registers dump */
 struct dbg_dump_cond_hdr {
 	struct dbg_mode_hdr mode; /* Mode header */
 	u8 block_id; /* block ID */
 	u8 data_size; /* size in dwords of the data following this header */
 };
 
-/* memory data for registers dump */
+/* Memory data for registers dump */
 struct dbg_dump_mem {
 	__le32 dword0;
-#define DBG_DUMP_MEM_ADDRESS_MASK       0xFFFFFF
-#define DBG_DUMP_MEM_ADDRESS_SHIFT      0
-#define DBG_DUMP_MEM_MEM_GROUP_ID_MASK  0xFF
-#define DBG_DUMP_MEM_MEM_GROUP_ID_SHIFT 24
+#define DBG_DUMP_MEM_ADDRESS_MASK	0xFFFFFF
+#define DBG_DUMP_MEM_ADDRESS_SHIFT	0
+#define DBG_DUMP_MEM_MEM_GROUP_ID_MASK	0xFF
+#define DBG_DUMP_MEM_MEM_GROUP_ID_SHIFT	24
 	__le32 dword1;
-#define DBG_DUMP_MEM_LENGTH_MASK        0xFFFFFF
-#define DBG_DUMP_MEM_LENGTH_SHIFT       0
-#define DBG_DUMP_MEM_WIDE_BUS_MASK      0x1
-#define DBG_DUMP_MEM_WIDE_BUS_SHIFT     24
-#define DBG_DUMP_MEM_RESERVED_MASK      0x7F
-#define DBG_DUMP_MEM_RESERVED_SHIFT     25
+#define DBG_DUMP_MEM_LENGTH_MASK	0xFFFFFF
+#define DBG_DUMP_MEM_LENGTH_SHIFT	0
+#define DBG_DUMP_MEM_WIDE_BUS_MASK	0x1
+#define DBG_DUMP_MEM_WIDE_BUS_SHIFT	24
+#define DBG_DUMP_MEM_RESERVED_MASK	0x7F
+#define DBG_DUMP_MEM_RESERVED_SHIFT	25
 };
 
-/* register data for registers dump */
+/* Register data for registers dump */
 struct dbg_dump_reg {
 	__le32 data;
-#define DBG_DUMP_REG_ADDRESS_MASK 0x7FFFFF /* register address (in dwords) */
-#define DBG_DUMP_REG_ADDRESS_SHIFT 0
-#define DBG_DUMP_REG_WIDE_BUS_MASK 0x1 /* indicates register is wide-bus */
-#define DBG_DUMP_REG_WIDE_BUS_SHIFT 23
-#define DBG_DUMP_REG_LENGTH_MASK  0xFF /* register size (in dwords) */
-#define DBG_DUMP_REG_LENGTH_SHIFT 24
+#define DBG_DUMP_REG_ADDRESS_MASK	0x7FFFFF
+#define DBG_DUMP_REG_ADDRESS_SHIFT	0
+#define DBG_DUMP_REG_WIDE_BUS_MASK	0x1
+#define DBG_DUMP_REG_WIDE_BUS_SHIFT	23
+#define DBG_DUMP_REG_LENGTH_MASK	0xFF
+#define DBG_DUMP_REG_LENGTH_SHIFT	24
 };
 
-/* split header for registers dump */
+/* Split header for registers dump */
 struct dbg_dump_split_hdr {
 	__le32 hdr;
-#define DBG_DUMP_SPLIT_HDR_DATA_SIZE_MASK      0xFFFFFF
-#define DBG_DUMP_SPLIT_HDR_DATA_SIZE_SHIFT     0
-#define DBG_DUMP_SPLIT_HDR_SPLIT_TYPE_ID_MASK  0xFF
-#define DBG_DUMP_SPLIT_HDR_SPLIT_TYPE_ID_SHIFT 24
+#define DBG_DUMP_SPLIT_HDR_DATA_SIZE_MASK	0xFFFFFF
+#define DBG_DUMP_SPLIT_HDR_DATA_SIZE_SHIFT	0
+#define DBG_DUMP_SPLIT_HDR_SPLIT_TYPE_ID_MASK	0xFF
+#define DBG_DUMP_SPLIT_HDR_SPLIT_TYPE_ID_SHIFT	24
 };
 
-/* condition header for idle check */
+/* Condition header for idle check */
 struct dbg_idle_chk_cond_hdr {
 	struct dbg_mode_hdr mode; /* Mode header */
 	__le16 data_size; /* size in dwords of the data following this header */
@@ -1945,12 +2085,12 @@ struct dbg_idle_chk_cond_hdr {
 /* Idle Check condition register */
 struct dbg_idle_chk_cond_reg {
 	__le32 data;
-#define DBG_IDLE_CHK_COND_REG_ADDRESS_MASK   0x7FFFFF
-#define DBG_IDLE_CHK_COND_REG_ADDRESS_SHIFT  0
-#define DBG_IDLE_CHK_COND_REG_WIDE_BUS_MASK  0x1
-#define DBG_IDLE_CHK_COND_REG_WIDE_BUS_SHIFT 23
-#define DBG_IDLE_CHK_COND_REG_BLOCK_ID_MASK  0xFF
-#define DBG_IDLE_CHK_COND_REG_BLOCK_ID_SHIFT 24
+#define DBG_IDLE_CHK_COND_REG_ADDRESS_MASK	0x7FFFFF
+#define DBG_IDLE_CHK_COND_REG_ADDRESS_SHIFT	0
+#define DBG_IDLE_CHK_COND_REG_WIDE_BUS_MASK	0x1
+#define DBG_IDLE_CHK_COND_REG_WIDE_BUS_SHIFT	23
+#define DBG_IDLE_CHK_COND_REG_BLOCK_ID_MASK	0xFF
+#define DBG_IDLE_CHK_COND_REG_BLOCK_ID_SHIFT	24
 	__le16 num_entries;
 	u8 entry_size;
 	u8 start_entry;
@@ -1959,12 +2099,12 @@ struct dbg_idle_chk_cond_reg {
 /* Idle Check info register */
 struct dbg_idle_chk_info_reg {
 	__le32 data;
-#define DBG_IDLE_CHK_INFO_REG_ADDRESS_MASK   0x7FFFFF
-#define DBG_IDLE_CHK_INFO_REG_ADDRESS_SHIFT  0
-#define DBG_IDLE_CHK_INFO_REG_WIDE_BUS_MASK  0x1
-#define DBG_IDLE_CHK_INFO_REG_WIDE_BUS_SHIFT 23
-#define DBG_IDLE_CHK_INFO_REG_BLOCK_ID_MASK  0xFF
-#define DBG_IDLE_CHK_INFO_REG_BLOCK_ID_SHIFT 24
+#define DBG_IDLE_CHK_INFO_REG_ADDRESS_MASK	0x7FFFFF
+#define DBG_IDLE_CHK_INFO_REG_ADDRESS_SHIFT	0
+#define DBG_IDLE_CHK_INFO_REG_WIDE_BUS_MASK	0x1
+#define DBG_IDLE_CHK_INFO_REG_WIDE_BUS_SHIFT	23
+#define DBG_IDLE_CHK_INFO_REG_BLOCK_ID_MASK	0xFF
+#define DBG_IDLE_CHK_INFO_REG_BLOCK_ID_SHIFT	24
 	__le16 size; /* register size in dwords */
 	struct dbg_mode_hdr mode; /* Mode header */
 };
@@ -2016,13 +2156,13 @@ struct dbg_idle_chk_rule {
 /* Idle Check rule parsing data */
 struct dbg_idle_chk_rule_parsing_data {
 	__le32 data;
-#define DBG_IDLE_CHK_RULE_PARSING_DATA_HAS_FW_MSG_MASK  0x1
-#define DBG_IDLE_CHK_RULE_PARSING_DATA_HAS_FW_MSG_SHIFT 0
-#define DBG_IDLE_CHK_RULE_PARSING_DATA_STR_OFFSET_MASK  0x7FFFFFFF
-#define DBG_IDLE_CHK_RULE_PARSING_DATA_STR_OFFSET_SHIFT 1
+#define DBG_IDLE_CHK_RULE_PARSING_DATA_HAS_FW_MSG_MASK	0x1
+#define DBG_IDLE_CHK_RULE_PARSING_DATA_HAS_FW_MSG_SHIFT	0
+#define DBG_IDLE_CHK_RULE_PARSING_DATA_STR_OFFSET_MASK	0x7FFFFFFF
+#define DBG_IDLE_CHK_RULE_PARSING_DATA_STR_OFFSET_SHIFT	1
 };
 
-/* idle check severity types */
+/* Idle check severity types */
 enum dbg_idle_chk_severity_types {
 	/* idle check failure should cause an error */
 	IDLE_CHK_SEVERITY_ERROR,
@@ -2036,14 +2176,14 @@ enum dbg_idle_chk_severity_types {
 /* Debug Bus block data */
 struct dbg_bus_block_data {
 	__le16 data;
-#define DBG_BUS_BLOCK_DATA_ENABLE_MASK_MASK       0xF
-#define DBG_BUS_BLOCK_DATA_ENABLE_MASK_SHIFT      0
-#define DBG_BUS_BLOCK_DATA_RIGHT_SHIFT_MASK       0xF
-#define DBG_BUS_BLOCK_DATA_RIGHT_SHIFT_SHIFT      4
-#define DBG_BUS_BLOCK_DATA_FORCE_VALID_MASK_MASK  0xF
-#define DBG_BUS_BLOCK_DATA_FORCE_VALID_MASK_SHIFT 8
-#define DBG_BUS_BLOCK_DATA_FORCE_FRAME_MASK_MASK  0xF
-#define DBG_BUS_BLOCK_DATA_FORCE_FRAME_MASK_SHIFT 12
+#define DBG_BUS_BLOCK_DATA_ENABLE_MASK_MASK		0xF
+#define DBG_BUS_BLOCK_DATA_ENABLE_MASK_SHIFT		0
+#define DBG_BUS_BLOCK_DATA_RIGHT_SHIFT_MASK		0xF
+#define DBG_BUS_BLOCK_DATA_RIGHT_SHIFT_SHIFT		4
+#define DBG_BUS_BLOCK_DATA_FORCE_VALID_MASK_MASK	0xF
+#define DBG_BUS_BLOCK_DATA_FORCE_VALID_MASK_SHIFT	8
+#define DBG_BUS_BLOCK_DATA_FORCE_FRAME_MASK_MASK	0xF
+#define DBG_BUS_BLOCK_DATA_FORCE_FRAME_MASK_SHIFT	12
 	u8 line_num;
 	u8 hw_id;
 };
@@ -2072,6 +2212,7 @@ enum dbg_bus_clients {
 	MAX_DBG_BUS_CLIENTS
 };
 
+/* Debug Bus constraint operation types */
 enum dbg_bus_constraint_ops {
 	DBG_BUS_CONSTRAINT_OP_EQ,
 	DBG_BUS_CONSTRAINT_OP_NE,
@@ -2086,12 +2227,13 @@ enum dbg_bus_constraint_ops {
 	MAX_DBG_BUS_CONSTRAINT_OPS
 };
 
+/* Debug Bus trigger state data */
 struct dbg_bus_trigger_state_data {
 	u8 data;
-#define DBG_BUS_TRIGGER_STATE_DATA_BLOCK_SHIFTED_ENABLE_MASK_MASK  0xF
-#define DBG_BUS_TRIGGER_STATE_DATA_BLOCK_SHIFTED_ENABLE_MASK_SHIFT 0
-#define DBG_BUS_TRIGGER_STATE_DATA_CONSTRAINT_DWORD_MASK_MASK      0xF
-#define DBG_BUS_TRIGGER_STATE_DATA_CONSTRAINT_DWORD_MASK_SHIFT     4
+#define DBG_BUS_TRIGGER_STATE_DATA_BLOCK_SHIFTED_ENABLE_MASK_MASK	0xF
+#define DBG_BUS_TRIGGER_STATE_DATA_BLOCK_SHIFTED_ENABLE_MASK_SHIFT	0
+#define DBG_BUS_TRIGGER_STATE_DATA_CONSTRAINT_DWORD_MASK_MASK		0xF
+#define DBG_BUS_TRIGGER_STATE_DATA_CONSTRAINT_DWORD_MASK_SHIFT		4
 };
 
 /* Debug Bus memory address */
@@ -2165,6 +2307,7 @@ struct dbg_bus_data {
 	struct dbg_bus_storm_data storms[6];
 };
 
+/* Debug bus filter types */
 enum dbg_bus_filter_types {
 	DBG_BUS_FILTER_TYPE_OFF,
 	DBG_BUS_FILTER_TYPE_PRE,
@@ -2181,6 +2324,7 @@ enum dbg_bus_frame_modes {
 	MAX_DBG_BUS_FRAME_MODES
 };
 
+/* Debug bus other engine mode */
 enum dbg_bus_other_engine_modes {
 	DBG_BUS_OTHER_ENGINE_MODE_NONE,
 	DBG_BUS_OTHER_ENGINE_MODE_DOUBLE_BW_TX,
@@ -2190,12 +2334,14 @@ enum dbg_bus_other_engine_modes {
 	MAX_DBG_BUS_OTHER_ENGINE_MODES
 };
 
+/* Debug bus post-trigger recording types */
 enum dbg_bus_post_trigger_types {
 	DBG_BUS_POST_TRIGGER_RECORD,
 	DBG_BUS_POST_TRIGGER_DROP,
 	MAX_DBG_BUS_POST_TRIGGER_TYPES
 };
 
+/* Debug bus pre-trigger recording types */
 enum dbg_bus_pre_trigger_types {
 	DBG_BUS_PRE_TRIGGER_START_FROM_ZERO,
 	DBG_BUS_PRE_TRIGGER_NUM_CHUNKS,
@@ -2203,11 +2349,10 @@ enum dbg_bus_pre_trigger_types {
 	MAX_DBG_BUS_PRE_TRIGGER_TYPES
 };
 
+/* Debug bus SEMI frame modes */
 enum dbg_bus_semi_frame_modes {
-	DBG_BUS_SEMI_FRAME_MODE_0SLOW_4FAST =
-	    0,
-	DBG_BUS_SEMI_FRAME_MODE_4SLOW_0FAST =
-	    3,
+	DBG_BUS_SEMI_FRAME_MODE_0SLOW_4FAST = 0,
+	DBG_BUS_SEMI_FRAME_MODE_4SLOW_0FAST = 3,
 	MAX_DBG_BUS_SEMI_FRAME_MODES
 };
 
@@ -2220,6 +2365,7 @@ enum dbg_bus_states {
 	MAX_DBG_BUS_STATES
 };
 
+/* Debug Bus Storm modes */
 enum dbg_bus_storm_modes {
 	DBG_BUS_STORM_MODE_PRINTF,
 	DBG_BUS_STORM_MODE_PRAM_ADDR,
@@ -2352,7 +2498,7 @@ enum dbg_status {
 	DBG_STATUS_MCP_TRACE_NO_META,
 	DBG_STATUS_MCP_COULD_NOT_HALT,
 	DBG_STATUS_MCP_COULD_NOT_RESUME,
-	DBG_STATUS_DMAE_FAILED,
+	DBG_STATUS_RESERVED2,
 	DBG_STATUS_SEMI_FIFO_NOT_EMPTY,
 	DBG_STATUS_IGU_FIFO_BAD_DATA,
 	DBG_STATUS_MCP_COULD_NOT_MASK_PRTY,
@@ -2396,7 +2542,8 @@ struct dbg_tools_data {
 	u8 chip_id;
 	u8 platform_id;
 	u8 initialized;
-	u8 reserved;
+	u8 use_dmae;
+	__le32 num_regs_read;
 };
 
 /********************************/
@@ -2406,6 +2553,7 @@ struct dbg_tools_data {
 /* Number of VLAN priorities */
 #define NUM_OF_VLAN_PRIORITIES	8
 
+/* BRB RAM init requirements */
 struct init_brb_ram_req {
 	__le32 guranteed_per_tc;
 	__le32 headroom_per_tc;
@@ -2414,17 +2562,20 @@ struct init_brb_ram_req {
 	u8 num_active_tcs[MAX_NUM_PORTS];
 };
 
+/* ETS per-TC init requirements */
 struct init_ets_tc_req {
 	u8 use_sp;
 	u8 use_wfq;
 	__le16 weight;
 };
 
+/* ETS init requirements */
 struct init_ets_req {
 	__le32 mtu;
 	struct init_ets_tc_req tc_req[NUM_OF_TCS];
 };
 
+/* NIG LB RL init requirements */
 struct init_nig_lb_rl_req {
 	__le16 lb_mac_rate;
 	__le16 lb_rate;
@@ -2432,15 +2583,18 @@ struct init_nig_lb_rl_req {
 	__le16 tc_rate[NUM_OF_PHYS_TCS];
 };
 
+/* NIG TC mapping for each priority */
 struct init_nig_pri_tc_map_entry {
 	u8 tc_id;
 	u8 valid;
 };
 
+/* NIG priority to TC map init requirements */
 struct init_nig_pri_tc_map_req {
 	struct init_nig_pri_tc_map_entry pri[NUM_OF_VLAN_PRIORITIES];
 };
 
+/* QM per-port init parameters */
 struct init_qm_port_params {
 	u8 active;
 	u8 active_phys_tcs;
@@ -2563,7 +2717,7 @@ struct bin_buffer_hdr {
 	__le32 length;
 };
 
-/* binary init buffer types */
+/* Binary init buffer types */
 enum bin_init_buffer_type {
 	BIN_BUF_INIT_FW_VER_INFO,
 	BIN_BUF_INIT_CMD,
@@ -2793,6 +2947,7 @@ struct iro {
 };
 
 /***************************** Public Functions *******************************/
+
 /**
  * @brief qed_dbg_set_bin_ptr - Sets a pointer to the binary data with debug
  *	arrays.
@@ -2802,6 +2957,18 @@ struct iro {
 enum dbg_status qed_dbg_set_bin_ptr(const u8 * const bin_ptr);
 
 /**
+ * @brief qed_read_regs - Reads registers into a buffer (using GRC).
+ *
+ * @param p_hwfn - HW device data
+ * @param p_ptt - Ptt window used for reading the registers.
+ * @param buf - Destination buffer.
+ * @param addr - Source GRC address in dwords.
+ * @param len - Number of registers to read.
+ */
+void qed_read_regs(struct qed_hwfn *p_hwfn,
+		   struct qed_ptt *p_ptt, u32 *buf, u32 addr, u32 len);
+
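+/* Illustrative sketch (not part of the interface): dumping a few GRC
+ * registers into a local buffer. The address parameter is in dwords, so a
+ * byte address taken from the register file is divided by 4 first;
+ * byte_addr below is a placeholder.
+ *
+ *	u32 buf[16];
+ *
+ *	qed_read_regs(p_hwfn, p_ptt, buf, byte_addr / 4, ARRAY_SIZE(buf));
+ */
+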
+/**
  * @brief qed_dbg_grc_set_params_default - Reverts all GRC parameters to their
  *	default value.
  *
@@ -3119,6 +3286,7 @@ enum dbg_status qed_dbg_print_attn(struct qed_hwfn *p_hwfn,
 #define MAX_NAME_LEN	16
 
 /***************************** Public Functions *******************************/
+
 /**
  * @brief qed_dbg_user_set_bin_ptr - Sets a pointer to the binary data with
  *	debug arrays.
@@ -3172,6 +3340,18 @@ enum dbg_status qed_print_idle_chk_results(struct qed_hwfn *p_hwfn,
 					   u32 *num_warnings);
 
 /**
+ * @brief qed_dbg_mcp_trace_set_meta_data - Sets a pointer to the MCP Trace
+ *	meta data.
+ *
+ * Needed in case the MCP Trace dump doesn't contain the meta data (e.g. due to
+ * no NVRAM access).
+ *
+ * @param data - pointer to MCP Trace meta data
+ * @param size - size of MCP Trace meta data in dwords
+ */
+void qed_dbg_mcp_trace_set_meta_data(u32 *data, u32 size);
+
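+/* Illustrative sketch (not part of the interface): a caller that obtained
+ * the meta data out of band (e.g. read it from a file) could register it
+ * before parsing a dump; meta_buf and meta_size_dwords are placeholders.
+ *
+ *	qed_dbg_mcp_trace_set_meta_data(meta_buf, meta_size_dwords);
+ */
+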
+/**
  * @brief qed_get_mcp_trace_results_buf_size - Returns the required buffer size
  *	for MCP Trace results (in bytes).
  *
@@ -3607,6 +3787,9 @@ static const u32 dbg_bus_blocks[] = {
 	0x00000000,		/* bar0_map, bb, 0 lines */
 	0x00000000,		/* bar0_map, k2, 0 lines */
 	0x00000000,
+	0x00000000,		/* bar0_map, bb, 0 lines */
+	0x00000000,		/* bar0_map, k2, 0 lines */
+	0x00000000,
 };
 
 /* Win 2 */
@@ -3645,7 +3828,6 @@ static const u32 dbg_bus_blocks[] = {
  * Returns the required host memory size in 4KB units.
  * Must be called before all QM init HSI functions.
  *
- * @param pf_id - physical function ID
  * @param num_pf_cids - number of connections used by this PF
  * @param num_vf_cids - number of connections used by VFs of this PF
  * @param num_tids - number of tasks used by this PF
@@ -3654,8 +3836,7 @@ static const u32 dbg_bus_blocks[] = {
  *
  * @return The required host memory size in 4KB units.
  */
-u32 qed_qm_pf_mem_size(u8 pf_id,
-		       u32 num_pf_cids,
+u32 qed_qm_pf_mem_size(u32 num_pf_cids,
 		       u32 num_vf_cids,
 		       u32 num_tids, u16 num_pf_pqs, u16 num_vf_pqs);
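+
+/* Illustrative sketch (not part of the interface): sizing and allocating
+ * the QM PF host memory; the connection/task/PQ counts are placeholders
+ * taken from the PF's resource configuration.
+ *
+ *	u32 num_4kb = qed_qm_pf_mem_size(num_pf_cids, num_vf_cids, num_tids,
+ *					 num_pf_pqs, num_vf_pqs);
+ *	void *p_mem = dma_alloc_coherent(&cdev->pdev->dev, num_4kb * 0x1000,
+ *					 &phys_addr, GFP_KERNEL);
+ */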
 
@@ -3676,7 +3857,7 @@ struct qed_qm_pf_rt_init_params {
 	u8 port_id;
 	u8 pf_id;
 	u8 max_phys_tcs_per_port;
-	bool is_first_pf;
+	bool is_pf_loading;
 	u32 num_pf_cids;
 	u32 num_vf_cids;
 	u32 num_tids;
@@ -3687,6 +3868,7 @@ struct qed_qm_pf_rt_init_params {
 	u8 num_vports;
 	u16 pf_wfq;
 	u32 pf_rl;
+	u32 link_speed;
 	struct init_qm_pq_params *pq_params;
 	struct init_qm_vport_params *vport_params;
 };
@@ -3744,11 +3926,14 @@ int qed_init_vport_wfq(struct qed_hwfn *p_hwfn,
  * @param p_ptt - ptt window used for writing the registers
  * @param vport_id - VPORT ID
  * @param vport_rl - rate limit in Mb/sec units
+ * @param link_speed - link speed in Mbps.
  *
  * @return 0 on success, -1 on error.
  */
 int qed_init_vport_rl(struct qed_hwfn *p_hwfn,
-		      struct qed_ptt *p_ptt, u8 vport_id, u32 vport_rl);
+		      struct qed_ptt *p_ptt,
+		      u8 vport_id, u32 vport_rl, u32 link_speed);
+
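+/* Illustrative sketch (not part of the interface): capping a VPORT at
+ * 1000 Mb/s on a 25000 Mbps link; vport_id and the rates are placeholders.
+ *
+ *	if (qed_init_vport_rl(p_hwfn, p_ptt, vport_id, 1000, 25000))
+ *		DP_NOTICE(p_hwfn, "VPORT RL configuration failed\n");
+ */
+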
 /**
  * @brief qed_send_qm_stop_cmd  Sends a stop command to the QM
  *
@@ -3759,7 +3944,8 @@ int qed_init_vport_rl(struct qed_hwfn *p_hwfn,
  * @param start_pq - first PQ ID to stop
  * @param num_pqs - Number of PQs to stop, starting from start_pq.
  *
- * @return bool, true if successful, false if timeout occured while waiting for QM command done.
+ * @return bool, true if successful, false if timeout occurred while waiting for
+ *	QM command done.
  */
 bool qed_send_qm_stop_cmd(struct qed_hwfn *p_hwfn,
 			  struct qed_ptt *p_ptt,
@@ -3769,6 +3955,7 @@ bool qed_send_qm_stop_cmd(struct qed_hwfn *p_hwfn,
 /**
  * @brief qed_set_vxlan_dest_port - initializes vxlan tunnel destination udp port
  *
+ * @param p_hwfn
  * @param p_ptt - ptt window used for writing the registers.
  * @param dest_port - vxlan destination udp port.
  */
@@ -3778,6 +3965,7 @@ void qed_set_vxlan_dest_port(struct qed_hwfn *p_hwfn,
 /**
  * @brief qed_set_vxlan_enable - enable or disable VXLAN tunnel in HW
  *
+ * @param p_hwfn
  * @param p_ptt - ptt window used for writing the registers.
  * @param vxlan_enable - vxlan enable flag.
  */
@@ -3787,6 +3975,7 @@ void qed_set_vxlan_enable(struct qed_hwfn *p_hwfn,
 /**
  * @brief qed_set_gre_enable - enable or disable GRE tunnel in HW
  *
+ * @param p_hwfn
  * @param p_ptt - ptt window used for writing the registers.
 * @param eth_gre_enable - eth GRE enable flag.
 * @param ip_gre_enable - IP GRE enable flag.
@@ -3798,6 +3987,7 @@ void qed_set_gre_enable(struct qed_hwfn *p_hwfn,
 /**
  * @brief qed_set_geneve_dest_port - initializes geneve tunnel destination udp port
  *
+ * @param p_hwfn
  * @param p_ptt - ptt window used for writing the registers.
  * @param dest_port - geneve destination udp port.
  */
@@ -3814,612 +4004,921 @@ void qed_set_geneve_dest_port(struct qed_hwfn *p_hwfn,
 void qed_set_geneve_enable(struct qed_hwfn *p_hwfn,
 			   struct qed_ptt *p_ptt,
 			   bool eth_geneve_enable, bool ip_geneve_enable);
-void qed_set_rfs_mode_disable(struct qed_hwfn *p_hwfn,
-			      struct qed_ptt *p_ptt, u16 pf_id);
-void qed_set_rfs_mode_enable(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
-			     u16 pf_id, bool tcp, bool udp,
-			     bool ipv4, bool ipv6);
 
-#define	YSTORM_FLOW_CONTROL_MODE_OFFSET			(IRO[0].base)
-#define	YSTORM_FLOW_CONTROL_MODE_SIZE			(IRO[0].size)
-#define	TSTORM_PORT_STAT_OFFSET(port_id) \
+/**
+ * @brief qed_gft_disable - Disable GFT
+ *
+ * @param p_hwfn
+ * @param p_ptt - ptt window used for writing the registers.
+ * @param pf_id - pf on which to disable GFT.
+ */
+void qed_gft_disable(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u16 pf_id);
+
+/**
+ * @brief qed_gft_config - Enable and configure HW for GFT
+ *
+ * @param p_hwfn
+ * @param p_ptt - ptt window used for writing the registers.
+ * @param pf_id - pf on which to enable GFT.
+ * @param tcp - set profile for tcp packets.
+ * @param udp - set profile for udp packets.
+ * @param ipv4 - set profile for ipv4 packets.
+ * @param ipv6 - set profile for ipv6 packets.
+ * @param profile_type - defines which packet fields to match on. Use enum
+ *	gft_profile_type.
+ */
+void qed_gft_config(struct qed_hwfn *p_hwfn,
+		    struct qed_ptt *p_ptt,
+		    u16 pf_id,
+		    bool tcp,
+		    bool udp,
+		    bool ipv4, bool ipv6, enum gft_profile_type profile_type);
+
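+/* Illustrative sketch (not part of the interface): enabling GFT on the
+ * current PF for TCP-over-IPv4 flows with 4-tuple matching; the profile
+ * value is assumed to come from enum gft_profile_type.
+ *
+ *	qed_gft_config(p_hwfn, p_ptt, p_hwfn->rel_pf_id,
+ *		       true, false, true, false,
+ *		       GFT_PROFILE_TYPE_4_TUPLE);
+ */
+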
+/**
+ * @brief qed_enable_context_validation - Enable and configure context
+ *	validation.
+ *
+ * @param p_hwfn
+ * @param p_ptt - ptt window used for writing the registers.
+ */
+void qed_enable_context_validation(struct qed_hwfn *p_hwfn,
+				   struct qed_ptt *p_ptt);
+
+/**
+ * @brief qed_calc_session_ctx_validation - Calculate validation byte for
+ *	session context.
+ *
+ * @param p_ctx_mem - pointer to context memory.
+ * @param ctx_size - context size.
+ * @param ctx_type - context type.
+ * @param cid - context cid.
+ */
+void qed_calc_session_ctx_validation(void *p_ctx_mem,
+				     u16 ctx_size, u8 ctx_type, u32 cid);
+
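+/* Illustrative sketch (not part of the interface): stamping the validation
+ * byte into a newly built session context before the device uses it;
+ * p_ctx and the size/type/cid values are placeholders.
+ *
+ *	qed_calc_session_ctx_validation(p_ctx, ctx_size, ctx_type, cid);
+ */
+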
+/**
+ * @brief qed_calc_task_ctx_validation - Calculate validation byte for task
+ *	context.
+ *
+ * @param p_ctx_mem - pointer to context memory.
+ * @param ctx_size - context size.
+ * @param ctx_type - context type.
+ * @param tid - context tid.
+ */
+void qed_calc_task_ctx_validation(void *p_ctx_mem,
+				  u16 ctx_size, u8 ctx_type, u32 tid);
+
+/**
+ * @brief qed_memset_session_ctx - Memset session context to 0 while
+ *	preserving validation bytes.
+ *
+ * @param p_ctx_mem - pointer to context memory.
+ * @param ctx_size - size to initialize.
+ * @param ctx_type - context type.
+ */
+void qed_memset_session_ctx(void *p_ctx_mem, u32 ctx_size, u8 ctx_type);
+
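+/* Illustrative sketch (not part of the interface): clearing a session
+ * context for reuse without losing the validation bytes stamped earlier
+ * by qed_calc_session_ctx_validation():
+ *
+ *	qed_memset_session_ctx(p_ctx, ctx_size, ctx_type);
+ */
+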
+/**
+ * @brief qed_memset_task_ctx - Memset task context to 0 while preserving
+ *	validation bytes.
+ *
+ * @param p_ctx_mem - pointer to context memory.
+ * @param ctx_size - size to initialize.
+ * @param ctx_type - context type.
+ */
+void qed_memset_task_ctx(void *p_ctx_mem, u32 ctx_size, u8 ctx_type);
+
+/* Ystorm flow control mode. Use enum fw_flow_ctrl_mode */
+#define YSTORM_FLOW_CONTROL_MODE_OFFSET			(IRO[0].base)
+#define YSTORM_FLOW_CONTROL_MODE_SIZE			(IRO[0].size)
+
+/* Tstorm port statistics */
+#define TSTORM_PORT_STAT_OFFSET(port_id) \
 	(IRO[1].base + ((port_id) * IRO[1].m1))
-#define	TSTORM_PORT_STAT_SIZE				(IRO[1].size)
+#define TSTORM_PORT_STAT_SIZE				(IRO[1].size)
+
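+/* Illustrative sketch (not part of the interface): IRO offsets are relative
+ * to the owning storm's RAM, so reading the Tstorm port statistics would
+ * typically combine the offset with the Tstorm RAM base from the register
+ * file:
+ *
+ *	struct tstorm_per_port_stat stats;
+ *
+ *	qed_memcpy_from(p_hwfn, p_ptt, &stats,
+ *			BAR0_MAP_REG_TSDM_RAM +
+ *			TSTORM_PORT_STAT_OFFSET(MFW_PORT(p_hwfn)),
+ *			sizeof(stats));
+ */
+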
+/* Tstorm ll2 port statistics */
 #define TSTORM_LL2_PORT_STAT_OFFSET(port_id) \
 	(IRO[2].base + ((port_id) * IRO[2].m1))
 #define TSTORM_LL2_PORT_STAT_SIZE			(IRO[2].size)
-#define	USTORM_VF_PF_CHANNEL_READY_OFFSET(vf_id) \
+
+/* Ustorm VF-PF Channel ready flag */
+#define USTORM_VF_PF_CHANNEL_READY_OFFSET(vf_id) \
 	(IRO[3].base + ((vf_id) * IRO[3].m1))
-#define	USTORM_VF_PF_CHANNEL_READY_SIZE			(IRO[3].size)
-#define	USTORM_FLR_FINAL_ACK_OFFSET(pf_id) \
-	(IRO[4].base + (pf_id) * IRO[4].m1)
-#define	USTORM_FLR_FINAL_ACK_SIZE			(IRO[4].size)
-#define	USTORM_EQE_CONS_OFFSET(pf_id) \
+#define USTORM_VF_PF_CHANNEL_READY_SIZE			(IRO[3].size)
+
+/* Ustorm Final flr cleanup ack */
+#define USTORM_FLR_FINAL_ACK_OFFSET(pf_id) \
+	(IRO[4].base + ((pf_id) * IRO[4].m1))
+#define USTORM_FLR_FINAL_ACK_SIZE			(IRO[4].size)
+
+/* Ustorm Event ring consumer */
+#define USTORM_EQE_CONS_OFFSET(pf_id) \
 	(IRO[5].base + ((pf_id) * IRO[5].m1))
-#define	USTORM_EQE_CONS_SIZE				(IRO[5].size)
-#define	USTORM_ETH_QUEUE_ZONE_OFFSET(queue_zone_id) \
+#define USTORM_EQE_CONS_SIZE				(IRO[5].size)
+
+/* Ustorm eth queue zone */
+#define USTORM_ETH_QUEUE_ZONE_OFFSET(queue_zone_id) \
 	(IRO[6].base + ((queue_zone_id) * IRO[6].m1))
-#define	USTORM_ETH_QUEUE_ZONE_SIZE			(IRO[6].size)
-#define	USTORM_COMMON_QUEUE_CONS_OFFSET(queue_zone_id) \
+#define USTORM_ETH_QUEUE_ZONE_SIZE			(IRO[6].size)
+
+/* Ustorm Common Queue ring consumer */
+#define USTORM_COMMON_QUEUE_CONS_OFFSET(queue_zone_id) \
 	(IRO[7].base + ((queue_zone_id) * IRO[7].m1))
-#define	USTORM_COMMON_QUEUE_CONS_SIZE			(IRO[7].size)
+#define USTORM_COMMON_QUEUE_CONS_SIZE			(IRO[7].size)
+
+/* Xstorm Integration Test Data */
+#define XSTORM_INTEG_TEST_DATA_OFFSET			(IRO[8].base)
+#define XSTORM_INTEG_TEST_DATA_SIZE			(IRO[8].size)
+
+/* Ystorm Integration Test Data */
+#define YSTORM_INTEG_TEST_DATA_OFFSET			(IRO[9].base)
+#define YSTORM_INTEG_TEST_DATA_SIZE			(IRO[9].size)
+
+/* Pstorm Integration Test Data */
+#define PSTORM_INTEG_TEST_DATA_OFFSET			(IRO[10].base)
+#define PSTORM_INTEG_TEST_DATA_SIZE			(IRO[10].size)
+
+/* Tstorm Integration Test Data */
+#define TSTORM_INTEG_TEST_DATA_OFFSET			(IRO[11].base)
+#define TSTORM_INTEG_TEST_DATA_SIZE			(IRO[11].size)
+
+/* Mstorm Integration Test Data */
+#define MSTORM_INTEG_TEST_DATA_OFFSET			(IRO[12].base)
+#define MSTORM_INTEG_TEST_DATA_SIZE			(IRO[12].size)
+
+/* Ustorm Integration Test Data */
+#define USTORM_INTEG_TEST_DATA_OFFSET			(IRO[13].base)
+#define USTORM_INTEG_TEST_DATA_SIZE			(IRO[13].size)
+
+/* Tstorm producers */
 #define TSTORM_LL2_RX_PRODS_OFFSET(core_rx_queue_id) \
-	(IRO[14].base +	((core_rx_queue_id) * IRO[14].m1))
+	(IRO[14].base + ((core_rx_queue_id) * IRO[14].m1))
 #define TSTORM_LL2_RX_PRODS_SIZE			(IRO[14].size)
+
+/* Tstorm LightL2 queue statistics */
 #define CORE_LL2_TSTORM_PER_QUEUE_STAT_OFFSET(core_rx_queue_id) \
 	(IRO[15].base + ((core_rx_queue_id) * IRO[15].m1))
 #define CORE_LL2_TSTORM_PER_QUEUE_STAT_SIZE		(IRO[15].size)
+
+/* Ustorm LightL2 queue statistics */
 #define CORE_LL2_USTORM_PER_QUEUE_STAT_OFFSET(core_rx_queue_id) \
-	(IRO[16].base +	((core_rx_queue_id) * IRO[16].m1))
+	(IRO[16].base + ((core_rx_queue_id) * IRO[16].m1))
 #define CORE_LL2_USTORM_PER_QUEUE_STAT_SIZE		(IRO[16].size)
+
+/* Pstorm LightL2 queue statistics */
 #define CORE_LL2_PSTORM_PER_QUEUE_STAT_OFFSET(core_tx_stats_id) \
-	(IRO[17].base +	((core_tx_stats_id) * IRO[17].m1))
-#define CORE_LL2_PSTORM_PER_QUEUE_STAT_SIZE	(IRO[17].	size)
-#define	MSTORM_QUEUE_STAT_OFFSET(stat_counter_id) \
+	(IRO[17].base + ((core_tx_stats_id) * IRO[17].m1))
+#define CORE_LL2_PSTORM_PER_QUEUE_STAT_SIZE		(IRO[17].size)
+
+/* Mstorm queue statistics */
+#define MSTORM_QUEUE_STAT_OFFSET(stat_counter_id) \
 	(IRO[18].base + ((stat_counter_id) * IRO[18].m1))
-#define	MSTORM_QUEUE_STAT_SIZE				(IRO[18].size)
-#define	MSTORM_ETH_PF_PRODS_OFFSET(queue_id) \
+#define MSTORM_QUEUE_STAT_SIZE				(IRO[18].size)
+
+/* Mstorm ETH PF queues producers */
+#define MSTORM_ETH_PF_PRODS_OFFSET(queue_id) \
 	(IRO[19].base + ((queue_id) * IRO[19].m1))
-#define	MSTORM_ETH_PF_PRODS_SIZE			(IRO[19].size)
+#define MSTORM_ETH_PF_PRODS_SIZE			(IRO[19].size)
+
+/* Mstorm ETH VF queues producers offset in RAM. Used in default VF zone size
+ * mode.
+ */
 #define MSTORM_ETH_VF_PRODS_OFFSET(vf_id, vf_queue_id) \
-	(IRO[20].base +	((vf_id) * IRO[20].m1) + ((vf_queue_id) * IRO[20].m2))
+	(IRO[20].base + ((vf_id) * IRO[20].m1) + ((vf_queue_id) * IRO[20].m2))
 #define MSTORM_ETH_VF_PRODS_SIZE			(IRO[20].size)
-#define	MSTORM_TPA_TIMEOUT_US_OFFSET			(IRO[21].base)
-#define	MSTORM_TPA_TIMEOUT_US_SIZE			(IRO[21].size)
-#define	MSTORM_ETH_PF_STAT_OFFSET(pf_id) \
+
+/* TPA aggregation timeout in us resolution (on ASIC) */
+#define MSTORM_TPA_TIMEOUT_US_OFFSET			(IRO[21].base)
+#define MSTORM_TPA_TIMEOUT_US_SIZE			(IRO[21].size)
+
+/* Mstorm pf statistics */
+#define MSTORM_ETH_PF_STAT_OFFSET(pf_id) \
 	(IRO[22].base + ((pf_id) * IRO[22].m1))
-#define	MSTORM_ETH_PF_STAT_SIZE				(IRO[22].size)
-#define	USTORM_QUEUE_STAT_OFFSET(stat_counter_id) \
+#define MSTORM_ETH_PF_STAT_SIZE				(IRO[22].size)
+
+/* Ustorm queue statistics */
+#define USTORM_QUEUE_STAT_OFFSET(stat_counter_id) \
 	(IRO[23].base + ((stat_counter_id) * IRO[23].m1))
-#define	USTORM_QUEUE_STAT_SIZE				(IRO[23].size)
-#define	USTORM_ETH_PF_STAT_OFFSET(pf_id) \
+#define USTORM_QUEUE_STAT_SIZE				(IRO[23].size)
+
+/* Ustorm pf statistics */
+#define USTORM_ETH_PF_STAT_OFFSET(pf_id)\
 	(IRO[24].base + ((pf_id) * IRO[24].m1))
-#define	USTORM_ETH_PF_STAT_SIZE				(IRO[24].size)
-#define	PSTORM_QUEUE_STAT_OFFSET(stat_counter_id) \
+#define USTORM_ETH_PF_STAT_SIZE				(IRO[24].size)
+
+/* Pstorm queue statistics */
+#define PSTORM_QUEUE_STAT_OFFSET(stat_counter_id) \
 	(IRO[25].base + ((stat_counter_id) * IRO[25].m1))
-#define	PSTORM_QUEUE_STAT_SIZE				(IRO[25].size)
-#define	PSTORM_ETH_PF_STAT_OFFSET(pf_id) \
+#define PSTORM_QUEUE_STAT_SIZE				(IRO[25].size)
+
+/* Pstorm pf statistics */
+#define PSTORM_ETH_PF_STAT_OFFSET(pf_id) \
 	(IRO[26].base + ((pf_id) * IRO[26].m1))
-#define	PSTORM_ETH_PF_STAT_SIZE				(IRO[26].size)
-#define	PSTORM_CTL_FRAME_ETHTYPE_OFFSET(ethtype) \
-	(IRO[27].base + ((ethtype) * IRO[27].m1))
-#define	PSTORM_CTL_FRAME_ETHTYPE_SIZE			(IRO[27].size)
-#define	TSTORM_ETH_PRS_INPUT_OFFSET			(IRO[28].base)
-#define	TSTORM_ETH_PRS_INPUT_SIZE			(IRO[28].size)
-#define	ETH_RX_RATE_LIMIT_OFFSET(pf_id) \
+#define PSTORM_ETH_PF_STAT_SIZE				(IRO[26].size)
+
+/* Control frame's EthType configuration for TX control frame security */
+#define PSTORM_CTL_FRAME_ETHTYPE_OFFSET(eth_type_id) \
+	(IRO[27].base + ((eth_type_id) * IRO[27].m1))
+#define PSTORM_CTL_FRAME_ETHTYPE_SIZE			(IRO[27].size)
+
+/* Tstorm last parser message */
+#define TSTORM_ETH_PRS_INPUT_OFFSET			(IRO[28].base)
+#define TSTORM_ETH_PRS_INPUT_SIZE			(IRO[28].size)
+
+/* Tstorm Eth limit Rx rate */
+#define ETH_RX_RATE_LIMIT_OFFSET(pf_id) \
 	(IRO[29].base + ((pf_id) * IRO[29].m1))
-#define	ETH_RX_RATE_LIMIT_SIZE				(IRO[29].size)
-#define	XSTORM_ETH_QUEUE_ZONE_OFFSET(queue_id) \
+#define ETH_RX_RATE_LIMIT_SIZE				(IRO[29].size)
+
+/* Xstorm queue zone */
+#define XSTORM_ETH_QUEUE_ZONE_OFFSET(queue_id) \
 	(IRO[30].base + ((queue_id) * IRO[30].m1))
-#define	XSTORM_ETH_QUEUE_ZONE_SIZE			(IRO[30].size)
+#define XSTORM_ETH_QUEUE_ZONE_SIZE			(IRO[30].size)
+
+/* Ystorm cqe producer */
+#define YSTORM_TOE_CQ_PROD_OFFSET(rss_id) \
+	(IRO[31].base + ((rss_id) * IRO[31].m1))
+#define YSTORM_TOE_CQ_PROD_SIZE				(IRO[31].size)
+
+/* Ustorm cqe producer */
+#define USTORM_TOE_CQ_PROD_OFFSET(rss_id) \
+	(IRO[32].base + ((rss_id) * IRO[32].m1))
+#define USTORM_TOE_CQ_PROD_SIZE				(IRO[32].size)
+
+/* Ustorm grq producer */
+#define USTORM_TOE_GRQ_PROD_OFFSET(pf_id) \
+	(IRO[33].base + ((pf_id) * IRO[33].m1))
+#define USTORM_TOE_GRQ_PROD_SIZE			(IRO[33].size)
+
+/* Tstorm cmdq-cons of given command queue-id */
 #define TSTORM_SCSI_CMDQ_CONS_OFFSET(cmdq_queue_id) \
-	(IRO[34].base +	((cmdq_queue_id) * IRO[34].m1))
-#define TSTORM_SCSI_CMDQ_CONS_SIZE				(IRO[34].size)
+	(IRO[34].base + ((cmdq_queue_id) * IRO[34].m1))
+#define TSTORM_SCSI_CMDQ_CONS_SIZE			(IRO[34].size)
+
+/* Tstorm (reflects M-Storm) bdq-external-producer of given function ID,
+ * BDqueue-id.
+ */
 #define TSTORM_SCSI_BDQ_EXT_PROD_OFFSET(func_id, bdq_id) \
-	(IRO[35].base +	((func_id) * IRO[35].m1) + ((bdq_id) * IRO[35].m2))
-#define TSTORM_SCSI_BDQ_EXT_PROD_SIZE				(IRO[35].size)
+	(IRO[35].base + ((func_id) * IRO[35].m1) + ((bdq_id) * IRO[35].m2))
+#define TSTORM_SCSI_BDQ_EXT_PROD_SIZE			(IRO[35].size)
+
+/* Mstorm bdq-external-producer of given BDQ resource ID, BDqueue-id */
 #define MSTORM_SCSI_BDQ_EXT_PROD_OFFSET(func_id, bdq_id) \
-	(IRO[36].base +	((func_id) * IRO[36].m1) + ((bdq_id) * IRO[36].m2))
-#define MSTORM_SCSI_BDQ_EXT_PROD_SIZE				(IRO[36].size)
+	(IRO[36].base + ((func_id) * IRO[36].m1) + ((bdq_id) * IRO[36].m2))
+#define MSTORM_SCSI_BDQ_EXT_PROD_SIZE			(IRO[36].size)
+
+/* Tstorm iSCSI RX stats */
 #define TSTORM_ISCSI_RX_STATS_OFFSET(pf_id) \
-	(IRO[37].base +	((pf_id) * IRO[37].m1))
-#define TSTORM_ISCSI_RX_STATS_SIZE				(IRO[37].size)
+	(IRO[37].base + ((pf_id) * IRO[37].m1))
+#define TSTORM_ISCSI_RX_STATS_SIZE			(IRO[37].size)
+
+/* Mstorm iSCSI RX stats */
 #define MSTORM_ISCSI_RX_STATS_OFFSET(pf_id) \
-	(IRO[38].base +	((pf_id) * IRO[38].m1))
-#define MSTORM_ISCSI_RX_STATS_SIZE				(IRO[38].size)
+	(IRO[38].base + ((pf_id) * IRO[38].m1))
+#define MSTORM_ISCSI_RX_STATS_SIZE			(IRO[38].size)
+
+/* Ustorm iSCSI RX stats */
 #define USTORM_ISCSI_RX_STATS_OFFSET(pf_id) \
-	(IRO[39].base +	((pf_id) * IRO[39].m1))
-#define USTORM_ISCSI_RX_STATS_SIZE				(IRO[39].size)
+	(IRO[39].base + ((pf_id) * IRO[39].m1))
+#define USTORM_ISCSI_RX_STATS_SIZE			(IRO[39].size)
+
+/* Xstorm iSCSI TX stats */
 #define XSTORM_ISCSI_TX_STATS_OFFSET(pf_id) \
-	(IRO[40].base +	((pf_id) * IRO[40].m1))
-#define XSTORM_ISCSI_TX_STATS_SIZE				(IRO[40].size)
+	(IRO[40].base + ((pf_id) * IRO[40].m1))
+#define XSTORM_ISCSI_TX_STATS_SIZE			(IRO[40].size)
+
+/* Ystorm iSCSI TX stats */
 #define YSTORM_ISCSI_TX_STATS_OFFSET(pf_id) \
-	(IRO[41].base +	((pf_id) * IRO[41].m1))
-#define YSTORM_ISCSI_TX_STATS_SIZE				(IRO[41].size)
+	(IRO[41].base + ((pf_id) * IRO[41].m1))
+#define YSTORM_ISCSI_TX_STATS_SIZE			(IRO[41].size)
+
+/* Pstorm iSCSI TX stats */
 #define PSTORM_ISCSI_TX_STATS_OFFSET(pf_id) \
-	(IRO[42].base +	((pf_id) * IRO[42].m1))
-#define PSTORM_ISCSI_TX_STATS_SIZE				(IRO[42].size)
-#define PSTORM_RDMA_QUEUE_STAT_OFFSET(rdma_stat_counter_id) \
-	(IRO[45].base +	((rdma_stat_counter_id) * IRO[45].m1))
-#define PSTORM_RDMA_QUEUE_STAT_SIZE				(IRO[45].size)
-#define TSTORM_RDMA_QUEUE_STAT_OFFSET(rdma_stat_counter_id) \
-	(IRO[46].base +	((rdma_stat_counter_id) * IRO[46].m1))
-#define TSTORM_RDMA_QUEUE_STAT_SIZE				(IRO[46].size)
+	(IRO[42].base + ((pf_id) * IRO[42].m1))
+#define PSTORM_ISCSI_TX_STATS_SIZE			(IRO[42].size)
+
+/* Tstorm FCoE RX stats */
 #define TSTORM_FCOE_RX_STATS_OFFSET(pf_id) \
-	(IRO[43].base +	((pf_id) * IRO[43].m1))
+	(IRO[43].base + ((pf_id) * IRO[43].m1))
+#define TSTORM_FCOE_RX_STATS_SIZE			(IRO[43].size)
+
+/* Pstorm FCoE TX stats */
 #define PSTORM_FCOE_TX_STATS_OFFSET(pf_id) \
 	(IRO[44].base + ((pf_id) * IRO[44].m1))
+#define PSTORM_FCOE_TX_STATS_SIZE			(IRO[44].size)
 
-static const struct iro iro_arr[49] = {
+/* Pstorm RDMA queue statistics */
+#define PSTORM_RDMA_QUEUE_STAT_OFFSET(rdma_stat_counter_id) \
+	(IRO[45].base + ((rdma_stat_counter_id) * IRO[45].m1))
+#define PSTORM_RDMA_QUEUE_STAT_SIZE			(IRO[45].size)
+
+/* Tstorm RDMA queue statistics */
+#define TSTORM_RDMA_QUEUE_STAT_OFFSET(rdma_stat_counter_id) \
+	(IRO[46].base + ((rdma_stat_counter_id) * IRO[46].m1))
+#define TSTORM_RDMA_QUEUE_STAT_SIZE			(IRO[46].size)
+
+/* Xstorm iWARP rxmit stats */
+#define XSTORM_IWARP_RXMIT_STATS_OFFSET(pf_id) \
+	(IRO[47].base + ((pf_id) * IRO[47].m1))
+#define XSTORM_IWARP_RXMIT_STATS_SIZE			(IRO[47].size)
+
+/* Tstorm RoCE Event Statistics */
+#define TSTORM_ROCE_EVENTS_STAT_OFFSET(roce_pf_id) \
+	(IRO[48].base + ((roce_pf_id) * IRO[48].m1))
+#define TSTORM_ROCE_EVENTS_STAT_SIZE			(IRO[48].size)
+
+/* DCQCN Received Statistics */
+#define YSTORM_ROCE_DCQCN_RECEIVED_STATS_OFFSET(roce_pf_id) \
+	(IRO[49].base + ((roce_pf_id) * IRO[49].m1))
+#define YSTORM_ROCE_DCQCN_RECEIVED_STATS_SIZE		(IRO[49].size)
+
+/* DCQCN Sent Statistics */
+#define PSTORM_ROCE_DCQCN_SENT_STATS_OFFSET(roce_pf_id) \
+	(IRO[50].base + ((roce_pf_id) * IRO[50].m1))
+#define PSTORM_ROCE_DCQCN_SENT_STATS_SIZE		(IRO[50].size)
+
+static const struct iro iro_arr[51] = {
 	{0x0, 0x0, 0x0, 0x0, 0x8},
-	{0x4cb0, 0x80, 0x0, 0x0, 0x80},
-	{0x6518, 0x20, 0x0, 0x0, 0x20},
+	{0x4cb8, 0x88, 0x0, 0x0, 0x88},
+	{0x6530, 0x20, 0x0, 0x0, 0x20},
 	{0xb00, 0x8, 0x0, 0x0, 0x4},
 	{0xa80, 0x8, 0x0, 0x0, 0x4},
 	{0x0, 0x8, 0x0, 0x0, 0x2},
 	{0x80, 0x8, 0x0, 0x0, 0x4},
 	{0x84, 0x8, 0x0, 0x0, 0x2},
+	{0x4c48, 0x0, 0x0, 0x0, 0x78},
+	{0x3e18, 0x0, 0x0, 0x0, 0x78},
+	{0x2b58, 0x0, 0x0, 0x0, 0x78},
 	{0x4c40, 0x0, 0x0, 0x0, 0x78},
-	{0x3df0, 0x0, 0x0, 0x0, 0x78},
-	{0x29b0, 0x0, 0x0, 0x0, 0x78},
-	{0x4c38, 0x0, 0x0, 0x0, 0x78},
-	{0x4990, 0x0, 0x0, 0x0, 0x78},
-	{0x7f48, 0x0, 0x0, 0x0, 0x78},
+	{0x4998, 0x0, 0x0, 0x0, 0x78},
+	{0x7f50, 0x0, 0x0, 0x0, 0x78},
 	{0xa28, 0x8, 0x0, 0x0, 0x8},
-	{0x61f8, 0x10, 0x0, 0x0, 0x10},
-	{0xbd20, 0x30, 0x0, 0x0, 0x30},
-	{0x95b8, 0x30, 0x0, 0x0, 0x30},
-	{0x4b60, 0x80, 0x0, 0x0, 0x40},
+	{0x6210, 0x10, 0x0, 0x0, 0x10},
+	{0xb820, 0x30, 0x0, 0x0, 0x30},
+	{0x96c0, 0x30, 0x0, 0x0, 0x30},
+	{0x4b68, 0x80, 0x0, 0x0, 0x40},
 	{0x1f8, 0x4, 0x0, 0x0, 0x4},
-	{0x53a0, 0x80, 0x4, 0x0, 0x4},
-	{0xc7c8, 0x0, 0x0, 0x0, 0x4},
-	{0x4ba0, 0x80, 0x0, 0x0, 0x20},
-	{0x8150, 0x40, 0x0, 0x0, 0x30},
-	{0xec70, 0x60, 0x0, 0x0, 0x60},
-	{0x2b48, 0x80, 0x0, 0x0, 0x38},
-	{0xf1b0, 0x78, 0x0, 0x0, 0x78},
+	{0x53a8, 0x80, 0x4, 0x0, 0x4},
+	{0xc7d0, 0x0, 0x0, 0x0, 0x4},
+	{0x4ba8, 0x80, 0x0, 0x0, 0x20},
+	{0x8158, 0x40, 0x0, 0x0, 0x30},
+	{0xe770, 0x60, 0x0, 0x0, 0x60},
+	{0x2cf0, 0x80, 0x0, 0x0, 0x38},
+	{0xf2b8, 0x78, 0x0, 0x0, 0x78},
 	{0x1f8, 0x4, 0x0, 0x0, 0x4},
-	{0xaef8, 0x0, 0x0, 0x0, 0xf0},
-	{0xafe8, 0x8, 0x0, 0x0, 0x8},
+	{0xaf20, 0x0, 0x0, 0x0, 0xf0},
+	{0xb010, 0x8, 0x0, 0x0, 0x8},
 	{0x1f8, 0x8, 0x0, 0x0, 0x8},
 	{0xac0, 0x8, 0x0, 0x0, 0x8},
 	{0x2578, 0x8, 0x0, 0x0, 0x8},
 	{0x24f8, 0x8, 0x0, 0x0, 0x8},
 	{0x0, 0x8, 0x0, 0x0, 0x8},
-	{0x200, 0x10, 0x8, 0x0, 0x8},
-	{0xb78, 0x10, 0x8, 0x0, 0x2},
-	{0xd9a8, 0x38, 0x0, 0x0, 0x24},
-	{0x12988, 0x10, 0x0, 0x0, 0x8},
-	{0x11fa0, 0x38, 0x0, 0x0, 0x18},
-	{0xa580, 0x38, 0x0, 0x0, 0x10},
-	{0x86f8, 0x30, 0x0, 0x0, 0x18},
-	{0x101f8, 0x10, 0x0, 0x0, 0x10},
-	{0xde28, 0x48, 0x0, 0x0, 0x38},
-	{0x10660, 0x20, 0x0, 0x0, 0x20},
-	{0x2b80, 0x80, 0x0, 0x0, 0x10},
-	{0x5020, 0x10, 0x0, 0x0, 0x10},
-	{0xc9b0, 0x30, 0x0, 0x0, 0x10},
-	{0xeec0, 0x10, 0x0, 0x0, 0x10},
+	{0x400, 0x18, 0x8, 0x0, 0x8},
+	{0xb78, 0x18, 0x8, 0x0, 0x2},
+	{0xd898, 0x50, 0x0, 0x0, 0x3c},
+	{0x12908, 0x18, 0x0, 0x0, 0x10},
+	{0x11aa8, 0x40, 0x0, 0x0, 0x18},
+	{0xa588, 0x50, 0x0, 0x0, 0x20},
+	{0x8700, 0x40, 0x0, 0x0, 0x28},
+	{0x10300, 0x18, 0x0, 0x0, 0x10},
+	{0xde48, 0x48, 0x0, 0x0, 0x38},
+	{0x10768, 0x20, 0x0, 0x0, 0x20},
+	{0x2d28, 0x80, 0x0, 0x0, 0x10},
+	{0x5048, 0x10, 0x0, 0x0, 0x10},
+	{0xc9b8, 0x30, 0x0, 0x0, 0x10},
+	{0xeee0, 0x10, 0x0, 0x0, 0x10},
+	{0xa3a0, 0x10, 0x0, 0x0, 0x10},
+	{0x13108, 0x8, 0x0, 0x0, 0x8},
 };
 
 /* Runtime array offsets */
-#define DORQ_REG_PF_MAX_ICID_0_RT_OFFSET	0
-#define DORQ_REG_PF_MAX_ICID_1_RT_OFFSET	1
-#define DORQ_REG_PF_MAX_ICID_2_RT_OFFSET	2
-#define DORQ_REG_PF_MAX_ICID_3_RT_OFFSET	3
-#define DORQ_REG_PF_MAX_ICID_4_RT_OFFSET	4
-#define DORQ_REG_PF_MAX_ICID_5_RT_OFFSET	5
-#define DORQ_REG_PF_MAX_ICID_6_RT_OFFSET	6
-#define DORQ_REG_PF_MAX_ICID_7_RT_OFFSET	7
-#define DORQ_REG_VF_MAX_ICID_0_RT_OFFSET	8
-#define DORQ_REG_VF_MAX_ICID_1_RT_OFFSET	9
-#define DORQ_REG_VF_MAX_ICID_2_RT_OFFSET	10
-#define DORQ_REG_VF_MAX_ICID_3_RT_OFFSET	11
-#define DORQ_REG_VF_MAX_ICID_4_RT_OFFSET	12
-#define DORQ_REG_VF_MAX_ICID_5_RT_OFFSET	13
-#define DORQ_REG_VF_MAX_ICID_6_RT_OFFSET	14
-#define DORQ_REG_VF_MAX_ICID_7_RT_OFFSET	15
-#define DORQ_REG_PF_WAKE_ALL_RT_OFFSET	16
-#define DORQ_REG_TAG1_ETHERTYPE_RT_OFFSET	17
-#define IGU_REG_PF_CONFIGURATION_RT_OFFSET	18
-#define IGU_REG_VF_CONFIGURATION_RT_OFFSET	19
-#define IGU_REG_ATTN_MSG_ADDR_L_RT_OFFSET	20
-#define IGU_REG_ATTN_MSG_ADDR_H_RT_OFFSET	21
-#define IGU_REG_LEADING_EDGE_LATCH_RT_OFFSET	22
-#define IGU_REG_TRAILING_EDGE_LATCH_RT_OFFSET	23
-#define CAU_REG_CQE_AGG_UNIT_SIZE_RT_OFFSET	24
-#define CAU_REG_SB_VAR_MEMORY_RT_OFFSET	761
-#define CAU_REG_SB_VAR_MEMORY_RT_SIZE	736
-#define CAU_REG_SB_VAR_MEMORY_RT_OFFSET	761
-#define CAU_REG_SB_VAR_MEMORY_RT_SIZE	736
-#define CAU_REG_SB_ADDR_MEMORY_RT_OFFSET	1497
-#define CAU_REG_SB_ADDR_MEMORY_RT_SIZE	736
-#define CAU_REG_PI_MEMORY_RT_OFFSET	2233
-#define CAU_REG_PI_MEMORY_RT_SIZE	4416
-#define PRS_REG_SEARCH_RESP_INITIATOR_TYPE_RT_OFFSET	6649
-#define PRS_REG_TASK_ID_MAX_INITIATOR_PF_RT_OFFSET	6650
-#define PRS_REG_TASK_ID_MAX_INITIATOR_VF_RT_OFFSET	6651
-#define PRS_REG_TASK_ID_MAX_TARGET_PF_RT_OFFSET	6652
-#define PRS_REG_TASK_ID_MAX_TARGET_VF_RT_OFFSET	6653
-#define PRS_REG_SEARCH_TCP_RT_OFFSET	6654
-#define PRS_REG_SEARCH_FCOE_RT_OFFSET	6655
-#define PRS_REG_SEARCH_ROCE_RT_OFFSET	6656
-#define PRS_REG_ROCE_DEST_QP_MAX_VF_RT_OFFSET	6657
-#define PRS_REG_ROCE_DEST_QP_MAX_PF_RT_OFFSET	6658
-#define PRS_REG_SEARCH_OPENFLOW_RT_OFFSET	6659
-#define PRS_REG_SEARCH_NON_IP_AS_OPENFLOW_RT_OFFSET	6660
-#define PRS_REG_OPENFLOW_SUPPORT_ONLY_KNOWN_OVER_IP_RT_OFFSET	6661
-#define PRS_REG_OPENFLOW_SEARCH_KEY_MASK_RT_OFFSET	6662
-#define PRS_REG_TAG_ETHERTYPE_0_RT_OFFSET	6663
-#define PRS_REG_LIGHT_L2_ETHERTYPE_EN_RT_OFFSET	6664
-#define SRC_REG_FIRSTFREE_RT_OFFSET	6665
-#define SRC_REG_FIRSTFREE_RT_SIZE	2
-#define SRC_REG_LASTFREE_RT_OFFSET	6667
-#define SRC_REG_LASTFREE_RT_SIZE	2
-#define SRC_REG_COUNTFREE_RT_OFFSET	6669
-#define SRC_REG_NUMBER_HASH_BITS_RT_OFFSET	6670
-#define PSWRQ2_REG_CDUT_P_SIZE_RT_OFFSET	6671
-#define PSWRQ2_REG_CDUC_P_SIZE_RT_OFFSET	6672
-#define PSWRQ2_REG_TM_P_SIZE_RT_OFFSET	6673
-#define PSWRQ2_REG_QM_P_SIZE_RT_OFFSET	6674
-#define PSWRQ2_REG_SRC_P_SIZE_RT_OFFSET	6675
-#define PSWRQ2_REG_TSDM_P_SIZE_RT_OFFSET	6676
-#define PSWRQ2_REG_TM_FIRST_ILT_RT_OFFSET	6677
-#define PSWRQ2_REG_TM_LAST_ILT_RT_OFFSET	6678
-#define PSWRQ2_REG_QM_FIRST_ILT_RT_OFFSET	6679
-#define PSWRQ2_REG_QM_LAST_ILT_RT_OFFSET	6680
-#define PSWRQ2_REG_SRC_FIRST_ILT_RT_OFFSET	6681
-#define PSWRQ2_REG_SRC_LAST_ILT_RT_OFFSET	6682
-#define PSWRQ2_REG_CDUC_FIRST_ILT_RT_OFFSET	6683
-#define PSWRQ2_REG_CDUC_LAST_ILT_RT_OFFSET	6684
-#define PSWRQ2_REG_CDUT_FIRST_ILT_RT_OFFSET	6685
-#define PSWRQ2_REG_CDUT_LAST_ILT_RT_OFFSET	6686
-#define PSWRQ2_REG_TSDM_FIRST_ILT_RT_OFFSET	6687
-#define PSWRQ2_REG_TSDM_LAST_ILT_RT_OFFSET	6688
-#define PSWRQ2_REG_TM_NUMBER_OF_PF_BLOCKS_RT_OFFSET	6689
-#define PSWRQ2_REG_CDUT_NUMBER_OF_PF_BLOCKS_RT_OFFSET	6690
-#define PSWRQ2_REG_CDUC_NUMBER_OF_PF_BLOCKS_RT_OFFSET	6691
-#define PSWRQ2_REG_TM_VF_BLOCKS_RT_OFFSET	6692
-#define PSWRQ2_REG_CDUT_VF_BLOCKS_RT_OFFSET	6693
-#define PSWRQ2_REG_CDUC_VF_BLOCKS_RT_OFFSET	6694
-#define PSWRQ2_REG_TM_BLOCKS_FACTOR_RT_OFFSET	6695
-#define PSWRQ2_REG_CDUT_BLOCKS_FACTOR_RT_OFFSET	6696
-#define PSWRQ2_REG_CDUC_BLOCKS_FACTOR_RT_OFFSET	6697
-#define PSWRQ2_REG_VF_BASE_RT_OFFSET	6698
-#define PSWRQ2_REG_VF_LAST_ILT_RT_OFFSET	6699
-#define PSWRQ2_REG_DRAM_ALIGN_WR_RT_OFFSET	6700
-#define PSWRQ2_REG_DRAM_ALIGN_RD_RT_OFFSET	6701
-#define PSWRQ2_REG_ILT_MEMORY_RT_OFFSET	6702
-#define PSWRQ2_REG_ILT_MEMORY_RT_SIZE	22000
-#define PGLUE_REG_B_VF_BASE_RT_OFFSET	28702
-#define PGLUE_REG_B_MSDM_OFFSET_MASK_B_RT_OFFSET	28703
-#define PGLUE_REG_B_MSDM_VF_SHIFT_B_RT_OFFSET	28704
-#define PGLUE_REG_B_CACHE_LINE_SIZE_RT_OFFSET	28705
-#define PGLUE_REG_B_PF_BAR0_SIZE_RT_OFFSET	28706
-#define PGLUE_REG_B_PF_BAR1_SIZE_RT_OFFSET	28707
-#define PGLUE_REG_B_VF_BAR1_SIZE_RT_OFFSET	28708
-#define TM_REG_VF_ENABLE_CONN_RT_OFFSET	28709
-#define TM_REG_PF_ENABLE_CONN_RT_OFFSET	28710
-#define TM_REG_PF_ENABLE_TASK_RT_OFFSET	28711
-#define TM_REG_GROUP_SIZE_RESOLUTION_CONN_RT_OFFSET	28712
-#define TM_REG_GROUP_SIZE_RESOLUTION_TASK_RT_OFFSET	28713
-#define TM_REG_CONFIG_CONN_MEM_RT_OFFSET	28714
-#define TM_REG_CONFIG_CONN_MEM_RT_SIZE	416
-#define TM_REG_CONFIG_TASK_MEM_RT_OFFSET	29130
-#define TM_REG_CONFIG_TASK_MEM_RT_SIZE	608
-#define QM_REG_MAXPQSIZE_0_RT_OFFSET	29738
-#define QM_REG_MAXPQSIZE_1_RT_OFFSET	29739
-#define QM_REG_MAXPQSIZE_2_RT_OFFSET	29740
-#define QM_REG_MAXPQSIZETXSEL_0_RT_OFFSET	29741
-#define QM_REG_MAXPQSIZETXSEL_1_RT_OFFSET	29742
-#define QM_REG_MAXPQSIZETXSEL_2_RT_OFFSET	29743
-#define QM_REG_MAXPQSIZETXSEL_3_RT_OFFSET	29744
-#define QM_REG_MAXPQSIZETXSEL_4_RT_OFFSET	29745
-#define QM_REG_MAXPQSIZETXSEL_5_RT_OFFSET	29746
-#define QM_REG_MAXPQSIZETXSEL_6_RT_OFFSET	29747
-#define QM_REG_MAXPQSIZETXSEL_7_RT_OFFSET	29748
-#define QM_REG_MAXPQSIZETXSEL_8_RT_OFFSET	29749
-#define QM_REG_MAXPQSIZETXSEL_9_RT_OFFSET	29750
-#define QM_REG_MAXPQSIZETXSEL_10_RT_OFFSET	29751
-#define QM_REG_MAXPQSIZETXSEL_11_RT_OFFSET	29752
-#define QM_REG_MAXPQSIZETXSEL_12_RT_OFFSET	29753
-#define QM_REG_MAXPQSIZETXSEL_13_RT_OFFSET	29754
-#define QM_REG_MAXPQSIZETXSEL_14_RT_OFFSET	29755
-#define QM_REG_MAXPQSIZETXSEL_15_RT_OFFSET	29756
-#define QM_REG_MAXPQSIZETXSEL_16_RT_OFFSET	29757
-#define QM_REG_MAXPQSIZETXSEL_17_RT_OFFSET	29758
-#define QM_REG_MAXPQSIZETXSEL_18_RT_OFFSET	29759
-#define QM_REG_MAXPQSIZETXSEL_19_RT_OFFSET	29760
-#define QM_REG_MAXPQSIZETXSEL_20_RT_OFFSET	29761
-#define QM_REG_MAXPQSIZETXSEL_21_RT_OFFSET	29762
-#define QM_REG_MAXPQSIZETXSEL_22_RT_OFFSET	29763
-#define QM_REG_MAXPQSIZETXSEL_23_RT_OFFSET	29764
-#define QM_REG_MAXPQSIZETXSEL_24_RT_OFFSET	29765
-#define QM_REG_MAXPQSIZETXSEL_25_RT_OFFSET	29766
-#define QM_REG_MAXPQSIZETXSEL_26_RT_OFFSET	29767
-#define QM_REG_MAXPQSIZETXSEL_27_RT_OFFSET	29768
-#define QM_REG_MAXPQSIZETXSEL_28_RT_OFFSET	29769
-#define QM_REG_MAXPQSIZETXSEL_29_RT_OFFSET	29770
-#define QM_REG_MAXPQSIZETXSEL_30_RT_OFFSET	29771
-#define QM_REG_MAXPQSIZETXSEL_31_RT_OFFSET	29772
-#define QM_REG_MAXPQSIZETXSEL_32_RT_OFFSET	29773
-#define QM_REG_MAXPQSIZETXSEL_33_RT_OFFSET	29774
-#define QM_REG_MAXPQSIZETXSEL_34_RT_OFFSET	29775
-#define QM_REG_MAXPQSIZETXSEL_35_RT_OFFSET	29776
-#define QM_REG_MAXPQSIZETXSEL_36_RT_OFFSET	29777
-#define QM_REG_MAXPQSIZETXSEL_37_RT_OFFSET	29778
-#define QM_REG_MAXPQSIZETXSEL_38_RT_OFFSET	29779
-#define QM_REG_MAXPQSIZETXSEL_39_RT_OFFSET	29780
-#define QM_REG_MAXPQSIZETXSEL_40_RT_OFFSET	29781
-#define QM_REG_MAXPQSIZETXSEL_41_RT_OFFSET	29782
-#define QM_REG_MAXPQSIZETXSEL_42_RT_OFFSET	29783
-#define QM_REG_MAXPQSIZETXSEL_43_RT_OFFSET	29784
-#define QM_REG_MAXPQSIZETXSEL_44_RT_OFFSET	29785
-#define QM_REG_MAXPQSIZETXSEL_45_RT_OFFSET	29786
-#define QM_REG_MAXPQSIZETXSEL_46_RT_OFFSET	29787
-#define QM_REG_MAXPQSIZETXSEL_47_RT_OFFSET	29788
-#define QM_REG_MAXPQSIZETXSEL_48_RT_OFFSET	29789
-#define QM_REG_MAXPQSIZETXSEL_49_RT_OFFSET	29790
-#define QM_REG_MAXPQSIZETXSEL_50_RT_OFFSET	29791
-#define QM_REG_MAXPQSIZETXSEL_51_RT_OFFSET	29792
-#define QM_REG_MAXPQSIZETXSEL_52_RT_OFFSET	29793
-#define QM_REG_MAXPQSIZETXSEL_53_RT_OFFSET	29794
-#define QM_REG_MAXPQSIZETXSEL_54_RT_OFFSET	29795
-#define QM_REG_MAXPQSIZETXSEL_55_RT_OFFSET	29796
-#define QM_REG_MAXPQSIZETXSEL_56_RT_OFFSET	29797
-#define QM_REG_MAXPQSIZETXSEL_57_RT_OFFSET	29798
-#define QM_REG_MAXPQSIZETXSEL_58_RT_OFFSET	29799
-#define QM_REG_MAXPQSIZETXSEL_59_RT_OFFSET	29800
-#define QM_REG_MAXPQSIZETXSEL_60_RT_OFFSET	29801
-#define QM_REG_MAXPQSIZETXSEL_61_RT_OFFSET	29802
-#define QM_REG_MAXPQSIZETXSEL_62_RT_OFFSET	29803
-#define QM_REG_MAXPQSIZETXSEL_63_RT_OFFSET	29804
-#define QM_REG_BASEADDROTHERPQ_RT_OFFSET	29805
-#define QM_REG_BASEADDROTHERPQ_RT_SIZE	128
-#define QM_REG_AFULLQMBYPTHRPFWFQ_RT_OFFSET	29933
-#define QM_REG_AFULLQMBYPTHRVPWFQ_RT_OFFSET	29934
-#define QM_REG_AFULLQMBYPTHRPFRL_RT_OFFSET	29935
-#define QM_REG_AFULLQMBYPTHRGLBLRL_RT_OFFSET	29936
-#define QM_REG_AFULLOPRTNSTCCRDMASK_RT_OFFSET	29937
-#define QM_REG_WRROTHERPQGRP_0_RT_OFFSET	29938
-#define QM_REG_WRROTHERPQGRP_1_RT_OFFSET	29939
-#define QM_REG_WRROTHERPQGRP_2_RT_OFFSET	29940
-#define QM_REG_WRROTHERPQGRP_3_RT_OFFSET	29941
-#define QM_REG_WRROTHERPQGRP_4_RT_OFFSET	29942
-#define QM_REG_WRROTHERPQGRP_5_RT_OFFSET	29943
-#define QM_REG_WRROTHERPQGRP_6_RT_OFFSET	29944
-#define QM_REG_WRROTHERPQGRP_7_RT_OFFSET	29945
-#define QM_REG_WRROTHERPQGRP_8_RT_OFFSET	29946
-#define QM_REG_WRROTHERPQGRP_9_RT_OFFSET	29947
-#define QM_REG_WRROTHERPQGRP_10_RT_OFFSET	29948
-#define QM_REG_WRROTHERPQGRP_11_RT_OFFSET	29949
-#define QM_REG_WRROTHERPQGRP_12_RT_OFFSET	29950
-#define QM_REG_WRROTHERPQGRP_13_RT_OFFSET	29951
-#define QM_REG_WRROTHERPQGRP_14_RT_OFFSET	29952
-#define QM_REG_WRROTHERPQGRP_15_RT_OFFSET	29953
-#define QM_REG_WRROTHERGRPWEIGHT_0_RT_OFFSET	29954
-#define QM_REG_WRROTHERGRPWEIGHT_1_RT_OFFSET	29955
-#define QM_REG_WRROTHERGRPWEIGHT_2_RT_OFFSET	29956
-#define QM_REG_WRROTHERGRPWEIGHT_3_RT_OFFSET	29957
-#define QM_REG_WRRTXGRPWEIGHT_0_RT_OFFSET	29958
-#define QM_REG_WRRTXGRPWEIGHT_1_RT_OFFSET	29959
-#define QM_REG_PQTX2PF_0_RT_OFFSET	29960
-#define QM_REG_PQTX2PF_1_RT_OFFSET	29961
-#define QM_REG_PQTX2PF_2_RT_OFFSET	29962
-#define QM_REG_PQTX2PF_3_RT_OFFSET	29963
-#define QM_REG_PQTX2PF_4_RT_OFFSET	29964
-#define QM_REG_PQTX2PF_5_RT_OFFSET	29965
-#define QM_REG_PQTX2PF_6_RT_OFFSET	29966
-#define QM_REG_PQTX2PF_7_RT_OFFSET	29967
-#define QM_REG_PQTX2PF_8_RT_OFFSET	29968
-#define QM_REG_PQTX2PF_9_RT_OFFSET	29969
-#define QM_REG_PQTX2PF_10_RT_OFFSET	29970
-#define QM_REG_PQTX2PF_11_RT_OFFSET	29971
-#define QM_REG_PQTX2PF_12_RT_OFFSET	29972
-#define QM_REG_PQTX2PF_13_RT_OFFSET	29973
-#define QM_REG_PQTX2PF_14_RT_OFFSET	29974
-#define QM_REG_PQTX2PF_15_RT_OFFSET	29975
-#define QM_REG_PQTX2PF_16_RT_OFFSET	29976
-#define QM_REG_PQTX2PF_17_RT_OFFSET	29977
-#define QM_REG_PQTX2PF_18_RT_OFFSET	29978
-#define QM_REG_PQTX2PF_19_RT_OFFSET	29979
-#define QM_REG_PQTX2PF_20_RT_OFFSET	29980
-#define QM_REG_PQTX2PF_21_RT_OFFSET	29981
-#define QM_REG_PQTX2PF_22_RT_OFFSET	29982
-#define QM_REG_PQTX2PF_23_RT_OFFSET	29983
-#define QM_REG_PQTX2PF_24_RT_OFFSET	29984
-#define QM_REG_PQTX2PF_25_RT_OFFSET	29985
-#define QM_REG_PQTX2PF_26_RT_OFFSET	29986
-#define QM_REG_PQTX2PF_27_RT_OFFSET	29987
-#define QM_REG_PQTX2PF_28_RT_OFFSET	29988
-#define QM_REG_PQTX2PF_29_RT_OFFSET	29989
-#define QM_REG_PQTX2PF_30_RT_OFFSET	29990
-#define QM_REG_PQTX2PF_31_RT_OFFSET	29991
-#define QM_REG_PQTX2PF_32_RT_OFFSET	29992
-#define QM_REG_PQTX2PF_33_RT_OFFSET	29993
-#define QM_REG_PQTX2PF_34_RT_OFFSET	29994
-#define QM_REG_PQTX2PF_35_RT_OFFSET	29995
-#define QM_REG_PQTX2PF_36_RT_OFFSET	29996
-#define QM_REG_PQTX2PF_37_RT_OFFSET	29997
-#define QM_REG_PQTX2PF_38_RT_OFFSET	29998
-#define QM_REG_PQTX2PF_39_RT_OFFSET	29999
-#define QM_REG_PQTX2PF_40_RT_OFFSET	30000
-#define QM_REG_PQTX2PF_41_RT_OFFSET	30001
-#define QM_REG_PQTX2PF_42_RT_OFFSET	30002
-#define QM_REG_PQTX2PF_43_RT_OFFSET	30003
-#define QM_REG_PQTX2PF_44_RT_OFFSET	30004
-#define QM_REG_PQTX2PF_45_RT_OFFSET	30005
-#define QM_REG_PQTX2PF_46_RT_OFFSET	30006
-#define QM_REG_PQTX2PF_47_RT_OFFSET	30007
-#define QM_REG_PQTX2PF_48_RT_OFFSET	30008
-#define QM_REG_PQTX2PF_49_RT_OFFSET	30009
-#define QM_REG_PQTX2PF_50_RT_OFFSET	30010
-#define QM_REG_PQTX2PF_51_RT_OFFSET	30011
-#define QM_REG_PQTX2PF_52_RT_OFFSET	30012
-#define QM_REG_PQTX2PF_53_RT_OFFSET	30013
-#define QM_REG_PQTX2PF_54_RT_OFFSET	30014
-#define QM_REG_PQTX2PF_55_RT_OFFSET	30015
-#define QM_REG_PQTX2PF_56_RT_OFFSET	30016
-#define QM_REG_PQTX2PF_57_RT_OFFSET	30017
-#define QM_REG_PQTX2PF_58_RT_OFFSET	30018
-#define QM_REG_PQTX2PF_59_RT_OFFSET	30019
-#define QM_REG_PQTX2PF_60_RT_OFFSET	30020
-#define QM_REG_PQTX2PF_61_RT_OFFSET	30021
-#define QM_REG_PQTX2PF_62_RT_OFFSET	30022
-#define QM_REG_PQTX2PF_63_RT_OFFSET	30023
-#define QM_REG_PQOTHER2PF_0_RT_OFFSET	30024
-#define QM_REG_PQOTHER2PF_1_RT_OFFSET	30025
-#define QM_REG_PQOTHER2PF_2_RT_OFFSET	30026
-#define QM_REG_PQOTHER2PF_3_RT_OFFSET	30027
-#define QM_REG_PQOTHER2PF_4_RT_OFFSET	30028
-#define QM_REG_PQOTHER2PF_5_RT_OFFSET	30029
-#define QM_REG_PQOTHER2PF_6_RT_OFFSET	30030
-#define QM_REG_PQOTHER2PF_7_RT_OFFSET	30031
-#define QM_REG_PQOTHER2PF_8_RT_OFFSET	30032
-#define QM_REG_PQOTHER2PF_9_RT_OFFSET	30033
-#define QM_REG_PQOTHER2PF_10_RT_OFFSET	30034
-#define QM_REG_PQOTHER2PF_11_RT_OFFSET	30035
-#define QM_REG_PQOTHER2PF_12_RT_OFFSET	30036
-#define QM_REG_PQOTHER2PF_13_RT_OFFSET	30037
-#define QM_REG_PQOTHER2PF_14_RT_OFFSET	30038
-#define QM_REG_PQOTHER2PF_15_RT_OFFSET	30039
-#define QM_REG_RLGLBLPERIOD_0_RT_OFFSET	30040
-#define QM_REG_RLGLBLPERIOD_1_RT_OFFSET	30041
-#define QM_REG_RLGLBLPERIODTIMER_0_RT_OFFSET	30042
-#define QM_REG_RLGLBLPERIODTIMER_1_RT_OFFSET	30043
-#define QM_REG_RLGLBLPERIODSEL_0_RT_OFFSET	30044
-#define QM_REG_RLGLBLPERIODSEL_1_RT_OFFSET	30045
-#define QM_REG_RLGLBLPERIODSEL_2_RT_OFFSET	30046
-#define QM_REG_RLGLBLPERIODSEL_3_RT_OFFSET	30047
-#define QM_REG_RLGLBLPERIODSEL_4_RT_OFFSET	30048
-#define QM_REG_RLGLBLPERIODSEL_5_RT_OFFSET	30049
-#define QM_REG_RLGLBLPERIODSEL_6_RT_OFFSET	30050
-#define QM_REG_RLGLBLPERIODSEL_7_RT_OFFSET	30051
-#define QM_REG_RLGLBLINCVAL_RT_OFFSET	30052
-#define QM_REG_RLGLBLINCVAL_RT_SIZE	256
-#define QM_REG_RLGLBLUPPERBOUND_RT_OFFSET	30308
-#define QM_REG_RLGLBLUPPERBOUND_RT_SIZE	256
-#define QM_REG_RLGLBLCRD_RT_OFFSET	30564
-#define QM_REG_RLGLBLCRD_RT_SIZE	256
-#define QM_REG_RLGLBLENABLE_RT_OFFSET	30820
-#define QM_REG_RLPFPERIOD_RT_OFFSET	30821
-#define QM_REG_RLPFPERIODTIMER_RT_OFFSET	30822
-#define QM_REG_RLPFINCVAL_RT_OFFSET	30823
-#define QM_REG_RLPFINCVAL_RT_SIZE	16
-#define QM_REG_RLPFUPPERBOUND_RT_OFFSET	30839
-#define QM_REG_RLPFUPPERBOUND_RT_SIZE	16
-#define QM_REG_RLPFCRD_RT_OFFSET	30855
-#define QM_REG_RLPFCRD_RT_SIZE	16
-#define QM_REG_RLPFENABLE_RT_OFFSET	30871
-#define QM_REG_RLPFVOQENABLE_RT_OFFSET	30872
-#define QM_REG_WFQPFWEIGHT_RT_OFFSET	30873
-#define QM_REG_WFQPFWEIGHT_RT_SIZE	16
-#define QM_REG_WFQPFUPPERBOUND_RT_OFFSET	30889
-#define QM_REG_WFQPFUPPERBOUND_RT_SIZE	16
-#define QM_REG_WFQPFCRD_RT_OFFSET	30905
-#define QM_REG_WFQPFCRD_RT_SIZE	256
-#define QM_REG_WFQPFENABLE_RT_OFFSET	31161
-#define QM_REG_WFQVPENABLE_RT_OFFSET	31162
-#define QM_REG_BASEADDRTXPQ_RT_OFFSET	31163
-#define QM_REG_BASEADDRTXPQ_RT_SIZE	512
-#define QM_REG_TXPQMAP_RT_OFFSET	31675
-#define QM_REG_TXPQMAP_RT_SIZE	512
-#define QM_REG_WFQVPWEIGHT_RT_OFFSET	32187
-#define QM_REG_WFQVPWEIGHT_RT_SIZE	512
-#define QM_REG_WFQVPCRD_RT_OFFSET	32699
-#define QM_REG_WFQVPCRD_RT_SIZE	512
-#define QM_REG_WFQVPMAP_RT_OFFSET	33211
-#define QM_REG_WFQVPMAP_RT_SIZE	512
-#define QM_REG_WFQPFCRD_MSB_RT_OFFSET	33723
-#define QM_REG_WFQPFCRD_MSB_RT_SIZE	320
-#define QM_REG_VOQCRDLINE_RT_OFFSET	34043
-#define QM_REG_VOQCRDLINE_RT_SIZE	36
-#define QM_REG_VOQINITCRDLINE_RT_OFFSET	34079
-#define QM_REG_VOQINITCRDLINE_RT_SIZE	36
-#define NIG_REG_TAG_ETHERTYPE_0_RT_OFFSET	34115
-#define NIG_REG_OUTER_TAG_VALUE_LIST0_RT_OFFSET	34116
-#define NIG_REG_OUTER_TAG_VALUE_LIST1_RT_OFFSET	34117
-#define NIG_REG_OUTER_TAG_VALUE_LIST2_RT_OFFSET	34118
-#define NIG_REG_OUTER_TAG_VALUE_LIST3_RT_OFFSET	34119
-#define NIG_REG_OUTER_TAG_VALUE_MASK_RT_OFFSET	34120
-#define NIG_REG_LLH_FUNC_TAGMAC_CLS_TYPE_RT_OFFSET	34121
-#define NIG_REG_LLH_FUNC_TAG_EN_RT_OFFSET	34122
-#define NIG_REG_LLH_FUNC_TAG_EN_RT_SIZE	4
-#define NIG_REG_LLH_FUNC_TAG_HDR_SEL_RT_OFFSET	34126
-#define NIG_REG_LLH_FUNC_TAG_HDR_SEL_RT_SIZE	4
-#define NIG_REG_LLH_FUNC_TAG_VALUE_RT_OFFSET	34130
-#define NIG_REG_LLH_FUNC_TAG_VALUE_RT_SIZE	4
-#define NIG_REG_LLH_FUNC_NO_TAG_RT_OFFSET	34134
-#define NIG_REG_LLH_FUNC_FILTER_VALUE_RT_OFFSET	34135
-#define NIG_REG_LLH_FUNC_FILTER_VALUE_RT_SIZE	32
-#define NIG_REG_LLH_FUNC_FILTER_EN_RT_OFFSET	34167
-#define NIG_REG_LLH_FUNC_FILTER_EN_RT_SIZE	16
-#define NIG_REG_LLH_FUNC_FILTER_MODE_RT_OFFSET	34183
-#define NIG_REG_LLH_FUNC_FILTER_MODE_RT_SIZE	16
-#define NIG_REG_LLH_FUNC_FILTER_PROTOCOL_TYPE_RT_OFFSET	34199
-#define NIG_REG_LLH_FUNC_FILTER_PROTOCOL_TYPE_RT_SIZE	16
-#define NIG_REG_LLH_FUNC_FILTER_HDR_SEL_RT_OFFSET	34215
-#define NIG_REG_LLH_FUNC_FILTER_HDR_SEL_RT_SIZE	16
-#define NIG_REG_TX_EDPM_CTRL_RT_OFFSET	34231
-#define NIG_REG_ROCE_DUPLICATE_TO_HOST_RT_OFFSET	34232
-#define CDU_REG_CID_ADDR_PARAMS_RT_OFFSET	34233
-#define CDU_REG_SEGMENT0_PARAMS_RT_OFFSET	34234
-#define CDU_REG_SEGMENT1_PARAMS_RT_OFFSET	34235
-#define CDU_REG_PF_SEG0_TYPE_OFFSET_RT_OFFSET	34236
-#define CDU_REG_PF_SEG1_TYPE_OFFSET_RT_OFFSET	34237
-#define CDU_REG_PF_SEG2_TYPE_OFFSET_RT_OFFSET	34238
-#define CDU_REG_PF_SEG3_TYPE_OFFSET_RT_OFFSET	34239
-#define CDU_REG_PF_FL_SEG0_TYPE_OFFSET_RT_OFFSET	34240
-#define CDU_REG_PF_FL_SEG1_TYPE_OFFSET_RT_OFFSET	34241
-#define CDU_REG_PF_FL_SEG2_TYPE_OFFSET_RT_OFFSET	34242
-#define CDU_REG_PF_FL_SEG3_TYPE_OFFSET_RT_OFFSET	34243
-#define CDU_REG_VF_SEG_TYPE_OFFSET_RT_OFFSET	34244
-#define CDU_REG_VF_FL_SEG_TYPE_OFFSET_RT_OFFSET	34245
-#define PBF_REG_TAG_ETHERTYPE_0_RT_OFFSET	34246
-#define PBF_REG_BTB_SHARED_AREA_SIZE_RT_OFFSET	34247
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ0_RT_OFFSET	34248
-#define PBF_REG_BTB_GUARANTEED_VOQ0_RT_OFFSET	34249
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ0_RT_OFFSET	34250
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ1_RT_OFFSET	34251
-#define PBF_REG_BTB_GUARANTEED_VOQ1_RT_OFFSET	34252
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ1_RT_OFFSET	34253
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ2_RT_OFFSET	34254
-#define PBF_REG_BTB_GUARANTEED_VOQ2_RT_OFFSET	34255
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ2_RT_OFFSET	34256
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ3_RT_OFFSET	34257
-#define PBF_REG_BTB_GUARANTEED_VOQ3_RT_OFFSET	34258
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ3_RT_OFFSET	34259
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ4_RT_OFFSET	34260
-#define PBF_REG_BTB_GUARANTEED_VOQ4_RT_OFFSET	34261
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ4_RT_OFFSET	34262
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ5_RT_OFFSET	34263
-#define PBF_REG_BTB_GUARANTEED_VOQ5_RT_OFFSET	34264
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ5_RT_OFFSET	34265
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ6_RT_OFFSET	34266
-#define PBF_REG_BTB_GUARANTEED_VOQ6_RT_OFFSET	34267
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ6_RT_OFFSET	34268
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ7_RT_OFFSET	34269
-#define PBF_REG_BTB_GUARANTEED_VOQ7_RT_OFFSET	34270
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ7_RT_OFFSET	34271
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ8_RT_OFFSET	34272
-#define PBF_REG_BTB_GUARANTEED_VOQ8_RT_OFFSET	34273
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ8_RT_OFFSET	34274
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ9_RT_OFFSET	34275
-#define PBF_REG_BTB_GUARANTEED_VOQ9_RT_OFFSET	34276
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ9_RT_OFFSET	34277
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ10_RT_OFFSET	34278
-#define PBF_REG_BTB_GUARANTEED_VOQ10_RT_OFFSET	34279
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ10_RT_OFFSET	34280
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ11_RT_OFFSET	34281
-#define PBF_REG_BTB_GUARANTEED_VOQ11_RT_OFFSET	34282
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ11_RT_OFFSET	34283
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ12_RT_OFFSET	34284
-#define PBF_REG_BTB_GUARANTEED_VOQ12_RT_OFFSET	34285
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ12_RT_OFFSET	34286
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ13_RT_OFFSET	34287
-#define PBF_REG_BTB_GUARANTEED_VOQ13_RT_OFFSET	34288
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ13_RT_OFFSET	34289
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ14_RT_OFFSET	34290
-#define PBF_REG_BTB_GUARANTEED_VOQ14_RT_OFFSET	34291
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ14_RT_OFFSET	34292
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ15_RT_OFFSET	34293
-#define PBF_REG_BTB_GUARANTEED_VOQ15_RT_OFFSET	34294
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ15_RT_OFFSET	34295
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ16_RT_OFFSET	34296
-#define PBF_REG_BTB_GUARANTEED_VOQ16_RT_OFFSET	34297
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ16_RT_OFFSET	34298
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ17_RT_OFFSET	34299
-#define PBF_REG_BTB_GUARANTEED_VOQ17_RT_OFFSET	34300
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ17_RT_OFFSET	34301
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ18_RT_OFFSET	34302
-#define PBF_REG_BTB_GUARANTEED_VOQ18_RT_OFFSET	34303
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ18_RT_OFFSET	34304
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ19_RT_OFFSET	34305
-#define PBF_REG_BTB_GUARANTEED_VOQ19_RT_OFFSET	34306
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ19_RT_OFFSET	34307
-#define XCM_REG_CON_PHY_Q3_RT_OFFSET	34308
+#define DORQ_REG_PF_MAX_ICID_0_RT_OFFSET			0
+#define DORQ_REG_PF_MAX_ICID_1_RT_OFFSET			1
+#define DORQ_REG_PF_MAX_ICID_2_RT_OFFSET			2
+#define DORQ_REG_PF_MAX_ICID_3_RT_OFFSET			3
+#define DORQ_REG_PF_MAX_ICID_4_RT_OFFSET			4
+#define DORQ_REG_PF_MAX_ICID_5_RT_OFFSET			5
+#define DORQ_REG_PF_MAX_ICID_6_RT_OFFSET			6
+#define DORQ_REG_PF_MAX_ICID_7_RT_OFFSET			7
+#define DORQ_REG_VF_MAX_ICID_0_RT_OFFSET			8
+#define DORQ_REG_VF_MAX_ICID_1_RT_OFFSET			9
+#define DORQ_REG_VF_MAX_ICID_2_RT_OFFSET			10
+#define DORQ_REG_VF_MAX_ICID_3_RT_OFFSET			11
+#define DORQ_REG_VF_MAX_ICID_4_RT_OFFSET			12
+#define DORQ_REG_VF_MAX_ICID_5_RT_OFFSET			13
+#define DORQ_REG_VF_MAX_ICID_6_RT_OFFSET			14
+#define DORQ_REG_VF_MAX_ICID_7_RT_OFFSET			15
+#define DORQ_REG_PF_WAKE_ALL_RT_OFFSET				16
+#define DORQ_REG_TAG1_ETHERTYPE_RT_OFFSET			17
+#define DORQ_REG_GLB_MAX_ICID_0_RT_OFFSET			18
+#define DORQ_REG_GLB_MAX_ICID_1_RT_OFFSET			19
+#define DORQ_REG_GLB_RANGE2CONN_TYPE_0_RT_OFFSET		20
+#define DORQ_REG_GLB_RANGE2CONN_TYPE_1_RT_OFFSET		21
+#define DORQ_REG_PRV_PF_MAX_ICID_2_RT_OFFSET			22
+#define DORQ_REG_PRV_PF_MAX_ICID_3_RT_OFFSET			23
+#define DORQ_REG_PRV_PF_MAX_ICID_4_RT_OFFSET			24
+#define DORQ_REG_PRV_PF_MAX_ICID_5_RT_OFFSET			25
+#define DORQ_REG_PRV_VF_MAX_ICID_2_RT_OFFSET			26
+#define DORQ_REG_PRV_VF_MAX_ICID_3_RT_OFFSET			27
+#define DORQ_REG_PRV_VF_MAX_ICID_4_RT_OFFSET			28
+#define DORQ_REG_PRV_VF_MAX_ICID_5_RT_OFFSET			29
+#define DORQ_REG_PRV_PF_RANGE2CONN_TYPE_2_RT_OFFSET		30
+#define DORQ_REG_PRV_PF_RANGE2CONN_TYPE_3_RT_OFFSET		31
+#define DORQ_REG_PRV_PF_RANGE2CONN_TYPE_4_RT_OFFSET		32
+#define DORQ_REG_PRV_PF_RANGE2CONN_TYPE_5_RT_OFFSET		33
+#define DORQ_REG_PRV_VF_RANGE2CONN_TYPE_2_RT_OFFSET		34
+#define DORQ_REG_PRV_VF_RANGE2CONN_TYPE_3_RT_OFFSET		35
+#define DORQ_REG_PRV_VF_RANGE2CONN_TYPE_4_RT_OFFSET		36
+#define DORQ_REG_PRV_VF_RANGE2CONN_TYPE_5_RT_OFFSET		37
+#define IGU_REG_PF_CONFIGURATION_RT_OFFSET			38
+#define IGU_REG_VF_CONFIGURATION_RT_OFFSET			39
+#define IGU_REG_ATTN_MSG_ADDR_L_RT_OFFSET			40
+#define IGU_REG_ATTN_MSG_ADDR_H_RT_OFFSET			41
+#define IGU_REG_LEADING_EDGE_LATCH_RT_OFFSET			42
+#define IGU_REG_TRAILING_EDGE_LATCH_RT_OFFSET			43
+#define CAU_REG_CQE_AGG_UNIT_SIZE_RT_OFFSET			44
+#define CAU_REG_SB_VAR_MEMORY_RT_OFFSET				45
+#define CAU_REG_SB_VAR_MEMORY_RT_SIZE				1024
+#define CAU_REG_SB_ADDR_MEMORY_RT_OFFSET			1069
+#define CAU_REG_SB_ADDR_MEMORY_RT_SIZE				1024
+#define CAU_REG_PI_MEMORY_RT_OFFSET				2093
+#define CAU_REG_PI_MEMORY_RT_SIZE				4416
+#define PRS_REG_SEARCH_RESP_INITIATOR_TYPE_RT_OFFSET		6509
+#define PRS_REG_TASK_ID_MAX_INITIATOR_PF_RT_OFFSET		6510
+#define PRS_REG_TASK_ID_MAX_INITIATOR_VF_RT_OFFSET		6511
+#define PRS_REG_TASK_ID_MAX_TARGET_PF_RT_OFFSET			6512
+#define PRS_REG_TASK_ID_MAX_TARGET_VF_RT_OFFSET			6513
+#define PRS_REG_SEARCH_TCP_RT_OFFSET				6514
+#define PRS_REG_SEARCH_FCOE_RT_OFFSET				6515
+#define PRS_REG_SEARCH_ROCE_RT_OFFSET				6516
+#define PRS_REG_ROCE_DEST_QP_MAX_VF_RT_OFFSET			6517
+#define PRS_REG_ROCE_DEST_QP_MAX_PF_RT_OFFSET			6518
+#define PRS_REG_SEARCH_OPENFLOW_RT_OFFSET			6519
+#define PRS_REG_SEARCH_NON_IP_AS_OPENFLOW_RT_OFFSET		6520
+#define PRS_REG_OPENFLOW_SUPPORT_ONLY_KNOWN_OVER_IP_RT_OFFSET	6521
+#define PRS_REG_OPENFLOW_SEARCH_KEY_MASK_RT_OFFSET		6522
+#define PRS_REG_TAG_ETHERTYPE_0_RT_OFFSET			6523
+#define PRS_REG_LIGHT_L2_ETHERTYPE_EN_RT_OFFSET			6524
+#define SRC_REG_FIRSTFREE_RT_OFFSET				6525
+#define SRC_REG_FIRSTFREE_RT_SIZE				2
+#define SRC_REG_LASTFREE_RT_OFFSET				6527
+#define SRC_REG_LASTFREE_RT_SIZE				2
+#define SRC_REG_COUNTFREE_RT_OFFSET				6529
+#define SRC_REG_NUMBER_HASH_BITS_RT_OFFSET			6530
+#define PSWRQ2_REG_CDUT_P_SIZE_RT_OFFSET			6531
+#define PSWRQ2_REG_CDUC_P_SIZE_RT_OFFSET			6532
+#define PSWRQ2_REG_TM_P_SIZE_RT_OFFSET				6533
+#define PSWRQ2_REG_QM_P_SIZE_RT_OFFSET				6534
+#define PSWRQ2_REG_SRC_P_SIZE_RT_OFFSET				6535
+#define PSWRQ2_REG_TSDM_P_SIZE_RT_OFFSET			6536
+#define PSWRQ2_REG_TM_FIRST_ILT_RT_OFFSET			6537
+#define PSWRQ2_REG_TM_LAST_ILT_RT_OFFSET			6538
+#define PSWRQ2_REG_QM_FIRST_ILT_RT_OFFSET			6539
+#define PSWRQ2_REG_QM_LAST_ILT_RT_OFFSET			6540
+#define PSWRQ2_REG_SRC_FIRST_ILT_RT_OFFSET			6541
+#define PSWRQ2_REG_SRC_LAST_ILT_RT_OFFSET			6542
+#define PSWRQ2_REG_CDUC_FIRST_ILT_RT_OFFSET			6543
+#define PSWRQ2_REG_CDUC_LAST_ILT_RT_OFFSET			6544
+#define PSWRQ2_REG_CDUT_FIRST_ILT_RT_OFFSET			6545
+#define PSWRQ2_REG_CDUT_LAST_ILT_RT_OFFSET			6546
+#define PSWRQ2_REG_TSDM_FIRST_ILT_RT_OFFSET			6547
+#define PSWRQ2_REG_TSDM_LAST_ILT_RT_OFFSET			6548
+#define PSWRQ2_REG_TM_NUMBER_OF_PF_BLOCKS_RT_OFFSET		6549
+#define PSWRQ2_REG_CDUT_NUMBER_OF_PF_BLOCKS_RT_OFFSET		6550
+#define PSWRQ2_REG_CDUC_NUMBER_OF_PF_BLOCKS_RT_OFFSET		6551
+#define PSWRQ2_REG_TM_VF_BLOCKS_RT_OFFSET			6552
+#define PSWRQ2_REG_CDUT_VF_BLOCKS_RT_OFFSET			6553
+#define PSWRQ2_REG_CDUC_VF_BLOCKS_RT_OFFSET			6554
+#define PSWRQ2_REG_TM_BLOCKS_FACTOR_RT_OFFSET			6555
+#define PSWRQ2_REG_CDUT_BLOCKS_FACTOR_RT_OFFSET			6556
+#define PSWRQ2_REG_CDUC_BLOCKS_FACTOR_RT_OFFSET			6557
+#define PSWRQ2_REG_VF_BASE_RT_OFFSET				6558
+#define PSWRQ2_REG_VF_LAST_ILT_RT_OFFSET			6559
+#define PSWRQ2_REG_DRAM_ALIGN_WR_RT_OFFSET			6560
+#define PSWRQ2_REG_DRAM_ALIGN_RD_RT_OFFSET			6561
+#define PSWRQ2_REG_TGSRC_FIRST_ILT_RT_OFFSET			6562
+#define PSWRQ2_REG_RGSRC_FIRST_ILT_RT_OFFSET			6563
+#define PSWRQ2_REG_TGSRC_LAST_ILT_RT_OFFSET			6564
+#define PSWRQ2_REG_RGSRC_LAST_ILT_RT_OFFSET			6565
+#define PSWRQ2_REG_ILT_MEMORY_RT_OFFSET				6566
+#define PSWRQ2_REG_ILT_MEMORY_RT_SIZE				26414
+#define PGLUE_REG_B_VF_BASE_RT_OFFSET				32980
+#define PGLUE_REG_B_MSDM_OFFSET_MASK_B_RT_OFFSET		32981
+#define PGLUE_REG_B_MSDM_VF_SHIFT_B_RT_OFFSET			32982
+#define PGLUE_REG_B_CACHE_LINE_SIZE_RT_OFFSET			32983
+#define PGLUE_REG_B_PF_BAR0_SIZE_RT_OFFSET			32984
+#define PGLUE_REG_B_PF_BAR1_SIZE_RT_OFFSET			32985
+#define PGLUE_REG_B_VF_BAR1_SIZE_RT_OFFSET			32986
+#define TM_REG_VF_ENABLE_CONN_RT_OFFSET				32987
+#define TM_REG_PF_ENABLE_CONN_RT_OFFSET				32988
+#define TM_REG_PF_ENABLE_TASK_RT_OFFSET				32989
+#define TM_REG_GROUP_SIZE_RESOLUTION_CONN_RT_OFFSET		32990
+#define TM_REG_GROUP_SIZE_RESOLUTION_TASK_RT_OFFSET		32991
+#define TM_REG_CONFIG_CONN_MEM_RT_OFFSET			32992
+#define TM_REG_CONFIG_CONN_MEM_RT_SIZE				416
+#define TM_REG_CONFIG_TASK_MEM_RT_OFFSET			33408
+#define TM_REG_CONFIG_TASK_MEM_RT_SIZE				608
+#define QM_REG_MAXPQSIZE_0_RT_OFFSET				34016
+#define QM_REG_MAXPQSIZE_1_RT_OFFSET				34017
+#define QM_REG_MAXPQSIZE_2_RT_OFFSET				34018
+#define QM_REG_MAXPQSIZETXSEL_0_RT_OFFSET			34019
+#define QM_REG_MAXPQSIZETXSEL_1_RT_OFFSET			34020
+#define QM_REG_MAXPQSIZETXSEL_2_RT_OFFSET			34021
+#define QM_REG_MAXPQSIZETXSEL_3_RT_OFFSET			34022
+#define QM_REG_MAXPQSIZETXSEL_4_RT_OFFSET			34023
+#define QM_REG_MAXPQSIZETXSEL_5_RT_OFFSET			34024
+#define QM_REG_MAXPQSIZETXSEL_6_RT_OFFSET			34025
+#define QM_REG_MAXPQSIZETXSEL_7_RT_OFFSET			34026
+#define QM_REG_MAXPQSIZETXSEL_8_RT_OFFSET			34027
+#define QM_REG_MAXPQSIZETXSEL_9_RT_OFFSET			34028
+#define QM_REG_MAXPQSIZETXSEL_10_RT_OFFSET			34029
+#define QM_REG_MAXPQSIZETXSEL_11_RT_OFFSET			34030
+#define QM_REG_MAXPQSIZETXSEL_12_RT_OFFSET			34031
+#define QM_REG_MAXPQSIZETXSEL_13_RT_OFFSET			34032
+#define QM_REG_MAXPQSIZETXSEL_14_RT_OFFSET			34033
+#define QM_REG_MAXPQSIZETXSEL_15_RT_OFFSET			34034
+#define QM_REG_MAXPQSIZETXSEL_16_RT_OFFSET			34035
+#define QM_REG_MAXPQSIZETXSEL_17_RT_OFFSET			34036
+#define QM_REG_MAXPQSIZETXSEL_18_RT_OFFSET			34037
+#define QM_REG_MAXPQSIZETXSEL_19_RT_OFFSET			34038
+#define QM_REG_MAXPQSIZETXSEL_20_RT_OFFSET			34039
+#define QM_REG_MAXPQSIZETXSEL_21_RT_OFFSET			34040
+#define QM_REG_MAXPQSIZETXSEL_22_RT_OFFSET			34041
+#define QM_REG_MAXPQSIZETXSEL_23_RT_OFFSET			34042
+#define QM_REG_MAXPQSIZETXSEL_24_RT_OFFSET			34043
+#define QM_REG_MAXPQSIZETXSEL_25_RT_OFFSET			34044
+#define QM_REG_MAXPQSIZETXSEL_26_RT_OFFSET			34045
+#define QM_REG_MAXPQSIZETXSEL_27_RT_OFFSET			34046
+#define QM_REG_MAXPQSIZETXSEL_28_RT_OFFSET			34047
+#define QM_REG_MAXPQSIZETXSEL_29_RT_OFFSET			34048
+#define QM_REG_MAXPQSIZETXSEL_30_RT_OFFSET			34049
+#define QM_REG_MAXPQSIZETXSEL_31_RT_OFFSET			34050
+#define QM_REG_MAXPQSIZETXSEL_32_RT_OFFSET			34051
+#define QM_REG_MAXPQSIZETXSEL_33_RT_OFFSET			34052
+#define QM_REG_MAXPQSIZETXSEL_34_RT_OFFSET			34053
+#define QM_REG_MAXPQSIZETXSEL_35_RT_OFFSET			34054
+#define QM_REG_MAXPQSIZETXSEL_36_RT_OFFSET			34055
+#define QM_REG_MAXPQSIZETXSEL_37_RT_OFFSET			34056
+#define QM_REG_MAXPQSIZETXSEL_38_RT_OFFSET			34057
+#define QM_REG_MAXPQSIZETXSEL_39_RT_OFFSET			34058
+#define QM_REG_MAXPQSIZETXSEL_40_RT_OFFSET			34059
+#define QM_REG_MAXPQSIZETXSEL_41_RT_OFFSET			34060
+#define QM_REG_MAXPQSIZETXSEL_42_RT_OFFSET			34061
+#define QM_REG_MAXPQSIZETXSEL_43_RT_OFFSET			34062
+#define QM_REG_MAXPQSIZETXSEL_44_RT_OFFSET			34063
+#define QM_REG_MAXPQSIZETXSEL_45_RT_OFFSET			34064
+#define QM_REG_MAXPQSIZETXSEL_46_RT_OFFSET			34065
+#define QM_REG_MAXPQSIZETXSEL_47_RT_OFFSET			34066
+#define QM_REG_MAXPQSIZETXSEL_48_RT_OFFSET			34067
+#define QM_REG_MAXPQSIZETXSEL_49_RT_OFFSET			34068
+#define QM_REG_MAXPQSIZETXSEL_50_RT_OFFSET			34069
+#define QM_REG_MAXPQSIZETXSEL_51_RT_OFFSET			34070
+#define QM_REG_MAXPQSIZETXSEL_52_RT_OFFSET			34071
+#define QM_REG_MAXPQSIZETXSEL_53_RT_OFFSET			34072
+#define QM_REG_MAXPQSIZETXSEL_54_RT_OFFSET			34073
+#define QM_REG_MAXPQSIZETXSEL_55_RT_OFFSET			34074
+#define QM_REG_MAXPQSIZETXSEL_56_RT_OFFSET			34075
+#define QM_REG_MAXPQSIZETXSEL_57_RT_OFFSET			34076
+#define QM_REG_MAXPQSIZETXSEL_58_RT_OFFSET			34077
+#define QM_REG_MAXPQSIZETXSEL_59_RT_OFFSET			34078
+#define QM_REG_MAXPQSIZETXSEL_60_RT_OFFSET			34079
+#define QM_REG_MAXPQSIZETXSEL_61_RT_OFFSET			34080
+#define QM_REG_MAXPQSIZETXSEL_62_RT_OFFSET			34081
+#define QM_REG_MAXPQSIZETXSEL_63_RT_OFFSET			34082
+#define QM_REG_BASEADDROTHERPQ_RT_OFFSET			34083
+#define QM_REG_BASEADDROTHERPQ_RT_SIZE				128
+#define QM_REG_PTRTBLOTHER_RT_OFFSET				34211
+#define QM_REG_PTRTBLOTHER_RT_SIZE				256
+#define QM_REG_AFULLQMBYPTHRPFWFQ_RT_OFFSET			34467
+#define QM_REG_AFULLQMBYPTHRVPWFQ_RT_OFFSET			34468
+#define QM_REG_AFULLQMBYPTHRPFRL_RT_OFFSET			34469
+#define QM_REG_AFULLQMBYPTHRGLBLRL_RT_OFFSET			34470
+#define QM_REG_AFULLOPRTNSTCCRDMASK_RT_OFFSET			34471
+#define QM_REG_WRROTHERPQGRP_0_RT_OFFSET			34472
+#define QM_REG_WRROTHERPQGRP_1_RT_OFFSET			34473
+#define QM_REG_WRROTHERPQGRP_2_RT_OFFSET			34474
+#define QM_REG_WRROTHERPQGRP_3_RT_OFFSET			34475
+#define QM_REG_WRROTHERPQGRP_4_RT_OFFSET			34476
+#define QM_REG_WRROTHERPQGRP_5_RT_OFFSET			34477
+#define QM_REG_WRROTHERPQGRP_6_RT_OFFSET			34478
+#define QM_REG_WRROTHERPQGRP_7_RT_OFFSET			34479
+#define QM_REG_WRROTHERPQGRP_8_RT_OFFSET			34480
+#define QM_REG_WRROTHERPQGRP_9_RT_OFFSET			34481
+#define QM_REG_WRROTHERPQGRP_10_RT_OFFSET			34482
+#define QM_REG_WRROTHERPQGRP_11_RT_OFFSET			34483
+#define QM_REG_WRROTHERPQGRP_12_RT_OFFSET			34484
+#define QM_REG_WRROTHERPQGRP_13_RT_OFFSET			34485
+#define QM_REG_WRROTHERPQGRP_14_RT_OFFSET			34486
+#define QM_REG_WRROTHERPQGRP_15_RT_OFFSET			34487
+#define QM_REG_WRROTHERGRPWEIGHT_0_RT_OFFSET			34488
+#define QM_REG_WRROTHERGRPWEIGHT_1_RT_OFFSET			34489
+#define QM_REG_WRROTHERGRPWEIGHT_2_RT_OFFSET			34490
+#define QM_REG_WRROTHERGRPWEIGHT_3_RT_OFFSET			34491
+#define QM_REG_WRRTXGRPWEIGHT_0_RT_OFFSET			34492
+#define QM_REG_WRRTXGRPWEIGHT_1_RT_OFFSET			34493
+#define QM_REG_PQTX2PF_0_RT_OFFSET				34494
+#define QM_REG_PQTX2PF_1_RT_OFFSET				34495
+#define QM_REG_PQTX2PF_2_RT_OFFSET				34496
+#define QM_REG_PQTX2PF_3_RT_OFFSET				34497
+#define QM_REG_PQTX2PF_4_RT_OFFSET				34498
+#define QM_REG_PQTX2PF_5_RT_OFFSET				34499
+#define QM_REG_PQTX2PF_6_RT_OFFSET				34500
+#define QM_REG_PQTX2PF_7_RT_OFFSET				34501
+#define QM_REG_PQTX2PF_8_RT_OFFSET				34502
+#define QM_REG_PQTX2PF_9_RT_OFFSET				34503
+#define QM_REG_PQTX2PF_10_RT_OFFSET				34504
+#define QM_REG_PQTX2PF_11_RT_OFFSET				34505
+#define QM_REG_PQTX2PF_12_RT_OFFSET				34506
+#define QM_REG_PQTX2PF_13_RT_OFFSET				34507
+#define QM_REG_PQTX2PF_14_RT_OFFSET				34508
+#define QM_REG_PQTX2PF_15_RT_OFFSET				34509
+#define QM_REG_PQTX2PF_16_RT_OFFSET				34510
+#define QM_REG_PQTX2PF_17_RT_OFFSET				34511
+#define QM_REG_PQTX2PF_18_RT_OFFSET				34512
+#define QM_REG_PQTX2PF_19_RT_OFFSET				34513
+#define QM_REG_PQTX2PF_20_RT_OFFSET				34514
+#define QM_REG_PQTX2PF_21_RT_OFFSET				34515
+#define QM_REG_PQTX2PF_22_RT_OFFSET				34516
+#define QM_REG_PQTX2PF_23_RT_OFFSET				34517
+#define QM_REG_PQTX2PF_24_RT_OFFSET				34518
+#define QM_REG_PQTX2PF_25_RT_OFFSET				34519
+#define QM_REG_PQTX2PF_26_RT_OFFSET				34520
+#define QM_REG_PQTX2PF_27_RT_OFFSET				34521
+#define QM_REG_PQTX2PF_28_RT_OFFSET				34522
+#define QM_REG_PQTX2PF_29_RT_OFFSET				34523
+#define QM_REG_PQTX2PF_30_RT_OFFSET				34524
+#define QM_REG_PQTX2PF_31_RT_OFFSET				34525
+#define QM_REG_PQTX2PF_32_RT_OFFSET				34526
+#define QM_REG_PQTX2PF_33_RT_OFFSET				34527
+#define QM_REG_PQTX2PF_34_RT_OFFSET				34528
+#define QM_REG_PQTX2PF_35_RT_OFFSET				34529
+#define QM_REG_PQTX2PF_36_RT_OFFSET				34530
+#define QM_REG_PQTX2PF_37_RT_OFFSET				34531
+#define QM_REG_PQTX2PF_38_RT_OFFSET				34532
+#define QM_REG_PQTX2PF_39_RT_OFFSET				34533
+#define QM_REG_PQTX2PF_40_RT_OFFSET				34534
+#define QM_REG_PQTX2PF_41_RT_OFFSET				34535
+#define QM_REG_PQTX2PF_42_RT_OFFSET				34536
+#define QM_REG_PQTX2PF_43_RT_OFFSET				34537
+#define QM_REG_PQTX2PF_44_RT_OFFSET				34538
+#define QM_REG_PQTX2PF_45_RT_OFFSET				34539
+#define QM_REG_PQTX2PF_46_RT_OFFSET				34540
+#define QM_REG_PQTX2PF_47_RT_OFFSET				34541
+#define QM_REG_PQTX2PF_48_RT_OFFSET				34542
+#define QM_REG_PQTX2PF_49_RT_OFFSET				34543
+#define QM_REG_PQTX2PF_50_RT_OFFSET				34544
+#define QM_REG_PQTX2PF_51_RT_OFFSET				34545
+#define QM_REG_PQTX2PF_52_RT_OFFSET				34546
+#define QM_REG_PQTX2PF_53_RT_OFFSET				34547
+#define QM_REG_PQTX2PF_54_RT_OFFSET				34548
+#define QM_REG_PQTX2PF_55_RT_OFFSET				34549
+#define QM_REG_PQTX2PF_56_RT_OFFSET				34550
+#define QM_REG_PQTX2PF_57_RT_OFFSET				34551
+#define QM_REG_PQTX2PF_58_RT_OFFSET				34552
+#define QM_REG_PQTX2PF_59_RT_OFFSET				34553
+#define QM_REG_PQTX2PF_60_RT_OFFSET				34554
+#define QM_REG_PQTX2PF_61_RT_OFFSET				34555
+#define QM_REG_PQTX2PF_62_RT_OFFSET				34556
+#define QM_REG_PQTX2PF_63_RT_OFFSET				34557
+#define QM_REG_PQOTHER2PF_0_RT_OFFSET				34558
+#define QM_REG_PQOTHER2PF_1_RT_OFFSET				34559
+#define QM_REG_PQOTHER2PF_2_RT_OFFSET				34560
+#define QM_REG_PQOTHER2PF_3_RT_OFFSET				34561
+#define QM_REG_PQOTHER2PF_4_RT_OFFSET				34562
+#define QM_REG_PQOTHER2PF_5_RT_OFFSET				34563
+#define QM_REG_PQOTHER2PF_6_RT_OFFSET				34564
+#define QM_REG_PQOTHER2PF_7_RT_OFFSET				34565
+#define QM_REG_PQOTHER2PF_8_RT_OFFSET				34566
+#define QM_REG_PQOTHER2PF_9_RT_OFFSET				34567
+#define QM_REG_PQOTHER2PF_10_RT_OFFSET				34568
+#define QM_REG_PQOTHER2PF_11_RT_OFFSET				34569
+#define QM_REG_PQOTHER2PF_12_RT_OFFSET				34570
+#define QM_REG_PQOTHER2PF_13_RT_OFFSET				34571
+#define QM_REG_PQOTHER2PF_14_RT_OFFSET				34572
+#define QM_REG_PQOTHER2PF_15_RT_OFFSET				34573
+#define QM_REG_RLGLBLPERIOD_0_RT_OFFSET				34574
+#define QM_REG_RLGLBLPERIOD_1_RT_OFFSET				34575
+#define QM_REG_RLGLBLPERIODTIMER_0_RT_OFFSET			34576
+#define QM_REG_RLGLBLPERIODTIMER_1_RT_OFFSET			34577
+#define QM_REG_RLGLBLPERIODSEL_0_RT_OFFSET			34578
+#define QM_REG_RLGLBLPERIODSEL_1_RT_OFFSET			34579
+#define QM_REG_RLGLBLPERIODSEL_2_RT_OFFSET			34580
+#define QM_REG_RLGLBLPERIODSEL_3_RT_OFFSET			34581
+#define QM_REG_RLGLBLPERIODSEL_4_RT_OFFSET			34582
+#define QM_REG_RLGLBLPERIODSEL_5_RT_OFFSET			34583
+#define QM_REG_RLGLBLPERIODSEL_6_RT_OFFSET			34584
+#define QM_REG_RLGLBLPERIODSEL_7_RT_OFFSET			34585
+#define QM_REG_RLGLBLINCVAL_RT_OFFSET				34586
+#define QM_REG_RLGLBLINCVAL_RT_SIZE				256
+#define QM_REG_RLGLBLUPPERBOUND_RT_OFFSET			34842
+#define QM_REG_RLGLBLUPPERBOUND_RT_SIZE				256
+#define QM_REG_RLGLBLCRD_RT_OFFSET				35098
+#define QM_REG_RLGLBLCRD_RT_SIZE				256
+#define QM_REG_RLGLBLENABLE_RT_OFFSET				35354
+#define QM_REG_RLPFPERIOD_RT_OFFSET				35355
+#define QM_REG_RLPFPERIODTIMER_RT_OFFSET			35356
+#define QM_REG_RLPFINCVAL_RT_OFFSET				35357
+#define QM_REG_RLPFINCVAL_RT_SIZE				16
+#define QM_REG_RLPFUPPERBOUND_RT_OFFSET				35373
+#define QM_REG_RLPFUPPERBOUND_RT_SIZE				16
+#define QM_REG_RLPFCRD_RT_OFFSET				35389
+#define QM_REG_RLPFCRD_RT_SIZE					16
+#define QM_REG_RLPFENABLE_RT_OFFSET				35405
+#define QM_REG_RLPFVOQENABLE_RT_OFFSET				35406
+#define QM_REG_WFQPFWEIGHT_RT_OFFSET				35407
+#define QM_REG_WFQPFWEIGHT_RT_SIZE				16
+#define QM_REG_WFQPFUPPERBOUND_RT_OFFSET			35423
+#define QM_REG_WFQPFUPPERBOUND_RT_SIZE				16
+#define QM_REG_WFQPFCRD_RT_OFFSET				35439
+#define QM_REG_WFQPFCRD_RT_SIZE					256
+#define QM_REG_WFQPFENABLE_RT_OFFSET				35695
+#define QM_REG_WFQVPENABLE_RT_OFFSET				35696
+#define QM_REG_BASEADDRTXPQ_RT_OFFSET				35697
+#define QM_REG_BASEADDRTXPQ_RT_SIZE				512
+#define QM_REG_TXPQMAP_RT_OFFSET				36209
+#define QM_REG_TXPQMAP_RT_SIZE					512
+#define QM_REG_WFQVPWEIGHT_RT_OFFSET				36721
+#define QM_REG_WFQVPWEIGHT_RT_SIZE				512
+#define QM_REG_WFQVPCRD_RT_OFFSET				37233
+#define QM_REG_WFQVPCRD_RT_SIZE					512
+#define QM_REG_WFQVPMAP_RT_OFFSET				37745
+#define QM_REG_WFQVPMAP_RT_SIZE					512
+#define QM_REG_PTRTBLTX_RT_OFFSET				38257
+#define QM_REG_PTRTBLTX_RT_SIZE					1024
+#define QM_REG_WFQPFCRD_MSB_RT_OFFSET				39281
+#define QM_REG_WFQPFCRD_MSB_RT_SIZE				320
+#define QM_REG_VOQCRDLINE_RT_OFFSET				39601
+#define QM_REG_VOQCRDLINE_RT_SIZE				36
+#define QM_REG_VOQINITCRDLINE_RT_OFFSET				39637
+#define QM_REG_VOQINITCRDLINE_RT_SIZE				36
+#define QM_REG_RLPFVOQENABLE_MSB_RT_OFFSET			39673
+#define NIG_REG_TAG_ETHERTYPE_0_RT_OFFSET			39674
+#define NIG_REG_BRB_GATE_DNTFWD_PORT_RT_OFFSET			39675
+#define NIG_REG_OUTER_TAG_VALUE_LIST0_RT_OFFSET			39676
+#define NIG_REG_OUTER_TAG_VALUE_LIST1_RT_OFFSET			39677
+#define NIG_REG_OUTER_TAG_VALUE_LIST2_RT_OFFSET			39678
+#define NIG_REG_OUTER_TAG_VALUE_LIST3_RT_OFFSET			39679
+#define NIG_REG_LLH_FUNC_TAGMAC_CLS_TYPE_RT_OFFSET		39680
+#define NIG_REG_LLH_FUNC_TAG_EN_RT_OFFSET			39681
+#define NIG_REG_LLH_FUNC_TAG_EN_RT_SIZE				4
+#define NIG_REG_LLH_FUNC_TAG_VALUE_RT_OFFSET			39685
+#define NIG_REG_LLH_FUNC_TAG_VALUE_RT_SIZE			4
+#define NIG_REG_LLH_FUNC_FILTER_VALUE_RT_OFFSET			39689
+#define NIG_REG_LLH_FUNC_FILTER_VALUE_RT_SIZE			32
+#define NIG_REG_LLH_FUNC_FILTER_EN_RT_OFFSET			39721
+#define NIG_REG_LLH_FUNC_FILTER_EN_RT_SIZE			16
+#define NIG_REG_LLH_FUNC_FILTER_MODE_RT_OFFSET			39737
+#define NIG_REG_LLH_FUNC_FILTER_MODE_RT_SIZE			16
+#define NIG_REG_LLH_FUNC_FILTER_PROTOCOL_TYPE_RT_OFFSET		39753
+#define NIG_REG_LLH_FUNC_FILTER_PROTOCOL_TYPE_RT_SIZE		16
+#define NIG_REG_LLH_FUNC_FILTER_HDR_SEL_RT_OFFSET		39769
+#define NIG_REG_LLH_FUNC_FILTER_HDR_SEL_RT_SIZE			16
+#define NIG_REG_TX_EDPM_CTRL_RT_OFFSET				39785
+#define NIG_REG_ROCE_DUPLICATE_TO_HOST_RT_OFFSET		39786
+#define NIG_REG_PPF_TO_ENGINE_SEL_RT_OFFSET			39787
+#define NIG_REG_PPF_TO_ENGINE_SEL_RT_SIZE			8
+#define NIG_REG_LLH_PF_CLS_FUNC_FILTER_VALUE_RT_OFFSET		39795
+#define NIG_REG_LLH_PF_CLS_FUNC_FILTER_VALUE_RT_SIZE		1024
+#define NIG_REG_LLH_PF_CLS_FUNC_FILTER_EN_RT_OFFSET		40819
+#define NIG_REG_LLH_PF_CLS_FUNC_FILTER_EN_RT_SIZE		512
+#define NIG_REG_LLH_PF_CLS_FUNC_FILTER_MODE_RT_OFFSET		41331
+#define NIG_REG_LLH_PF_CLS_FUNC_FILTER_MODE_RT_SIZE		512
+#define NIG_REG_LLH_PF_CLS_FUNC_FILTER_PROTOCOL_TYPE_RT_OFFSET	41843
+#define NIG_REG_LLH_PF_CLS_FUNC_FILTER_PROTOCOL_TYPE_RT_SIZE	512
+#define NIG_REG_LLH_PF_CLS_FUNC_FILTER_HDR_SEL_RT_OFFSET	42355
+#define NIG_REG_LLH_PF_CLS_FUNC_FILTER_HDR_SEL_RT_SIZE		512
+#define NIG_REG_LLH_PF_CLS_FILTERS_MAP_RT_OFFSET		42867
+#define NIG_REG_LLH_PF_CLS_FILTERS_MAP_RT_SIZE			32
+#define CDU_REG_CID_ADDR_PARAMS_RT_OFFSET			42899
+#define CDU_REG_SEGMENT0_PARAMS_RT_OFFSET			42900
+#define CDU_REG_SEGMENT1_PARAMS_RT_OFFSET			42901
+#define CDU_REG_PF_SEG0_TYPE_OFFSET_RT_OFFSET			42902
+#define CDU_REG_PF_SEG1_TYPE_OFFSET_RT_OFFSET			42903
+#define CDU_REG_PF_SEG2_TYPE_OFFSET_RT_OFFSET			42904
+#define CDU_REG_PF_SEG3_TYPE_OFFSET_RT_OFFSET			42905
+#define CDU_REG_PF_FL_SEG0_TYPE_OFFSET_RT_OFFSET		42906
+#define CDU_REG_PF_FL_SEG1_TYPE_OFFSET_RT_OFFSET		42907
+#define CDU_REG_PF_FL_SEG2_TYPE_OFFSET_RT_OFFSET		42908
+#define CDU_REG_PF_FL_SEG3_TYPE_OFFSET_RT_OFFSET		42909
+#define CDU_REG_VF_SEG_TYPE_OFFSET_RT_OFFSET			42910
+#define CDU_REG_VF_FL_SEG_TYPE_OFFSET_RT_OFFSET			42911
+#define PBF_REG_TAG_ETHERTYPE_0_RT_OFFSET			42912
+#define PBF_REG_BTB_SHARED_AREA_SIZE_RT_OFFSET			42913
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ0_RT_OFFSET		42914
+#define PBF_REG_BTB_GUARANTEED_VOQ0_RT_OFFSET			42915
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ0_RT_OFFSET		42916
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ1_RT_OFFSET		42917
+#define PBF_REG_BTB_GUARANTEED_VOQ1_RT_OFFSET			42918
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ1_RT_OFFSET		42919
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ2_RT_OFFSET		42920
+#define PBF_REG_BTB_GUARANTEED_VOQ2_RT_OFFSET			42921
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ2_RT_OFFSET		42922
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ3_RT_OFFSET		42923
+#define PBF_REG_BTB_GUARANTEED_VOQ3_RT_OFFSET			42924
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ3_RT_OFFSET		42925
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ4_RT_OFFSET		42926
+#define PBF_REG_BTB_GUARANTEED_VOQ4_RT_OFFSET			42927
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ4_RT_OFFSET		42928
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ5_RT_OFFSET		42929
+#define PBF_REG_BTB_GUARANTEED_VOQ5_RT_OFFSET			42930
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ5_RT_OFFSET		42931
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ6_RT_OFFSET		42932
+#define PBF_REG_BTB_GUARANTEED_VOQ6_RT_OFFSET			42933
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ6_RT_OFFSET		42934
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ7_RT_OFFSET		42935
+#define PBF_REG_BTB_GUARANTEED_VOQ7_RT_OFFSET			42936
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ7_RT_OFFSET		42937
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ8_RT_OFFSET		42938
+#define PBF_REG_BTB_GUARANTEED_VOQ8_RT_OFFSET			42939
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ8_RT_OFFSET		42940
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ9_RT_OFFSET		42941
+#define PBF_REG_BTB_GUARANTEED_VOQ9_RT_OFFSET			42942
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ9_RT_OFFSET		42943
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ10_RT_OFFSET		42944
+#define PBF_REG_BTB_GUARANTEED_VOQ10_RT_OFFSET			42945
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ10_RT_OFFSET		42946
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ11_RT_OFFSET		42947
+#define PBF_REG_BTB_GUARANTEED_VOQ11_RT_OFFSET			42948
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ11_RT_OFFSET		42949
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ12_RT_OFFSET		42950
+#define PBF_REG_BTB_GUARANTEED_VOQ12_RT_OFFSET			42951
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ12_RT_OFFSET		42952
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ13_RT_OFFSET		42953
+#define PBF_REG_BTB_GUARANTEED_VOQ13_RT_OFFSET			42954
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ13_RT_OFFSET		42955
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ14_RT_OFFSET		42956
+#define PBF_REG_BTB_GUARANTEED_VOQ14_RT_OFFSET			42957
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ14_RT_OFFSET		42958
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ15_RT_OFFSET		42959
+#define PBF_REG_BTB_GUARANTEED_VOQ15_RT_OFFSET			42960
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ15_RT_OFFSET		42961
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ16_RT_OFFSET		42962
+#define PBF_REG_BTB_GUARANTEED_VOQ16_RT_OFFSET			42963
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ16_RT_OFFSET		42964
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ17_RT_OFFSET		42965
+#define PBF_REG_BTB_GUARANTEED_VOQ17_RT_OFFSET			42966
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ17_RT_OFFSET		42967
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ18_RT_OFFSET		42968
+#define PBF_REG_BTB_GUARANTEED_VOQ18_RT_OFFSET			42969
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ18_RT_OFFSET		42970
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ19_RT_OFFSET		42971
+#define PBF_REG_BTB_GUARANTEED_VOQ19_RT_OFFSET			42972
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ19_RT_OFFSET		42973
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ20_RT_OFFSET		42974
+#define PBF_REG_BTB_GUARANTEED_VOQ20_RT_OFFSET			42975
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ20_RT_OFFSET		42976
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ21_RT_OFFSET		42977
+#define PBF_REG_BTB_GUARANTEED_VOQ21_RT_OFFSET			42978
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ21_RT_OFFSET		42979
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ22_RT_OFFSET		42980
+#define PBF_REG_BTB_GUARANTEED_VOQ22_RT_OFFSET			42981
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ22_RT_OFFSET		42982
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ23_RT_OFFSET		42983
+#define PBF_REG_BTB_GUARANTEED_VOQ23_RT_OFFSET			42984
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ23_RT_OFFSET		42985
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ24_RT_OFFSET		42986
+#define PBF_REG_BTB_GUARANTEED_VOQ24_RT_OFFSET			42987
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ24_RT_OFFSET		42988
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ25_RT_OFFSET		42989
+#define PBF_REG_BTB_GUARANTEED_VOQ25_RT_OFFSET			42990
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ25_RT_OFFSET		42991
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ26_RT_OFFSET		42992
+#define PBF_REG_BTB_GUARANTEED_VOQ26_RT_OFFSET			42993
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ26_RT_OFFSET		42994
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ27_RT_OFFSET		42995
+#define PBF_REG_BTB_GUARANTEED_VOQ27_RT_OFFSET			42996
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ27_RT_OFFSET		42997
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ28_RT_OFFSET		42998
+#define PBF_REG_BTB_GUARANTEED_VOQ28_RT_OFFSET			42999
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ28_RT_OFFSET		43000
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ29_RT_OFFSET		43001
+#define PBF_REG_BTB_GUARANTEED_VOQ29_RT_OFFSET			43002
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ29_RT_OFFSET		43003
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ30_RT_OFFSET		43004
+#define PBF_REG_BTB_GUARANTEED_VOQ30_RT_OFFSET			43005
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ30_RT_OFFSET		43006
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ31_RT_OFFSET		43007
+#define PBF_REG_BTB_GUARANTEED_VOQ31_RT_OFFSET			43008
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ31_RT_OFFSET		43009
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ32_RT_OFFSET		43010
+#define PBF_REG_BTB_GUARANTEED_VOQ32_RT_OFFSET			43011
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ32_RT_OFFSET		43012
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ33_RT_OFFSET		43013
+#define PBF_REG_BTB_GUARANTEED_VOQ33_RT_OFFSET			43014
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ33_RT_OFFSET		43015
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ34_RT_OFFSET		43016
+#define PBF_REG_BTB_GUARANTEED_VOQ34_RT_OFFSET			43017
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ34_RT_OFFSET		43018
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ35_RT_OFFSET		43019
+#define PBF_REG_BTB_GUARANTEED_VOQ35_RT_OFFSET			43020
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ35_RT_OFFSET		43021
+#define XCM_REG_CON_PHY_Q3_RT_OFFSET				43022
 
-#define RUNTIME_ARRAY_SIZE 34309
+#define RUNTIME_ARRAY_SIZE	43023
+
+/* Init Callbacks */
+#define DMAE_READY_CB	0
 
 /* The eth storm context for the Tstorm */
 struct tstorm_eth_conn_st_ctx {
@@ -4436,219 +4935,219 @@ struct xstorm_eth_conn_st_ctx {
 	__le32 reserved[60];
 };
 
-struct xstorm_eth_conn_ag_ctx {
+struct e4_xstorm_eth_conn_ag_ctx {
 	u8 reserved0;
-	u8 eth_state;
+	u8 state;
 	u8 flags0;
-#define XSTORM_ETH_CONN_AG_CTX_EXIST_IN_QM0_MASK	0x1
-#define XSTORM_ETH_CONN_AG_CTX_EXIST_IN_QM0_SHIFT	0
-#define XSTORM_ETH_CONN_AG_CTX_RESERVED1_MASK		0x1
-#define XSTORM_ETH_CONN_AG_CTX_RESERVED1_SHIFT		1
-#define XSTORM_ETH_CONN_AG_CTX_RESERVED2_MASK		0x1
-#define XSTORM_ETH_CONN_AG_CTX_RESERVED2_SHIFT		2
-#define XSTORM_ETH_CONN_AG_CTX_EXIST_IN_QM3_MASK	0x1
-#define XSTORM_ETH_CONN_AG_CTX_EXIST_IN_QM3_SHIFT	3
-#define XSTORM_ETH_CONN_AG_CTX_RESERVED3_MASK		0x1
-#define XSTORM_ETH_CONN_AG_CTX_RESERVED3_SHIFT		4
-#define XSTORM_ETH_CONN_AG_CTX_RESERVED4_MASK		0x1
-#define XSTORM_ETH_CONN_AG_CTX_RESERVED4_SHIFT		5
-#define XSTORM_ETH_CONN_AG_CTX_RESERVED5_MASK		0x1
-#define XSTORM_ETH_CONN_AG_CTX_RESERVED5_SHIFT		6
-#define XSTORM_ETH_CONN_AG_CTX_RESERVED6_MASK		0x1
-#define XSTORM_ETH_CONN_AG_CTX_RESERVED6_SHIFT		7
+#define E4_XSTORM_ETH_CONN_AG_CTX_EXIST_IN_QM0_MASK	0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_EXIST_IN_QM0_SHIFT	0
+#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED1_MASK	0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED1_SHIFT	1
+#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED2_MASK	0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED2_SHIFT	2
+#define E4_XSTORM_ETH_CONN_AG_CTX_EXIST_IN_QM3_MASK	0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_EXIST_IN_QM3_SHIFT	3
+#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED3_MASK	0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED3_SHIFT	4
+#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED4_MASK	0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED4_SHIFT	5
+#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED5_MASK	0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED5_SHIFT	6
+#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED6_MASK	0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED6_SHIFT	7
	u8 flags1;
-#define XSTORM_ETH_CONN_AG_CTX_RESERVED7_MASK		0x1
-#define XSTORM_ETH_CONN_AG_CTX_RESERVED7_SHIFT		0
-#define XSTORM_ETH_CONN_AG_CTX_RESERVED8_MASK		0x1
-#define XSTORM_ETH_CONN_AG_CTX_RESERVED8_SHIFT		1
-#define XSTORM_ETH_CONN_AG_CTX_RESERVED9_MASK		0x1
-#define XSTORM_ETH_CONN_AG_CTX_RESERVED9_SHIFT		2
-#define XSTORM_ETH_CONN_AG_CTX_BIT11_MASK		0x1
-#define XSTORM_ETH_CONN_AG_CTX_BIT11_SHIFT		3
-#define XSTORM_ETH_CONN_AG_CTX_BIT12_MASK		0x1
-#define XSTORM_ETH_CONN_AG_CTX_BIT12_SHIFT		4
-#define XSTORM_ETH_CONN_AG_CTX_BIT13_MASK		0x1
-#define XSTORM_ETH_CONN_AG_CTX_BIT13_SHIFT		5
-#define XSTORM_ETH_CONN_AG_CTX_TX_RULE_ACTIVE_MASK	0x1
-#define XSTORM_ETH_CONN_AG_CTX_TX_RULE_ACTIVE_SHIFT	6
-#define XSTORM_ETH_CONN_AG_CTX_DQ_CF_ACTIVE_MASK	0x1
-#define XSTORM_ETH_CONN_AG_CTX_DQ_CF_ACTIVE_SHIFT	7
+#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED7_MASK	0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED7_SHIFT	0
+#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED8_MASK	0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED8_SHIFT	1
+#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED9_MASK	0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED9_SHIFT	2
+#define E4_XSTORM_ETH_CONN_AG_CTX_BIT11_MASK		0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_BIT11_SHIFT		3
+#define E4_XSTORM_ETH_CONN_AG_CTX_E5_RESERVED2_MASK	0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_E5_RESERVED2_SHIFT	4
+#define E4_XSTORM_ETH_CONN_AG_CTX_E5_RESERVED3_MASK	0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_E5_RESERVED3_SHIFT	5
+#define E4_XSTORM_ETH_CONN_AG_CTX_TX_RULE_ACTIVE_MASK	0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_TX_RULE_ACTIVE_SHIFT	6
+#define E4_XSTORM_ETH_CONN_AG_CTX_DQ_CF_ACTIVE_MASK	0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_DQ_CF_ACTIVE_SHIFT	7
 	u8 flags2;
-#define XSTORM_ETH_CONN_AG_CTX_CF0_MASK		0x3
-#define XSTORM_ETH_CONN_AG_CTX_CF0_SHIFT		0
-#define XSTORM_ETH_CONN_AG_CTX_CF1_MASK		0x3
-#define XSTORM_ETH_CONN_AG_CTX_CF1_SHIFT		2
-#define XSTORM_ETH_CONN_AG_CTX_CF2_MASK		0x3
-#define XSTORM_ETH_CONN_AG_CTX_CF2_SHIFT		4
-#define XSTORM_ETH_CONN_AG_CTX_CF3_MASK		0x3
-#define XSTORM_ETH_CONN_AG_CTX_CF3_SHIFT		6
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF0_MASK	0x3
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF0_SHIFT	0
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF1_MASK	0x3
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF1_SHIFT	2
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF2_MASK	0x3
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF2_SHIFT	4
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF3_MASK	0x3
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF3_SHIFT	6
 	u8 flags3;
-#define XSTORM_ETH_CONN_AG_CTX_CF4_MASK		0x3
-#define XSTORM_ETH_CONN_AG_CTX_CF4_SHIFT		0
-#define XSTORM_ETH_CONN_AG_CTX_CF5_MASK		0x3
-#define XSTORM_ETH_CONN_AG_CTX_CF5_SHIFT		2
-#define XSTORM_ETH_CONN_AG_CTX_CF6_MASK		0x3
-#define XSTORM_ETH_CONN_AG_CTX_CF6_SHIFT		4
-#define XSTORM_ETH_CONN_AG_CTX_CF7_MASK		0x3
-#define XSTORM_ETH_CONN_AG_CTX_CF7_SHIFT		6
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF4_MASK	0x3
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF4_SHIFT	0
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF5_MASK	0x3
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF5_SHIFT	2
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF6_MASK	0x3
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF6_SHIFT	4
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF7_MASK	0x3
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF7_SHIFT	6
	u8 flags4;
-#define XSTORM_ETH_CONN_AG_CTX_CF8_MASK		0x3
-#define XSTORM_ETH_CONN_AG_CTX_CF8_SHIFT		0
-#define XSTORM_ETH_CONN_AG_CTX_CF9_MASK		0x3
-#define XSTORM_ETH_CONN_AG_CTX_CF9_SHIFT		2
-#define XSTORM_ETH_CONN_AG_CTX_CF10_MASK		0x3
-#define XSTORM_ETH_CONN_AG_CTX_CF10_SHIFT		4
-#define XSTORM_ETH_CONN_AG_CTX_CF11_MASK		0x3
-#define XSTORM_ETH_CONN_AG_CTX_CF11_SHIFT		6
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF8_MASK	0x3
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF8_SHIFT	0
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF9_MASK	0x3
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF9_SHIFT	2
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF10_MASK	0x3
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF10_SHIFT	4
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF11_MASK	0x3
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF11_SHIFT	6
 	u8 flags5;
-#define XSTORM_ETH_CONN_AG_CTX_CF12_MASK		0x3
-#define XSTORM_ETH_CONN_AG_CTX_CF12_SHIFT		0
-#define XSTORM_ETH_CONN_AG_CTX_CF13_MASK		0x3
-#define XSTORM_ETH_CONN_AG_CTX_CF13_SHIFT		2
-#define XSTORM_ETH_CONN_AG_CTX_CF14_MASK		0x3
-#define XSTORM_ETH_CONN_AG_CTX_CF14_SHIFT		4
-#define XSTORM_ETH_CONN_AG_CTX_CF15_MASK		0x3
-#define XSTORM_ETH_CONN_AG_CTX_CF15_SHIFT		6
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF12_MASK	0x3
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF12_SHIFT	0
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF13_MASK	0x3
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF13_SHIFT	2
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF14_MASK	0x3
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF14_SHIFT	4
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF15_MASK	0x3
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF15_SHIFT	6
 	u8 flags6;
-#define XSTORM_ETH_CONN_AG_CTX_GO_TO_BD_CONS_CF_MASK	0x3
-#define XSTORM_ETH_CONN_AG_CTX_GO_TO_BD_CONS_CF_SHIFT	0
-#define XSTORM_ETH_CONN_AG_CTX_MULTI_UNICAST_CF_MASK	0x3
-#define XSTORM_ETH_CONN_AG_CTX_MULTI_UNICAST_CF_SHIFT	2
-#define XSTORM_ETH_CONN_AG_CTX_DQ_CF_MASK		0x3
-#define XSTORM_ETH_CONN_AG_CTX_DQ_CF_SHIFT		4
-#define XSTORM_ETH_CONN_AG_CTX_TERMINATE_CF_MASK	0x3
-#define XSTORM_ETH_CONN_AG_CTX_TERMINATE_CF_SHIFT	6
+#define E4_XSTORM_ETH_CONN_AG_CTX_GO_TO_BD_CONS_CF_MASK		0x3
+#define E4_XSTORM_ETH_CONN_AG_CTX_GO_TO_BD_CONS_CF_SHIFT	0
+#define E4_XSTORM_ETH_CONN_AG_CTX_MULTI_UNICAST_CF_MASK		0x3
+#define E4_XSTORM_ETH_CONN_AG_CTX_MULTI_UNICAST_CF_SHIFT	2
+#define E4_XSTORM_ETH_CONN_AG_CTX_DQ_CF_MASK			0x3
+#define E4_XSTORM_ETH_CONN_AG_CTX_DQ_CF_SHIFT			4
+#define E4_XSTORM_ETH_CONN_AG_CTX_TERMINATE_CF_MASK		0x3
+#define E4_XSTORM_ETH_CONN_AG_CTX_TERMINATE_CF_SHIFT		6
 	u8 flags7;
-#define XSTORM_ETH_CONN_AG_CTX_FLUSH_Q0_MASK		0x3
-#define XSTORM_ETH_CONN_AG_CTX_FLUSH_Q0_SHIFT		0
-#define XSTORM_ETH_CONN_AG_CTX_RESERVED10_MASK		0x3
-#define XSTORM_ETH_CONN_AG_CTX_RESERVED10_SHIFT		2
-#define XSTORM_ETH_CONN_AG_CTX_SLOW_PATH_MASK		0x3
-#define XSTORM_ETH_CONN_AG_CTX_SLOW_PATH_SHIFT		4
-#define XSTORM_ETH_CONN_AG_CTX_CF0EN_MASK		0x1
-#define XSTORM_ETH_CONN_AG_CTX_CF0EN_SHIFT		6
-#define XSTORM_ETH_CONN_AG_CTX_CF1EN_MASK		0x1
-#define XSTORM_ETH_CONN_AG_CTX_CF1EN_SHIFT		7
+#define E4_XSTORM_ETH_CONN_AG_CTX_FLUSH_Q0_MASK		0x3
+#define E4_XSTORM_ETH_CONN_AG_CTX_FLUSH_Q0_SHIFT	0
+#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED10_MASK	0x3
+#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED10_SHIFT	2
+#define E4_XSTORM_ETH_CONN_AG_CTX_SLOW_PATH_MASK	0x3
+#define E4_XSTORM_ETH_CONN_AG_CTX_SLOW_PATH_SHIFT	4
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF0EN_MASK		0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF0EN_SHIFT		6
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF1EN_MASK		0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF1EN_SHIFT		7
 	u8 flags8;
-#define XSTORM_ETH_CONN_AG_CTX_CF2EN_MASK		0x1
-#define XSTORM_ETH_CONN_AG_CTX_CF2EN_SHIFT		0
-#define XSTORM_ETH_CONN_AG_CTX_CF3EN_MASK		0x1
-#define XSTORM_ETH_CONN_AG_CTX_CF3EN_SHIFT		1
-#define XSTORM_ETH_CONN_AG_CTX_CF4EN_MASK		0x1
-#define XSTORM_ETH_CONN_AG_CTX_CF4EN_SHIFT		2
-#define XSTORM_ETH_CONN_AG_CTX_CF5EN_MASK		0x1
-#define XSTORM_ETH_CONN_AG_CTX_CF5EN_SHIFT		3
-#define XSTORM_ETH_CONN_AG_CTX_CF6EN_MASK		0x1
-#define XSTORM_ETH_CONN_AG_CTX_CF6EN_SHIFT		4
-#define XSTORM_ETH_CONN_AG_CTX_CF7EN_MASK		0x1
-#define XSTORM_ETH_CONN_AG_CTX_CF7EN_SHIFT		5
-#define XSTORM_ETH_CONN_AG_CTX_CF8EN_MASK		0x1
-#define XSTORM_ETH_CONN_AG_CTX_CF8EN_SHIFT		6
-#define XSTORM_ETH_CONN_AG_CTX_CF9EN_MASK		0x1
-#define XSTORM_ETH_CONN_AG_CTX_CF9EN_SHIFT		7
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF2EN_MASK	0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF2EN_SHIFT	0
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF3EN_MASK	0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF3EN_SHIFT	1
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF4EN_MASK	0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF4EN_SHIFT	2
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF5EN_MASK	0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF5EN_SHIFT	3
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF6EN_MASK	0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF6EN_SHIFT	4
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF7EN_MASK	0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF7EN_SHIFT	5
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF8EN_MASK	0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF8EN_SHIFT	6
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF9EN_MASK	0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF9EN_SHIFT	7
 	u8 flags9;
-#define XSTORM_ETH_CONN_AG_CTX_CF10EN_MASK		0x1
-#define XSTORM_ETH_CONN_AG_CTX_CF10EN_SHIFT		0
-#define XSTORM_ETH_CONN_AG_CTX_CF11EN_MASK		0x1
-#define XSTORM_ETH_CONN_AG_CTX_CF11EN_SHIFT		1
-#define XSTORM_ETH_CONN_AG_CTX_CF12EN_MASK		0x1
-#define XSTORM_ETH_CONN_AG_CTX_CF12EN_SHIFT		2
-#define XSTORM_ETH_CONN_AG_CTX_CF13EN_MASK		0x1
-#define XSTORM_ETH_CONN_AG_CTX_CF13EN_SHIFT		3
-#define XSTORM_ETH_CONN_AG_CTX_CF14EN_MASK		0x1
-#define XSTORM_ETH_CONN_AG_CTX_CF14EN_SHIFT		4
-#define XSTORM_ETH_CONN_AG_CTX_CF15EN_MASK		0x1
-#define XSTORM_ETH_CONN_AG_CTX_CF15EN_SHIFT		5
-#define XSTORM_ETH_CONN_AG_CTX_GO_TO_BD_CONS_CF_EN_MASK	0x1
-#define XSTORM_ETH_CONN_AG_CTX_GO_TO_BD_CONS_CF_EN_SHIFT 6
-#define XSTORM_ETH_CONN_AG_CTX_MULTI_UNICAST_CF_EN_MASK	0x1
-#define XSTORM_ETH_CONN_AG_CTX_MULTI_UNICAST_CF_EN_SHIFT 7
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF10EN_MASK			0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF10EN_SHIFT			0
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF11EN_MASK			0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF11EN_SHIFT			1
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF12EN_MASK			0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF12EN_SHIFT			2
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF13EN_MASK			0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF13EN_SHIFT			3
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF14EN_MASK			0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF14EN_SHIFT			4
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF15EN_MASK			0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF15EN_SHIFT			5
+#define E4_XSTORM_ETH_CONN_AG_CTX_GO_TO_BD_CONS_CF_EN_MASK	0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_GO_TO_BD_CONS_CF_EN_SHIFT	6
+#define E4_XSTORM_ETH_CONN_AG_CTX_MULTI_UNICAST_CF_EN_MASK	0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_MULTI_UNICAST_CF_EN_SHIFT	7
 	u8 flags10;
-#define XSTORM_ETH_CONN_AG_CTX_DQ_CF_EN_MASK		0x1
-#define XSTORM_ETH_CONN_AG_CTX_DQ_CF_EN_SHIFT		0
-#define XSTORM_ETH_CONN_AG_CTX_TERMINATE_CF_EN_MASK	0x1
-#define XSTORM_ETH_CONN_AG_CTX_TERMINATE_CF_EN_SHIFT	1
-#define XSTORM_ETH_CONN_AG_CTX_FLUSH_Q0_EN_MASK		0x1
-#define XSTORM_ETH_CONN_AG_CTX_FLUSH_Q0_EN_SHIFT	2
-#define XSTORM_ETH_CONN_AG_CTX_RESERVED11_MASK		0x1
-#define XSTORM_ETH_CONN_AG_CTX_RESERVED11_SHIFT		3
-#define XSTORM_ETH_CONN_AG_CTX_SLOW_PATH_EN_MASK	0x1
-#define XSTORM_ETH_CONN_AG_CTX_SLOW_PATH_EN_SHIFT	4
-#define XSTORM_ETH_CONN_AG_CTX_TPH_ENABLE_EN_RESERVED_MASK 0x1
-#define XSTORM_ETH_CONN_AG_CTX_TPH_ENABLE_EN_RESERVED_SHIFT 5
-#define XSTORM_ETH_CONN_AG_CTX_RESERVED12_MASK		0x1
-#define XSTORM_ETH_CONN_AG_CTX_RESERVED12_SHIFT		6
-#define XSTORM_ETH_CONN_AG_CTX_RESERVED13_MASK		0x1
-#define XSTORM_ETH_CONN_AG_CTX_RESERVED13_SHIFT		7
+#define E4_XSTORM_ETH_CONN_AG_CTX_DQ_CF_EN_MASK			0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_DQ_CF_EN_SHIFT		0
+#define E4_XSTORM_ETH_CONN_AG_CTX_TERMINATE_CF_EN_MASK		0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_TERMINATE_CF_EN_SHIFT		1
+#define E4_XSTORM_ETH_CONN_AG_CTX_FLUSH_Q0_EN_MASK		0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_FLUSH_Q0_EN_SHIFT		2
+#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED11_MASK		0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED11_SHIFT		3
+#define E4_XSTORM_ETH_CONN_AG_CTX_SLOW_PATH_EN_MASK		0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_SLOW_PATH_EN_SHIFT		4
+#define E4_XSTORM_ETH_CONN_AG_CTX_TPH_ENABLE_EN_RESERVED_MASK	0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_TPH_ENABLE_EN_RESERVED_SHIFT	5
+#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED12_MASK		0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED12_SHIFT		6
+#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED13_MASK		0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED13_SHIFT		7
 	u8 flags11;
-#define XSTORM_ETH_CONN_AG_CTX_RESERVED14_MASK		0x1
-#define XSTORM_ETH_CONN_AG_CTX_RESERVED14_SHIFT		0
-#define XSTORM_ETH_CONN_AG_CTX_RESERVED15_MASK		0x1
-#define XSTORM_ETH_CONN_AG_CTX_RESERVED15_SHIFT		1
-#define XSTORM_ETH_CONN_AG_CTX_TX_DEC_RULE_EN_MASK	0x1
-#define XSTORM_ETH_CONN_AG_CTX_TX_DEC_RULE_EN_SHIFT	2
-#define XSTORM_ETH_CONN_AG_CTX_RULE5EN_MASK		0x1
-#define XSTORM_ETH_CONN_AG_CTX_RULE5EN_SHIFT		3
-#define XSTORM_ETH_CONN_AG_CTX_RULE6EN_MASK		0x1
-#define XSTORM_ETH_CONN_AG_CTX_RULE6EN_SHIFT		4
-#define XSTORM_ETH_CONN_AG_CTX_RULE7EN_MASK		0x1
-#define XSTORM_ETH_CONN_AG_CTX_RULE7EN_SHIFT		5
-#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED1_MASK	0x1
-#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED1_SHIFT	6
-#define XSTORM_ETH_CONN_AG_CTX_RULE9EN_MASK		0x1
-#define XSTORM_ETH_CONN_AG_CTX_RULE9EN_SHIFT		7
+#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED14_MASK	0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED14_SHIFT	0
+#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED15_MASK	0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED15_SHIFT	1
+#define E4_XSTORM_ETH_CONN_AG_CTX_TX_DEC_RULE_EN_MASK	0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_TX_DEC_RULE_EN_SHIFT	2
+#define E4_XSTORM_ETH_CONN_AG_CTX_RULE5EN_MASK		0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_RULE5EN_SHIFT		3
+#define E4_XSTORM_ETH_CONN_AG_CTX_RULE6EN_MASK		0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_RULE6EN_SHIFT		4
+#define E4_XSTORM_ETH_CONN_AG_CTX_RULE7EN_MASK		0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_RULE7EN_SHIFT		5
+#define E4_XSTORM_ETH_CONN_AG_CTX_A0_RESERVED1_MASK	0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_A0_RESERVED1_SHIFT	6
+#define E4_XSTORM_ETH_CONN_AG_CTX_RULE9EN_MASK		0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_RULE9EN_SHIFT		7
 	u8 flags12;
-#define XSTORM_ETH_CONN_AG_CTX_RULE10EN_MASK		0x1
-#define XSTORM_ETH_CONN_AG_CTX_RULE10EN_SHIFT		0
-#define XSTORM_ETH_CONN_AG_CTX_RULE11EN_MASK		0x1
-#define XSTORM_ETH_CONN_AG_CTX_RULE11EN_SHIFT		1
-#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED2_MASK	0x1
-#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED2_SHIFT	2
-#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED3_MASK	0x1
-#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED3_SHIFT	3
-#define XSTORM_ETH_CONN_AG_CTX_RULE14EN_MASK		0x1
-#define XSTORM_ETH_CONN_AG_CTX_RULE14EN_SHIFT		4
-#define XSTORM_ETH_CONN_AG_CTX_RULE15EN_MASK		0x1
-#define XSTORM_ETH_CONN_AG_CTX_RULE15EN_SHIFT		5
-#define XSTORM_ETH_CONN_AG_CTX_RULE16EN_MASK		0x1
-#define XSTORM_ETH_CONN_AG_CTX_RULE16EN_SHIFT		6
-#define XSTORM_ETH_CONN_AG_CTX_RULE17EN_MASK		0x1
-#define XSTORM_ETH_CONN_AG_CTX_RULE17EN_SHIFT		7
+#define E4_XSTORM_ETH_CONN_AG_CTX_RULE10EN_MASK		0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_RULE10EN_SHIFT	0
+#define E4_XSTORM_ETH_CONN_AG_CTX_RULE11EN_MASK		0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_RULE11EN_SHIFT	1
+#define E4_XSTORM_ETH_CONN_AG_CTX_A0_RESERVED2_MASK	0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_A0_RESERVED2_SHIFT	2
+#define E4_XSTORM_ETH_CONN_AG_CTX_A0_RESERVED3_MASK	0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_A0_RESERVED3_SHIFT	3
+#define E4_XSTORM_ETH_CONN_AG_CTX_RULE14EN_MASK		0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_RULE14EN_SHIFT	4
+#define E4_XSTORM_ETH_CONN_AG_CTX_RULE15EN_MASK		0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_RULE15EN_SHIFT	5
+#define E4_XSTORM_ETH_CONN_AG_CTX_RULE16EN_MASK		0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_RULE16EN_SHIFT	6
+#define E4_XSTORM_ETH_CONN_AG_CTX_RULE17EN_MASK		0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_RULE17EN_SHIFT	7
 	u8 flags13;
-#define XSTORM_ETH_CONN_AG_CTX_RULE18EN_MASK		0x1
-#define XSTORM_ETH_CONN_AG_CTX_RULE18EN_SHIFT		0
-#define XSTORM_ETH_CONN_AG_CTX_RULE19EN_MASK		0x1
-#define XSTORM_ETH_CONN_AG_CTX_RULE19EN_SHIFT		1
-#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED4_MASK	0x1
-#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED4_SHIFT	2
-#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED5_MASK	0x1
-#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED5_SHIFT	3
-#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED6_MASK	0x1
-#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED6_SHIFT	4
-#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED7_MASK	0x1
-#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED7_SHIFT	5
-#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED8_MASK	0x1
-#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED8_SHIFT	6
-#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED9_MASK	0x1
-#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED9_SHIFT	7
+#define E4_XSTORM_ETH_CONN_AG_CTX_RULE18EN_MASK		0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_RULE18EN_SHIFT	0
+#define E4_XSTORM_ETH_CONN_AG_CTX_RULE19EN_MASK		0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_RULE19EN_SHIFT	1
+#define E4_XSTORM_ETH_CONN_AG_CTX_A0_RESERVED4_MASK	0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_A0_RESERVED4_SHIFT	2
+#define E4_XSTORM_ETH_CONN_AG_CTX_A0_RESERVED5_MASK	0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_A0_RESERVED5_SHIFT	3
+#define E4_XSTORM_ETH_CONN_AG_CTX_A0_RESERVED6_MASK	0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_A0_RESERVED6_SHIFT	4
+#define E4_XSTORM_ETH_CONN_AG_CTX_A0_RESERVED7_MASK	0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_A0_RESERVED7_SHIFT	5
+#define E4_XSTORM_ETH_CONN_AG_CTX_A0_RESERVED8_MASK	0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_A0_RESERVED8_SHIFT	6
+#define E4_XSTORM_ETH_CONN_AG_CTX_A0_RESERVED9_MASK	0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_A0_RESERVED9_SHIFT	7
 	u8 flags14;
-#define XSTORM_ETH_CONN_AG_CTX_EDPM_USE_EXT_HDR_MASK	0x1
-#define XSTORM_ETH_CONN_AG_CTX_EDPM_USE_EXT_HDR_SHIFT	0
-#define XSTORM_ETH_CONN_AG_CTX_EDPM_SEND_RAW_L3L4_MASK	0x1
-#define XSTORM_ETH_CONN_AG_CTX_EDPM_SEND_RAW_L3L4_SHIFT	1
-#define XSTORM_ETH_CONN_AG_CTX_EDPM_INBAND_PROP_HDR_MASK 0x1
-#define XSTORM_ETH_CONN_AG_CTX_EDPM_INBAND_PROP_HDR_SHIFT 2
-#define XSTORM_ETH_CONN_AG_CTX_EDPM_SEND_EXT_TUNNEL_MASK 0x1
-#define XSTORM_ETH_CONN_AG_CTX_EDPM_SEND_EXT_TUNNEL_SHIFT 3
-#define XSTORM_ETH_CONN_AG_CTX_L2_EDPM_ENABLE_MASK	0x1
-#define XSTORM_ETH_CONN_AG_CTX_L2_EDPM_ENABLE_SHIFT	4
-#define XSTORM_ETH_CONN_AG_CTX_ROCE_EDPM_ENABLE_MASK	0x1
-#define XSTORM_ETH_CONN_AG_CTX_ROCE_EDPM_ENABLE_SHIFT	5
-#define XSTORM_ETH_CONN_AG_CTX_TPH_ENABLE_MASK		0x3
-#define XSTORM_ETH_CONN_AG_CTX_TPH_ENABLE_SHIFT		6
+#define E4_XSTORM_ETH_CONN_AG_CTX_EDPM_USE_EXT_HDR_MASK		0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_EDPM_USE_EXT_HDR_SHIFT	0
+#define E4_XSTORM_ETH_CONN_AG_CTX_EDPM_SEND_RAW_L3L4_MASK	0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_EDPM_SEND_RAW_L3L4_SHIFT	1
+#define E4_XSTORM_ETH_CONN_AG_CTX_EDPM_INBAND_PROP_HDR_MASK	0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_EDPM_INBAND_PROP_HDR_SHIFT	2
+#define E4_XSTORM_ETH_CONN_AG_CTX_EDPM_SEND_EXT_TUNNEL_MASK	0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_EDPM_SEND_EXT_TUNNEL_SHIFT	3
+#define E4_XSTORM_ETH_CONN_AG_CTX_L2_EDPM_ENABLE_MASK		0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_L2_EDPM_ENABLE_SHIFT		4
+#define E4_XSTORM_ETH_CONN_AG_CTX_ROCE_EDPM_ENABLE_MASK		0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_ROCE_EDPM_ENABLE_SHIFT	5
+#define E4_XSTORM_ETH_CONN_AG_CTX_TPH_ENABLE_MASK		0x3
+#define E4_XSTORM_ETH_CONN_AG_CTX_TPH_ENABLE_SHIFT		6
 	u8 edpm_event_id;
 	__le16 physical_q0;
-	__le16 ereserved1;
+	__le16 e5_reserved1;
 	__le16 edpm_num_bds;
 	__le16 tx_bd_cons;
 	__le16 tx_bd_prod;
@@ -4681,7 +5180,7 @@ struct xstorm_eth_conn_ag_ctx {
 	u8 byte13;
 	u8 byte14;
 	u8 byte15;
-	u8 ereserved;
+	u8 e5_reserved;
 	__le16 word11;
 	__le32 reg10;
 	__le32 reg11;
@@ -4704,37 +5203,37 @@ struct ystorm_eth_conn_st_ctx {
 	__le32 reserved[8];
 };
 
-struct ystorm_eth_conn_ag_ctx {
+struct e4_ystorm_eth_conn_ag_ctx {
 	u8 byte0;
 	u8 state;
 	u8 flags0;
-#define YSTORM_ETH_CONN_AG_CTX_BIT0_MASK		0x1
-#define YSTORM_ETH_CONN_AG_CTX_BIT0_SHIFT		0
-#define YSTORM_ETH_CONN_AG_CTX_BIT1_MASK		0x1
-#define YSTORM_ETH_CONN_AG_CTX_BIT1_SHIFT		1
-#define YSTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_MASK	0x3
-#define YSTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_SHIFT	2
-#define YSTORM_ETH_CONN_AG_CTX_PMD_TERMINATE_CF_MASK	0x3
-#define YSTORM_ETH_CONN_AG_CTX_PMD_TERMINATE_CF_SHIFT	4
-#define YSTORM_ETH_CONN_AG_CTX_CF2_MASK			0x3
-#define YSTORM_ETH_CONN_AG_CTX_CF2_SHIFT		6
+#define E4_YSTORM_ETH_CONN_AG_CTX_BIT0_MASK			0x1
+#define E4_YSTORM_ETH_CONN_AG_CTX_BIT0_SHIFT			0
+#define E4_YSTORM_ETH_CONN_AG_CTX_BIT1_MASK			0x1
+#define E4_YSTORM_ETH_CONN_AG_CTX_BIT1_SHIFT			1
+#define E4_YSTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_MASK	0x3
+#define E4_YSTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_SHIFT	2
+#define E4_YSTORM_ETH_CONN_AG_CTX_PMD_TERMINATE_CF_MASK		0x3
+#define E4_YSTORM_ETH_CONN_AG_CTX_PMD_TERMINATE_CF_SHIFT	4
+#define E4_YSTORM_ETH_CONN_AG_CTX_CF2_MASK			0x3
+#define E4_YSTORM_ETH_CONN_AG_CTX_CF2_SHIFT			6
 	u8 flags1;
-#define YSTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_EN_MASK	0x1
-#define YSTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_EN_SHIFT	0
-#define YSTORM_ETH_CONN_AG_CTX_PMD_TERMINATE_CF_EN_MASK		0x1
-#define YSTORM_ETH_CONN_AG_CTX_PMD_TERMINATE_CF_EN_SHIFT	1
-#define YSTORM_ETH_CONN_AG_CTX_CF2EN_MASK			0x1
-#define YSTORM_ETH_CONN_AG_CTX_CF2EN_SHIFT			2
-#define YSTORM_ETH_CONN_AG_CTX_RULE0EN_MASK			0x1
-#define YSTORM_ETH_CONN_AG_CTX_RULE0EN_SHIFT			3
-#define YSTORM_ETH_CONN_AG_CTX_RULE1EN_MASK			0x1
-#define YSTORM_ETH_CONN_AG_CTX_RULE1EN_SHIFT			4
-#define YSTORM_ETH_CONN_AG_CTX_RULE2EN_MASK			0x1
-#define YSTORM_ETH_CONN_AG_CTX_RULE2EN_SHIFT			5
-#define YSTORM_ETH_CONN_AG_CTX_RULE3EN_MASK			0x1
-#define YSTORM_ETH_CONN_AG_CTX_RULE3EN_SHIFT			6
-#define YSTORM_ETH_CONN_AG_CTX_RULE4EN_MASK			0x1
-#define YSTORM_ETH_CONN_AG_CTX_RULE4EN_SHIFT			7
+#define E4_YSTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_EN_MASK	0x1
+#define E4_YSTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_EN_SHIFT	0
+#define E4_YSTORM_ETH_CONN_AG_CTX_PMD_TERMINATE_CF_EN_MASK	0x1
+#define E4_YSTORM_ETH_CONN_AG_CTX_PMD_TERMINATE_CF_EN_SHIFT	1
+#define E4_YSTORM_ETH_CONN_AG_CTX_CF2EN_MASK			0x1
+#define E4_YSTORM_ETH_CONN_AG_CTX_CF2EN_SHIFT			2
+#define E4_YSTORM_ETH_CONN_AG_CTX_RULE0EN_MASK			0x1
+#define E4_YSTORM_ETH_CONN_AG_CTX_RULE0EN_SHIFT			3
+#define E4_YSTORM_ETH_CONN_AG_CTX_RULE1EN_MASK			0x1
+#define E4_YSTORM_ETH_CONN_AG_CTX_RULE1EN_SHIFT			4
+#define E4_YSTORM_ETH_CONN_AG_CTX_RULE2EN_MASK			0x1
+#define E4_YSTORM_ETH_CONN_AG_CTX_RULE2EN_SHIFT			5
+#define E4_YSTORM_ETH_CONN_AG_CTX_RULE3EN_MASK			0x1
+#define E4_YSTORM_ETH_CONN_AG_CTX_RULE3EN_SHIFT			6
+#define E4_YSTORM_ETH_CONN_AG_CTX_RULE4EN_MASK			0x1
+#define E4_YSTORM_ETH_CONN_AG_CTX_RULE4EN_SHIFT			7
 	u8 tx_q0_int_coallecing_timeset;
 	u8 byte3;
 	__le16 word0;
@@ -4748,89 +5247,89 @@ struct ystorm_eth_conn_ag_ctx {
 	__le32 reg3;
 };
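
For orientation: each *_MASK/*_SHIFT pair above describes one sub-field of a
flags byte. A minimal sketch of how such pairs are consumed, assuming helpers
equivalent to the GET_FIELD()/SET_FIELD() macros the qed driver keeps in
common_hsi.h; the specific fields touched here are illustrative only.

/* Paste name##_MASK / name##_SHIFT together, mirroring common_hsi.h */
#define GET_FIELD(value, name) \
	(((value) >> (name ## _SHIFT)) & name ## _MASK)
#define SET_FIELD(value, name, flag)				\
	do {							\
		(value) &= ~(name ## _MASK << name ## _SHIFT);	\
		(value) |= (((u64)(flag)) << (name ## _SHIFT));	\
	} while (0)

static u8 example_ystorm_flags(struct e4_ystorm_eth_conn_ag_ctx *ctx)
{
	/* Set the TX BD consumer-update CF enable bit in flags1... */
	SET_FIELD(ctx->flags1,
		  E4_YSTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_EN, 1);
	/* ...and read the two-bit CF2 field back out of flags0. */
	return GET_FIELD(ctx->flags0, E4_YSTORM_ETH_CONN_AG_CTX_CF2);
}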
 
-struct tstorm_eth_conn_ag_ctx {
+struct e4_tstorm_eth_conn_ag_ctx {
 	u8 byte0;
 	u8 byte1;
 	u8 flags0;
-#define TSTORM_ETH_CONN_AG_CTX_BIT0_MASK		0x1
-#define TSTORM_ETH_CONN_AG_CTX_BIT0_SHIFT		0
-#define TSTORM_ETH_CONN_AG_CTX_BIT1_MASK		0x1
-#define TSTORM_ETH_CONN_AG_CTX_BIT1_SHIFT		1
-#define TSTORM_ETH_CONN_AG_CTX_BIT2_MASK		0x1
-#define TSTORM_ETH_CONN_AG_CTX_BIT2_SHIFT		2
-#define TSTORM_ETH_CONN_AG_CTX_BIT3_MASK		0x1
-#define TSTORM_ETH_CONN_AG_CTX_BIT3_SHIFT		3
-#define TSTORM_ETH_CONN_AG_CTX_BIT4_MASK		0x1
-#define TSTORM_ETH_CONN_AG_CTX_BIT4_SHIFT		4
-#define TSTORM_ETH_CONN_AG_CTX_BIT5_MASK		0x1
-#define TSTORM_ETH_CONN_AG_CTX_BIT5_SHIFT		5
-#define TSTORM_ETH_CONN_AG_CTX_CF0_MASK			0x3
-#define TSTORM_ETH_CONN_AG_CTX_CF0_SHIFT		6
+#define E4_TSTORM_ETH_CONN_AG_CTX_BIT0_MASK	0x1
+#define E4_TSTORM_ETH_CONN_AG_CTX_BIT0_SHIFT	0
+#define E4_TSTORM_ETH_CONN_AG_CTX_BIT1_MASK	0x1
+#define E4_TSTORM_ETH_CONN_AG_CTX_BIT1_SHIFT	1
+#define E4_TSTORM_ETH_CONN_AG_CTX_BIT2_MASK	0x1
+#define E4_TSTORM_ETH_CONN_AG_CTX_BIT2_SHIFT	2
+#define E4_TSTORM_ETH_CONN_AG_CTX_BIT3_MASK	0x1
+#define E4_TSTORM_ETH_CONN_AG_CTX_BIT3_SHIFT	3
+#define E4_TSTORM_ETH_CONN_AG_CTX_BIT4_MASK	0x1
+#define E4_TSTORM_ETH_CONN_AG_CTX_BIT4_SHIFT	4
+#define E4_TSTORM_ETH_CONN_AG_CTX_BIT5_MASK	0x1
+#define E4_TSTORM_ETH_CONN_AG_CTX_BIT5_SHIFT	5
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF0_MASK	0x3
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF0_SHIFT	6
 	u8 flags1;
-#define TSTORM_ETH_CONN_AG_CTX_CF1_MASK			0x3
-#define TSTORM_ETH_CONN_AG_CTX_CF1_SHIFT		0
-#define TSTORM_ETH_CONN_AG_CTX_CF2_MASK			0x3
-#define TSTORM_ETH_CONN_AG_CTX_CF2_SHIFT		2
-#define TSTORM_ETH_CONN_AG_CTX_CF3_MASK			0x3
-#define TSTORM_ETH_CONN_AG_CTX_CF3_SHIFT		4
-#define TSTORM_ETH_CONN_AG_CTX_CF4_MASK			0x3
-#define TSTORM_ETH_CONN_AG_CTX_CF4_SHIFT		6
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF1_MASK	0x3
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF1_SHIFT	0
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF2_MASK	0x3
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF2_SHIFT	2
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF3_MASK	0x3
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF3_SHIFT	4
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF4_MASK	0x3
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF4_SHIFT	6
 	u8 flags2;
-#define TSTORM_ETH_CONN_AG_CTX_CF5_MASK			0x3
-#define TSTORM_ETH_CONN_AG_CTX_CF5_SHIFT		0
-#define TSTORM_ETH_CONN_AG_CTX_CF6_MASK			0x3
-#define TSTORM_ETH_CONN_AG_CTX_CF6_SHIFT		2
-#define TSTORM_ETH_CONN_AG_CTX_CF7_MASK			0x3
-#define TSTORM_ETH_CONN_AG_CTX_CF7_SHIFT		4
-#define TSTORM_ETH_CONN_AG_CTX_CF8_MASK			0x3
-#define TSTORM_ETH_CONN_AG_CTX_CF8_SHIFT		6
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF5_MASK	0x3
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF5_SHIFT	0
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF6_MASK	0x3
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF6_SHIFT	2
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF7_MASK	0x3
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF7_SHIFT	4
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF8_MASK	0x3
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF8_SHIFT	6
 	u8 flags3;
-#define TSTORM_ETH_CONN_AG_CTX_CF9_MASK			0x3
-#define TSTORM_ETH_CONN_AG_CTX_CF9_SHIFT		0
-#define TSTORM_ETH_CONN_AG_CTX_CF10_MASK		0x3
-#define TSTORM_ETH_CONN_AG_CTX_CF10_SHIFT		2
-#define TSTORM_ETH_CONN_AG_CTX_CF0EN_MASK		0x1
-#define TSTORM_ETH_CONN_AG_CTX_CF0EN_SHIFT		4
-#define TSTORM_ETH_CONN_AG_CTX_CF1EN_MASK		0x1
-#define TSTORM_ETH_CONN_AG_CTX_CF1EN_SHIFT		5
-#define TSTORM_ETH_CONN_AG_CTX_CF2EN_MASK		0x1
-#define TSTORM_ETH_CONN_AG_CTX_CF2EN_SHIFT		6
-#define TSTORM_ETH_CONN_AG_CTX_CF3EN_MASK		0x1
-#define TSTORM_ETH_CONN_AG_CTX_CF3EN_SHIFT		7
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF9_MASK	0x3
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF9_SHIFT	0
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF10_MASK	0x3
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF10_SHIFT	2
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF0EN_MASK	0x1
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF0EN_SHIFT	4
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF1EN_MASK	0x1
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF1EN_SHIFT	5
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF2EN_MASK	0x1
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF2EN_SHIFT	6
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF3EN_MASK	0x1
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF3EN_SHIFT	7
 	u8 flags4;
-#define TSTORM_ETH_CONN_AG_CTX_CF4EN_MASK		0x1
-#define TSTORM_ETH_CONN_AG_CTX_CF4EN_SHIFT		0
-#define TSTORM_ETH_CONN_AG_CTX_CF5EN_MASK		0x1
-#define TSTORM_ETH_CONN_AG_CTX_CF5EN_SHIFT		1
-#define TSTORM_ETH_CONN_AG_CTX_CF6EN_MASK		0x1
-#define TSTORM_ETH_CONN_AG_CTX_CF6EN_SHIFT		2
-#define TSTORM_ETH_CONN_AG_CTX_CF7EN_MASK		0x1
-#define TSTORM_ETH_CONN_AG_CTX_CF7EN_SHIFT		3
-#define TSTORM_ETH_CONN_AG_CTX_CF8EN_MASK		0x1
-#define TSTORM_ETH_CONN_AG_CTX_CF8EN_SHIFT		4
-#define TSTORM_ETH_CONN_AG_CTX_CF9EN_MASK		0x1
-#define TSTORM_ETH_CONN_AG_CTX_CF9EN_SHIFT		5
-#define TSTORM_ETH_CONN_AG_CTX_CF10EN_MASK		0x1
-#define TSTORM_ETH_CONN_AG_CTX_CF10EN_SHIFT		6
-#define TSTORM_ETH_CONN_AG_CTX_RULE0EN_MASK		0x1
-#define TSTORM_ETH_CONN_AG_CTX_RULE0EN_SHIFT		7
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF4EN_MASK	0x1
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF4EN_SHIFT	0
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF5EN_MASK	0x1
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF5EN_SHIFT	1
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF6EN_MASK	0x1
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF6EN_SHIFT	2
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF7EN_MASK	0x1
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF7EN_SHIFT	3
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF8EN_MASK	0x1
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF8EN_SHIFT	4
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF9EN_MASK	0x1
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF9EN_SHIFT	5
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF10EN_MASK	0x1
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF10EN_SHIFT	6
+#define E4_TSTORM_ETH_CONN_AG_CTX_RULE0EN_MASK	0x1
+#define E4_TSTORM_ETH_CONN_AG_CTX_RULE0EN_SHIFT	7
 	u8 flags5;
-#define TSTORM_ETH_CONN_AG_CTX_RULE1EN_MASK		0x1
-#define TSTORM_ETH_CONN_AG_CTX_RULE1EN_SHIFT		0
-#define TSTORM_ETH_CONN_AG_CTX_RULE2EN_MASK		0x1
-#define TSTORM_ETH_CONN_AG_CTX_RULE2EN_SHIFT		1
-#define TSTORM_ETH_CONN_AG_CTX_RULE3EN_MASK		0x1
-#define TSTORM_ETH_CONN_AG_CTX_RULE3EN_SHIFT		2
-#define TSTORM_ETH_CONN_AG_CTX_RULE4EN_MASK		0x1
-#define TSTORM_ETH_CONN_AG_CTX_RULE4EN_SHIFT		3
-#define TSTORM_ETH_CONN_AG_CTX_RULE5EN_MASK		0x1
-#define TSTORM_ETH_CONN_AG_CTX_RULE5EN_SHIFT		4
-#define TSTORM_ETH_CONN_AG_CTX_RX_BD_EN_MASK		0x1
-#define TSTORM_ETH_CONN_AG_CTX_RX_BD_EN_SHIFT		5
-#define TSTORM_ETH_CONN_AG_CTX_RULE7EN_MASK		0x1
-#define TSTORM_ETH_CONN_AG_CTX_RULE7EN_SHIFT		6
-#define TSTORM_ETH_CONN_AG_CTX_RULE8EN_MASK		0x1
-#define TSTORM_ETH_CONN_AG_CTX_RULE8EN_SHIFT		7
+#define E4_TSTORM_ETH_CONN_AG_CTX_RULE1EN_MASK		0x1
+#define E4_TSTORM_ETH_CONN_AG_CTX_RULE1EN_SHIFT		0
+#define E4_TSTORM_ETH_CONN_AG_CTX_RULE2EN_MASK		0x1
+#define E4_TSTORM_ETH_CONN_AG_CTX_RULE2EN_SHIFT		1
+#define E4_TSTORM_ETH_CONN_AG_CTX_RULE3EN_MASK		0x1
+#define E4_TSTORM_ETH_CONN_AG_CTX_RULE3EN_SHIFT		2
+#define E4_TSTORM_ETH_CONN_AG_CTX_RULE4EN_MASK		0x1
+#define E4_TSTORM_ETH_CONN_AG_CTX_RULE4EN_SHIFT		3
+#define E4_TSTORM_ETH_CONN_AG_CTX_RULE5EN_MASK		0x1
+#define E4_TSTORM_ETH_CONN_AG_CTX_RULE5EN_SHIFT		4
+#define E4_TSTORM_ETH_CONN_AG_CTX_RX_BD_EN_MASK		0x1
+#define E4_TSTORM_ETH_CONN_AG_CTX_RX_BD_EN_SHIFT	5
+#define E4_TSTORM_ETH_CONN_AG_CTX_RULE7EN_MASK		0x1
+#define E4_TSTORM_ETH_CONN_AG_CTX_RULE7EN_SHIFT		6
+#define E4_TSTORM_ETH_CONN_AG_CTX_RULE8EN_MASK		0x1
+#define E4_TSTORM_ETH_CONN_AG_CTX_RULE8EN_SHIFT		7
 	__le32 reg0;
 	__le32 reg1;
 	__le32 reg2;
@@ -4852,63 +5351,63 @@ struct tstorm_eth_conn_ag_ctx {
 	__le32 reg10;
 };
 
-struct ustorm_eth_conn_ag_ctx {
+struct e4_ustorm_eth_conn_ag_ctx {
 	u8 byte0;
 	u8 byte1;
 	u8 flags0;
-#define USTORM_ETH_CONN_AG_CTX_BIT0_MASK			0x1
-#define USTORM_ETH_CONN_AG_CTX_BIT0_SHIFT			0
-#define USTORM_ETH_CONN_AG_CTX_BIT1_MASK			0x1
-#define USTORM_ETH_CONN_AG_CTX_BIT1_SHIFT			1
-#define USTORM_ETH_CONN_AG_CTX_TX_PMD_TERMINATE_CF_MASK		0x3
-#define USTORM_ETH_CONN_AG_CTX_TX_PMD_TERMINATE_CF_SHIFT	2
-#define USTORM_ETH_CONN_AG_CTX_RX_PMD_TERMINATE_CF_MASK		0x3
-#define USTORM_ETH_CONN_AG_CTX_RX_PMD_TERMINATE_CF_SHIFT	4
-#define USTORM_ETH_CONN_AG_CTX_CF2_MASK				0x3
-#define USTORM_ETH_CONN_AG_CTX_CF2_SHIFT			6
+#define E4_USTORM_ETH_CONN_AG_CTX_BIT0_MASK			0x1
+#define E4_USTORM_ETH_CONN_AG_CTX_BIT0_SHIFT			0
+#define E4_USTORM_ETH_CONN_AG_CTX_BIT1_MASK			0x1
+#define E4_USTORM_ETH_CONN_AG_CTX_BIT1_SHIFT			1
+#define E4_USTORM_ETH_CONN_AG_CTX_TX_PMD_TERMINATE_CF_MASK	0x3
+#define E4_USTORM_ETH_CONN_AG_CTX_TX_PMD_TERMINATE_CF_SHIFT	2
+#define E4_USTORM_ETH_CONN_AG_CTX_RX_PMD_TERMINATE_CF_MASK	0x3
+#define E4_USTORM_ETH_CONN_AG_CTX_RX_PMD_TERMINATE_CF_SHIFT	4
+#define E4_USTORM_ETH_CONN_AG_CTX_CF2_MASK			0x3
+#define E4_USTORM_ETH_CONN_AG_CTX_CF2_SHIFT			6
 	u8 flags1;
-#define USTORM_ETH_CONN_AG_CTX_CF3_MASK				0x3
-#define USTORM_ETH_CONN_AG_CTX_CF3_SHIFT			0
-#define USTORM_ETH_CONN_AG_CTX_TX_ARM_CF_MASK			0x3
-#define USTORM_ETH_CONN_AG_CTX_TX_ARM_CF_SHIFT			2
-#define USTORM_ETH_CONN_AG_CTX_RX_ARM_CF_MASK			0x3
-#define USTORM_ETH_CONN_AG_CTX_RX_ARM_CF_SHIFT			4
-#define USTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_MASK		0x3
-#define USTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_SHIFT		6
+#define E4_USTORM_ETH_CONN_AG_CTX_CF3_MASK			0x3
+#define E4_USTORM_ETH_CONN_AG_CTX_CF3_SHIFT			0
+#define E4_USTORM_ETH_CONN_AG_CTX_TX_ARM_CF_MASK		0x3
+#define E4_USTORM_ETH_CONN_AG_CTX_TX_ARM_CF_SHIFT		2
+#define E4_USTORM_ETH_CONN_AG_CTX_RX_ARM_CF_MASK		0x3
+#define E4_USTORM_ETH_CONN_AG_CTX_RX_ARM_CF_SHIFT		4
+#define E4_USTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_MASK	0x3
+#define E4_USTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_SHIFT	6
 	u8 flags2;
-#define USTORM_ETH_CONN_AG_CTX_TX_PMD_TERMINATE_CF_EN_MASK	0x1
-#define USTORM_ETH_CONN_AG_CTX_TX_PMD_TERMINATE_CF_EN_SHIFT	0
-#define USTORM_ETH_CONN_AG_CTX_RX_PMD_TERMINATE_CF_EN_MASK	0x1
-#define USTORM_ETH_CONN_AG_CTX_RX_PMD_TERMINATE_CF_EN_SHIFT	1
-#define USTORM_ETH_CONN_AG_CTX_CF2EN_MASK			0x1
-#define USTORM_ETH_CONN_AG_CTX_CF2EN_SHIFT			2
-#define USTORM_ETH_CONN_AG_CTX_CF3EN_MASK			0x1
-#define USTORM_ETH_CONN_AG_CTX_CF3EN_SHIFT			3
-#define USTORM_ETH_CONN_AG_CTX_TX_ARM_CF_EN_MASK		0x1
-#define USTORM_ETH_CONN_AG_CTX_TX_ARM_CF_EN_SHIFT		4
-#define USTORM_ETH_CONN_AG_CTX_RX_ARM_CF_EN_MASK		0x1
-#define USTORM_ETH_CONN_AG_CTX_RX_ARM_CF_EN_SHIFT		5
-#define USTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_EN_MASK	0x1
-#define USTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_EN_SHIFT	6
-#define USTORM_ETH_CONN_AG_CTX_RULE0EN_MASK			0x1
-#define USTORM_ETH_CONN_AG_CTX_RULE0EN_SHIFT			7
+#define E4_USTORM_ETH_CONN_AG_CTX_TX_PMD_TERMINATE_CF_EN_MASK	0x1
+#define E4_USTORM_ETH_CONN_AG_CTX_TX_PMD_TERMINATE_CF_EN_SHIFT	0
+#define E4_USTORM_ETH_CONN_AG_CTX_RX_PMD_TERMINATE_CF_EN_MASK	0x1
+#define E4_USTORM_ETH_CONN_AG_CTX_RX_PMD_TERMINATE_CF_EN_SHIFT	1
+#define E4_USTORM_ETH_CONN_AG_CTX_CF2EN_MASK			0x1
+#define E4_USTORM_ETH_CONN_AG_CTX_CF2EN_SHIFT			2
+#define E4_USTORM_ETH_CONN_AG_CTX_CF3EN_MASK			0x1
+#define E4_USTORM_ETH_CONN_AG_CTX_CF3EN_SHIFT			3
+#define E4_USTORM_ETH_CONN_AG_CTX_TX_ARM_CF_EN_MASK		0x1
+#define E4_USTORM_ETH_CONN_AG_CTX_TX_ARM_CF_EN_SHIFT		4
+#define E4_USTORM_ETH_CONN_AG_CTX_RX_ARM_CF_EN_MASK		0x1
+#define E4_USTORM_ETH_CONN_AG_CTX_RX_ARM_CF_EN_SHIFT		5
+#define E4_USTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_EN_MASK	0x1
+#define E4_USTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_EN_SHIFT	6
+#define E4_USTORM_ETH_CONN_AG_CTX_RULE0EN_MASK			0x1
+#define E4_USTORM_ETH_CONN_AG_CTX_RULE0EN_SHIFT			7
 	u8 flags3;
-#define USTORM_ETH_CONN_AG_CTX_RULE1EN_MASK			0x1
-#define USTORM_ETH_CONN_AG_CTX_RULE1EN_SHIFT			0
-#define USTORM_ETH_CONN_AG_CTX_RULE2EN_MASK			0x1
-#define USTORM_ETH_CONN_AG_CTX_RULE2EN_SHIFT			1
-#define USTORM_ETH_CONN_AG_CTX_RULE3EN_MASK			0x1
-#define USTORM_ETH_CONN_AG_CTX_RULE3EN_SHIFT			2
-#define USTORM_ETH_CONN_AG_CTX_RULE4EN_MASK			0x1
-#define USTORM_ETH_CONN_AG_CTX_RULE4EN_SHIFT			3
-#define USTORM_ETH_CONN_AG_CTX_RULE5EN_MASK			0x1
-#define USTORM_ETH_CONN_AG_CTX_RULE5EN_SHIFT			4
-#define USTORM_ETH_CONN_AG_CTX_RULE6EN_MASK			0x1
-#define USTORM_ETH_CONN_AG_CTX_RULE6EN_SHIFT			5
-#define USTORM_ETH_CONN_AG_CTX_RULE7EN_MASK			0x1
-#define USTORM_ETH_CONN_AG_CTX_RULE7EN_SHIFT			6
-#define USTORM_ETH_CONN_AG_CTX_RULE8EN_MASK			0x1
-#define USTORM_ETH_CONN_AG_CTX_RULE8EN_SHIFT			7
+#define E4_USTORM_ETH_CONN_AG_CTX_RULE1EN_MASK	0x1
+#define E4_USTORM_ETH_CONN_AG_CTX_RULE1EN_SHIFT	0
+#define E4_USTORM_ETH_CONN_AG_CTX_RULE2EN_MASK	0x1
+#define E4_USTORM_ETH_CONN_AG_CTX_RULE2EN_SHIFT	1
+#define E4_USTORM_ETH_CONN_AG_CTX_RULE3EN_MASK	0x1
+#define E4_USTORM_ETH_CONN_AG_CTX_RULE3EN_SHIFT	2
+#define E4_USTORM_ETH_CONN_AG_CTX_RULE4EN_MASK	0x1
+#define E4_USTORM_ETH_CONN_AG_CTX_RULE4EN_SHIFT	3
+#define E4_USTORM_ETH_CONN_AG_CTX_RULE5EN_MASK	0x1
+#define E4_USTORM_ETH_CONN_AG_CTX_RULE5EN_SHIFT	4
+#define E4_USTORM_ETH_CONN_AG_CTX_RULE6EN_MASK	0x1
+#define E4_USTORM_ETH_CONN_AG_CTX_RULE6EN_SHIFT	5
+#define E4_USTORM_ETH_CONN_AG_CTX_RULE7EN_MASK	0x1
+#define E4_USTORM_ETH_CONN_AG_CTX_RULE7EN_SHIFT	6
+#define E4_USTORM_ETH_CONN_AG_CTX_RULE8EN_MASK	0x1
+#define E4_USTORM_ETH_CONN_AG_CTX_RULE8EN_SHIFT	7
 	u8 byte2;
 	u8 byte3;
 	__le16 word0;
@@ -4932,20 +5431,21 @@ struct mstorm_eth_conn_st_ctx {
 };
 
 /* eth connection context */
-struct eth_conn_context {
+struct e4_eth_conn_context {
 	struct tstorm_eth_conn_st_ctx tstorm_st_context;
 	struct regpair tstorm_st_padding[2];
 	struct pstorm_eth_conn_st_ctx pstorm_st_context;
 	struct xstorm_eth_conn_st_ctx xstorm_st_context;
-	struct xstorm_eth_conn_ag_ctx xstorm_ag_context;
+	struct e4_xstorm_eth_conn_ag_ctx xstorm_ag_context;
 	struct ystorm_eth_conn_st_ctx ystorm_st_context;
-	struct ystorm_eth_conn_ag_ctx ystorm_ag_context;
-	struct tstorm_eth_conn_ag_ctx tstorm_ag_context;
-	struct ustorm_eth_conn_ag_ctx ustorm_ag_context;
+	struct e4_ystorm_eth_conn_ag_ctx ystorm_ag_context;
+	struct e4_tstorm_eth_conn_ag_ctx tstorm_ag_context;
+	struct e4_ustorm_eth_conn_ag_ctx ustorm_ag_context;
 	struct ustorm_eth_conn_st_ctx ustorm_st_context;
 	struct mstorm_eth_conn_st_ctx mstorm_st_context;
 };
 
+/* Eth error codes */
 enum eth_error_code {
 	ETH_OK = 0x00,
 	ETH_FILTERS_MAC_ADD_FAIL_FULL,
@@ -4972,6 +5472,7 @@ enum eth_error_code {
 	MAX_ETH_ERROR_CODE
 };
 
+/* Opcodes for the event ring */
 enum eth_event_opcode {
 	ETH_EVENT_UNUSED,
 	ETH_EVENT_VPORT_START,
@@ -4983,13 +5484,14 @@ enum eth_event_opcode {
 	ETH_EVENT_RX_QUEUE_UPDATE,
 	ETH_EVENT_RX_QUEUE_STOP,
 	ETH_EVENT_FILTERS_UPDATE,
-	ETH_EVENT_RESERVED,
-	ETH_EVENT_RESERVED2,
-	ETH_EVENT_RESERVED3,
+	ETH_EVENT_RX_ADD_OPENFLOW_FILTER,
+	ETH_EVENT_RX_DELETE_OPENFLOW_FILTER,
+	ETH_EVENT_RX_CREATE_OPENFLOW_ACTION,
 	ETH_EVENT_RX_ADD_UDP_FILTER,
 	ETH_EVENT_RX_DELETE_UDP_FILTER,
-	ETH_EVENT_RESERVED4,
-	ETH_EVENT_RESERVED5,
+	ETH_EVENT_RX_CREATE_GFT_ACTION,
+	ETH_EVENT_RX_GFT_UPDATE_FILTER,
+	ETH_EVENT_TX_QUEUE_UPDATE,
 	MAX_ETH_EVENT_OPCODE
 };
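
The reserved slots above now carry real GFT/OpenFlow and TX queue update
events. A hedged sketch of how an event-ring completion handler might
dispatch on these opcodes; the handler name and return convention are
illustrative, not the driver's actual callback:

#include <linux/errno.h>

static int example_eth_eqe_event(u8 opcode)
{
	switch (opcode) {
	case ETH_EVENT_RX_CREATE_GFT_ACTION:
	case ETH_EVENT_RX_GFT_UPDATE_FILTER:
		return 0;	/* GFT action/filter ramrod completed */
	case ETH_EVENT_TX_QUEUE_UPDATE:
		return 0;	/* new in this FW: TX queue update done */
	default:
		return -EINVAL;
	}
}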
 
@@ -5039,6 +5541,7 @@ enum eth_filter_type {
 	MAX_ETH_FILTER_TYPE
 };
 
+/* Eth IPv4 Fragment Type */
 enum eth_ipv4_frag_type {
 	ETH_IPV4_NOT_FRAG,
 	ETH_IPV4_FIRST_FRAG,
@@ -5046,12 +5549,14 @@ enum eth_ipv4_frag_type {
 	MAX_ETH_IPV4_FRAG_TYPE
 };
 
+/* Eth IP type */
 enum eth_ip_type {
 	ETH_IPV4,
 	ETH_IPV6,
 	MAX_ETH_IP_TYPE
 };
 
+/* Ethernet Ramrod Command IDs */
 enum eth_ramrod_cmd_id {
 	ETH_RAMROD_UNUSED,
 	ETH_RAMROD_VPORT_START,
@@ -5070,10 +5575,11 @@ enum eth_ramrod_cmd_id {
 	ETH_RAMROD_RX_DELETE_UDP_FILTER,
 	ETH_RAMROD_RX_CREATE_GFT_ACTION,
 	ETH_RAMROD_GFT_UPDATE_FILTER,
+	ETH_RAMROD_TX_QUEUE_UPDATE,
 	MAX_ETH_RAMROD_CMD_ID
 };
 
-/* return code from eth sp ramrods */
+/* Return code from eth sp ramrods */
 struct eth_return_code {
 	u8 value;
 #define ETH_RETURN_CODE_ERR_CODE_MASK	0x1F
@@ -5209,18 +5715,14 @@ struct eth_vport_tx_mode {
 	__le16 reserved2[3];
 };
 
+/* GFT filter update action type */
 enum gft_filter_update_action {
 	GFT_ADD_FILTER,
 	GFT_DELETE_FILTER,
 	MAX_GFT_FILTER_UPDATE_ACTION
 };
 
-enum gft_logic_filter_type {
-	GFT_FILTER_TYPE,
-	RFS_FILTER_TYPE,
-	MAX_GFT_LOGIC_FILTER_TYPE
-};
-
+/* Ramrod data for rx add openflow filter */
 struct rx_add_openflow_filter_data {
 	__le16 action_icid;
 	u8 priority;
@@ -5244,11 +5746,13 @@ struct rx_add_openflow_filter_data {
 	__le16 l4_src_port;
 };
 
+/* Ramrod data for rx create gft action */
 struct rx_create_gft_action_data {
 	u8 vport_id;
 	u8 reserved[7];
 };
 
+/* Ramrod data for rx create openflow action */
 struct rx_create_openflow_action_data {
 	u8 vport_id;
 	u8 reserved[7];
@@ -5286,7 +5790,7 @@ struct rx_queue_start_ramrod_data {
 	struct regpair reserved2;
 };
 
-/* Ramrod data for rx queue start ramrod */
+/* Ramrod data for rx queue stop ramrod */
 struct rx_queue_stop_ramrod_data {
 	__le16 rx_queue_id;
 	u8 complete_cqe_flg;
@@ -5324,14 +5828,22 @@ struct rx_udp_filter_data {
 	__le32 tenant_id;
 };
 
+/* Add or delete a GFT filter - the filter is the packet header of the packet
+ * type that should be steered to a certain FW flow.
+ */
 struct rx_update_gft_filter_data {
 	struct regpair pkt_hdr_addr;
 	__le16 pkt_hdr_length;
-	__le16 rx_qid_or_action_icid;
-	u8 vport_id;
-	u8 filter_type;
+	__le16 action_icid;
+	__le16 rx_qid;
+	__le16 flow_id;
+	__le16 vport_id;
+	u8 action_icid_valid;
+	u8 rx_qid_valid;
+	u8 flow_id_valid;
 	u8 filter_action;
 	u8 assert_on_error;
+	u8 reserved;
 };
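
The old rx_qid_or_action_icid field is split into dedicated rx_qid,
action_icid and flow_id fields, each gated by its own *_valid byte. A minimal
sketch of filling the new layout for a filter add that steers to an RX queue;
the helper name is illustrative and the ramrod send plumbing is elided,
DMA_REGPAIR_LE() being the qed helper for programming a regpair address:

static void example_fill_gft_add(struct rx_update_gft_filter_data *p,
				 dma_addr_t hdr_phys, u16 hdr_len,
				 u16 rx_qid, u16 vport_id)
{
	DMA_REGPAIR_LE(p->pkt_hdr_addr, hdr_phys);  /* filter header buffer */
	p->pkt_hdr_length = cpu_to_le16(hdr_len);
	p->rx_qid = cpu_to_le16(rx_qid);
	p->rx_qid_valid = 1;			/* steer matches to rx_qid */
	p->vport_id = cpu_to_le16(vport_id);
	p->filter_action = GFT_ADD_FILTER;
	p->assert_on_error = 1;			/* FW asserts on failure */
}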
 
 /* Ramrod data for rx queue start ramrod */
@@ -5377,6 +5889,14 @@ struct tx_queue_stop_ramrod_data {
 	__le16 reserved[4];
 };
 
+/* Ramrod data for tx queue update ramrod */
+struct tx_queue_update_ramrod_data {
+	__le16 update_qm_pq_id_flg;
+	__le16 qm_pq_id;
+	__le32 reserved0;
+	struct regpair reserved1[5];
+};
+
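This ramrod is new in this firmware interface; a hedged sketch of its payload
for moving a TX queue to a different QM physical queue (the actual ramrod
submission through the usual qed slow-path queue is omitted):

static void example_fill_txq_update(struct tx_queue_update_ramrod_data *p,
				    u16 new_pq_id)
{
	p->update_qm_pq_id_flg = cpu_to_le16(1);  /* qm_pq_id field is valid */
	p->qm_pq_id = cpu_to_le16(new_pq_id);	  /* new physical queue id */
}
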
 /* Ramrod data for vport update ramrod */
 struct vport_filter_update_ramrod_data {
 	struct eth_filter_cmd_header filter_cmd_hdr;
@@ -5477,219 +5997,219 @@ struct vport_update_ramrod_data {
 	struct eth_vport_rss_config rss_config;
 };
 
-struct xstorm_eth_conn_agctxdq_ext_ldpart {
+struct e4_xstorm_eth_conn_ag_ctx_dq_ext_ldpart {
 	u8 reserved0;
-	u8 eth_state;
+	u8 state;
 	u8 flags0;
-#define XSTORMETHCONNAGCTXDQEXTLDPART_EXIST_IN_QM0_MASK	0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_EXIST_IN_QM0_SHIFT           0
-#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED1_MASK	0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED1_SHIFT              1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED2_MASK	0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED2_SHIFT              2
-#define XSTORMETHCONNAGCTXDQEXTLDPART_EXIST_IN_QM3_MASK	0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_EXIST_IN_QM3_SHIFT           3
-#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED3_MASK	0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED3_SHIFT              4
-#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED4_MASK	0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED4_SHIFT              5
-#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED5_MASK	0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED5_SHIFT              6
-#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED6_MASK	0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED6_SHIFT              7
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_EXIST_IN_QM0_MASK	0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_EXIST_IN_QM0_SHIFT	0
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED1_MASK		0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED1_SHIFT		1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED2_MASK		0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED2_SHIFT		2
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_EXIST_IN_QM3_MASK	0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_EXIST_IN_QM3_SHIFT	3
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED3_MASK		0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED3_SHIFT		4
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED4_MASK		0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED4_SHIFT		5
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED5_MASK		0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED5_SHIFT		6
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED6_MASK		0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED6_SHIFT		7
 	u8 flags1;
-#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED7_MASK	0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED7_SHIFT              0
-#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED8_MASK	0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED8_SHIFT              1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED9_MASK	0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED9_SHIFT              2
-#define XSTORMETHCONNAGCTXDQEXTLDPART_BIT11_MASK	0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_BIT11_SHIFT                  3
-#define XSTORMETHCONNAGCTXDQEXTLDPART_BIT12_MASK	0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_BIT12_SHIFT                  4
-#define XSTORMETHCONNAGCTXDQEXTLDPART_BIT13_MASK	0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_BIT13_SHIFT                  5
-#define XSTORMETHCONNAGCTXDQEXTLDPART_TX_RULE_ACTIVE_MASK	0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_TX_RULE_ACTIVE_SHIFT         6
-#define XSTORMETHCONNAGCTXDQEXTLDPART_DQ_CF_ACTIVE_MASK	0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_DQ_CF_ACTIVE_SHIFT           7
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED7_MASK		0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED7_SHIFT		0
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED8_MASK		0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED8_SHIFT		1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED9_MASK		0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED9_SHIFT		2
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_BIT11_MASK		0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_BIT11_SHIFT		3
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_E5_RESERVED2_MASK	0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_E5_RESERVED2_SHIFT	4
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_E5_RESERVED3_MASK	0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_E5_RESERVED3_SHIFT	5
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_TX_RULE_ACTIVE_MASK	0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_TX_RULE_ACTIVE_SHIFT	6
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_DQ_CF_ACTIVE_MASK	0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_DQ_CF_ACTIVE_SHIFT	7
 	u8 flags2;
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF0_MASK	0x3
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF0_SHIFT                    0
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF1_MASK	0x3
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF1_SHIFT                    2
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF2_MASK	0x3
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF2_SHIFT                    4
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF3_MASK	0x3
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF3_SHIFT                    6
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF0_MASK	0x3
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF0_SHIFT	0
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF1_MASK	0x3
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF1_SHIFT	2
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF2_MASK	0x3
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF2_SHIFT	4
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF3_MASK	0x3
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF3_SHIFT	6
 	u8 flags3;
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF4_MASK	0x3
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF4_SHIFT                    0
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF5_MASK	0x3
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF5_SHIFT                    2
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF6_MASK	0x3
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF6_SHIFT                    4
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF7_MASK	0x3
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF7_SHIFT                    6
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF4_MASK	0x3
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF4_SHIFT	0
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF5_MASK	0x3
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF5_SHIFT	2
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF6_MASK	0x3
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF6_SHIFT	4
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF7_MASK	0x3
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF7_SHIFT	6
 	u8 flags4;
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF8_MASK	0x3
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF8_SHIFT                    0
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF9_MASK	0x3
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF9_SHIFT                    2
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF10_MASK	0x3
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF10_SHIFT                   4
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF11_MASK	0x3
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF11_SHIFT                   6
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF8_MASK	0x3
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF8_SHIFT	0
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF9_MASK	0x3
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF9_SHIFT	2
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF10_MASK	0x3
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF10_SHIFT	4
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF11_MASK	0x3
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF11_SHIFT	6
 	u8 flags5;
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF12_MASK	0x3
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF12_SHIFT                   0
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF13_MASK	0x3
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF13_SHIFT                   2
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF14_MASK	0x3
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF14_SHIFT                   4
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF15_MASK	0x3
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF15_SHIFT                   6
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF12_MASK	0x3
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF12_SHIFT	0
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF13_MASK	0x3
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF13_SHIFT	2
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF14_MASK	0x3
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF14_SHIFT	4
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF15_MASK	0x3
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF15_SHIFT	6
 	u8 flags6;
-#define XSTORMETHCONNAGCTXDQEXTLDPART_GO_TO_BD_CONS_CF_MASK	0x3
-#define XSTORMETHCONNAGCTXDQEXTLDPART_GO_TO_BD_CONS_CF_SHIFT       0
-#define XSTORMETHCONNAGCTXDQEXTLDPART_MULTI_UNICAST_CF_MASK	0x3
-#define XSTORMETHCONNAGCTXDQEXTLDPART_MULTI_UNICAST_CF_SHIFT       2
-#define XSTORMETHCONNAGCTXDQEXTLDPART_DQ_CF_MASK	0x3
-#define XSTORMETHCONNAGCTXDQEXTLDPART_DQ_CF_SHIFT                  4
-#define XSTORMETHCONNAGCTXDQEXTLDPART_TERMINATE_CF_MASK	0x3
-#define XSTORMETHCONNAGCTXDQEXTLDPART_TERMINATE_CF_SHIFT           6
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_GO_TO_BD_CONS_CF_MASK	0x3
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_GO_TO_BD_CONS_CF_SHIFT	0
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_MULTI_UNICAST_CF_MASK	0x3
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_MULTI_UNICAST_CF_SHIFT	2
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_DQ_CF_MASK		0x3
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_DQ_CF_SHIFT		4
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_TERMINATE_CF_MASK	0x3
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_TERMINATE_CF_SHIFT	6
 	u8 flags7;
-#define XSTORMETHCONNAGCTXDQEXTLDPART_FLUSH_Q0_MASK	0x3
-#define XSTORMETHCONNAGCTXDQEXTLDPART_FLUSH_Q0_SHIFT               0
-#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED10_MASK	0x3
-#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED10_SHIFT             2
-#define XSTORMETHCONNAGCTXDQEXTLDPART_SLOW_PATH_MASK	0x3
-#define XSTORMETHCONNAGCTXDQEXTLDPART_SLOW_PATH_SHIFT              4
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF0EN_MASK	0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF0EN_SHIFT                  6
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF1EN_MASK	0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF1EN_SHIFT                  7
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_FLUSH_Q0_MASK		0x3
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_FLUSH_Q0_SHIFT		0
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED10_MASK		0x3
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED10_SHIFT	2
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_SLOW_PATH_MASK		0x3
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_SLOW_PATH_SHIFT		4
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF0EN_MASK		0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF0EN_SHIFT		6
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF1EN_MASK		0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF1EN_SHIFT		7
 	u8 flags8;
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF2EN_MASK	0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF2EN_SHIFT                  0
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF3EN_MASK	0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF3EN_SHIFT                  1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF4EN_MASK	0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF4EN_SHIFT                  2
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF5EN_MASK	0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF5EN_SHIFT                  3
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF6EN_MASK	0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF6EN_SHIFT                  4
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF7EN_MASK	0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF7EN_SHIFT                  5
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF8EN_MASK	0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF8EN_SHIFT                  6
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF9EN_MASK	0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF9EN_SHIFT                  7
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF2EN_MASK	0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF2EN_SHIFT	0
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF3EN_MASK	0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF3EN_SHIFT	1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF4EN_MASK	0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF4EN_SHIFT	2
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF5EN_MASK	0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF5EN_SHIFT	3
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF6EN_MASK	0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF6EN_SHIFT	4
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF7EN_MASK	0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF7EN_SHIFT	5
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF8EN_MASK	0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF8EN_SHIFT	6
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF9EN_MASK	0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF9EN_SHIFT	7
 	u8 flags9;
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF10EN_MASK	0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF10EN_SHIFT                 0
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF11EN_MASK	0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF11EN_SHIFT                 1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF12EN_MASK	0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF12EN_SHIFT                 2
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF13EN_MASK	0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF13EN_SHIFT                 3
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF14EN_MASK	0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF14EN_SHIFT                 4
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF15EN_MASK	0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF15EN_SHIFT                 5
-#define XSTORMETHCONNAGCTXDQEXTLDPART_GO_TO_BD_CONS_CF_EN_MASK	0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_GO_TO_BD_CONS_CF_EN_SHIFT    6
-#define XSTORMETHCONNAGCTXDQEXTLDPART_MULTI_UNICAST_CF_EN_MASK	0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_MULTI_UNICAST_CF_EN_SHIFT    7
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF10EN_MASK			0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF10EN_SHIFT			0
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF11EN_MASK			0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF11EN_SHIFT			1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF12EN_MASK			0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF12EN_SHIFT			2
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF13EN_MASK			0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF13EN_SHIFT			3
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF14EN_MASK			0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF14EN_SHIFT			4
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF15EN_MASK			0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF15EN_SHIFT			5
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_GO_TO_BD_CONS_CF_EN_MASK	0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_GO_TO_BD_CONS_CF_EN_SHIFT	6
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_MULTI_UNICAST_CF_EN_MASK	0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_MULTI_UNICAST_CF_EN_SHIFT	7
 	u8 flags10;
-#define XSTORMETHCONNAGCTXDQEXTLDPART_DQ_CF_EN_MASK	0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_DQ_CF_EN_SHIFT               0
-#define XSTORMETHCONNAGCTXDQEXTLDPART_TERMINATE_CF_EN_MASK	0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_TERMINATE_CF_EN_SHIFT        1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_FLUSH_Q0_EN_MASK	0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_FLUSH_Q0_EN_SHIFT            2
-#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED11_MASK	0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED11_SHIFT             3
-#define XSTORMETHCONNAGCTXDQEXTLDPART_SLOW_PATH_EN_MASK	0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_SLOW_PATH_EN_SHIFT           4
-#define XSTORMETHCONNAGCTXDQEXTLDPART_TPH_ENABLE_EN_RESERVED_MASK	0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_TPH_ENABLE_EN_RESERVED_SHIFT 5
-#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED12_MASK	0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED12_SHIFT             6
-#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED13_MASK	0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED13_SHIFT             7
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_DQ_CF_EN_MASK			0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_DQ_CF_EN_SHIFT			0
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_TERMINATE_CF_EN_MASK		0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_TERMINATE_CF_EN_SHIFT		1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_FLUSH_Q0_EN_MASK		0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_FLUSH_Q0_EN_SHIFT		2
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED11_MASK			0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED11_SHIFT		3
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_SLOW_PATH_EN_MASK		0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_SLOW_PATH_EN_SHIFT		4
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_TPH_ENABLE_EN_RESERVED_MASK	0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_TPH_ENABLE_EN_RESERVED_SHIFT	5
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED12_MASK			0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED12_SHIFT		6
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED13_MASK			0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED13_SHIFT		7
 	u8 flags11;
-#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED14_MASK	0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED14_SHIFT             0
-#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED15_MASK	0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED15_SHIFT             1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_TX_DEC_RULE_EN_MASK	0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_TX_DEC_RULE_EN_SHIFT         2
-#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE5EN_MASK	0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE5EN_SHIFT                3
-#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE6EN_MASK	0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE6EN_SHIFT                4
-#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE7EN_MASK	0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE7EN_SHIFT                5
-#define XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED1_MASK	0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED1_SHIFT           6
-#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE9EN_MASK	0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE9EN_SHIFT                7
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED14_MASK		0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED14_SHIFT	0
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED15_MASK		0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED15_SHIFT	1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_TX_DEC_RULE_EN_MASK	0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_TX_DEC_RULE_EN_SHIFT	2
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RULE5EN_MASK		0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RULE5EN_SHIFT		3
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RULE6EN_MASK		0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RULE6EN_SHIFT		4
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RULE7EN_MASK		0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RULE7EN_SHIFT		5
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED1_MASK	0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED1_SHIFT	6
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RULE9EN_MASK		0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RULE9EN_SHIFT		7
 	u8 flags12;
-#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE10EN_MASK	0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE10EN_SHIFT               0
-#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE11EN_MASK	0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE11EN_SHIFT               1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED2_MASK	0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED2_SHIFT           2
-#define XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED3_MASK	0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED3_SHIFT           3
-#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE14EN_MASK	0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE14EN_SHIFT               4
-#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE15EN_MASK	0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE15EN_SHIFT               5
-#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE16EN_MASK	0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE16EN_SHIFT               6
-#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE17EN_MASK	0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE17EN_SHIFT               7
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RULE10EN_MASK		0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RULE10EN_SHIFT		0
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RULE11EN_MASK		0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RULE11EN_SHIFT		1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED2_MASK	0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED2_SHIFT	2
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED3_MASK	0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED3_SHIFT	3
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RULE14EN_MASK		0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RULE14EN_SHIFT		4
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RULE15EN_MASK		0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RULE15EN_SHIFT		5
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RULE16EN_MASK		0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RULE16EN_SHIFT		6
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RULE17EN_MASK		0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RULE17EN_SHIFT		7
 	u8 flags13;
-#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE18EN_MASK	0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE18EN_SHIFT               0
-#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE19EN_MASK	0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE19EN_SHIFT               1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED4_MASK	0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED4_SHIFT           2
-#define XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED5_MASK	0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED5_SHIFT           3
-#define XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED6_MASK	0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED6_SHIFT           4
-#define XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED7_MASK	0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED7_SHIFT           5
-#define XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED8_MASK	0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED8_SHIFT           6
-#define XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED9_MASK	0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED9_SHIFT           7
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RULE18EN_MASK		0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RULE18EN_SHIFT		0
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RULE19EN_MASK		0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RULE19EN_SHIFT		1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED4_MASK	0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED4_SHIFT	2
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED5_MASK	0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED5_SHIFT	3
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED6_MASK	0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED6_SHIFT	4
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED7_MASK	0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED7_SHIFT	5
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED8_MASK	0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED8_SHIFT	6
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED9_MASK	0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED9_SHIFT	7
 	u8 flags14;
-#define XSTORMETHCONNAGCTXDQEXTLDPART_EDPM_USE_EXT_HDR_MASK	0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_EDPM_USE_EXT_HDR_SHIFT       0
-#define XSTORMETHCONNAGCTXDQEXTLDPART_EDPM_SEND_RAW_L3L4_MASK	0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_EDPM_SEND_RAW_L3L4_SHIFT     1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_EDPM_INBAND_PROP_HDR_MASK	0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_EDPM_INBAND_PROP_HDR_SHIFT   2
-#define XSTORMETHCONNAGCTXDQEXTLDPART_EDPM_SEND_EXT_TUNNEL_MASK	0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_EDPM_SEND_EXT_TUNNEL_SHIFT   3
-#define XSTORMETHCONNAGCTXDQEXTLDPART_L2_EDPM_ENABLE_MASK	0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_L2_EDPM_ENABLE_SHIFT         4
-#define XSTORMETHCONNAGCTXDQEXTLDPART_ROCE_EDPM_ENABLE_MASK	0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_ROCE_EDPM_ENABLE_SHIFT       5
-#define XSTORMETHCONNAGCTXDQEXTLDPART_TPH_ENABLE_MASK	0x3
-#define XSTORMETHCONNAGCTXDQEXTLDPART_TPH_ENABLE_SHIFT             6
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_EDPM_USE_EXT_HDR_MASK		0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_EDPM_USE_EXT_HDR_SHIFT		0
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_EDPM_SEND_RAW_L3L4_MASK		0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_EDPM_SEND_RAW_L3L4_SHIFT	1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_EDPM_INBAND_PROP_HDR_MASK	0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_EDPM_INBAND_PROP_HDR_SHIFT	2
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_EDPM_SEND_EXT_TUNNEL_MASK	0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_EDPM_SEND_EXT_TUNNEL_SHIFT	3
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_L2_EDPM_ENABLE_MASK		0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_L2_EDPM_ENABLE_SHIFT		4
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_ROCE_EDPM_ENABLE_MASK		0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_ROCE_EDPM_ENABLE_SHIFT		5
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_TPH_ENABLE_MASK			0x3
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_TPH_ENABLE_SHIFT		6
 	u8 edpm_event_id;
 	__le16 physical_q0;
-	__le16 ereserved1;
+	__le16 e5_reserved1;
 	__le16 edpm_num_bds;
 	__le16 tx_bd_cons;
 	__le16 tx_bd_prod;
@@ -5706,256 +6226,256 @@ struct xstorm_eth_conn_agctxdq_ext_ldpart {
 	__le32 reg4;
 };
 
-struct mstorm_eth_conn_ag_ctx {
+struct e4_mstorm_eth_conn_ag_ctx {
 	u8 byte0;
 	u8 byte1;
 	u8 flags0;
-#define MSTORM_ETH_CONN_AG_CTX_EXIST_IN_QM0_MASK	0x1
-#define MSTORM_ETH_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
-#define MSTORM_ETH_CONN_AG_CTX_BIT1_MASK	0x1
-#define MSTORM_ETH_CONN_AG_CTX_BIT1_SHIFT         1
-#define MSTORM_ETH_CONN_AG_CTX_CF0_MASK	0x3
-#define MSTORM_ETH_CONN_AG_CTX_CF0_SHIFT          2
-#define MSTORM_ETH_CONN_AG_CTX_CF1_MASK	0x3
-#define MSTORM_ETH_CONN_AG_CTX_CF1_SHIFT          4
-#define MSTORM_ETH_CONN_AG_CTX_CF2_MASK	0x3
-#define MSTORM_ETH_CONN_AG_CTX_CF2_SHIFT          6
+#define E4_MSTORM_ETH_CONN_AG_CTX_EXIST_IN_QM0_MASK	0x1
+#define E4_MSTORM_ETH_CONN_AG_CTX_EXIST_IN_QM0_SHIFT	0
+#define E4_MSTORM_ETH_CONN_AG_CTX_BIT1_MASK		0x1
+#define E4_MSTORM_ETH_CONN_AG_CTX_BIT1_SHIFT		1
+#define E4_MSTORM_ETH_CONN_AG_CTX_CF0_MASK		0x3
+#define E4_MSTORM_ETH_CONN_AG_CTX_CF0_SHIFT		2
+#define E4_MSTORM_ETH_CONN_AG_CTX_CF1_MASK		0x3
+#define E4_MSTORM_ETH_CONN_AG_CTX_CF1_SHIFT		4
+#define E4_MSTORM_ETH_CONN_AG_CTX_CF2_MASK		0x3
+#define E4_MSTORM_ETH_CONN_AG_CTX_CF2_SHIFT		6
 	u8 flags1;
-#define MSTORM_ETH_CONN_AG_CTX_CF0EN_MASK	0x1
-#define MSTORM_ETH_CONN_AG_CTX_CF0EN_SHIFT        0
-#define MSTORM_ETH_CONN_AG_CTX_CF1EN_MASK	0x1
-#define MSTORM_ETH_CONN_AG_CTX_CF1EN_SHIFT        1
-#define MSTORM_ETH_CONN_AG_CTX_CF2EN_MASK	0x1
-#define MSTORM_ETH_CONN_AG_CTX_CF2EN_SHIFT        2
-#define MSTORM_ETH_CONN_AG_CTX_RULE0EN_MASK	0x1
-#define MSTORM_ETH_CONN_AG_CTX_RULE0EN_SHIFT      3
-#define MSTORM_ETH_CONN_AG_CTX_RULE1EN_MASK	0x1
-#define MSTORM_ETH_CONN_AG_CTX_RULE1EN_SHIFT      4
-#define MSTORM_ETH_CONN_AG_CTX_RULE2EN_MASK	0x1
-#define MSTORM_ETH_CONN_AG_CTX_RULE2EN_SHIFT      5
-#define MSTORM_ETH_CONN_AG_CTX_RULE3EN_MASK	0x1
-#define MSTORM_ETH_CONN_AG_CTX_RULE3EN_SHIFT      6
-#define MSTORM_ETH_CONN_AG_CTX_RULE4EN_MASK	0x1
-#define MSTORM_ETH_CONN_AG_CTX_RULE4EN_SHIFT      7
+#define E4_MSTORM_ETH_CONN_AG_CTX_CF0EN_MASK	0x1
+#define E4_MSTORM_ETH_CONN_AG_CTX_CF0EN_SHIFT	0
+#define E4_MSTORM_ETH_CONN_AG_CTX_CF1EN_MASK	0x1
+#define E4_MSTORM_ETH_CONN_AG_CTX_CF1EN_SHIFT	1
+#define E4_MSTORM_ETH_CONN_AG_CTX_CF2EN_MASK	0x1
+#define E4_MSTORM_ETH_CONN_AG_CTX_CF2EN_SHIFT	2
+#define E4_MSTORM_ETH_CONN_AG_CTX_RULE0EN_MASK	0x1
+#define E4_MSTORM_ETH_CONN_AG_CTX_RULE0EN_SHIFT	3
+#define E4_MSTORM_ETH_CONN_AG_CTX_RULE1EN_MASK	0x1
+#define E4_MSTORM_ETH_CONN_AG_CTX_RULE1EN_SHIFT	4
+#define E4_MSTORM_ETH_CONN_AG_CTX_RULE2EN_MASK	0x1
+#define E4_MSTORM_ETH_CONN_AG_CTX_RULE2EN_SHIFT	5
+#define E4_MSTORM_ETH_CONN_AG_CTX_RULE3EN_MASK	0x1
+#define E4_MSTORM_ETH_CONN_AG_CTX_RULE3EN_SHIFT	6
+#define E4_MSTORM_ETH_CONN_AG_CTX_RULE4EN_MASK	0x1
+#define E4_MSTORM_ETH_CONN_AG_CTX_RULE4EN_SHIFT	7
 	__le16 word0;
 	__le16 word1;
 	__le32 reg0;
 	__le32 reg1;
 };
 
-struct xstorm_eth_hw_conn_ag_ctx {
+struct e4_xstorm_eth_hw_conn_ag_ctx {
 	u8 reserved0;
-	u8 eth_state;
+	u8 state;
 	u8 flags0;
-#define XSTORM_ETH_HW_CONN_AG_CTX_EXIST_IN_QM0_MASK	0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_EXIST_IN_QM0_SHIFT           0
-#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED1_MASK	0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED1_SHIFT              1
-#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED2_MASK	0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED2_SHIFT              2
-#define XSTORM_ETH_HW_CONN_AG_CTX_EXIST_IN_QM3_MASK	0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_EXIST_IN_QM3_SHIFT           3
-#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED3_MASK	0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED3_SHIFT              4
-#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED4_MASK	0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED4_SHIFT              5
-#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED5_MASK	0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED5_SHIFT              6
-#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED6_MASK	0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED6_SHIFT              7
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_EXIST_IN_QM0_MASK	0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_EXIST_IN_QM0_SHIFT	0
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED1_MASK	0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED1_SHIFT	1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED2_MASK	0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED2_SHIFT	2
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_EXIST_IN_QM3_MASK	0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_EXIST_IN_QM3_SHIFT	3
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED3_MASK	0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED3_SHIFT	4
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED4_MASK	0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED4_SHIFT	5
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED5_MASK	0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED5_SHIFT	6
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED6_MASK	0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED6_SHIFT	7
 	u8 flags1;
-#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED7_MASK	0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED7_SHIFT              0
-#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED8_MASK	0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED8_SHIFT              1
-#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED9_MASK	0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED9_SHIFT              2
-#define XSTORM_ETH_HW_CONN_AG_CTX_BIT11_MASK	0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_BIT11_SHIFT                  3
-#define XSTORM_ETH_HW_CONN_AG_CTX_BIT12_MASK	0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_BIT12_SHIFT                  4
-#define XSTORM_ETH_HW_CONN_AG_CTX_BIT13_MASK	0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_BIT13_SHIFT                  5
-#define XSTORM_ETH_HW_CONN_AG_CTX_TX_RULE_ACTIVE_MASK	0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_TX_RULE_ACTIVE_SHIFT         6
-#define XSTORM_ETH_HW_CONN_AG_CTX_DQ_CF_ACTIVE_MASK	0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_DQ_CF_ACTIVE_SHIFT           7
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED7_MASK		0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED7_SHIFT		0
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED8_MASK		0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED8_SHIFT		1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED9_MASK		0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED9_SHIFT		2
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_BIT11_MASK			0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_BIT11_SHIFT		3
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_E5_RESERVED2_MASK		0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_E5_RESERVED2_SHIFT		4
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_E5_RESERVED3_MASK		0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_E5_RESERVED3_SHIFT		5
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_TX_RULE_ACTIVE_MASK	0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_TX_RULE_ACTIVE_SHIFT	6
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_DQ_CF_ACTIVE_MASK		0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_DQ_CF_ACTIVE_SHIFT		7
 	u8 flags2;
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF0_MASK	0x3
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF0_SHIFT                    0
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF1_MASK	0x3
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF1_SHIFT                    2
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF2_MASK	0x3
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF2_SHIFT                    4
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF3_MASK	0x3
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF3_SHIFT                    6
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF0_MASK	0x3
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF0_SHIFT	0
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF1_MASK	0x3
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF1_SHIFT	2
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF2_MASK	0x3
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF2_SHIFT	4
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF3_MASK	0x3
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF3_SHIFT	6
 	u8 flags3;
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF4_MASK	0x3
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF4_SHIFT                    0
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF5_MASK	0x3
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF5_SHIFT                    2
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF6_MASK	0x3
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF6_SHIFT                    4
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF7_MASK	0x3
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF7_SHIFT                    6
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF4_MASK	0x3
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF4_SHIFT	0
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF5_MASK	0x3
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF5_SHIFT	2
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF6_MASK	0x3
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF6_SHIFT	4
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF7_MASK	0x3
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF7_SHIFT	6
 	u8 flags4;
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF8_MASK	0x3
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF8_SHIFT                    0
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF9_MASK	0x3
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF9_SHIFT                    2
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF10_MASK	0x3
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF10_SHIFT                   4
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF11_MASK	0x3
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF11_SHIFT                   6
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF8_MASK	0x3
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF8_SHIFT	0
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF9_MASK	0x3
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF9_SHIFT	2
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF10_MASK	0x3
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF10_SHIFT	4
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF11_MASK	0x3
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF11_SHIFT	6
 	u8 flags5;
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF12_MASK	0x3
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF12_SHIFT                   0
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF13_MASK	0x3
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF13_SHIFT                   2
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF14_MASK	0x3
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF14_SHIFT                   4
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF15_MASK	0x3
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF15_SHIFT                   6
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF12_MASK	0x3
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF12_SHIFT	0
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF13_MASK	0x3
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF13_SHIFT	2
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF14_MASK	0x3
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF14_SHIFT	4
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF15_MASK	0x3
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF15_SHIFT	6
 	u8 flags6;
-#define XSTORM_ETH_HW_CONN_AG_CTX_GO_TO_BD_CONS_CF_MASK	0x3
-#define XSTORM_ETH_HW_CONN_AG_CTX_GO_TO_BD_CONS_CF_SHIFT       0
-#define XSTORM_ETH_HW_CONN_AG_CTX_MULTI_UNICAST_CF_MASK	0x3
-#define XSTORM_ETH_HW_CONN_AG_CTX_MULTI_UNICAST_CF_SHIFT       2
-#define XSTORM_ETH_HW_CONN_AG_CTX_DQ_CF_MASK	0x3
-#define XSTORM_ETH_HW_CONN_AG_CTX_DQ_CF_SHIFT                  4
-#define XSTORM_ETH_HW_CONN_AG_CTX_TERMINATE_CF_MASK	0x3
-#define XSTORM_ETH_HW_CONN_AG_CTX_TERMINATE_CF_SHIFT           6
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_GO_TO_BD_CONS_CF_MASK	0x3
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_GO_TO_BD_CONS_CF_SHIFT	0
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_MULTI_UNICAST_CF_MASK	0x3
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_MULTI_UNICAST_CF_SHIFT	2
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_DQ_CF_MASK			0x3
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_DQ_CF_SHIFT		4
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_TERMINATE_CF_MASK		0x3
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_TERMINATE_CF_SHIFT		6
 	u8 flags7;
-#define XSTORM_ETH_HW_CONN_AG_CTX_FLUSH_Q0_MASK	0x3
-#define XSTORM_ETH_HW_CONN_AG_CTX_FLUSH_Q0_SHIFT               0
-#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED10_MASK	0x3
-#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED10_SHIFT             2
-#define XSTORM_ETH_HW_CONN_AG_CTX_SLOW_PATH_MASK	0x3
-#define XSTORM_ETH_HW_CONN_AG_CTX_SLOW_PATH_SHIFT              4
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF0EN_MASK	0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF0EN_SHIFT                  6
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF1EN_MASK	0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF1EN_SHIFT                  7
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_FLUSH_Q0_MASK	0x3
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_FLUSH_Q0_SHIFT	0
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED10_MASK	0x3
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED10_SHIFT	2
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_SLOW_PATH_MASK	0x3
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_SLOW_PATH_SHIFT	4
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF0EN_MASK		0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF0EN_SHIFT	6
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF1EN_MASK		0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF1EN_SHIFT	7
 	u8 flags8;
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF2EN_MASK	0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF2EN_SHIFT                  0
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF3EN_MASK	0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF3EN_SHIFT                  1
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF4EN_MASK	0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF4EN_SHIFT                  2
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF5EN_MASK	0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF5EN_SHIFT                  3
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF6EN_MASK	0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF6EN_SHIFT                  4
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF7EN_MASK	0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF7EN_SHIFT                  5
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF8EN_MASK	0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF8EN_SHIFT                  6
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF9EN_MASK	0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF9EN_SHIFT                  7
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF2EN_MASK		0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF2EN_SHIFT	0
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF3EN_MASK		0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF3EN_SHIFT	1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF4EN_MASK		0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF4EN_SHIFT	2
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF5EN_MASK		0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF5EN_SHIFT	3
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF6EN_MASK		0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF6EN_SHIFT	4
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF7EN_MASK		0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF7EN_SHIFT	5
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF8EN_MASK		0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF8EN_SHIFT	6
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF9EN_MASK		0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF9EN_SHIFT	7
 	u8 flags9;
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF10EN_MASK	0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF10EN_SHIFT                 0
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF11EN_MASK	0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF11EN_SHIFT                 1
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF12EN_MASK	0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF12EN_SHIFT                 2
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF13EN_MASK	0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF13EN_SHIFT                 3
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF14EN_MASK	0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF14EN_SHIFT                 4
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF15EN_MASK	0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF15EN_SHIFT                 5
-#define XSTORM_ETH_HW_CONN_AG_CTX_GO_TO_BD_CONS_CF_EN_MASK	0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_GO_TO_BD_CONS_CF_EN_SHIFT    6
-#define XSTORM_ETH_HW_CONN_AG_CTX_MULTI_UNICAST_CF_EN_MASK	0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_MULTI_UNICAST_CF_EN_SHIFT    7
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF10EN_MASK		0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF10EN_SHIFT		0
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF11EN_MASK		0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF11EN_SHIFT		1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF12EN_MASK		0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF12EN_SHIFT		2
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF13EN_MASK		0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF13EN_SHIFT		3
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF14EN_MASK		0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF14EN_SHIFT		4
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF15EN_MASK		0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF15EN_SHIFT		5
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_GO_TO_BD_CONS_CF_EN_MASK	0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_GO_TO_BD_CONS_CF_EN_SHIFT	6
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_MULTI_UNICAST_CF_EN_MASK	0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_MULTI_UNICAST_CF_EN_SHIFT	7
 	u8 flags10;
-#define XSTORM_ETH_HW_CONN_AG_CTX_DQ_CF_EN_MASK	0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_DQ_CF_EN_SHIFT               0
-#define XSTORM_ETH_HW_CONN_AG_CTX_TERMINATE_CF_EN_MASK	0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_TERMINATE_CF_EN_SHIFT        1
-#define XSTORM_ETH_HW_CONN_AG_CTX_FLUSH_Q0_EN_MASK	0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_FLUSH_Q0_EN_SHIFT            2
-#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED11_MASK	0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED11_SHIFT             3
-#define XSTORM_ETH_HW_CONN_AG_CTX_SLOW_PATH_EN_MASK	0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_SLOW_PATH_EN_SHIFT           4
-#define XSTORM_ETH_HW_CONN_AG_CTX_TPH_ENABLE_EN_RESERVED_MASK	0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_TPH_ENABLE_EN_RESERVED_SHIFT 5
-#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED12_MASK	0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED12_SHIFT             6
-#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED13_MASK	0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED13_SHIFT             7
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_DQ_CF_EN_MASK			0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_DQ_CF_EN_SHIFT			0
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_TERMINATE_CF_EN_MASK		0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_TERMINATE_CF_EN_SHIFT		1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_FLUSH_Q0_EN_MASK			0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_FLUSH_Q0_EN_SHIFT			2
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED11_MASK			0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED11_SHIFT			3
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_SLOW_PATH_EN_MASK			0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_SLOW_PATH_EN_SHIFT			4
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_TPH_ENABLE_EN_RESERVED_MASK	0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_TPH_ENABLE_EN_RESERVED_SHIFT	5
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED12_MASK			0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED12_SHIFT			6
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED13_MASK			0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED13_SHIFT			7
 	u8 flags11;
-#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED14_MASK	0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED14_SHIFT             0
-#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED15_MASK	0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED15_SHIFT             1
-#define XSTORM_ETH_HW_CONN_AG_CTX_TX_DEC_RULE_EN_MASK	0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_TX_DEC_RULE_EN_SHIFT         2
-#define XSTORM_ETH_HW_CONN_AG_CTX_RULE5EN_MASK	0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_RULE5EN_SHIFT                3
-#define XSTORM_ETH_HW_CONN_AG_CTX_RULE6EN_MASK	0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_RULE6EN_SHIFT                4
-#define XSTORM_ETH_HW_CONN_AG_CTX_RULE7EN_MASK	0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_RULE7EN_SHIFT                5
-#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED1_MASK	0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED1_SHIFT           6
-#define XSTORM_ETH_HW_CONN_AG_CTX_RULE9EN_MASK	0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_RULE9EN_SHIFT                7
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED14_MASK		0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED14_SHIFT		0
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED15_MASK		0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED15_SHIFT		1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_TX_DEC_RULE_EN_MASK	0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_TX_DEC_RULE_EN_SHIFT	2
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE5EN_MASK		0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE5EN_SHIFT		3
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE6EN_MASK		0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE6EN_SHIFT		4
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE7EN_MASK		0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE7EN_SHIFT		5
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED1_MASK		0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED1_SHIFT		6
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE9EN_MASK		0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE9EN_SHIFT		7
 	u8 flags12;
-#define XSTORM_ETH_HW_CONN_AG_CTX_RULE10EN_MASK	0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_RULE10EN_SHIFT               0
-#define XSTORM_ETH_HW_CONN_AG_CTX_RULE11EN_MASK	0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_RULE11EN_SHIFT               1
-#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED2_MASK	0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED2_SHIFT           2
-#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED3_MASK	0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED3_SHIFT           3
-#define XSTORM_ETH_HW_CONN_AG_CTX_RULE14EN_MASK	0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_RULE14EN_SHIFT               4
-#define XSTORM_ETH_HW_CONN_AG_CTX_RULE15EN_MASK	0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_RULE15EN_SHIFT               5
-#define XSTORM_ETH_HW_CONN_AG_CTX_RULE16EN_MASK	0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_RULE16EN_SHIFT               6
-#define XSTORM_ETH_HW_CONN_AG_CTX_RULE17EN_MASK	0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_RULE17EN_SHIFT               7
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE10EN_MASK	0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE10EN_SHIFT	0
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE11EN_MASK	0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE11EN_SHIFT	1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED2_MASK	0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED2_SHIFT	2
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED3_MASK	0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED3_SHIFT	3
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE14EN_MASK	0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE14EN_SHIFT	4
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE15EN_MASK	0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE15EN_SHIFT	5
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE16EN_MASK	0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE16EN_SHIFT	6
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE17EN_MASK	0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE17EN_SHIFT	7
 	u8 flags13;
-#define XSTORM_ETH_HW_CONN_AG_CTX_RULE18EN_MASK	0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_RULE18EN_SHIFT               0
-#define XSTORM_ETH_HW_CONN_AG_CTX_RULE19EN_MASK	0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_RULE19EN_SHIFT               1
-#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED4_MASK	0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED4_SHIFT           2
-#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED5_MASK	0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED5_SHIFT           3
-#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED6_MASK	0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED6_SHIFT           4
-#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED7_MASK	0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED7_SHIFT           5
-#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED8_MASK	0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED8_SHIFT           6
-#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED9_MASK	0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED9_SHIFT           7
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE18EN_MASK	0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE18EN_SHIFT	0
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE19EN_MASK	0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE19EN_SHIFT	1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED4_MASK	0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED4_SHIFT	2
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED5_MASK	0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED5_SHIFT	3
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED6_MASK	0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED6_SHIFT	4
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED7_MASK	0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED7_SHIFT	5
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED8_MASK	0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED8_SHIFT	6
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED9_MASK	0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED9_SHIFT	7
 	u8 flags14;
-#define XSTORM_ETH_HW_CONN_AG_CTX_EDPM_USE_EXT_HDR_MASK	0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_EDPM_USE_EXT_HDR_SHIFT       0
-#define XSTORM_ETH_HW_CONN_AG_CTX_EDPM_SEND_RAW_L3L4_MASK	0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_EDPM_SEND_RAW_L3L4_SHIFT     1
-#define XSTORM_ETH_HW_CONN_AG_CTX_EDPM_INBAND_PROP_HDR_MASK	0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_EDPM_INBAND_PROP_HDR_SHIFT   2
-#define XSTORM_ETH_HW_CONN_AG_CTX_EDPM_SEND_EXT_TUNNEL_MASK	0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_EDPM_SEND_EXT_TUNNEL_SHIFT   3
-#define XSTORM_ETH_HW_CONN_AG_CTX_L2_EDPM_ENABLE_MASK	0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_L2_EDPM_ENABLE_SHIFT         4
-#define XSTORM_ETH_HW_CONN_AG_CTX_ROCE_EDPM_ENABLE_MASK	0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_ROCE_EDPM_ENABLE_SHIFT       5
-#define XSTORM_ETH_HW_CONN_AG_CTX_TPH_ENABLE_MASK	0x3
-#define XSTORM_ETH_HW_CONN_AG_CTX_TPH_ENABLE_SHIFT             6
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_EDPM_USE_EXT_HDR_MASK	0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_EDPM_USE_EXT_HDR_SHIFT	0
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_EDPM_SEND_RAW_L3L4_MASK	0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_EDPM_SEND_RAW_L3L4_SHIFT	1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_EDPM_INBAND_PROP_HDR_MASK	0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_EDPM_INBAND_PROP_HDR_SHIFT	2
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_EDPM_SEND_EXT_TUNNEL_MASK	0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_EDPM_SEND_EXT_TUNNEL_SHIFT	3
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_L2_EDPM_ENABLE_MASK	0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_L2_EDPM_ENABLE_SHIFT	4
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_ROCE_EDPM_ENABLE_MASK	0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_ROCE_EDPM_ENABLE_SHIFT	5
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_TPH_ENABLE_MASK		0x3
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_TPH_ENABLE_SHIFT		6
 	u8 edpm_event_id;
 	__le16 physical_q0;
-	__le16 ereserved1;
+	__le16 e5_reserved1;
 	__le16 edpm_num_bds;
 	__le16 tx_bd_cons;
 	__le16 tx_bd_prod;
@@ -5963,6 +6483,7 @@ struct xstorm_eth_hw_conn_ag_ctx {
 	__le16 conn_dpi;
 };
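
All of the *_MASK/*_SHIFT pairs in this context describe sub-fields packed
into the flagsN bytes; the E4_ prefix merely namespaces them for the E4
chip layout. In the qed driver such fields are read and written through the
GET_FIELD()/SET_FIELD() helpers from include/linux/qed/common_hsi.h. A
minimal sketch (the function is hypothetical, and it assumes the struct
itself is renamed to e4_xstorm_eth_hw_conn_ag_ctx elsewhere in this patch):

	/* Hypothetical example: test whether the DQ CF enable bit is
	 * set in an E4 Xstorm ETH HW connection context.
	 */
	static bool dq_cf_enabled(const struct e4_xstorm_eth_hw_conn_ag_ctx *ctx)
	{
		return GET_FIELD(ctx->flags10,
				 E4_XSTORM_ETH_HW_CONN_AG_CTX_DQ_CF_EN);
	}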
 
+/* GFT CAM line struct */
 struct gft_cam_line {
 	__le32 camline;
 #define GFT_CAM_LINE_VALID_MASK		0x1
@@ -5975,6 +6496,7 @@ struct gft_cam_line {
 #define GFT_CAM_LINE_RESERVED1_SHIFT	29
 };
 
+/* GFT CAM line struct with fields breakout */
 struct gft_cam_line_mapped {
 	__le32 camline;
 #define GFT_CAM_LINE_MAPPED_VALID_MASK				0x1
@@ -6008,28 +6530,31 @@ union gft_cam_line_union {
 	struct gft_cam_line_mapped cam_line_mapped;
 };
 
+/* Used in gft_profile_key: Indication for ip version */
 enum gft_profile_ip_version {
 	GFT_PROFILE_IPV4 = 0,
 	GFT_PROFILE_IPV6 = 1,
 	MAX_GFT_PROFILE_IP_VERSION
 };
 
+/* Profile key struct for GFT logic in Prs */
 struct gft_profile_key {
 	__le16 profile_key;
-#define GFT_PROFILE_KEY_IP_VERSION_MASK           0x1
-#define GFT_PROFILE_KEY_IP_VERSION_SHIFT          0
-#define GFT_PROFILE_KEY_TUNNEL_IP_VERSION_MASK    0x1
-#define GFT_PROFILE_KEY_TUNNEL_IP_VERSION_SHIFT   1
-#define GFT_PROFILE_KEY_UPPER_PROTOCOL_TYPE_MASK  0xF
-#define GFT_PROFILE_KEY_UPPER_PROTOCOL_TYPE_SHIFT 2
-#define GFT_PROFILE_KEY_TUNNEL_TYPE_MASK          0xF
-#define GFT_PROFILE_KEY_TUNNEL_TYPE_SHIFT         6
-#define GFT_PROFILE_KEY_PF_ID_MASK                0xF
-#define GFT_PROFILE_KEY_PF_ID_SHIFT               10
-#define GFT_PROFILE_KEY_RESERVED0_MASK            0x3
-#define GFT_PROFILE_KEY_RESERVED0_SHIFT           14
+#define GFT_PROFILE_KEY_IP_VERSION_MASK			0x1
+#define GFT_PROFILE_KEY_IP_VERSION_SHIFT		0
+#define GFT_PROFILE_KEY_TUNNEL_IP_VERSION_MASK		0x1
+#define GFT_PROFILE_KEY_TUNNEL_IP_VERSION_SHIFT		1
+#define GFT_PROFILE_KEY_UPPER_PROTOCOL_TYPE_MASK	0xF
+#define GFT_PROFILE_KEY_UPPER_PROTOCOL_TYPE_SHIFT	2
+#define GFT_PROFILE_KEY_TUNNEL_TYPE_MASK		0xF
+#define GFT_PROFILE_KEY_TUNNEL_TYPE_SHIFT		6
+#define GFT_PROFILE_KEY_PF_ID_MASK			0xF
+#define GFT_PROFILE_KEY_PF_ID_SHIFT			10
+#define GFT_PROFILE_KEY_RESERVED0_MASK			0x3
+#define GFT_PROFILE_KEY_RESERVED0_SHIFT			14
 };
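
These mask/shift pairs are likewise meant to be combined with the generic
qed field accessors rather than open-coded bit arithmetic. A minimal sketch
of packing a profile key, assuming the SET_FIELD() helper from
include/linux/qed/common_hsi.h (the function and the chosen field values
are illustrative only; the result still needs cpu_to_le16() before being
stored in the __le16 profile_key field):

	/* Hypothetical example: build a plain (non-tunneled) IPv4 RoCE
	 * profile key in host byte order.
	 */
	static u16 build_gft_profile_key(void)
	{
		u16 key = 0;

		SET_FIELD(key, GFT_PROFILE_KEY_IP_VERSION, GFT_PROFILE_IPV4);
		SET_FIELD(key, GFT_PROFILE_KEY_TUNNEL_TYPE,
			  GFT_PROFILE_NO_TUNNEL);
		SET_FIELD(key, GFT_PROFILE_KEY_UPPER_PROTOCOL_TYPE,
			  GFT_PROFILE_ROCE_PROTOCOL);
		return key;
	}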
 
+/* Used in gft_profile_key: Indication for tunnel type */
 enum gft_profile_tunnel_type {
 	GFT_PROFILE_NO_TUNNEL = 0,
 	GFT_PROFILE_VXLAN_TUNNEL = 1,
@@ -6040,6 +6565,7 @@ enum gft_profile_tunnel_type {
 	MAX_GFT_PROFILE_TUNNEL_TYPE
 };
 
+/* Used in gft_profile_key: Indication for protocol type */
 enum gft_profile_upper_protocol_type {
 	GFT_PROFILE_ROCE_PROTOCOL = 0,
 	GFT_PROFILE_RROCE_PROTOCOL = 1,
@@ -6060,6 +6586,7 @@ enum gft_profile_upper_protocol_type {
 	MAX_GFT_PROFILE_UPPER_PROTOCOL_TYPE
 };
 
+/* GFT RAM line struct */
 struct gft_ram_line {
 	__le32 lo;
 #define GFT_RAM_LINE_VLAN_SELECT_MASK			0x3
@@ -6149,6 +6676,7 @@ struct gft_ram_line {
 #define GFT_RAM_LINE_RESERVED1_SHIFT			10
 };
 
+/* Used in the first 2 bits of gft_ram_line: Indication for vlan mask */
 enum gft_vlan_select {
 	INNER_PROVIDER_VLAN = 0,
 	INNER_VLAN = 1,
@@ -6157,10 +6685,205 @@ enum gft_vlan_select {
 	MAX_GFT_VLAN_SELECT
 };
 
+/* The rdma task context of Ystorm */
+struct ystorm_rdma_task_st_ctx {
+	struct regpair temp[4];
+};
+
+struct e4_ystorm_rdma_task_ag_ctx {
+	u8 reserved;
+	u8 byte1;
+	__le16 msem_ctx_upd_seq;
+	u8 flags0;
+#define E4_YSTORM_RDMA_TASK_AG_CTX_CONNECTION_TYPE_MASK		0xF
+#define E4_YSTORM_RDMA_TASK_AG_CTX_CONNECTION_TYPE_SHIFT	0
+#define E4_YSTORM_RDMA_TASK_AG_CTX_EXIST_IN_QM0_MASK		0x1
+#define E4_YSTORM_RDMA_TASK_AG_CTX_EXIST_IN_QM0_SHIFT		4
+#define E4_YSTORM_RDMA_TASK_AG_CTX_BIT1_MASK			0x1
+#define E4_YSTORM_RDMA_TASK_AG_CTX_BIT1_SHIFT			5
+#define E4_YSTORM_RDMA_TASK_AG_CTX_VALID_MASK			0x1
+#define E4_YSTORM_RDMA_TASK_AG_CTX_VALID_SHIFT			6
+#define E4_YSTORM_RDMA_TASK_AG_CTX_BIT3_MASK			0x1
+#define E4_YSTORM_RDMA_TASK_AG_CTX_BIT3_SHIFT			7
+	u8 flags1;
+#define E4_YSTORM_RDMA_TASK_AG_CTX_CF0_MASK		0x3
+#define E4_YSTORM_RDMA_TASK_AG_CTX_CF0_SHIFT		0
+#define E4_YSTORM_RDMA_TASK_AG_CTX_CF1_MASK		0x3
+#define E4_YSTORM_RDMA_TASK_AG_CTX_CF1_SHIFT		2
+#define E4_YSTORM_RDMA_TASK_AG_CTX_CF2SPECIAL_MASK	0x3
+#define E4_YSTORM_RDMA_TASK_AG_CTX_CF2SPECIAL_SHIFT	4
+#define E4_YSTORM_RDMA_TASK_AG_CTX_CF0EN_MASK		0x1
+#define E4_YSTORM_RDMA_TASK_AG_CTX_CF0EN_SHIFT		6
+#define E4_YSTORM_RDMA_TASK_AG_CTX_CF1EN_MASK		0x1
+#define E4_YSTORM_RDMA_TASK_AG_CTX_CF1EN_SHIFT		7
+	u8 flags2;
+#define E4_YSTORM_RDMA_TASK_AG_CTX_BIT4_MASK		0x1
+#define E4_YSTORM_RDMA_TASK_AG_CTX_BIT4_SHIFT		0
+#define E4_YSTORM_RDMA_TASK_AG_CTX_RULE0EN_MASK		0x1
+#define E4_YSTORM_RDMA_TASK_AG_CTX_RULE0EN_SHIFT	1
+#define E4_YSTORM_RDMA_TASK_AG_CTX_RULE1EN_MASK		0x1
+#define E4_YSTORM_RDMA_TASK_AG_CTX_RULE1EN_SHIFT	2
+#define E4_YSTORM_RDMA_TASK_AG_CTX_RULE2EN_MASK		0x1
+#define E4_YSTORM_RDMA_TASK_AG_CTX_RULE2EN_SHIFT	3
+#define E4_YSTORM_RDMA_TASK_AG_CTX_RULE3EN_MASK		0x1
+#define E4_YSTORM_RDMA_TASK_AG_CTX_RULE3EN_SHIFT	4
+#define E4_YSTORM_RDMA_TASK_AG_CTX_RULE4EN_MASK		0x1
+#define E4_YSTORM_RDMA_TASK_AG_CTX_RULE4EN_SHIFT	5
+#define E4_YSTORM_RDMA_TASK_AG_CTX_RULE5EN_MASK		0x1
+#define E4_YSTORM_RDMA_TASK_AG_CTX_RULE5EN_SHIFT	6
+#define E4_YSTORM_RDMA_TASK_AG_CTX_RULE6EN_MASK		0x1
+#define E4_YSTORM_RDMA_TASK_AG_CTX_RULE6EN_SHIFT	7
+	u8 key;
+	__le32 mw_cnt;
+	u8 ref_cnt_seq;
+	u8 ctx_upd_seq;
+	__le16 dif_flags;
+	__le16 tx_ref_count;
+	__le16 last_used_ltid;
+	__le16 parent_mr_lo;
+	__le16 parent_mr_hi;
+	__le32 fbo_lo;
+	__le32 fbo_hi;
+};
+
+struct e4_mstorm_rdma_task_ag_ctx {
+	u8 reserved;
+	u8 byte1;
+	__le16 icid;
+	u8 flags0;
+#define E4_MSTORM_RDMA_TASK_AG_CTX_CONNECTION_TYPE_MASK		0xF
+#define E4_MSTORM_RDMA_TASK_AG_CTX_CONNECTION_TYPE_SHIFT	0
+#define E4_MSTORM_RDMA_TASK_AG_CTX_EXIST_IN_QM0_MASK		0x1
+#define E4_MSTORM_RDMA_TASK_AG_CTX_EXIST_IN_QM0_SHIFT		4
+#define E4_MSTORM_RDMA_TASK_AG_CTX_BIT1_MASK			0x1
+#define E4_MSTORM_RDMA_TASK_AG_CTX_BIT1_SHIFT			5
+#define E4_MSTORM_RDMA_TASK_AG_CTX_BIT2_MASK			0x1
+#define E4_MSTORM_RDMA_TASK_AG_CTX_BIT2_SHIFT			6
+#define E4_MSTORM_RDMA_TASK_AG_CTX_BIT3_MASK			0x1
+#define E4_MSTORM_RDMA_TASK_AG_CTX_BIT3_SHIFT			7
+	u8 flags1;
+#define E4_MSTORM_RDMA_TASK_AG_CTX_CF0_MASK	0x3
+#define E4_MSTORM_RDMA_TASK_AG_CTX_CF0_SHIFT	0
+#define E4_MSTORM_RDMA_TASK_AG_CTX_CF1_MASK	0x3
+#define E4_MSTORM_RDMA_TASK_AG_CTX_CF1_SHIFT	2
+#define E4_MSTORM_RDMA_TASK_AG_CTX_CF2_MASK	0x3
+#define E4_MSTORM_RDMA_TASK_AG_CTX_CF2_SHIFT	4
+#define E4_MSTORM_RDMA_TASK_AG_CTX_CF0EN_MASK	0x1
+#define E4_MSTORM_RDMA_TASK_AG_CTX_CF0EN_SHIFT	6
+#define E4_MSTORM_RDMA_TASK_AG_CTX_CF1EN_MASK	0x1
+#define E4_MSTORM_RDMA_TASK_AG_CTX_CF1EN_SHIFT	7
+	u8 flags2;
+#define E4_MSTORM_RDMA_TASK_AG_CTX_CF2EN_MASK		0x1
+#define E4_MSTORM_RDMA_TASK_AG_CTX_CF2EN_SHIFT		0
+#define E4_MSTORM_RDMA_TASK_AG_CTX_RULE0EN_MASK		0x1
+#define E4_MSTORM_RDMA_TASK_AG_CTX_RULE0EN_SHIFT	1
+#define E4_MSTORM_RDMA_TASK_AG_CTX_RULE1EN_MASK		0x1
+#define E4_MSTORM_RDMA_TASK_AG_CTX_RULE1EN_SHIFT	2
+#define E4_MSTORM_RDMA_TASK_AG_CTX_RULE2EN_MASK		0x1
+#define E4_MSTORM_RDMA_TASK_AG_CTX_RULE2EN_SHIFT	3
+#define E4_MSTORM_RDMA_TASK_AG_CTX_RULE3EN_MASK		0x1
+#define E4_MSTORM_RDMA_TASK_AG_CTX_RULE3EN_SHIFT	4
+#define E4_MSTORM_RDMA_TASK_AG_CTX_RULE4EN_MASK		0x1
+#define E4_MSTORM_RDMA_TASK_AG_CTX_RULE4EN_SHIFT	5
+#define E4_MSTORM_RDMA_TASK_AG_CTX_RULE5EN_MASK		0x1
+#define E4_MSTORM_RDMA_TASK_AG_CTX_RULE5EN_SHIFT	6
+#define E4_MSTORM_RDMA_TASK_AG_CTX_RULE6EN_MASK		0x1
+#define E4_MSTORM_RDMA_TASK_AG_CTX_RULE6EN_SHIFT	7
+	u8 key;
+	__le32 mw_cnt;
+	u8 ref_cnt_seq;
+	u8 ctx_upd_seq;
+	__le16 dif_flags;
+	__le16 tx_ref_count;
+	__le16 last_used_ltid;
+	__le16 parent_mr_lo;
+	__le16 parent_mr_hi;
+	__le32 fbo_lo;
+	__le32 fbo_hi;
+};
+
+/* The rdma task context of Mstorm */
 struct mstorm_rdma_task_st_ctx {
 	struct regpair temp[4];
 };
 
+/* The rdma task context of Ustorm */
+struct ustorm_rdma_task_st_ctx {
+	struct regpair temp[2];
+};
+
+struct e4_ustorm_rdma_task_ag_ctx {
+	u8 reserved;
+	u8 byte1;
+	__le16 icid;
+	u8 flags0;
+#define E4_USTORM_RDMA_TASK_AG_CTX_CONNECTION_TYPE_MASK		0xF
+#define E4_USTORM_RDMA_TASK_AG_CTX_CONNECTION_TYPE_SHIFT	0
+#define E4_USTORM_RDMA_TASK_AG_CTX_EXIST_IN_QM0_MASK		0x1
+#define E4_USTORM_RDMA_TASK_AG_CTX_EXIST_IN_QM0_SHIFT		4
+#define E4_USTORM_RDMA_TASK_AG_CTX_DIF_RUNT_VALID_MASK		0x1
+#define E4_USTORM_RDMA_TASK_AG_CTX_DIF_RUNT_VALID_SHIFT		5
+#define E4_USTORM_RDMA_TASK_AG_CTX_DIF_WRITE_RESULT_CF_MASK	0x3
+#define E4_USTORM_RDMA_TASK_AG_CTX_DIF_WRITE_RESULT_CF_SHIFT	6
+	u8 flags1;
+#define E4_USTORM_RDMA_TASK_AG_CTX_DIF_RESULT_TOGGLE_BIT_MASK	0x3
+#define E4_USTORM_RDMA_TASK_AG_CTX_DIF_RESULT_TOGGLE_BIT_SHIFT	0
+#define E4_USTORM_RDMA_TASK_AG_CTX_DIF_TX_IO_FLG_MASK		0x3
+#define E4_USTORM_RDMA_TASK_AG_CTX_DIF_TX_IO_FLG_SHIFT		2
+#define E4_USTORM_RDMA_TASK_AG_CTX_CF3_MASK			0x3
+#define E4_USTORM_RDMA_TASK_AG_CTX_CF3_SHIFT			4
+#define E4_USTORM_RDMA_TASK_AG_CTX_DIF_ERROR_CF_MASK		0x3
+#define E4_USTORM_RDMA_TASK_AG_CTX_DIF_ERROR_CF_SHIFT		6
+	u8 flags2;
+#define E4_USTORM_RDMA_TASK_AG_CTX_DIF_WRITE_RESULT_CF_EN_MASK	0x1
+#define E4_USTORM_RDMA_TASK_AG_CTX_DIF_WRITE_RESULT_CF_EN_SHIFT	0
+#define E4_USTORM_RDMA_TASK_AG_CTX_RESERVED2_MASK		0x1
+#define E4_USTORM_RDMA_TASK_AG_CTX_RESERVED2_SHIFT		1
+#define E4_USTORM_RDMA_TASK_AG_CTX_RESERVED3_MASK		0x1
+#define E4_USTORM_RDMA_TASK_AG_CTX_RESERVED3_SHIFT		2
+#define E4_USTORM_RDMA_TASK_AG_CTX_CF3EN_MASK			0x1
+#define E4_USTORM_RDMA_TASK_AG_CTX_CF3EN_SHIFT			3
+#define E4_USTORM_RDMA_TASK_AG_CTX_DIF_ERROR_CF_EN_MASK		0x1
+#define E4_USTORM_RDMA_TASK_AG_CTX_DIF_ERROR_CF_EN_SHIFT	4
+#define E4_USTORM_RDMA_TASK_AG_CTX_RULE0EN_MASK			0x1
+#define E4_USTORM_RDMA_TASK_AG_CTX_RULE0EN_SHIFT		5
+#define E4_USTORM_RDMA_TASK_AG_CTX_RULE1EN_MASK			0x1
+#define E4_USTORM_RDMA_TASK_AG_CTX_RULE1EN_SHIFT		6
+#define E4_USTORM_RDMA_TASK_AG_CTX_RULE2EN_MASK			0x1
+#define E4_USTORM_RDMA_TASK_AG_CTX_RULE2EN_SHIFT		7
+	u8 flags3;
+#define E4_USTORM_RDMA_TASK_AG_CTX_RULE3EN_MASK		0x1
+#define E4_USTORM_RDMA_TASK_AG_CTX_RULE3EN_SHIFT	0
+#define E4_USTORM_RDMA_TASK_AG_CTX_RULE4EN_MASK		0x1
+#define E4_USTORM_RDMA_TASK_AG_CTX_RULE4EN_SHIFT	1
+#define E4_USTORM_RDMA_TASK_AG_CTX_RULE5EN_MASK		0x1
+#define E4_USTORM_RDMA_TASK_AG_CTX_RULE5EN_SHIFT	2
+#define E4_USTORM_RDMA_TASK_AG_CTX_RULE6EN_MASK		0x1
+#define E4_USTORM_RDMA_TASK_AG_CTX_RULE6EN_SHIFT	3
+#define E4_USTORM_RDMA_TASK_AG_CTX_DIF_ERROR_TYPE_MASK	0xF
+#define E4_USTORM_RDMA_TASK_AG_CTX_DIF_ERROR_TYPE_SHIFT	4
+	__le32 dif_err_intervals;
+	__le32 dif_error_1st_interval;
+	__le32 reg2;
+	__le32 dif_runt_value;
+	__le32 reg4;
+	__le32 reg5;
+};
+
+/* RDMA task context */
+struct e4_rdma_task_context {
+	struct ystorm_rdma_task_st_ctx ystorm_st_context;
+	struct e4_ystorm_rdma_task_ag_ctx ystorm_ag_context;
+	struct tdif_task_context tdif_context;
+	struct e4_mstorm_rdma_task_ag_ctx mstorm_ag_context;
+	struct mstorm_rdma_task_st_ctx mstorm_st_context;
+	struct rdif_task_context rdif_context;
+	struct ustorm_rdma_task_st_ctx ustorm_st_context;
+	struct regpair ustorm_st_padding[2];
+	struct e4_ustorm_rdma_task_ag_ctx ustorm_ag_context;
+};
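
The aggregate struct above fixes the per-storm layout the firmware expects
for an E4 RDMA task. A minimal sketch of seeding it, again assuming
SET_FIELD() from include/linux/qed/common_hsi.h (the helper name is
hypothetical; real task-initialization code in qed sets many more fields):

	/* Hypothetical example: zero a task context and mark the Ystorm
	 * aggregative part valid.
	 */
	static void init_rdma_task_ctx(struct e4_rdma_task_context *ctx)
	{
		memset(ctx, 0, sizeof(*ctx));
		SET_FIELD(ctx->ystorm_ag_context.flags0,
			  E4_YSTORM_RDMA_TASK_AG_CTX_VALID, 1);
	}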
+
+/* rdma function close ramrod data */
 struct rdma_close_func_ramrod_data {
 	u8 cnq_start_offset;
 	u8 num_cnqs;
@@ -6169,6 +6892,7 @@ struct rdma_close_func_ramrod_data {
 	u8 reserved[4];
 };
 
+/* rdma function init CNQ parameters */
 struct rdma_cnq_params {
 	__le16 sb_num;
 	u8 sb_index;
@@ -6179,6 +6903,7 @@ struct rdma_cnq_params {
 	u8 reserved1[6];
 };
 
+/* rdma create cq ramrod data */
 struct rdma_create_cq_ramrod_data {
 	struct regpair cq_handle;
 	struct regpair pbl_addr;
@@ -6193,21 +6918,25 @@ struct rdma_create_cq_ramrod_data {
 	__le16 reserved1;
 };
 
+/* rdma deregister tid ramrod data */
 struct rdma_deregister_tid_ramrod_data {
 	__le32 itid;
 	__le32 reserved;
 };
 
+/* rdma destroy cq output params */
 struct rdma_destroy_cq_output_params {
 	__le16 cnq_num;
 	__le16 reserved0;
 	__le32 reserved1;
 };
 
+/* rdma destroy cq ramrod data */
 struct rdma_destroy_cq_ramrod_data {
 	struct regpair output_params_addr;
 };
 
+/* RDMA slow path EQ cmd IDs */
 enum rdma_event_opcode {
 	RDMA_EVENT_UNUSED,
 	RDMA_EVENT_FUNC_INIT,
@@ -6223,6 +6952,7 @@ enum rdma_event_opcode {
 	MAX_RDMA_EVENT_OPCODE
 };
 
+/* RDMA FW return code for slow path ramrods */
 enum rdma_fw_return_code {
 	RDMA_RETURN_OK = 0,
 	RDMA_RETURN_REGISTER_MR_BAD_STATE_ERR,
@@ -6232,20 +6962,24 @@ enum rdma_fw_return_code {
 	MAX_RDMA_FW_RETURN_CODE
 };
 
+/* rdma function init header */
 struct rdma_init_func_hdr {
 	u8 cnq_start_offset;
 	u8 num_cnqs;
 	u8 cq_ring_mode;
 	u8 vf_id;
 	u8 vf_valid;
-	u8 reserved[3];
+	u8 relaxed_ordering;
+	u8 reserved[2];
 };
 
+/* rdma function init ramrod data */
 struct rdma_init_func_ramrod_data {
 	struct rdma_init_func_hdr params_header;
 	struct rdma_cnq_params cnq_params[NUM_OF_GLOBAL_QUEUES];
 };
 
+/* RDMA ramrod command IDs */
 enum rdma_ramrod_cmd_id {
 	RDMA_RAMROD_UNUSED,
 	RDMA_RAMROD_FUNC_INIT,
@@ -6261,42 +6995,43 @@ enum rdma_ramrod_cmd_id {
 	MAX_RDMA_RAMROD_CMD_ID
 };
 
+/* rdma register tid ramrod data */
 struct rdma_register_tid_ramrod_data {
 	__le16 flags;
 #define RDMA_REGISTER_TID_RAMROD_DATA_PAGE_SIZE_LOG_MASK	0x1F
 #define RDMA_REGISTER_TID_RAMROD_DATA_PAGE_SIZE_LOG_SHIFT	0
 #define RDMA_REGISTER_TID_RAMROD_DATA_TWO_LEVEL_PBL_MASK	0x1
 #define RDMA_REGISTER_TID_RAMROD_DATA_TWO_LEVEL_PBL_SHIFT	5
-#define RDMA_REGISTER_TID_RAMROD_DATA_ZERO_BASED_MASK	0x1
-#define RDMA_REGISTER_TID_RAMROD_DATA_ZERO_BASED_SHIFT	6
-#define RDMA_REGISTER_TID_RAMROD_DATA_PHY_MR_MASK	0x1
-#define RDMA_REGISTER_TID_RAMROD_DATA_PHY_MR_SHIFT	7
-#define RDMA_REGISTER_TID_RAMROD_DATA_REMOTE_READ_MASK	0x1
-#define RDMA_REGISTER_TID_RAMROD_DATA_REMOTE_READ_SHIFT	8
-#define RDMA_REGISTER_TID_RAMROD_DATA_REMOTE_WRITE_MASK	0x1
+#define RDMA_REGISTER_TID_RAMROD_DATA_ZERO_BASED_MASK		0x1
+#define RDMA_REGISTER_TID_RAMROD_DATA_ZERO_BASED_SHIFT		6
+#define RDMA_REGISTER_TID_RAMROD_DATA_PHY_MR_MASK		0x1
+#define RDMA_REGISTER_TID_RAMROD_DATA_PHY_MR_SHIFT		7
+#define RDMA_REGISTER_TID_RAMROD_DATA_REMOTE_READ_MASK		0x1
+#define RDMA_REGISTER_TID_RAMROD_DATA_REMOTE_READ_SHIFT		8
+#define RDMA_REGISTER_TID_RAMROD_DATA_REMOTE_WRITE_MASK		0x1
 #define RDMA_REGISTER_TID_RAMROD_DATA_REMOTE_WRITE_SHIFT	9
 #define RDMA_REGISTER_TID_RAMROD_DATA_REMOTE_ATOMIC_MASK	0x1
 #define RDMA_REGISTER_TID_RAMROD_DATA_REMOTE_ATOMIC_SHIFT	10
-#define RDMA_REGISTER_TID_RAMROD_DATA_LOCAL_WRITE_MASK	0x1
-#define RDMA_REGISTER_TID_RAMROD_DATA_LOCAL_WRITE_SHIFT	11
-#define RDMA_REGISTER_TID_RAMROD_DATA_LOCAL_READ_MASK	0x1
-#define RDMA_REGISTER_TID_RAMROD_DATA_LOCAL_READ_SHIFT	12
+#define RDMA_REGISTER_TID_RAMROD_DATA_LOCAL_WRITE_MASK		0x1
+#define RDMA_REGISTER_TID_RAMROD_DATA_LOCAL_WRITE_SHIFT		11
+#define RDMA_REGISTER_TID_RAMROD_DATA_LOCAL_READ_MASK		0x1
+#define RDMA_REGISTER_TID_RAMROD_DATA_LOCAL_READ_SHIFT		12
 #define RDMA_REGISTER_TID_RAMROD_DATA_ENABLE_MW_BIND_MASK	0x1
 #define RDMA_REGISTER_TID_RAMROD_DATA_ENABLE_MW_BIND_SHIFT	13
-#define RDMA_REGISTER_TID_RAMROD_DATA_RESERVED_MASK	0x3
-#define RDMA_REGISTER_TID_RAMROD_DATA_RESERVED_SHIFT	14
+#define RDMA_REGISTER_TID_RAMROD_DATA_RESERVED_MASK		0x3
+#define RDMA_REGISTER_TID_RAMROD_DATA_RESERVED_SHIFT		14
 	u8 flags1;
 #define RDMA_REGISTER_TID_RAMROD_DATA_PBL_PAGE_SIZE_LOG_MASK	0x1F
-#define RDMA_REGISTER_TID_RAMROD_DATA_PBL_PAGE_SIZE_LOG_SHIFT 0
-#define RDMA_REGISTER_TID_RAMROD_DATA_TID_TYPE_MASK	0x7
-#define RDMA_REGISTER_TID_RAMROD_DATA_TID_TYPE_SHIFT	5
+#define RDMA_REGISTER_TID_RAMROD_DATA_PBL_PAGE_SIZE_LOG_SHIFT	0
+#define RDMA_REGISTER_TID_RAMROD_DATA_TID_TYPE_MASK		0x7
+#define RDMA_REGISTER_TID_RAMROD_DATA_TID_TYPE_SHIFT		5
 	u8 flags2;
-#define RDMA_REGISTER_TID_RAMROD_DATA_DMA_MR_MASK	0x1
-#define RDMA_REGISTER_TID_RAMROD_DATA_DMA_MR_SHIFT	0
+#define RDMA_REGISTER_TID_RAMROD_DATA_DMA_MR_MASK		0x1
+#define RDMA_REGISTER_TID_RAMROD_DATA_DMA_MR_SHIFT		0
 #define RDMA_REGISTER_TID_RAMROD_DATA_DIF_ON_HOST_FLG_MASK	0x1
 #define RDMA_REGISTER_TID_RAMROD_DATA_DIF_ON_HOST_FLG_SHIFT	1
-#define RDMA_REGISTER_TID_RAMROD_DATA_RESERVED1_MASK	0x3F
-#define RDMA_REGISTER_TID_RAMROD_DATA_RESERVED1_SHIFT	2
+#define RDMA_REGISTER_TID_RAMROD_DATA_RESERVED1_MASK		0x3F
+#define RDMA_REGISTER_TID_RAMROD_DATA_RESERVED1_SHIFT		2
 	u8 key;
 	u8 length_hi;
 	u8 vf_id;
@@ -6313,19 +7048,21 @@ struct rdma_register_tid_ramrod_data {
 	__le32 reserved4[2];
 };
 
+/* rdma resize cq output params */
 struct rdma_resize_cq_output_params {
 	__le32 old_cq_cons;
 	__le32 old_cq_prod;
 };
 
+/* rdma resize cq ramrod data */
 struct rdma_resize_cq_ramrod_data {
 	u8 flags;
-#define RDMA_RESIZE_CQ_RAMROD_DATA_TOGGLE_BIT_MASK        0x1
-#define RDMA_RESIZE_CQ_RAMROD_DATA_TOGGLE_BIT_SHIFT       0
-#define RDMA_RESIZE_CQ_RAMROD_DATA_IS_TWO_LEVEL_PBL_MASK  0x1
-#define RDMA_RESIZE_CQ_RAMROD_DATA_IS_TWO_LEVEL_PBL_SHIFT 1
-#define RDMA_RESIZE_CQ_RAMROD_DATA_RESERVED_MASK          0x3F
-#define RDMA_RESIZE_CQ_RAMROD_DATA_RESERVED_SHIFT         2
+#define RDMA_RESIZE_CQ_RAMROD_DATA_TOGGLE_BIT_MASK		0x1
+#define RDMA_RESIZE_CQ_RAMROD_DATA_TOGGLE_BIT_SHIFT		0
+#define RDMA_RESIZE_CQ_RAMROD_DATA_IS_TWO_LEVEL_PBL_MASK	0x1
+#define RDMA_RESIZE_CQ_RAMROD_DATA_IS_TWO_LEVEL_PBL_SHIFT	1
+#define RDMA_RESIZE_CQ_RAMROD_DATA_RESERVED_MASK		0x3F
+#define RDMA_RESIZE_CQ_RAMROD_DATA_RESERVED_SHIFT		2
 	u8 pbl_log_page_size;
 	__le16 pbl_num_pages;
 	__le32 max_cqes;
@@ -6333,10 +7070,12 @@ struct rdma_resize_cq_ramrod_data {
 	struct regpair output_params_addr;
 };
 
+/* rdma SRQ context */
 struct rdma_srq_context {
 	struct regpair temp[8];
 };
 
+/* rdma create srq ramrod data */
 struct rdma_srq_create_ramrod_data {
 	struct regpair pbl_base_addr;
 	__le16 pages_in_srq_pbl;
@@ -6348,206 +7087,19 @@ struct rdma_srq_create_ramrod_data {
 	struct regpair producers_addr;
 };
 
+/* rdma destroy srq ramrod data */
 struct rdma_srq_destroy_ramrod_data {
 	struct rdma_srq_id srq_id;
 	__le32 reserved;
 };
 
+/* rdma modify srq ramrod data */
 struct rdma_srq_modify_ramrod_data {
 	struct rdma_srq_id srq_id;
 	__le32 wqe_limit;
 };
 
-struct ystorm_rdma_task_st_ctx {
-	struct regpair temp[4];
-};
-
-struct ystorm_rdma_task_ag_ctx {
-	u8 reserved;
-	u8 byte1;
-	__le16 msem_ctx_upd_seq;
-	u8 flags0;
-#define YSTORM_RDMA_TASK_AG_CTX_CONNECTION_TYPE_MASK  0xF
-#define YSTORM_RDMA_TASK_AG_CTX_CONNECTION_TYPE_SHIFT 0
-#define YSTORM_RDMA_TASK_AG_CTX_EXIST_IN_QM0_MASK     0x1
-#define YSTORM_RDMA_TASK_AG_CTX_EXIST_IN_QM0_SHIFT    4
-#define YSTORM_RDMA_TASK_AG_CTX_BIT1_MASK             0x1
-#define YSTORM_RDMA_TASK_AG_CTX_BIT1_SHIFT            5
-#define YSTORM_RDMA_TASK_AG_CTX_VALID_MASK            0x1
-#define YSTORM_RDMA_TASK_AG_CTX_VALID_SHIFT           6
-#define YSTORM_RDMA_TASK_AG_CTX_BIT3_MASK             0x1
-#define YSTORM_RDMA_TASK_AG_CTX_BIT3_SHIFT            7
-	u8 flags1;
-#define YSTORM_RDMA_TASK_AG_CTX_CF0_MASK              0x3
-#define YSTORM_RDMA_TASK_AG_CTX_CF0_SHIFT             0
-#define YSTORM_RDMA_TASK_AG_CTX_CF1_MASK              0x3
-#define YSTORM_RDMA_TASK_AG_CTX_CF1_SHIFT             2
-#define YSTORM_RDMA_TASK_AG_CTX_CF2SPECIAL_MASK       0x3
-#define YSTORM_RDMA_TASK_AG_CTX_CF2SPECIAL_SHIFT      4
-#define YSTORM_RDMA_TASK_AG_CTX_CF0EN_MASK            0x1
-#define YSTORM_RDMA_TASK_AG_CTX_CF0EN_SHIFT           6
-#define YSTORM_RDMA_TASK_AG_CTX_CF1EN_MASK            0x1
-#define YSTORM_RDMA_TASK_AG_CTX_CF1EN_SHIFT           7
-	u8 flags2;
-#define YSTORM_RDMA_TASK_AG_CTX_BIT4_MASK             0x1
-#define YSTORM_RDMA_TASK_AG_CTX_BIT4_SHIFT            0
-#define YSTORM_RDMA_TASK_AG_CTX_RULE0EN_MASK          0x1
-#define YSTORM_RDMA_TASK_AG_CTX_RULE0EN_SHIFT         1
-#define YSTORM_RDMA_TASK_AG_CTX_RULE1EN_MASK          0x1
-#define YSTORM_RDMA_TASK_AG_CTX_RULE1EN_SHIFT         2
-#define YSTORM_RDMA_TASK_AG_CTX_RULE2EN_MASK          0x1
-#define YSTORM_RDMA_TASK_AG_CTX_RULE2EN_SHIFT         3
-#define YSTORM_RDMA_TASK_AG_CTX_RULE3EN_MASK          0x1
-#define YSTORM_RDMA_TASK_AG_CTX_RULE3EN_SHIFT         4
-#define YSTORM_RDMA_TASK_AG_CTX_RULE4EN_MASK          0x1
-#define YSTORM_RDMA_TASK_AG_CTX_RULE4EN_SHIFT         5
-#define YSTORM_RDMA_TASK_AG_CTX_RULE5EN_MASK          0x1
-#define YSTORM_RDMA_TASK_AG_CTX_RULE5EN_SHIFT         6
-#define YSTORM_RDMA_TASK_AG_CTX_RULE6EN_MASK          0x1
-#define YSTORM_RDMA_TASK_AG_CTX_RULE6EN_SHIFT         7
-	u8 key;
-	__le32 mw_cnt;
-	u8 ref_cnt_seq;
-	u8 ctx_upd_seq;
-	__le16 dif_flags;
-	__le16 tx_ref_count;
-	__le16 last_used_ltid;
-	__le16 parent_mr_lo;
-	__le16 parent_mr_hi;
-	__le32 fbo_lo;
-	__le32 fbo_hi;
-};
-
-struct mstorm_rdma_task_ag_ctx {
-	u8 reserved;
-	u8 byte1;
-	__le16 icid;
-	u8 flags0;
-#define MSTORM_RDMA_TASK_AG_CTX_CONNECTION_TYPE_MASK  0xF
-#define MSTORM_RDMA_TASK_AG_CTX_CONNECTION_TYPE_SHIFT 0
-#define MSTORM_RDMA_TASK_AG_CTX_EXIST_IN_QM0_MASK     0x1
-#define MSTORM_RDMA_TASK_AG_CTX_EXIST_IN_QM0_SHIFT    4
-#define MSTORM_RDMA_TASK_AG_CTX_BIT1_MASK             0x1
-#define MSTORM_RDMA_TASK_AG_CTX_BIT1_SHIFT            5
-#define MSTORM_RDMA_TASK_AG_CTX_BIT2_MASK             0x1
-#define MSTORM_RDMA_TASK_AG_CTX_BIT2_SHIFT            6
-#define MSTORM_RDMA_TASK_AG_CTX_BIT3_MASK             0x1
-#define MSTORM_RDMA_TASK_AG_CTX_BIT3_SHIFT            7
-	u8 flags1;
-#define MSTORM_RDMA_TASK_AG_CTX_CF0_MASK              0x3
-#define MSTORM_RDMA_TASK_AG_CTX_CF0_SHIFT             0
-#define MSTORM_RDMA_TASK_AG_CTX_CF1_MASK              0x3
-#define MSTORM_RDMA_TASK_AG_CTX_CF1_SHIFT             2
-#define MSTORM_RDMA_TASK_AG_CTX_CF2_MASK              0x3
-#define MSTORM_RDMA_TASK_AG_CTX_CF2_SHIFT             4
-#define MSTORM_RDMA_TASK_AG_CTX_CF0EN_MASK            0x1
-#define MSTORM_RDMA_TASK_AG_CTX_CF0EN_SHIFT           6
-#define MSTORM_RDMA_TASK_AG_CTX_CF1EN_MASK            0x1
-#define MSTORM_RDMA_TASK_AG_CTX_CF1EN_SHIFT           7
-	u8 flags2;
-#define MSTORM_RDMA_TASK_AG_CTX_CF2EN_MASK            0x1
-#define MSTORM_RDMA_TASK_AG_CTX_CF2EN_SHIFT           0
-#define MSTORM_RDMA_TASK_AG_CTX_RULE0EN_MASK          0x1
-#define MSTORM_RDMA_TASK_AG_CTX_RULE0EN_SHIFT         1
-#define MSTORM_RDMA_TASK_AG_CTX_RULE1EN_MASK          0x1
-#define MSTORM_RDMA_TASK_AG_CTX_RULE1EN_SHIFT         2
-#define MSTORM_RDMA_TASK_AG_CTX_RULE2EN_MASK          0x1
-#define MSTORM_RDMA_TASK_AG_CTX_RULE2EN_SHIFT         3
-#define MSTORM_RDMA_TASK_AG_CTX_RULE3EN_MASK          0x1
-#define MSTORM_RDMA_TASK_AG_CTX_RULE3EN_SHIFT         4
-#define MSTORM_RDMA_TASK_AG_CTX_RULE4EN_MASK          0x1
-#define MSTORM_RDMA_TASK_AG_CTX_RULE4EN_SHIFT         5
-#define MSTORM_RDMA_TASK_AG_CTX_RULE5EN_MASK          0x1
-#define MSTORM_RDMA_TASK_AG_CTX_RULE5EN_SHIFT         6
-#define MSTORM_RDMA_TASK_AG_CTX_RULE6EN_MASK          0x1
-#define MSTORM_RDMA_TASK_AG_CTX_RULE6EN_SHIFT         7
-	u8 key;
-	__le32 mw_cnt;
-	u8 ref_cnt_seq;
-	u8 ctx_upd_seq;
-	__le16 dif_flags;
-	__le16 tx_ref_count;
-	__le16 last_used_ltid;
-	__le16 parent_mr_lo;
-	__le16 parent_mr_hi;
-	__le32 fbo_lo;
-	__le32 fbo_hi;
-};
-
-struct ustorm_rdma_task_st_ctx {
-	struct regpair temp[2];
-};
-
-struct ustorm_rdma_task_ag_ctx {
-	u8 reserved;
-	u8 byte1;
-	__le16 icid;
-	u8 flags0;
-#define USTORM_RDMA_TASK_AG_CTX_CONNECTION_TYPE_MASK         0xF
-#define USTORM_RDMA_TASK_AG_CTX_CONNECTION_TYPE_SHIFT        0
-#define USTORM_RDMA_TASK_AG_CTX_EXIST_IN_QM0_MASK            0x1
-#define USTORM_RDMA_TASK_AG_CTX_EXIST_IN_QM0_SHIFT           4
-#define USTORM_RDMA_TASK_AG_CTX_DIF_RUNT_VALID_MASK          0x1
-#define USTORM_RDMA_TASK_AG_CTX_DIF_RUNT_VALID_SHIFT         5
-#define USTORM_RDMA_TASK_AG_CTX_DIF_WRITE_RESULT_CF_MASK     0x3
-#define USTORM_RDMA_TASK_AG_CTX_DIF_WRITE_RESULT_CF_SHIFT    6
-	u8 flags1;
-#define USTORM_RDMA_TASK_AG_CTX_DIF_RESULT_TOGGLE_BIT_MASK   0x3
-#define USTORM_RDMA_TASK_AG_CTX_DIF_RESULT_TOGGLE_BIT_SHIFT  0
-#define USTORM_RDMA_TASK_AG_CTX_DIF_TX_IO_FLG_MASK           0x3
-#define USTORM_RDMA_TASK_AG_CTX_DIF_TX_IO_FLG_SHIFT          2
-#define USTORM_RDMA_TASK_AG_CTX_CF3_MASK                     0x3
-#define USTORM_RDMA_TASK_AG_CTX_CF3_SHIFT                    4
-#define USTORM_RDMA_TASK_AG_CTX_DIF_ERROR_CF_MASK            0x3
-#define USTORM_RDMA_TASK_AG_CTX_DIF_ERROR_CF_SHIFT           6
-	u8 flags2;
-#define USTORM_RDMA_TASK_AG_CTX_DIF_WRITE_RESULT_CF_EN_MASK  0x1
-#define USTORM_RDMA_TASK_AG_CTX_DIF_WRITE_RESULT_CF_EN_SHIFT 0
-#define USTORM_RDMA_TASK_AG_CTX_RESERVED2_MASK               0x1
-#define USTORM_RDMA_TASK_AG_CTX_RESERVED2_SHIFT              1
-#define USTORM_RDMA_TASK_AG_CTX_RESERVED3_MASK               0x1
-#define USTORM_RDMA_TASK_AG_CTX_RESERVED3_SHIFT              2
-#define USTORM_RDMA_TASK_AG_CTX_CF3EN_MASK                   0x1
-#define USTORM_RDMA_TASK_AG_CTX_CF3EN_SHIFT                  3
-#define USTORM_RDMA_TASK_AG_CTX_DIF_ERROR_CF_EN_MASK         0x1
-#define USTORM_RDMA_TASK_AG_CTX_DIF_ERROR_CF_EN_SHIFT        4
-#define USTORM_RDMA_TASK_AG_CTX_RULE0EN_MASK                 0x1
-#define USTORM_RDMA_TASK_AG_CTX_RULE0EN_SHIFT                5
-#define USTORM_RDMA_TASK_AG_CTX_RULE1EN_MASK                 0x1
-#define USTORM_RDMA_TASK_AG_CTX_RULE1EN_SHIFT                6
-#define USTORM_RDMA_TASK_AG_CTX_RULE2EN_MASK                 0x1
-#define USTORM_RDMA_TASK_AG_CTX_RULE2EN_SHIFT                7
-	u8 flags3;
-#define USTORM_RDMA_TASK_AG_CTX_RULE3EN_MASK                 0x1
-#define USTORM_RDMA_TASK_AG_CTX_RULE3EN_SHIFT                0
-#define USTORM_RDMA_TASK_AG_CTX_RULE4EN_MASK                 0x1
-#define USTORM_RDMA_TASK_AG_CTX_RULE4EN_SHIFT                1
-#define USTORM_RDMA_TASK_AG_CTX_RULE5EN_MASK                 0x1
-#define USTORM_RDMA_TASK_AG_CTX_RULE5EN_SHIFT                2
-#define USTORM_RDMA_TASK_AG_CTX_RULE6EN_MASK                 0x1
-#define USTORM_RDMA_TASK_AG_CTX_RULE6EN_SHIFT                3
-#define USTORM_RDMA_TASK_AG_CTX_DIF_ERROR_TYPE_MASK          0xF
-#define USTORM_RDMA_TASK_AG_CTX_DIF_ERROR_TYPE_SHIFT         4
-	__le32 dif_err_intervals;
-	__le32 dif_error_1st_interval;
-	__le32 reg2;
-	__le32 dif_runt_value;
-	__le32 reg4;
-	__le32 reg5;
-};
-
-struct rdma_task_context {
-	struct ystorm_rdma_task_st_ctx ystorm_st_context;
-	struct ystorm_rdma_task_ag_ctx ystorm_ag_context;
-	struct tdif_task_context tdif_context;
-	struct mstorm_rdma_task_ag_ctx mstorm_ag_context;
-	struct mstorm_rdma_task_st_ctx mstorm_st_context;
-	struct rdif_task_context rdif_context;
-	struct ustorm_rdma_task_st_ctx ustorm_st_context;
-	struct regpair ustorm_st_padding[2];
-	struct ustorm_rdma_task_ag_ctx ustorm_ag_context;
-};
-
+/* RDMA Tid type enumeration (for register_tid ramrod) */
 enum rdma_tid_type {
 	RDMA_TID_REGISTERED_MR,
 	RDMA_TID_FMR,
@@ -6556,214 +7108,214 @@ enum rdma_tid_type {
 	MAX_RDMA_TID_TYPE
 };
 
-struct xstorm_roce_conn_ag_ctx_dq_ext_ld_part {
+struct e4_xstorm_roce_conn_ag_ctx_dq_ext_ld_part {
 	u8 reserved0;
 	u8 state;
 	u8 flags0;
-#define XSTORMROCECONNAGCTXDQEXTLDPART_EXIST_IN_QM0_MASK      0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_EXIST_IN_QM0_SHIFT     0
-#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT1_MASK              0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT1_SHIFT             1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT2_MASK              0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT2_SHIFT             2
-#define XSTORMROCECONNAGCTXDQEXTLDPART_EXIST_IN_QM3_MASK      0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_EXIST_IN_QM3_SHIFT     3
-#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT4_MASK              0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT4_SHIFT             4
-#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT5_MASK              0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT5_SHIFT             5
-#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT6_MASK              0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT6_SHIFT             6
-#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT7_MASK              0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT7_SHIFT             7
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_EXIST_IN_QM0_MASK	0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_EXIST_IN_QM0_SHIFT	0
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_BIT1_MASK		0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_BIT1_SHIFT		1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_BIT2_MASK		0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_BIT2_SHIFT		2
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_EXIST_IN_QM3_MASK	0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_EXIST_IN_QM3_SHIFT	3
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_BIT4_MASK		0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_BIT4_SHIFT		4
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_BIT5_MASK		0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_BIT5_SHIFT		5
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_BIT6_MASK		0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_BIT6_SHIFT		6
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_BIT7_MASK		0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_BIT7_SHIFT		7
 	u8 flags1;
-#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT8_MASK              0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT8_SHIFT             0
-#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT9_MASK              0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT9_SHIFT             1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT10_MASK             0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT10_SHIFT            2
-#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT11_MASK             0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT11_SHIFT            3
-#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT12_MASK             0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT12_SHIFT            4
-#define XSTORMROCECONNAGCTXDQEXTLDPART_MSTORM_FLUSH_MASK      0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_MSTORM_FLUSH_SHIFT     5
-#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT14_MASK             0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT14_SHIFT            6
-#define XSTORMROCECONNAGCTXDQEXTLDPART_YSTORM_FLUSH_MASK      0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_YSTORM_FLUSH_SHIFT     7
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_BIT8_MASK		0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_BIT8_SHIFT		0
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_BIT9_MASK		0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_BIT9_SHIFT		1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_BIT10_MASK		0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_BIT10_SHIFT		2
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_BIT11_MASK		0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_BIT11_SHIFT		3
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_BIT12_MASK		0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_BIT12_SHIFT		4
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_MSTORM_FLUSH_MASK	0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_MSTORM_FLUSH_SHIFT	5
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_BIT14_MASK		0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_BIT14_SHIFT		6
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_YSTORM_FLUSH_MASK	0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_YSTORM_FLUSH_SHIFT	7
 	u8 flags2;
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF0_MASK               0x3
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF0_SHIFT              0
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF1_MASK               0x3
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF1_SHIFT              2
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF2_MASK               0x3
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF2_SHIFT              4
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF3_MASK               0x3
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF3_SHIFT              6
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF0_MASK	0x3
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF0_SHIFT	0
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF1_MASK	0x3
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF1_SHIFT	2
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF2_MASK	0x3
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF2_SHIFT	4
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF3_MASK	0x3
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF3_SHIFT	6
 	u8 flags3;
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF4_MASK               0x3
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF4_SHIFT              0
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF5_MASK               0x3
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF5_SHIFT              2
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF6_MASK               0x3
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF6_SHIFT              4
-#define XSTORMROCECONNAGCTXDQEXTLDPART_FLUSH_Q0_CF_MASK       0x3
-#define XSTORMROCECONNAGCTXDQEXTLDPART_FLUSH_Q0_CF_SHIFT      6
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF4_MASK		0x3
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF4_SHIFT		0
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF5_MASK		0x3
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF5_SHIFT		2
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF6_MASK		0x3
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF6_SHIFT		4
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_FLUSH_Q0_CF_MASK	0x3
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_FLUSH_Q0_CF_SHIFT	6
 	u8 flags4;
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF8_MASK               0x3
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF8_SHIFT              0
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF9_MASK               0x3
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF9_SHIFT              2
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF10_MASK              0x3
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF10_SHIFT             4
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF11_MASK              0x3
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF11_SHIFT             6
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF8_MASK	0x3
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF8_SHIFT	0
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF9_MASK	0x3
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF9_SHIFT	2
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF10_MASK	0x3
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF10_SHIFT	4
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF11_MASK	0x3
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF11_SHIFT	6
 	u8 flags5;
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF12_MASK              0x3
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF12_SHIFT             0
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF13_MASK              0x3
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF13_SHIFT             2
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF14_MASK              0x3
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF14_SHIFT             4
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF15_MASK              0x3
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF15_SHIFT             6
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF12_MASK	0x3
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF12_SHIFT	0
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF13_MASK	0x3
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF13_SHIFT	2
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF14_MASK	0x3
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF14_SHIFT	4
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF15_MASK	0x3
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF15_SHIFT	6
 	u8 flags6;
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF16_MASK              0x3
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF16_SHIFT             0
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF17_MASK              0x3
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF17_SHIFT             2
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF18_MASK              0x3
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF18_SHIFT             4
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF19_MASK              0x3
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF19_SHIFT             6
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF16_MASK	0x3
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF16_SHIFT	0
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF17_MASK	0x3
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF17_SHIFT	2
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF18_MASK	0x3
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF18_SHIFT	4
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF19_MASK	0x3
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF19_SHIFT	6
 	u8 flags7;
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF20_MASK              0x3
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF20_SHIFT             0
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF21_MASK              0x3
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF21_SHIFT             2
-#define XSTORMROCECONNAGCTXDQEXTLDPART_SLOW_PATH_MASK         0x3
-#define XSTORMROCECONNAGCTXDQEXTLDPART_SLOW_PATH_SHIFT        4
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF0EN_MASK             0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF0EN_SHIFT            6
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF1EN_MASK             0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF1EN_SHIFT            7
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF20_MASK		0x3
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF20_SHIFT		0
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF21_MASK		0x3
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF21_SHIFT		2
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_SLOW_PATH_MASK		0x3
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_SLOW_PATH_SHIFT	4
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF0EN_MASK		0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF0EN_SHIFT		6
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF1EN_MASK		0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF1EN_SHIFT		7
 	u8 flags8;
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF2EN_MASK             0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF2EN_SHIFT            0
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF3EN_MASK             0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF3EN_SHIFT            1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF4EN_MASK             0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF4EN_SHIFT            2
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF5EN_MASK             0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF5EN_SHIFT            3
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF6EN_MASK             0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF6EN_SHIFT            4
-#define XSTORMROCECONNAGCTXDQEXTLDPART_FLUSH_Q0_CF_EN_MASK    0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_FLUSH_Q0_CF_EN_SHIFT   5
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF8EN_MASK             0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF8EN_SHIFT            6
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF9EN_MASK             0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF9EN_SHIFT            7
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF2EN_MASK		0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF2EN_SHIFT		0
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF3EN_MASK		0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF3EN_SHIFT		1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF4EN_MASK		0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF4EN_SHIFT		2
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF5EN_MASK		0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF5EN_SHIFT		3
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF6EN_MASK		0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF6EN_SHIFT		4
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_FLUSH_Q0_CF_EN_MASK	0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_FLUSH_Q0_CF_EN_SHIFT	5
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF8EN_MASK		0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF8EN_SHIFT		6
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF9EN_MASK		0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF9EN_SHIFT		7
 	u8 flags9;
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF10EN_MASK            0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF10EN_SHIFT           0
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF11EN_MASK            0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF11EN_SHIFT           1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF12EN_MASK            0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF12EN_SHIFT           2
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF13EN_MASK            0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF13EN_SHIFT           3
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF14EN_MASK            0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF14EN_SHIFT           4
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF15EN_MASK            0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF15EN_SHIFT           5
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF16EN_MASK            0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF16EN_SHIFT           6
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF17EN_MASK            0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF17EN_SHIFT           7
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF10EN_MASK	0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF10EN_SHIFT	0
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF11EN_MASK	0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF11EN_SHIFT	1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF12EN_MASK	0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF12EN_SHIFT	2
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF13EN_MASK	0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF13EN_SHIFT	3
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF14EN_MASK	0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF14EN_SHIFT	4
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF15EN_MASK	0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF15EN_SHIFT	5
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF16EN_MASK	0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF16EN_SHIFT	6
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF17EN_MASK	0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF17EN_SHIFT	7
 	u8 flags10;
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF18EN_MASK            0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF18EN_SHIFT           0
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF19EN_MASK            0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF19EN_SHIFT           1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF20EN_MASK            0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF20EN_SHIFT           2
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF21EN_MASK            0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF21EN_SHIFT           3
-#define XSTORMROCECONNAGCTXDQEXTLDPART_SLOW_PATH_EN_MASK      0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_SLOW_PATH_EN_SHIFT     4
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF23EN_MASK            0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF23EN_SHIFT           5
-#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE0EN_MASK           0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE0EN_SHIFT          6
-#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE1EN_MASK           0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE1EN_SHIFT          7
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF18EN_MASK		0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF18EN_SHIFT		0
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF19EN_MASK		0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF19EN_SHIFT		1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF20EN_MASK		0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF20EN_SHIFT		2
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF21EN_MASK		0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF21EN_SHIFT		3
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_SLOW_PATH_EN_MASK	0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_SLOW_PATH_EN_SHIFT	4
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF23EN_MASK		0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF23EN_SHIFT		5
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RULE0EN_MASK		0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RULE0EN_SHIFT		6
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RULE1EN_MASK		0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RULE1EN_SHIFT		7
 	u8 flags11;
-#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE2EN_MASK           0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE2EN_SHIFT          0
-#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE3EN_MASK           0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE3EN_SHIFT          1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE4EN_MASK           0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE4EN_SHIFT          2
-#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE5EN_MASK           0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE5EN_SHIFT          3
-#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE6EN_MASK           0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE6EN_SHIFT          4
-#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE7EN_MASK           0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE7EN_SHIFT          5
-#define XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED1_MASK      0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED1_SHIFT     6
-#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE9EN_MASK           0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE9EN_SHIFT          7
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RULE2EN_MASK		0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RULE2EN_SHIFT		0
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RULE3EN_MASK		0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RULE3EN_SHIFT		1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RULE4EN_MASK		0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RULE4EN_SHIFT		2
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RULE5EN_MASK		0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RULE5EN_SHIFT		3
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RULE6EN_MASK		0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RULE6EN_SHIFT		4
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RULE7EN_MASK		0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RULE7EN_SHIFT		5
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED1_MASK	0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED1_SHIFT	6
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RULE9EN_MASK		0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RULE9EN_SHIFT		7
 	u8 flags12;
-#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE10EN_MASK          0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE10EN_SHIFT         0
-#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE11EN_MASK          0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE11EN_SHIFT         1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED2_MASK      0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED2_SHIFT     2
-#define XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED3_MASK      0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED3_SHIFT     3
-#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE14EN_MASK          0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE14EN_SHIFT         4
-#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE15EN_MASK          0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE15EN_SHIFT         5
-#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE16EN_MASK          0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE16EN_SHIFT         6
-#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE17EN_MASK          0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE17EN_SHIFT         7
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RULE10EN_MASK		0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RULE10EN_SHIFT		0
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RULE11EN_MASK		0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RULE11EN_SHIFT		1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED2_MASK	0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED2_SHIFT	2
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED3_MASK	0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED3_SHIFT	3
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RULE14EN_MASK		0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RULE14EN_SHIFT		4
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RULE15EN_MASK		0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RULE15EN_SHIFT		5
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RULE16EN_MASK		0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RULE16EN_SHIFT		6
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RULE17EN_MASK		0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RULE17EN_SHIFT		7
 	u8 flags13;
-#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE18EN_MASK          0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE18EN_SHIFT         0
-#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE19EN_MASK          0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE19EN_SHIFT         1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED4_MASK      0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED4_SHIFT     2
-#define XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED5_MASK      0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED5_SHIFT     3
-#define XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED6_MASK      0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED6_SHIFT     4
-#define XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED7_MASK      0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED7_SHIFT     5
-#define XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED8_MASK      0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED8_SHIFT     6
-#define XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED9_MASK      0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED9_SHIFT     7
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RULE18EN_MASK		0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RULE18EN_SHIFT		0
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RULE19EN_MASK		0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RULE19EN_SHIFT		1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED4_MASK	0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED4_SHIFT	2
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED5_MASK	0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED5_SHIFT	3
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED6_MASK	0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED6_SHIFT	4
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED7_MASK	0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED7_SHIFT	5
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED8_MASK	0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED8_SHIFT	6
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED9_MASK	0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED9_SHIFT	7
 	u8 flags14;
-#define XSTORMROCECONNAGCTXDQEXTLDPART_MIGRATION_MASK         0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_MIGRATION_SHIFT        0
-#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT17_MASK             0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT17_SHIFT            1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_DPM_PORT_NUM_MASK      0x3
-#define XSTORMROCECONNAGCTXDQEXTLDPART_DPM_PORT_NUM_SHIFT     2
-#define XSTORMROCECONNAGCTXDQEXTLDPART_RESERVED_MASK          0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_RESERVED_SHIFT         4
-#define XSTORMROCECONNAGCTXDQEXTLDPART_ROCE_EDPM_ENABLE_MASK  0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_ROCE_EDPM_ENABLE_SHIFT 5
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF23_MASK              0x3
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF23_SHIFT             6
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_MIGRATION_MASK		0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_MIGRATION_SHIFT	0
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_BIT17_MASK		0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_BIT17_SHIFT		1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_DPM_PORT_NUM_MASK	0x3
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_DPM_PORT_NUM_SHIFT	2
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RESERVED_MASK		0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RESERVED_SHIFT		4
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_ROCE_EDPM_ENABLE_MASK	0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_ROCE_EDPM_ENABLE_SHIFT	5
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF23_MASK		0x3
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF23_SHIFT		6
 	u8 byte2;
 	__le16 physical_q0;
 	__le16 word1;
@@ -6783,126 +7335,126 @@ struct xstorm_roce_conn_ag_ctx_dq_ext_ld_part {
 	__le32 reg4;
 };
 
-struct mstorm_rdma_conn_ag_ctx {
+struct e4_mstorm_rdma_conn_ag_ctx {
 	u8 byte0;
 	u8 byte1;
 	u8 flags0;
-#define MSTORM_RDMA_CONN_AG_CTX_BIT0_MASK     0x1
-#define MSTORM_RDMA_CONN_AG_CTX_BIT0_SHIFT    0
-#define MSTORM_RDMA_CONN_AG_CTX_BIT1_MASK     0x1
-#define MSTORM_RDMA_CONN_AG_CTX_BIT1_SHIFT    1
-#define MSTORM_RDMA_CONN_AG_CTX_CF0_MASK      0x3
-#define MSTORM_RDMA_CONN_AG_CTX_CF0_SHIFT     2
-#define MSTORM_RDMA_CONN_AG_CTX_CF1_MASK      0x3
-#define MSTORM_RDMA_CONN_AG_CTX_CF1_SHIFT     4
-#define MSTORM_RDMA_CONN_AG_CTX_CF2_MASK      0x3
-#define MSTORM_RDMA_CONN_AG_CTX_CF2_SHIFT     6
+#define E4_MSTORM_RDMA_CONN_AG_CTX_BIT0_MASK	0x1
+#define E4_MSTORM_RDMA_CONN_AG_CTX_BIT0_SHIFT	0
+#define E4_MSTORM_RDMA_CONN_AG_CTX_BIT1_MASK	0x1
+#define E4_MSTORM_RDMA_CONN_AG_CTX_BIT1_SHIFT	1
+#define E4_MSTORM_RDMA_CONN_AG_CTX_CF0_MASK	0x3
+#define E4_MSTORM_RDMA_CONN_AG_CTX_CF0_SHIFT	2
+#define E4_MSTORM_RDMA_CONN_AG_CTX_CF1_MASK	0x3
+#define E4_MSTORM_RDMA_CONN_AG_CTX_CF1_SHIFT	4
+#define E4_MSTORM_RDMA_CONN_AG_CTX_CF2_MASK	0x3
+#define E4_MSTORM_RDMA_CONN_AG_CTX_CF2_SHIFT	6
 	u8 flags1;
-#define MSTORM_RDMA_CONN_AG_CTX_CF0EN_MASK    0x1
-#define MSTORM_RDMA_CONN_AG_CTX_CF0EN_SHIFT   0
-#define MSTORM_RDMA_CONN_AG_CTX_CF1EN_MASK    0x1
-#define MSTORM_RDMA_CONN_AG_CTX_CF1EN_SHIFT   1
-#define MSTORM_RDMA_CONN_AG_CTX_CF2EN_MASK    0x1
-#define MSTORM_RDMA_CONN_AG_CTX_CF2EN_SHIFT   2
-#define MSTORM_RDMA_CONN_AG_CTX_RULE0EN_MASK  0x1
-#define MSTORM_RDMA_CONN_AG_CTX_RULE0EN_SHIFT 3
-#define MSTORM_RDMA_CONN_AG_CTX_RULE1EN_MASK  0x1
-#define MSTORM_RDMA_CONN_AG_CTX_RULE1EN_SHIFT 4
-#define MSTORM_RDMA_CONN_AG_CTX_RULE2EN_MASK  0x1
-#define MSTORM_RDMA_CONN_AG_CTX_RULE2EN_SHIFT 5
-#define MSTORM_RDMA_CONN_AG_CTX_RULE3EN_MASK  0x1
-#define MSTORM_RDMA_CONN_AG_CTX_RULE3EN_SHIFT 6
-#define MSTORM_RDMA_CONN_AG_CTX_RULE4EN_MASK  0x1
-#define MSTORM_RDMA_CONN_AG_CTX_RULE4EN_SHIFT 7
+#define E4_MSTORM_RDMA_CONN_AG_CTX_CF0EN_MASK		0x1
+#define E4_MSTORM_RDMA_CONN_AG_CTX_CF0EN_SHIFT		0
+#define E4_MSTORM_RDMA_CONN_AG_CTX_CF1EN_MASK		0x1
+#define E4_MSTORM_RDMA_CONN_AG_CTX_CF1EN_SHIFT		1
+#define E4_MSTORM_RDMA_CONN_AG_CTX_CF2EN_MASK		0x1
+#define E4_MSTORM_RDMA_CONN_AG_CTX_CF2EN_SHIFT		2
+#define E4_MSTORM_RDMA_CONN_AG_CTX_RULE0EN_MASK		0x1
+#define E4_MSTORM_RDMA_CONN_AG_CTX_RULE0EN_SHIFT	3
+#define E4_MSTORM_RDMA_CONN_AG_CTX_RULE1EN_MASK		0x1
+#define E4_MSTORM_RDMA_CONN_AG_CTX_RULE1EN_SHIFT	4
+#define E4_MSTORM_RDMA_CONN_AG_CTX_RULE2EN_MASK		0x1
+#define E4_MSTORM_RDMA_CONN_AG_CTX_RULE2EN_SHIFT	5
+#define E4_MSTORM_RDMA_CONN_AG_CTX_RULE3EN_MASK		0x1
+#define E4_MSTORM_RDMA_CONN_AG_CTX_RULE3EN_SHIFT	6
+#define E4_MSTORM_RDMA_CONN_AG_CTX_RULE4EN_MASK		0x1
+#define E4_MSTORM_RDMA_CONN_AG_CTX_RULE4EN_SHIFT	7
 	__le16 word0;
 	__le16 word1;
 	__le32 reg0;
 	__le32 reg1;
 };
 
-struct tstorm_rdma_conn_ag_ctx {
+struct e4_tstorm_rdma_conn_ag_ctx {
 	u8 reserved0;
 	u8 byte1;
 	u8 flags0;
-#define TSTORM_RDMA_CONN_AG_CTX_EXIST_IN_QM0_MASK          0x1
-#define TSTORM_RDMA_CONN_AG_CTX_EXIST_IN_QM0_SHIFT         0
-#define TSTORM_RDMA_CONN_AG_CTX_BIT1_MASK                  0x1
-#define TSTORM_RDMA_CONN_AG_CTX_BIT1_SHIFT                 1
-#define TSTORM_RDMA_CONN_AG_CTX_BIT2_MASK                  0x1
-#define TSTORM_RDMA_CONN_AG_CTX_BIT2_SHIFT                 2
-#define TSTORM_RDMA_CONN_AG_CTX_BIT3_MASK                  0x1
-#define TSTORM_RDMA_CONN_AG_CTX_BIT3_SHIFT                 3
-#define TSTORM_RDMA_CONN_AG_CTX_BIT4_MASK                  0x1
-#define TSTORM_RDMA_CONN_AG_CTX_BIT4_SHIFT                 4
-#define TSTORM_RDMA_CONN_AG_CTX_BIT5_MASK                  0x1
-#define TSTORM_RDMA_CONN_AG_CTX_BIT5_SHIFT                 5
-#define TSTORM_RDMA_CONN_AG_CTX_CF0_MASK                   0x3
-#define TSTORM_RDMA_CONN_AG_CTX_CF0_SHIFT                  6
+#define E4_TSTORM_RDMA_CONN_AG_CTX_EXIST_IN_QM0_MASK	0x1
+#define E4_TSTORM_RDMA_CONN_AG_CTX_EXIST_IN_QM0_SHIFT	0
+#define E4_TSTORM_RDMA_CONN_AG_CTX_BIT1_MASK		0x1
+#define E4_TSTORM_RDMA_CONN_AG_CTX_BIT1_SHIFT		1
+#define E4_TSTORM_RDMA_CONN_AG_CTX_BIT2_MASK		0x1
+#define E4_TSTORM_RDMA_CONN_AG_CTX_BIT2_SHIFT		2
+#define E4_TSTORM_RDMA_CONN_AG_CTX_BIT3_MASK		0x1
+#define E4_TSTORM_RDMA_CONN_AG_CTX_BIT3_SHIFT		3
+#define E4_TSTORM_RDMA_CONN_AG_CTX_BIT4_MASK		0x1
+#define E4_TSTORM_RDMA_CONN_AG_CTX_BIT4_SHIFT		4
+#define E4_TSTORM_RDMA_CONN_AG_CTX_BIT5_MASK		0x1
+#define E4_TSTORM_RDMA_CONN_AG_CTX_BIT5_SHIFT		5
+#define E4_TSTORM_RDMA_CONN_AG_CTX_CF0_MASK		0x3
+#define E4_TSTORM_RDMA_CONN_AG_CTX_CF0_SHIFT		6
 	u8 flags1;
-#define TSTORM_RDMA_CONN_AG_CTX_CF1_MASK                   0x3
-#define TSTORM_RDMA_CONN_AG_CTX_CF1_SHIFT                  0
-#define TSTORM_RDMA_CONN_AG_CTX_CF2_MASK                   0x3
-#define TSTORM_RDMA_CONN_AG_CTX_CF2_SHIFT                  2
-#define TSTORM_RDMA_CONN_AG_CTX_TIMER_STOP_ALL_CF_MASK     0x3
-#define TSTORM_RDMA_CONN_AG_CTX_TIMER_STOP_ALL_CF_SHIFT    4
-#define TSTORM_RDMA_CONN_AG_CTX_FLUSH_Q0_CF_MASK           0x3
-#define TSTORM_RDMA_CONN_AG_CTX_FLUSH_Q0_CF_SHIFT          6
+#define E4_TSTORM_RDMA_CONN_AG_CTX_CF1_MASK			0x3
+#define E4_TSTORM_RDMA_CONN_AG_CTX_CF1_SHIFT			0
+#define E4_TSTORM_RDMA_CONN_AG_CTX_CF2_MASK			0x3
+#define E4_TSTORM_RDMA_CONN_AG_CTX_CF2_SHIFT			2
+#define E4_TSTORM_RDMA_CONN_AG_CTX_TIMER_STOP_ALL_CF_MASK	0x3
+#define E4_TSTORM_RDMA_CONN_AG_CTX_TIMER_STOP_ALL_CF_SHIFT	4
+#define E4_TSTORM_RDMA_CONN_AG_CTX_FLUSH_Q0_CF_MASK		0x3
+#define E4_TSTORM_RDMA_CONN_AG_CTX_FLUSH_Q0_CF_SHIFT		6
 	u8 flags2;
-#define TSTORM_RDMA_CONN_AG_CTX_MSTORM_FLUSH_CF_MASK       0x3
-#define TSTORM_RDMA_CONN_AG_CTX_MSTORM_FLUSH_CF_SHIFT      0
-#define TSTORM_RDMA_CONN_AG_CTX_CF6_MASK                   0x3
-#define TSTORM_RDMA_CONN_AG_CTX_CF6_SHIFT                  2
-#define TSTORM_RDMA_CONN_AG_CTX_CF7_MASK                   0x3
-#define TSTORM_RDMA_CONN_AG_CTX_CF7_SHIFT                  4
-#define TSTORM_RDMA_CONN_AG_CTX_CF8_MASK                   0x3
-#define TSTORM_RDMA_CONN_AG_CTX_CF8_SHIFT                  6
+#define E4_TSTORM_RDMA_CONN_AG_CTX_MSTORM_FLUSH_CF_MASK		0x3
+#define E4_TSTORM_RDMA_CONN_AG_CTX_MSTORM_FLUSH_CF_SHIFT	0
+#define E4_TSTORM_RDMA_CONN_AG_CTX_CF6_MASK			0x3
+#define E4_TSTORM_RDMA_CONN_AG_CTX_CF6_SHIFT			2
+#define E4_TSTORM_RDMA_CONN_AG_CTX_CF7_MASK			0x3
+#define E4_TSTORM_RDMA_CONN_AG_CTX_CF7_SHIFT			4
+#define E4_TSTORM_RDMA_CONN_AG_CTX_CF8_MASK			0x3
+#define E4_TSTORM_RDMA_CONN_AG_CTX_CF8_SHIFT			6
 	u8 flags3;
-#define TSTORM_RDMA_CONN_AG_CTX_CF9_MASK                   0x3
-#define TSTORM_RDMA_CONN_AG_CTX_CF9_SHIFT                  0
-#define TSTORM_RDMA_CONN_AG_CTX_CF10_MASK                  0x3
-#define TSTORM_RDMA_CONN_AG_CTX_CF10_SHIFT                 2
-#define TSTORM_RDMA_CONN_AG_CTX_CF0EN_MASK                 0x1
-#define TSTORM_RDMA_CONN_AG_CTX_CF0EN_SHIFT                4
-#define TSTORM_RDMA_CONN_AG_CTX_CF1EN_MASK                 0x1
-#define TSTORM_RDMA_CONN_AG_CTX_CF1EN_SHIFT                5
-#define TSTORM_RDMA_CONN_AG_CTX_CF2EN_MASK                 0x1
-#define TSTORM_RDMA_CONN_AG_CTX_CF2EN_SHIFT                6
-#define TSTORM_RDMA_CONN_AG_CTX_TIMER_STOP_ALL_CF_EN_MASK  0x1
-#define TSTORM_RDMA_CONN_AG_CTX_TIMER_STOP_ALL_CF_EN_SHIFT 7
+#define E4_TSTORM_RDMA_CONN_AG_CTX_CF9_MASK			0x3
+#define E4_TSTORM_RDMA_CONN_AG_CTX_CF9_SHIFT			0
+#define E4_TSTORM_RDMA_CONN_AG_CTX_CF10_MASK			0x3
+#define E4_TSTORM_RDMA_CONN_AG_CTX_CF10_SHIFT			2
+#define E4_TSTORM_RDMA_CONN_AG_CTX_CF0EN_MASK			0x1
+#define E4_TSTORM_RDMA_CONN_AG_CTX_CF0EN_SHIFT			4
+#define E4_TSTORM_RDMA_CONN_AG_CTX_CF1EN_MASK			0x1
+#define E4_TSTORM_RDMA_CONN_AG_CTX_CF1EN_SHIFT			5
+#define E4_TSTORM_RDMA_CONN_AG_CTX_CF2EN_MASK			0x1
+#define E4_TSTORM_RDMA_CONN_AG_CTX_CF2EN_SHIFT			6
+#define E4_TSTORM_RDMA_CONN_AG_CTX_TIMER_STOP_ALL_CF_EN_MASK	0x1
+#define E4_TSTORM_RDMA_CONN_AG_CTX_TIMER_STOP_ALL_CF_EN_SHIFT	7
 	u8 flags4;
-#define TSTORM_RDMA_CONN_AG_CTX_FLUSH_Q0_CF_EN_MASK        0x1
-#define TSTORM_RDMA_CONN_AG_CTX_FLUSH_Q0_CF_EN_SHIFT       0
-#define TSTORM_RDMA_CONN_AG_CTX_MSTORM_FLUSH_CF_EN_MASK    0x1
-#define TSTORM_RDMA_CONN_AG_CTX_MSTORM_FLUSH_CF_EN_SHIFT   1
-#define TSTORM_RDMA_CONN_AG_CTX_CF6EN_MASK                 0x1
-#define TSTORM_RDMA_CONN_AG_CTX_CF6EN_SHIFT                2
-#define TSTORM_RDMA_CONN_AG_CTX_CF7EN_MASK                 0x1
-#define TSTORM_RDMA_CONN_AG_CTX_CF7EN_SHIFT                3
-#define TSTORM_RDMA_CONN_AG_CTX_CF8EN_MASK                 0x1
-#define TSTORM_RDMA_CONN_AG_CTX_CF8EN_SHIFT                4
-#define TSTORM_RDMA_CONN_AG_CTX_CF9EN_MASK                 0x1
-#define TSTORM_RDMA_CONN_AG_CTX_CF9EN_SHIFT                5
-#define TSTORM_RDMA_CONN_AG_CTX_CF10EN_MASK                0x1
-#define TSTORM_RDMA_CONN_AG_CTX_CF10EN_SHIFT               6
-#define TSTORM_RDMA_CONN_AG_CTX_RULE0EN_MASK               0x1
-#define TSTORM_RDMA_CONN_AG_CTX_RULE0EN_SHIFT              7
+#define E4_TSTORM_RDMA_CONN_AG_CTX_FLUSH_Q0_CF_EN_MASK		0x1
+#define E4_TSTORM_RDMA_CONN_AG_CTX_FLUSH_Q0_CF_EN_SHIFT		0
+#define E4_TSTORM_RDMA_CONN_AG_CTX_MSTORM_FLUSH_CF_EN_MASK	0x1
+#define E4_TSTORM_RDMA_CONN_AG_CTX_MSTORM_FLUSH_CF_EN_SHIFT	1
+#define E4_TSTORM_RDMA_CONN_AG_CTX_CF6EN_MASK			0x1
+#define E4_TSTORM_RDMA_CONN_AG_CTX_CF6EN_SHIFT			2
+#define E4_TSTORM_RDMA_CONN_AG_CTX_CF7EN_MASK			0x1
+#define E4_TSTORM_RDMA_CONN_AG_CTX_CF7EN_SHIFT			3
+#define E4_TSTORM_RDMA_CONN_AG_CTX_CF8EN_MASK			0x1
+#define E4_TSTORM_RDMA_CONN_AG_CTX_CF8EN_SHIFT			4
+#define E4_TSTORM_RDMA_CONN_AG_CTX_CF9EN_MASK			0x1
+#define E4_TSTORM_RDMA_CONN_AG_CTX_CF9EN_SHIFT			5
+#define E4_TSTORM_RDMA_CONN_AG_CTX_CF10EN_MASK			0x1
+#define E4_TSTORM_RDMA_CONN_AG_CTX_CF10EN_SHIFT			6
+#define E4_TSTORM_RDMA_CONN_AG_CTX_RULE0EN_MASK			0x1
+#define E4_TSTORM_RDMA_CONN_AG_CTX_RULE0EN_SHIFT		7
 	u8 flags5;
-#define TSTORM_RDMA_CONN_AG_CTX_RULE1EN_MASK               0x1
-#define TSTORM_RDMA_CONN_AG_CTX_RULE1EN_SHIFT              0
-#define TSTORM_RDMA_CONN_AG_CTX_RULE2EN_MASK               0x1
-#define TSTORM_RDMA_CONN_AG_CTX_RULE2EN_SHIFT              1
-#define TSTORM_RDMA_CONN_AG_CTX_RULE3EN_MASK               0x1
-#define TSTORM_RDMA_CONN_AG_CTX_RULE3EN_SHIFT              2
-#define TSTORM_RDMA_CONN_AG_CTX_RULE4EN_MASK               0x1
-#define TSTORM_RDMA_CONN_AG_CTX_RULE4EN_SHIFT              3
-#define TSTORM_RDMA_CONN_AG_CTX_RULE5EN_MASK               0x1
-#define TSTORM_RDMA_CONN_AG_CTX_RULE5EN_SHIFT              4
-#define TSTORM_RDMA_CONN_AG_CTX_RULE6EN_MASK               0x1
-#define TSTORM_RDMA_CONN_AG_CTX_RULE6EN_SHIFT              5
-#define TSTORM_RDMA_CONN_AG_CTX_RULE7EN_MASK               0x1
-#define TSTORM_RDMA_CONN_AG_CTX_RULE7EN_SHIFT              6
-#define TSTORM_RDMA_CONN_AG_CTX_RULE8EN_MASK               0x1
-#define TSTORM_RDMA_CONN_AG_CTX_RULE8EN_SHIFT              7
+#define E4_TSTORM_RDMA_CONN_AG_CTX_RULE1EN_MASK		0x1
+#define E4_TSTORM_RDMA_CONN_AG_CTX_RULE1EN_SHIFT	0
+#define E4_TSTORM_RDMA_CONN_AG_CTX_RULE2EN_MASK		0x1
+#define E4_TSTORM_RDMA_CONN_AG_CTX_RULE2EN_SHIFT	1
+#define E4_TSTORM_RDMA_CONN_AG_CTX_RULE3EN_MASK		0x1
+#define E4_TSTORM_RDMA_CONN_AG_CTX_RULE3EN_SHIFT	2
+#define E4_TSTORM_RDMA_CONN_AG_CTX_RULE4EN_MASK		0x1
+#define E4_TSTORM_RDMA_CONN_AG_CTX_RULE4EN_SHIFT	3
+#define E4_TSTORM_RDMA_CONN_AG_CTX_RULE5EN_MASK		0x1
+#define E4_TSTORM_RDMA_CONN_AG_CTX_RULE5EN_SHIFT	4
+#define E4_TSTORM_RDMA_CONN_AG_CTX_RULE6EN_MASK		0x1
+#define E4_TSTORM_RDMA_CONN_AG_CTX_RULE6EN_SHIFT	5
+#define E4_TSTORM_RDMA_CONN_AG_CTX_RULE7EN_MASK		0x1
+#define E4_TSTORM_RDMA_CONN_AG_CTX_RULE7EN_SHIFT	6
+#define E4_TSTORM_RDMA_CONN_AG_CTX_RULE8EN_MASK		0x1
+#define E4_TSTORM_RDMA_CONN_AG_CTX_RULE8EN_SHIFT	7
 	__le32 reg0;
 	__le32 reg1;
 	__le32 reg2;
@@ -6924,73 +7476,73 @@ struct tstorm_rdma_conn_ag_ctx {
 	__le32 reg10;
 };
 
-struct tstorm_rdma_task_ag_ctx {
+struct e4_tstorm_rdma_task_ag_ctx {
 	u8 byte0;
 	u8 byte1;
 	__le16 word0;
 	u8 flags0;
-#define TSTORM_RDMA_TASK_AG_CTX_NIBBLE0_MASK  0xF
-#define TSTORM_RDMA_TASK_AG_CTX_NIBBLE0_SHIFT 0
-#define TSTORM_RDMA_TASK_AG_CTX_BIT0_MASK     0x1
-#define TSTORM_RDMA_TASK_AG_CTX_BIT0_SHIFT    4
-#define TSTORM_RDMA_TASK_AG_CTX_BIT1_MASK     0x1
-#define TSTORM_RDMA_TASK_AG_CTX_BIT1_SHIFT    5
-#define TSTORM_RDMA_TASK_AG_CTX_BIT2_MASK     0x1
-#define TSTORM_RDMA_TASK_AG_CTX_BIT2_SHIFT    6
-#define TSTORM_RDMA_TASK_AG_CTX_BIT3_MASK     0x1
-#define TSTORM_RDMA_TASK_AG_CTX_BIT3_SHIFT    7
+#define E4_TSTORM_RDMA_TASK_AG_CTX_NIBBLE0_MASK		0xF
+#define E4_TSTORM_RDMA_TASK_AG_CTX_NIBBLE0_SHIFT	0
+#define E4_TSTORM_RDMA_TASK_AG_CTX_BIT0_MASK		0x1
+#define E4_TSTORM_RDMA_TASK_AG_CTX_BIT0_SHIFT		4
+#define E4_TSTORM_RDMA_TASK_AG_CTX_BIT1_MASK		0x1
+#define E4_TSTORM_RDMA_TASK_AG_CTX_BIT1_SHIFT		5
+#define E4_TSTORM_RDMA_TASK_AG_CTX_BIT2_MASK		0x1
+#define E4_TSTORM_RDMA_TASK_AG_CTX_BIT2_SHIFT		6
+#define E4_TSTORM_RDMA_TASK_AG_CTX_BIT3_MASK		0x1
+#define E4_TSTORM_RDMA_TASK_AG_CTX_BIT3_SHIFT		7
 	u8 flags1;
-#define TSTORM_RDMA_TASK_AG_CTX_BIT4_MASK     0x1
-#define TSTORM_RDMA_TASK_AG_CTX_BIT4_SHIFT    0
-#define TSTORM_RDMA_TASK_AG_CTX_BIT5_MASK     0x1
-#define TSTORM_RDMA_TASK_AG_CTX_BIT5_SHIFT    1
-#define TSTORM_RDMA_TASK_AG_CTX_CF0_MASK      0x3
-#define TSTORM_RDMA_TASK_AG_CTX_CF0_SHIFT     2
-#define TSTORM_RDMA_TASK_AG_CTX_CF1_MASK      0x3
-#define TSTORM_RDMA_TASK_AG_CTX_CF1_SHIFT     4
-#define TSTORM_RDMA_TASK_AG_CTX_CF2_MASK      0x3
-#define TSTORM_RDMA_TASK_AG_CTX_CF2_SHIFT     6
+#define E4_TSTORM_RDMA_TASK_AG_CTX_BIT4_MASK	0x1
+#define E4_TSTORM_RDMA_TASK_AG_CTX_BIT4_SHIFT	0
+#define E4_TSTORM_RDMA_TASK_AG_CTX_BIT5_MASK	0x1
+#define E4_TSTORM_RDMA_TASK_AG_CTX_BIT5_SHIFT	1
+#define E4_TSTORM_RDMA_TASK_AG_CTX_CF0_MASK	0x3
+#define E4_TSTORM_RDMA_TASK_AG_CTX_CF0_SHIFT	2
+#define E4_TSTORM_RDMA_TASK_AG_CTX_CF1_MASK	0x3
+#define E4_TSTORM_RDMA_TASK_AG_CTX_CF1_SHIFT	4
+#define E4_TSTORM_RDMA_TASK_AG_CTX_CF2_MASK	0x3
+#define E4_TSTORM_RDMA_TASK_AG_CTX_CF2_SHIFT	6
 	u8 flags2;
-#define TSTORM_RDMA_TASK_AG_CTX_CF3_MASK      0x3
-#define TSTORM_RDMA_TASK_AG_CTX_CF3_SHIFT     0
-#define TSTORM_RDMA_TASK_AG_CTX_CF4_MASK      0x3
-#define TSTORM_RDMA_TASK_AG_CTX_CF4_SHIFT     2
-#define TSTORM_RDMA_TASK_AG_CTX_CF5_MASK      0x3
-#define TSTORM_RDMA_TASK_AG_CTX_CF5_SHIFT     4
-#define TSTORM_RDMA_TASK_AG_CTX_CF6_MASK      0x3
-#define TSTORM_RDMA_TASK_AG_CTX_CF6_SHIFT     6
+#define E4_TSTORM_RDMA_TASK_AG_CTX_CF3_MASK	0x3
+#define E4_TSTORM_RDMA_TASK_AG_CTX_CF3_SHIFT	0
+#define E4_TSTORM_RDMA_TASK_AG_CTX_CF4_MASK	0x3
+#define E4_TSTORM_RDMA_TASK_AG_CTX_CF4_SHIFT	2
+#define E4_TSTORM_RDMA_TASK_AG_CTX_CF5_MASK	0x3
+#define E4_TSTORM_RDMA_TASK_AG_CTX_CF5_SHIFT	4
+#define E4_TSTORM_RDMA_TASK_AG_CTX_CF6_MASK	0x3
+#define E4_TSTORM_RDMA_TASK_AG_CTX_CF6_SHIFT	6
 	u8 flags3;
-#define TSTORM_RDMA_TASK_AG_CTX_CF7_MASK      0x3
-#define TSTORM_RDMA_TASK_AG_CTX_CF7_SHIFT     0
-#define TSTORM_RDMA_TASK_AG_CTX_CF0EN_MASK    0x1
-#define TSTORM_RDMA_TASK_AG_CTX_CF0EN_SHIFT   2
-#define TSTORM_RDMA_TASK_AG_CTX_CF1EN_MASK    0x1
-#define TSTORM_RDMA_TASK_AG_CTX_CF1EN_SHIFT   3
-#define TSTORM_RDMA_TASK_AG_CTX_CF2EN_MASK    0x1
-#define TSTORM_RDMA_TASK_AG_CTX_CF2EN_SHIFT   4
-#define TSTORM_RDMA_TASK_AG_CTX_CF3EN_MASK    0x1
-#define TSTORM_RDMA_TASK_AG_CTX_CF3EN_SHIFT   5
-#define TSTORM_RDMA_TASK_AG_CTX_CF4EN_MASK    0x1
-#define TSTORM_RDMA_TASK_AG_CTX_CF4EN_SHIFT   6
-#define TSTORM_RDMA_TASK_AG_CTX_CF5EN_MASK    0x1
-#define TSTORM_RDMA_TASK_AG_CTX_CF5EN_SHIFT   7
+#define E4_TSTORM_RDMA_TASK_AG_CTX_CF7_MASK	0x3
+#define E4_TSTORM_RDMA_TASK_AG_CTX_CF7_SHIFT	0
+#define E4_TSTORM_RDMA_TASK_AG_CTX_CF0EN_MASK	0x1
+#define E4_TSTORM_RDMA_TASK_AG_CTX_CF0EN_SHIFT	2
+#define E4_TSTORM_RDMA_TASK_AG_CTX_CF1EN_MASK	0x1
+#define E4_TSTORM_RDMA_TASK_AG_CTX_CF1EN_SHIFT	3
+#define E4_TSTORM_RDMA_TASK_AG_CTX_CF2EN_MASK	0x1
+#define E4_TSTORM_RDMA_TASK_AG_CTX_CF2EN_SHIFT	4
+#define E4_TSTORM_RDMA_TASK_AG_CTX_CF3EN_MASK	0x1
+#define E4_TSTORM_RDMA_TASK_AG_CTX_CF3EN_SHIFT	5
+#define E4_TSTORM_RDMA_TASK_AG_CTX_CF4EN_MASK	0x1
+#define E4_TSTORM_RDMA_TASK_AG_CTX_CF4EN_SHIFT	6
+#define E4_TSTORM_RDMA_TASK_AG_CTX_CF5EN_MASK	0x1
+#define E4_TSTORM_RDMA_TASK_AG_CTX_CF5EN_SHIFT	7
 	u8 flags4;
-#define TSTORM_RDMA_TASK_AG_CTX_CF6EN_MASK    0x1
-#define TSTORM_RDMA_TASK_AG_CTX_CF6EN_SHIFT   0
-#define TSTORM_RDMA_TASK_AG_CTX_CF7EN_MASK    0x1
-#define TSTORM_RDMA_TASK_AG_CTX_CF7EN_SHIFT   1
-#define TSTORM_RDMA_TASK_AG_CTX_RULE0EN_MASK  0x1
-#define TSTORM_RDMA_TASK_AG_CTX_RULE0EN_SHIFT 2
-#define TSTORM_RDMA_TASK_AG_CTX_RULE1EN_MASK  0x1
-#define TSTORM_RDMA_TASK_AG_CTX_RULE1EN_SHIFT 3
-#define TSTORM_RDMA_TASK_AG_CTX_RULE2EN_MASK  0x1
-#define TSTORM_RDMA_TASK_AG_CTX_RULE2EN_SHIFT 4
-#define TSTORM_RDMA_TASK_AG_CTX_RULE3EN_MASK  0x1
-#define TSTORM_RDMA_TASK_AG_CTX_RULE3EN_SHIFT 5
-#define TSTORM_RDMA_TASK_AG_CTX_RULE4EN_MASK  0x1
-#define TSTORM_RDMA_TASK_AG_CTX_RULE4EN_SHIFT 6
-#define TSTORM_RDMA_TASK_AG_CTX_RULE5EN_MASK  0x1
-#define TSTORM_RDMA_TASK_AG_CTX_RULE5EN_SHIFT 7
+#define E4_TSTORM_RDMA_TASK_AG_CTX_CF6EN_MASK		0x1
+#define E4_TSTORM_RDMA_TASK_AG_CTX_CF6EN_SHIFT		0
+#define E4_TSTORM_RDMA_TASK_AG_CTX_CF7EN_MASK		0x1
+#define E4_TSTORM_RDMA_TASK_AG_CTX_CF7EN_SHIFT		1
+#define E4_TSTORM_RDMA_TASK_AG_CTX_RULE0EN_MASK		0x1
+#define E4_TSTORM_RDMA_TASK_AG_CTX_RULE0EN_SHIFT	2
+#define E4_TSTORM_RDMA_TASK_AG_CTX_RULE1EN_MASK		0x1
+#define E4_TSTORM_RDMA_TASK_AG_CTX_RULE1EN_SHIFT	3
+#define E4_TSTORM_RDMA_TASK_AG_CTX_RULE2EN_MASK		0x1
+#define E4_TSTORM_RDMA_TASK_AG_CTX_RULE2EN_SHIFT	4
+#define E4_TSTORM_RDMA_TASK_AG_CTX_RULE3EN_MASK		0x1
+#define E4_TSTORM_RDMA_TASK_AG_CTX_RULE3EN_SHIFT	5
+#define E4_TSTORM_RDMA_TASK_AG_CTX_RULE4EN_MASK		0x1
+#define E4_TSTORM_RDMA_TASK_AG_CTX_RULE4EN_SHIFT	6
+#define E4_TSTORM_RDMA_TASK_AG_CTX_RULE5EN_MASK		0x1
+#define E4_TSTORM_RDMA_TASK_AG_CTX_RULE5EN_SHIFT	7
 	u8 byte2;
 	__le16 word1;
 	__le32 reg0;
@@ -7003,63 +7555,63 @@ struct tstorm_rdma_task_ag_ctx {
 	__le32 reg2;
 };
 
-struct ustorm_rdma_conn_ag_ctx {
+struct e4_ustorm_rdma_conn_ag_ctx {
 	u8 reserved;
 	u8 byte1;
 	u8 flags0;
-#define USTORM_RDMA_CONN_AG_CTX_EXIST_IN_QM0_MASK     0x1
-#define USTORM_RDMA_CONN_AG_CTX_EXIST_IN_QM0_SHIFT    0
-#define USTORM_RDMA_CONN_AG_CTX_BIT1_MASK             0x1
-#define USTORM_RDMA_CONN_AG_CTX_BIT1_SHIFT            1
-#define USTORM_RDMA_CONN_AG_CTX_FLUSH_Q0_CF_MASK      0x3
-#define USTORM_RDMA_CONN_AG_CTX_FLUSH_Q0_CF_SHIFT     2
-#define USTORM_RDMA_CONN_AG_CTX_CF1_MASK              0x3
-#define USTORM_RDMA_CONN_AG_CTX_CF1_SHIFT             4
-#define USTORM_RDMA_CONN_AG_CTX_CF2_MASK              0x3
-#define USTORM_RDMA_CONN_AG_CTX_CF2_SHIFT             6
+#define E4_USTORM_RDMA_CONN_AG_CTX_EXIST_IN_QM0_MASK	0x1
+#define E4_USTORM_RDMA_CONN_AG_CTX_EXIST_IN_QM0_SHIFT	0
+#define E4_USTORM_RDMA_CONN_AG_CTX_BIT1_MASK		0x1
+#define E4_USTORM_RDMA_CONN_AG_CTX_BIT1_SHIFT		1
+#define E4_USTORM_RDMA_CONN_AG_CTX_FLUSH_Q0_CF_MASK	0x3
+#define E4_USTORM_RDMA_CONN_AG_CTX_FLUSH_Q0_CF_SHIFT	2
+#define E4_USTORM_RDMA_CONN_AG_CTX_CF1_MASK		0x3
+#define E4_USTORM_RDMA_CONN_AG_CTX_CF1_SHIFT		4
+#define E4_USTORM_RDMA_CONN_AG_CTX_CF2_MASK		0x3
+#define E4_USTORM_RDMA_CONN_AG_CTX_CF2_SHIFT		6
 	u8 flags1;
-#define USTORM_RDMA_CONN_AG_CTX_CF3_MASK              0x3
-#define USTORM_RDMA_CONN_AG_CTX_CF3_SHIFT             0
-#define USTORM_RDMA_CONN_AG_CTX_CQ_ARM_SE_CF_MASK     0x3
-#define USTORM_RDMA_CONN_AG_CTX_CQ_ARM_SE_CF_SHIFT    2
-#define USTORM_RDMA_CONN_AG_CTX_CQ_ARM_CF_MASK        0x3
-#define USTORM_RDMA_CONN_AG_CTX_CQ_ARM_CF_SHIFT       4
-#define USTORM_RDMA_CONN_AG_CTX_CF6_MASK              0x3
-#define USTORM_RDMA_CONN_AG_CTX_CF6_SHIFT             6
+#define E4_USTORM_RDMA_CONN_AG_CTX_CF3_MASK		0x3
+#define E4_USTORM_RDMA_CONN_AG_CTX_CF3_SHIFT		0
+#define E4_USTORM_RDMA_CONN_AG_CTX_CQ_ARM_SE_CF_MASK	0x3
+#define E4_USTORM_RDMA_CONN_AG_CTX_CQ_ARM_SE_CF_SHIFT	2
+#define E4_USTORM_RDMA_CONN_AG_CTX_CQ_ARM_CF_MASK	0x3
+#define E4_USTORM_RDMA_CONN_AG_CTX_CQ_ARM_CF_SHIFT	4
+#define E4_USTORM_RDMA_CONN_AG_CTX_CF6_MASK		0x3
+#define E4_USTORM_RDMA_CONN_AG_CTX_CF6_SHIFT		6
 	u8 flags2;
-#define USTORM_RDMA_CONN_AG_CTX_FLUSH_Q0_CF_EN_MASK   0x1
-#define USTORM_RDMA_CONN_AG_CTX_FLUSH_Q0_CF_EN_SHIFT  0
-#define USTORM_RDMA_CONN_AG_CTX_CF1EN_MASK            0x1
-#define USTORM_RDMA_CONN_AG_CTX_CF1EN_SHIFT           1
-#define USTORM_RDMA_CONN_AG_CTX_CF2EN_MASK            0x1
-#define USTORM_RDMA_CONN_AG_CTX_CF2EN_SHIFT           2
-#define USTORM_RDMA_CONN_AG_CTX_CF3EN_MASK            0x1
-#define USTORM_RDMA_CONN_AG_CTX_CF3EN_SHIFT           3
-#define USTORM_RDMA_CONN_AG_CTX_CQ_ARM_SE_CF_EN_MASK  0x1
-#define USTORM_RDMA_CONN_AG_CTX_CQ_ARM_SE_CF_EN_SHIFT 4
-#define USTORM_RDMA_CONN_AG_CTX_CQ_ARM_CF_EN_MASK     0x1
-#define USTORM_RDMA_CONN_AG_CTX_CQ_ARM_CF_EN_SHIFT    5
-#define USTORM_RDMA_CONN_AG_CTX_CF6EN_MASK            0x1
-#define USTORM_RDMA_CONN_AG_CTX_CF6EN_SHIFT           6
-#define USTORM_RDMA_CONN_AG_CTX_CQ_SE_EN_MASK         0x1
-#define USTORM_RDMA_CONN_AG_CTX_CQ_SE_EN_SHIFT        7
+#define E4_USTORM_RDMA_CONN_AG_CTX_FLUSH_Q0_CF_EN_MASK		0x1
+#define E4_USTORM_RDMA_CONN_AG_CTX_FLUSH_Q0_CF_EN_SHIFT		0
+#define E4_USTORM_RDMA_CONN_AG_CTX_CF1EN_MASK			0x1
+#define E4_USTORM_RDMA_CONN_AG_CTX_CF1EN_SHIFT			1
+#define E4_USTORM_RDMA_CONN_AG_CTX_CF2EN_MASK			0x1
+#define E4_USTORM_RDMA_CONN_AG_CTX_CF2EN_SHIFT			2
+#define E4_USTORM_RDMA_CONN_AG_CTX_CF3EN_MASK			0x1
+#define E4_USTORM_RDMA_CONN_AG_CTX_CF3EN_SHIFT			3
+#define E4_USTORM_RDMA_CONN_AG_CTX_CQ_ARM_SE_CF_EN_MASK		0x1
+#define E4_USTORM_RDMA_CONN_AG_CTX_CQ_ARM_SE_CF_EN_SHIFT	4
+#define E4_USTORM_RDMA_CONN_AG_CTX_CQ_ARM_CF_EN_MASK		0x1
+#define E4_USTORM_RDMA_CONN_AG_CTX_CQ_ARM_CF_EN_SHIFT		5
+#define E4_USTORM_RDMA_CONN_AG_CTX_CF6EN_MASK			0x1
+#define E4_USTORM_RDMA_CONN_AG_CTX_CF6EN_SHIFT			6
+#define E4_USTORM_RDMA_CONN_AG_CTX_CQ_SE_EN_MASK		0x1
+#define E4_USTORM_RDMA_CONN_AG_CTX_CQ_SE_EN_SHIFT		7
 	u8 flags3;
-#define USTORM_RDMA_CONN_AG_CTX_CQ_EN_MASK            0x1
-#define USTORM_RDMA_CONN_AG_CTX_CQ_EN_SHIFT           0
-#define USTORM_RDMA_CONN_AG_CTX_RULE2EN_MASK          0x1
-#define USTORM_RDMA_CONN_AG_CTX_RULE2EN_SHIFT         1
-#define USTORM_RDMA_CONN_AG_CTX_RULE3EN_MASK          0x1
-#define USTORM_RDMA_CONN_AG_CTX_RULE3EN_SHIFT         2
-#define USTORM_RDMA_CONN_AG_CTX_RULE4EN_MASK          0x1
-#define USTORM_RDMA_CONN_AG_CTX_RULE4EN_SHIFT         3
-#define USTORM_RDMA_CONN_AG_CTX_RULE5EN_MASK          0x1
-#define USTORM_RDMA_CONN_AG_CTX_RULE5EN_SHIFT         4
-#define USTORM_RDMA_CONN_AG_CTX_RULE6EN_MASK          0x1
-#define USTORM_RDMA_CONN_AG_CTX_RULE6EN_SHIFT         5
-#define USTORM_RDMA_CONN_AG_CTX_RULE7EN_MASK          0x1
-#define USTORM_RDMA_CONN_AG_CTX_RULE7EN_SHIFT         6
-#define USTORM_RDMA_CONN_AG_CTX_RULE8EN_MASK          0x1
-#define USTORM_RDMA_CONN_AG_CTX_RULE8EN_SHIFT         7
+#define E4_USTORM_RDMA_CONN_AG_CTX_CQ_EN_MASK		0x1
+#define E4_USTORM_RDMA_CONN_AG_CTX_CQ_EN_SHIFT		0
+#define E4_USTORM_RDMA_CONN_AG_CTX_RULE2EN_MASK		0x1
+#define E4_USTORM_RDMA_CONN_AG_CTX_RULE2EN_SHIFT	1
+#define E4_USTORM_RDMA_CONN_AG_CTX_RULE3EN_MASK		0x1
+#define E4_USTORM_RDMA_CONN_AG_CTX_RULE3EN_SHIFT	2
+#define E4_USTORM_RDMA_CONN_AG_CTX_RULE4EN_MASK		0x1
+#define E4_USTORM_RDMA_CONN_AG_CTX_RULE4EN_SHIFT	3
+#define E4_USTORM_RDMA_CONN_AG_CTX_RULE5EN_MASK		0x1
+#define E4_USTORM_RDMA_CONN_AG_CTX_RULE5EN_SHIFT	4
+#define E4_USTORM_RDMA_CONN_AG_CTX_RULE6EN_MASK		0x1
+#define E4_USTORM_RDMA_CONN_AG_CTX_RULE6EN_SHIFT	5
+#define E4_USTORM_RDMA_CONN_AG_CTX_RULE7EN_MASK		0x1
+#define E4_USTORM_RDMA_CONN_AG_CTX_RULE7EN_SHIFT	6
+#define E4_USTORM_RDMA_CONN_AG_CTX_RULE8EN_MASK		0x1
+#define E4_USTORM_RDMA_CONN_AG_CTX_RULE8EN_SHIFT	7
 	u8 byte2;
 	u8 byte3;
 	__le16 conn_dpi;
@@ -7072,214 +7624,214 @@ struct ustorm_rdma_conn_ag_ctx {
 	__le16 word3;
 };
 
-struct xstorm_rdma_conn_ag_ctx {
+struct e4_xstorm_rdma_conn_ag_ctx {
 	u8 reserved0;
 	u8 state;
 	u8 flags0;
-#define XSTORM_RDMA_CONN_AG_CTX_EXIST_IN_QM0_MASK      0x1
-#define XSTORM_RDMA_CONN_AG_CTX_EXIST_IN_QM0_SHIFT     0
-#define XSTORM_RDMA_CONN_AG_CTX_BIT1_MASK              0x1
-#define XSTORM_RDMA_CONN_AG_CTX_BIT1_SHIFT             1
-#define XSTORM_RDMA_CONN_AG_CTX_BIT2_MASK              0x1
-#define XSTORM_RDMA_CONN_AG_CTX_BIT2_SHIFT             2
-#define XSTORM_RDMA_CONN_AG_CTX_EXIST_IN_QM3_MASK      0x1
-#define XSTORM_RDMA_CONN_AG_CTX_EXIST_IN_QM3_SHIFT     3
-#define XSTORM_RDMA_CONN_AG_CTX_BIT4_MASK              0x1
-#define XSTORM_RDMA_CONN_AG_CTX_BIT4_SHIFT             4
-#define XSTORM_RDMA_CONN_AG_CTX_BIT5_MASK              0x1
-#define XSTORM_RDMA_CONN_AG_CTX_BIT5_SHIFT             5
-#define XSTORM_RDMA_CONN_AG_CTX_BIT6_MASK              0x1
-#define XSTORM_RDMA_CONN_AG_CTX_BIT6_SHIFT             6
-#define XSTORM_RDMA_CONN_AG_CTX_BIT7_MASK              0x1
-#define XSTORM_RDMA_CONN_AG_CTX_BIT7_SHIFT             7
+#define E4_XSTORM_RDMA_CONN_AG_CTX_EXIST_IN_QM0_MASK	0x1
+#define E4_XSTORM_RDMA_CONN_AG_CTX_EXIST_IN_QM0_SHIFT	0
+#define E4_XSTORM_RDMA_CONN_AG_CTX_BIT1_MASK		0x1
+#define E4_XSTORM_RDMA_CONN_AG_CTX_BIT1_SHIFT		1
+#define E4_XSTORM_RDMA_CONN_AG_CTX_BIT2_MASK		0x1
+#define E4_XSTORM_RDMA_CONN_AG_CTX_BIT2_SHIFT		2
+#define E4_XSTORM_RDMA_CONN_AG_CTX_EXIST_IN_QM3_MASK	0x1
+#define E4_XSTORM_RDMA_CONN_AG_CTX_EXIST_IN_QM3_SHIFT	3
+#define E4_XSTORM_RDMA_CONN_AG_CTX_BIT4_MASK		0x1
+#define E4_XSTORM_RDMA_CONN_AG_CTX_BIT4_SHIFT		4
+#define E4_XSTORM_RDMA_CONN_AG_CTX_BIT5_MASK		0x1
+#define E4_XSTORM_RDMA_CONN_AG_CTX_BIT5_SHIFT		5
+#define E4_XSTORM_RDMA_CONN_AG_CTX_BIT6_MASK		0x1
+#define E4_XSTORM_RDMA_CONN_AG_CTX_BIT6_SHIFT		6
+#define E4_XSTORM_RDMA_CONN_AG_CTX_BIT7_MASK		0x1
+#define E4_XSTORM_RDMA_CONN_AG_CTX_BIT7_SHIFT		7
 	u8 flags1;
-#define XSTORM_RDMA_CONN_AG_CTX_BIT8_MASK              0x1
-#define XSTORM_RDMA_CONN_AG_CTX_BIT8_SHIFT             0
-#define XSTORM_RDMA_CONN_AG_CTX_BIT9_MASK              0x1
-#define XSTORM_RDMA_CONN_AG_CTX_BIT9_SHIFT             1
-#define XSTORM_RDMA_CONN_AG_CTX_BIT10_MASK             0x1
-#define XSTORM_RDMA_CONN_AG_CTX_BIT10_SHIFT            2
-#define XSTORM_RDMA_CONN_AG_CTX_BIT11_MASK             0x1
-#define XSTORM_RDMA_CONN_AG_CTX_BIT11_SHIFT            3
-#define XSTORM_RDMA_CONN_AG_CTX_BIT12_MASK             0x1
-#define XSTORM_RDMA_CONN_AG_CTX_BIT12_SHIFT            4
-#define XSTORM_RDMA_CONN_AG_CTX_MSTORM_FLUSH_MASK      0x1
-#define XSTORM_RDMA_CONN_AG_CTX_MSTORM_FLUSH_SHIFT     5
-#define XSTORM_RDMA_CONN_AG_CTX_BIT14_MASK             0x1
-#define XSTORM_RDMA_CONN_AG_CTX_BIT14_SHIFT            6
-#define XSTORM_RDMA_CONN_AG_CTX_YSTORM_FLUSH_MASK      0x1
-#define XSTORM_RDMA_CONN_AG_CTX_YSTORM_FLUSH_SHIFT     7
+#define E4_XSTORM_RDMA_CONN_AG_CTX_BIT8_MASK		0x1
+#define E4_XSTORM_RDMA_CONN_AG_CTX_BIT8_SHIFT		0
+#define E4_XSTORM_RDMA_CONN_AG_CTX_BIT9_MASK		0x1
+#define E4_XSTORM_RDMA_CONN_AG_CTX_BIT9_SHIFT		1
+#define E4_XSTORM_RDMA_CONN_AG_CTX_BIT10_MASK		0x1
+#define E4_XSTORM_RDMA_CONN_AG_CTX_BIT10_SHIFT		2
+#define E4_XSTORM_RDMA_CONN_AG_CTX_BIT11_MASK		0x1
+#define E4_XSTORM_RDMA_CONN_AG_CTX_BIT11_SHIFT		3
+#define E4_XSTORM_RDMA_CONN_AG_CTX_BIT12_MASK		0x1
+#define E4_XSTORM_RDMA_CONN_AG_CTX_BIT12_SHIFT		4
+#define E4_XSTORM_RDMA_CONN_AG_CTX_MSTORM_FLUSH_MASK	0x1
+#define E4_XSTORM_RDMA_CONN_AG_CTX_MSTORM_FLUSH_SHIFT	5
+#define E4_XSTORM_RDMA_CONN_AG_CTX_BIT14_MASK		0x1
+#define E4_XSTORM_RDMA_CONN_AG_CTX_BIT14_SHIFT		6
+#define E4_XSTORM_RDMA_CONN_AG_CTX_YSTORM_FLUSH_MASK	0x1
+#define E4_XSTORM_RDMA_CONN_AG_CTX_YSTORM_FLUSH_SHIFT	7
 	u8 flags2;
-#define XSTORM_RDMA_CONN_AG_CTX_CF0_MASK               0x3
-#define XSTORM_RDMA_CONN_AG_CTX_CF0_SHIFT              0
-#define XSTORM_RDMA_CONN_AG_CTX_CF1_MASK               0x3
-#define XSTORM_RDMA_CONN_AG_CTX_CF1_SHIFT              2
-#define XSTORM_RDMA_CONN_AG_CTX_CF2_MASK               0x3
-#define XSTORM_RDMA_CONN_AG_CTX_CF2_SHIFT              4
-#define XSTORM_RDMA_CONN_AG_CTX_CF3_MASK               0x3
-#define XSTORM_RDMA_CONN_AG_CTX_CF3_SHIFT              6
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF0_MASK	0x3
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF0_SHIFT	0
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF1_MASK	0x3
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF1_SHIFT	2
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF2_MASK	0x3
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF2_SHIFT	4
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF3_MASK	0x3
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF3_SHIFT	6
 	u8 flags3;
-#define XSTORM_RDMA_CONN_AG_CTX_CF4_MASK               0x3
-#define XSTORM_RDMA_CONN_AG_CTX_CF4_SHIFT              0
-#define XSTORM_RDMA_CONN_AG_CTX_CF5_MASK               0x3
-#define XSTORM_RDMA_CONN_AG_CTX_CF5_SHIFT              2
-#define XSTORM_RDMA_CONN_AG_CTX_CF6_MASK               0x3
-#define XSTORM_RDMA_CONN_AG_CTX_CF6_SHIFT              4
-#define XSTORM_RDMA_CONN_AG_CTX_FLUSH_Q0_CF_MASK       0x3
-#define XSTORM_RDMA_CONN_AG_CTX_FLUSH_Q0_CF_SHIFT      6
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF4_MASK		0x3
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF4_SHIFT		0
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF5_MASK		0x3
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF5_SHIFT		2
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF6_MASK		0x3
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF6_SHIFT		4
+#define E4_XSTORM_RDMA_CONN_AG_CTX_FLUSH_Q0_CF_MASK	0x3
+#define E4_XSTORM_RDMA_CONN_AG_CTX_FLUSH_Q0_CF_SHIFT	6
 	u8 flags4;
-#define XSTORM_RDMA_CONN_AG_CTX_CF8_MASK               0x3
-#define XSTORM_RDMA_CONN_AG_CTX_CF8_SHIFT              0
-#define XSTORM_RDMA_CONN_AG_CTX_CF9_MASK               0x3
-#define XSTORM_RDMA_CONN_AG_CTX_CF9_SHIFT              2
-#define XSTORM_RDMA_CONN_AG_CTX_CF10_MASK              0x3
-#define XSTORM_RDMA_CONN_AG_CTX_CF10_SHIFT             4
-#define XSTORM_RDMA_CONN_AG_CTX_CF11_MASK              0x3
-#define XSTORM_RDMA_CONN_AG_CTX_CF11_SHIFT             6
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF8_MASK	0x3
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF8_SHIFT	0
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF9_MASK	0x3
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF9_SHIFT	2
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF10_MASK	0x3
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF10_SHIFT	4
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF11_MASK	0x3
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF11_SHIFT	6
 	u8 flags5;
-#define XSTORM_RDMA_CONN_AG_CTX_CF12_MASK              0x3
-#define XSTORM_RDMA_CONN_AG_CTX_CF12_SHIFT             0
-#define XSTORM_RDMA_CONN_AG_CTX_CF13_MASK              0x3
-#define XSTORM_RDMA_CONN_AG_CTX_CF13_SHIFT             2
-#define XSTORM_RDMA_CONN_AG_CTX_CF14_MASK              0x3
-#define XSTORM_RDMA_CONN_AG_CTX_CF14_SHIFT             4
-#define XSTORM_RDMA_CONN_AG_CTX_CF15_MASK              0x3
-#define XSTORM_RDMA_CONN_AG_CTX_CF15_SHIFT             6
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF12_MASK	0x3
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF12_SHIFT	0
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF13_MASK	0x3
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF13_SHIFT	2
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF14_MASK	0x3
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF14_SHIFT	4
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF15_MASK	0x3
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF15_SHIFT	6
 	u8 flags6;
-#define XSTORM_RDMA_CONN_AG_CTX_CF16_MASK              0x3
-#define XSTORM_RDMA_CONN_AG_CTX_CF16_SHIFT             0
-#define XSTORM_RDMA_CONN_AG_CTX_CF17_MASK              0x3
-#define XSTORM_RDMA_CONN_AG_CTX_CF17_SHIFT             2
-#define XSTORM_RDMA_CONN_AG_CTX_CF18_MASK              0x3
-#define XSTORM_RDMA_CONN_AG_CTX_CF18_SHIFT             4
-#define XSTORM_RDMA_CONN_AG_CTX_CF19_MASK              0x3
-#define XSTORM_RDMA_CONN_AG_CTX_CF19_SHIFT             6
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF16_MASK	0x3
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF16_SHIFT	0
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF17_MASK	0x3
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF17_SHIFT	2
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF18_MASK	0x3
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF18_SHIFT	4
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF19_MASK	0x3
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF19_SHIFT	6
 	u8 flags7;
-#define XSTORM_RDMA_CONN_AG_CTX_CF20_MASK              0x3
-#define XSTORM_RDMA_CONN_AG_CTX_CF20_SHIFT             0
-#define XSTORM_RDMA_CONN_AG_CTX_CF21_MASK              0x3
-#define XSTORM_RDMA_CONN_AG_CTX_CF21_SHIFT             2
-#define XSTORM_RDMA_CONN_AG_CTX_SLOW_PATH_MASK         0x3
-#define XSTORM_RDMA_CONN_AG_CTX_SLOW_PATH_SHIFT        4
-#define XSTORM_RDMA_CONN_AG_CTX_CF0EN_MASK             0x1
-#define XSTORM_RDMA_CONN_AG_CTX_CF0EN_SHIFT            6
-#define XSTORM_RDMA_CONN_AG_CTX_CF1EN_MASK             0x1
-#define XSTORM_RDMA_CONN_AG_CTX_CF1EN_SHIFT            7
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF20_MASK		0x3
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF20_SHIFT		0
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF21_MASK		0x3
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF21_SHIFT		2
+#define E4_XSTORM_RDMA_CONN_AG_CTX_SLOW_PATH_MASK	0x3
+#define E4_XSTORM_RDMA_CONN_AG_CTX_SLOW_PATH_SHIFT	4
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF0EN_MASK		0x1
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF0EN_SHIFT		6
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF1EN_MASK		0x1
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF1EN_SHIFT		7
 	u8 flags8;
-#define XSTORM_RDMA_CONN_AG_CTX_CF2EN_MASK             0x1
-#define XSTORM_RDMA_CONN_AG_CTX_CF2EN_SHIFT            0
-#define XSTORM_RDMA_CONN_AG_CTX_CF3EN_MASK             0x1
-#define XSTORM_RDMA_CONN_AG_CTX_CF3EN_SHIFT            1
-#define XSTORM_RDMA_CONN_AG_CTX_CF4EN_MASK             0x1
-#define XSTORM_RDMA_CONN_AG_CTX_CF4EN_SHIFT            2
-#define XSTORM_RDMA_CONN_AG_CTX_CF5EN_MASK             0x1
-#define XSTORM_RDMA_CONN_AG_CTX_CF5EN_SHIFT            3
-#define XSTORM_RDMA_CONN_AG_CTX_CF6EN_MASK             0x1
-#define XSTORM_RDMA_CONN_AG_CTX_CF6EN_SHIFT            4
-#define XSTORM_RDMA_CONN_AG_CTX_FLUSH_Q0_CF_EN_MASK    0x1
-#define XSTORM_RDMA_CONN_AG_CTX_FLUSH_Q0_CF_EN_SHIFT   5
-#define XSTORM_RDMA_CONN_AG_CTX_CF8EN_MASK             0x1
-#define XSTORM_RDMA_CONN_AG_CTX_CF8EN_SHIFT            6
-#define XSTORM_RDMA_CONN_AG_CTX_CF9EN_MASK             0x1
-#define XSTORM_RDMA_CONN_AG_CTX_CF9EN_SHIFT            7
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF2EN_MASK		0x1
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF2EN_SHIFT		0
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF3EN_MASK		0x1
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF3EN_SHIFT		1
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF4EN_MASK		0x1
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF4EN_SHIFT		2
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF5EN_MASK		0x1
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF5EN_SHIFT		3
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF6EN_MASK		0x1
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF6EN_SHIFT		4
+#define E4_XSTORM_RDMA_CONN_AG_CTX_FLUSH_Q0_CF_EN_MASK	0x1
+#define E4_XSTORM_RDMA_CONN_AG_CTX_FLUSH_Q0_CF_EN_SHIFT	5
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF8EN_MASK		0x1
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF8EN_SHIFT		6
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF9EN_MASK		0x1
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF9EN_SHIFT		7
 	u8 flags9;
-#define XSTORM_RDMA_CONN_AG_CTX_CF10EN_MASK            0x1
-#define XSTORM_RDMA_CONN_AG_CTX_CF10EN_SHIFT           0
-#define XSTORM_RDMA_CONN_AG_CTX_CF11EN_MASK            0x1
-#define XSTORM_RDMA_CONN_AG_CTX_CF11EN_SHIFT           1
-#define XSTORM_RDMA_CONN_AG_CTX_CF12EN_MASK            0x1
-#define XSTORM_RDMA_CONN_AG_CTX_CF12EN_SHIFT           2
-#define XSTORM_RDMA_CONN_AG_CTX_CF13EN_MASK            0x1
-#define XSTORM_RDMA_CONN_AG_CTX_CF13EN_SHIFT           3
-#define XSTORM_RDMA_CONN_AG_CTX_CF14EN_MASK            0x1
-#define XSTORM_RDMA_CONN_AG_CTX_CF14EN_SHIFT           4
-#define XSTORM_RDMA_CONN_AG_CTX_CF15EN_MASK            0x1
-#define XSTORM_RDMA_CONN_AG_CTX_CF15EN_SHIFT           5
-#define XSTORM_RDMA_CONN_AG_CTX_CF16EN_MASK            0x1
-#define XSTORM_RDMA_CONN_AG_CTX_CF16EN_SHIFT           6
-#define XSTORM_RDMA_CONN_AG_CTX_CF17EN_MASK            0x1
-#define XSTORM_RDMA_CONN_AG_CTX_CF17EN_SHIFT           7
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF10EN_MASK	0x1
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF10EN_SHIFT	0
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF11EN_MASK	0x1
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF11EN_SHIFT	1
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF12EN_MASK	0x1
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF12EN_SHIFT	2
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF13EN_MASK	0x1
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF13EN_SHIFT	3
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF14EN_MASK	0x1
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF14EN_SHIFT	4
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF15EN_MASK	0x1
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF15EN_SHIFT	5
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF16EN_MASK	0x1
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF16EN_SHIFT	6
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF17EN_MASK	0x1
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF17EN_SHIFT	7
 	u8 flags10;
-#define XSTORM_RDMA_CONN_AG_CTX_CF18EN_MASK            0x1
-#define XSTORM_RDMA_CONN_AG_CTX_CF18EN_SHIFT           0
-#define XSTORM_RDMA_CONN_AG_CTX_CF19EN_MASK            0x1
-#define XSTORM_RDMA_CONN_AG_CTX_CF19EN_SHIFT           1
-#define XSTORM_RDMA_CONN_AG_CTX_CF20EN_MASK            0x1
-#define XSTORM_RDMA_CONN_AG_CTX_CF20EN_SHIFT           2
-#define XSTORM_RDMA_CONN_AG_CTX_CF21EN_MASK            0x1
-#define XSTORM_RDMA_CONN_AG_CTX_CF21EN_SHIFT           3
-#define XSTORM_RDMA_CONN_AG_CTX_SLOW_PATH_EN_MASK      0x1
-#define XSTORM_RDMA_CONN_AG_CTX_SLOW_PATH_EN_SHIFT     4
-#define XSTORM_RDMA_CONN_AG_CTX_CF23EN_MASK            0x1
-#define XSTORM_RDMA_CONN_AG_CTX_CF23EN_SHIFT           5
-#define XSTORM_RDMA_CONN_AG_CTX_RULE0EN_MASK           0x1
-#define XSTORM_RDMA_CONN_AG_CTX_RULE0EN_SHIFT          6
-#define XSTORM_RDMA_CONN_AG_CTX_RULE1EN_MASK           0x1
-#define XSTORM_RDMA_CONN_AG_CTX_RULE1EN_SHIFT          7
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF18EN_MASK		0x1
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF18EN_SHIFT		0
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF19EN_MASK		0x1
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF19EN_SHIFT		1
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF20EN_MASK		0x1
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF20EN_SHIFT		2
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF21EN_MASK		0x1
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF21EN_SHIFT		3
+#define E4_XSTORM_RDMA_CONN_AG_CTX_SLOW_PATH_EN_MASK	0x1
+#define E4_XSTORM_RDMA_CONN_AG_CTX_SLOW_PATH_EN_SHIFT	4
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF23EN_MASK		0x1
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF23EN_SHIFT		5
+#define E4_XSTORM_RDMA_CONN_AG_CTX_RULE0EN_MASK		0x1
+#define E4_XSTORM_RDMA_CONN_AG_CTX_RULE0EN_SHIFT	6
+#define E4_XSTORM_RDMA_CONN_AG_CTX_RULE1EN_MASK		0x1
+#define E4_XSTORM_RDMA_CONN_AG_CTX_RULE1EN_SHIFT	7
 	u8 flags11;
-#define XSTORM_RDMA_CONN_AG_CTX_RULE2EN_MASK           0x1
-#define XSTORM_RDMA_CONN_AG_CTX_RULE2EN_SHIFT          0
-#define XSTORM_RDMA_CONN_AG_CTX_RULE3EN_MASK           0x1
-#define XSTORM_RDMA_CONN_AG_CTX_RULE3EN_SHIFT          1
-#define XSTORM_RDMA_CONN_AG_CTX_RULE4EN_MASK           0x1
-#define XSTORM_RDMA_CONN_AG_CTX_RULE4EN_SHIFT          2
-#define XSTORM_RDMA_CONN_AG_CTX_RULE5EN_MASK           0x1
-#define XSTORM_RDMA_CONN_AG_CTX_RULE5EN_SHIFT          3
-#define XSTORM_RDMA_CONN_AG_CTX_RULE6EN_MASK           0x1
-#define XSTORM_RDMA_CONN_AG_CTX_RULE6EN_SHIFT          4
-#define XSTORM_RDMA_CONN_AG_CTX_RULE7EN_MASK           0x1
-#define XSTORM_RDMA_CONN_AG_CTX_RULE7EN_SHIFT          5
-#define XSTORM_RDMA_CONN_AG_CTX_A0_RESERVED1_MASK      0x1
-#define XSTORM_RDMA_CONN_AG_CTX_A0_RESERVED1_SHIFT     6
-#define XSTORM_RDMA_CONN_AG_CTX_RULE9EN_MASK           0x1
-#define XSTORM_RDMA_CONN_AG_CTX_RULE9EN_SHIFT          7
+#define E4_XSTORM_RDMA_CONN_AG_CTX_RULE2EN_MASK		0x1
+#define E4_XSTORM_RDMA_CONN_AG_CTX_RULE2EN_SHIFT	0
+#define E4_XSTORM_RDMA_CONN_AG_CTX_RULE3EN_MASK		0x1
+#define E4_XSTORM_RDMA_CONN_AG_CTX_RULE3EN_SHIFT	1
+#define E4_XSTORM_RDMA_CONN_AG_CTX_RULE4EN_MASK		0x1
+#define E4_XSTORM_RDMA_CONN_AG_CTX_RULE4EN_SHIFT	2
+#define E4_XSTORM_RDMA_CONN_AG_CTX_RULE5EN_MASK		0x1
+#define E4_XSTORM_RDMA_CONN_AG_CTX_RULE5EN_SHIFT	3
+#define E4_XSTORM_RDMA_CONN_AG_CTX_RULE6EN_MASK		0x1
+#define E4_XSTORM_RDMA_CONN_AG_CTX_RULE6EN_SHIFT	4
+#define E4_XSTORM_RDMA_CONN_AG_CTX_RULE7EN_MASK		0x1
+#define E4_XSTORM_RDMA_CONN_AG_CTX_RULE7EN_SHIFT	5
+#define E4_XSTORM_RDMA_CONN_AG_CTX_A0_RESERVED1_MASK	0x1
+#define E4_XSTORM_RDMA_CONN_AG_CTX_A0_RESERVED1_SHIFT	6
+#define E4_XSTORM_RDMA_CONN_AG_CTX_RULE9EN_MASK		0x1
+#define E4_XSTORM_RDMA_CONN_AG_CTX_RULE9EN_SHIFT	7
 	u8 flags12;
-#define XSTORM_RDMA_CONN_AG_CTX_RULE10EN_MASK          0x1
-#define XSTORM_RDMA_CONN_AG_CTX_RULE10EN_SHIFT         0
-#define XSTORM_RDMA_CONN_AG_CTX_RULE11EN_MASK          0x1
-#define XSTORM_RDMA_CONN_AG_CTX_RULE11EN_SHIFT         1
-#define XSTORM_RDMA_CONN_AG_CTX_A0_RESERVED2_MASK      0x1
-#define XSTORM_RDMA_CONN_AG_CTX_A0_RESERVED2_SHIFT     2
-#define XSTORM_RDMA_CONN_AG_CTX_A0_RESERVED3_MASK      0x1
-#define XSTORM_RDMA_CONN_AG_CTX_A0_RESERVED3_SHIFT     3
-#define XSTORM_RDMA_CONN_AG_CTX_RULE14EN_MASK          0x1
-#define XSTORM_RDMA_CONN_AG_CTX_RULE14EN_SHIFT         4
-#define XSTORM_RDMA_CONN_AG_CTX_RULE15EN_MASK          0x1
-#define XSTORM_RDMA_CONN_AG_CTX_RULE15EN_SHIFT         5
-#define XSTORM_RDMA_CONN_AG_CTX_RULE16EN_MASK          0x1
-#define XSTORM_RDMA_CONN_AG_CTX_RULE16EN_SHIFT         6
-#define XSTORM_RDMA_CONN_AG_CTX_RULE17EN_MASK          0x1
-#define XSTORM_RDMA_CONN_AG_CTX_RULE17EN_SHIFT         7
+#define E4_XSTORM_RDMA_CONN_AG_CTX_RULE10EN_MASK	0x1
+#define E4_XSTORM_RDMA_CONN_AG_CTX_RULE10EN_SHIFT	0
+#define E4_XSTORM_RDMA_CONN_AG_CTX_RULE11EN_MASK	0x1
+#define E4_XSTORM_RDMA_CONN_AG_CTX_RULE11EN_SHIFT	1
+#define E4_XSTORM_RDMA_CONN_AG_CTX_A0_RESERVED2_MASK	0x1
+#define E4_XSTORM_RDMA_CONN_AG_CTX_A0_RESERVED2_SHIFT	2
+#define E4_XSTORM_RDMA_CONN_AG_CTX_A0_RESERVED3_MASK	0x1
+#define E4_XSTORM_RDMA_CONN_AG_CTX_A0_RESERVED3_SHIFT	3
+#define E4_XSTORM_RDMA_CONN_AG_CTX_RULE14EN_MASK	0x1
+#define E4_XSTORM_RDMA_CONN_AG_CTX_RULE14EN_SHIFT	4
+#define E4_XSTORM_RDMA_CONN_AG_CTX_RULE15EN_MASK	0x1
+#define E4_XSTORM_RDMA_CONN_AG_CTX_RULE15EN_SHIFT	5
+#define E4_XSTORM_RDMA_CONN_AG_CTX_RULE16EN_MASK	0x1
+#define E4_XSTORM_RDMA_CONN_AG_CTX_RULE16EN_SHIFT	6
+#define E4_XSTORM_RDMA_CONN_AG_CTX_RULE17EN_MASK	0x1
+#define E4_XSTORM_RDMA_CONN_AG_CTX_RULE17EN_SHIFT	7
 	u8 flags13;
-#define XSTORM_RDMA_CONN_AG_CTX_RULE18EN_MASK          0x1
-#define XSTORM_RDMA_CONN_AG_CTX_RULE18EN_SHIFT         0
-#define XSTORM_RDMA_CONN_AG_CTX_RULE19EN_MASK          0x1
-#define XSTORM_RDMA_CONN_AG_CTX_RULE19EN_SHIFT         1
-#define XSTORM_RDMA_CONN_AG_CTX_A0_RESERVED4_MASK      0x1
-#define XSTORM_RDMA_CONN_AG_CTX_A0_RESERVED4_SHIFT     2
-#define XSTORM_RDMA_CONN_AG_CTX_A0_RESERVED5_MASK      0x1
-#define XSTORM_RDMA_CONN_AG_CTX_A0_RESERVED5_SHIFT     3
-#define XSTORM_RDMA_CONN_AG_CTX_A0_RESERVED6_MASK      0x1
-#define XSTORM_RDMA_CONN_AG_CTX_A0_RESERVED6_SHIFT     4
-#define XSTORM_RDMA_CONN_AG_CTX_A0_RESERVED7_MASK      0x1
-#define XSTORM_RDMA_CONN_AG_CTX_A0_RESERVED7_SHIFT     5
-#define XSTORM_RDMA_CONN_AG_CTX_A0_RESERVED8_MASK      0x1
-#define XSTORM_RDMA_CONN_AG_CTX_A0_RESERVED8_SHIFT     6
-#define XSTORM_RDMA_CONN_AG_CTX_A0_RESERVED9_MASK      0x1
-#define XSTORM_RDMA_CONN_AG_CTX_A0_RESERVED9_SHIFT     7
+#define E4_XSTORM_RDMA_CONN_AG_CTX_RULE18EN_MASK	0x1
+#define E4_XSTORM_RDMA_CONN_AG_CTX_RULE18EN_SHIFT	0
+#define E4_XSTORM_RDMA_CONN_AG_CTX_RULE19EN_MASK	0x1
+#define E4_XSTORM_RDMA_CONN_AG_CTX_RULE19EN_SHIFT	1
+#define E4_XSTORM_RDMA_CONN_AG_CTX_A0_RESERVED4_MASK	0x1
+#define E4_XSTORM_RDMA_CONN_AG_CTX_A0_RESERVED4_SHIFT	2
+#define E4_XSTORM_RDMA_CONN_AG_CTX_A0_RESERVED5_MASK	0x1
+#define E4_XSTORM_RDMA_CONN_AG_CTX_A0_RESERVED5_SHIFT	3
+#define E4_XSTORM_RDMA_CONN_AG_CTX_A0_RESERVED6_MASK	0x1
+#define E4_XSTORM_RDMA_CONN_AG_CTX_A0_RESERVED6_SHIFT	4
+#define E4_XSTORM_RDMA_CONN_AG_CTX_A0_RESERVED7_MASK	0x1
+#define E4_XSTORM_RDMA_CONN_AG_CTX_A0_RESERVED7_SHIFT	5
+#define E4_XSTORM_RDMA_CONN_AG_CTX_A0_RESERVED8_MASK	0x1
+#define E4_XSTORM_RDMA_CONN_AG_CTX_A0_RESERVED8_SHIFT	6
+#define E4_XSTORM_RDMA_CONN_AG_CTX_A0_RESERVED9_MASK	0x1
+#define E4_XSTORM_RDMA_CONN_AG_CTX_A0_RESERVED9_SHIFT	7
 	u8 flags14;
-#define XSTORM_RDMA_CONN_AG_CTX_MIGRATION_MASK         0x1
-#define XSTORM_RDMA_CONN_AG_CTX_MIGRATION_SHIFT        0
-#define XSTORM_RDMA_CONN_AG_CTX_BIT17_MASK             0x1
-#define XSTORM_RDMA_CONN_AG_CTX_BIT17_SHIFT            1
-#define XSTORM_RDMA_CONN_AG_CTX_DPM_PORT_NUM_MASK      0x3
-#define XSTORM_RDMA_CONN_AG_CTX_DPM_PORT_NUM_SHIFT     2
-#define XSTORM_RDMA_CONN_AG_CTX_RESERVED_MASK          0x1
-#define XSTORM_RDMA_CONN_AG_CTX_RESERVED_SHIFT         4
-#define XSTORM_RDMA_CONN_AG_CTX_ROCE_EDPM_ENABLE_MASK  0x1
-#define XSTORM_RDMA_CONN_AG_CTX_ROCE_EDPM_ENABLE_SHIFT 5
-#define XSTORM_RDMA_CONN_AG_CTX_CF23_MASK              0x3
-#define XSTORM_RDMA_CONN_AG_CTX_CF23_SHIFT             6
+#define E4_XSTORM_RDMA_CONN_AG_CTX_MIGRATION_MASK		0x1
+#define E4_XSTORM_RDMA_CONN_AG_CTX_MIGRATION_SHIFT		0
+#define E4_XSTORM_RDMA_CONN_AG_CTX_BIT17_MASK			0x1
+#define E4_XSTORM_RDMA_CONN_AG_CTX_BIT17_SHIFT			1
+#define E4_XSTORM_RDMA_CONN_AG_CTX_DPM_PORT_NUM_MASK		0x3
+#define E4_XSTORM_RDMA_CONN_AG_CTX_DPM_PORT_NUM_SHIFT		2
+#define E4_XSTORM_RDMA_CONN_AG_CTX_RESERVED_MASK		0x1
+#define E4_XSTORM_RDMA_CONN_AG_CTX_RESERVED_SHIFT		4
+#define E4_XSTORM_RDMA_CONN_AG_CTX_ROCE_EDPM_ENABLE_MASK	0x1
+#define E4_XSTORM_RDMA_CONN_AG_CTX_ROCE_EDPM_ENABLE_SHIFT	5
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF23_MASK			0x3
+#define E4_XSTORM_RDMA_CONN_AG_CTX_CF23_SHIFT			6
 	u8 byte2;
 	__le16 physical_q0;
 	__le16 word1;
@@ -7301,37 +7853,37 @@ struct xstorm_rdma_conn_ag_ctx {
 	__le32 reg6;
 };
 
-struct ystorm_rdma_conn_ag_ctx {
+struct e4_ystorm_rdma_conn_ag_ctx {
 	u8 byte0;
 	u8 byte1;
 	u8 flags0;
-#define YSTORM_RDMA_CONN_AG_CTX_BIT0_MASK     0x1
-#define YSTORM_RDMA_CONN_AG_CTX_BIT0_SHIFT    0
-#define YSTORM_RDMA_CONN_AG_CTX_BIT1_MASK     0x1
-#define YSTORM_RDMA_CONN_AG_CTX_BIT1_SHIFT    1
-#define YSTORM_RDMA_CONN_AG_CTX_CF0_MASK      0x3
-#define YSTORM_RDMA_CONN_AG_CTX_CF0_SHIFT     2
-#define YSTORM_RDMA_CONN_AG_CTX_CF1_MASK      0x3
-#define YSTORM_RDMA_CONN_AG_CTX_CF1_SHIFT     4
-#define YSTORM_RDMA_CONN_AG_CTX_CF2_MASK      0x3
-#define YSTORM_RDMA_CONN_AG_CTX_CF2_SHIFT     6
+#define E4_YSTORM_RDMA_CONN_AG_CTX_BIT0_MASK	0x1
+#define E4_YSTORM_RDMA_CONN_AG_CTX_BIT0_SHIFT	0
+#define E4_YSTORM_RDMA_CONN_AG_CTX_BIT1_MASK	0x1
+#define E4_YSTORM_RDMA_CONN_AG_CTX_BIT1_SHIFT	1
+#define E4_YSTORM_RDMA_CONN_AG_CTX_CF0_MASK	0x3
+#define E4_YSTORM_RDMA_CONN_AG_CTX_CF0_SHIFT	2
+#define E4_YSTORM_RDMA_CONN_AG_CTX_CF1_MASK	0x3
+#define E4_YSTORM_RDMA_CONN_AG_CTX_CF1_SHIFT	4
+#define E4_YSTORM_RDMA_CONN_AG_CTX_CF2_MASK	0x3
+#define E4_YSTORM_RDMA_CONN_AG_CTX_CF2_SHIFT	6
 	u8 flags1;
-#define YSTORM_RDMA_CONN_AG_CTX_CF0EN_MASK    0x1
-#define YSTORM_RDMA_CONN_AG_CTX_CF0EN_SHIFT   0
-#define YSTORM_RDMA_CONN_AG_CTX_CF1EN_MASK    0x1
-#define YSTORM_RDMA_CONN_AG_CTX_CF1EN_SHIFT   1
-#define YSTORM_RDMA_CONN_AG_CTX_CF2EN_MASK    0x1
-#define YSTORM_RDMA_CONN_AG_CTX_CF2EN_SHIFT   2
-#define YSTORM_RDMA_CONN_AG_CTX_RULE0EN_MASK  0x1
-#define YSTORM_RDMA_CONN_AG_CTX_RULE0EN_SHIFT 3
-#define YSTORM_RDMA_CONN_AG_CTX_RULE1EN_MASK  0x1
-#define YSTORM_RDMA_CONN_AG_CTX_RULE1EN_SHIFT 4
-#define YSTORM_RDMA_CONN_AG_CTX_RULE2EN_MASK  0x1
-#define YSTORM_RDMA_CONN_AG_CTX_RULE2EN_SHIFT 5
-#define YSTORM_RDMA_CONN_AG_CTX_RULE3EN_MASK  0x1
-#define YSTORM_RDMA_CONN_AG_CTX_RULE3EN_SHIFT 6
-#define YSTORM_RDMA_CONN_AG_CTX_RULE4EN_MASK  0x1
-#define YSTORM_RDMA_CONN_AG_CTX_RULE4EN_SHIFT 7
+#define E4_YSTORM_RDMA_CONN_AG_CTX_CF0EN_MASK		0x1
+#define E4_YSTORM_RDMA_CONN_AG_CTX_CF0EN_SHIFT		0
+#define E4_YSTORM_RDMA_CONN_AG_CTX_CF1EN_MASK		0x1
+#define E4_YSTORM_RDMA_CONN_AG_CTX_CF1EN_SHIFT		1
+#define E4_YSTORM_RDMA_CONN_AG_CTX_CF2EN_MASK		0x1
+#define E4_YSTORM_RDMA_CONN_AG_CTX_CF2EN_SHIFT		2
+#define E4_YSTORM_RDMA_CONN_AG_CTX_RULE0EN_MASK		0x1
+#define E4_YSTORM_RDMA_CONN_AG_CTX_RULE0EN_SHIFT	3
+#define E4_YSTORM_RDMA_CONN_AG_CTX_RULE1EN_MASK		0x1
+#define E4_YSTORM_RDMA_CONN_AG_CTX_RULE1EN_SHIFT	4
+#define E4_YSTORM_RDMA_CONN_AG_CTX_RULE2EN_MASK		0x1
+#define E4_YSTORM_RDMA_CONN_AG_CTX_RULE2EN_SHIFT	5
+#define E4_YSTORM_RDMA_CONN_AG_CTX_RULE3EN_MASK		0x1
+#define E4_YSTORM_RDMA_CONN_AG_CTX_RULE3EN_SHIFT	6
+#define E4_YSTORM_RDMA_CONN_AG_CTX_RULE4EN_MASK		0x1
+#define E4_YSTORM_RDMA_CONN_AG_CTX_RULE4EN_SHIFT	7
 	u8 byte2;
 	u8 byte3;
 	__le16 word0;
@@ -7345,62 +7897,70 @@ struct ystorm_rdma_conn_ag_ctx {
 	__le32 reg3;
 };
 
-struct mstorm_roce_conn_st_ctx {
-	struct regpair temp[6];
-};
-
-struct pstorm_roce_conn_st_ctx {
-	struct regpair temp[16];
-};
-
+/* The roce storm context of Ystorm */
 struct ystorm_roce_conn_st_ctx {
 	struct regpair temp[2];
 };
 
+/* The roce storm context of Pstorm */
+struct pstorm_roce_conn_st_ctx {
+	struct regpair temp[16];
+};
+
+/* The roce storm context of Xstorm */
 struct xstorm_roce_conn_st_ctx {
 	struct regpair temp[24];
 };
 
+/* The roce storm context of Tstorm */
 struct tstorm_roce_conn_st_ctx {
 	struct regpair temp[30];
 };
 
+/* The roce storm context of Mstorm */
+struct mstorm_roce_conn_st_ctx {
+	struct regpair temp[6];
+};
+
+/* The roce storm context of Ustorm */
 struct ustorm_roce_conn_st_ctx {
 	struct regpair temp[12];
 };
 
-struct roce_conn_context {
+/* roce connection context */
+struct e4_roce_conn_context {
 	struct ystorm_roce_conn_st_ctx ystorm_st_context;
 	struct regpair ystorm_st_padding[2];
 	struct pstorm_roce_conn_st_ctx pstorm_st_context;
 	struct xstorm_roce_conn_st_ctx xstorm_st_context;
 	struct regpair xstorm_st_padding[2];
-	struct xstorm_rdma_conn_ag_ctx xstorm_ag_context;
-	struct tstorm_rdma_conn_ag_ctx tstorm_ag_context;
+	struct e4_xstorm_rdma_conn_ag_ctx xstorm_ag_context;
+	struct e4_tstorm_rdma_conn_ag_ctx tstorm_ag_context;
 	struct timers_context timer_context;
-	struct ustorm_rdma_conn_ag_ctx ustorm_ag_context;
+	struct e4_ustorm_rdma_conn_ag_ctx ustorm_ag_context;
 	struct tstorm_roce_conn_st_ctx tstorm_st_context;
 	struct mstorm_roce_conn_st_ctx mstorm_st_context;
 	struct ustorm_roce_conn_st_ctx ustorm_st_context;
 	struct regpair ustorm_st_padding[2];
 };
 
+/* roce create qp requester ramrod data */
 struct roce_create_qp_req_ramrod_data {
 	__le16 flags;
-#define ROCE_CREATE_QP_REQ_RAMROD_DATA_ROCE_FLAVOR_MASK          0x3
-#define ROCE_CREATE_QP_REQ_RAMROD_DATA_ROCE_FLAVOR_SHIFT         0
-#define ROCE_CREATE_QP_REQ_RAMROD_DATA_FMR_AND_RESERVED_EN_MASK  0x1
-#define ROCE_CREATE_QP_REQ_RAMROD_DATA_FMR_AND_RESERVED_EN_SHIFT 2
-#define ROCE_CREATE_QP_REQ_RAMROD_DATA_SIGNALED_COMP_MASK        0x1
-#define ROCE_CREATE_QP_REQ_RAMROD_DATA_SIGNALED_COMP_SHIFT       3
-#define ROCE_CREATE_QP_REQ_RAMROD_DATA_PRI_MASK                  0x7
-#define ROCE_CREATE_QP_REQ_RAMROD_DATA_PRI_SHIFT                 4
-#define ROCE_CREATE_QP_REQ_RAMROD_DATA_RESERVED_MASK             0x1
-#define ROCE_CREATE_QP_REQ_RAMROD_DATA_RESERVED_SHIFT            7
-#define ROCE_CREATE_QP_REQ_RAMROD_DATA_ERR_RETRY_CNT_MASK        0xF
-#define ROCE_CREATE_QP_REQ_RAMROD_DATA_ERR_RETRY_CNT_SHIFT       8
-#define ROCE_CREATE_QP_REQ_RAMROD_DATA_RNR_NAK_CNT_MASK          0xF
-#define ROCE_CREATE_QP_REQ_RAMROD_DATA_RNR_NAK_CNT_SHIFT         12
+#define ROCE_CREATE_QP_REQ_RAMROD_DATA_ROCE_FLAVOR_MASK			0x3
+#define ROCE_CREATE_QP_REQ_RAMROD_DATA_ROCE_FLAVOR_SHIFT		0
+#define ROCE_CREATE_QP_REQ_RAMROD_DATA_FMR_AND_RESERVED_EN_MASK		0x1
+#define ROCE_CREATE_QP_REQ_RAMROD_DATA_FMR_AND_RESERVED_EN_SHIFT	2
+#define ROCE_CREATE_QP_REQ_RAMROD_DATA_SIGNALED_COMP_MASK		0x1
+#define ROCE_CREATE_QP_REQ_RAMROD_DATA_SIGNALED_COMP_SHIFT		3
+#define ROCE_CREATE_QP_REQ_RAMROD_DATA_PRI_MASK				0x7
+#define ROCE_CREATE_QP_REQ_RAMROD_DATA_PRI_SHIFT			4
+#define ROCE_CREATE_QP_REQ_RAMROD_DATA_RESERVED_MASK			0x1
+#define ROCE_CREATE_QP_REQ_RAMROD_DATA_RESERVED_SHIFT			7
+#define ROCE_CREATE_QP_REQ_RAMROD_DATA_ERR_RETRY_CNT_MASK		0xF
+#define ROCE_CREATE_QP_REQ_RAMROD_DATA_ERR_RETRY_CNT_SHIFT		8
+#define ROCE_CREATE_QP_REQ_RAMROD_DATA_RNR_NAK_CNT_MASK			0xF
+#define ROCE_CREATE_QP_REQ_RAMROD_DATA_RNR_NAK_CNT_SHIFT		12
 	u8 max_ord;
 	u8 traffic_class;
 	u8 hop_limit;
@@ -7431,26 +7991,27 @@ struct roce_create_qp_req_ramrod_data {
 	__le16 dpi;
 };
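
A caller filling roce_create_qp_req_ramrod_data sets the packed bits in
'flags' through these field names rather than open-coded shifts. A
hedged sketch using the SET_FIELD() pattern shown earlier (the function
name and argument values are placeholders, not from this patch):

  /* Sketch only: populate requester create-QP flags. */
  static void example_fill_req_flags(struct roce_create_qp_req_ramrod_data *p,
                                     u8 pri, u8 retry_cnt)
  {
          SET_FIELD(p->flags,
                    ROCE_CREATE_QP_REQ_RAMROD_DATA_SIGNALED_COMP, 1);
          SET_FIELD(p->flags,
                    ROCE_CREATE_QP_REQ_RAMROD_DATA_PRI, pri);
          SET_FIELD(p->flags,
                    ROCE_CREATE_QP_REQ_RAMROD_DATA_ERR_RETRY_CNT, retry_cnt);
  }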
 
+/* roce create qp responder ramrod data */
 struct roce_create_qp_resp_ramrod_data {
 	__le16 flags;
-#define ROCE_CREATE_QP_RESP_RAMROD_DATA_ROCE_FLAVOR_MASK          0x3
-#define ROCE_CREATE_QP_RESP_RAMROD_DATA_ROCE_FLAVOR_SHIFT         0
-#define ROCE_CREATE_QP_RESP_RAMROD_DATA_RDMA_RD_EN_MASK           0x1
-#define ROCE_CREATE_QP_RESP_RAMROD_DATA_RDMA_RD_EN_SHIFT          2
-#define ROCE_CREATE_QP_RESP_RAMROD_DATA_RDMA_WR_EN_MASK           0x1
-#define ROCE_CREATE_QP_RESP_RAMROD_DATA_RDMA_WR_EN_SHIFT          3
-#define ROCE_CREATE_QP_RESP_RAMROD_DATA_ATOMIC_EN_MASK            0x1
-#define ROCE_CREATE_QP_RESP_RAMROD_DATA_ATOMIC_EN_SHIFT           4
-#define ROCE_CREATE_QP_RESP_RAMROD_DATA_SRQ_FLG_MASK              0x1
-#define ROCE_CREATE_QP_RESP_RAMROD_DATA_SRQ_FLG_SHIFT             5
-#define ROCE_CREATE_QP_RESP_RAMROD_DATA_E2E_FLOW_CONTROL_EN_MASK  0x1
-#define ROCE_CREATE_QP_RESP_RAMROD_DATA_E2E_FLOW_CONTROL_EN_SHIFT 6
-#define ROCE_CREATE_QP_RESP_RAMROD_DATA_RESERVED_KEY_EN_MASK	0x1
-#define ROCE_CREATE_QP_RESP_RAMROD_DATA_RESERVED_KEY_EN_SHIFT	7
-#define ROCE_CREATE_QP_RESP_RAMROD_DATA_PRI_MASK                  0x7
-#define ROCE_CREATE_QP_RESP_RAMROD_DATA_PRI_SHIFT                 8
-#define ROCE_CREATE_QP_RESP_RAMROD_DATA_MIN_RNR_NAK_TIMER_MASK    0x1F
-#define ROCE_CREATE_QP_RESP_RAMROD_DATA_MIN_RNR_NAK_TIMER_SHIFT   11
+#define ROCE_CREATE_QP_RESP_RAMROD_DATA_ROCE_FLAVOR_MASK		0x3
+#define ROCE_CREATE_QP_RESP_RAMROD_DATA_ROCE_FLAVOR_SHIFT		0
+#define ROCE_CREATE_QP_RESP_RAMROD_DATA_RDMA_RD_EN_MASK			0x1
+#define ROCE_CREATE_QP_RESP_RAMROD_DATA_RDMA_RD_EN_SHIFT		2
+#define ROCE_CREATE_QP_RESP_RAMROD_DATA_RDMA_WR_EN_MASK			0x1
+#define ROCE_CREATE_QP_RESP_RAMROD_DATA_RDMA_WR_EN_SHIFT		3
+#define ROCE_CREATE_QP_RESP_RAMROD_DATA_ATOMIC_EN_MASK			0x1
+#define ROCE_CREATE_QP_RESP_RAMROD_DATA_ATOMIC_EN_SHIFT			4
+#define ROCE_CREATE_QP_RESP_RAMROD_DATA_SRQ_FLG_MASK			0x1
+#define ROCE_CREATE_QP_RESP_RAMROD_DATA_SRQ_FLG_SHIFT			5
+#define ROCE_CREATE_QP_RESP_RAMROD_DATA_E2E_FLOW_CONTROL_EN_MASK	0x1
+#define ROCE_CREATE_QP_RESP_RAMROD_DATA_E2E_FLOW_CONTROL_EN_SHIFT	6
+#define ROCE_CREATE_QP_RESP_RAMROD_DATA_RESERVED_KEY_EN_MASK		0x1
+#define ROCE_CREATE_QP_RESP_RAMROD_DATA_RESERVED_KEY_EN_SHIFT		7
+#define ROCE_CREATE_QP_RESP_RAMROD_DATA_PRI_MASK			0x7
+#define ROCE_CREATE_QP_RESP_RAMROD_DATA_PRI_SHIFT			8
+#define ROCE_CREATE_QP_RESP_RAMROD_DATA_MIN_RNR_NAK_TIMER_MASK		0x1F
+#define ROCE_CREATE_QP_RESP_RAMROD_DATA_MIN_RNR_NAK_TIMER_SHIFT		11
 	u8 max_ird;
 	u8 traffic_class;
 	u8 hop_limit;
@@ -7482,24 +8043,40 @@ struct roce_create_qp_resp_ramrod_data {
 	__le16 dpi;
 };
 
+/* roce DCQCN received statistics */
+struct roce_dcqcn_received_stats {
+	struct regpair ecn_pkt_rcv;
+	struct regpair cnp_pkt_rcv;
+};
+
+/* roce DCQCN sent statistics */
+struct roce_dcqcn_sent_stats {
+	struct regpair cnp_pkt_sent;
+};
+
+/* RoCE destroy qp requester output params */
 struct roce_destroy_qp_req_output_params {
 	__le32 num_bound_mw;
 	__le32 cq_prod;
 };
 
+/* RoCE destroy qp requester ramrod data */
 struct roce_destroy_qp_req_ramrod_data {
 	struct regpair output_params_addr;
 };
 
+/* RoCE destroy qp responder output params */
 struct roce_destroy_qp_resp_output_params {
 	__le32 num_invalidated_mw;
 	__le32 cq_prod;
 };
 
+/* RoCE destroy qp responder ramrod data */
 struct roce_destroy_qp_resp_ramrod_data {
 	struct regpair output_params_addr;
 };
 
+/* roce special events statistics */
 struct roce_events_stats {
 	__le16 silent_drops;
 	__le16 rnr_naks_sent;
@@ -7508,6 +8085,7 @@ struct roce_events_stats {
 	__le32 reserved;
 };
 
+/* ROCE slow path EQ cmd IDs */
 enum roce_event_opcode {
 	ROCE_EVENT_CREATE_QP = 11,
 	ROCE_EVENT_MODIFY_QP,
@@ -7518,6 +8096,7 @@ enum roce_event_opcode {
 	MAX_ROCE_EVENT_OPCODE
 };
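
These opcodes arrive on the slow-path event queue, where a consumer
dispatches on them. A generic sketch (the handler names are
hypothetical placeholders):

  /* Sketch only: dispatch a RoCE slow-path EQ event by opcode. */
  static int example_roce_event(u8 opcode, void *data)
  {
          switch (opcode) {
          case ROCE_EVENT_CREATE_QP:
                  return example_on_create_qp(data);
          case ROCE_EVENT_MODIFY_QP:
                  return example_on_modify_qp(data);
          default:
                  return -EINVAL;
          }
  }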
 
+/* roce func init params */
 struct roce_init_func_params {
 	u8 ll2_queue_id;
 	u8 cnp_vlan_priority;
@@ -7526,42 +8105,46 @@ struct roce_init_func_params {
 	__le32 cnp_send_timeout;
 };
 
+/* roce func init ramrod data */
 struct roce_init_func_ramrod_data {
 	struct rdma_init_func_ramrod_data rdma;
 	struct roce_init_func_params roce;
 };
 
+/* roce modify qp requester ramrod data */
 struct roce_modify_qp_req_ramrod_data {
 	__le16 flags;
-#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_MOVE_TO_ERR_FLG_MASK      0x1
-#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_MOVE_TO_ERR_FLG_SHIFT     0
-#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_MOVE_TO_SQD_FLG_MASK      0x1
-#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_MOVE_TO_SQD_FLG_SHIFT     1
-#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_EN_SQD_ASYNC_NOTIFY_MASK  0x1
-#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_EN_SQD_ASYNC_NOTIFY_SHIFT 2
-#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_P_KEY_FLG_MASK            0x1
-#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_P_KEY_FLG_SHIFT           3
-#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_ADDRESS_VECTOR_FLG_MASK   0x1
-#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_ADDRESS_VECTOR_FLG_SHIFT  4
-#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_MAX_ORD_FLG_MASK          0x1
-#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_MAX_ORD_FLG_SHIFT         5
-#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_RNR_NAK_CNT_FLG_MASK      0x1
-#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_RNR_NAK_CNT_FLG_SHIFT     6
-#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_ERR_RETRY_CNT_FLG_MASK    0x1
-#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_ERR_RETRY_CNT_FLG_SHIFT   7
-#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_ACK_TIMEOUT_FLG_MASK      0x1
-#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_ACK_TIMEOUT_FLG_SHIFT     8
-#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_PRI_FLG_MASK              0x1
-#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_PRI_FLG_SHIFT             9
-#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_PRI_MASK                  0x7
-#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_PRI_SHIFT                 10
-#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_RESERVED1_MASK            0x7
-#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_RESERVED1_SHIFT           13
+#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_MOVE_TO_ERR_FLG_MASK		0x1
+#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_MOVE_TO_ERR_FLG_SHIFT		0
+#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_MOVE_TO_SQD_FLG_MASK		0x1
+#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_MOVE_TO_SQD_FLG_SHIFT		1
+#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_EN_SQD_ASYNC_NOTIFY_MASK		0x1
+#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_EN_SQD_ASYNC_NOTIFY_SHIFT	2
+#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_P_KEY_FLG_MASK			0x1
+#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_P_KEY_FLG_SHIFT			3
+#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_ADDRESS_VECTOR_FLG_MASK		0x1
+#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_ADDRESS_VECTOR_FLG_SHIFT		4
+#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_MAX_ORD_FLG_MASK			0x1
+#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_MAX_ORD_FLG_SHIFT		5
+#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_RNR_NAK_CNT_FLG_MASK		0x1
+#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_RNR_NAK_CNT_FLG_SHIFT		6
+#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_ERR_RETRY_CNT_FLG_MASK		0x1
+#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_ERR_RETRY_CNT_FLG_SHIFT		7
+#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_ACK_TIMEOUT_FLG_MASK		0x1
+#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_ACK_TIMEOUT_FLG_SHIFT		8
+#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_PRI_FLG_MASK			0x1
+#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_PRI_FLG_SHIFT			9
+#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_PRI_MASK				0x7
+#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_PRI_SHIFT			10
+#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_PHYSICAL_QUEUES_FLG_MASK		0x1
+#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_PHYSICAL_QUEUES_FLG_SHIFT	13
+#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_RESERVED1_MASK			0x3
+#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_RESERVED1_SHIFT			14
 	u8 fields;
-#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_ERR_RETRY_CNT_MASK        0xF
-#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_ERR_RETRY_CNT_SHIFT       0
-#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_RNR_NAK_CNT_MASK          0xF
-#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_RNR_NAK_CNT_SHIFT         4
+#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_ERR_RETRY_CNT_MASK	0xF
+#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_ERR_RETRY_CNT_SHIFT	0
+#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_RNR_NAK_CNT_MASK		0xF
+#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_RNR_NAK_CNT_SHIFT	4
 	u8 max_ord;
 	u8 traffic_class;
 	u8 hop_limit;
@@ -7570,66 +8153,76 @@ struct roce_modify_qp_req_ramrod_data {
 	__le32 ack_timeout_val;
 	__le16 mtu;
 	__le16 reserved2;
-	__le32 reserved3[3];
+	__le32 reserved3[2];
+	__le16 low_latency_phy_queue;
+	__le16 regular_latency_phy_queue;
 	__le32 src_gid[4];
 	__le32 dst_gid[4];
 };
 
+/* roce modify qp responder ramrod data */
 struct roce_modify_qp_resp_ramrod_data {
 	__le16 flags;
-#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_MOVE_TO_ERR_FLG_MASK        0x1
-#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_MOVE_TO_ERR_FLG_SHIFT       0
-#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_RDMA_RD_EN_MASK             0x1
-#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_RDMA_RD_EN_SHIFT            1
-#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_RDMA_WR_EN_MASK             0x1
-#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_RDMA_WR_EN_SHIFT            2
-#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_ATOMIC_EN_MASK              0x1
-#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_ATOMIC_EN_SHIFT             3
-#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_P_KEY_FLG_MASK              0x1
-#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_P_KEY_FLG_SHIFT             4
-#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_ADDRESS_VECTOR_FLG_MASK     0x1
-#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_ADDRESS_VECTOR_FLG_SHIFT    5
-#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_MAX_IRD_FLG_MASK            0x1
-#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_MAX_IRD_FLG_SHIFT           6
-#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_PRI_FLG_MASK                0x1
-#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_PRI_FLG_SHIFT               7
-#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_MIN_RNR_NAK_TIMER_FLG_MASK  0x1
-#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_MIN_RNR_NAK_TIMER_FLG_SHIFT 8
-#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_RDMA_OPS_EN_FLG_MASK        0x1
-#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_RDMA_OPS_EN_FLG_SHIFT       9
-#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_RESERVED1_MASK              0x3F
-#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_RESERVED1_SHIFT             10
+#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_MOVE_TO_ERR_FLG_MASK		0x1
+#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_MOVE_TO_ERR_FLG_SHIFT		0
+#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_RDMA_RD_EN_MASK			0x1
+#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_RDMA_RD_EN_SHIFT		1
+#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_RDMA_WR_EN_MASK			0x1
+#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_RDMA_WR_EN_SHIFT		2
+#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_ATOMIC_EN_MASK			0x1
+#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_ATOMIC_EN_SHIFT			3
+#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_P_KEY_FLG_MASK			0x1
+#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_P_KEY_FLG_SHIFT			4
+#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_ADDRESS_VECTOR_FLG_MASK		0x1
+#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_ADDRESS_VECTOR_FLG_SHIFT	5
+#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_MAX_IRD_FLG_MASK		0x1
+#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_MAX_IRD_FLG_SHIFT		6
+#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_PRI_FLG_MASK			0x1
+#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_PRI_FLG_SHIFT			7
+#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_MIN_RNR_NAK_TIMER_FLG_MASK	0x1
+#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_MIN_RNR_NAK_TIMER_FLG_SHIFT	8
+#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_RDMA_OPS_EN_FLG_MASK		0x1
+#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_RDMA_OPS_EN_FLG_SHIFT		9
+#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_PHYSICAL_QUEUES_FLG_MASK	0x1
+#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_PHYSICAL_QUEUES_FLG_SHIFT	10
+#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_RESERVED1_MASK			0x1F
+#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_RESERVED1_SHIFT			11
 	u8 fields;
-#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_PRI_MASK                    0x7
-#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_PRI_SHIFT                   0
-#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_MIN_RNR_NAK_TIMER_MASK      0x1F
-#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_MIN_RNR_NAK_TIMER_SHIFT     3
+#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_PRI_MASK		0x7
+#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_PRI_SHIFT		0
+#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_MIN_RNR_NAK_TIMER_MASK	0x1F
+#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_MIN_RNR_NAK_TIMER_SHIFT	3
 	u8 max_ird;
 	u8 traffic_class;
 	u8 hop_limit;
 	__le16 p_key;
 	__le32 flow_label;
 	__le16 mtu;
-	__le16 reserved2;
+	__le16 low_latency_phy_queue;
+	__le16 regular_latency_phy_queue;
+	u8 reserved2[6];
 	__le32 src_gid[4];
 	__le32 dst_gid[4];
 };
 
+/* RoCE query qp requester output params */
 struct roce_query_qp_req_output_params {
 	__le32 psn;
 	__le32 flags;
-#define ROCE_QUERY_QP_REQ_OUTPUT_PARAMS_ERR_FLG_MASK          0x1
-#define ROCE_QUERY_QP_REQ_OUTPUT_PARAMS_ERR_FLG_SHIFT         0
-#define ROCE_QUERY_QP_REQ_OUTPUT_PARAMS_SQ_DRAINING_FLG_MASK  0x1
-#define ROCE_QUERY_QP_REQ_OUTPUT_PARAMS_SQ_DRAINING_FLG_SHIFT 1
-#define ROCE_QUERY_QP_REQ_OUTPUT_PARAMS_RESERVED0_MASK        0x3FFFFFFF
-#define ROCE_QUERY_QP_REQ_OUTPUT_PARAMS_RESERVED0_SHIFT       2
+#define ROCE_QUERY_QP_REQ_OUTPUT_PARAMS_ERR_FLG_MASK		0x1
+#define ROCE_QUERY_QP_REQ_OUTPUT_PARAMS_ERR_FLG_SHIFT		0
+#define ROCE_QUERY_QP_REQ_OUTPUT_PARAMS_SQ_DRAINING_FLG_MASK	0x1
+#define ROCE_QUERY_QP_REQ_OUTPUT_PARAMS_SQ_DRAINING_FLG_SHIFT	1
+#define ROCE_QUERY_QP_REQ_OUTPUT_PARAMS_RESERVED0_MASK		0x3FFFFFFF
+#define ROCE_QUERY_QP_REQ_OUTPUT_PARAMS_RESERVED0_SHIFT		2
 };
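
Reads go the other way: the requester query output packs its status
bits into 'flags', extracted with GET_FIELD(). Sketch only, assuming
the helpers shown earlier (the function name is hypothetical):

  /* Sketch only: test the SQ-draining bit of the query output. */
  static bool example_sq_draining(const struct roce_query_qp_req_output_params *out)
  {
          return GET_FIELD(le32_to_cpu(out->flags),
                           ROCE_QUERY_QP_REQ_OUTPUT_PARAMS_SQ_DRAINING_FLG);
  }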
 
+/* RoCE query qp requester ramrod data */
 struct roce_query_qp_req_ramrod_data {
 	struct regpair output_params_addr;
 };
 
+/* RoCE query qp responder output params */
 struct roce_query_qp_resp_output_params {
 	__le32 psn;
 	__le32 err_flag;
@@ -7639,10 +8232,12 @@ struct roce_query_qp_resp_output_params {
 #define ROCE_QUERY_QP_RESP_OUTPUT_PARAMS_RESERVED0_SHIFT 1
 };
 
+/* RoCE query qp responder ramrod data */
 struct roce_query_qp_resp_ramrod_data {
 	struct regpair output_params_addr;
 };
 
+/* ROCE ramrod command IDs */
 enum roce_ramrod_cmd_id {
 	ROCE_RAMROD_CREATE_QP = 11,
 	ROCE_RAMROD_MODIFY_QP,
@@ -7653,163 +8248,163 @@ enum roce_ramrod_cmd_id {
 	MAX_ROCE_RAMROD_CMD_ID
 };
 
-struct mstorm_roce_req_conn_ag_ctx {
+struct e4_mstorm_roce_req_conn_ag_ctx {
 	u8 byte0;
 	u8 byte1;
 	u8 flags0;
-#define MSTORM_ROCE_REQ_CONN_AG_CTX_BIT0_MASK     0x1
-#define MSTORM_ROCE_REQ_CONN_AG_CTX_BIT0_SHIFT    0
-#define MSTORM_ROCE_REQ_CONN_AG_CTX_BIT1_MASK     0x1
-#define MSTORM_ROCE_REQ_CONN_AG_CTX_BIT1_SHIFT    1
-#define MSTORM_ROCE_REQ_CONN_AG_CTX_CF0_MASK      0x3
-#define MSTORM_ROCE_REQ_CONN_AG_CTX_CF0_SHIFT     2
-#define MSTORM_ROCE_REQ_CONN_AG_CTX_CF1_MASK      0x3
-#define MSTORM_ROCE_REQ_CONN_AG_CTX_CF1_SHIFT     4
-#define MSTORM_ROCE_REQ_CONN_AG_CTX_CF2_MASK      0x3
-#define MSTORM_ROCE_REQ_CONN_AG_CTX_CF2_SHIFT     6
+#define E4_MSTORM_ROCE_REQ_CONN_AG_CTX_BIT0_MASK	0x1
+#define E4_MSTORM_ROCE_REQ_CONN_AG_CTX_BIT0_SHIFT	0
+#define E4_MSTORM_ROCE_REQ_CONN_AG_CTX_BIT1_MASK	0x1
+#define E4_MSTORM_ROCE_REQ_CONN_AG_CTX_BIT1_SHIFT	1
+#define E4_MSTORM_ROCE_REQ_CONN_AG_CTX_CF0_MASK		0x3
+#define E4_MSTORM_ROCE_REQ_CONN_AG_CTX_CF0_SHIFT	2
+#define E4_MSTORM_ROCE_REQ_CONN_AG_CTX_CF1_MASK		0x3
+#define E4_MSTORM_ROCE_REQ_CONN_AG_CTX_CF1_SHIFT	4
+#define E4_MSTORM_ROCE_REQ_CONN_AG_CTX_CF2_MASK		0x3
+#define E4_MSTORM_ROCE_REQ_CONN_AG_CTX_CF2_SHIFT	6
 	u8 flags1;
-#define MSTORM_ROCE_REQ_CONN_AG_CTX_CF0EN_MASK    0x1
-#define MSTORM_ROCE_REQ_CONN_AG_CTX_CF0EN_SHIFT   0
-#define MSTORM_ROCE_REQ_CONN_AG_CTX_CF1EN_MASK    0x1
-#define MSTORM_ROCE_REQ_CONN_AG_CTX_CF1EN_SHIFT   1
-#define MSTORM_ROCE_REQ_CONN_AG_CTX_CF2EN_MASK    0x1
-#define MSTORM_ROCE_REQ_CONN_AG_CTX_CF2EN_SHIFT   2
-#define MSTORM_ROCE_REQ_CONN_AG_CTX_RULE0EN_MASK  0x1
-#define MSTORM_ROCE_REQ_CONN_AG_CTX_RULE0EN_SHIFT 3
-#define MSTORM_ROCE_REQ_CONN_AG_CTX_RULE1EN_MASK  0x1
-#define MSTORM_ROCE_REQ_CONN_AG_CTX_RULE1EN_SHIFT 4
-#define MSTORM_ROCE_REQ_CONN_AG_CTX_RULE2EN_MASK  0x1
-#define MSTORM_ROCE_REQ_CONN_AG_CTX_RULE2EN_SHIFT 5
-#define MSTORM_ROCE_REQ_CONN_AG_CTX_RULE3EN_MASK  0x1
-#define MSTORM_ROCE_REQ_CONN_AG_CTX_RULE3EN_SHIFT 6
-#define MSTORM_ROCE_REQ_CONN_AG_CTX_RULE4EN_MASK  0x1
-#define MSTORM_ROCE_REQ_CONN_AG_CTX_RULE4EN_SHIFT 7
+#define E4_MSTORM_ROCE_REQ_CONN_AG_CTX_CF0EN_MASK	0x1
+#define E4_MSTORM_ROCE_REQ_CONN_AG_CTX_CF0EN_SHIFT	0
+#define E4_MSTORM_ROCE_REQ_CONN_AG_CTX_CF1EN_MASK	0x1
+#define E4_MSTORM_ROCE_REQ_CONN_AG_CTX_CF1EN_SHIFT	1
+#define E4_MSTORM_ROCE_REQ_CONN_AG_CTX_CF2EN_MASK	0x1
+#define E4_MSTORM_ROCE_REQ_CONN_AG_CTX_CF2EN_SHIFT	2
+#define E4_MSTORM_ROCE_REQ_CONN_AG_CTX_RULE0EN_MASK	0x1
+#define E4_MSTORM_ROCE_REQ_CONN_AG_CTX_RULE0EN_SHIFT	3
+#define E4_MSTORM_ROCE_REQ_CONN_AG_CTX_RULE1EN_MASK	0x1
+#define E4_MSTORM_ROCE_REQ_CONN_AG_CTX_RULE1EN_SHIFT	4
+#define E4_MSTORM_ROCE_REQ_CONN_AG_CTX_RULE2EN_MASK	0x1
+#define E4_MSTORM_ROCE_REQ_CONN_AG_CTX_RULE2EN_SHIFT	5
+#define E4_MSTORM_ROCE_REQ_CONN_AG_CTX_RULE3EN_MASK	0x1
+#define E4_MSTORM_ROCE_REQ_CONN_AG_CTX_RULE3EN_SHIFT	6
+#define E4_MSTORM_ROCE_REQ_CONN_AG_CTX_RULE4EN_MASK	0x1
+#define E4_MSTORM_ROCE_REQ_CONN_AG_CTX_RULE4EN_SHIFT	7
 	__le16 word0;
 	__le16 word1;
 	__le32 reg0;
 	__le32 reg1;
 };
 
-struct mstorm_roce_resp_conn_ag_ctx {
+struct e4_mstorm_roce_resp_conn_ag_ctx {
 	u8 byte0;
 	u8 byte1;
 	u8 flags0;
-#define MSTORM_ROCE_RESP_CONN_AG_CTX_BIT0_MASK     0x1
-#define MSTORM_ROCE_RESP_CONN_AG_CTX_BIT0_SHIFT    0
-#define MSTORM_ROCE_RESP_CONN_AG_CTX_BIT1_MASK     0x1
-#define MSTORM_ROCE_RESP_CONN_AG_CTX_BIT1_SHIFT    1
-#define MSTORM_ROCE_RESP_CONN_AG_CTX_CF0_MASK      0x3
-#define MSTORM_ROCE_RESP_CONN_AG_CTX_CF0_SHIFT     2
-#define MSTORM_ROCE_RESP_CONN_AG_CTX_CF1_MASK      0x3
-#define MSTORM_ROCE_RESP_CONN_AG_CTX_CF1_SHIFT     4
-#define MSTORM_ROCE_RESP_CONN_AG_CTX_CF2_MASK      0x3
-#define MSTORM_ROCE_RESP_CONN_AG_CTX_CF2_SHIFT     6
+#define E4_MSTORM_ROCE_RESP_CONN_AG_CTX_BIT0_MASK	0x1
+#define E4_MSTORM_ROCE_RESP_CONN_AG_CTX_BIT0_SHIFT	0
+#define E4_MSTORM_ROCE_RESP_CONN_AG_CTX_BIT1_MASK	0x1
+#define E4_MSTORM_ROCE_RESP_CONN_AG_CTX_BIT1_SHIFT	1
+#define E4_MSTORM_ROCE_RESP_CONN_AG_CTX_CF0_MASK	0x3
+#define E4_MSTORM_ROCE_RESP_CONN_AG_CTX_CF0_SHIFT	2
+#define E4_MSTORM_ROCE_RESP_CONN_AG_CTX_CF1_MASK	0x3
+#define E4_MSTORM_ROCE_RESP_CONN_AG_CTX_CF1_SHIFT	4
+#define E4_MSTORM_ROCE_RESP_CONN_AG_CTX_CF2_MASK	0x3
+#define E4_MSTORM_ROCE_RESP_CONN_AG_CTX_CF2_SHIFT	6
 	u8 flags1;
-#define MSTORM_ROCE_RESP_CONN_AG_CTX_CF0EN_MASK    0x1
-#define MSTORM_ROCE_RESP_CONN_AG_CTX_CF0EN_SHIFT   0
-#define MSTORM_ROCE_RESP_CONN_AG_CTX_CF1EN_MASK    0x1
-#define MSTORM_ROCE_RESP_CONN_AG_CTX_CF1EN_SHIFT   1
-#define MSTORM_ROCE_RESP_CONN_AG_CTX_CF2EN_MASK    0x1
-#define MSTORM_ROCE_RESP_CONN_AG_CTX_CF2EN_SHIFT   2
-#define MSTORM_ROCE_RESP_CONN_AG_CTX_RULE0EN_MASK  0x1
-#define MSTORM_ROCE_RESP_CONN_AG_CTX_RULE0EN_SHIFT 3
-#define MSTORM_ROCE_RESP_CONN_AG_CTX_RULE1EN_MASK  0x1
-#define MSTORM_ROCE_RESP_CONN_AG_CTX_RULE1EN_SHIFT 4
-#define MSTORM_ROCE_RESP_CONN_AG_CTX_RULE2EN_MASK  0x1
-#define MSTORM_ROCE_RESP_CONN_AG_CTX_RULE2EN_SHIFT 5
-#define MSTORM_ROCE_RESP_CONN_AG_CTX_RULE3EN_MASK  0x1
-#define MSTORM_ROCE_RESP_CONN_AG_CTX_RULE3EN_SHIFT 6
-#define MSTORM_ROCE_RESP_CONN_AG_CTX_RULE4EN_MASK  0x1
-#define MSTORM_ROCE_RESP_CONN_AG_CTX_RULE4EN_SHIFT 7
+#define E4_MSTORM_ROCE_RESP_CONN_AG_CTX_CF0EN_MASK	0x1
+#define E4_MSTORM_ROCE_RESP_CONN_AG_CTX_CF0EN_SHIFT	0
+#define E4_MSTORM_ROCE_RESP_CONN_AG_CTX_CF1EN_MASK	0x1
+#define E4_MSTORM_ROCE_RESP_CONN_AG_CTX_CF1EN_SHIFT	1
+#define E4_MSTORM_ROCE_RESP_CONN_AG_CTX_CF2EN_MASK	0x1
+#define E4_MSTORM_ROCE_RESP_CONN_AG_CTX_CF2EN_SHIFT	2
+#define E4_MSTORM_ROCE_RESP_CONN_AG_CTX_RULE0EN_MASK	0x1
+#define E4_MSTORM_ROCE_RESP_CONN_AG_CTX_RULE0EN_SHIFT	3
+#define E4_MSTORM_ROCE_RESP_CONN_AG_CTX_RULE1EN_MASK	0x1
+#define E4_MSTORM_ROCE_RESP_CONN_AG_CTX_RULE1EN_SHIFT	4
+#define E4_MSTORM_ROCE_RESP_CONN_AG_CTX_RULE2EN_MASK	0x1
+#define E4_MSTORM_ROCE_RESP_CONN_AG_CTX_RULE2EN_SHIFT	5
+#define E4_MSTORM_ROCE_RESP_CONN_AG_CTX_RULE3EN_MASK	0x1
+#define E4_MSTORM_ROCE_RESP_CONN_AG_CTX_RULE3EN_SHIFT	6
+#define E4_MSTORM_ROCE_RESP_CONN_AG_CTX_RULE4EN_MASK	0x1
+#define E4_MSTORM_ROCE_RESP_CONN_AG_CTX_RULE4EN_SHIFT	7
 	__le16 word0;
 	__le16 word1;
 	__le32 reg0;
 	__le32 reg1;
 };
 
-struct tstorm_roce_req_conn_ag_ctx {
+struct e4_tstorm_roce_req_conn_ag_ctx {
 	u8 reserved0;
 	u8 state;
 	u8 flags0;
-#define TSTORM_ROCE_REQ_CONN_AG_CTX_EXIST_IN_QM0_MASK                0x1
-#define TSTORM_ROCE_REQ_CONN_AG_CTX_EXIST_IN_QM0_SHIFT               0
-#define TSTORM_ROCE_REQ_CONN_AG_CTX_RX_ERROR_OCCURED_MASK            0x1
-#define TSTORM_ROCE_REQ_CONN_AG_CTX_RX_ERROR_OCCURED_SHIFT           1
-#define TSTORM_ROCE_REQ_CONN_AG_CTX_TX_CQE_ERROR_OCCURED_MASK        0x1
-#define TSTORM_ROCE_REQ_CONN_AG_CTX_TX_CQE_ERROR_OCCURED_SHIFT       2
-#define TSTORM_ROCE_REQ_CONN_AG_CTX_BIT3_MASK                        0x1
-#define TSTORM_ROCE_REQ_CONN_AG_CTX_BIT3_SHIFT                       3
-#define TSTORM_ROCE_REQ_CONN_AG_CTX_MSTORM_FLUSH_MASK                0x1
-#define TSTORM_ROCE_REQ_CONN_AG_CTX_MSTORM_FLUSH_SHIFT               4
-#define TSTORM_ROCE_REQ_CONN_AG_CTX_CACHED_ORQ_MASK                  0x1
-#define TSTORM_ROCE_REQ_CONN_AG_CTX_CACHED_ORQ_SHIFT                 5
-#define TSTORM_ROCE_REQ_CONN_AG_CTX_TIMER_CF_MASK                    0x3
-#define TSTORM_ROCE_REQ_CONN_AG_CTX_TIMER_CF_SHIFT                   6
+#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_EXIST_IN_QM0_MASK		0x1
+#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_EXIST_IN_QM0_SHIFT		0
+#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_RX_ERROR_OCCURRED_MASK		0x1
+#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_RX_ERROR_OCCURRED_SHIFT		1
+#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_TX_CQE_ERROR_OCCURRED_MASK	0x1
+#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_TX_CQE_ERROR_OCCURRED_SHIFT	2
+#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_BIT3_MASK			0x1
+#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_BIT3_SHIFT			3
+#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_MSTORM_FLUSH_MASK		0x1
+#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_MSTORM_FLUSH_SHIFT		4
+#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_CACHED_ORQ_MASK			0x1
+#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_CACHED_ORQ_SHIFT			5
+#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_TIMER_CF_MASK			0x3
+#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_TIMER_CF_SHIFT			6
 	u8 flags1;
-#define TSTORM_ROCE_REQ_CONN_AG_CTX_CF1_MASK                         0x3
-#define TSTORM_ROCE_REQ_CONN_AG_CTX_CF1_SHIFT                        0
-#define TSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_SQ_CF_MASK                 0x3
-#define TSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_SQ_CF_SHIFT                2
-#define TSTORM_ROCE_REQ_CONN_AG_CTX_TIMER_STOP_ALL_CF_MASK           0x3
-#define TSTORM_ROCE_REQ_CONN_AG_CTX_TIMER_STOP_ALL_CF_SHIFT          4
-#define TSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_Q0_CF_MASK                 0x3
-#define TSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_Q0_CF_SHIFT                6
+#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_CF1_MASK				0x3
+#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_CF1_SHIFT			0
+#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_SQ_CF_MASK			0x3
+#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_SQ_CF_SHIFT		2
+#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_TIMER_STOP_ALL_CF_MASK		0x3
+#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_TIMER_STOP_ALL_CF_SHIFT		4
+#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_Q0_CF_MASK			0x3
+#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_Q0_CF_SHIFT		6
 	u8 flags2;
-#define TSTORM_ROCE_REQ_CONN_AG_CTX_MSTORM_FLUSH_CF_MASK             0x3
-#define TSTORM_ROCE_REQ_CONN_AG_CTX_MSTORM_FLUSH_CF_SHIFT            0
-#define TSTORM_ROCE_REQ_CONN_AG_CTX_SET_TIMER_CF_MASK                0x3
-#define TSTORM_ROCE_REQ_CONN_AG_CTX_SET_TIMER_CF_SHIFT               2
-#define TSTORM_ROCE_REQ_CONN_AG_CTX_TX_ASYNC_ERROR_CF_MASK           0x3
-#define TSTORM_ROCE_REQ_CONN_AG_CTX_TX_ASYNC_ERROR_CF_SHIFT          4
-#define TSTORM_ROCE_REQ_CONN_AG_CTX_RXMIT_DONE_CF_MASK               0x3
-#define TSTORM_ROCE_REQ_CONN_AG_CTX_RXMIT_DONE_CF_SHIFT              6
+#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_MSTORM_FLUSH_CF_MASK	0x3
+#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_MSTORM_FLUSH_CF_SHIFT	0
+#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_SET_TIMER_CF_MASK	0x3
+#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_SET_TIMER_CF_SHIFT	2
+#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_TX_ASYNC_ERROR_CF_MASK	0x3
+#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_TX_ASYNC_ERROR_CF_SHIFT	4
+#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_RXMIT_DONE_CF_MASK	0x3
+#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_RXMIT_DONE_CF_SHIFT	6
 	u8 flags3;
-#define TSTORM_ROCE_REQ_CONN_AG_CTX_ERROR_SCAN_COMPLETED_CF_MASK     0x3
-#define TSTORM_ROCE_REQ_CONN_AG_CTX_ERROR_SCAN_COMPLETED_CF_SHIFT    0
-#define TSTORM_ROCE_REQ_CONN_AG_CTX_SQ_DRAIN_COMPLETED_CF_MASK       0x3
-#define TSTORM_ROCE_REQ_CONN_AG_CTX_SQ_DRAIN_COMPLETED_CF_SHIFT      2
-#define TSTORM_ROCE_REQ_CONN_AG_CTX_TIMER_CF_EN_MASK                 0x1
-#define TSTORM_ROCE_REQ_CONN_AG_CTX_TIMER_CF_EN_SHIFT                4
-#define TSTORM_ROCE_REQ_CONN_AG_CTX_CF1EN_MASK                       0x1
-#define TSTORM_ROCE_REQ_CONN_AG_CTX_CF1EN_SHIFT                      5
-#define TSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_SQ_CF_EN_MASK              0x1
-#define TSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_SQ_CF_EN_SHIFT             6
-#define TSTORM_ROCE_REQ_CONN_AG_CTX_TIMER_STOP_ALL_CF_EN_MASK        0x1
-#define TSTORM_ROCE_REQ_CONN_AG_CTX_TIMER_STOP_ALL_CF_EN_SHIFT       7
+#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_ERROR_SCAN_COMPLETED_CF_MASK	0x3
+#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_ERROR_SCAN_COMPLETED_CF_SHIFT	0
+#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_SQ_DRAIN_COMPLETED_CF_MASK	0x3
+#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_SQ_DRAIN_COMPLETED_CF_SHIFT	2
+#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_TIMER_CF_EN_MASK			0x1
+#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_TIMER_CF_EN_SHIFT		4
+#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_CF1EN_MASK			0x1
+#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_CF1EN_SHIFT			5
+#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_SQ_CF_EN_MASK		0x1
+#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_SQ_CF_EN_SHIFT		6
+#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_TIMER_STOP_ALL_CF_EN_MASK	0x1
+#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_TIMER_STOP_ALL_CF_EN_SHIFT	7
 	u8 flags4;
-#define TSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_Q0_CF_EN_MASK              0x1
-#define TSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_Q0_CF_EN_SHIFT             0
-#define TSTORM_ROCE_REQ_CONN_AG_CTX_MSTORM_FLUSH_CF_EN_MASK          0x1
-#define TSTORM_ROCE_REQ_CONN_AG_CTX_MSTORM_FLUSH_CF_EN_SHIFT         1
-#define TSTORM_ROCE_REQ_CONN_AG_CTX_SET_TIMER_CF_EN_MASK             0x1
-#define TSTORM_ROCE_REQ_CONN_AG_CTX_SET_TIMER_CF_EN_SHIFT            2
-#define TSTORM_ROCE_REQ_CONN_AG_CTX_TX_ASYNC_ERROR_CF_EN_MASK        0x1
-#define TSTORM_ROCE_REQ_CONN_AG_CTX_TX_ASYNC_ERROR_CF_EN_SHIFT       3
-#define TSTORM_ROCE_REQ_CONN_AG_CTX_RXMIT_DONE_CF_EN_MASK            0x1
-#define TSTORM_ROCE_REQ_CONN_AG_CTX_RXMIT_DONE_CF_EN_SHIFT           4
-#define TSTORM_ROCE_REQ_CONN_AG_CTX_ERROR_SCAN_COMPLETED_CF_EN_MASK  0x1
-#define TSTORM_ROCE_REQ_CONN_AG_CTX_ERROR_SCAN_COMPLETED_CF_EN_SHIFT 5
-#define TSTORM_ROCE_REQ_CONN_AG_CTX_SQ_DRAIN_COMPLETED_CF_EN_MASK    0x1
-#define TSTORM_ROCE_REQ_CONN_AG_CTX_SQ_DRAIN_COMPLETED_CF_EN_SHIFT   6
-#define TSTORM_ROCE_REQ_CONN_AG_CTX_RULE0EN_MASK                     0x1
-#define TSTORM_ROCE_REQ_CONN_AG_CTX_RULE0EN_SHIFT                    7
+#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_Q0_CF_EN_MASK		0x1
+#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_Q0_CF_EN_SHIFT		0
+#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_MSTORM_FLUSH_CF_EN_MASK		0x1
+#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_MSTORM_FLUSH_CF_EN_SHIFT		1
+#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_SET_TIMER_CF_EN_MASK		0x1
+#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_SET_TIMER_CF_EN_SHIFT		2
+#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_TX_ASYNC_ERROR_CF_EN_MASK	0x1
+#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_TX_ASYNC_ERROR_CF_EN_SHIFT	3
+#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_RXMIT_DONE_CF_EN_MASK		0x1
+#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_RXMIT_DONE_CF_EN_SHIFT		4
+#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_ERROR_SCAN_COMPLETED_CF_EN_MASK	0x1
+#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_ERROR_SCAN_COMPLETED_CF_EN_SHIFT	5
+#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_SQ_DRAIN_COMPLETED_CF_EN_MASK	0x1
+#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_SQ_DRAIN_COMPLETED_CF_EN_SHIFT	6
+#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_RULE0EN_MASK			0x1
+#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_RULE0EN_SHIFT			7
 	u8 flags5;
-#define TSTORM_ROCE_REQ_CONN_AG_CTX_RULE1EN_MASK                     0x1
-#define TSTORM_ROCE_REQ_CONN_AG_CTX_RULE1EN_SHIFT                    0
-#define TSTORM_ROCE_REQ_CONN_AG_CTX_RULE2EN_MASK                     0x1
-#define TSTORM_ROCE_REQ_CONN_AG_CTX_RULE2EN_SHIFT                    1
-#define TSTORM_ROCE_REQ_CONN_AG_CTX_RULE3EN_MASK                     0x1
-#define TSTORM_ROCE_REQ_CONN_AG_CTX_RULE3EN_SHIFT                    2
-#define TSTORM_ROCE_REQ_CONN_AG_CTX_RULE4EN_MASK                     0x1
-#define TSTORM_ROCE_REQ_CONN_AG_CTX_RULE4EN_SHIFT                    3
-#define TSTORM_ROCE_REQ_CONN_AG_CTX_RULE5EN_MASK                     0x1
-#define TSTORM_ROCE_REQ_CONN_AG_CTX_RULE5EN_SHIFT                    4
-#define TSTORM_ROCE_REQ_CONN_AG_CTX_SND_SQ_CONS_EN_MASK              0x1
-#define TSTORM_ROCE_REQ_CONN_AG_CTX_SND_SQ_CONS_EN_SHIFT             5
-#define TSTORM_ROCE_REQ_CONN_AG_CTX_RULE7EN_MASK                     0x1
-#define TSTORM_ROCE_REQ_CONN_AG_CTX_RULE7EN_SHIFT                    6
-#define TSTORM_ROCE_REQ_CONN_AG_CTX_RULE8EN_MASK                     0x1
-#define TSTORM_ROCE_REQ_CONN_AG_CTX_RULE8EN_SHIFT                    7
+#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_RULE1EN_MASK		0x1
+#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_RULE1EN_SHIFT		0
+#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_RULE2EN_MASK		0x1
+#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_RULE2EN_SHIFT		1
+#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_RULE3EN_MASK		0x1
+#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_RULE3EN_SHIFT		2
+#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_RULE4EN_MASK		0x1
+#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_RULE4EN_SHIFT		3
+#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_RULE5EN_MASK		0x1
+#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_RULE5EN_SHIFT		4
+#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_SND_SQ_CONS_EN_MASK	0x1
+#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_SND_SQ_CONS_EN_SHIFT	5
+#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_RULE7EN_MASK		0x1
+#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_RULE7EN_SHIFT		6
+#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_RULE8EN_MASK		0x1
+#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_RULE8EN_SHIFT		7
 	__le32 reg0;
 	__le32 snd_nxt_psn;
 	__le32 snd_max_psn;
@@ -7825,95 +8420,95 @@ struct tstorm_roce_req_conn_ag_ctx {
 	u8 byte4;
 	u8 byte5;
 	__le16 snd_sq_cons;
-	__le16 word2;
+	__le16 conn_dpi;
 	__le16 word3;
 	__le32 reg9;
 	__le32 reg10;
 };
 
-struct tstorm_roce_resp_conn_ag_ctx {
+struct e4_tstorm_roce_resp_conn_ag_ctx {
 	u8 byte0;
 	u8 state;
 	u8 flags0;
-#define TSTORM_ROCE_RESP_CONN_AG_CTX_EXIST_IN_QM0_MASK        0x1
-#define TSTORM_ROCE_RESP_CONN_AG_CTX_EXIST_IN_QM0_SHIFT       0
-#define TSTORM_ROCE_RESP_CONN_AG_CTX_RX_ERROR_NOTIFY_REQUESTER_MASK  0x1
-#define TSTORM_ROCE_RESP_CONN_AG_CTX_RX_ERROR_NOTIFY_REQUESTER_SHIFT 1
-#define TSTORM_ROCE_RESP_CONN_AG_CTX_BIT2_MASK                0x1
-#define TSTORM_ROCE_RESP_CONN_AG_CTX_BIT2_SHIFT               2
-#define TSTORM_ROCE_RESP_CONN_AG_CTX_BIT3_MASK                0x1
-#define TSTORM_ROCE_RESP_CONN_AG_CTX_BIT3_SHIFT               3
-#define TSTORM_ROCE_RESP_CONN_AG_CTX_MSTORM_FLUSH_MASK        0x1
-#define TSTORM_ROCE_RESP_CONN_AG_CTX_MSTORM_FLUSH_SHIFT       4
-#define TSTORM_ROCE_RESP_CONN_AG_CTX_BIT5_MASK                0x1
-#define TSTORM_ROCE_RESP_CONN_AG_CTX_BIT5_SHIFT               5
-#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF0_MASK                 0x3
-#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF0_SHIFT                6
+#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_EXIST_IN_QM0_MASK		0x1
+#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_EXIST_IN_QM0_SHIFT		0
+#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_RX_ERROR_NOTIFY_REQUESTER_MASK	0x1
+#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_RX_ERROR_NOTIFY_REQUESTER_SHIFT	1
+#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_BIT2_MASK			0x1
+#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_BIT2_SHIFT			2
+#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_BIT3_MASK			0x1
+#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_BIT3_SHIFT			3
+#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_MSTORM_FLUSH_MASK		0x1
+#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_MSTORM_FLUSH_SHIFT		4
+#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_BIT5_MASK			0x1
+#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_BIT5_SHIFT			5
+#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_CF0_MASK			0x3
+#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_CF0_SHIFT			6
 	u8 flags1;
-#define TSTORM_ROCE_RESP_CONN_AG_CTX_RX_ERROR_CF_MASK         0x3
-#define TSTORM_ROCE_RESP_CONN_AG_CTX_RX_ERROR_CF_SHIFT        0
-#define TSTORM_ROCE_RESP_CONN_AG_CTX_TX_ERROR_CF_MASK         0x3
-#define TSTORM_ROCE_RESP_CONN_AG_CTX_TX_ERROR_CF_SHIFT        2
-#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF3_MASK                 0x3
-#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF3_SHIFT                4
-#define TSTORM_ROCE_RESP_CONN_AG_CTX_FLUSH_Q0_CF_MASK         0x3
-#define TSTORM_ROCE_RESP_CONN_AG_CTX_FLUSH_Q0_CF_SHIFT        6
+#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_RX_ERROR_CF_MASK	0x3
+#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_RX_ERROR_CF_SHIFT	0
+#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_TX_ERROR_CF_MASK	0x3
+#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_TX_ERROR_CF_SHIFT	2
+#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_CF3_MASK		0x3
+#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_CF3_SHIFT		4
+#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_FLUSH_Q0_CF_MASK	0x3
+#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_FLUSH_Q0_CF_SHIFT	6
 	u8 flags2;
-#define TSTORM_ROCE_RESP_CONN_AG_CTX_MSTORM_FLUSH_CF_MASK     0x3
-#define TSTORM_ROCE_RESP_CONN_AG_CTX_MSTORM_FLUSH_CF_SHIFT    0
-#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF6_MASK                 0x3
-#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF6_SHIFT                2
-#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF7_MASK                 0x3
-#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF7_SHIFT                4
-#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF8_MASK                 0x3
-#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF8_SHIFT                6
+#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_MSTORM_FLUSH_CF_MASK	0x3
+#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_MSTORM_FLUSH_CF_SHIFT	0
+#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_CF6_MASK		0x3
+#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_CF6_SHIFT		2
+#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_CF7_MASK		0x3
+#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_CF7_SHIFT		4
+#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_CF8_MASK		0x3
+#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_CF8_SHIFT		6
 	u8 flags3;
-#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF9_MASK                 0x3
-#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF9_SHIFT                0
-#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF10_MASK                0x3
-#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF10_SHIFT               2
-#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF0EN_MASK               0x1
-#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF0EN_SHIFT              4
-#define TSTORM_ROCE_RESP_CONN_AG_CTX_RX_ERROR_CF_EN_MASK      0x1
-#define TSTORM_ROCE_RESP_CONN_AG_CTX_RX_ERROR_CF_EN_SHIFT     5
-#define TSTORM_ROCE_RESP_CONN_AG_CTX_TX_ERROR_CF_EN_MASK      0x1
-#define TSTORM_ROCE_RESP_CONN_AG_CTX_TX_ERROR_CF_EN_SHIFT     6
-#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF3EN_MASK               0x1
-#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF3EN_SHIFT              7
+#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_CF9_MASK		0x3
+#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_CF9_SHIFT		0
+#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_CF10_MASK		0x3
+#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_CF10_SHIFT		2
+#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_CF0EN_MASK		0x1
+#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_CF0EN_SHIFT		4
+#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_RX_ERROR_CF_EN_MASK	0x1
+#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_RX_ERROR_CF_EN_SHIFT	5
+#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_TX_ERROR_CF_EN_MASK	0x1
+#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_TX_ERROR_CF_EN_SHIFT	6
+#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_CF3EN_MASK		0x1
+#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_CF3EN_SHIFT		7
 	u8 flags4;
-#define TSTORM_ROCE_RESP_CONN_AG_CTX_FLUSH_Q0_CF_EN_MASK      0x1
-#define TSTORM_ROCE_RESP_CONN_AG_CTX_FLUSH_Q0_CF_EN_SHIFT     0
-#define TSTORM_ROCE_RESP_CONN_AG_CTX_MSTORM_FLUSH_CF_EN_MASK  0x1
-#define TSTORM_ROCE_RESP_CONN_AG_CTX_MSTORM_FLUSH_CF_EN_SHIFT 1
-#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF6EN_MASK               0x1
-#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF6EN_SHIFT              2
-#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF7EN_MASK               0x1
-#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF7EN_SHIFT              3
-#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF8EN_MASK               0x1
-#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF8EN_SHIFT              4
-#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF9EN_MASK               0x1
-#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF9EN_SHIFT              5
-#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF10EN_MASK              0x1
-#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF10EN_SHIFT             6
-#define TSTORM_ROCE_RESP_CONN_AG_CTX_RULE0EN_MASK             0x1
-#define TSTORM_ROCE_RESP_CONN_AG_CTX_RULE0EN_SHIFT            7
+#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_FLUSH_Q0_CF_EN_MASK		0x1
+#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_FLUSH_Q0_CF_EN_SHIFT		0
+#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_MSTORM_FLUSH_CF_EN_MASK		0x1
+#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_MSTORM_FLUSH_CF_EN_SHIFT	1
+#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_CF6EN_MASK			0x1
+#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_CF6EN_SHIFT			2
+#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_CF7EN_MASK			0x1
+#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_CF7EN_SHIFT			3
+#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_CF8EN_MASK			0x1
+#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_CF8EN_SHIFT			4
+#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_CF9EN_MASK			0x1
+#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_CF9EN_SHIFT			5
+#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_CF10EN_MASK			0x1
+#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_CF10EN_SHIFT			6
+#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_RULE0EN_MASK			0x1
+#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_RULE0EN_SHIFT			7
 	u8 flags5;
-#define TSTORM_ROCE_RESP_CONN_AG_CTX_RULE1EN_MASK             0x1
-#define TSTORM_ROCE_RESP_CONN_AG_CTX_RULE1EN_SHIFT            0
-#define TSTORM_ROCE_RESP_CONN_AG_CTX_RULE2EN_MASK             0x1
-#define TSTORM_ROCE_RESP_CONN_AG_CTX_RULE2EN_SHIFT            1
-#define TSTORM_ROCE_RESP_CONN_AG_CTX_RULE3EN_MASK             0x1
-#define TSTORM_ROCE_RESP_CONN_AG_CTX_RULE3EN_SHIFT            2
-#define TSTORM_ROCE_RESP_CONN_AG_CTX_RULE4EN_MASK             0x1
-#define TSTORM_ROCE_RESP_CONN_AG_CTX_RULE4EN_SHIFT            3
-#define TSTORM_ROCE_RESP_CONN_AG_CTX_RULE5EN_MASK             0x1
-#define TSTORM_ROCE_RESP_CONN_AG_CTX_RULE5EN_SHIFT            4
-#define TSTORM_ROCE_RESP_CONN_AG_CTX_RQ_RULE_EN_MASK          0x1
-#define TSTORM_ROCE_RESP_CONN_AG_CTX_RQ_RULE_EN_SHIFT         5
-#define TSTORM_ROCE_RESP_CONN_AG_CTX_RULE7EN_MASK             0x1
-#define TSTORM_ROCE_RESP_CONN_AG_CTX_RULE7EN_SHIFT            6
-#define TSTORM_ROCE_RESP_CONN_AG_CTX_RULE8EN_MASK             0x1
-#define TSTORM_ROCE_RESP_CONN_AG_CTX_RULE8EN_SHIFT            7
+#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_RULE1EN_MASK		0x1
+#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_RULE1EN_SHIFT		0
+#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_RULE2EN_MASK		0x1
+#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_RULE2EN_SHIFT		1
+#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_RULE3EN_MASK		0x1
+#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_RULE3EN_SHIFT		2
+#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_RULE4EN_MASK		0x1
+#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_RULE4EN_SHIFT		3
+#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_RULE5EN_MASK		0x1
+#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_RULE5EN_SHIFT		4
+#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_RQ_RULE_EN_MASK		0x1
+#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_RQ_RULE_EN_SHIFT	5
+#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_RULE7EN_MASK		0x1
+#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_RULE7EN_SHIFT		6
+#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_RULE8EN_MASK		0x1
+#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_RULE8EN_SHIFT		7
 	__le32 psn_and_rxmit_id_echo;
 	__le32 reg1;
 	__le32 reg2;
@@ -7935,63 +8530,63 @@ struct tstorm_roce_resp_conn_ag_ctx {
 	__le32 reg10;
 };
 
-struct ustorm_roce_req_conn_ag_ctx {
+struct e4_ustorm_roce_req_conn_ag_ctx {
 	u8 byte0;
 	u8 byte1;
 	u8 flags0;
-#define USTORM_ROCE_REQ_CONN_AG_CTX_BIT0_MASK     0x1
-#define USTORM_ROCE_REQ_CONN_AG_CTX_BIT0_SHIFT    0
-#define USTORM_ROCE_REQ_CONN_AG_CTX_BIT1_MASK     0x1
-#define USTORM_ROCE_REQ_CONN_AG_CTX_BIT1_SHIFT    1
-#define USTORM_ROCE_REQ_CONN_AG_CTX_CF0_MASK      0x3
-#define USTORM_ROCE_REQ_CONN_AG_CTX_CF0_SHIFT     2
-#define USTORM_ROCE_REQ_CONN_AG_CTX_CF1_MASK      0x3
-#define USTORM_ROCE_REQ_CONN_AG_CTX_CF1_SHIFT     4
-#define USTORM_ROCE_REQ_CONN_AG_CTX_CF2_MASK      0x3
-#define USTORM_ROCE_REQ_CONN_AG_CTX_CF2_SHIFT     6
+#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_BIT0_MASK	0x1
+#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_BIT0_SHIFT	0
+#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_BIT1_MASK	0x1
+#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_BIT1_SHIFT	1
+#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_CF0_MASK		0x3
+#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_CF0_SHIFT	2
+#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_CF1_MASK		0x3
+#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_CF1_SHIFT	4
+#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_CF2_MASK		0x3
+#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_CF2_SHIFT	6
 	u8 flags1;
-#define USTORM_ROCE_REQ_CONN_AG_CTX_CF3_MASK      0x3
-#define USTORM_ROCE_REQ_CONN_AG_CTX_CF3_SHIFT     0
-#define USTORM_ROCE_REQ_CONN_AG_CTX_CF4_MASK      0x3
-#define USTORM_ROCE_REQ_CONN_AG_CTX_CF4_SHIFT     2
-#define USTORM_ROCE_REQ_CONN_AG_CTX_CF5_MASK      0x3
-#define USTORM_ROCE_REQ_CONN_AG_CTX_CF5_SHIFT     4
-#define USTORM_ROCE_REQ_CONN_AG_CTX_CF6_MASK      0x3
-#define USTORM_ROCE_REQ_CONN_AG_CTX_CF6_SHIFT     6
+#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_CF3_MASK		0x3
+#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_CF3_SHIFT	0
+#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_CF4_MASK		0x3
+#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_CF4_SHIFT	2
+#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_CF5_MASK		0x3
+#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_CF5_SHIFT	4
+#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_CF6_MASK		0x3
+#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_CF6_SHIFT	6
 	u8 flags2;
-#define USTORM_ROCE_REQ_CONN_AG_CTX_CF0EN_MASK    0x1
-#define USTORM_ROCE_REQ_CONN_AG_CTX_CF0EN_SHIFT   0
-#define USTORM_ROCE_REQ_CONN_AG_CTX_CF1EN_MASK    0x1
-#define USTORM_ROCE_REQ_CONN_AG_CTX_CF1EN_SHIFT   1
-#define USTORM_ROCE_REQ_CONN_AG_CTX_CF2EN_MASK    0x1
-#define USTORM_ROCE_REQ_CONN_AG_CTX_CF2EN_SHIFT   2
-#define USTORM_ROCE_REQ_CONN_AG_CTX_CF3EN_MASK    0x1
-#define USTORM_ROCE_REQ_CONN_AG_CTX_CF3EN_SHIFT   3
-#define USTORM_ROCE_REQ_CONN_AG_CTX_CF4EN_MASK    0x1
-#define USTORM_ROCE_REQ_CONN_AG_CTX_CF4EN_SHIFT   4
-#define USTORM_ROCE_REQ_CONN_AG_CTX_CF5EN_MASK    0x1
-#define USTORM_ROCE_REQ_CONN_AG_CTX_CF5EN_SHIFT   5
-#define USTORM_ROCE_REQ_CONN_AG_CTX_CF6EN_MASK    0x1
-#define USTORM_ROCE_REQ_CONN_AG_CTX_CF6EN_SHIFT   6
-#define USTORM_ROCE_REQ_CONN_AG_CTX_RULE0EN_MASK  0x1
-#define USTORM_ROCE_REQ_CONN_AG_CTX_RULE0EN_SHIFT 7
+#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_CF0EN_MASK	0x1
+#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_CF0EN_SHIFT	0
+#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_CF1EN_MASK	0x1
+#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_CF1EN_SHIFT	1
+#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_CF2EN_MASK	0x1
+#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_CF2EN_SHIFT	2
+#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_CF3EN_MASK	0x1
+#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_CF3EN_SHIFT	3
+#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_CF4EN_MASK	0x1
+#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_CF4EN_SHIFT	4
+#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_CF5EN_MASK	0x1
+#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_CF5EN_SHIFT	5
+#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_CF6EN_MASK	0x1
+#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_CF6EN_SHIFT	6
+#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_RULE0EN_MASK	0x1
+#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_RULE0EN_SHIFT	7
 	u8 flags3;
-#define USTORM_ROCE_REQ_CONN_AG_CTX_RULE1EN_MASK  0x1
-#define USTORM_ROCE_REQ_CONN_AG_CTX_RULE1EN_SHIFT 0
-#define USTORM_ROCE_REQ_CONN_AG_CTX_RULE2EN_MASK  0x1
-#define USTORM_ROCE_REQ_CONN_AG_CTX_RULE2EN_SHIFT 1
-#define USTORM_ROCE_REQ_CONN_AG_CTX_RULE3EN_MASK  0x1
-#define USTORM_ROCE_REQ_CONN_AG_CTX_RULE3EN_SHIFT 2
-#define USTORM_ROCE_REQ_CONN_AG_CTX_RULE4EN_MASK  0x1
-#define USTORM_ROCE_REQ_CONN_AG_CTX_RULE4EN_SHIFT 3
-#define USTORM_ROCE_REQ_CONN_AG_CTX_RULE5EN_MASK  0x1
-#define USTORM_ROCE_REQ_CONN_AG_CTX_RULE5EN_SHIFT 4
-#define USTORM_ROCE_REQ_CONN_AG_CTX_RULE6EN_MASK  0x1
-#define USTORM_ROCE_REQ_CONN_AG_CTX_RULE6EN_SHIFT 5
-#define USTORM_ROCE_REQ_CONN_AG_CTX_RULE7EN_MASK  0x1
-#define USTORM_ROCE_REQ_CONN_AG_CTX_RULE7EN_SHIFT 6
-#define USTORM_ROCE_REQ_CONN_AG_CTX_RULE8EN_MASK  0x1
-#define USTORM_ROCE_REQ_CONN_AG_CTX_RULE8EN_SHIFT 7
+#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_RULE1EN_MASK	0x1
+#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_RULE1EN_SHIFT	0
+#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_RULE2EN_MASK	0x1
+#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_RULE2EN_SHIFT	1
+#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_RULE3EN_MASK	0x1
+#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_RULE3EN_SHIFT	2
+#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_RULE4EN_MASK	0x1
+#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_RULE4EN_SHIFT	3
+#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_RULE5EN_MASK	0x1
+#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_RULE5EN_SHIFT	4
+#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_RULE6EN_MASK	0x1
+#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_RULE6EN_SHIFT	5
+#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_RULE7EN_MASK	0x1
+#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_RULE7EN_SHIFT	6
+#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_RULE8EN_MASK	0x1
+#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_RULE8EN_SHIFT	7
 	u8 byte2;
 	u8 byte3;
 	__le16 word0;
@@ -8004,63 +8599,63 @@ struct ustorm_roce_req_conn_ag_ctx {
 	__le16 word3;
 };
 
-struct ustorm_roce_resp_conn_ag_ctx {
+struct e4_ustorm_roce_resp_conn_ag_ctx {
 	u8 byte0;
 	u8 byte1;
 	u8 flags0;
-#define USTORM_ROCE_RESP_CONN_AG_CTX_BIT0_MASK     0x1
-#define USTORM_ROCE_RESP_CONN_AG_CTX_BIT0_SHIFT    0
-#define USTORM_ROCE_RESP_CONN_AG_CTX_BIT1_MASK     0x1
-#define USTORM_ROCE_RESP_CONN_AG_CTX_BIT1_SHIFT    1
-#define USTORM_ROCE_RESP_CONN_AG_CTX_CF0_MASK      0x3
-#define USTORM_ROCE_RESP_CONN_AG_CTX_CF0_SHIFT     2
-#define USTORM_ROCE_RESP_CONN_AG_CTX_CF1_MASK      0x3
-#define USTORM_ROCE_RESP_CONN_AG_CTX_CF1_SHIFT     4
-#define USTORM_ROCE_RESP_CONN_AG_CTX_CF2_MASK      0x3
-#define USTORM_ROCE_RESP_CONN_AG_CTX_CF2_SHIFT     6
+#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_BIT0_MASK	0x1
+#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_BIT0_SHIFT	0
+#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_BIT1_MASK	0x1
+#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_BIT1_SHIFT	1
+#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_CF0_MASK	0x3
+#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_CF0_SHIFT	2
+#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_CF1_MASK	0x3
+#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_CF1_SHIFT	4
+#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_CF2_MASK	0x3
+#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_CF2_SHIFT	6
 	u8 flags1;
-#define USTORM_ROCE_RESP_CONN_AG_CTX_CF3_MASK      0x3
-#define USTORM_ROCE_RESP_CONN_AG_CTX_CF3_SHIFT     0
-#define USTORM_ROCE_RESP_CONN_AG_CTX_CF4_MASK      0x3
-#define USTORM_ROCE_RESP_CONN_AG_CTX_CF4_SHIFT     2
-#define USTORM_ROCE_RESP_CONN_AG_CTX_CF5_MASK      0x3
-#define USTORM_ROCE_RESP_CONN_AG_CTX_CF5_SHIFT     4
-#define USTORM_ROCE_RESP_CONN_AG_CTX_CF6_MASK      0x3
-#define USTORM_ROCE_RESP_CONN_AG_CTX_CF6_SHIFT     6
+#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_CF3_MASK	0x3
+#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_CF3_SHIFT	0
+#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_CF4_MASK	0x3
+#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_CF4_SHIFT	2
+#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_CF5_MASK	0x3
+#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_CF5_SHIFT	4
+#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_CF6_MASK	0x3
+#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_CF6_SHIFT	6
 	u8 flags2;
-#define USTORM_ROCE_RESP_CONN_AG_CTX_CF0EN_MASK    0x1
-#define USTORM_ROCE_RESP_CONN_AG_CTX_CF0EN_SHIFT   0
-#define USTORM_ROCE_RESP_CONN_AG_CTX_CF1EN_MASK    0x1
-#define USTORM_ROCE_RESP_CONN_AG_CTX_CF1EN_SHIFT   1
-#define USTORM_ROCE_RESP_CONN_AG_CTX_CF2EN_MASK    0x1
-#define USTORM_ROCE_RESP_CONN_AG_CTX_CF2EN_SHIFT   2
-#define USTORM_ROCE_RESP_CONN_AG_CTX_CF3EN_MASK    0x1
-#define USTORM_ROCE_RESP_CONN_AG_CTX_CF3EN_SHIFT   3
-#define USTORM_ROCE_RESP_CONN_AG_CTX_CF4EN_MASK    0x1
-#define USTORM_ROCE_RESP_CONN_AG_CTX_CF4EN_SHIFT   4
-#define USTORM_ROCE_RESP_CONN_AG_CTX_CF5EN_MASK    0x1
-#define USTORM_ROCE_RESP_CONN_AG_CTX_CF5EN_SHIFT   5
-#define USTORM_ROCE_RESP_CONN_AG_CTX_CF6EN_MASK    0x1
-#define USTORM_ROCE_RESP_CONN_AG_CTX_CF6EN_SHIFT   6
-#define USTORM_ROCE_RESP_CONN_AG_CTX_RULE0EN_MASK  0x1
-#define USTORM_ROCE_RESP_CONN_AG_CTX_RULE0EN_SHIFT 7
+#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_CF0EN_MASK	0x1
+#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_CF0EN_SHIFT	0
+#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_CF1EN_MASK	0x1
+#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_CF1EN_SHIFT	1
+#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_CF2EN_MASK	0x1
+#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_CF2EN_SHIFT	2
+#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_CF3EN_MASK	0x1
+#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_CF3EN_SHIFT	3
+#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_CF4EN_MASK	0x1
+#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_CF4EN_SHIFT	4
+#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_CF5EN_MASK	0x1
+#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_CF5EN_SHIFT	5
+#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_CF6EN_MASK	0x1
+#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_CF6EN_SHIFT	6
+#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_RULE0EN_MASK	0x1
+#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_RULE0EN_SHIFT	7
 	u8 flags3;
-#define USTORM_ROCE_RESP_CONN_AG_CTX_RULE1EN_MASK  0x1
-#define USTORM_ROCE_RESP_CONN_AG_CTX_RULE1EN_SHIFT 0
-#define USTORM_ROCE_RESP_CONN_AG_CTX_RULE2EN_MASK  0x1
-#define USTORM_ROCE_RESP_CONN_AG_CTX_RULE2EN_SHIFT 1
-#define USTORM_ROCE_RESP_CONN_AG_CTX_RULE3EN_MASK  0x1
-#define USTORM_ROCE_RESP_CONN_AG_CTX_RULE3EN_SHIFT 2
-#define USTORM_ROCE_RESP_CONN_AG_CTX_RULE4EN_MASK  0x1
-#define USTORM_ROCE_RESP_CONN_AG_CTX_RULE4EN_SHIFT 3
-#define USTORM_ROCE_RESP_CONN_AG_CTX_RULE5EN_MASK  0x1
-#define USTORM_ROCE_RESP_CONN_AG_CTX_RULE5EN_SHIFT 4
-#define USTORM_ROCE_RESP_CONN_AG_CTX_RULE6EN_MASK  0x1
-#define USTORM_ROCE_RESP_CONN_AG_CTX_RULE6EN_SHIFT 5
-#define USTORM_ROCE_RESP_CONN_AG_CTX_RULE7EN_MASK  0x1
-#define USTORM_ROCE_RESP_CONN_AG_CTX_RULE7EN_SHIFT 6
-#define USTORM_ROCE_RESP_CONN_AG_CTX_RULE8EN_MASK  0x1
-#define USTORM_ROCE_RESP_CONN_AG_CTX_RULE8EN_SHIFT 7
+#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_RULE1EN_MASK	0x1
+#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_RULE1EN_SHIFT	0
+#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_RULE2EN_MASK	0x1
+#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_RULE2EN_SHIFT	1
+#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_RULE3EN_MASK	0x1
+#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_RULE3EN_SHIFT	2
+#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_RULE4EN_MASK	0x1
+#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_RULE4EN_SHIFT	3
+#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_RULE5EN_MASK	0x1
+#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_RULE5EN_SHIFT	4
+#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_RULE6EN_MASK	0x1
+#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_RULE6EN_SHIFT	5
+#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_RULE7EN_MASK	0x1
+#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_RULE7EN_SHIFT	6
+#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_RULE8EN_MASK	0x1
+#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_RULE8EN_SHIFT	7
 	u8 byte2;
 	u8 byte3;
 	__le16 word0;
@@ -8073,214 +8668,214 @@ struct ustorm_roce_resp_conn_ag_ctx {
 	__le16 word3;
 };
 
-struct xstorm_roce_req_conn_ag_ctx {
+struct e4_xstorm_roce_req_conn_ag_ctx {
 	u8 reserved0;
 	u8 state;
 	u8 flags0;
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_EXIST_IN_QM0_MASK        0x1
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_EXIST_IN_QM0_SHIFT       0
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED1_MASK           0x1
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED1_SHIFT          1
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED2_MASK           0x1
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED2_SHIFT          2
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_EXIST_IN_QM3_MASK        0x1
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_EXIST_IN_QM3_SHIFT       3
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED3_MASK           0x1
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED3_SHIFT          4
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED4_MASK           0x1
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED4_SHIFT          5
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED5_MASK           0x1
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED5_SHIFT          6
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED6_MASK           0x1
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED6_SHIFT          7
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_EXIST_IN_QM0_MASK	0x1
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_EXIST_IN_QM0_SHIFT	0
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED1_MASK		0x1
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED1_SHIFT		1
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED2_MASK		0x1
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED2_SHIFT		2
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_EXIST_IN_QM3_MASK	0x1
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_EXIST_IN_QM3_SHIFT	3
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED3_MASK		0x1
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED3_SHIFT		4
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED4_MASK		0x1
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED4_SHIFT		5
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED5_MASK		0x1
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED5_SHIFT		6
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED6_MASK		0x1
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED6_SHIFT		7
 	u8 flags1;
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED7_MASK           0x1
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED7_SHIFT          0
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED8_MASK           0x1
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED8_SHIFT          1
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_BIT10_MASK               0x1
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_BIT10_SHIFT              2
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_BIT11_MASK               0x1
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_BIT11_SHIFT              3
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_BIT12_MASK               0x1
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_BIT12_SHIFT              4
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_BIT13_MASK               0x1
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_BIT13_SHIFT              5
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_ERROR_STATE_MASK         0x1
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_ERROR_STATE_SHIFT        6
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_YSTORM_FLUSH_MASK        0x1
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_YSTORM_FLUSH_SHIFT       7
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED7_MASK		0x1
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED7_SHIFT		0
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED8_MASK		0x1
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED8_SHIFT		1
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_BIT10_MASK		0x1
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_BIT10_SHIFT		2
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_BIT11_MASK		0x1
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_BIT11_SHIFT		3
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_BIT12_MASK		0x1
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_BIT12_SHIFT		4
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_BIT13_MASK		0x1
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_BIT13_SHIFT		5
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_ERROR_STATE_MASK		0x1
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_ERROR_STATE_SHIFT	6
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_YSTORM_FLUSH_MASK	0x1
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_YSTORM_FLUSH_SHIFT	7
 	u8 flags2;
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF0_MASK                 0x3
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF0_SHIFT                0
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF1_MASK                 0x3
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF1_SHIFT                2
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF2_MASK                 0x3
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF2_SHIFT                4
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF3_MASK                 0x3
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF3_SHIFT                6
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF0_MASK		0x3
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF0_SHIFT	0
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF1_MASK		0x3
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF1_SHIFT	2
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF2_MASK		0x3
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF2_SHIFT	4
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF3_MASK		0x3
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF3_SHIFT	6
 	u8 flags3;
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_SQ_FLUSH_CF_MASK         0x3
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_SQ_FLUSH_CF_SHIFT        0
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_RX_ERROR_CF_MASK         0x3
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_RX_ERROR_CF_SHIFT        2
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_SND_RXMIT_CF_MASK        0x3
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_SND_RXMIT_CF_SHIFT       4
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_Q0_CF_MASK         0x3
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_Q0_CF_SHIFT        6
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_SQ_FLUSH_CF_MASK		0x3
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_SQ_FLUSH_CF_SHIFT	0
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RX_ERROR_CF_MASK		0x3
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RX_ERROR_CF_SHIFT	2
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_SND_RXMIT_CF_MASK	0x3
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_SND_RXMIT_CF_SHIFT	4
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_Q0_CF_MASK		0x3
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_Q0_CF_SHIFT	6
 	u8 flags4;
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF8_MASK                 0x3
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF8_SHIFT                0
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF9_MASK                 0x3
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF9_SHIFT                2
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF10_MASK                0x3
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF10_SHIFT               4
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF11_MASK                0x3
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF11_SHIFT               6
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF8_MASK		0x3
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF8_SHIFT	0
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF9_MASK		0x3
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF9_SHIFT	2
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF10_MASK	0x3
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF10_SHIFT	4
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF11_MASK	0x3
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF11_SHIFT	6
 	u8 flags5;
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF12_MASK                0x3
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF12_SHIFT               0
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF13_MASK                0x3
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF13_SHIFT               2
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_FMR_ENDED_CF_MASK        0x3
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_FMR_ENDED_CF_SHIFT       4
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF15_MASK                0x3
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF15_SHIFT               6
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF12_MASK		0x3
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF12_SHIFT		0
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF13_MASK		0x3
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF13_SHIFT		2
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_FMR_ENDED_CF_MASK	0x3
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_FMR_ENDED_CF_SHIFT	4
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF15_MASK		0x3
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF15_SHIFT		6
 	u8 flags6;
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF16_MASK                0x3
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF16_SHIFT               0
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF17_MASK                0x3
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF17_SHIFT               2
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF18_MASK                0x3
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF18_SHIFT               4
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF19_MASK                0x3
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF19_SHIFT               6
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF16_MASK	0x3
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF16_SHIFT	0
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF17_MASK	0x3
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF17_SHIFT	2
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF18_MASK	0x3
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF18_SHIFT	4
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF19_MASK	0x3
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF19_SHIFT	6
 	u8 flags7;
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF20_MASK                0x3
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF20_SHIFT               0
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF21_MASK                0x3
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF21_SHIFT               2
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_SLOW_PATH_MASK           0x3
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_SLOW_PATH_SHIFT          4
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF0EN_MASK               0x1
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF0EN_SHIFT              6
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF1EN_MASK               0x1
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF1EN_SHIFT              7
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF20_MASK	0x3
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF20_SHIFT	0
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF21_MASK	0x3
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF21_SHIFT	2
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_SLOW_PATH_MASK	0x3
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_SLOW_PATH_SHIFT	4
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF0EN_MASK	0x1
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF0EN_SHIFT	6
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF1EN_MASK	0x1
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF1EN_SHIFT	7
 	u8 flags8;
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF2EN_MASK               0x1
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF2EN_SHIFT              0
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF3EN_MASK               0x1
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF3EN_SHIFT              1
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_SQ_FLUSH_CF_EN_MASK      0x1
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_SQ_FLUSH_CF_EN_SHIFT     2
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_RX_ERROR_CF_EN_MASK      0x1
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_RX_ERROR_CF_EN_SHIFT     3
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_SND_RXMIT_CF_EN_MASK     0x1
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_SND_RXMIT_CF_EN_SHIFT    4
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_Q0_CF_EN_MASK      0x1
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_Q0_CF_EN_SHIFT     5
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF8EN_MASK               0x1
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF8EN_SHIFT              6
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF9EN_MASK               0x1
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF9EN_SHIFT              7
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF2EN_MASK		0x1
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF2EN_SHIFT		0
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF3EN_MASK		0x1
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF3EN_SHIFT		1
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_SQ_FLUSH_CF_EN_MASK	0x1
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_SQ_FLUSH_CF_EN_SHIFT	2
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RX_ERROR_CF_EN_MASK	0x1
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RX_ERROR_CF_EN_SHIFT	3
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_SND_RXMIT_CF_EN_MASK	0x1
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_SND_RXMIT_CF_EN_SHIFT	4
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_Q0_CF_EN_MASK	0x1
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_Q0_CF_EN_SHIFT	5
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF8EN_MASK		0x1
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF8EN_SHIFT		6
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF9EN_MASK		0x1
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF9EN_SHIFT		7
 	u8 flags9;
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF10EN_MASK              0x1
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF10EN_SHIFT             0
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF11EN_MASK              0x1
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF11EN_SHIFT             1
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF12EN_MASK              0x1
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF12EN_SHIFT             2
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF13EN_MASK              0x1
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF13EN_SHIFT             3
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_FME_ENDED_CF_EN_MASK     0x1
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_FME_ENDED_CF_EN_SHIFT    4
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF15EN_MASK              0x1
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF15EN_SHIFT             5
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF16EN_MASK              0x1
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF16EN_SHIFT             6
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF17EN_MASK              0x1
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF17EN_SHIFT             7
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF10EN_MASK		0x1
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF10EN_SHIFT		0
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF11EN_MASK		0x1
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF11EN_SHIFT		1
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF12EN_MASK		0x1
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF12EN_SHIFT		2
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF13EN_MASK		0x1
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF13EN_SHIFT		3
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_FME_ENDED_CF_EN_MASK	0x1
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_FME_ENDED_CF_EN_SHIFT	4
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF15EN_MASK		0x1
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF15EN_SHIFT		5
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF16EN_MASK		0x1
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF16EN_SHIFT		6
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF17EN_MASK		0x1
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF17EN_SHIFT		7
 	u8 flags10;
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF18EN_MASK              0x1
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF18EN_SHIFT             0
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF19EN_MASK              0x1
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF19EN_SHIFT             1
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF20EN_MASK              0x1
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF20EN_SHIFT             2
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF21EN_MASK              0x1
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF21EN_SHIFT             3
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_SLOW_PATH_EN_MASK        0x1
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_SLOW_PATH_EN_SHIFT       4
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF23EN_MASK              0x1
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF23EN_SHIFT             5
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE0EN_MASK             0x1
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE0EN_SHIFT            6
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE1EN_MASK             0x1
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE1EN_SHIFT            7
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF18EN_MASK		0x1
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF18EN_SHIFT		0
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF19EN_MASK		0x1
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF19EN_SHIFT		1
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF20EN_MASK		0x1
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF20EN_SHIFT		2
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF21EN_MASK		0x1
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF21EN_SHIFT		3
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_SLOW_PATH_EN_MASK	0x1
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_SLOW_PATH_EN_SHIFT	4
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF23EN_MASK		0x1
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF23EN_SHIFT		5
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RULE0EN_MASK		0x1
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RULE0EN_SHIFT		6
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RULE1EN_MASK		0x1
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RULE1EN_SHIFT		7
 	u8 flags11;
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE2EN_MASK             0x1
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE2EN_SHIFT            0
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE3EN_MASK             0x1
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE3EN_SHIFT            1
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE4EN_MASK             0x1
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE4EN_SHIFT            2
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE5EN_MASK             0x1
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE5EN_SHIFT            3
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE6EN_MASK             0x1
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE6EN_SHIFT            4
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_E2E_CREDIT_RULE_EN_MASK  0x1
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_E2E_CREDIT_RULE_EN_SHIFT 5
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED1_MASK        0x1
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED1_SHIFT       6
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE9EN_MASK             0x1
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE9EN_SHIFT            7
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RULE2EN_MASK		0x1
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RULE2EN_SHIFT		0
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RULE3EN_MASK		0x1
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RULE3EN_SHIFT		1
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RULE4EN_MASK		0x1
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RULE4EN_SHIFT		2
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RULE5EN_MASK		0x1
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RULE5EN_SHIFT		3
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RULE6EN_MASK		0x1
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RULE6EN_SHIFT		4
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_E2E_CREDIT_RULE_EN_MASK	0x1
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_E2E_CREDIT_RULE_EN_SHIFT	5
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED1_MASK	0x1
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED1_SHIFT	6
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RULE9EN_MASK		0x1
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RULE9EN_SHIFT		7
 	u8 flags12;
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_SQ_PROD_EN_MASK          0x1
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_SQ_PROD_EN_SHIFT         0
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE11EN_MASK            0x1
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE11EN_SHIFT           1
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED2_MASK        0x1
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED2_SHIFT       2
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED3_MASK        0x1
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED3_SHIFT       3
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_INV_FENCE_RULE_EN_MASK   0x1
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_INV_FENCE_RULE_EN_SHIFT  4
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE15EN_MASK            0x1
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE15EN_SHIFT           5
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_ORQ_FENCE_RULE_EN_MASK   0x1
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_ORQ_FENCE_RULE_EN_SHIFT  6
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_MAX_ORD_RULE_EN_MASK     0x1
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_MAX_ORD_RULE_EN_SHIFT    7
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_SQ_PROD_EN_MASK		0x1
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_SQ_PROD_EN_SHIFT		0
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RULE11EN_MASK		0x1
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RULE11EN_SHIFT		1
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED2_MASK	0x1
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED2_SHIFT	2
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED3_MASK	0x1
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED3_SHIFT	3
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_INV_FENCE_RULE_EN_MASK	0x1
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_INV_FENCE_RULE_EN_SHIFT	4
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RULE15EN_MASK		0x1
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RULE15EN_SHIFT		5
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_ORQ_FENCE_RULE_EN_MASK	0x1
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_ORQ_FENCE_RULE_EN_SHIFT	6
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_MAX_ORD_RULE_EN_MASK	0x1
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_MAX_ORD_RULE_EN_SHIFT	7
 	u8 flags13;
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE18EN_MASK            0x1
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE18EN_SHIFT           0
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE19EN_MASK            0x1
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE19EN_SHIFT           1
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED4_MASK        0x1
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED4_SHIFT       2
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED5_MASK        0x1
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED5_SHIFT       3
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED6_MASK        0x1
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED6_SHIFT       4
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED7_MASK        0x1
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED7_SHIFT       5
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED8_MASK        0x1
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED8_SHIFT       6
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED9_MASK        0x1
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED9_SHIFT       7
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RULE18EN_MASK		0x1
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RULE18EN_SHIFT		0
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RULE19EN_MASK		0x1
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RULE19EN_SHIFT		1
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED4_MASK	0x1
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED4_SHIFT	2
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED5_MASK	0x1
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED5_SHIFT	3
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED6_MASK	0x1
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED6_SHIFT	4
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED7_MASK	0x1
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED7_SHIFT	5
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED8_MASK	0x1
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED8_SHIFT	6
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED9_MASK	0x1
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED9_SHIFT	7
 	u8 flags14;
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_MIGRATION_FLAG_MASK      0x1
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_MIGRATION_FLAG_SHIFT     0
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_BIT17_MASK               0x1
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_BIT17_SHIFT              1
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_DPM_PORT_NUM_MASK        0x3
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_DPM_PORT_NUM_SHIFT       2
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED_MASK            0x1
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED_SHIFT           4
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_ROCE_EDPM_ENABLE_MASK    0x1
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_ROCE_EDPM_ENABLE_SHIFT   5
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF23_MASK                0x3
-#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF23_SHIFT               6
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_MIGRATION_FLAG_MASK	0x1
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_MIGRATION_FLAG_SHIFT	0
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_BIT17_MASK		0x1
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_BIT17_SHIFT		1
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_DPM_PORT_NUM_MASK	0x3
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_DPM_PORT_NUM_SHIFT	2
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED_MASK		0x1
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED_SHIFT		4
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_ROCE_EDPM_ENABLE_MASK	0x1
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_ROCE_EDPM_ENABLE_SHIFT	5
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF23_MASK		0x3
+#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF23_SHIFT		6
 	u8 byte2;
 	__le16 physical_q0;
 	__le16 word1;
@@ -8302,224 +8897,224 @@ struct xstorm_roce_req_conn_ag_ctx {
 	__le32 orq_cons;
 };
 
-struct xstorm_roce_resp_conn_ag_ctx {
+struct e4_xstorm_roce_resp_conn_ag_ctx {
 	u8 reserved0;
 	u8 state;
 	u8 flags0;
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_EXIST_IN_QM0_MASK      0x1
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_EXIST_IN_QM0_SHIFT     0
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED1_MASK         0x1
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED1_SHIFT        1
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED2_MASK         0x1
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED2_SHIFT        2
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_EXIST_IN_QM3_MASK      0x1
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_EXIST_IN_QM3_SHIFT     3
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED3_MASK         0x1
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED3_SHIFT        4
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED4_MASK         0x1
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED4_SHIFT        5
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED5_MASK         0x1
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED5_SHIFT        6
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED6_MASK         0x1
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED6_SHIFT        7
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_EXIST_IN_QM0_MASK	0x1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_EXIST_IN_QM0_SHIFT	0
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED1_MASK		0x1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED1_SHIFT		1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED2_MASK		0x1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED2_SHIFT		2
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_EXIST_IN_QM3_MASK	0x1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_EXIST_IN_QM3_SHIFT	3
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED3_MASK		0x1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED3_SHIFT		4
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED4_MASK		0x1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED4_SHIFT		5
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED5_MASK		0x1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED5_SHIFT		6
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED6_MASK		0x1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED6_SHIFT		7
 	u8 flags1;
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED7_MASK         0x1
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED7_SHIFT        0
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED8_MASK         0x1
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED8_SHIFT        1
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_BIT10_MASK             0x1
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_BIT10_SHIFT            2
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_BIT11_MASK             0x1
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_BIT11_SHIFT            3
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_BIT12_MASK             0x1
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_BIT12_SHIFT            4
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_BIT13_MASK             0x1
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_BIT13_SHIFT            5
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_ERROR_STATE_MASK       0x1
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_ERROR_STATE_SHIFT      6
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_YSTORM_FLUSH_MASK      0x1
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_YSTORM_FLUSH_SHIFT     7
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED7_MASK		0x1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED7_SHIFT		0
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED8_MASK		0x1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED8_SHIFT		1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_BIT10_MASK		0x1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_BIT10_SHIFT		2
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_BIT11_MASK		0x1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_BIT11_SHIFT		3
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_BIT12_MASK		0x1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_BIT12_SHIFT		4
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_BIT13_MASK		0x1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_BIT13_SHIFT		5
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_ERROR_STATE_MASK	0x1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_ERROR_STATE_SHIFT	6
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_YSTORM_FLUSH_MASK	0x1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_YSTORM_FLUSH_SHIFT	7
 	u8 flags2;
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF0_MASK               0x3
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF0_SHIFT              0
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF1_MASK               0x3
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF1_SHIFT              2
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF2_MASK               0x3
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF2_SHIFT              4
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF3_MASK               0x3
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF3_SHIFT              6
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF0_MASK	0x3
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF0_SHIFT	0
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF1_MASK	0x3
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF1_SHIFT	2
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF2_MASK	0x3
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF2_SHIFT	4
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF3_MASK	0x3
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF3_SHIFT	6
 	u8 flags3;
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_RXMIT_CF_MASK          0x3
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_RXMIT_CF_SHIFT         0
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_RX_ERROR_CF_MASK       0x3
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_RX_ERROR_CF_SHIFT      2
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_FORCE_ACK_CF_MASK      0x3
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_FORCE_ACK_CF_SHIFT     4
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_FLUSH_Q0_CF_MASK       0x3
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_FLUSH_Q0_CF_SHIFT      6
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RXMIT_CF_MASK		0x3
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RXMIT_CF_SHIFT		0
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RX_ERROR_CF_MASK	0x3
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RX_ERROR_CF_SHIFT	2
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_FORCE_ACK_CF_MASK	0x3
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_FORCE_ACK_CF_SHIFT	4
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_FLUSH_Q0_CF_MASK	0x3
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_FLUSH_Q0_CF_SHIFT	6
 	u8 flags4;
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF8_MASK               0x3
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF8_SHIFT              0
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF9_MASK               0x3
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF9_SHIFT              2
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF10_MASK              0x3
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF10_SHIFT             4
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF11_MASK              0x3
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF11_SHIFT             6
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF8_MASK	0x3
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF8_SHIFT	0
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF9_MASK	0x3
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF9_SHIFT	2
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF10_MASK	0x3
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF10_SHIFT	4
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF11_MASK	0x3
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF11_SHIFT	6
 	u8 flags5;
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF12_MASK              0x3
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF12_SHIFT             0
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF13_MASK              0x3
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF13_SHIFT             2
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF14_MASK              0x3
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF14_SHIFT             4
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF15_MASK              0x3
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF15_SHIFT             6
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF12_MASK	0x3
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF12_SHIFT	0
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF13_MASK	0x3
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF13_SHIFT	2
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF14_MASK	0x3
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF14_SHIFT	4
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF15_MASK	0x3
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF15_SHIFT	6
 	u8 flags6;
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF16_MASK              0x3
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF16_SHIFT             0
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF17_MASK              0x3
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF17_SHIFT             2
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF18_MASK              0x3
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF18_SHIFT             4
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF19_MASK              0x3
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF19_SHIFT             6
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF16_MASK	0x3
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF16_SHIFT	0
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF17_MASK	0x3
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF17_SHIFT	2
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF18_MASK	0x3
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF18_SHIFT	4
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF19_MASK	0x3
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF19_SHIFT	6
 	u8 flags7;
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF20_MASK              0x3
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF20_SHIFT             0
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF21_MASK              0x3
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF21_SHIFT             2
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_SLOW_PATH_MASK         0x3
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_SLOW_PATH_SHIFT        4
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF0EN_MASK             0x1
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF0EN_SHIFT            6
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF1EN_MASK             0x1
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF1EN_SHIFT            7
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF20_MASK	0x3
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF20_SHIFT	0
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF21_MASK	0x3
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF21_SHIFT	2
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_SLOW_PATH_MASK	0x3
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_SLOW_PATH_SHIFT	4
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF0EN_MASK	0x1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF0EN_SHIFT	6
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF1EN_MASK	0x1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF1EN_SHIFT	7
 	u8 flags8;
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF2EN_MASK             0x1
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF2EN_SHIFT            0
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF3EN_MASK             0x1
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF3EN_SHIFT            1
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_RXMIT_CF_EN_MASK       0x1
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_RXMIT_CF_EN_SHIFT      2
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_RX_ERROR_CF_EN_MASK    0x1
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_RX_ERROR_CF_EN_SHIFT   3
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_FORCE_ACK_CF_EN_MASK   0x1
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_FORCE_ACK_CF_EN_SHIFT  4
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_FLUSH_Q0_CF_EN_MASK    0x1
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_FLUSH_Q0_CF_EN_SHIFT   5
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF8EN_MASK             0x1
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF8EN_SHIFT            6
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF9EN_MASK             0x1
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF9EN_SHIFT            7
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF2EN_MASK		0x1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF2EN_SHIFT		0
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF3EN_MASK		0x1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF3EN_SHIFT		1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RXMIT_CF_EN_MASK	0x1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RXMIT_CF_EN_SHIFT	2
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RX_ERROR_CF_EN_MASK	0x1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RX_ERROR_CF_EN_SHIFT	3
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_FORCE_ACK_CF_EN_MASK	0x1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_FORCE_ACK_CF_EN_SHIFT	4
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_FLUSH_Q0_CF_EN_MASK	0x1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_FLUSH_Q0_CF_EN_SHIFT	5
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF8EN_MASK		0x1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF8EN_SHIFT		6
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF9EN_MASK		0x1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF9EN_SHIFT		7
 	u8 flags9;
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF10EN_MASK            0x1
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF10EN_SHIFT           0
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF11EN_MASK            0x1
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF11EN_SHIFT           1
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF12EN_MASK            0x1
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF12EN_SHIFT           2
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF13EN_MASK            0x1
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF13EN_SHIFT           3
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF14EN_MASK            0x1
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF14EN_SHIFT           4
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF15EN_MASK            0x1
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF15EN_SHIFT           5
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF16EN_MASK            0x1
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF16EN_SHIFT           6
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF17EN_MASK            0x1
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF17EN_SHIFT           7
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF10EN_MASK	0x1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF10EN_SHIFT	0
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF11EN_MASK	0x1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF11EN_SHIFT	1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF12EN_MASK	0x1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF12EN_SHIFT	2
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF13EN_MASK	0x1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF13EN_SHIFT	3
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF14EN_MASK	0x1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF14EN_SHIFT	4
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF15EN_MASK	0x1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF15EN_SHIFT	5
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF16EN_MASK	0x1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF16EN_SHIFT	6
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF17EN_MASK	0x1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF17EN_SHIFT	7
 	u8 flags10;
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF18EN_MASK            0x1
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF18EN_SHIFT           0
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF19EN_MASK            0x1
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF19EN_SHIFT           1
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF20EN_MASK            0x1
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF20EN_SHIFT           2
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF21EN_MASK            0x1
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF21EN_SHIFT           3
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_SLOW_PATH_EN_MASK      0x1
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_SLOW_PATH_EN_SHIFT     4
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF23EN_MASK            0x1
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF23EN_SHIFT           5
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE0EN_MASK           0x1
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE0EN_SHIFT          6
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE1EN_MASK           0x1
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE1EN_SHIFT          7
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF18EN_MASK		0x1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF18EN_SHIFT		0
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF19EN_MASK		0x1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF19EN_SHIFT		1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF20EN_MASK		0x1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF20EN_SHIFT		2
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF21EN_MASK		0x1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF21EN_SHIFT		3
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_SLOW_PATH_EN_MASK	0x1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_SLOW_PATH_EN_SHIFT	4
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF23EN_MASK		0x1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF23EN_SHIFT		5
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RULE0EN_MASK		0x1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RULE0EN_SHIFT		6
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RULE1EN_MASK		0x1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RULE1EN_SHIFT		7
 	u8 flags11;
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE2EN_MASK           0x1
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE2EN_SHIFT          0
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE3EN_MASK           0x1
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE3EN_SHIFT          1
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE4EN_MASK           0x1
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE4EN_SHIFT          2
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE5EN_MASK           0x1
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE5EN_SHIFT          3
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE6EN_MASK           0x1
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE6EN_SHIFT          4
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE7EN_MASK           0x1
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE7EN_SHIFT          5
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED1_MASK      0x1
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED1_SHIFT     6
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE9EN_MASK           0x1
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE9EN_SHIFT          7
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RULE2EN_MASK		0x1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RULE2EN_SHIFT		0
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RULE3EN_MASK		0x1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RULE3EN_SHIFT		1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RULE4EN_MASK		0x1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RULE4EN_SHIFT		2
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RULE5EN_MASK		0x1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RULE5EN_SHIFT		3
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RULE6EN_MASK		0x1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RULE6EN_SHIFT		4
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RULE7EN_MASK		0x1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RULE7EN_SHIFT		5
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED1_MASK	0x1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED1_SHIFT	6
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RULE9EN_MASK		0x1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RULE9EN_SHIFT		7
 	u8 flags12;
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE10EN_MASK          0x1
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE10EN_SHIFT         0
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_IRQ_PROD_RULE_EN_MASK  0x1
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_IRQ_PROD_RULE_EN_SHIFT 1
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED2_MASK      0x1
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED2_SHIFT     2
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED3_MASK      0x1
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED3_SHIFT     3
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE14EN_MASK          0x1
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE14EN_SHIFT         4
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE15EN_MASK          0x1
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE15EN_SHIFT         5
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE16EN_MASK          0x1
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE16EN_SHIFT         6
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE17EN_MASK          0x1
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE17EN_SHIFT         7
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_IRQ_PROD_RULE_EN_MASK	0x1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_IRQ_PROD_RULE_EN_SHIFT	0
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RULE11EN_MASK		0x1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RULE11EN_SHIFT		1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED2_MASK	0x1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED2_SHIFT	2
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED3_MASK	0x1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED3_SHIFT	3
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RULE14EN_MASK		0x1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RULE14EN_SHIFT		4
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RULE15EN_MASK		0x1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RULE15EN_SHIFT		5
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RULE16EN_MASK		0x1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RULE16EN_SHIFT		6
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RULE17EN_MASK		0x1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RULE17EN_SHIFT		7
 	u8 flags13;
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE18EN_MASK          0x1
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE18EN_SHIFT         0
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE19EN_MASK          0x1
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE19EN_SHIFT         1
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED4_MASK      0x1
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED4_SHIFT     2
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED5_MASK      0x1
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED5_SHIFT     3
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED6_MASK      0x1
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED6_SHIFT     4
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED7_MASK      0x1
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED7_SHIFT     5
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED8_MASK      0x1
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED8_SHIFT     6
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED9_MASK      0x1
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED9_SHIFT     7
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RULE18EN_MASK		0x1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RULE18EN_SHIFT		0
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RULE19EN_MASK		0x1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RULE19EN_SHIFT		1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED4_MASK	0x1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED4_SHIFT	2
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED5_MASK	0x1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED5_SHIFT	3
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED6_MASK	0x1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED6_SHIFT	4
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED7_MASK	0x1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED7_SHIFT	5
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED8_MASK	0x1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED8_SHIFT	6
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED9_MASK	0x1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED9_SHIFT	7
 	u8 flags14;
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_BIT16_MASK             0x1
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_BIT16_SHIFT            0
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_BIT17_MASK             0x1
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_BIT17_SHIFT            1
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_BIT18_MASK             0x1
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_BIT18_SHIFT            2
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_BIT19_MASK             0x1
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_BIT19_SHIFT            3
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_BIT20_MASK             0x1
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_BIT20_SHIFT            4
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_BIT21_MASK             0x1
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_BIT21_SHIFT            5
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF23_MASK              0x3
-#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF23_SHIFT             6
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_BIT16_MASK	0x1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_BIT16_SHIFT	0
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_BIT17_MASK	0x1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_BIT17_SHIFT	1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_BIT18_MASK	0x1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_BIT18_SHIFT	2
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_BIT19_MASK	0x1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_BIT19_SHIFT	3
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_BIT20_MASK	0x1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_BIT20_SHIFT	4
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_BIT21_MASK	0x1
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_BIT21_SHIFT	5
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF23_MASK	0x3
+#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF23_SHIFT	6
 	u8 byte2;
 	__le16 physical_q0;
-	__le16 word1;
-	__le16 irq_prod;
-	__le16 word3;
-	__le16 word4;
-	__le16 ereserved1;
+	__le16 irq_prod_shadow;
+	__le16 word2;
 	__le16 irq_cons;
+	__le16 irq_prod;
+	__le16 e5_reserved1;
+	__le16 conn_dpi;
 	u8 rxmit_opcode;
 	u8 byte4;
 	u8 byte5;
@@ -8533,37 +9128,37 @@ struct xstorm_roce_resp_conn_ag_ctx {
 	__le32 msn_and_syndrome;
 };
 
-struct ystorm_roce_req_conn_ag_ctx {
+struct e4_ystorm_roce_req_conn_ag_ctx {
 	u8 byte0;
 	u8 byte1;
 	u8 flags0;
-#define YSTORM_ROCE_REQ_CONN_AG_CTX_BIT0_MASK     0x1
-#define YSTORM_ROCE_REQ_CONN_AG_CTX_BIT0_SHIFT    0
-#define YSTORM_ROCE_REQ_CONN_AG_CTX_BIT1_MASK     0x1
-#define YSTORM_ROCE_REQ_CONN_AG_CTX_BIT1_SHIFT    1
-#define YSTORM_ROCE_REQ_CONN_AG_CTX_CF0_MASK      0x3
-#define YSTORM_ROCE_REQ_CONN_AG_CTX_CF0_SHIFT     2
-#define YSTORM_ROCE_REQ_CONN_AG_CTX_CF1_MASK      0x3
-#define YSTORM_ROCE_REQ_CONN_AG_CTX_CF1_SHIFT     4
-#define YSTORM_ROCE_REQ_CONN_AG_CTX_CF2_MASK      0x3
-#define YSTORM_ROCE_REQ_CONN_AG_CTX_CF2_SHIFT     6
+#define E4_YSTORM_ROCE_REQ_CONN_AG_CTX_BIT0_MASK	0x1
+#define E4_YSTORM_ROCE_REQ_CONN_AG_CTX_BIT0_SHIFT	0
+#define E4_YSTORM_ROCE_REQ_CONN_AG_CTX_BIT1_MASK	0x1
+#define E4_YSTORM_ROCE_REQ_CONN_AG_CTX_BIT1_SHIFT	1
+#define E4_YSTORM_ROCE_REQ_CONN_AG_CTX_CF0_MASK		0x3
+#define E4_YSTORM_ROCE_REQ_CONN_AG_CTX_CF0_SHIFT	2
+#define E4_YSTORM_ROCE_REQ_CONN_AG_CTX_CF1_MASK		0x3
+#define E4_YSTORM_ROCE_REQ_CONN_AG_CTX_CF1_SHIFT	4
+#define E4_YSTORM_ROCE_REQ_CONN_AG_CTX_CF2_MASK		0x3
+#define E4_YSTORM_ROCE_REQ_CONN_AG_CTX_CF2_SHIFT	6
 	u8 flags1;
-#define YSTORM_ROCE_REQ_CONN_AG_CTX_CF0EN_MASK    0x1
-#define YSTORM_ROCE_REQ_CONN_AG_CTX_CF0EN_SHIFT   0
-#define YSTORM_ROCE_REQ_CONN_AG_CTX_CF1EN_MASK    0x1
-#define YSTORM_ROCE_REQ_CONN_AG_CTX_CF1EN_SHIFT   1
-#define YSTORM_ROCE_REQ_CONN_AG_CTX_CF2EN_MASK    0x1
-#define YSTORM_ROCE_REQ_CONN_AG_CTX_CF2EN_SHIFT   2
-#define YSTORM_ROCE_REQ_CONN_AG_CTX_RULE0EN_MASK  0x1
-#define YSTORM_ROCE_REQ_CONN_AG_CTX_RULE0EN_SHIFT 3
-#define YSTORM_ROCE_REQ_CONN_AG_CTX_RULE1EN_MASK  0x1
-#define YSTORM_ROCE_REQ_CONN_AG_CTX_RULE1EN_SHIFT 4
-#define YSTORM_ROCE_REQ_CONN_AG_CTX_RULE2EN_MASK  0x1
-#define YSTORM_ROCE_REQ_CONN_AG_CTX_RULE2EN_SHIFT 5
-#define YSTORM_ROCE_REQ_CONN_AG_CTX_RULE3EN_MASK  0x1
-#define YSTORM_ROCE_REQ_CONN_AG_CTX_RULE3EN_SHIFT 6
-#define YSTORM_ROCE_REQ_CONN_AG_CTX_RULE4EN_MASK  0x1
-#define YSTORM_ROCE_REQ_CONN_AG_CTX_RULE4EN_SHIFT 7
+#define E4_YSTORM_ROCE_REQ_CONN_AG_CTX_CF0EN_MASK	0x1
+#define E4_YSTORM_ROCE_REQ_CONN_AG_CTX_CF0EN_SHIFT	0
+#define E4_YSTORM_ROCE_REQ_CONN_AG_CTX_CF1EN_MASK	0x1
+#define E4_YSTORM_ROCE_REQ_CONN_AG_CTX_CF1EN_SHIFT	1
+#define E4_YSTORM_ROCE_REQ_CONN_AG_CTX_CF2EN_MASK	0x1
+#define E4_YSTORM_ROCE_REQ_CONN_AG_CTX_CF2EN_SHIFT	2
+#define E4_YSTORM_ROCE_REQ_CONN_AG_CTX_RULE0EN_MASK	0x1
+#define E4_YSTORM_ROCE_REQ_CONN_AG_CTX_RULE0EN_SHIFT	3
+#define E4_YSTORM_ROCE_REQ_CONN_AG_CTX_RULE1EN_MASK	0x1
+#define E4_YSTORM_ROCE_REQ_CONN_AG_CTX_RULE1EN_SHIFT	4
+#define E4_YSTORM_ROCE_REQ_CONN_AG_CTX_RULE2EN_MASK	0x1
+#define E4_YSTORM_ROCE_REQ_CONN_AG_CTX_RULE2EN_SHIFT	5
+#define E4_YSTORM_ROCE_REQ_CONN_AG_CTX_RULE3EN_MASK	0x1
+#define E4_YSTORM_ROCE_REQ_CONN_AG_CTX_RULE3EN_SHIFT	6
+#define E4_YSTORM_ROCE_REQ_CONN_AG_CTX_RULE4EN_MASK	0x1
+#define E4_YSTORM_ROCE_REQ_CONN_AG_CTX_RULE4EN_SHIFT	7
 	u8 byte2;
 	u8 byte3;
 	__le16 word0;
@@ -8577,37 +9172,37 @@ struct ystorm_roce_req_conn_ag_ctx {
 	__le32 reg3;
 };
 
-struct ystorm_roce_resp_conn_ag_ctx {
+struct e4_ystorm_roce_resp_conn_ag_ctx {
 	u8 byte0;
 	u8 byte1;
 	u8 flags0;
-#define YSTORM_ROCE_RESP_CONN_AG_CTX_BIT0_MASK     0x1
-#define YSTORM_ROCE_RESP_CONN_AG_CTX_BIT0_SHIFT    0
-#define YSTORM_ROCE_RESP_CONN_AG_CTX_BIT1_MASK     0x1
-#define YSTORM_ROCE_RESP_CONN_AG_CTX_BIT1_SHIFT    1
-#define YSTORM_ROCE_RESP_CONN_AG_CTX_CF0_MASK      0x3
-#define YSTORM_ROCE_RESP_CONN_AG_CTX_CF0_SHIFT     2
-#define YSTORM_ROCE_RESP_CONN_AG_CTX_CF1_MASK      0x3
-#define YSTORM_ROCE_RESP_CONN_AG_CTX_CF1_SHIFT     4
-#define YSTORM_ROCE_RESP_CONN_AG_CTX_CF2_MASK      0x3
-#define YSTORM_ROCE_RESP_CONN_AG_CTX_CF2_SHIFT     6
+#define E4_YSTORM_ROCE_RESP_CONN_AG_CTX_BIT0_MASK	0x1
+#define E4_YSTORM_ROCE_RESP_CONN_AG_CTX_BIT0_SHIFT	0
+#define E4_YSTORM_ROCE_RESP_CONN_AG_CTX_BIT1_MASK	0x1
+#define E4_YSTORM_ROCE_RESP_CONN_AG_CTX_BIT1_SHIFT	1
+#define E4_YSTORM_ROCE_RESP_CONN_AG_CTX_CF0_MASK	0x3
+#define E4_YSTORM_ROCE_RESP_CONN_AG_CTX_CF0_SHIFT	2
+#define E4_YSTORM_ROCE_RESP_CONN_AG_CTX_CF1_MASK	0x3
+#define E4_YSTORM_ROCE_RESP_CONN_AG_CTX_CF1_SHIFT	4
+#define E4_YSTORM_ROCE_RESP_CONN_AG_CTX_CF2_MASK	0x3
+#define E4_YSTORM_ROCE_RESP_CONN_AG_CTX_CF2_SHIFT	6
 	u8 flags1;
-#define YSTORM_ROCE_RESP_CONN_AG_CTX_CF0EN_MASK    0x1
-#define YSTORM_ROCE_RESP_CONN_AG_CTX_CF0EN_SHIFT   0
-#define YSTORM_ROCE_RESP_CONN_AG_CTX_CF1EN_MASK    0x1
-#define YSTORM_ROCE_RESP_CONN_AG_CTX_CF1EN_SHIFT   1
-#define YSTORM_ROCE_RESP_CONN_AG_CTX_CF2EN_MASK    0x1
-#define YSTORM_ROCE_RESP_CONN_AG_CTX_CF2EN_SHIFT   2
-#define YSTORM_ROCE_RESP_CONN_AG_CTX_RULE0EN_MASK  0x1
-#define YSTORM_ROCE_RESP_CONN_AG_CTX_RULE0EN_SHIFT 3
-#define YSTORM_ROCE_RESP_CONN_AG_CTX_RULE1EN_MASK  0x1
-#define YSTORM_ROCE_RESP_CONN_AG_CTX_RULE1EN_SHIFT 4
-#define YSTORM_ROCE_RESP_CONN_AG_CTX_RULE2EN_MASK  0x1
-#define YSTORM_ROCE_RESP_CONN_AG_CTX_RULE2EN_SHIFT 5
-#define YSTORM_ROCE_RESP_CONN_AG_CTX_RULE3EN_MASK  0x1
-#define YSTORM_ROCE_RESP_CONN_AG_CTX_RULE3EN_SHIFT 6
-#define YSTORM_ROCE_RESP_CONN_AG_CTX_RULE4EN_MASK  0x1
-#define YSTORM_ROCE_RESP_CONN_AG_CTX_RULE4EN_SHIFT 7
+#define E4_YSTORM_ROCE_RESP_CONN_AG_CTX_CF0EN_MASK	0x1
+#define E4_YSTORM_ROCE_RESP_CONN_AG_CTX_CF0EN_SHIFT	0
+#define E4_YSTORM_ROCE_RESP_CONN_AG_CTX_CF1EN_MASK	0x1
+#define E4_YSTORM_ROCE_RESP_CONN_AG_CTX_CF1EN_SHIFT	1
+#define E4_YSTORM_ROCE_RESP_CONN_AG_CTX_CF2EN_MASK	0x1
+#define E4_YSTORM_ROCE_RESP_CONN_AG_CTX_CF2EN_SHIFT	2
+#define E4_YSTORM_ROCE_RESP_CONN_AG_CTX_RULE0EN_MASK	0x1
+#define E4_YSTORM_ROCE_RESP_CONN_AG_CTX_RULE0EN_SHIFT	3
+#define E4_YSTORM_ROCE_RESP_CONN_AG_CTX_RULE1EN_MASK	0x1
+#define E4_YSTORM_ROCE_RESP_CONN_AG_CTX_RULE1EN_SHIFT	4
+#define E4_YSTORM_ROCE_RESP_CONN_AG_CTX_RULE2EN_MASK	0x1
+#define E4_YSTORM_ROCE_RESP_CONN_AG_CTX_RULE2EN_SHIFT	5
+#define E4_YSTORM_ROCE_RESP_CONN_AG_CTX_RULE3EN_MASK	0x1
+#define E4_YSTORM_ROCE_RESP_CONN_AG_CTX_RULE3EN_SHIFT	6
+#define E4_YSTORM_ROCE_RESP_CONN_AG_CTX_RULE4EN_MASK	0x1
+#define E4_YSTORM_ROCE_RESP_CONN_AG_CTX_RULE4EN_SHIFT	7
 	u8 byte2;
 	u8 byte3;
 	__le16 word0;
@@ -8621,6 +9216,7 @@ struct ystorm_roce_resp_conn_ag_ctx {
 	__le32 reg3;
 };
 
+/* Roce doorbell data */
 enum roce_flavor {
 	PLAIN_ROCE,
 	RROCE_IPV4,
@@ -8628,228 +9224,231 @@ enum roce_flavor {
 	MAX_ROCE_FLAVOR
 };
 
+/* The iwarp storm context of Ystorm */
 struct ystorm_iwarp_conn_st_ctx {
 	__le32 reserved[4];
 };
 
+/* The iwarp storm context of Pstorm */
 struct pstorm_iwarp_conn_st_ctx {
 	__le32 reserved[36];
 };
 
+/* The iwarp storm context of Xstorm */
 struct xstorm_iwarp_conn_st_ctx {
 	__le32 reserved[44];
 };
 
-struct xstorm_iwarp_conn_ag_ctx {
+struct e4_xstorm_iwarp_conn_ag_ctx {
 	u8 reserved0;
 	u8 state;
 	u8 flags0;
-#define XSTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM0_MASK	0x1
-#define XSTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM0_SHIFT	0
-#define XSTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM1_MASK	0x1
-#define XSTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM1_SHIFT	1
-#define XSTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM2_MASK	0x1
-#define XSTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM2_SHIFT	2
-#define XSTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM3_MASK	0x1
-#define XSTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM3_SHIFT	3
-#define XSTORM_IWARP_CONN_AG_CTX_BIT4_MASK	0x1
-#define XSTORM_IWARP_CONN_AG_CTX_BIT4_SHIFT	4
-#define XSTORM_IWARP_CONN_AG_CTX_RESERVED2_MASK	0x1
-#define XSTORM_IWARP_CONN_AG_CTX_RESERVED2_SHIFT	5
-#define XSTORM_IWARP_CONN_AG_CTX_BIT6_MASK	0x1
-#define XSTORM_IWARP_CONN_AG_CTX_BIT6_SHIFT	6
-#define XSTORM_IWARP_CONN_AG_CTX_BIT7_MASK	0x1
-#define XSTORM_IWARP_CONN_AG_CTX_BIT7_SHIFT	7
+#define E4_XSTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM0_MASK	0x1
+#define E4_XSTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM0_SHIFT	0
+#define E4_XSTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM1_MASK	0x1
+#define E4_XSTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM1_SHIFT	1
+#define E4_XSTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM2_MASK	0x1
+#define E4_XSTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM2_SHIFT	2
+#define E4_XSTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM3_MASK	0x1
+#define E4_XSTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM3_SHIFT	3
+#define E4_XSTORM_IWARP_CONN_AG_CTX_BIT4_MASK		0x1
+#define E4_XSTORM_IWARP_CONN_AG_CTX_BIT4_SHIFT		4
+#define E4_XSTORM_IWARP_CONN_AG_CTX_RESERVED2_MASK	0x1
+#define E4_XSTORM_IWARP_CONN_AG_CTX_RESERVED2_SHIFT	5
+#define E4_XSTORM_IWARP_CONN_AG_CTX_BIT6_MASK		0x1
+#define E4_XSTORM_IWARP_CONN_AG_CTX_BIT6_SHIFT		6
+#define E4_XSTORM_IWARP_CONN_AG_CTX_BIT7_MASK		0x1
+#define E4_XSTORM_IWARP_CONN_AG_CTX_BIT7_SHIFT		7
 	u8 flags1;
-#define XSTORM_IWARP_CONN_AG_CTX_BIT8_MASK	0x1
-#define XSTORM_IWARP_CONN_AG_CTX_BIT8_SHIFT	0
-#define XSTORM_IWARP_CONN_AG_CTX_BIT9_MASK	0x1
-#define XSTORM_IWARP_CONN_AG_CTX_BIT9_SHIFT	1
-#define XSTORM_IWARP_CONN_AG_CTX_BIT10_MASK	0x1
-#define XSTORM_IWARP_CONN_AG_CTX_BIT10_SHIFT	2
-#define XSTORM_IWARP_CONN_AG_CTX_BIT11_MASK	0x1
-#define XSTORM_IWARP_CONN_AG_CTX_BIT11_SHIFT	3
-#define XSTORM_IWARP_CONN_AG_CTX_BIT12_MASK	0x1
-#define XSTORM_IWARP_CONN_AG_CTX_BIT12_SHIFT	4
-#define XSTORM_IWARP_CONN_AG_CTX_BIT13_MASK	0x1
-#define XSTORM_IWARP_CONN_AG_CTX_BIT13_SHIFT	5
-#define XSTORM_IWARP_CONN_AG_CTX_BIT14_MASK	0x1
-#define XSTORM_IWARP_CONN_AG_CTX_BIT14_SHIFT	6
-#define XSTORM_IWARP_CONN_AG_CTX_YSTORM_FLUSH_OR_REWIND_SND_MAX_MASK	0x1
-#define XSTORM_IWARP_CONN_AG_CTX_YSTORM_FLUSH_OR_REWIND_SND_MAX_SHIFT	7
+#define E4_XSTORM_IWARP_CONN_AG_CTX_BIT8_MASK				0x1
+#define E4_XSTORM_IWARP_CONN_AG_CTX_BIT8_SHIFT				0
+#define E4_XSTORM_IWARP_CONN_AG_CTX_BIT9_MASK				0x1
+#define E4_XSTORM_IWARP_CONN_AG_CTX_BIT9_SHIFT				1
+#define E4_XSTORM_IWARP_CONN_AG_CTX_BIT10_MASK				0x1
+#define E4_XSTORM_IWARP_CONN_AG_CTX_BIT10_SHIFT				2
+#define E4_XSTORM_IWARP_CONN_AG_CTX_BIT11_MASK				0x1
+#define E4_XSTORM_IWARP_CONN_AG_CTX_BIT11_SHIFT				3
+#define E4_XSTORM_IWARP_CONN_AG_CTX_BIT12_MASK				0x1
+#define E4_XSTORM_IWARP_CONN_AG_CTX_BIT12_SHIFT				4
+#define E4_XSTORM_IWARP_CONN_AG_CTX_BIT13_MASK				0x1
+#define E4_XSTORM_IWARP_CONN_AG_CTX_BIT13_SHIFT				5
+#define E4_XSTORM_IWARP_CONN_AG_CTX_BIT14_MASK				0x1
+#define E4_XSTORM_IWARP_CONN_AG_CTX_BIT14_SHIFT				6
+#define E4_XSTORM_IWARP_CONN_AG_CTX_YSTORM_FLUSH_OR_REWIND_SND_MAX_MASK	0x1
+#define E4_XSTORM_IWARP_CONN_AG_CTX_YSTORM_FLUSH_OR_REWIND_SND_MAX_SHIFT 7
 	u8 flags2;
-#define XSTORM_IWARP_CONN_AG_CTX_CF0_MASK	0x3
-#define XSTORM_IWARP_CONN_AG_CTX_CF0_SHIFT	0
-#define XSTORM_IWARP_CONN_AG_CTX_CF1_MASK	0x3
-#define XSTORM_IWARP_CONN_AG_CTX_CF1_SHIFT	2
-#define XSTORM_IWARP_CONN_AG_CTX_CF2_MASK	0x3
-#define XSTORM_IWARP_CONN_AG_CTX_CF2_SHIFT	4
-#define XSTORM_IWARP_CONN_AG_CTX_TIMER_STOP_ALL_MASK	0x3
-#define XSTORM_IWARP_CONN_AG_CTX_TIMER_STOP_ALL_SHIFT	6
+#define E4_XSTORM_IWARP_CONN_AG_CTX_CF0_MASK			0x3
+#define E4_XSTORM_IWARP_CONN_AG_CTX_CF0_SHIFT			0
+#define E4_XSTORM_IWARP_CONN_AG_CTX_CF1_MASK			0x3
+#define E4_XSTORM_IWARP_CONN_AG_CTX_CF1_SHIFT			2
+#define E4_XSTORM_IWARP_CONN_AG_CTX_CF2_MASK			0x3
+#define E4_XSTORM_IWARP_CONN_AG_CTX_CF2_SHIFT			4
+#define E4_XSTORM_IWARP_CONN_AG_CTX_TIMER_STOP_ALL_MASK		0x3
+#define E4_XSTORM_IWARP_CONN_AG_CTX_TIMER_STOP_ALL_SHIFT	6
 	u8 flags3;
-#define XSTORM_IWARP_CONN_AG_CTX_CF4_MASK	0x3
-#define XSTORM_IWARP_CONN_AG_CTX_CF4_SHIFT	0
-#define XSTORM_IWARP_CONN_AG_CTX_CF5_MASK	0x3
-#define XSTORM_IWARP_CONN_AG_CTX_CF5_SHIFT	2
-#define XSTORM_IWARP_CONN_AG_CTX_CF6_MASK	0x3
-#define XSTORM_IWARP_CONN_AG_CTX_CF6_SHIFT	4
-#define XSTORM_IWARP_CONN_AG_CTX_CF7_MASK	0x3
-#define XSTORM_IWARP_CONN_AG_CTX_CF7_SHIFT	6
+#define E4_XSTORM_IWARP_CONN_AG_CTX_CF4_MASK	0x3
+#define E4_XSTORM_IWARP_CONN_AG_CTX_CF4_SHIFT	0
+#define E4_XSTORM_IWARP_CONN_AG_CTX_CF5_MASK	0x3
+#define E4_XSTORM_IWARP_CONN_AG_CTX_CF5_SHIFT	2
+#define E4_XSTORM_IWARP_CONN_AG_CTX_CF6_MASK	0x3
+#define E4_XSTORM_IWARP_CONN_AG_CTX_CF6_SHIFT	4
+#define E4_XSTORM_IWARP_CONN_AG_CTX_CF7_MASK	0x3
+#define E4_XSTORM_IWARP_CONN_AG_CTX_CF7_SHIFT	6
 	u8 flags4;
-#define XSTORM_IWARP_CONN_AG_CTX_CF8_MASK	0x3
-#define XSTORM_IWARP_CONN_AG_CTX_CF8_SHIFT	0
-#define XSTORM_IWARP_CONN_AG_CTX_CF9_MASK	0x3
-#define XSTORM_IWARP_CONN_AG_CTX_CF9_SHIFT	2
-#define XSTORM_IWARP_CONN_AG_CTX_CF10_MASK	0x3
-#define XSTORM_IWARP_CONN_AG_CTX_CF10_SHIFT	4
-#define XSTORM_IWARP_CONN_AG_CTX_CF11_MASK	0x3
-#define XSTORM_IWARP_CONN_AG_CTX_CF11_SHIFT	6
+#define E4_XSTORM_IWARP_CONN_AG_CTX_CF8_MASK	0x3
+#define E4_XSTORM_IWARP_CONN_AG_CTX_CF8_SHIFT	0
+#define E4_XSTORM_IWARP_CONN_AG_CTX_CF9_MASK	0x3
+#define E4_XSTORM_IWARP_CONN_AG_CTX_CF9_SHIFT	2
+#define E4_XSTORM_IWARP_CONN_AG_CTX_CF10_MASK	0x3
+#define E4_XSTORM_IWARP_CONN_AG_CTX_CF10_SHIFT	4
+#define E4_XSTORM_IWARP_CONN_AG_CTX_CF11_MASK	0x3
+#define E4_XSTORM_IWARP_CONN_AG_CTX_CF11_SHIFT	6
 	u8 flags5;
-#define XSTORM_IWARP_CONN_AG_CTX_CF12_MASK	0x3
-#define XSTORM_IWARP_CONN_AG_CTX_CF12_SHIFT	0
-#define XSTORM_IWARP_CONN_AG_CTX_CF13_MASK	0x3
-#define XSTORM_IWARP_CONN_AG_CTX_CF13_SHIFT	2
-#define XSTORM_IWARP_CONN_AG_CTX_SQ_FLUSH_CF_MASK	0x3
-#define XSTORM_IWARP_CONN_AG_CTX_SQ_FLUSH_CF_SHIFT	4
-#define XSTORM_IWARP_CONN_AG_CTX_CF15_MASK	0x3
-#define XSTORM_IWARP_CONN_AG_CTX_CF15_SHIFT	6
+#define E4_XSTORM_IWARP_CONN_AG_CTX_CF12_MASK		0x3
+#define E4_XSTORM_IWARP_CONN_AG_CTX_CF12_SHIFT		0
+#define E4_XSTORM_IWARP_CONN_AG_CTX_CF13_MASK		0x3
+#define E4_XSTORM_IWARP_CONN_AG_CTX_CF13_SHIFT		2
+#define E4_XSTORM_IWARP_CONN_AG_CTX_SQ_FLUSH_CF_MASK	0x3
+#define E4_XSTORM_IWARP_CONN_AG_CTX_SQ_FLUSH_CF_SHIFT	4
+#define E4_XSTORM_IWARP_CONN_AG_CTX_CF15_MASK		0x3
+#define E4_XSTORM_IWARP_CONN_AG_CTX_CF15_SHIFT		6
 	u8 flags6;
-#define XSTORM_IWARP_CONN_AG_CTX_MPA_OR_ERROR_WAKEUP_TRIGGER_CF_MASK	0x3
-#define XSTORM_IWARP_CONN_AG_CTX_MPA_OR_ERROR_WAKEUP_TRIGGER_CF_SHIFT	0
-#define XSTORM_IWARP_CONN_AG_CTX_CF17_MASK	0x3
-#define XSTORM_IWARP_CONN_AG_CTX_CF17_SHIFT	2
-#define XSTORM_IWARP_CONN_AG_CTX_CF18_MASK	0x3
-#define XSTORM_IWARP_CONN_AG_CTX_CF18_SHIFT	4
-#define XSTORM_IWARP_CONN_AG_CTX_DQ_FLUSH_MASK	0x3
-#define XSTORM_IWARP_CONN_AG_CTX_DQ_FLUSH_SHIFT	6
+#define E4_XSTORM_IWARP_CONN_AG_CTX_MPA_OR_ERROR_WAKEUP_TRIGGER_CF_MASK	0x3
+#define E4_XSTORM_IWARP_CONN_AG_CTX_MPA_OR_ERROR_WAKEUP_TRIGGER_CF_SHIFT 0
+#define E4_XSTORM_IWARP_CONN_AG_CTX_CF17_MASK				0x3
+#define E4_XSTORM_IWARP_CONN_AG_CTX_CF17_SHIFT				2
+#define E4_XSTORM_IWARP_CONN_AG_CTX_CF18_MASK				0x3
+#define E4_XSTORM_IWARP_CONN_AG_CTX_CF18_SHIFT				4
+#define E4_XSTORM_IWARP_CONN_AG_CTX_DQ_FLUSH_MASK			0x3
+#define E4_XSTORM_IWARP_CONN_AG_CTX_DQ_FLUSH_SHIFT			6
 	u8 flags7;
-#define XSTORM_IWARP_CONN_AG_CTX_FLUSH_Q0_MASK	0x3
-#define XSTORM_IWARP_CONN_AG_CTX_FLUSH_Q0_SHIFT	0
-#define XSTORM_IWARP_CONN_AG_CTX_FLUSH_Q1_MASK	0x3
-#define XSTORM_IWARP_CONN_AG_CTX_FLUSH_Q1_SHIFT	2
-#define XSTORM_IWARP_CONN_AG_CTX_SLOW_PATH_MASK	0x3
-#define XSTORM_IWARP_CONN_AG_CTX_SLOW_PATH_SHIFT	4
-#define XSTORM_IWARP_CONN_AG_CTX_CF0EN_MASK	0x1
-#define XSTORM_IWARP_CONN_AG_CTX_CF0EN_SHIFT	6
-#define XSTORM_IWARP_CONN_AG_CTX_CF1EN_MASK	0x1
-#define XSTORM_IWARP_CONN_AG_CTX_CF1EN_SHIFT	7
+#define E4_XSTORM_IWARP_CONN_AG_CTX_FLUSH_Q0_MASK	0x3
+#define E4_XSTORM_IWARP_CONN_AG_CTX_FLUSH_Q0_SHIFT	0
+#define E4_XSTORM_IWARP_CONN_AG_CTX_FLUSH_Q1_MASK	0x3
+#define E4_XSTORM_IWARP_CONN_AG_CTX_FLUSH_Q1_SHIFT	2
+#define E4_XSTORM_IWARP_CONN_AG_CTX_SLOW_PATH_MASK	0x3
+#define E4_XSTORM_IWARP_CONN_AG_CTX_SLOW_PATH_SHIFT	4
+#define E4_XSTORM_IWARP_CONN_AG_CTX_CF0EN_MASK		0x1
+#define E4_XSTORM_IWARP_CONN_AG_CTX_CF0EN_SHIFT		6
+#define E4_XSTORM_IWARP_CONN_AG_CTX_CF1EN_MASK		0x1
+#define E4_XSTORM_IWARP_CONN_AG_CTX_CF1EN_SHIFT		7
 	u8 flags8;
-#define XSTORM_IWARP_CONN_AG_CTX_CF2EN_MASK	0x1
-#define XSTORM_IWARP_CONN_AG_CTX_CF2EN_SHIFT	0
-#define XSTORM_IWARP_CONN_AG_CTX_TIMER_STOP_ALL_EN_MASK	0x1
-#define XSTORM_IWARP_CONN_AG_CTX_TIMER_STOP_ALL_EN_SHIFT	1
-#define XSTORM_IWARP_CONN_AG_CTX_CF4EN_MASK	0x1
-#define XSTORM_IWARP_CONN_AG_CTX_CF4EN_SHIFT	2
-#define XSTORM_IWARP_CONN_AG_CTX_CF5EN_MASK	0x1
-#define XSTORM_IWARP_CONN_AG_CTX_CF5EN_SHIFT	3
-#define XSTORM_IWARP_CONN_AG_CTX_CF6EN_MASK	0x1
-#define XSTORM_IWARP_CONN_AG_CTX_CF6EN_SHIFT	4
-#define XSTORM_IWARP_CONN_AG_CTX_CF7EN_MASK	0x1
-#define XSTORM_IWARP_CONN_AG_CTX_CF7EN_SHIFT	5
-#define XSTORM_IWARP_CONN_AG_CTX_CF8EN_MASK	0x1
-#define XSTORM_IWARP_CONN_AG_CTX_CF8EN_SHIFT	6
-#define XSTORM_IWARP_CONN_AG_CTX_CF9EN_MASK	0x1
-#define XSTORM_IWARP_CONN_AG_CTX_CF9EN_SHIFT	7
+#define E4_XSTORM_IWARP_CONN_AG_CTX_CF2EN_MASK			0x1
+#define E4_XSTORM_IWARP_CONN_AG_CTX_CF2EN_SHIFT			0
+#define E4_XSTORM_IWARP_CONN_AG_CTX_TIMER_STOP_ALL_EN_MASK	0x1
+#define E4_XSTORM_IWARP_CONN_AG_CTX_TIMER_STOP_ALL_EN_SHIFT	1
+#define E4_XSTORM_IWARP_CONN_AG_CTX_CF4EN_MASK			0x1
+#define E4_XSTORM_IWARP_CONN_AG_CTX_CF4EN_SHIFT			2
+#define E4_XSTORM_IWARP_CONN_AG_CTX_CF5EN_MASK			0x1
+#define E4_XSTORM_IWARP_CONN_AG_CTX_CF5EN_SHIFT			3
+#define E4_XSTORM_IWARP_CONN_AG_CTX_CF6EN_MASK			0x1
+#define E4_XSTORM_IWARP_CONN_AG_CTX_CF6EN_SHIFT			4
+#define E4_XSTORM_IWARP_CONN_AG_CTX_CF7EN_MASK			0x1
+#define E4_XSTORM_IWARP_CONN_AG_CTX_CF7EN_SHIFT			5
+#define E4_XSTORM_IWARP_CONN_AG_CTX_CF8EN_MASK			0x1
+#define E4_XSTORM_IWARP_CONN_AG_CTX_CF8EN_SHIFT			6
+#define E4_XSTORM_IWARP_CONN_AG_CTX_CF9EN_MASK			0x1
+#define E4_XSTORM_IWARP_CONN_AG_CTX_CF9EN_SHIFT			7
 	u8 flags9;
-#define XSTORM_IWARP_CONN_AG_CTX_CF10EN_MASK	0x1
-#define XSTORM_IWARP_CONN_AG_CTX_CF10EN_SHIFT	0
-#define XSTORM_IWARP_CONN_AG_CTX_CF11EN_MASK	0x1
-#define XSTORM_IWARP_CONN_AG_CTX_CF11EN_SHIFT	1
-#define XSTORM_IWARP_CONN_AG_CTX_CF12EN_MASK	0x1
-#define XSTORM_IWARP_CONN_AG_CTX_CF12EN_SHIFT	2
-#define XSTORM_IWARP_CONN_AG_CTX_CF13EN_MASK	0x1
-#define XSTORM_IWARP_CONN_AG_CTX_CF13EN_SHIFT	3
-#define XSTORM_IWARP_CONN_AG_CTX_SQ_FLUSH_CF_EN_MASK	0x1
-#define XSTORM_IWARP_CONN_AG_CTX_SQ_FLUSH_CF_EN_SHIFT	4
-#define XSTORM_IWARP_CONN_AG_CTX_CF15EN_MASK	0x1
-#define XSTORM_IWARP_CONN_AG_CTX_CF15EN_SHIFT	5
-#define XSTORM_IWARP_CONN_AG_CTX_MPA_OR_ERROR_WAKEUP_TRIGGER_CF_EN_MASK	0x1
-#define XSTORM_IWARP_CONN_AG_CTX_MPA_OR_ERROR_WAKEUP_TRIGGER_CF_EN_SHIFT 6
-#define XSTORM_IWARP_CONN_AG_CTX_CF17EN_MASK	0x1
-#define XSTORM_IWARP_CONN_AG_CTX_CF17EN_SHIFT	7
+#define E4_XSTORM_IWARP_CONN_AG_CTX_CF10EN_MASK				0x1
+#define E4_XSTORM_IWARP_CONN_AG_CTX_CF10EN_SHIFT			0
+#define E4_XSTORM_IWARP_CONN_AG_CTX_CF11EN_MASK				0x1
+#define E4_XSTORM_IWARP_CONN_AG_CTX_CF11EN_SHIFT			1
+#define E4_XSTORM_IWARP_CONN_AG_CTX_CF12EN_MASK				0x1
+#define E4_XSTORM_IWARP_CONN_AG_CTX_CF12EN_SHIFT			2
+#define E4_XSTORM_IWARP_CONN_AG_CTX_CF13EN_MASK				0x1
+#define E4_XSTORM_IWARP_CONN_AG_CTX_CF13EN_SHIFT			3
+#define E4_XSTORM_IWARP_CONN_AG_CTX_SQ_FLUSH_CF_EN_MASK			0x1
+#define E4_XSTORM_IWARP_CONN_AG_CTX_SQ_FLUSH_CF_EN_SHIFT		4
+#define E4_XSTORM_IWARP_CONN_AG_CTX_CF15EN_MASK				0x1
+#define E4_XSTORM_IWARP_CONN_AG_CTX_CF15EN_SHIFT			5
+#define E4_XSTORM_IWARP_CONN_AG_CTX_MPA_OR_ERROR_WAKEUP_TRIGGER_CF_EN_MASK 0x1
+#define E4_XSTORM_IWARP_CONN_AG_CTX_MPA_OR_ERROR_WAKEUP_TRIGGER_CF_EN_SHIFT 6
+#define E4_XSTORM_IWARP_CONN_AG_CTX_CF17EN_MASK				0x1
+#define E4_XSTORM_IWARP_CONN_AG_CTX_CF17EN_SHIFT			7
 	u8 flags10;
-#define XSTORM_IWARP_CONN_AG_CTX_CF18EN_MASK	0x1
-#define XSTORM_IWARP_CONN_AG_CTX_CF18EN_SHIFT	0
-#define XSTORM_IWARP_CONN_AG_CTX_DQ_FLUSH_EN_MASK	0x1
-#define XSTORM_IWARP_CONN_AG_CTX_DQ_FLUSH_EN_SHIFT	1
-#define XSTORM_IWARP_CONN_AG_CTX_FLUSH_Q0_EN_MASK	0x1
-#define XSTORM_IWARP_CONN_AG_CTX_FLUSH_Q0_EN_SHIFT	2
-#define XSTORM_IWARP_CONN_AG_CTX_FLUSH_Q1_EN_MASK	0x1
-#define XSTORM_IWARP_CONN_AG_CTX_FLUSH_Q1_EN_SHIFT	3
-#define XSTORM_IWARP_CONN_AG_CTX_SLOW_PATH_EN_MASK	0x1
-#define XSTORM_IWARP_CONN_AG_CTX_SLOW_PATH_EN_SHIFT	4
-#define XSTORM_IWARP_CONN_AG_CTX_CF23EN_MASK	0x1
-#define XSTORM_IWARP_CONN_AG_CTX_CF23EN_SHIFT	5
-#define XSTORM_IWARP_CONN_AG_CTX_RULE0EN_MASK	0x1
-#define XSTORM_IWARP_CONN_AG_CTX_RULE0EN_SHIFT	6
-#define XSTORM_IWARP_CONN_AG_CTX_MORE_TO_SEND_RULE_EN_MASK	0x1
-#define XSTORM_IWARP_CONN_AG_CTX_MORE_TO_SEND_RULE_EN_SHIFT	7
+#define E4_XSTORM_IWARP_CONN_AG_CTX_CF18EN_MASK			0x1
+#define E4_XSTORM_IWARP_CONN_AG_CTX_CF18EN_SHIFT		0
+#define E4_XSTORM_IWARP_CONN_AG_CTX_DQ_FLUSH_EN_MASK		0x1
+#define E4_XSTORM_IWARP_CONN_AG_CTX_DQ_FLUSH_EN_SHIFT		1
+#define E4_XSTORM_IWARP_CONN_AG_CTX_FLUSH_Q0_EN_MASK		0x1
+#define E4_XSTORM_IWARP_CONN_AG_CTX_FLUSH_Q0_EN_SHIFT		2
+#define E4_XSTORM_IWARP_CONN_AG_CTX_FLUSH_Q1_EN_MASK		0x1
+#define E4_XSTORM_IWARP_CONN_AG_CTX_FLUSH_Q1_EN_SHIFT		3
+#define E4_XSTORM_IWARP_CONN_AG_CTX_SLOW_PATH_EN_MASK		0x1
+#define E4_XSTORM_IWARP_CONN_AG_CTX_SLOW_PATH_EN_SHIFT		4
+#define E4_XSTORM_IWARP_CONN_AG_CTX_CF23EN_MASK			0x1
+#define E4_XSTORM_IWARP_CONN_AG_CTX_CF23EN_SHIFT		5
+#define E4_XSTORM_IWARP_CONN_AG_CTX_RULE0EN_MASK		0x1
+#define E4_XSTORM_IWARP_CONN_AG_CTX_RULE0EN_SHIFT		6
+#define E4_XSTORM_IWARP_CONN_AG_CTX_MORE_TO_SEND_RULE_EN_MASK	0x1
+#define E4_XSTORM_IWARP_CONN_AG_CTX_MORE_TO_SEND_RULE_EN_SHIFT	7
 	u8 flags11;
-#define XSTORM_IWARP_CONN_AG_CTX_TX_BLOCKED_EN_MASK	0x1
-#define XSTORM_IWARP_CONN_AG_CTX_TX_BLOCKED_EN_SHIFT	0
-#define XSTORM_IWARP_CONN_AG_CTX_RULE3EN_MASK	0x1
-#define XSTORM_IWARP_CONN_AG_CTX_RULE3EN_SHIFT	1
-#define XSTORM_IWARP_CONN_AG_CTX_RESERVED3_MASK	0x1
-#define XSTORM_IWARP_CONN_AG_CTX_RESERVED3_SHIFT	2
-#define XSTORM_IWARP_CONN_AG_CTX_RULE5EN_MASK	0x1
-#define XSTORM_IWARP_CONN_AG_CTX_RULE5EN_SHIFT	3
-#define XSTORM_IWARP_CONN_AG_CTX_RULE6EN_MASK	0x1
-#define XSTORM_IWARP_CONN_AG_CTX_RULE6EN_SHIFT	4
-#define XSTORM_IWARP_CONN_AG_CTX_RULE7EN_MASK	0x1
-#define XSTORM_IWARP_CONN_AG_CTX_RULE7EN_SHIFT	5
-#define XSTORM_IWARP_CONN_AG_CTX_A0_RESERVED1_MASK	0x1
-#define XSTORM_IWARP_CONN_AG_CTX_A0_RESERVED1_SHIFT	6
-#define XSTORM_IWARP_CONN_AG_CTX_RULE9EN_MASK	0x1
-#define XSTORM_IWARP_CONN_AG_CTX_RULE9EN_SHIFT	7
+#define E4_XSTORM_IWARP_CONN_AG_CTX_TX_BLOCKED_EN_MASK	0x1
+#define E4_XSTORM_IWARP_CONN_AG_CTX_TX_BLOCKED_EN_SHIFT	0
+#define E4_XSTORM_IWARP_CONN_AG_CTX_RULE3EN_MASK	0x1
+#define E4_XSTORM_IWARP_CONN_AG_CTX_RULE3EN_SHIFT	1
+#define E4_XSTORM_IWARP_CONN_AG_CTX_RESERVED3_MASK	0x1
+#define E4_XSTORM_IWARP_CONN_AG_CTX_RESERVED3_SHIFT	2
+#define E4_XSTORM_IWARP_CONN_AG_CTX_RULE5EN_MASK	0x1
+#define E4_XSTORM_IWARP_CONN_AG_CTX_RULE5EN_SHIFT	3
+#define E4_XSTORM_IWARP_CONN_AG_CTX_RULE6EN_MASK	0x1
+#define E4_XSTORM_IWARP_CONN_AG_CTX_RULE6EN_SHIFT	4
+#define E4_XSTORM_IWARP_CONN_AG_CTX_RULE7EN_MASK	0x1
+#define E4_XSTORM_IWARP_CONN_AG_CTX_RULE7EN_SHIFT	5
+#define E4_XSTORM_IWARP_CONN_AG_CTX_A0_RESERVED1_MASK	0x1
+#define E4_XSTORM_IWARP_CONN_AG_CTX_A0_RESERVED1_SHIFT	6
+#define E4_XSTORM_IWARP_CONN_AG_CTX_RULE9EN_MASK	0x1
+#define E4_XSTORM_IWARP_CONN_AG_CTX_RULE9EN_SHIFT	7
 	u8 flags12;
-#define XSTORM_IWARP_CONN_AG_CTX_SQ_NOT_EMPTY_RULE_EN_MASK	0x1
-#define XSTORM_IWARP_CONN_AG_CTX_SQ_NOT_EMPTY_RULE_EN_SHIFT	0
-#define XSTORM_IWARP_CONN_AG_CTX_RULE11EN_MASK	0x1
-#define XSTORM_IWARP_CONN_AG_CTX_RULE11EN_SHIFT	1
-#define XSTORM_IWARP_CONN_AG_CTX_A0_RESERVED2_MASK	0x1
-#define XSTORM_IWARP_CONN_AG_CTX_A0_RESERVED2_SHIFT	2
-#define XSTORM_IWARP_CONN_AG_CTX_A0_RESERVED3_MASK	0x1
-#define XSTORM_IWARP_CONN_AG_CTX_A0_RESERVED3_SHIFT	3
-#define XSTORM_IWARP_CONN_AG_CTX_SQ_FENCE_RULE_EN_MASK	0x1
-#define XSTORM_IWARP_CONN_AG_CTX_SQ_FENCE_RULE_EN_SHIFT	4
-#define XSTORM_IWARP_CONN_AG_CTX_RULE15EN_MASK	0x1
-#define XSTORM_IWARP_CONN_AG_CTX_RULE15EN_SHIFT	5
-#define XSTORM_IWARP_CONN_AG_CTX_RULE16EN_MASK	0x1
-#define XSTORM_IWARP_CONN_AG_CTX_RULE16EN_SHIFT	6
-#define XSTORM_IWARP_CONN_AG_CTX_RULE17EN_MASK	0x1
-#define XSTORM_IWARP_CONN_AG_CTX_RULE17EN_SHIFT	7
+#define E4_XSTORM_IWARP_CONN_AG_CTX_SQ_NOT_EMPTY_RULE_EN_MASK	0x1
+#define E4_XSTORM_IWARP_CONN_AG_CTX_SQ_NOT_EMPTY_RULE_EN_SHIFT	0
+#define E4_XSTORM_IWARP_CONN_AG_CTX_RULE11EN_MASK		0x1
+#define E4_XSTORM_IWARP_CONN_AG_CTX_RULE11EN_SHIFT		1
+#define E4_XSTORM_IWARP_CONN_AG_CTX_A0_RESERVED2_MASK		0x1
+#define E4_XSTORM_IWARP_CONN_AG_CTX_A0_RESERVED2_SHIFT		2
+#define E4_XSTORM_IWARP_CONN_AG_CTX_A0_RESERVED3_MASK		0x1
+#define E4_XSTORM_IWARP_CONN_AG_CTX_A0_RESERVED3_SHIFT		3
+#define E4_XSTORM_IWARP_CONN_AG_CTX_SQ_FENCE_RULE_EN_MASK	0x1
+#define E4_XSTORM_IWARP_CONN_AG_CTX_SQ_FENCE_RULE_EN_SHIFT	4
+#define E4_XSTORM_IWARP_CONN_AG_CTX_RULE15EN_MASK		0x1
+#define E4_XSTORM_IWARP_CONN_AG_CTX_RULE15EN_SHIFT		5
+#define E4_XSTORM_IWARP_CONN_AG_CTX_RULE16EN_MASK		0x1
+#define E4_XSTORM_IWARP_CONN_AG_CTX_RULE16EN_SHIFT		6
+#define E4_XSTORM_IWARP_CONN_AG_CTX_RULE17EN_MASK		0x1
+#define E4_XSTORM_IWARP_CONN_AG_CTX_RULE17EN_SHIFT		7
 	u8 flags13;
-#define XSTORM_IWARP_CONN_AG_CTX_IRQ_NOT_EMPTY_RULE_EN_MASK	0x1
-#define XSTORM_IWARP_CONN_AG_CTX_IRQ_NOT_EMPTY_RULE_EN_SHIFT	0
-#define XSTORM_IWARP_CONN_AG_CTX_HQ_NOT_FULL_RULE_EN_MASK	0x1
-#define XSTORM_IWARP_CONN_AG_CTX_HQ_NOT_FULL_RULE_EN_SHIFT	1
-#define XSTORM_IWARP_CONN_AG_CTX_ORQ_RD_FENCE_RULE_EN_MASK	0x1
-#define XSTORM_IWARP_CONN_AG_CTX_ORQ_RD_FENCE_RULE_EN_SHIFT	2
-#define XSTORM_IWARP_CONN_AG_CTX_RULE21EN_MASK	0x1
-#define XSTORM_IWARP_CONN_AG_CTX_RULE21EN_SHIFT	3
-#define XSTORM_IWARP_CONN_AG_CTX_A0_RESERVED6_MASK	0x1
-#define XSTORM_IWARP_CONN_AG_CTX_A0_RESERVED6_SHIFT	4
-#define XSTORM_IWARP_CONN_AG_CTX_ORQ_NOT_FULL_RULE_EN_MASK	0x1
-#define XSTORM_IWARP_CONN_AG_CTX_ORQ_NOT_FULL_RULE_EN_SHIFT	5
-#define XSTORM_IWARP_CONN_AG_CTX_A0_RESERVED8_MASK	0x1
-#define XSTORM_IWARP_CONN_AG_CTX_A0_RESERVED8_SHIFT	6
-#define XSTORM_IWARP_CONN_AG_CTX_A0_RESERVED9_MASK	0x1
-#define XSTORM_IWARP_CONN_AG_CTX_A0_RESERVED9_SHIFT	7
+#define E4_XSTORM_IWARP_CONN_AG_CTX_IRQ_NOT_EMPTY_RULE_EN_MASK	0x1
+#define E4_XSTORM_IWARP_CONN_AG_CTX_IRQ_NOT_EMPTY_RULE_EN_SHIFT	0
+#define E4_XSTORM_IWARP_CONN_AG_CTX_HQ_NOT_FULL_RULE_EN_MASK	0x1
+#define E4_XSTORM_IWARP_CONN_AG_CTX_HQ_NOT_FULL_RULE_EN_SHIFT	1
+#define E4_XSTORM_IWARP_CONN_AG_CTX_ORQ_RD_FENCE_RULE_EN_MASK	0x1
+#define E4_XSTORM_IWARP_CONN_AG_CTX_ORQ_RD_FENCE_RULE_EN_SHIFT	2
+#define E4_XSTORM_IWARP_CONN_AG_CTX_RULE21EN_MASK		0x1
+#define E4_XSTORM_IWARP_CONN_AG_CTX_RULE21EN_SHIFT		3
+#define E4_XSTORM_IWARP_CONN_AG_CTX_A0_RESERVED6_MASK		0x1
+#define E4_XSTORM_IWARP_CONN_AG_CTX_A0_RESERVED6_SHIFT		4
+#define E4_XSTORM_IWARP_CONN_AG_CTX_ORQ_NOT_FULL_RULE_EN_MASK	0x1
+#define E4_XSTORM_IWARP_CONN_AG_CTX_ORQ_NOT_FULL_RULE_EN_SHIFT	5
+#define E4_XSTORM_IWARP_CONN_AG_CTX_A0_RESERVED8_MASK		0x1
+#define E4_XSTORM_IWARP_CONN_AG_CTX_A0_RESERVED8_SHIFT		6
+#define E4_XSTORM_IWARP_CONN_AG_CTX_A0_RESERVED9_MASK		0x1
+#define E4_XSTORM_IWARP_CONN_AG_CTX_A0_RESERVED9_SHIFT		7
 	u8 flags14;
-#define XSTORM_IWARP_CONN_AG_CTX_BIT16_MASK	0x1
-#define XSTORM_IWARP_CONN_AG_CTX_BIT16_SHIFT	0
-#define XSTORM_IWARP_CONN_AG_CTX_BIT17_MASK	0x1
-#define XSTORM_IWARP_CONN_AG_CTX_BIT17_SHIFT	1
-#define XSTORM_IWARP_CONN_AG_CTX_BIT18_MASK	0x1
-#define XSTORM_IWARP_CONN_AG_CTX_BIT18_SHIFT	2
-#define XSTORM_IWARP_CONN_AG_CTX_E5_RESERVED1_MASK	0x1
-#define XSTORM_IWARP_CONN_AG_CTX_E5_RESERVED1_SHIFT	3
-#define XSTORM_IWARP_CONN_AG_CTX_E5_RESERVED2_MASK	0x1
-#define XSTORM_IWARP_CONN_AG_CTX_E5_RESERVED2_SHIFT	4
-#define XSTORM_IWARP_CONN_AG_CTX_E5_RESERVED3_MASK	0x1
-#define XSTORM_IWARP_CONN_AG_CTX_E5_RESERVED3_SHIFT	5
-#define XSTORM_IWARP_CONN_AG_CTX_CF23_MASK	0x3
-#define XSTORM_IWARP_CONN_AG_CTX_CF23_SHIFT	6
+#define E4_XSTORM_IWARP_CONN_AG_CTX_BIT16_MASK		0x1
+#define E4_XSTORM_IWARP_CONN_AG_CTX_BIT16_SHIFT		0
+#define E4_XSTORM_IWARP_CONN_AG_CTX_BIT17_MASK		0x1
+#define E4_XSTORM_IWARP_CONN_AG_CTX_BIT17_SHIFT		1
+#define E4_XSTORM_IWARP_CONN_AG_CTX_BIT18_MASK		0x1
+#define E4_XSTORM_IWARP_CONN_AG_CTX_BIT18_SHIFT		2
+#define E4_XSTORM_IWARP_CONN_AG_CTX_E5_RESERVED1_MASK	0x1
+#define E4_XSTORM_IWARP_CONN_AG_CTX_E5_RESERVED1_SHIFT	3
+#define E4_XSTORM_IWARP_CONN_AG_CTX_E5_RESERVED2_MASK	0x1
+#define E4_XSTORM_IWARP_CONN_AG_CTX_E5_RESERVED2_SHIFT	4
+#define E4_XSTORM_IWARP_CONN_AG_CTX_E5_RESERVED3_MASK	0x1
+#define E4_XSTORM_IWARP_CONN_AG_CTX_E5_RESERVED3_SHIFT	5
+#define E4_XSTORM_IWARP_CONN_AG_CTX_CF23_MASK		0x3
+#define E4_XSTORM_IWARP_CONN_AG_CTX_CF23_SHIFT		6
 	u8 byte2;
 	__le16 physical_q0;
 	__le16 physical_q1;
@@ -8897,89 +9496,89 @@ struct xstorm_iwarp_conn_ag_ctx {
 	__le32 reg17;
 };
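
(All of the renamed E4_* flag defines in this patch follow the same bit-field
packing convention: each u8 flagsN byte packs several 1-bit (mask 0x1) or
2-bit (mask 0x3) fields, addressed by a _MASK/_SHIFT pair. The sketch below
illustrates how such a field is read and written; the GET_FIELD/SET_FIELD
helpers mirror the token-pasting accessors the qed driver uses for these
contexts, but their exact definitions here are an assumption made for the
example, not code taken from this patch.)

/* Minimal sketch of the _MASK/_SHIFT convention, reusing the CF0 field
 * of the tstorm iwarp flags0 byte (2 bits at shift 6, as defined below).
 */
#include <stdio.h>

typedef unsigned char u8;

#define E4_TSTORM_IWARP_CONN_AG_CTX_CF0_MASK	0x3
#define E4_TSTORM_IWARP_CONN_AG_CTX_CF0_SHIFT	6

/* Assumed helpers, modeled on the driver's field accessors. */
#define GET_FIELD(value, name) \
	(((value) >> name##_SHIFT) & name##_MASK)
#define SET_FIELD(value, name, flag)				\
	do {							\
		(value) &= ~(name##_MASK << name##_SHIFT);	\
		(value) |= ((flag) & name##_MASK) << name##_SHIFT; \
	} while (0)

int main(void)
{
	u8 flags0 = 0;

	/* Set the 2-bit CF0 field (bits 7:6 of flags0) to 2... */
	SET_FIELD(flags0, E4_TSTORM_IWARP_CONN_AG_CTX_CF0, 2);

	/* ...and read it back: prints "flags0=0x80 cf0=2". */
	printf("flags0=0x%02x cf0=%u\n", flags0,
	       GET_FIELD(flags0, E4_TSTORM_IWARP_CONN_AG_CTX_CF0));
	return 0;
}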
 
-struct tstorm_iwarp_conn_ag_ctx {
+struct e4_tstorm_iwarp_conn_ag_ctx {
 	u8 reserved0;
 	u8 state;
 	u8 flags0;
-#define TSTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM0_MASK	0x1
-#define TSTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM0_SHIFT	0
-#define TSTORM_IWARP_CONN_AG_CTX_BIT1_MASK	0x1
-#define TSTORM_IWARP_CONN_AG_CTX_BIT1_SHIFT	1
-#define TSTORM_IWARP_CONN_AG_CTX_BIT2_MASK	0x1
-#define TSTORM_IWARP_CONN_AG_CTX_BIT2_SHIFT	2
-#define TSTORM_IWARP_CONN_AG_CTX_MSTORM_FLUSH_MASK	0x1
-#define TSTORM_IWARP_CONN_AG_CTX_MSTORM_FLUSH_SHIFT	3
-#define TSTORM_IWARP_CONN_AG_CTX_BIT4_MASK	0x1
-#define TSTORM_IWARP_CONN_AG_CTX_BIT4_SHIFT	4
-#define TSTORM_IWARP_CONN_AG_CTX_CACHED_ORQ_MASK	0x1
-#define TSTORM_IWARP_CONN_AG_CTX_CACHED_ORQ_SHIFT	5
-#define TSTORM_IWARP_CONN_AG_CTX_CF0_MASK	0x3
-#define TSTORM_IWARP_CONN_AG_CTX_CF0_SHIFT	6
+#define E4_TSTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM0_MASK	0x1
+#define E4_TSTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM0_SHIFT	0
+#define E4_TSTORM_IWARP_CONN_AG_CTX_BIT1_MASK		0x1
+#define E4_TSTORM_IWARP_CONN_AG_CTX_BIT1_SHIFT		1
+#define E4_TSTORM_IWARP_CONN_AG_CTX_BIT2_MASK		0x1
+#define E4_TSTORM_IWARP_CONN_AG_CTX_BIT2_SHIFT		2
+#define E4_TSTORM_IWARP_CONN_AG_CTX_MSTORM_FLUSH_MASK	0x1
+#define E4_TSTORM_IWARP_CONN_AG_CTX_MSTORM_FLUSH_SHIFT	3
+#define E4_TSTORM_IWARP_CONN_AG_CTX_BIT4_MASK		0x1
+#define E4_TSTORM_IWARP_CONN_AG_CTX_BIT4_SHIFT		4
+#define E4_TSTORM_IWARP_CONN_AG_CTX_CACHED_ORQ_MASK	0x1
+#define E4_TSTORM_IWARP_CONN_AG_CTX_CACHED_ORQ_SHIFT	5
+#define E4_TSTORM_IWARP_CONN_AG_CTX_CF0_MASK		0x3
+#define E4_TSTORM_IWARP_CONN_AG_CTX_CF0_SHIFT		6
 	u8 flags1;
-#define TSTORM_IWARP_CONN_AG_CTX_RQ_POST_CF_MASK	0x3
-#define TSTORM_IWARP_CONN_AG_CTX_RQ_POST_CF_SHIFT	0
-#define TSTORM_IWARP_CONN_AG_CTX_MPA_TIMEOUT_CF_MASK	0x3
-#define TSTORM_IWARP_CONN_AG_CTX_MPA_TIMEOUT_CF_SHIFT	2
-#define TSTORM_IWARP_CONN_AG_CTX_TIMER_STOP_ALL_MASK	0x3
-#define TSTORM_IWARP_CONN_AG_CTX_TIMER_STOP_ALL_SHIFT	4
-#define TSTORM_IWARP_CONN_AG_CTX_CF4_MASK	0x3
-#define TSTORM_IWARP_CONN_AG_CTX_CF4_SHIFT	6
+#define E4_TSTORM_IWARP_CONN_AG_CTX_RQ_POST_CF_MASK		0x3
+#define E4_TSTORM_IWARP_CONN_AG_CTX_RQ_POST_CF_SHIFT		0
+#define E4_TSTORM_IWARP_CONN_AG_CTX_MPA_TIMEOUT_CF_MASK		0x3
+#define E4_TSTORM_IWARP_CONN_AG_CTX_MPA_TIMEOUT_CF_SHIFT	2
+#define E4_TSTORM_IWARP_CONN_AG_CTX_TIMER_STOP_ALL_MASK		0x3
+#define E4_TSTORM_IWARP_CONN_AG_CTX_TIMER_STOP_ALL_SHIFT	4
+#define E4_TSTORM_IWARP_CONN_AG_CTX_CF4_MASK			0x3
+#define E4_TSTORM_IWARP_CONN_AG_CTX_CF4_SHIFT			6
 	u8 flags2;
-#define TSTORM_IWARP_CONN_AG_CTX_CF5_MASK	0x3
-#define TSTORM_IWARP_CONN_AG_CTX_CF5_SHIFT	0
-#define TSTORM_IWARP_CONN_AG_CTX_CF6_MASK	0x3
-#define TSTORM_IWARP_CONN_AG_CTX_CF6_SHIFT	2
-#define TSTORM_IWARP_CONN_AG_CTX_CF7_MASK	0x3
-#define TSTORM_IWARP_CONN_AG_CTX_CF7_SHIFT	4
-#define TSTORM_IWARP_CONN_AG_CTX_CF8_MASK	0x3
-#define TSTORM_IWARP_CONN_AG_CTX_CF8_SHIFT	6
+#define E4_TSTORM_IWARP_CONN_AG_CTX_CF5_MASK	0x3
+#define E4_TSTORM_IWARP_CONN_AG_CTX_CF5_SHIFT	0
+#define E4_TSTORM_IWARP_CONN_AG_CTX_CF6_MASK	0x3
+#define E4_TSTORM_IWARP_CONN_AG_CTX_CF6_SHIFT	2
+#define E4_TSTORM_IWARP_CONN_AG_CTX_CF7_MASK	0x3
+#define E4_TSTORM_IWARP_CONN_AG_CTX_CF7_SHIFT	4
+#define E4_TSTORM_IWARP_CONN_AG_CTX_CF8_MASK	0x3
+#define E4_TSTORM_IWARP_CONN_AG_CTX_CF8_SHIFT	6
 	u8 flags3;
-#define TSTORM_IWARP_CONN_AG_CTX_FLUSH_Q0_MASK	0x3
-#define TSTORM_IWARP_CONN_AG_CTX_FLUSH_Q0_SHIFT	0
-#define TSTORM_IWARP_CONN_AG_CTX_FLUSH_OR_ERROR_DETECTED_MASK	0x3
-#define TSTORM_IWARP_CONN_AG_CTX_FLUSH_OR_ERROR_DETECTED_SHIFT	2
-#define TSTORM_IWARP_CONN_AG_CTX_CF0EN_MASK	0x1
-#define TSTORM_IWARP_CONN_AG_CTX_CF0EN_SHIFT	4
-#define TSTORM_IWARP_CONN_AG_CTX_RQ_POST_CF_EN_MASK	0x1
-#define TSTORM_IWARP_CONN_AG_CTX_RQ_POST_CF_EN_SHIFT	5
-#define TSTORM_IWARP_CONN_AG_CTX_MPA_TIMEOUT_CF_EN_MASK	0x1
-#define TSTORM_IWARP_CONN_AG_CTX_MPA_TIMEOUT_CF_EN_SHIFT	6
-#define TSTORM_IWARP_CONN_AG_CTX_TIMER_STOP_ALL_EN_MASK	0x1
-#define TSTORM_IWARP_CONN_AG_CTX_TIMER_STOP_ALL_EN_SHIFT	7
+#define E4_TSTORM_IWARP_CONN_AG_CTX_FLUSH_Q0_AND_TCP_HANDSHAKE_COMPLETE_MASK 0x3
+#define E4_TSTORM_IWARP_CONN_AG_CTX_FLUSH_Q0_AND_TCP_HANDSHAKE_COMPLETE_SHIFT 0
+#define E4_TSTORM_IWARP_CONN_AG_CTX_FLUSH_OR_ERROR_DETECTED_MASK	0x3
+#define E4_TSTORM_IWARP_CONN_AG_CTX_FLUSH_OR_ERROR_DETECTED_SHIFT	2
+#define E4_TSTORM_IWARP_CONN_AG_CTX_CF0EN_MASK				0x1
+#define E4_TSTORM_IWARP_CONN_AG_CTX_CF0EN_SHIFT				4
+#define E4_TSTORM_IWARP_CONN_AG_CTX_RQ_POST_CF_EN_MASK			0x1
+#define E4_TSTORM_IWARP_CONN_AG_CTX_RQ_POST_CF_EN_SHIFT			5
+#define E4_TSTORM_IWARP_CONN_AG_CTX_MPA_TIMEOUT_CF_EN_MASK		0x1
+#define E4_TSTORM_IWARP_CONN_AG_CTX_MPA_TIMEOUT_CF_EN_SHIFT		6
+#define E4_TSTORM_IWARP_CONN_AG_CTX_TIMER_STOP_ALL_EN_MASK		0x1
+#define E4_TSTORM_IWARP_CONN_AG_CTX_TIMER_STOP_ALL_EN_SHIFT		7
 	u8 flags4;
-#define TSTORM_IWARP_CONN_AG_CTX_CF4EN_MASK	0x1
-#define TSTORM_IWARP_CONN_AG_CTX_CF4EN_SHIFT	0
-#define TSTORM_IWARP_CONN_AG_CTX_CF5EN_MASK	0x1
-#define TSTORM_IWARP_CONN_AG_CTX_CF5EN_SHIFT	1
-#define TSTORM_IWARP_CONN_AG_CTX_CF6EN_MASK	0x1
-#define TSTORM_IWARP_CONN_AG_CTX_CF6EN_SHIFT	2
-#define TSTORM_IWARP_CONN_AG_CTX_CF7EN_MASK	0x1
-#define TSTORM_IWARP_CONN_AG_CTX_CF7EN_SHIFT	3
-#define TSTORM_IWARP_CONN_AG_CTX_CF8EN_MASK	0x1
-#define TSTORM_IWARP_CONN_AG_CTX_CF8EN_SHIFT	4
-#define TSTORM_IWARP_CONN_AG_CTX_FLUSH_Q0_EN_MASK	0x1
-#define TSTORM_IWARP_CONN_AG_CTX_FLUSH_Q0_EN_SHIFT	5
-#define TSTORM_IWARP_CONN_AG_CTX_FLUSH_OR_ERROR_DETECTED_EN_MASK	0x1
-#define TSTORM_IWARP_CONN_AG_CTX_FLUSH_OR_ERROR_DETECTED_EN_SHIFT 6
-#define TSTORM_IWARP_CONN_AG_CTX_RULE0EN_MASK	0x1
-#define TSTORM_IWARP_CONN_AG_CTX_RULE0EN_SHIFT	7
+#define E4_TSTORM_IWARP_CONN_AG_CTX_CF4EN_MASK				0x1
+#define E4_TSTORM_IWARP_CONN_AG_CTX_CF4EN_SHIFT				0
+#define E4_TSTORM_IWARP_CONN_AG_CTX_CF5EN_MASK				0x1
+#define E4_TSTORM_IWARP_CONN_AG_CTX_CF5EN_SHIFT				1
+#define E4_TSTORM_IWARP_CONN_AG_CTX_CF6EN_MASK				0x1
+#define E4_TSTORM_IWARP_CONN_AG_CTX_CF6EN_SHIFT				2
+#define E4_TSTORM_IWARP_CONN_AG_CTX_CF7EN_MASK				0x1
+#define E4_TSTORM_IWARP_CONN_AG_CTX_CF7EN_SHIFT				3
+#define E4_TSTORM_IWARP_CONN_AG_CTX_CF8EN_MASK				0x1
+#define E4_TSTORM_IWARP_CONN_AG_CTX_CF8EN_SHIFT				4
+#define E4_TSTORM_IWARP_CONN_AG_CTX_FLUSH_Q0_AND_TCP_HANDSHAKE_COMPL_EN_MASK 0x1
+#define E4_TSTORM_IWARP_CONN_AG_CTX_FLUSH_Q0_AND_TCP_HANDSHAKE_COMPL_EN_SHIFT 5
+#define E4_TSTORM_IWARP_CONN_AG_CTX_FLUSH_OR_ERROR_DETECTED_EN_MASK	0x1
+#define E4_TSTORM_IWARP_CONN_AG_CTX_FLUSH_OR_ERROR_DETECTED_EN_SHIFT	6
+#define E4_TSTORM_IWARP_CONN_AG_CTX_RULE0EN_MASK			0x1
+#define E4_TSTORM_IWARP_CONN_AG_CTX_RULE0EN_SHIFT			7
 	u8 flags5;
-#define TSTORM_IWARP_CONN_AG_CTX_RULE1EN_MASK	0x1
-#define TSTORM_IWARP_CONN_AG_CTX_RULE1EN_SHIFT	0
-#define TSTORM_IWARP_CONN_AG_CTX_RULE2EN_MASK	0x1
-#define TSTORM_IWARP_CONN_AG_CTX_RULE2EN_SHIFT	1
-#define TSTORM_IWARP_CONN_AG_CTX_RULE3EN_MASK	0x1
-#define TSTORM_IWARP_CONN_AG_CTX_RULE3EN_SHIFT	2
-#define TSTORM_IWARP_CONN_AG_CTX_RULE4EN_MASK	0x1
-#define TSTORM_IWARP_CONN_AG_CTX_RULE4EN_SHIFT	3
-#define TSTORM_IWARP_CONN_AG_CTX_RULE5EN_MASK	0x1
-#define TSTORM_IWARP_CONN_AG_CTX_RULE5EN_SHIFT	4
-#define TSTORM_IWARP_CONN_AG_CTX_SND_SQ_CONS_RULE_MASK	0x1
-#define TSTORM_IWARP_CONN_AG_CTX_SND_SQ_CONS_RULE_SHIFT	5
-#define TSTORM_IWARP_CONN_AG_CTX_RULE7EN_MASK	0x1
-#define TSTORM_IWARP_CONN_AG_CTX_RULE7EN_SHIFT	6
-#define TSTORM_IWARP_CONN_AG_CTX_RULE8EN_MASK	0x1
-#define TSTORM_IWARP_CONN_AG_CTX_RULE8EN_SHIFT	7
+#define E4_TSTORM_IWARP_CONN_AG_CTX_RULE1EN_MASK		0x1
+#define E4_TSTORM_IWARP_CONN_AG_CTX_RULE1EN_SHIFT		0
+#define E4_TSTORM_IWARP_CONN_AG_CTX_RULE2EN_MASK		0x1
+#define E4_TSTORM_IWARP_CONN_AG_CTX_RULE2EN_SHIFT		1
+#define E4_TSTORM_IWARP_CONN_AG_CTX_RULE3EN_MASK		0x1
+#define E4_TSTORM_IWARP_CONN_AG_CTX_RULE3EN_SHIFT		2
+#define E4_TSTORM_IWARP_CONN_AG_CTX_RULE4EN_MASK		0x1
+#define E4_TSTORM_IWARP_CONN_AG_CTX_RULE4EN_SHIFT		3
+#define E4_TSTORM_IWARP_CONN_AG_CTX_RULE5EN_MASK		0x1
+#define E4_TSTORM_IWARP_CONN_AG_CTX_RULE5EN_SHIFT		4
+#define E4_TSTORM_IWARP_CONN_AG_CTX_SND_SQ_CONS_RULE_MASK	0x1
+#define E4_TSTORM_IWARP_CONN_AG_CTX_SND_SQ_CONS_RULE_SHIFT	5
+#define E4_TSTORM_IWARP_CONN_AG_CTX_RULE7EN_MASK		0x1
+#define E4_TSTORM_IWARP_CONN_AG_CTX_RULE7EN_SHIFT		6
+#define E4_TSTORM_IWARP_CONN_AG_CTX_RULE8EN_MASK		0x1
+#define E4_TSTORM_IWARP_CONN_AG_CTX_RULE8EN_SHIFT		7
 	__le32 reg0;
 	__le32 reg1;
 	__le32 unaligned_nxt_seq;
@@ -9001,51 +9600,56 @@ struct tstorm_iwarp_conn_ag_ctx {
 	__le32 last_hq_sequence;
 };
 
+/* The iwarp storm context of Tstorm */
 struct tstorm_iwarp_conn_st_ctx {
 	__le32 reserved[60];
 };
 
+/* The iwarp storm context of Mstorm */
 struct mstorm_iwarp_conn_st_ctx {
 	__le32 reserved[32];
 };
 
+/* The iwarp storm context of Ustorm */
 struct ustorm_iwarp_conn_st_ctx {
 	__le32 reserved[24];
 };
 
-struct iwarp_conn_context {
+/* iwarp connection context */
+struct e4_iwarp_conn_context {
 	struct ystorm_iwarp_conn_st_ctx ystorm_st_context;
 	struct regpair ystorm_st_padding[2];
 	struct pstorm_iwarp_conn_st_ctx pstorm_st_context;
 	struct regpair pstorm_st_padding[2];
 	struct xstorm_iwarp_conn_st_ctx xstorm_st_context;
 	struct regpair xstorm_st_padding[2];
-	struct xstorm_iwarp_conn_ag_ctx xstorm_ag_context;
-	struct tstorm_iwarp_conn_ag_ctx tstorm_ag_context;
+	struct e4_xstorm_iwarp_conn_ag_ctx xstorm_ag_context;
+	struct e4_tstorm_iwarp_conn_ag_ctx tstorm_ag_context;
 	struct timers_context timer_context;
-	struct ustorm_rdma_conn_ag_ctx ustorm_ag_context;
+	struct e4_ustorm_rdma_conn_ag_ctx ustorm_ag_context;
 	struct tstorm_iwarp_conn_st_ctx tstorm_st_context;
 	struct regpair tstorm_st_padding[2];
 	struct mstorm_iwarp_conn_st_ctx mstorm_st_context;
 	struct ustorm_iwarp_conn_st_ctx ustorm_st_context;
 };
 
+/* iWARP create QP params passed by driver to FW in CreateQP Request Ramrod */
 struct iwarp_create_qp_ramrod_data {
 	u8 flags;
 #define IWARP_CREATE_QP_RAMROD_DATA_FMR_AND_RESERVED_EN_MASK	0x1
-#define IWARP_CREATE_QP_RAMROD_DATA_FMR_AND_RESERVED_EN_SHIFT 0
-#define IWARP_CREATE_QP_RAMROD_DATA_SIGNALED_COMP_MASK	0x1
-#define IWARP_CREATE_QP_RAMROD_DATA_SIGNALED_COMP_SHIFT	1
-#define IWARP_CREATE_QP_RAMROD_DATA_RDMA_RD_EN_MASK	0x1
-#define IWARP_CREATE_QP_RAMROD_DATA_RDMA_RD_EN_SHIFT	2
-#define IWARP_CREATE_QP_RAMROD_DATA_RDMA_WR_EN_MASK	0x1
-#define IWARP_CREATE_QP_RAMROD_DATA_RDMA_WR_EN_SHIFT	3
-#define IWARP_CREATE_QP_RAMROD_DATA_ATOMIC_EN_MASK	0x1
-#define IWARP_CREATE_QP_RAMROD_DATA_ATOMIC_EN_SHIFT	4
-#define IWARP_CREATE_QP_RAMROD_DATA_SRQ_FLG_MASK	0x1
-#define IWARP_CREATE_QP_RAMROD_DATA_SRQ_FLG_SHIFT	5
-#define IWARP_CREATE_QP_RAMROD_DATA_RESERVED0_MASK	0x3
-#define IWARP_CREATE_QP_RAMROD_DATA_RESERVED0_SHIFT	6
+#define IWARP_CREATE_QP_RAMROD_DATA_FMR_AND_RESERVED_EN_SHIFT	0
+#define IWARP_CREATE_QP_RAMROD_DATA_SIGNALED_COMP_MASK		0x1
+#define IWARP_CREATE_QP_RAMROD_DATA_SIGNALED_COMP_SHIFT		1
+#define IWARP_CREATE_QP_RAMROD_DATA_RDMA_RD_EN_MASK		0x1
+#define IWARP_CREATE_QP_RAMROD_DATA_RDMA_RD_EN_SHIFT		2
+#define IWARP_CREATE_QP_RAMROD_DATA_RDMA_WR_EN_MASK		0x1
+#define IWARP_CREATE_QP_RAMROD_DATA_RDMA_WR_EN_SHIFT		3
+#define IWARP_CREATE_QP_RAMROD_DATA_ATOMIC_EN_MASK		0x1
+#define IWARP_CREATE_QP_RAMROD_DATA_ATOMIC_EN_SHIFT		4
+#define IWARP_CREATE_QP_RAMROD_DATA_SRQ_FLG_MASK		0x1
+#define IWARP_CREATE_QP_RAMROD_DATA_SRQ_FLG_SHIFT		5
+#define IWARP_CREATE_QP_RAMROD_DATA_RESERVED0_MASK		0x3
+#define IWARP_CREATE_QP_RAMROD_DATA_RESERVED0_SHIFT		6
 	u8 reserved1;
 	__le16 pd;
 	__le16 sq_num_pages;
@@ -9061,6 +9665,7 @@ struct iwarp_create_qp_ramrod_data {
 	u8 reserved2[6];
 };
 
+/* iWARP async EQE (event queue element) opcodes */
 enum iwarp_eqe_async_opcode {
 	IWARP_EVENT_TYPE_ASYNC_CONNECT_COMPLETE,
 	IWARP_EVENT_TYPE_ASYNC_ENHANCED_MPA_REPLY_ARRIVED,
@@ -9083,6 +9688,7 @@ struct iwarp_eqe_data_tcp_async_completion {
 	u8 reserved[5];
 };
 
+/* iWARP sync EQE (event queue element) opcodes */
 enum iwarp_eqe_sync_opcode {
 	IWARP_EVENT_TYPE_TCP_OFFLOAD =
 	11,
@@ -9095,6 +9701,7 @@ enum iwarp_eqe_sync_opcode {
 	MAX_IWARP_EQE_SYNC_OPCODE
 };
 
+/* iWARP EQE completion status */
 enum iwarp_fw_return_code {
 	IWARP_CONN_ERROR_TCP_CONNECT_INVALID_PACKET = 5,
 	IWARP_CONN_ERROR_TCP_CONNECTION_RST,
@@ -9125,54 +9732,60 @@ enum iwarp_fw_return_code {
 	MAX_IWARP_FW_RETURN_CODE
 };
 
+/* iWARP function init params */
 struct iwarp_init_func_params {
 	u8 ll2_ooo_q_index;
 	u8 reserved1[7];
 };
 
+/* iwarp func init ramrod data */
 struct iwarp_init_func_ramrod_data {
 	struct rdma_init_func_ramrod_data rdma;
 	struct tcp_init_params tcp;
 	struct iwarp_init_func_params iwarp;
 };
 
+/* iWARP QP - possible states to transition to */
 enum iwarp_modify_qp_new_state_type {
 	IWARP_MODIFY_QP_STATE_CLOSING = 1,
-	IWARP_MODIFY_QP_STATE_ERROR =
-	2,
+	IWARP_MODIFY_QP_STATE_ERROR = 2,
 	MAX_IWARP_MODIFY_QP_NEW_STATE_TYPE
 };
 
+/* iwarp modify qp responder ramrod data */
 struct iwarp_modify_qp_ramrod_data {
 	__le16 transition_to_state;
 	__le16 flags;
-#define IWARP_MODIFY_QP_RAMROD_DATA_RDMA_RD_EN_MASK	0x1
-#define IWARP_MODIFY_QP_RAMROD_DATA_RDMA_RD_EN_SHIFT	0
-#define IWARP_MODIFY_QP_RAMROD_DATA_RDMA_WR_EN_MASK	0x1
-#define IWARP_MODIFY_QP_RAMROD_DATA_RDMA_WR_EN_SHIFT	1
-#define IWARP_MODIFY_QP_RAMROD_DATA_ATOMIC_EN_MASK	0x1
-#define IWARP_MODIFY_QP_RAMROD_DATA_ATOMIC_EN_SHIFT	2
-#define IWARP_MODIFY_QP_RAMROD_DATA_STATE_TRANS_EN_MASK	0x1
+#define IWARP_MODIFY_QP_RAMROD_DATA_RDMA_RD_EN_MASK		0x1
+#define IWARP_MODIFY_QP_RAMROD_DATA_RDMA_RD_EN_SHIFT		0
+#define IWARP_MODIFY_QP_RAMROD_DATA_RDMA_WR_EN_MASK		0x1
+#define IWARP_MODIFY_QP_RAMROD_DATA_RDMA_WR_EN_SHIFT		1
+#define IWARP_MODIFY_QP_RAMROD_DATA_ATOMIC_EN_MASK		0x1
+#define IWARP_MODIFY_QP_RAMROD_DATA_ATOMIC_EN_SHIFT		2
+#define IWARP_MODIFY_QP_RAMROD_DATA_STATE_TRANS_EN_MASK		0x1
 #define IWARP_MODIFY_QP_RAMROD_DATA_STATE_TRANS_EN_SHIFT	3
 #define IWARP_MODIFY_QP_RAMROD_DATA_RDMA_OPS_EN_FLG_MASK	0x1
-#define IWARP_MODIFY_QP_RAMROD_DATA_RDMA_OPS_EN_FLG_SHIFT 4
-#define IWARP_MODIFY_QP_RAMROD_DATA_RESERVED_MASK	0x7FF
-#define IWARP_MODIFY_QP_RAMROD_DATA_RESERVED_SHIFT	5
+#define IWARP_MODIFY_QP_RAMROD_DATA_RDMA_OPS_EN_FLG_SHIFT	4
+#define IWARP_MODIFY_QP_RAMROD_DATA_RESERVED_MASK		0x7FF
+#define IWARP_MODIFY_QP_RAMROD_DATA_RESERVED_SHIFT		5
 	__le32 reserved3[3];
 	__le32 reserved4[8];
 };
 
+/* MPA params for Enhanced mode */
 struct mpa_rq_params {
 	__le32 ird;
 	__le32 ord;
 };
 
+/* MPA host address/length buffer for private data */
 struct mpa_ulp_buffer {
 	struct regpair addr;
 	__le16 len;
 	__le16 reserved[3];
 };
 
+/* iWARP MPA offload params common to Basic and Enhanced modes */
 struct mpa_outgoing_params {
 	u8 crc_needed;
 	u8 reject;
@@ -9181,6 +9794,9 @@ struct mpa_outgoing_params {
 	struct mpa_ulp_buffer outgoing_ulp_buffer;
 };
 
+/* iWARP MPA offload params passed by driver to FW in MPA Offload Request
+ * Ramrod.
+ */
 struct iwarp_mpa_offload_ramrod_data {
 	struct mpa_outgoing_params common;
 	__le32 tcp_cid;
@@ -9188,18 +9804,20 @@ struct iwarp_mpa_offload_ramrod_data {
 	u8 tcp_connect_side;
 	u8 rtr_pref;
 #define IWARP_MPA_OFFLOAD_RAMROD_DATA_RTR_SUPPORTED_MASK	0x7
-#define IWARP_MPA_OFFLOAD_RAMROD_DATA_RTR_SUPPORTED_SHIFT 0
-#define IWARP_MPA_OFFLOAD_RAMROD_DATA_RESERVED1_MASK	0x1F
-#define IWARP_MPA_OFFLOAD_RAMROD_DATA_RESERVED1_SHIFT	3
+#define IWARP_MPA_OFFLOAD_RAMROD_DATA_RTR_SUPPORTED_SHIFT	0
+#define IWARP_MPA_OFFLOAD_RAMROD_DATA_RESERVED1_MASK		0x1F
+#define IWARP_MPA_OFFLOAD_RAMROD_DATA_RESERVED1_SHIFT		3
 	u8 reserved2;
 	struct mpa_ulp_buffer incoming_ulp_buffer;
 	struct regpair async_eqe_output_buf;
 	struct regpair handle_for_async;
 	struct regpair shared_queue_addr;
+	__le16 rcv_wnd;
 	u8 stats_counter_id;
-	u8 reserved3[15];
+	u8 reserved3[13];
 };
 
+/* iWARP TCP connection offload params passed by driver to FW */
 struct iwarp_offload_params {
 	struct mpa_ulp_buffer incoming_ulp_buffer;
 	struct regpair async_eqe_output_buf;
@@ -9211,22 +9829,24 @@ struct iwarp_offload_params {
 	u8 reserved[10];
 };
 
+/* iWARP query QP output params */
 struct iwarp_query_qp_output_params {
 	__le32 flags;
 #define IWARP_QUERY_QP_OUTPUT_PARAMS_ERROR_FLG_MASK	0x1
-#define IWARP_QUERY_QP_OUTPUT_PARAMS_ERROR_FLG_SHIFT 0
+#define IWARP_QUERY_QP_OUTPUT_PARAMS_ERROR_FLG_SHIFT	0
 #define IWARP_QUERY_QP_OUTPUT_PARAMS_RESERVED0_MASK	0x7FFFFFFF
-#define IWARP_QUERY_QP_OUTPUT_PARAMS_RESERVED0_SHIFT 1
+#define IWARP_QUERY_QP_OUTPUT_PARAMS_RESERVED0_SHIFT	1
 	u8 reserved1[4];
 };
 
+/* iWARP query QP ramrod data */
 struct iwarp_query_qp_ramrod_data {
 	struct regpair output_params_addr;
 };
 
+/* iWARP Ramrod Command IDs */
 enum iwarp_ramrod_cmd_id {
-	IWARP_RAMROD_CMD_ID_TCP_OFFLOAD =
-	11,
+	IWARP_RAMROD_CMD_ID_TCP_OFFLOAD = 11,
 	IWARP_RAMROD_CMD_ID_MPA_OFFLOAD,
 	IWARP_RAMROD_CMD_ID_MPA_OFFLOAD_SEND_RTR,
 	IWARP_RAMROD_CMD_ID_CREATE_QP,
@@ -9236,22 +9856,28 @@ enum iwarp_ramrod_cmd_id {
 	MAX_IWARP_RAMROD_CMD_ID
 };
 
+/* Per PF iWARP retransmit path statistics */
 struct iwarp_rxmit_stats_drv {
 	struct regpair tx_go_to_slow_start_event_cnt;
 	struct regpair tx_fast_retransmit_event_cnt;
 };
 
+/* iWARP and TCP connection offload params passed by driver to FW in iWARP
+ * offload ramrod.
+ */
 struct iwarp_tcp_offload_ramrod_data {
 	struct iwarp_offload_params iwarp;
 	struct tcp_offload_params_opt2 tcp;
 };
 
+/* iWARP MPA negotiation types */
 enum mpa_negotiation_mode {
 	MPA_NEGOTIATION_TYPE_BASIC = 1,
 	MPA_NEGOTIATION_TYPE_ENHANCED = 2,
 	MAX_MPA_NEGOTIATION_MODE
 };
 
+/* iWARP MPA Enhanced mode RTR types */
 enum mpa_rtr_type {
 	MPA_RTR_TYPE_NONE = 0,
 	MPA_RTR_TYPE_ZERO_SEND = 1,
@@ -9264,113 +9890,114 @@ enum mpa_rtr_type {
 	MAX_MPA_RTR_TYPE
 };
 
+/* unaligned opaque data received from LL2 */
 struct unaligned_opaque_data {
 	__le16 first_mpa_offset;
 	u8 tcp_payload_offset;
 	u8 flags;
 #define UNALIGNED_OPAQUE_DATA_PKT_REACHED_WIN_RIGHT_EDGE_MASK	0x1
-#define UNALIGNED_OPAQUE_DATA_PKT_REACHED_WIN_RIGHT_EDGE_SHIFT 0
-#define UNALIGNED_OPAQUE_DATA_CONNECTION_CLOSED_MASK	0x1
-#define UNALIGNED_OPAQUE_DATA_CONNECTION_CLOSED_SHIFT	1
-#define UNALIGNED_OPAQUE_DATA_RESERVED_MASK	0x3F
-#define UNALIGNED_OPAQUE_DATA_RESERVED_SHIFT	2
+#define UNALIGNED_OPAQUE_DATA_PKT_REACHED_WIN_RIGHT_EDGE_SHIFT	0
+#define UNALIGNED_OPAQUE_DATA_CONNECTION_CLOSED_MASK		0x1
+#define UNALIGNED_OPAQUE_DATA_CONNECTION_CLOSED_SHIFT		1
+#define UNALIGNED_OPAQUE_DATA_RESERVED_MASK			0x3F
+#define UNALIGNED_OPAQUE_DATA_RESERVED_SHIFT			2
 	__le32 cid;
 };
 
-struct mstorm_iwarp_conn_ag_ctx {
+struct e4_mstorm_iwarp_conn_ag_ctx {
 	u8 reserved;
 	u8 state;
 	u8 flags0;
-#define MSTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM0_MASK	0x1
-#define MSTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM0_SHIFT	0
-#define MSTORM_IWARP_CONN_AG_CTX_BIT1_MASK	0x1
-#define MSTORM_IWARP_CONN_AG_CTX_BIT1_SHIFT	1
-#define MSTORM_IWARP_CONN_AG_CTX_INV_STAG_DONE_CF_MASK	0x3
-#define MSTORM_IWARP_CONN_AG_CTX_INV_STAG_DONE_CF_SHIFT	2
-#define MSTORM_IWARP_CONN_AG_CTX_CF1_MASK	0x3
-#define MSTORM_IWARP_CONN_AG_CTX_CF1_SHIFT	4
-#define MSTORM_IWARP_CONN_AG_CTX_CF2_MASK	0x3
-#define MSTORM_IWARP_CONN_AG_CTX_CF2_SHIFT	6
+#define E4_MSTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM0_MASK		0x1
+#define E4_MSTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM0_SHIFT		0
+#define E4_MSTORM_IWARP_CONN_AG_CTX_BIT1_MASK			0x1
+#define E4_MSTORM_IWARP_CONN_AG_CTX_BIT1_SHIFT			1
+#define E4_MSTORM_IWARP_CONN_AG_CTX_INV_STAG_DONE_CF_MASK	0x3
+#define E4_MSTORM_IWARP_CONN_AG_CTX_INV_STAG_DONE_CF_SHIFT	2
+#define E4_MSTORM_IWARP_CONN_AG_CTX_CF1_MASK			0x3
+#define E4_MSTORM_IWARP_CONN_AG_CTX_CF1_SHIFT			4
+#define E4_MSTORM_IWARP_CONN_AG_CTX_CF2_MASK			0x3
+#define E4_MSTORM_IWARP_CONN_AG_CTX_CF2_SHIFT			6
 	u8 flags1;
-#define MSTORM_IWARP_CONN_AG_CTX_INV_STAG_DONE_CF_EN_MASK	0x1
-#define MSTORM_IWARP_CONN_AG_CTX_INV_STAG_DONE_CF_EN_SHIFT 0
-#define MSTORM_IWARP_CONN_AG_CTX_CF1EN_MASK	0x1
-#define MSTORM_IWARP_CONN_AG_CTX_CF1EN_SHIFT	1
-#define MSTORM_IWARP_CONN_AG_CTX_CF2EN_MASK	0x1
-#define MSTORM_IWARP_CONN_AG_CTX_CF2EN_SHIFT	2
-#define MSTORM_IWARP_CONN_AG_CTX_RULE0EN_MASK	0x1
-#define MSTORM_IWARP_CONN_AG_CTX_RULE0EN_SHIFT	3
-#define MSTORM_IWARP_CONN_AG_CTX_RULE1EN_MASK	0x1
-#define MSTORM_IWARP_CONN_AG_CTX_RULE1EN_SHIFT	4
-#define MSTORM_IWARP_CONN_AG_CTX_RULE2EN_MASK	0x1
-#define MSTORM_IWARP_CONN_AG_CTX_RULE2EN_SHIFT	5
-#define MSTORM_IWARP_CONN_AG_CTX_RCQ_CONS_EN_MASK	0x1
-#define MSTORM_IWARP_CONN_AG_CTX_RCQ_CONS_EN_SHIFT	6
-#define MSTORM_IWARP_CONN_AG_CTX_RULE4EN_MASK	0x1
-#define MSTORM_IWARP_CONN_AG_CTX_RULE4EN_SHIFT	7
+#define E4_MSTORM_IWARP_CONN_AG_CTX_INV_STAG_DONE_CF_EN_MASK	0x1
+#define E4_MSTORM_IWARP_CONN_AG_CTX_INV_STAG_DONE_CF_EN_SHIFT	0
+#define E4_MSTORM_IWARP_CONN_AG_CTX_CF1EN_MASK			0x1
+#define E4_MSTORM_IWARP_CONN_AG_CTX_CF1EN_SHIFT			1
+#define E4_MSTORM_IWARP_CONN_AG_CTX_CF2EN_MASK			0x1
+#define E4_MSTORM_IWARP_CONN_AG_CTX_CF2EN_SHIFT			2
+#define E4_MSTORM_IWARP_CONN_AG_CTX_RULE0EN_MASK		0x1
+#define E4_MSTORM_IWARP_CONN_AG_CTX_RULE0EN_SHIFT		3
+#define E4_MSTORM_IWARP_CONN_AG_CTX_RULE1EN_MASK		0x1
+#define E4_MSTORM_IWARP_CONN_AG_CTX_RULE1EN_SHIFT		4
+#define E4_MSTORM_IWARP_CONN_AG_CTX_RULE2EN_MASK		0x1
+#define E4_MSTORM_IWARP_CONN_AG_CTX_RULE2EN_SHIFT		5
+#define E4_MSTORM_IWARP_CONN_AG_CTX_RCQ_CONS_EN_MASK		0x1
+#define E4_MSTORM_IWARP_CONN_AG_CTX_RCQ_CONS_EN_SHIFT		6
+#define E4_MSTORM_IWARP_CONN_AG_CTX_RULE4EN_MASK		0x1
+#define E4_MSTORM_IWARP_CONN_AG_CTX_RULE4EN_SHIFT		7
 	__le16 rcq_cons;
 	__le16 rcq_cons_th;
 	__le32 reg0;
 	__le32 reg1;
 };
 
-struct ustorm_iwarp_conn_ag_ctx {
+struct e4_ustorm_iwarp_conn_ag_ctx {
 	u8 reserved;
 	u8 byte1;
 	u8 flags0;
-#define USTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM0_MASK	0x1
-#define USTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM0_SHIFT	0
-#define USTORM_IWARP_CONN_AG_CTX_BIT1_MASK	0x1
-#define USTORM_IWARP_CONN_AG_CTX_BIT1_SHIFT	1
-#define USTORM_IWARP_CONN_AG_CTX_CF0_MASK	0x3
-#define USTORM_IWARP_CONN_AG_CTX_CF0_SHIFT	2
-#define USTORM_IWARP_CONN_AG_CTX_CF1_MASK	0x3
-#define USTORM_IWARP_CONN_AG_CTX_CF1_SHIFT	4
-#define USTORM_IWARP_CONN_AG_CTX_CF2_MASK	0x3
-#define USTORM_IWARP_CONN_AG_CTX_CF2_SHIFT	6
+#define E4_USTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM0_MASK	0x1
+#define E4_USTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM0_SHIFT	0
+#define E4_USTORM_IWARP_CONN_AG_CTX_BIT1_MASK		0x1
+#define E4_USTORM_IWARP_CONN_AG_CTX_BIT1_SHIFT		1
+#define E4_USTORM_IWARP_CONN_AG_CTX_CF0_MASK		0x3
+#define E4_USTORM_IWARP_CONN_AG_CTX_CF0_SHIFT		2
+#define E4_USTORM_IWARP_CONN_AG_CTX_CF1_MASK		0x3
+#define E4_USTORM_IWARP_CONN_AG_CTX_CF1_SHIFT		4
+#define E4_USTORM_IWARP_CONN_AG_CTX_CF2_MASK		0x3
+#define E4_USTORM_IWARP_CONN_AG_CTX_CF2_SHIFT		6
 	u8 flags1;
-#define USTORM_IWARP_CONN_AG_CTX_CF3_MASK	0x3
-#define USTORM_IWARP_CONN_AG_CTX_CF3_SHIFT	0
-#define USTORM_IWARP_CONN_AG_CTX_CQ_ARM_SE_CF_MASK	0x3
-#define USTORM_IWARP_CONN_AG_CTX_CQ_ARM_SE_CF_SHIFT	2
-#define USTORM_IWARP_CONN_AG_CTX_CQ_ARM_CF_MASK	0x3
-#define USTORM_IWARP_CONN_AG_CTX_CQ_ARM_CF_SHIFT	4
-#define USTORM_IWARP_CONN_AG_CTX_CF6_MASK	0x3
-#define USTORM_IWARP_CONN_AG_CTX_CF6_SHIFT	6
+#define E4_USTORM_IWARP_CONN_AG_CTX_CF3_MASK		0x3
+#define E4_USTORM_IWARP_CONN_AG_CTX_CF3_SHIFT		0
+#define E4_USTORM_IWARP_CONN_AG_CTX_CQ_ARM_SE_CF_MASK	0x3
+#define E4_USTORM_IWARP_CONN_AG_CTX_CQ_ARM_SE_CF_SHIFT	2
+#define E4_USTORM_IWARP_CONN_AG_CTX_CQ_ARM_CF_MASK	0x3
+#define E4_USTORM_IWARP_CONN_AG_CTX_CQ_ARM_CF_SHIFT	4
+#define E4_USTORM_IWARP_CONN_AG_CTX_CF6_MASK		0x3
+#define E4_USTORM_IWARP_CONN_AG_CTX_CF6_SHIFT		6
 	u8 flags2;
-#define USTORM_IWARP_CONN_AG_CTX_CF0EN_MASK	0x1
-#define USTORM_IWARP_CONN_AG_CTX_CF0EN_SHIFT	0
-#define USTORM_IWARP_CONN_AG_CTX_CF1EN_MASK	0x1
-#define USTORM_IWARP_CONN_AG_CTX_CF1EN_SHIFT	1
-#define USTORM_IWARP_CONN_AG_CTX_CF2EN_MASK	0x1
-#define USTORM_IWARP_CONN_AG_CTX_CF2EN_SHIFT	2
-#define USTORM_IWARP_CONN_AG_CTX_CF3EN_MASK	0x1
-#define USTORM_IWARP_CONN_AG_CTX_CF3EN_SHIFT	3
-#define USTORM_IWARP_CONN_AG_CTX_CQ_ARM_SE_CF_EN_MASK	0x1
-#define USTORM_IWARP_CONN_AG_CTX_CQ_ARM_SE_CF_EN_SHIFT 4
-#define USTORM_IWARP_CONN_AG_CTX_CQ_ARM_CF_EN_MASK	0x1
-#define USTORM_IWARP_CONN_AG_CTX_CQ_ARM_CF_EN_SHIFT	5
-#define USTORM_IWARP_CONN_AG_CTX_CF6EN_MASK	0x1
-#define USTORM_IWARP_CONN_AG_CTX_CF6EN_SHIFT	6
-#define USTORM_IWARP_CONN_AG_CTX_CQ_SE_EN_MASK	0x1
-#define USTORM_IWARP_CONN_AG_CTX_CQ_SE_EN_SHIFT	7
+#define E4_USTORM_IWARP_CONN_AG_CTX_CF0EN_MASK			0x1
+#define E4_USTORM_IWARP_CONN_AG_CTX_CF0EN_SHIFT			0
+#define E4_USTORM_IWARP_CONN_AG_CTX_CF1EN_MASK			0x1
+#define E4_USTORM_IWARP_CONN_AG_CTX_CF1EN_SHIFT			1
+#define E4_USTORM_IWARP_CONN_AG_CTX_CF2EN_MASK			0x1
+#define E4_USTORM_IWARP_CONN_AG_CTX_CF2EN_SHIFT			2
+#define E4_USTORM_IWARP_CONN_AG_CTX_CF3EN_MASK			0x1
+#define E4_USTORM_IWARP_CONN_AG_CTX_CF3EN_SHIFT			3
+#define E4_USTORM_IWARP_CONN_AG_CTX_CQ_ARM_SE_CF_EN_MASK	0x1
+#define E4_USTORM_IWARP_CONN_AG_CTX_CQ_ARM_SE_CF_EN_SHIFT	4
+#define E4_USTORM_IWARP_CONN_AG_CTX_CQ_ARM_CF_EN_MASK		0x1
+#define E4_USTORM_IWARP_CONN_AG_CTX_CQ_ARM_CF_EN_SHIFT		5
+#define E4_USTORM_IWARP_CONN_AG_CTX_CF6EN_MASK			0x1
+#define E4_USTORM_IWARP_CONN_AG_CTX_CF6EN_SHIFT			6
+#define E4_USTORM_IWARP_CONN_AG_CTX_CQ_SE_EN_MASK		0x1
+#define E4_USTORM_IWARP_CONN_AG_CTX_CQ_SE_EN_SHIFT		7
 	u8 flags3;
-#define USTORM_IWARP_CONN_AG_CTX_CQ_EN_MASK	0x1
-#define USTORM_IWARP_CONN_AG_CTX_CQ_EN_SHIFT	0
-#define USTORM_IWARP_CONN_AG_CTX_RULE2EN_MASK	0x1
-#define USTORM_IWARP_CONN_AG_CTX_RULE2EN_SHIFT	1
-#define USTORM_IWARP_CONN_AG_CTX_RULE3EN_MASK	0x1
-#define USTORM_IWARP_CONN_AG_CTX_RULE3EN_SHIFT	2
-#define USTORM_IWARP_CONN_AG_CTX_RULE4EN_MASK	0x1
-#define USTORM_IWARP_CONN_AG_CTX_RULE4EN_SHIFT	3
-#define USTORM_IWARP_CONN_AG_CTX_RULE5EN_MASK	0x1
-#define USTORM_IWARP_CONN_AG_CTX_RULE5EN_SHIFT	4
-#define USTORM_IWARP_CONN_AG_CTX_RULE6EN_MASK	0x1
-#define USTORM_IWARP_CONN_AG_CTX_RULE6EN_SHIFT	5
-#define USTORM_IWARP_CONN_AG_CTX_RULE7EN_MASK	0x1
-#define USTORM_IWARP_CONN_AG_CTX_RULE7EN_SHIFT	6
-#define USTORM_IWARP_CONN_AG_CTX_RULE8EN_MASK	0x1
-#define USTORM_IWARP_CONN_AG_CTX_RULE8EN_SHIFT	7
+#define E4_USTORM_IWARP_CONN_AG_CTX_CQ_EN_MASK		0x1
+#define E4_USTORM_IWARP_CONN_AG_CTX_CQ_EN_SHIFT		0
+#define E4_USTORM_IWARP_CONN_AG_CTX_RULE2EN_MASK	0x1
+#define E4_USTORM_IWARP_CONN_AG_CTX_RULE2EN_SHIFT	1
+#define E4_USTORM_IWARP_CONN_AG_CTX_RULE3EN_MASK	0x1
+#define E4_USTORM_IWARP_CONN_AG_CTX_RULE3EN_SHIFT	2
+#define E4_USTORM_IWARP_CONN_AG_CTX_RULE4EN_MASK	0x1
+#define E4_USTORM_IWARP_CONN_AG_CTX_RULE4EN_SHIFT	3
+#define E4_USTORM_IWARP_CONN_AG_CTX_RULE5EN_MASK	0x1
+#define E4_USTORM_IWARP_CONN_AG_CTX_RULE5EN_SHIFT	4
+#define E4_USTORM_IWARP_CONN_AG_CTX_RULE6EN_MASK	0x1
+#define E4_USTORM_IWARP_CONN_AG_CTX_RULE6EN_SHIFT	5
+#define E4_USTORM_IWARP_CONN_AG_CTX_RULE7EN_MASK	0x1
+#define E4_USTORM_IWARP_CONN_AG_CTX_RULE7EN_SHIFT	6
+#define E4_USTORM_IWARP_CONN_AG_CTX_RULE8EN_MASK	0x1
+#define E4_USTORM_IWARP_CONN_AG_CTX_RULE8EN_SHIFT	7
 	u8 byte2;
 	u8 byte3;
 	__le16 word0;
@@ -9383,37 +10010,37 @@ struct ustorm_iwarp_conn_ag_ctx {
 	__le16 word3;
 };
 
-struct ystorm_iwarp_conn_ag_ctx {
+struct e4_ystorm_iwarp_conn_ag_ctx {
 	u8 byte0;
 	u8 byte1;
 	u8 flags0;
-#define YSTORM_IWARP_CONN_AG_CTX_BIT0_MASK	0x1
-#define YSTORM_IWARP_CONN_AG_CTX_BIT0_SHIFT	0
-#define YSTORM_IWARP_CONN_AG_CTX_BIT1_MASK	0x1
-#define YSTORM_IWARP_CONN_AG_CTX_BIT1_SHIFT	1
-#define YSTORM_IWARP_CONN_AG_CTX_CF0_MASK	0x3
-#define YSTORM_IWARP_CONN_AG_CTX_CF0_SHIFT	2
-#define YSTORM_IWARP_CONN_AG_CTX_CF1_MASK	0x3
-#define YSTORM_IWARP_CONN_AG_CTX_CF1_SHIFT	4
-#define YSTORM_IWARP_CONN_AG_CTX_CF2_MASK	0x3
-#define YSTORM_IWARP_CONN_AG_CTX_CF2_SHIFT	6
+#define E4_YSTORM_IWARP_CONN_AG_CTX_BIT0_MASK	0x1
+#define E4_YSTORM_IWARP_CONN_AG_CTX_BIT0_SHIFT	0
+#define E4_YSTORM_IWARP_CONN_AG_CTX_BIT1_MASK	0x1
+#define E4_YSTORM_IWARP_CONN_AG_CTX_BIT1_SHIFT	1
+#define E4_YSTORM_IWARP_CONN_AG_CTX_CF0_MASK	0x3
+#define E4_YSTORM_IWARP_CONN_AG_CTX_CF0_SHIFT	2
+#define E4_YSTORM_IWARP_CONN_AG_CTX_CF1_MASK	0x3
+#define E4_YSTORM_IWARP_CONN_AG_CTX_CF1_SHIFT	4
+#define E4_YSTORM_IWARP_CONN_AG_CTX_CF2_MASK	0x3
+#define E4_YSTORM_IWARP_CONN_AG_CTX_CF2_SHIFT	6
 	u8 flags1;
-#define YSTORM_IWARP_CONN_AG_CTX_CF0EN_MASK	0x1
-#define YSTORM_IWARP_CONN_AG_CTX_CF0EN_SHIFT	0
-#define YSTORM_IWARP_CONN_AG_CTX_CF1EN_MASK	0x1
-#define YSTORM_IWARP_CONN_AG_CTX_CF1EN_SHIFT	1
-#define YSTORM_IWARP_CONN_AG_CTX_CF2EN_MASK	0x1
-#define YSTORM_IWARP_CONN_AG_CTX_CF2EN_SHIFT	2
-#define YSTORM_IWARP_CONN_AG_CTX_RULE0EN_MASK	0x1
-#define YSTORM_IWARP_CONN_AG_CTX_RULE0EN_SHIFT 3
-#define YSTORM_IWARP_CONN_AG_CTX_RULE1EN_MASK	0x1
-#define YSTORM_IWARP_CONN_AG_CTX_RULE1EN_SHIFT 4
-#define YSTORM_IWARP_CONN_AG_CTX_RULE2EN_MASK	0x1
-#define YSTORM_IWARP_CONN_AG_CTX_RULE2EN_SHIFT 5
-#define YSTORM_IWARP_CONN_AG_CTX_RULE3EN_MASK	0x1
-#define YSTORM_IWARP_CONN_AG_CTX_RULE3EN_SHIFT 6
-#define YSTORM_IWARP_CONN_AG_CTX_RULE4EN_MASK	0x1
-#define YSTORM_IWARP_CONN_AG_CTX_RULE4EN_SHIFT 7
+#define E4_YSTORM_IWARP_CONN_AG_CTX_CF0EN_MASK		0x1
+#define E4_YSTORM_IWARP_CONN_AG_CTX_CF0EN_SHIFT		0
+#define E4_YSTORM_IWARP_CONN_AG_CTX_CF1EN_MASK		0x1
+#define E4_YSTORM_IWARP_CONN_AG_CTX_CF1EN_SHIFT		1
+#define E4_YSTORM_IWARP_CONN_AG_CTX_CF2EN_MASK		0x1
+#define E4_YSTORM_IWARP_CONN_AG_CTX_CF2EN_SHIFT		2
+#define E4_YSTORM_IWARP_CONN_AG_CTX_RULE0EN_MASK	0x1
+#define E4_YSTORM_IWARP_CONN_AG_CTX_RULE0EN_SHIFT	3
+#define E4_YSTORM_IWARP_CONN_AG_CTX_RULE1EN_MASK	0x1
+#define E4_YSTORM_IWARP_CONN_AG_CTX_RULE1EN_SHIFT	4
+#define E4_YSTORM_IWARP_CONN_AG_CTX_RULE2EN_MASK	0x1
+#define E4_YSTORM_IWARP_CONN_AG_CTX_RULE2EN_SHIFT	5
+#define E4_YSTORM_IWARP_CONN_AG_CTX_RULE3EN_MASK	0x1
+#define E4_YSTORM_IWARP_CONN_AG_CTX_RULE3EN_SHIFT	6
+#define E4_YSTORM_IWARP_CONN_AG_CTX_RULE4EN_MASK	0x1
+#define E4_YSTORM_IWARP_CONN_AG_CTX_RULE4EN_SHIFT	7
 	u8 byte2;
 	u8 byte3;
 	__le16 word0;
@@ -9427,6 +10054,7 @@ struct ystorm_iwarp_conn_ag_ctx {
 	__le32 reg3;
 };
 
+/* The fcoe storm context of Ystorm */
 struct ystorm_fcoe_conn_st_ctx {
 	u8 func_mode;
 	u8 cos;
@@ -9442,45 +10070,49 @@ struct ystorm_fcoe_conn_st_ctx {
 	struct regpair reserved;
 	__le16 min_frame_size;
 	u8 protection_info_flags;
-#define YSTORM_FCOE_CONN_ST_CTX_SUPPORT_PROTECTION_MASK  0x1
-#define YSTORM_FCOE_CONN_ST_CTX_SUPPORT_PROTECTION_SHIFT 0
-#define YSTORM_FCOE_CONN_ST_CTX_VALID_MASK               0x1
-#define YSTORM_FCOE_CONN_ST_CTX_VALID_SHIFT              1
-#define YSTORM_FCOE_CONN_ST_CTX_RESERVED1_MASK           0x3F
-#define YSTORM_FCOE_CONN_ST_CTX_RESERVED1_SHIFT          2
+#define YSTORM_FCOE_CONN_ST_CTX_SUPPORT_PROTECTION_MASK		0x1
+#define YSTORM_FCOE_CONN_ST_CTX_SUPPORT_PROTECTION_SHIFT	0
+#define YSTORM_FCOE_CONN_ST_CTX_VALID_MASK			0x1
+#define YSTORM_FCOE_CONN_ST_CTX_VALID_SHIFT			1
+#define YSTORM_FCOE_CONN_ST_CTX_RESERVED1_MASK			0x3F
+#define YSTORM_FCOE_CONN_ST_CTX_RESERVED1_SHIFT			2
 	u8 dst_protection_per_mss;
 	u8 src_protection_per_mss;
 	u8 ptu_log_page_size;
 	u8 flags;
-#define YSTORM_FCOE_CONN_ST_CTX_INNER_VLAN_FLAG_MASK     0x1
-#define YSTORM_FCOE_CONN_ST_CTX_INNER_VLAN_FLAG_SHIFT    0
-#define YSTORM_FCOE_CONN_ST_CTX_OUTER_VLAN_FLAG_MASK     0x1
-#define YSTORM_FCOE_CONN_ST_CTX_OUTER_VLAN_FLAG_SHIFT    1
-#define YSTORM_FCOE_CONN_ST_CTX_RSRV_MASK                0x3F
-#define YSTORM_FCOE_CONN_ST_CTX_RSRV_SHIFT               2
+#define YSTORM_FCOE_CONN_ST_CTX_INNER_VLAN_FLAG_MASK	0x1
+#define YSTORM_FCOE_CONN_ST_CTX_INNER_VLAN_FLAG_SHIFT	0
+#define YSTORM_FCOE_CONN_ST_CTX_OUTER_VLAN_FLAG_MASK	0x1
+#define YSTORM_FCOE_CONN_ST_CTX_OUTER_VLAN_FLAG_SHIFT	1
+#define YSTORM_FCOE_CONN_ST_CTX_RSRV_MASK		0x3F
+#define YSTORM_FCOE_CONN_ST_CTX_RSRV_SHIFT		2
 	u8 fcp_xfer_size;
 };
 
+/* FCoE 16-bit VLAN structure */
 struct fcoe_vlan_fields {
 	__le16 fields;
-#define FCOE_VLAN_FIELDS_VID_MASK  0xFFF
-#define FCOE_VLAN_FIELDS_VID_SHIFT 0
-#define FCOE_VLAN_FIELDS_CLI_MASK  0x1
-#define FCOE_VLAN_FIELDS_CLI_SHIFT 12
-#define FCOE_VLAN_FIELDS_PRI_MASK  0x7
-#define FCOE_VLAN_FIELDS_PRI_SHIFT 13
+#define FCOE_VLAN_FIELDS_VID_MASK	0xFFF
+#define FCOE_VLAN_FIELDS_VID_SHIFT	0
+#define FCOE_VLAN_FIELDS_CLI_MASK	0x1
+#define FCOE_VLAN_FIELDS_CLI_SHIFT	12
+#define FCOE_VLAN_FIELDS_PRI_MASK	0x7
+#define FCOE_VLAN_FIELDS_PRI_SHIFT	13
 };
 
+/* FCoE 16-bit VLAN union */
 union fcoe_vlan_field_union {
 	struct fcoe_vlan_fields fields;
 	__le16 val;
 };
 
+/* FCoE 16-bit VLAN, VIF union */
 union fcoe_vlan_vif_field_union {
 	union fcoe_vlan_field_union vlan;
 	__le16 vif;
 };
 
+/* Ethernet context section */
 struct pstorm_fcoe_eth_context_section {
 	u8 remote_addr_3;
 	u8 remote_addr_2;
@@ -9500,6 +10132,7 @@ struct pstorm_fcoe_eth_context_section {
 	__le16 inner_eth_type;
 };
 
+/* The fcoe storm context of Pstorm */
 struct pstorm_fcoe_conn_st_ctx {
 	u8 func_mode;
 	u8 cos;
@@ -9513,16 +10146,18 @@ struct pstorm_fcoe_conn_st_ctx {
 	u8 sid_1;
 	u8 sid_0;
 	u8 flags;
-#define PSTORM_FCOE_CONN_ST_CTX_VNTAG_VLAN_MASK          0x1
-#define PSTORM_FCOE_CONN_ST_CTX_VNTAG_VLAN_SHIFT         0
-#define PSTORM_FCOE_CONN_ST_CTX_SUPPORT_REC_RR_TOV_MASK  0x1
-#define PSTORM_FCOE_CONN_ST_CTX_SUPPORT_REC_RR_TOV_SHIFT 1
-#define PSTORM_FCOE_CONN_ST_CTX_INNER_VLAN_FLAG_MASK     0x1
-#define PSTORM_FCOE_CONN_ST_CTX_INNER_VLAN_FLAG_SHIFT    2
-#define PSTORM_FCOE_CONN_ST_CTX_OUTER_VLAN_FLAG_MASK     0x1
-#define PSTORM_FCOE_CONN_ST_CTX_OUTER_VLAN_FLAG_SHIFT    3
-#define PSTORM_FCOE_CONN_ST_CTX_RESERVED_MASK            0xF
-#define PSTORM_FCOE_CONN_ST_CTX_RESERVED_SHIFT           4
+#define PSTORM_FCOE_CONN_ST_CTX_VNTAG_VLAN_MASK			0x1
+#define PSTORM_FCOE_CONN_ST_CTX_VNTAG_VLAN_SHIFT		0
+#define PSTORM_FCOE_CONN_ST_CTX_SUPPORT_REC_RR_TOV_MASK		0x1
+#define PSTORM_FCOE_CONN_ST_CTX_SUPPORT_REC_RR_TOV_SHIFT	1
+#define PSTORM_FCOE_CONN_ST_CTX_INNER_VLAN_FLAG_MASK		0x1
+#define PSTORM_FCOE_CONN_ST_CTX_INNER_VLAN_FLAG_SHIFT		2
+#define PSTORM_FCOE_CONN_ST_CTX_OUTER_VLAN_FLAG_MASK		0x1
+#define PSTORM_FCOE_CONN_ST_CTX_OUTER_VLAN_FLAG_SHIFT		3
+#define PSTORM_FCOE_CONN_ST_CTX_SINGLE_VLAN_FLAG_MASK		0x1
+#define PSTORM_FCOE_CONN_ST_CTX_SINGLE_VLAN_FLAG_SHIFT		4
+#define PSTORM_FCOE_CONN_ST_CTX_RESERVED_MASK			0x7
+#define PSTORM_FCOE_CONN_ST_CTX_RESERVED_SHIFT			5
 	u8 did_2;
 	u8 did_1;
 	u8 did_0;
@@ -9532,6 +10167,7 @@ struct pstorm_fcoe_conn_st_ctx {
 	u8 reserved1;
 };
 
+/* The fcoe storm context of Xstorm */
 struct xstorm_fcoe_conn_st_ctx {
 	u8 func_mode;
 	u8 src_mac_index;
@@ -9539,16 +10175,16 @@ struct xstorm_fcoe_conn_st_ctx {
 	u8 cached_wqes_avail;
 	__le16 stat_ram_addr;
 	u8 flags;
-#define XSTORM_FCOE_CONN_ST_CTX_SQ_DEFERRED_MASK             0x1
-#define XSTORM_FCOE_CONN_ST_CTX_SQ_DEFERRED_SHIFT            0
-#define XSTORM_FCOE_CONN_ST_CTX_INNER_VLAN_FLAG_MASK         0x1
-#define XSTORM_FCOE_CONN_ST_CTX_INNER_VLAN_FLAG_SHIFT        1
-#define XSTORM_FCOE_CONN_ST_CTX_INNER_VLAN_FLAG_ORIG_MASK    0x1
-#define XSTORM_FCOE_CONN_ST_CTX_INNER_VLAN_FLAG_ORIG_SHIFT   2
-#define XSTORM_FCOE_CONN_ST_CTX_LAST_QUEUE_HANDLED_MASK      0x3
-#define XSTORM_FCOE_CONN_ST_CTX_LAST_QUEUE_HANDLED_SHIFT     3
-#define XSTORM_FCOE_CONN_ST_CTX_RSRV_MASK                    0x7
-#define XSTORM_FCOE_CONN_ST_CTX_RSRV_SHIFT                   5
+#define XSTORM_FCOE_CONN_ST_CTX_SQ_DEFERRED_MASK		0x1
+#define XSTORM_FCOE_CONN_ST_CTX_SQ_DEFERRED_SHIFT		0
+#define XSTORM_FCOE_CONN_ST_CTX_INNER_VLAN_FLAG_MASK		0x1
+#define XSTORM_FCOE_CONN_ST_CTX_INNER_VLAN_FLAG_SHIFT		1
+#define XSTORM_FCOE_CONN_ST_CTX_INNER_VLAN_FLAG_ORIG_MASK	0x1
+#define XSTORM_FCOE_CONN_ST_CTX_INNER_VLAN_FLAG_ORIG_SHIFT	2
+#define XSTORM_FCOE_CONN_ST_CTX_LAST_QUEUE_HANDLED_MASK		0x3
+#define XSTORM_FCOE_CONN_ST_CTX_LAST_QUEUE_HANDLED_SHIFT	3
+#define XSTORM_FCOE_CONN_ST_CTX_RSRV_MASK			0x7
+#define XSTORM_FCOE_CONN_ST_CTX_RSRV_SHIFT			5
 	u8 cached_wqes_offset;
 	u8 reserved2;
 	u8 eth_hdr_size;
@@ -9574,18 +10210,18 @@ struct xstorm_fcoe_conn_st_ctx {
 	u8 fcp_cmd_byte_credit;
 	u8 fcp_rsp_byte_credit;
 	__le16 protection_info;
-#define XSTORM_FCOE_CONN_ST_CTX_PROTECTION_PERF_MASK         0x1
-#define XSTORM_FCOE_CONN_ST_CTX_PROTECTION_PERF_SHIFT        0
-#define XSTORM_FCOE_CONN_ST_CTX_SUPPORT_PROTECTION_MASK      0x1
-#define XSTORM_FCOE_CONN_ST_CTX_SUPPORT_PROTECTION_SHIFT     1
-#define XSTORM_FCOE_CONN_ST_CTX_VALID_MASK                   0x1
-#define XSTORM_FCOE_CONN_ST_CTX_VALID_SHIFT                  2
-#define XSTORM_FCOE_CONN_ST_CTX_FRAME_PROT_ALIGNED_MASK      0x1
-#define XSTORM_FCOE_CONN_ST_CTX_FRAME_PROT_ALIGNED_SHIFT     3
-#define XSTORM_FCOE_CONN_ST_CTX_RESERVED3_MASK               0xF
-#define XSTORM_FCOE_CONN_ST_CTX_RESERVED3_SHIFT              4
-#define XSTORM_FCOE_CONN_ST_CTX_DST_PROTECTION_PER_MSS_MASK  0xFF
-#define XSTORM_FCOE_CONN_ST_CTX_DST_PROTECTION_PER_MSS_SHIFT 8
+#define XSTORM_FCOE_CONN_ST_CTX_PROTECTION_PERF_MASK		0x1
+#define XSTORM_FCOE_CONN_ST_CTX_PROTECTION_PERF_SHIFT		0
+#define XSTORM_FCOE_CONN_ST_CTX_SUPPORT_PROTECTION_MASK		0x1
+#define XSTORM_FCOE_CONN_ST_CTX_SUPPORT_PROTECTION_SHIFT	1
+#define XSTORM_FCOE_CONN_ST_CTX_VALID_MASK			0x1
+#define XSTORM_FCOE_CONN_ST_CTX_VALID_SHIFT			2
+#define XSTORM_FCOE_CONN_ST_CTX_FRAME_PROT_ALIGNED_MASK		0x1
+#define XSTORM_FCOE_CONN_ST_CTX_FRAME_PROT_ALIGNED_SHIFT	3
+#define XSTORM_FCOE_CONN_ST_CTX_RESERVED3_MASK			0xF
+#define XSTORM_FCOE_CONN_ST_CTX_RESERVED3_SHIFT			4
+#define XSTORM_FCOE_CONN_ST_CTX_DST_PROTECTION_PER_MSS_MASK	0xFF
+#define XSTORM_FCOE_CONN_ST_CTX_DST_PROTECTION_PER_MSS_SHIFT	8
 	__le16 xferq_pbl_next_index;
 	__le16 page_size;
 	u8 mid_seq;
@@ -9594,216 +10230,216 @@ struct xstorm_fcoe_conn_st_ctx {
 	struct fcoe_wqe cached_wqes[16];
 };
 
-struct xstorm_fcoe_conn_ag_ctx {
+struct e4_xstorm_fcoe_conn_ag_ctx {
 	u8 reserved0;
-	u8 fcoe_state;
+	u8 state;
 	u8 flags0;
-#define XSTORM_FCOE_CONN_AG_CTX_EXIST_IN_QM0_MASK       0x1
-#define XSTORM_FCOE_CONN_AG_CTX_EXIST_IN_QM0_SHIFT      0
-#define XSTORM_FCOE_CONN_AG_CTX_RESERVED1_MASK          0x1
-#define XSTORM_FCOE_CONN_AG_CTX_RESERVED1_SHIFT         1
-#define XSTORM_FCOE_CONN_AG_CTX_RESERVED2_MASK          0x1
-#define XSTORM_FCOE_CONN_AG_CTX_RESERVED2_SHIFT         2
-#define XSTORM_FCOE_CONN_AG_CTX_EXIST_IN_QM3_MASK       0x1
-#define XSTORM_FCOE_CONN_AG_CTX_EXIST_IN_QM3_SHIFT      3
-#define XSTORM_FCOE_CONN_AG_CTX_RESERVED3_MASK          0x1
-#define XSTORM_FCOE_CONN_AG_CTX_RESERVED3_SHIFT         4
-#define XSTORM_FCOE_CONN_AG_CTX_RESERVED4_MASK          0x1
-#define XSTORM_FCOE_CONN_AG_CTX_RESERVED4_SHIFT         5
-#define XSTORM_FCOE_CONN_AG_CTX_RESERVED5_MASK          0x1
-#define XSTORM_FCOE_CONN_AG_CTX_RESERVED5_SHIFT         6
-#define XSTORM_FCOE_CONN_AG_CTX_RESERVED6_MASK          0x1
-#define XSTORM_FCOE_CONN_AG_CTX_RESERVED6_SHIFT         7
+#define E4_XSTORM_FCOE_CONN_AG_CTX_EXIST_IN_QM0_MASK	0x1
+#define E4_XSTORM_FCOE_CONN_AG_CTX_EXIST_IN_QM0_SHIFT	0
+#define E4_XSTORM_FCOE_CONN_AG_CTX_RESERVED1_MASK	0x1
+#define E4_XSTORM_FCOE_CONN_AG_CTX_RESERVED1_SHIFT	1
+#define E4_XSTORM_FCOE_CONN_AG_CTX_RESERVED2_MASK	0x1
+#define E4_XSTORM_FCOE_CONN_AG_CTX_RESERVED2_SHIFT	2
+#define E4_XSTORM_FCOE_CONN_AG_CTX_EXIST_IN_QM3_MASK	0x1
+#define E4_XSTORM_FCOE_CONN_AG_CTX_EXIST_IN_QM3_SHIFT	3
+#define E4_XSTORM_FCOE_CONN_AG_CTX_RESERVED3_MASK	0x1
+#define E4_XSTORM_FCOE_CONN_AG_CTX_RESERVED3_SHIFT	4
+#define E4_XSTORM_FCOE_CONN_AG_CTX_RESERVED4_MASK	0x1
+#define E4_XSTORM_FCOE_CONN_AG_CTX_RESERVED4_SHIFT	5
+#define E4_XSTORM_FCOE_CONN_AG_CTX_RESERVED5_MASK	0x1
+#define E4_XSTORM_FCOE_CONN_AG_CTX_RESERVED5_SHIFT	6
+#define E4_XSTORM_FCOE_CONN_AG_CTX_RESERVED6_MASK	0x1
+#define E4_XSTORM_FCOE_CONN_AG_CTX_RESERVED6_SHIFT	7
 	u8 flags1;
-#define XSTORM_FCOE_CONN_AG_CTX_RESERVED7_MASK          0x1
-#define XSTORM_FCOE_CONN_AG_CTX_RESERVED7_SHIFT         0
-#define XSTORM_FCOE_CONN_AG_CTX_RESERVED8_MASK          0x1
-#define XSTORM_FCOE_CONN_AG_CTX_RESERVED8_SHIFT         1
-#define XSTORM_FCOE_CONN_AG_CTX_RESERVED9_MASK          0x1
-#define XSTORM_FCOE_CONN_AG_CTX_RESERVED9_SHIFT         2
-#define XSTORM_FCOE_CONN_AG_CTX_BIT11_MASK              0x1
-#define XSTORM_FCOE_CONN_AG_CTX_BIT11_SHIFT             3
-#define XSTORM_FCOE_CONN_AG_CTX_BIT12_MASK              0x1
-#define XSTORM_FCOE_CONN_AG_CTX_BIT12_SHIFT             4
-#define XSTORM_FCOE_CONN_AG_CTX_BIT13_MASK              0x1
-#define XSTORM_FCOE_CONN_AG_CTX_BIT13_SHIFT             5
-#define XSTORM_FCOE_CONN_AG_CTX_BIT14_MASK              0x1
-#define XSTORM_FCOE_CONN_AG_CTX_BIT14_SHIFT             6
-#define XSTORM_FCOE_CONN_AG_CTX_BIT15_MASK              0x1
-#define XSTORM_FCOE_CONN_AG_CTX_BIT15_SHIFT             7
+#define E4_XSTORM_FCOE_CONN_AG_CTX_RESERVED7_MASK	0x1
+#define E4_XSTORM_FCOE_CONN_AG_CTX_RESERVED7_SHIFT	0
+#define E4_XSTORM_FCOE_CONN_AG_CTX_RESERVED8_MASK	0x1
+#define E4_XSTORM_FCOE_CONN_AG_CTX_RESERVED8_SHIFT	1
+#define E4_XSTORM_FCOE_CONN_AG_CTX_RESERVED9_MASK	0x1
+#define E4_XSTORM_FCOE_CONN_AG_CTX_RESERVED9_SHIFT	2
+#define E4_XSTORM_FCOE_CONN_AG_CTX_BIT11_MASK		0x1
+#define E4_XSTORM_FCOE_CONN_AG_CTX_BIT11_SHIFT		3
+#define E4_XSTORM_FCOE_CONN_AG_CTX_BIT12_MASK		0x1
+#define E4_XSTORM_FCOE_CONN_AG_CTX_BIT12_SHIFT		4
+#define E4_XSTORM_FCOE_CONN_AG_CTX_BIT13_MASK		0x1
+#define E4_XSTORM_FCOE_CONN_AG_CTX_BIT13_SHIFT		5
+#define E4_XSTORM_FCOE_CONN_AG_CTX_BIT14_MASK		0x1
+#define E4_XSTORM_FCOE_CONN_AG_CTX_BIT14_SHIFT		6
+#define E4_XSTORM_FCOE_CONN_AG_CTX_BIT15_MASK		0x1
+#define E4_XSTORM_FCOE_CONN_AG_CTX_BIT15_SHIFT		7
 	u8 flags2;
-#define XSTORM_FCOE_CONN_AG_CTX_CF0_MASK                0x3
-#define XSTORM_FCOE_CONN_AG_CTX_CF0_SHIFT               0
-#define XSTORM_FCOE_CONN_AG_CTX_CF1_MASK                0x3
-#define XSTORM_FCOE_CONN_AG_CTX_CF1_SHIFT               2
-#define XSTORM_FCOE_CONN_AG_CTX_CF2_MASK                0x3
-#define XSTORM_FCOE_CONN_AG_CTX_CF2_SHIFT               4
-#define XSTORM_FCOE_CONN_AG_CTX_CF3_MASK                0x3
-#define XSTORM_FCOE_CONN_AG_CTX_CF3_SHIFT               6
+#define E4_XSTORM_FCOE_CONN_AG_CTX_CF0_MASK	0x3
+#define E4_XSTORM_FCOE_CONN_AG_CTX_CF0_SHIFT	0
+#define E4_XSTORM_FCOE_CONN_AG_CTX_CF1_MASK	0x3
+#define E4_XSTORM_FCOE_CONN_AG_CTX_CF1_SHIFT	2
+#define E4_XSTORM_FCOE_CONN_AG_CTX_CF2_MASK	0x3
+#define E4_XSTORM_FCOE_CONN_AG_CTX_CF2_SHIFT	4
+#define E4_XSTORM_FCOE_CONN_AG_CTX_CF3_MASK	0x3
+#define E4_XSTORM_FCOE_CONN_AG_CTX_CF3_SHIFT	6
 	u8 flags3;
-#define XSTORM_FCOE_CONN_AG_CTX_CF4_MASK                0x3
-#define XSTORM_FCOE_CONN_AG_CTX_CF4_SHIFT               0
-#define XSTORM_FCOE_CONN_AG_CTX_CF5_MASK                0x3
-#define XSTORM_FCOE_CONN_AG_CTX_CF5_SHIFT               2
-#define XSTORM_FCOE_CONN_AG_CTX_CF6_MASK                0x3
-#define XSTORM_FCOE_CONN_AG_CTX_CF6_SHIFT               4
-#define XSTORM_FCOE_CONN_AG_CTX_CF7_MASK                0x3
-#define XSTORM_FCOE_CONN_AG_CTX_CF7_SHIFT               6
+#define E4_XSTORM_FCOE_CONN_AG_CTX_CF4_MASK	0x3
+#define E4_XSTORM_FCOE_CONN_AG_CTX_CF4_SHIFT	0
+#define E4_XSTORM_FCOE_CONN_AG_CTX_CF5_MASK	0x3
+#define E4_XSTORM_FCOE_CONN_AG_CTX_CF5_SHIFT	2
+#define E4_XSTORM_FCOE_CONN_AG_CTX_CF6_MASK	0x3
+#define E4_XSTORM_FCOE_CONN_AG_CTX_CF6_SHIFT	4
+#define E4_XSTORM_FCOE_CONN_AG_CTX_CF7_MASK	0x3
+#define E4_XSTORM_FCOE_CONN_AG_CTX_CF7_SHIFT	6
 	u8 flags4;
-#define XSTORM_FCOE_CONN_AG_CTX_CF8_MASK                0x3
-#define XSTORM_FCOE_CONN_AG_CTX_CF8_SHIFT               0
-#define XSTORM_FCOE_CONN_AG_CTX_CF9_MASK                0x3
-#define XSTORM_FCOE_CONN_AG_CTX_CF9_SHIFT               2
-#define XSTORM_FCOE_CONN_AG_CTX_CF10_MASK               0x3
-#define XSTORM_FCOE_CONN_AG_CTX_CF10_SHIFT              4
-#define XSTORM_FCOE_CONN_AG_CTX_CF11_MASK               0x3
-#define XSTORM_FCOE_CONN_AG_CTX_CF11_SHIFT              6
+#define E4_XSTORM_FCOE_CONN_AG_CTX_CF8_MASK	0x3
+#define E4_XSTORM_FCOE_CONN_AG_CTX_CF8_SHIFT	0
+#define E4_XSTORM_FCOE_CONN_AG_CTX_CF9_MASK	0x3
+#define E4_XSTORM_FCOE_CONN_AG_CTX_CF9_SHIFT	2
+#define E4_XSTORM_FCOE_CONN_AG_CTX_CF10_MASK	0x3
+#define E4_XSTORM_FCOE_CONN_AG_CTX_CF10_SHIFT	4
+#define E4_XSTORM_FCOE_CONN_AG_CTX_CF11_MASK	0x3
+#define E4_XSTORM_FCOE_CONN_AG_CTX_CF11_SHIFT	6
 	u8 flags5;
-#define XSTORM_FCOE_CONN_AG_CTX_CF12_MASK               0x3
-#define XSTORM_FCOE_CONN_AG_CTX_CF12_SHIFT              0
-#define XSTORM_FCOE_CONN_AG_CTX_CF13_MASK               0x3
-#define XSTORM_FCOE_CONN_AG_CTX_CF13_SHIFT              2
-#define XSTORM_FCOE_CONN_AG_CTX_CF14_MASK               0x3
-#define XSTORM_FCOE_CONN_AG_CTX_CF14_SHIFT              4
-#define XSTORM_FCOE_CONN_AG_CTX_CF15_MASK               0x3
-#define XSTORM_FCOE_CONN_AG_CTX_CF15_SHIFT              6
+#define E4_XSTORM_FCOE_CONN_AG_CTX_CF12_MASK	0x3
+#define E4_XSTORM_FCOE_CONN_AG_CTX_CF12_SHIFT	0
+#define E4_XSTORM_FCOE_CONN_AG_CTX_CF13_MASK	0x3
+#define E4_XSTORM_FCOE_CONN_AG_CTX_CF13_SHIFT	2
+#define E4_XSTORM_FCOE_CONN_AG_CTX_CF14_MASK	0x3
+#define E4_XSTORM_FCOE_CONN_AG_CTX_CF14_SHIFT	4
+#define E4_XSTORM_FCOE_CONN_AG_CTX_CF15_MASK	0x3
+#define E4_XSTORM_FCOE_CONN_AG_CTX_CF15_SHIFT	6
 	u8 flags6;
-#define XSTORM_FCOE_CONN_AG_CTX_CF16_MASK               0x3
-#define XSTORM_FCOE_CONN_AG_CTX_CF16_SHIFT              0
-#define XSTORM_FCOE_CONN_AG_CTX_CF17_MASK               0x3
-#define XSTORM_FCOE_CONN_AG_CTX_CF17_SHIFT              2
-#define XSTORM_FCOE_CONN_AG_CTX_CF18_MASK               0x3
-#define XSTORM_FCOE_CONN_AG_CTX_CF18_SHIFT              4
-#define XSTORM_FCOE_CONN_AG_CTX_DQ_CF_MASK              0x3
-#define XSTORM_FCOE_CONN_AG_CTX_DQ_CF_SHIFT             6
+#define E4_XSTORM_FCOE_CONN_AG_CTX_CF16_MASK	0x3
+#define E4_XSTORM_FCOE_CONN_AG_CTX_CF16_SHIFT	0
+#define E4_XSTORM_FCOE_CONN_AG_CTX_CF17_MASK	0x3
+#define E4_XSTORM_FCOE_CONN_AG_CTX_CF17_SHIFT	2
+#define E4_XSTORM_FCOE_CONN_AG_CTX_CF18_MASK	0x3
+#define E4_XSTORM_FCOE_CONN_AG_CTX_CF18_SHIFT	4
+#define E4_XSTORM_FCOE_CONN_AG_CTX_DQ_CF_MASK	0x3
+#define E4_XSTORM_FCOE_CONN_AG_CTX_DQ_CF_SHIFT	6
 	u8 flags7;
-#define XSTORM_FCOE_CONN_AG_CTX_FLUSH_Q0_MASK           0x3
-#define XSTORM_FCOE_CONN_AG_CTX_FLUSH_Q0_SHIFT          0
-#define XSTORM_FCOE_CONN_AG_CTX_RESERVED10_MASK         0x3
-#define XSTORM_FCOE_CONN_AG_CTX_RESERVED10_SHIFT        2
-#define XSTORM_FCOE_CONN_AG_CTX_SLOW_PATH_MASK          0x3
-#define XSTORM_FCOE_CONN_AG_CTX_SLOW_PATH_SHIFT         4
-#define XSTORM_FCOE_CONN_AG_CTX_CF0EN_MASK              0x1
-#define XSTORM_FCOE_CONN_AG_CTX_CF0EN_SHIFT             6
-#define XSTORM_FCOE_CONN_AG_CTX_CF1EN_MASK              0x1
-#define XSTORM_FCOE_CONN_AG_CTX_CF1EN_SHIFT             7
+#define E4_XSTORM_FCOE_CONN_AG_CTX_FLUSH_Q0_MASK	0x3
+#define E4_XSTORM_FCOE_CONN_AG_CTX_FLUSH_Q0_SHIFT	0
+#define E4_XSTORM_FCOE_CONN_AG_CTX_RESERVED10_MASK	0x3
+#define E4_XSTORM_FCOE_CONN_AG_CTX_RESERVED10_SHIFT	2
+#define E4_XSTORM_FCOE_CONN_AG_CTX_SLOW_PATH_MASK	0x3
+#define E4_XSTORM_FCOE_CONN_AG_CTX_SLOW_PATH_SHIFT	4
+#define E4_XSTORM_FCOE_CONN_AG_CTX_CF0EN_MASK		0x1
+#define E4_XSTORM_FCOE_CONN_AG_CTX_CF0EN_SHIFT		6
+#define E4_XSTORM_FCOE_CONN_AG_CTX_CF1EN_MASK		0x1
+#define E4_XSTORM_FCOE_CONN_AG_CTX_CF1EN_SHIFT		7
 	u8 flags8;
-#define XSTORM_FCOE_CONN_AG_CTX_CF2EN_MASK              0x1
-#define XSTORM_FCOE_CONN_AG_CTX_CF2EN_SHIFT             0
-#define XSTORM_FCOE_CONN_AG_CTX_CF3EN_MASK              0x1
-#define XSTORM_FCOE_CONN_AG_CTX_CF3EN_SHIFT             1
-#define XSTORM_FCOE_CONN_AG_CTX_CF4EN_MASK              0x1
-#define XSTORM_FCOE_CONN_AG_CTX_CF4EN_SHIFT             2
-#define XSTORM_FCOE_CONN_AG_CTX_CF5EN_MASK              0x1
-#define XSTORM_FCOE_CONN_AG_CTX_CF5EN_SHIFT             3
-#define XSTORM_FCOE_CONN_AG_CTX_CF6EN_MASK              0x1
-#define XSTORM_FCOE_CONN_AG_CTX_CF6EN_SHIFT             4
-#define XSTORM_FCOE_CONN_AG_CTX_CF7EN_MASK              0x1
-#define XSTORM_FCOE_CONN_AG_CTX_CF7EN_SHIFT             5
-#define XSTORM_FCOE_CONN_AG_CTX_CF8EN_MASK              0x1
-#define XSTORM_FCOE_CONN_AG_CTX_CF8EN_SHIFT             6
-#define XSTORM_FCOE_CONN_AG_CTX_CF9EN_MASK              0x1
-#define XSTORM_FCOE_CONN_AG_CTX_CF9EN_SHIFT             7
+#define E4_XSTORM_FCOE_CONN_AG_CTX_CF2EN_MASK	0x1
+#define E4_XSTORM_FCOE_CONN_AG_CTX_CF2EN_SHIFT	0
+#define E4_XSTORM_FCOE_CONN_AG_CTX_CF3EN_MASK	0x1
+#define E4_XSTORM_FCOE_CONN_AG_CTX_CF3EN_SHIFT	1
+#define E4_XSTORM_FCOE_CONN_AG_CTX_CF4EN_MASK	0x1
+#define E4_XSTORM_FCOE_CONN_AG_CTX_CF4EN_SHIFT	2
+#define E4_XSTORM_FCOE_CONN_AG_CTX_CF5EN_MASK	0x1
+#define E4_XSTORM_FCOE_CONN_AG_CTX_CF5EN_SHIFT	3
+#define E4_XSTORM_FCOE_CONN_AG_CTX_CF6EN_MASK	0x1
+#define E4_XSTORM_FCOE_CONN_AG_CTX_CF6EN_SHIFT	4
+#define E4_XSTORM_FCOE_CONN_AG_CTX_CF7EN_MASK	0x1
+#define E4_XSTORM_FCOE_CONN_AG_CTX_CF7EN_SHIFT	5
+#define E4_XSTORM_FCOE_CONN_AG_CTX_CF8EN_MASK	0x1
+#define E4_XSTORM_FCOE_CONN_AG_CTX_CF8EN_SHIFT	6
+#define E4_XSTORM_FCOE_CONN_AG_CTX_CF9EN_MASK	0x1
+#define E4_XSTORM_FCOE_CONN_AG_CTX_CF9EN_SHIFT	7
 	u8 flags9;
-#define XSTORM_FCOE_CONN_AG_CTX_CF10EN_MASK             0x1
-#define XSTORM_FCOE_CONN_AG_CTX_CF10EN_SHIFT            0
-#define XSTORM_FCOE_CONN_AG_CTX_CF11EN_MASK             0x1
-#define XSTORM_FCOE_CONN_AG_CTX_CF11EN_SHIFT            1
-#define XSTORM_FCOE_CONN_AG_CTX_CF12EN_MASK             0x1
-#define XSTORM_FCOE_CONN_AG_CTX_CF12EN_SHIFT            2
-#define XSTORM_FCOE_CONN_AG_CTX_CF13EN_MASK             0x1
-#define XSTORM_FCOE_CONN_AG_CTX_CF13EN_SHIFT            3
-#define XSTORM_FCOE_CONN_AG_CTX_CF14EN_MASK             0x1
-#define XSTORM_FCOE_CONN_AG_CTX_CF14EN_SHIFT            4
-#define XSTORM_FCOE_CONN_AG_CTX_CF15EN_MASK             0x1
-#define XSTORM_FCOE_CONN_AG_CTX_CF15EN_SHIFT            5
-#define XSTORM_FCOE_CONN_AG_CTX_CF16EN_MASK             0x1
-#define XSTORM_FCOE_CONN_AG_CTX_CF16EN_SHIFT            6
-#define XSTORM_FCOE_CONN_AG_CTX_CF17EN_MASK             0x1
-#define XSTORM_FCOE_CONN_AG_CTX_CF17EN_SHIFT            7
+#define E4_XSTORM_FCOE_CONN_AG_CTX_CF10EN_MASK	0x1
+#define E4_XSTORM_FCOE_CONN_AG_CTX_CF10EN_SHIFT	0
+#define E4_XSTORM_FCOE_CONN_AG_CTX_CF11EN_MASK	0x1
+#define E4_XSTORM_FCOE_CONN_AG_CTX_CF11EN_SHIFT	1
+#define E4_XSTORM_FCOE_CONN_AG_CTX_CF12EN_MASK	0x1
+#define E4_XSTORM_FCOE_CONN_AG_CTX_CF12EN_SHIFT	2
+#define E4_XSTORM_FCOE_CONN_AG_CTX_CF13EN_MASK	0x1
+#define E4_XSTORM_FCOE_CONN_AG_CTX_CF13EN_SHIFT	3
+#define E4_XSTORM_FCOE_CONN_AG_CTX_CF14EN_MASK	0x1
+#define E4_XSTORM_FCOE_CONN_AG_CTX_CF14EN_SHIFT	4
+#define E4_XSTORM_FCOE_CONN_AG_CTX_CF15EN_MASK	0x1
+#define E4_XSTORM_FCOE_CONN_AG_CTX_CF15EN_SHIFT	5
+#define E4_XSTORM_FCOE_CONN_AG_CTX_CF16EN_MASK	0x1
+#define E4_XSTORM_FCOE_CONN_AG_CTX_CF16EN_SHIFT	6
+#define E4_XSTORM_FCOE_CONN_AG_CTX_CF17EN_MASK	0x1
+#define E4_XSTORM_FCOE_CONN_AG_CTX_CF17EN_SHIFT	7
 	u8 flags10;
-#define XSTORM_FCOE_CONN_AG_CTX_CF18EN_MASK             0x1
-#define XSTORM_FCOE_CONN_AG_CTX_CF18EN_SHIFT            0
-#define XSTORM_FCOE_CONN_AG_CTX_DQ_CF_EN_MASK           0x1
-#define XSTORM_FCOE_CONN_AG_CTX_DQ_CF_EN_SHIFT          1
-#define XSTORM_FCOE_CONN_AG_CTX_FLUSH_Q0_EN_MASK        0x1
-#define XSTORM_FCOE_CONN_AG_CTX_FLUSH_Q0_EN_SHIFT       2
-#define XSTORM_FCOE_CONN_AG_CTX_RESERVED11_MASK         0x1
-#define XSTORM_FCOE_CONN_AG_CTX_RESERVED11_SHIFT        3
-#define XSTORM_FCOE_CONN_AG_CTX_SLOW_PATH_EN_MASK       0x1
-#define XSTORM_FCOE_CONN_AG_CTX_SLOW_PATH_EN_SHIFT      4
-#define XSTORM_FCOE_CONN_AG_CTX_CF23EN_MASK             0x1
-#define XSTORM_FCOE_CONN_AG_CTX_CF23EN_SHIFT            5
-#define XSTORM_FCOE_CONN_AG_CTX_RESERVED12_MASK         0x1
-#define XSTORM_FCOE_CONN_AG_CTX_RESERVED12_SHIFT        6
-#define XSTORM_FCOE_CONN_AG_CTX_RESERVED13_MASK         0x1
-#define XSTORM_FCOE_CONN_AG_CTX_RESERVED13_SHIFT        7
+#define E4_XSTORM_FCOE_CONN_AG_CTX_CF18EN_MASK		0x1
+#define E4_XSTORM_FCOE_CONN_AG_CTX_CF18EN_SHIFT		0
+#define E4_XSTORM_FCOE_CONN_AG_CTX_DQ_CF_EN_MASK	0x1
+#define E4_XSTORM_FCOE_CONN_AG_CTX_DQ_CF_EN_SHIFT	1
+#define E4_XSTORM_FCOE_CONN_AG_CTX_FLUSH_Q0_EN_MASK	0x1
+#define E4_XSTORM_FCOE_CONN_AG_CTX_FLUSH_Q0_EN_SHIFT	2
+#define E4_XSTORM_FCOE_CONN_AG_CTX_RESERVED11_MASK	0x1
+#define E4_XSTORM_FCOE_CONN_AG_CTX_RESERVED11_SHIFT	3
+#define E4_XSTORM_FCOE_CONN_AG_CTX_SLOW_PATH_EN_MASK	0x1
+#define E4_XSTORM_FCOE_CONN_AG_CTX_SLOW_PATH_EN_SHIFT	4
+#define E4_XSTORM_FCOE_CONN_AG_CTX_CF23EN_MASK		0x1
+#define E4_XSTORM_FCOE_CONN_AG_CTX_CF23EN_SHIFT		5
+#define E4_XSTORM_FCOE_CONN_AG_CTX_RESERVED12_MASK	0x1
+#define E4_XSTORM_FCOE_CONN_AG_CTX_RESERVED12_SHIFT	6
+#define E4_XSTORM_FCOE_CONN_AG_CTX_RESERVED13_MASK	0x1
+#define E4_XSTORM_FCOE_CONN_AG_CTX_RESERVED13_SHIFT	7
 	u8 flags11;
-#define XSTORM_FCOE_CONN_AG_CTX_RESERVED14_MASK         0x1
-#define XSTORM_FCOE_CONN_AG_CTX_RESERVED14_SHIFT        0
-#define XSTORM_FCOE_CONN_AG_CTX_RESERVED15_MASK         0x1
-#define XSTORM_FCOE_CONN_AG_CTX_RESERVED15_SHIFT        1
-#define XSTORM_FCOE_CONN_AG_CTX_RESERVED16_MASK         0x1
-#define XSTORM_FCOE_CONN_AG_CTX_RESERVED16_SHIFT        2
-#define XSTORM_FCOE_CONN_AG_CTX_RULE5EN_MASK            0x1
-#define XSTORM_FCOE_CONN_AG_CTX_RULE5EN_SHIFT           3
-#define XSTORM_FCOE_CONN_AG_CTX_RULE6EN_MASK            0x1
-#define XSTORM_FCOE_CONN_AG_CTX_RULE6EN_SHIFT           4
-#define XSTORM_FCOE_CONN_AG_CTX_RULE7EN_MASK            0x1
-#define XSTORM_FCOE_CONN_AG_CTX_RULE7EN_SHIFT           5
-#define XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED1_MASK       0x1
-#define XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED1_SHIFT      6
-#define XSTORM_FCOE_CONN_AG_CTX_XFERQ_DECISION_EN_MASK  0x1
-#define XSTORM_FCOE_CONN_AG_CTX_XFERQ_DECISION_EN_SHIFT 7
+#define E4_XSTORM_FCOE_CONN_AG_CTX_RESERVED14_MASK		0x1
+#define E4_XSTORM_FCOE_CONN_AG_CTX_RESERVED14_SHIFT		0
+#define E4_XSTORM_FCOE_CONN_AG_CTX_RESERVED15_MASK		0x1
+#define E4_XSTORM_FCOE_CONN_AG_CTX_RESERVED15_SHIFT		1
+#define E4_XSTORM_FCOE_CONN_AG_CTX_RESERVED16_MASK		0x1
+#define E4_XSTORM_FCOE_CONN_AG_CTX_RESERVED16_SHIFT		2
+#define E4_XSTORM_FCOE_CONN_AG_CTX_RULE5EN_MASK			0x1
+#define E4_XSTORM_FCOE_CONN_AG_CTX_RULE5EN_SHIFT		3
+#define E4_XSTORM_FCOE_CONN_AG_CTX_RULE6EN_MASK			0x1
+#define E4_XSTORM_FCOE_CONN_AG_CTX_RULE6EN_SHIFT		4
+#define E4_XSTORM_FCOE_CONN_AG_CTX_RULE7EN_MASK			0x1
+#define E4_XSTORM_FCOE_CONN_AG_CTX_RULE7EN_SHIFT		5
+#define E4_XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED1_MASK		0x1
+#define E4_XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED1_SHIFT		6
+#define E4_XSTORM_FCOE_CONN_AG_CTX_XFERQ_DECISION_EN_MASK	0x1
+#define E4_XSTORM_FCOE_CONN_AG_CTX_XFERQ_DECISION_EN_SHIFT	7
 	u8 flags12;
-#define XSTORM_FCOE_CONN_AG_CTX_SQ_DECISION_EN_MASK     0x1
-#define XSTORM_FCOE_CONN_AG_CTX_SQ_DECISION_EN_SHIFT    0
-#define XSTORM_FCOE_CONN_AG_CTX_RULE11EN_MASK           0x1
-#define XSTORM_FCOE_CONN_AG_CTX_RULE11EN_SHIFT          1
-#define XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED2_MASK       0x1
-#define XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED2_SHIFT      2
-#define XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED3_MASK       0x1
-#define XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED3_SHIFT      3
-#define XSTORM_FCOE_CONN_AG_CTX_RULE14EN_MASK           0x1
-#define XSTORM_FCOE_CONN_AG_CTX_RULE14EN_SHIFT          4
-#define XSTORM_FCOE_CONN_AG_CTX_RULE15EN_MASK           0x1
-#define XSTORM_FCOE_CONN_AG_CTX_RULE15EN_SHIFT          5
-#define XSTORM_FCOE_CONN_AG_CTX_RULE16EN_MASK           0x1
-#define XSTORM_FCOE_CONN_AG_CTX_RULE16EN_SHIFT          6
-#define XSTORM_FCOE_CONN_AG_CTX_RULE17EN_MASK           0x1
-#define XSTORM_FCOE_CONN_AG_CTX_RULE17EN_SHIFT          7
+#define E4_XSTORM_FCOE_CONN_AG_CTX_SQ_DECISION_EN_MASK	0x1
+#define E4_XSTORM_FCOE_CONN_AG_CTX_SQ_DECISION_EN_SHIFT	0
+#define E4_XSTORM_FCOE_CONN_AG_CTX_RULE11EN_MASK	0x1
+#define E4_XSTORM_FCOE_CONN_AG_CTX_RULE11EN_SHIFT	1
+#define E4_XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED2_MASK	0x1
+#define E4_XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED2_SHIFT	2
+#define E4_XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED3_MASK	0x1
+#define E4_XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED3_SHIFT	3
+#define E4_XSTORM_FCOE_CONN_AG_CTX_RULE14EN_MASK	0x1
+#define E4_XSTORM_FCOE_CONN_AG_CTX_RULE14EN_SHIFT	4
+#define E4_XSTORM_FCOE_CONN_AG_CTX_RULE15EN_MASK	0x1
+#define E4_XSTORM_FCOE_CONN_AG_CTX_RULE15EN_SHIFT	5
+#define E4_XSTORM_FCOE_CONN_AG_CTX_RULE16EN_MASK	0x1
+#define E4_XSTORM_FCOE_CONN_AG_CTX_RULE16EN_SHIFT	6
+#define E4_XSTORM_FCOE_CONN_AG_CTX_RULE17EN_MASK	0x1
+#define E4_XSTORM_FCOE_CONN_AG_CTX_RULE17EN_SHIFT	7
 	u8 flags13;
-#define XSTORM_FCOE_CONN_AG_CTX_RESPQ_DECISION_EN_MASK  0x1
-#define XSTORM_FCOE_CONN_AG_CTX_RESPQ_DECISION_EN_SHIFT 0
-#define XSTORM_FCOE_CONN_AG_CTX_RULE19EN_MASK           0x1
-#define XSTORM_FCOE_CONN_AG_CTX_RULE19EN_SHIFT          1
-#define XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED4_MASK       0x1
-#define XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED4_SHIFT      2
-#define XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED5_MASK       0x1
-#define XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED5_SHIFT      3
-#define XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED6_MASK       0x1
-#define XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED6_SHIFT      4
-#define XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED7_MASK       0x1
-#define XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED7_SHIFT      5
-#define XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED8_MASK       0x1
-#define XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED8_SHIFT      6
-#define XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED9_MASK       0x1
-#define XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED9_SHIFT      7
+#define E4_XSTORM_FCOE_CONN_AG_CTX_RESPQ_DECISION_EN_MASK	0x1
+#define E4_XSTORM_FCOE_CONN_AG_CTX_RESPQ_DECISION_EN_SHIFT	0
+#define E4_XSTORM_FCOE_CONN_AG_CTX_RULE19EN_MASK		0x1
+#define E4_XSTORM_FCOE_CONN_AG_CTX_RULE19EN_SHIFT		1
+#define E4_XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED4_MASK		0x1
+#define E4_XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED4_SHIFT		2
+#define E4_XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED5_MASK		0x1
+#define E4_XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED5_SHIFT		3
+#define E4_XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED6_MASK		0x1
+#define E4_XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED6_SHIFT		4
+#define E4_XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED7_MASK		0x1
+#define E4_XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED7_SHIFT		5
+#define E4_XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED8_MASK		0x1
+#define E4_XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED8_SHIFT		6
+#define E4_XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED9_MASK		0x1
+#define E4_XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED9_SHIFT		7
 	u8 flags14;
-#define XSTORM_FCOE_CONN_AG_CTX_BIT16_MASK              0x1
-#define XSTORM_FCOE_CONN_AG_CTX_BIT16_SHIFT             0
-#define XSTORM_FCOE_CONN_AG_CTX_BIT17_MASK              0x1
-#define XSTORM_FCOE_CONN_AG_CTX_BIT17_SHIFT             1
-#define XSTORM_FCOE_CONN_AG_CTX_BIT18_MASK              0x1
-#define XSTORM_FCOE_CONN_AG_CTX_BIT18_SHIFT             2
-#define XSTORM_FCOE_CONN_AG_CTX_BIT19_MASK              0x1
-#define XSTORM_FCOE_CONN_AG_CTX_BIT19_SHIFT             3
-#define XSTORM_FCOE_CONN_AG_CTX_BIT20_MASK              0x1
-#define XSTORM_FCOE_CONN_AG_CTX_BIT20_SHIFT             4
-#define XSTORM_FCOE_CONN_AG_CTX_BIT21_MASK              0x1
-#define XSTORM_FCOE_CONN_AG_CTX_BIT21_SHIFT             5
-#define XSTORM_FCOE_CONN_AG_CTX_CF23_MASK               0x3
-#define XSTORM_FCOE_CONN_AG_CTX_CF23_SHIFT              6
+#define E4_XSTORM_FCOE_CONN_AG_CTX_BIT16_MASK	0x1
+#define E4_XSTORM_FCOE_CONN_AG_CTX_BIT16_SHIFT	0
+#define E4_XSTORM_FCOE_CONN_AG_CTX_BIT17_MASK	0x1
+#define E4_XSTORM_FCOE_CONN_AG_CTX_BIT17_SHIFT	1
+#define E4_XSTORM_FCOE_CONN_AG_CTX_BIT18_MASK	0x1
+#define E4_XSTORM_FCOE_CONN_AG_CTX_BIT18_SHIFT	2
+#define E4_XSTORM_FCOE_CONN_AG_CTX_BIT19_MASK	0x1
+#define E4_XSTORM_FCOE_CONN_AG_CTX_BIT19_SHIFT	3
+#define E4_XSTORM_FCOE_CONN_AG_CTX_BIT20_MASK	0x1
+#define E4_XSTORM_FCOE_CONN_AG_CTX_BIT20_SHIFT	4
+#define E4_XSTORM_FCOE_CONN_AG_CTX_BIT21_MASK	0x1
+#define E4_XSTORM_FCOE_CONN_AG_CTX_BIT21_SHIFT	5
+#define E4_XSTORM_FCOE_CONN_AG_CTX_CF23_MASK	0x3
+#define E4_XSTORM_FCOE_CONN_AG_CTX_CF23_SHIFT	6
 	u8 byte2;
 	__le16 physical_q0;
 	__le16 word1;
@@ -9831,6 +10467,7 @@ struct xstorm_fcoe_conn_ag_ctx {
 	__le32 reg8;
 };
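
Every flags byte in these aggregate-context structs is a packed bitfield driven by the paired _MASK/_SHIFT macros above. A minimal sketch of the accessor pattern, restated locally for illustration (the qed headers carry their own GET_FIELD/SET_FIELD helpers; the definitions below are written out to show the mechanics and are not quoted from the driver):

	/* Illustrative restatement of the qed-style field accessors;
	 * assumes kernel u8 types. Each NAME expands to NAME_MASK and
	 * NAME_SHIFT via token pasting.
	 */
	#define GET_FIELD(value, name) \
		(((value) >> name##_SHIFT) & name##_MASK)

	#define SET_FIELD(value, name, flag)				\
		do {							\
			(value) &= ~(name##_MASK << name##_SHIFT);	\
			(value) |= ((flag) & name##_MASK) << name##_SHIFT; \
		} while (0)

	/* e.g. set the 2-bit CF0 field in flags2, read a bit from flags0 */
	static void example_flags(struct e4_xstorm_fcoe_conn_ag_ctx *ctx)
	{
		u8 in_qm3;

		SET_FIELD(ctx->flags2, E4_XSTORM_FCOE_CONN_AG_CTX_CF0, 2);
		in_qm3 = GET_FIELD(ctx->flags0,
				   E4_XSTORM_FCOE_CONN_AG_CTX_EXIST_IN_QM3);
		(void)in_qm3;
	}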
 
+/* The fcoe storm context of Ustorm */
 struct ustorm_fcoe_conn_st_ctx {
 	struct regpair respq_pbl_addr;
 	__le16 num_pages_in_pbl;
@@ -9840,150 +10477,150 @@ struct ustorm_fcoe_conn_st_ctx {
 	u8 reserved[2];
 };
 
-struct tstorm_fcoe_conn_ag_ctx {
+struct e4_tstorm_fcoe_conn_ag_ctx {
 	u8 reserved0;
-	u8 fcoe_state;
+	u8 state;
 	u8 flags0;
-#define TSTORM_FCOE_CONN_AG_CTX_EXIST_IN_QM0_MASK          0x1
-#define TSTORM_FCOE_CONN_AG_CTX_EXIST_IN_QM0_SHIFT         0
-#define TSTORM_FCOE_CONN_AG_CTX_BIT1_MASK                  0x1
-#define TSTORM_FCOE_CONN_AG_CTX_BIT1_SHIFT                 1
-#define TSTORM_FCOE_CONN_AG_CTX_BIT2_MASK                  0x1
-#define TSTORM_FCOE_CONN_AG_CTX_BIT2_SHIFT                 2
-#define TSTORM_FCOE_CONN_AG_CTX_BIT3_MASK                  0x1
-#define TSTORM_FCOE_CONN_AG_CTX_BIT3_SHIFT                 3
-#define TSTORM_FCOE_CONN_AG_CTX_BIT4_MASK                  0x1
-#define TSTORM_FCOE_CONN_AG_CTX_BIT4_SHIFT                 4
-#define TSTORM_FCOE_CONN_AG_CTX_BIT5_MASK                  0x1
-#define TSTORM_FCOE_CONN_AG_CTX_BIT5_SHIFT                 5
-#define TSTORM_FCOE_CONN_AG_CTX_DUMMY_TIMER_CF_MASK        0x3
-#define TSTORM_FCOE_CONN_AG_CTX_DUMMY_TIMER_CF_SHIFT       6
+#define E4_TSTORM_FCOE_CONN_AG_CTX_EXIST_IN_QM0_MASK	0x1
+#define E4_TSTORM_FCOE_CONN_AG_CTX_EXIST_IN_QM0_SHIFT	0
+#define E4_TSTORM_FCOE_CONN_AG_CTX_BIT1_MASK		0x1
+#define E4_TSTORM_FCOE_CONN_AG_CTX_BIT1_SHIFT		1
+#define E4_TSTORM_FCOE_CONN_AG_CTX_BIT2_MASK		0x1
+#define E4_TSTORM_FCOE_CONN_AG_CTX_BIT2_SHIFT		2
+#define E4_TSTORM_FCOE_CONN_AG_CTX_BIT3_MASK		0x1
+#define E4_TSTORM_FCOE_CONN_AG_CTX_BIT3_SHIFT		3
+#define E4_TSTORM_FCOE_CONN_AG_CTX_BIT4_MASK		0x1
+#define E4_TSTORM_FCOE_CONN_AG_CTX_BIT4_SHIFT		4
+#define E4_TSTORM_FCOE_CONN_AG_CTX_BIT5_MASK		0x1
+#define E4_TSTORM_FCOE_CONN_AG_CTX_BIT5_SHIFT		5
+#define E4_TSTORM_FCOE_CONN_AG_CTX_DUMMY_TIMER_CF_MASK	0x3
+#define E4_TSTORM_FCOE_CONN_AG_CTX_DUMMY_TIMER_CF_SHIFT	6
 	u8 flags1;
-#define TSTORM_FCOE_CONN_AG_CTX_FLUSH_Q0_CF_MASK           0x3
-#define TSTORM_FCOE_CONN_AG_CTX_FLUSH_Q0_CF_SHIFT          0
-#define TSTORM_FCOE_CONN_AG_CTX_CF2_MASK                   0x3
-#define TSTORM_FCOE_CONN_AG_CTX_CF2_SHIFT                  2
-#define TSTORM_FCOE_CONN_AG_CTX_TIMER_STOP_ALL_CF_MASK     0x3
-#define TSTORM_FCOE_CONN_AG_CTX_TIMER_STOP_ALL_CF_SHIFT    4
-#define TSTORM_FCOE_CONN_AG_CTX_CF4_MASK                   0x3
-#define TSTORM_FCOE_CONN_AG_CTX_CF4_SHIFT                  6
+#define E4_TSTORM_FCOE_CONN_AG_CTX_FLUSH_Q0_CF_MASK		0x3
+#define E4_TSTORM_FCOE_CONN_AG_CTX_FLUSH_Q0_CF_SHIFT		0
+#define E4_TSTORM_FCOE_CONN_AG_CTX_CF2_MASK			0x3
+#define E4_TSTORM_FCOE_CONN_AG_CTX_CF2_SHIFT			2
+#define E4_TSTORM_FCOE_CONN_AG_CTX_TIMER_STOP_ALL_CF_MASK	0x3
+#define E4_TSTORM_FCOE_CONN_AG_CTX_TIMER_STOP_ALL_CF_SHIFT	4
+#define E4_TSTORM_FCOE_CONN_AG_CTX_CF4_MASK			0x3
+#define E4_TSTORM_FCOE_CONN_AG_CTX_CF4_SHIFT			6
 	u8 flags2;
-#define TSTORM_FCOE_CONN_AG_CTX_CF5_MASK                   0x3
-#define TSTORM_FCOE_CONN_AG_CTX_CF5_SHIFT                  0
-#define TSTORM_FCOE_CONN_AG_CTX_CF6_MASK                   0x3
-#define TSTORM_FCOE_CONN_AG_CTX_CF6_SHIFT                  2
-#define TSTORM_FCOE_CONN_AG_CTX_CF7_MASK                   0x3
-#define TSTORM_FCOE_CONN_AG_CTX_CF7_SHIFT                  4
-#define TSTORM_FCOE_CONN_AG_CTX_CF8_MASK                   0x3
-#define TSTORM_FCOE_CONN_AG_CTX_CF8_SHIFT                  6
+#define E4_TSTORM_FCOE_CONN_AG_CTX_CF5_MASK	0x3
+#define E4_TSTORM_FCOE_CONN_AG_CTX_CF5_SHIFT	0
+#define E4_TSTORM_FCOE_CONN_AG_CTX_CF6_MASK	0x3
+#define E4_TSTORM_FCOE_CONN_AG_CTX_CF6_SHIFT	2
+#define E4_TSTORM_FCOE_CONN_AG_CTX_CF7_MASK	0x3
+#define E4_TSTORM_FCOE_CONN_AG_CTX_CF7_SHIFT	4
+#define E4_TSTORM_FCOE_CONN_AG_CTX_CF8_MASK	0x3
+#define E4_TSTORM_FCOE_CONN_AG_CTX_CF8_SHIFT	6
 	u8 flags3;
-#define TSTORM_FCOE_CONN_AG_CTX_CF9_MASK                   0x3
-#define TSTORM_FCOE_CONN_AG_CTX_CF9_SHIFT                  0
-#define TSTORM_FCOE_CONN_AG_CTX_CF10_MASK                  0x3
-#define TSTORM_FCOE_CONN_AG_CTX_CF10_SHIFT                 2
-#define TSTORM_FCOE_CONN_AG_CTX_DUMMY_TIMER_CF_EN_MASK     0x1
-#define TSTORM_FCOE_CONN_AG_CTX_DUMMY_TIMER_CF_EN_SHIFT    4
-#define TSTORM_FCOE_CONN_AG_CTX_FLUSH_Q0_CF_EN_MASK        0x1
-#define TSTORM_FCOE_CONN_AG_CTX_FLUSH_Q0_CF_EN_SHIFT       5
-#define TSTORM_FCOE_CONN_AG_CTX_CF2EN_MASK                 0x1
-#define TSTORM_FCOE_CONN_AG_CTX_CF2EN_SHIFT                6
-#define TSTORM_FCOE_CONN_AG_CTX_TIMER_STOP_ALL_CF_EN_MASK  0x1
-#define TSTORM_FCOE_CONN_AG_CTX_TIMER_STOP_ALL_CF_EN_SHIFT 7
+#define E4_TSTORM_FCOE_CONN_AG_CTX_CF9_MASK			0x3
+#define E4_TSTORM_FCOE_CONN_AG_CTX_CF9_SHIFT			0
+#define E4_TSTORM_FCOE_CONN_AG_CTX_CF10_MASK			0x3
+#define E4_TSTORM_FCOE_CONN_AG_CTX_CF10_SHIFT			2
+#define E4_TSTORM_FCOE_CONN_AG_CTX_DUMMY_TIMER_CF_EN_MASK	0x1
+#define E4_TSTORM_FCOE_CONN_AG_CTX_DUMMY_TIMER_CF_EN_SHIFT	4
+#define E4_TSTORM_FCOE_CONN_AG_CTX_FLUSH_Q0_CF_EN_MASK		0x1
+#define E4_TSTORM_FCOE_CONN_AG_CTX_FLUSH_Q0_CF_EN_SHIFT		5
+#define E4_TSTORM_FCOE_CONN_AG_CTX_CF2EN_MASK			0x1
+#define E4_TSTORM_FCOE_CONN_AG_CTX_CF2EN_SHIFT			6
+#define E4_TSTORM_FCOE_CONN_AG_CTX_TIMER_STOP_ALL_CF_EN_MASK	0x1
+#define E4_TSTORM_FCOE_CONN_AG_CTX_TIMER_STOP_ALL_CF_EN_SHIFT	7
 	u8 flags4;
-#define TSTORM_FCOE_CONN_AG_CTX_CF4EN_MASK                 0x1
-#define TSTORM_FCOE_CONN_AG_CTX_CF4EN_SHIFT                0
-#define TSTORM_FCOE_CONN_AG_CTX_CF5EN_MASK                 0x1
-#define TSTORM_FCOE_CONN_AG_CTX_CF5EN_SHIFT                1
-#define TSTORM_FCOE_CONN_AG_CTX_CF6EN_MASK                 0x1
-#define TSTORM_FCOE_CONN_AG_CTX_CF6EN_SHIFT                2
-#define TSTORM_FCOE_CONN_AG_CTX_CF7EN_MASK                 0x1
-#define TSTORM_FCOE_CONN_AG_CTX_CF7EN_SHIFT                3
-#define TSTORM_FCOE_CONN_AG_CTX_CF8EN_MASK                 0x1
-#define TSTORM_FCOE_CONN_AG_CTX_CF8EN_SHIFT                4
-#define TSTORM_FCOE_CONN_AG_CTX_CF9EN_MASK                 0x1
-#define TSTORM_FCOE_CONN_AG_CTX_CF9EN_SHIFT                5
-#define TSTORM_FCOE_CONN_AG_CTX_CF10EN_MASK                0x1
-#define TSTORM_FCOE_CONN_AG_CTX_CF10EN_SHIFT               6
-#define TSTORM_FCOE_CONN_AG_CTX_RULE0EN_MASK               0x1
-#define TSTORM_FCOE_CONN_AG_CTX_RULE0EN_SHIFT              7
+#define E4_TSTORM_FCOE_CONN_AG_CTX_CF4EN_MASK		0x1
+#define E4_TSTORM_FCOE_CONN_AG_CTX_CF4EN_SHIFT		0
+#define E4_TSTORM_FCOE_CONN_AG_CTX_CF5EN_MASK		0x1
+#define E4_TSTORM_FCOE_CONN_AG_CTX_CF5EN_SHIFT		1
+#define E4_TSTORM_FCOE_CONN_AG_CTX_CF6EN_MASK		0x1
+#define E4_TSTORM_FCOE_CONN_AG_CTX_CF6EN_SHIFT		2
+#define E4_TSTORM_FCOE_CONN_AG_CTX_CF7EN_MASK		0x1
+#define E4_TSTORM_FCOE_CONN_AG_CTX_CF7EN_SHIFT		3
+#define E4_TSTORM_FCOE_CONN_AG_CTX_CF8EN_MASK		0x1
+#define E4_TSTORM_FCOE_CONN_AG_CTX_CF8EN_SHIFT		4
+#define E4_TSTORM_FCOE_CONN_AG_CTX_CF9EN_MASK		0x1
+#define E4_TSTORM_FCOE_CONN_AG_CTX_CF9EN_SHIFT		5
+#define E4_TSTORM_FCOE_CONN_AG_CTX_CF10EN_MASK		0x1
+#define E4_TSTORM_FCOE_CONN_AG_CTX_CF10EN_SHIFT		6
+#define E4_TSTORM_FCOE_CONN_AG_CTX_RULE0EN_MASK		0x1
+#define E4_TSTORM_FCOE_CONN_AG_CTX_RULE0EN_SHIFT	7
 	u8 flags5;
-#define TSTORM_FCOE_CONN_AG_CTX_RULE1EN_MASK               0x1
-#define TSTORM_FCOE_CONN_AG_CTX_RULE1EN_SHIFT              0
-#define TSTORM_FCOE_CONN_AG_CTX_RULE2EN_MASK               0x1
-#define TSTORM_FCOE_CONN_AG_CTX_RULE2EN_SHIFT              1
-#define TSTORM_FCOE_CONN_AG_CTX_RULE3EN_MASK               0x1
-#define TSTORM_FCOE_CONN_AG_CTX_RULE3EN_SHIFT              2
-#define TSTORM_FCOE_CONN_AG_CTX_RULE4EN_MASK               0x1
-#define TSTORM_FCOE_CONN_AG_CTX_RULE4EN_SHIFT              3
-#define TSTORM_FCOE_CONN_AG_CTX_RULE5EN_MASK               0x1
-#define TSTORM_FCOE_CONN_AG_CTX_RULE5EN_SHIFT              4
-#define TSTORM_FCOE_CONN_AG_CTX_RULE6EN_MASK               0x1
-#define TSTORM_FCOE_CONN_AG_CTX_RULE6EN_SHIFT              5
-#define TSTORM_FCOE_CONN_AG_CTX_RULE7EN_MASK               0x1
-#define TSTORM_FCOE_CONN_AG_CTX_RULE7EN_SHIFT              6
-#define TSTORM_FCOE_CONN_AG_CTX_RULE8EN_MASK               0x1
-#define TSTORM_FCOE_CONN_AG_CTX_RULE8EN_SHIFT              7
+#define E4_TSTORM_FCOE_CONN_AG_CTX_RULE1EN_MASK		0x1
+#define E4_TSTORM_FCOE_CONN_AG_CTX_RULE1EN_SHIFT	0
+#define E4_TSTORM_FCOE_CONN_AG_CTX_RULE2EN_MASK		0x1
+#define E4_TSTORM_FCOE_CONN_AG_CTX_RULE2EN_SHIFT	1
+#define E4_TSTORM_FCOE_CONN_AG_CTX_RULE3EN_MASK		0x1
+#define E4_TSTORM_FCOE_CONN_AG_CTX_RULE3EN_SHIFT	2
+#define E4_TSTORM_FCOE_CONN_AG_CTX_RULE4EN_MASK		0x1
+#define E4_TSTORM_FCOE_CONN_AG_CTX_RULE4EN_SHIFT	3
+#define E4_TSTORM_FCOE_CONN_AG_CTX_RULE5EN_MASK		0x1
+#define E4_TSTORM_FCOE_CONN_AG_CTX_RULE5EN_SHIFT	4
+#define E4_TSTORM_FCOE_CONN_AG_CTX_RULE6EN_MASK		0x1
+#define E4_TSTORM_FCOE_CONN_AG_CTX_RULE6EN_SHIFT	5
+#define E4_TSTORM_FCOE_CONN_AG_CTX_RULE7EN_MASK		0x1
+#define E4_TSTORM_FCOE_CONN_AG_CTX_RULE7EN_SHIFT	6
+#define E4_TSTORM_FCOE_CONN_AG_CTX_RULE8EN_MASK		0x1
+#define E4_TSTORM_FCOE_CONN_AG_CTX_RULE8EN_SHIFT	7
 	__le32 reg0;
 	__le32 reg1;
 };
 
-struct ustorm_fcoe_conn_ag_ctx {
+struct e4_ustorm_fcoe_conn_ag_ctx {
 	u8 byte0;
 	u8 byte1;
 	u8 flags0;
-#define USTORM_FCOE_CONN_AG_CTX_BIT0_MASK     0x1
-#define USTORM_FCOE_CONN_AG_CTX_BIT0_SHIFT    0
-#define USTORM_FCOE_CONN_AG_CTX_BIT1_MASK     0x1
-#define USTORM_FCOE_CONN_AG_CTX_BIT1_SHIFT    1
-#define USTORM_FCOE_CONN_AG_CTX_CF0_MASK      0x3
-#define USTORM_FCOE_CONN_AG_CTX_CF0_SHIFT     2
-#define USTORM_FCOE_CONN_AG_CTX_CF1_MASK      0x3
-#define USTORM_FCOE_CONN_AG_CTX_CF1_SHIFT     4
-#define USTORM_FCOE_CONN_AG_CTX_CF2_MASK      0x3
-#define USTORM_FCOE_CONN_AG_CTX_CF2_SHIFT     6
+#define E4_USTORM_FCOE_CONN_AG_CTX_BIT0_MASK	0x1
+#define E4_USTORM_FCOE_CONN_AG_CTX_BIT0_SHIFT	0
+#define E4_USTORM_FCOE_CONN_AG_CTX_BIT1_MASK	0x1
+#define E4_USTORM_FCOE_CONN_AG_CTX_BIT1_SHIFT	1
+#define E4_USTORM_FCOE_CONN_AG_CTX_CF0_MASK	0x3
+#define E4_USTORM_FCOE_CONN_AG_CTX_CF0_SHIFT	2
+#define E4_USTORM_FCOE_CONN_AG_CTX_CF1_MASK	0x3
+#define E4_USTORM_FCOE_CONN_AG_CTX_CF1_SHIFT	4
+#define E4_USTORM_FCOE_CONN_AG_CTX_CF2_MASK	0x3
+#define E4_USTORM_FCOE_CONN_AG_CTX_CF2_SHIFT	6
 	u8 flags1;
-#define USTORM_FCOE_CONN_AG_CTX_CF3_MASK      0x3
-#define USTORM_FCOE_CONN_AG_CTX_CF3_SHIFT     0
-#define USTORM_FCOE_CONN_AG_CTX_CF4_MASK      0x3
-#define USTORM_FCOE_CONN_AG_CTX_CF4_SHIFT     2
-#define USTORM_FCOE_CONN_AG_CTX_CF5_MASK      0x3
-#define USTORM_FCOE_CONN_AG_CTX_CF5_SHIFT     4
-#define USTORM_FCOE_CONN_AG_CTX_CF6_MASK      0x3
-#define USTORM_FCOE_CONN_AG_CTX_CF6_SHIFT     6
+#define E4_USTORM_FCOE_CONN_AG_CTX_CF3_MASK	0x3
+#define E4_USTORM_FCOE_CONN_AG_CTX_CF3_SHIFT	0
+#define E4_USTORM_FCOE_CONN_AG_CTX_CF4_MASK	0x3
+#define E4_USTORM_FCOE_CONN_AG_CTX_CF4_SHIFT	2
+#define E4_USTORM_FCOE_CONN_AG_CTX_CF5_MASK	0x3
+#define E4_USTORM_FCOE_CONN_AG_CTX_CF5_SHIFT	4
+#define E4_USTORM_FCOE_CONN_AG_CTX_CF6_MASK	0x3
+#define E4_USTORM_FCOE_CONN_AG_CTX_CF6_SHIFT	6
 	u8 flags2;
-#define USTORM_FCOE_CONN_AG_CTX_CF0EN_MASK    0x1
-#define USTORM_FCOE_CONN_AG_CTX_CF0EN_SHIFT   0
-#define USTORM_FCOE_CONN_AG_CTX_CF1EN_MASK    0x1
-#define USTORM_FCOE_CONN_AG_CTX_CF1EN_SHIFT   1
-#define USTORM_FCOE_CONN_AG_CTX_CF2EN_MASK    0x1
-#define USTORM_FCOE_CONN_AG_CTX_CF2EN_SHIFT   2
-#define USTORM_FCOE_CONN_AG_CTX_CF3EN_MASK    0x1
-#define USTORM_FCOE_CONN_AG_CTX_CF3EN_SHIFT   3
-#define USTORM_FCOE_CONN_AG_CTX_CF4EN_MASK    0x1
-#define USTORM_FCOE_CONN_AG_CTX_CF4EN_SHIFT   4
-#define USTORM_FCOE_CONN_AG_CTX_CF5EN_MASK    0x1
-#define USTORM_FCOE_CONN_AG_CTX_CF5EN_SHIFT   5
-#define USTORM_FCOE_CONN_AG_CTX_CF6EN_MASK    0x1
-#define USTORM_FCOE_CONN_AG_CTX_CF6EN_SHIFT   6
-#define USTORM_FCOE_CONN_AG_CTX_RULE0EN_MASK  0x1
-#define USTORM_FCOE_CONN_AG_CTX_RULE0EN_SHIFT 7
+#define E4_USTORM_FCOE_CONN_AG_CTX_CF0EN_MASK		0x1
+#define E4_USTORM_FCOE_CONN_AG_CTX_CF0EN_SHIFT		0
+#define E4_USTORM_FCOE_CONN_AG_CTX_CF1EN_MASK		0x1
+#define E4_USTORM_FCOE_CONN_AG_CTX_CF1EN_SHIFT		1
+#define E4_USTORM_FCOE_CONN_AG_CTX_CF2EN_MASK		0x1
+#define E4_USTORM_FCOE_CONN_AG_CTX_CF2EN_SHIFT		2
+#define E4_USTORM_FCOE_CONN_AG_CTX_CF3EN_MASK		0x1
+#define E4_USTORM_FCOE_CONN_AG_CTX_CF3EN_SHIFT		3
+#define E4_USTORM_FCOE_CONN_AG_CTX_CF4EN_MASK		0x1
+#define E4_USTORM_FCOE_CONN_AG_CTX_CF4EN_SHIFT		4
+#define E4_USTORM_FCOE_CONN_AG_CTX_CF5EN_MASK		0x1
+#define E4_USTORM_FCOE_CONN_AG_CTX_CF5EN_SHIFT		5
+#define E4_USTORM_FCOE_CONN_AG_CTX_CF6EN_MASK		0x1
+#define E4_USTORM_FCOE_CONN_AG_CTX_CF6EN_SHIFT		6
+#define E4_USTORM_FCOE_CONN_AG_CTX_RULE0EN_MASK		0x1
+#define E4_USTORM_FCOE_CONN_AG_CTX_RULE0EN_SHIFT	7
 	u8 flags3;
-#define USTORM_FCOE_CONN_AG_CTX_RULE1EN_MASK  0x1
-#define USTORM_FCOE_CONN_AG_CTX_RULE1EN_SHIFT 0
-#define USTORM_FCOE_CONN_AG_CTX_RULE2EN_MASK  0x1
-#define USTORM_FCOE_CONN_AG_CTX_RULE2EN_SHIFT 1
-#define USTORM_FCOE_CONN_AG_CTX_RULE3EN_MASK  0x1
-#define USTORM_FCOE_CONN_AG_CTX_RULE3EN_SHIFT 2
-#define USTORM_FCOE_CONN_AG_CTX_RULE4EN_MASK  0x1
-#define USTORM_FCOE_CONN_AG_CTX_RULE4EN_SHIFT 3
-#define USTORM_FCOE_CONN_AG_CTX_RULE5EN_MASK  0x1
-#define USTORM_FCOE_CONN_AG_CTX_RULE5EN_SHIFT 4
-#define USTORM_FCOE_CONN_AG_CTX_RULE6EN_MASK  0x1
-#define USTORM_FCOE_CONN_AG_CTX_RULE6EN_SHIFT 5
-#define USTORM_FCOE_CONN_AG_CTX_RULE7EN_MASK  0x1
-#define USTORM_FCOE_CONN_AG_CTX_RULE7EN_SHIFT 6
-#define USTORM_FCOE_CONN_AG_CTX_RULE8EN_MASK  0x1
-#define USTORM_FCOE_CONN_AG_CTX_RULE8EN_SHIFT 7
+#define E4_USTORM_FCOE_CONN_AG_CTX_RULE1EN_MASK		0x1
+#define E4_USTORM_FCOE_CONN_AG_CTX_RULE1EN_SHIFT	0
+#define E4_USTORM_FCOE_CONN_AG_CTX_RULE2EN_MASK		0x1
+#define E4_USTORM_FCOE_CONN_AG_CTX_RULE2EN_SHIFT	1
+#define E4_USTORM_FCOE_CONN_AG_CTX_RULE3EN_MASK		0x1
+#define E4_USTORM_FCOE_CONN_AG_CTX_RULE3EN_SHIFT	2
+#define E4_USTORM_FCOE_CONN_AG_CTX_RULE4EN_MASK		0x1
+#define E4_USTORM_FCOE_CONN_AG_CTX_RULE4EN_SHIFT	3
+#define E4_USTORM_FCOE_CONN_AG_CTX_RULE5EN_MASK		0x1
+#define E4_USTORM_FCOE_CONN_AG_CTX_RULE5EN_SHIFT	4
+#define E4_USTORM_FCOE_CONN_AG_CTX_RULE6EN_MASK		0x1
+#define E4_USTORM_FCOE_CONN_AG_CTX_RULE6EN_SHIFT	5
+#define E4_USTORM_FCOE_CONN_AG_CTX_RULE7EN_MASK		0x1
+#define E4_USTORM_FCOE_CONN_AG_CTX_RULE7EN_SHIFT	6
+#define E4_USTORM_FCOE_CONN_AG_CTX_RULE8EN_MASK		0x1
+#define E4_USTORM_FCOE_CONN_AG_CTX_RULE8EN_SHIFT	7
 	u8 byte2;
 	u8 byte3;
 	__le16 word0;
@@ -9996,72 +10633,76 @@ struct ustorm_fcoe_conn_ag_ctx {
 	__le16 word3;
 };
 
+/* The fcoe storm context of Tstorm */
 struct tstorm_fcoe_conn_st_ctx {
 	__le16 stat_ram_addr;
 	__le16 rx_max_fc_payload_len;
 	__le16 e_d_tov_val;
 	u8 flags;
-#define TSTORM_FCOE_CONN_ST_CTX_INC_SEQ_CNT_MASK   0x1
-#define TSTORM_FCOE_CONN_ST_CTX_INC_SEQ_CNT_SHIFT  0
-#define TSTORM_FCOE_CONN_ST_CTX_SUPPORT_CONF_MASK  0x1
-#define TSTORM_FCOE_CONN_ST_CTX_SUPPORT_CONF_SHIFT 1
-#define TSTORM_FCOE_CONN_ST_CTX_DEF_Q_IDX_MASK     0x3F
-#define TSTORM_FCOE_CONN_ST_CTX_DEF_Q_IDX_SHIFT    2
+#define TSTORM_FCOE_CONN_ST_CTX_INC_SEQ_CNT_MASK	0x1
+#define TSTORM_FCOE_CONN_ST_CTX_INC_SEQ_CNT_SHIFT	0
+#define TSTORM_FCOE_CONN_ST_CTX_SUPPORT_CONF_MASK	0x1
+#define TSTORM_FCOE_CONN_ST_CTX_SUPPORT_CONF_SHIFT	1
+#define TSTORM_FCOE_CONN_ST_CTX_DEF_Q_IDX_MASK		0x3F
+#define TSTORM_FCOE_CONN_ST_CTX_DEF_Q_IDX_SHIFT		2
 	u8 timers_cleanup_invocation_cnt;
 	__le32 reserved1[2];
-	__le32 dst_mac_address_bytes0to3;
-	__le16 dst_mac_address_bytes4to5;
+	__le32 dst_mac_address_bytes_0_to_3;
+	__le16 dst_mac_address_bytes_4_to_5;
 	__le16 ramrod_echo;
 	u8 flags1;
-#define TSTORM_FCOE_CONN_ST_CTX_MODE_MASK          0x3
-#define TSTORM_FCOE_CONN_ST_CTX_MODE_SHIFT         0
-#define TSTORM_FCOE_CONN_ST_CTX_RESERVED_MASK      0x3F
-#define TSTORM_FCOE_CONN_ST_CTX_RESERVED_SHIFT     2
-	u8 q_relative_offset;
+#define TSTORM_FCOE_CONN_ST_CTX_MODE_MASK	0x3
+#define TSTORM_FCOE_CONN_ST_CTX_MODE_SHIFT	0
+#define TSTORM_FCOE_CONN_ST_CTX_RESERVED_MASK	0x3F
+#define TSTORM_FCOE_CONN_ST_CTX_RESERVED_SHIFT	2
+	u8 cq_relative_offset;
+	u8 cmdq_relative_offset;
 	u8 bdq_resource_id;
-	u8 reserved0[5];
+	u8 reserved0[4];
 };
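
The dst_mac_address_bytes_0_to_3/_4_to_5 rename makes the byte split explicit: a 6-byte MAC spread across a __le32 and a __le16. A hedged sketch of the packing this layout implies (the helper is hypothetical and the byte-to-lane ordering is an assumption; the firmware interface dictates the real ordering):

	/* Hypothetical helper: pack a 6-byte MAC into the split fields.
	 * Assumes kernel cpu_to_le32()/cpu_to_le16() byteorder helpers
	 * and that MAC byte 0 lands in the low-order lane.
	 */
	static void example_pack_dst_mac(struct tstorm_fcoe_conn_st_ctx *ctx,
					 const u8 *mac)
	{
		ctx->dst_mac_address_bytes_0_to_3 =
			cpu_to_le32((u32)mac[0] | ((u32)mac[1] << 8) |
				    ((u32)mac[2] << 16) | ((u32)mac[3] << 24));
		ctx->dst_mac_address_bytes_4_to_5 =
			cpu_to_le16((u16)(mac[4] | (mac[5] << 8)));
	}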
 
-struct mstorm_fcoe_conn_ag_ctx {
+struct e4_mstorm_fcoe_conn_ag_ctx {
 	u8 byte0;
 	u8 byte1;
 	u8 flags0;
-#define MSTORM_FCOE_CONN_AG_CTX_BIT0_MASK     0x1
-#define MSTORM_FCOE_CONN_AG_CTX_BIT0_SHIFT    0
-#define MSTORM_FCOE_CONN_AG_CTX_BIT1_MASK     0x1
-#define MSTORM_FCOE_CONN_AG_CTX_BIT1_SHIFT    1
-#define MSTORM_FCOE_CONN_AG_CTX_CF0_MASK      0x3
-#define MSTORM_FCOE_CONN_AG_CTX_CF0_SHIFT     2
-#define MSTORM_FCOE_CONN_AG_CTX_CF1_MASK      0x3
-#define MSTORM_FCOE_CONN_AG_CTX_CF1_SHIFT     4
-#define MSTORM_FCOE_CONN_AG_CTX_CF2_MASK      0x3
-#define MSTORM_FCOE_CONN_AG_CTX_CF2_SHIFT     6
+#define E4_MSTORM_FCOE_CONN_AG_CTX_BIT0_MASK	0x1
+#define E4_MSTORM_FCOE_CONN_AG_CTX_BIT0_SHIFT	0
+#define E4_MSTORM_FCOE_CONN_AG_CTX_BIT1_MASK	0x1
+#define E4_MSTORM_FCOE_CONN_AG_CTX_BIT1_SHIFT	1
+#define E4_MSTORM_FCOE_CONN_AG_CTX_CF0_MASK	0x3
+#define E4_MSTORM_FCOE_CONN_AG_CTX_CF0_SHIFT	2
+#define E4_MSTORM_FCOE_CONN_AG_CTX_CF1_MASK	0x3
+#define E4_MSTORM_FCOE_CONN_AG_CTX_CF1_SHIFT	4
+#define E4_MSTORM_FCOE_CONN_AG_CTX_CF2_MASK	0x3
+#define E4_MSTORM_FCOE_CONN_AG_CTX_CF2_SHIFT	6
 	u8 flags1;
-#define MSTORM_FCOE_CONN_AG_CTX_CF0EN_MASK    0x1
-#define MSTORM_FCOE_CONN_AG_CTX_CF0EN_SHIFT   0
-#define MSTORM_FCOE_CONN_AG_CTX_CF1EN_MASK    0x1
-#define MSTORM_FCOE_CONN_AG_CTX_CF1EN_SHIFT   1
-#define MSTORM_FCOE_CONN_AG_CTX_CF2EN_MASK    0x1
-#define MSTORM_FCOE_CONN_AG_CTX_CF2EN_SHIFT   2
-#define MSTORM_FCOE_CONN_AG_CTX_RULE0EN_MASK  0x1
-#define MSTORM_FCOE_CONN_AG_CTX_RULE0EN_SHIFT 3
-#define MSTORM_FCOE_CONN_AG_CTX_RULE1EN_MASK  0x1
-#define MSTORM_FCOE_CONN_AG_CTX_RULE1EN_SHIFT 4
-#define MSTORM_FCOE_CONN_AG_CTX_RULE2EN_MASK  0x1
-#define MSTORM_FCOE_CONN_AG_CTX_RULE2EN_SHIFT 5
-#define MSTORM_FCOE_CONN_AG_CTX_RULE3EN_MASK  0x1
-#define MSTORM_FCOE_CONN_AG_CTX_RULE3EN_SHIFT 6
-#define MSTORM_FCOE_CONN_AG_CTX_RULE4EN_MASK  0x1
-#define MSTORM_FCOE_CONN_AG_CTX_RULE4EN_SHIFT 7
+#define E4_MSTORM_FCOE_CONN_AG_CTX_CF0EN_MASK		0x1
+#define E4_MSTORM_FCOE_CONN_AG_CTX_CF0EN_SHIFT		0
+#define E4_MSTORM_FCOE_CONN_AG_CTX_CF1EN_MASK		0x1
+#define E4_MSTORM_FCOE_CONN_AG_CTX_CF1EN_SHIFT		1
+#define E4_MSTORM_FCOE_CONN_AG_CTX_CF2EN_MASK		0x1
+#define E4_MSTORM_FCOE_CONN_AG_CTX_CF2EN_SHIFT		2
+#define E4_MSTORM_FCOE_CONN_AG_CTX_RULE0EN_MASK		0x1
+#define E4_MSTORM_FCOE_CONN_AG_CTX_RULE0EN_SHIFT	3
+#define E4_MSTORM_FCOE_CONN_AG_CTX_RULE1EN_MASK		0x1
+#define E4_MSTORM_FCOE_CONN_AG_CTX_RULE1EN_SHIFT	4
+#define E4_MSTORM_FCOE_CONN_AG_CTX_RULE2EN_MASK		0x1
+#define E4_MSTORM_FCOE_CONN_AG_CTX_RULE2EN_SHIFT	5
+#define E4_MSTORM_FCOE_CONN_AG_CTX_RULE3EN_MASK		0x1
+#define E4_MSTORM_FCOE_CONN_AG_CTX_RULE3EN_SHIFT	6
+#define E4_MSTORM_FCOE_CONN_AG_CTX_RULE4EN_MASK		0x1
+#define E4_MSTORM_FCOE_CONN_AG_CTX_RULE4EN_SHIFT	7
 	__le16 word0;
 	__le16 word1;
 	__le32 reg0;
 	__le32 reg1;
 };
 
+/* Fast path part of the fcoe storm context of Mstorm */
 struct fcoe_mstorm_fcoe_conn_st_ctx_fp {
 	__le16 xfer_prod;
-	__le16 reserved1;
+	u8 num_cqs;
+	u8 reserved1;
 	u8 protection_info;
 #define FCOE_MSTORM_FCOE_CONN_ST_CTX_FP_SUPPORT_PROTECTION_MASK  0x1
 #define FCOE_MSTORM_FCOE_CONN_ST_CTX_FP_SUPPORT_PROTECTION_SHIFT 0
@@ -10073,6 +10714,7 @@ struct fcoe_mstorm_fcoe_conn_st_ctx_fp {
 	u8 reserved2[2];
 };
 
+/* Non fast path part of the fcoe storm context of Mstorm */
 struct fcoe_mstorm_fcoe_conn_st_ctx_non_fp {
 	__le16 conn_id;
 	__le16 stat_ram_addr;
@@ -10088,37 +10730,46 @@ struct fcoe_mstorm_fcoe_conn_st_ctx_non_fp {
 	struct regpair reserved2[3];
 };
 
+/* The fcoe storm context of Mstorm */
 struct mstorm_fcoe_conn_st_ctx {
 	struct fcoe_mstorm_fcoe_conn_st_ctx_fp fp;
 	struct fcoe_mstorm_fcoe_conn_st_ctx_non_fp non_fp;
 };
 
-struct fcoe_conn_context {
+/* FCoE connection context */
+struct e4_fcoe_conn_context {
 	struct ystorm_fcoe_conn_st_ctx ystorm_st_context;
 	struct pstorm_fcoe_conn_st_ctx pstorm_st_context;
 	struct regpair pstorm_st_padding[2];
 	struct xstorm_fcoe_conn_st_ctx xstorm_st_context;
-	struct xstorm_fcoe_conn_ag_ctx xstorm_ag_context;
+	struct e4_xstorm_fcoe_conn_ag_ctx xstorm_ag_context;
 	struct regpair xstorm_ag_padding[6];
 	struct ustorm_fcoe_conn_st_ctx ustorm_st_context;
 	struct regpair ustorm_st_padding[2];
-	struct tstorm_fcoe_conn_ag_ctx tstorm_ag_context;
+	struct e4_tstorm_fcoe_conn_ag_ctx tstorm_ag_context;
 	struct regpair tstorm_ag_padding[2];
 	struct timers_context timer_context;
-	struct ustorm_fcoe_conn_ag_ctx ustorm_ag_context;
+	struct e4_ustorm_fcoe_conn_ag_ctx ustorm_ag_context;
 	struct tstorm_fcoe_conn_st_ctx tstorm_st_context;
-	struct mstorm_fcoe_conn_ag_ctx mstorm_ag_context;
+	struct e4_mstorm_fcoe_conn_ag_ctx mstorm_ag_context;
 	struct mstorm_fcoe_conn_st_ctx mstorm_st_context;
 };
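
e4_fcoe_conn_context stitches the per-storm state (st) and aggregate (ag) contexts together, with regpair members padding each piece out for the firmware. A sketch of the kind of compile-time layout guard this invites (the regpair-multiple invariant is an assumption inferred from the padding members, not a documented requirement):

	/* BUILD_BUG_ON() (from <linux/bug.h>) fails the build if its
	 * condition is true; here, if the composed context is not a
	 * whole number of 8-byte regpairs.
	 */
	static inline void example_check_e4_fcoe_ctx_layout(void)
	{
		BUILD_BUG_ON(sizeof(struct e4_fcoe_conn_context) %
			     sizeof(struct regpair) != 0);
	}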
 
+/* FCoE connection offload params passed by driver to FW in FCoE offload
+ * ramrod.
+ */
 struct fcoe_conn_offload_ramrod_params {
 	struct fcoe_conn_offload_ramrod_data offload_ramrod_data;
 };
 
+/* FCoE connection terminate params passed by driver to FW in FCoE terminate
+ * conn ramrod.
+ */
 struct fcoe_conn_terminate_ramrod_params {
 	struct fcoe_conn_terminate_ramrod_data terminate_ramrod_data;
 };
 
+/* FCoE event type */
 enum fcoe_event_type {
 	FCOE_EVENT_INIT_FUNC,
 	FCOE_EVENT_DESTROY_FUNC,
@@ -10129,10 +10780,12 @@ enum fcoe_event_type {
 	MAX_FCOE_EVENT_TYPE
 };
 
+/* FCoE init params passed by driver to FW in FCoE init ramrod */
 struct fcoe_init_ramrod_params {
 	struct fcoe_init_func_ramrod_data init_ramrod_data;
 };
 
+/* FCoE ramrod Command IDs */
 enum fcoe_ramrod_cmd_id {
 	FCOE_RAMROD_CMD_ID_INIT_FUNC,
 	FCOE_RAMROD_CMD_ID_DESTROY_FUNC,
@@ -10142,41 +10795,44 @@ enum fcoe_ramrod_cmd_id {
 	MAX_FCOE_RAMROD_CMD_ID
 };
 
+/* FCoE statistics params buffer passed by driver to FW in FCoE statistics
+ * ramrod.
+ */
 struct fcoe_stat_ramrod_params {
 	struct fcoe_stat_ramrod_data stat_ramrod_data;
 };
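
Each *_ramrod_params wrapper above pairs with one fcoe_ramrod_cmd_id when the driver posts a slow-path request. A hedged sketch of that pairing (post_fcoe_ramrod() is a hypothetical stand-in for the driver's real slow-path submission machinery):

	/* post_fcoe_ramrod() is hypothetical; it stands in for the
	 * driver's slow-path queue posting.
	 */
	int post_fcoe_ramrod(enum fcoe_ramrod_cmd_id cmd, void *ramrod_data);

	static int example_post_init(struct fcoe_init_ramrod_params *p)
	{
		/* FCOE_RAMROD_CMD_ID_INIT_FUNC carries a
		 * fcoe_init_ramrod_params buffer; every cmd id has a
		 * matching params wrapper.
		 */
		return post_fcoe_ramrod(FCOE_RAMROD_CMD_ID_INIT_FUNC, p);
	}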
 
-struct ystorm_fcoe_conn_ag_ctx {
+struct e4_ystorm_fcoe_conn_ag_ctx {
 	u8 byte0;
 	u8 byte1;
 	u8 flags0;
-#define YSTORM_FCOE_CONN_AG_CTX_BIT0_MASK     0x1
-#define YSTORM_FCOE_CONN_AG_CTX_BIT0_SHIFT    0
-#define YSTORM_FCOE_CONN_AG_CTX_BIT1_MASK     0x1
-#define YSTORM_FCOE_CONN_AG_CTX_BIT1_SHIFT    1
-#define YSTORM_FCOE_CONN_AG_CTX_CF0_MASK      0x3
-#define YSTORM_FCOE_CONN_AG_CTX_CF0_SHIFT     2
-#define YSTORM_FCOE_CONN_AG_CTX_CF1_MASK      0x3
-#define YSTORM_FCOE_CONN_AG_CTX_CF1_SHIFT     4
-#define YSTORM_FCOE_CONN_AG_CTX_CF2_MASK      0x3
-#define YSTORM_FCOE_CONN_AG_CTX_CF2_SHIFT     6
+#define E4_YSTORM_FCOE_CONN_AG_CTX_BIT0_MASK	0x1
+#define E4_YSTORM_FCOE_CONN_AG_CTX_BIT0_SHIFT	0
+#define E4_YSTORM_FCOE_CONN_AG_CTX_BIT1_MASK	0x1
+#define E4_YSTORM_FCOE_CONN_AG_CTX_BIT1_SHIFT	1
+#define E4_YSTORM_FCOE_CONN_AG_CTX_CF0_MASK	0x3
+#define E4_YSTORM_FCOE_CONN_AG_CTX_CF0_SHIFT	2
+#define E4_YSTORM_FCOE_CONN_AG_CTX_CF1_MASK	0x3
+#define E4_YSTORM_FCOE_CONN_AG_CTX_CF1_SHIFT	4
+#define E4_YSTORM_FCOE_CONN_AG_CTX_CF2_MASK	0x3
+#define E4_YSTORM_FCOE_CONN_AG_CTX_CF2_SHIFT	6
 	u8 flags1;
-#define YSTORM_FCOE_CONN_AG_CTX_CF0EN_MASK    0x1
-#define YSTORM_FCOE_CONN_AG_CTX_CF0EN_SHIFT   0
-#define YSTORM_FCOE_CONN_AG_CTX_CF1EN_MASK    0x1
-#define YSTORM_FCOE_CONN_AG_CTX_CF1EN_SHIFT   1
-#define YSTORM_FCOE_CONN_AG_CTX_CF2EN_MASK    0x1
-#define YSTORM_FCOE_CONN_AG_CTX_CF2EN_SHIFT   2
-#define YSTORM_FCOE_CONN_AG_CTX_RULE0EN_MASK  0x1
-#define YSTORM_FCOE_CONN_AG_CTX_RULE0EN_SHIFT 3
-#define YSTORM_FCOE_CONN_AG_CTX_RULE1EN_MASK  0x1
-#define YSTORM_FCOE_CONN_AG_CTX_RULE1EN_SHIFT 4
-#define YSTORM_FCOE_CONN_AG_CTX_RULE2EN_MASK  0x1
-#define YSTORM_FCOE_CONN_AG_CTX_RULE2EN_SHIFT 5
-#define YSTORM_FCOE_CONN_AG_CTX_RULE3EN_MASK  0x1
-#define YSTORM_FCOE_CONN_AG_CTX_RULE3EN_SHIFT 6
-#define YSTORM_FCOE_CONN_AG_CTX_RULE4EN_MASK  0x1
-#define YSTORM_FCOE_CONN_AG_CTX_RULE4EN_SHIFT 7
+#define E4_YSTORM_FCOE_CONN_AG_CTX_CF0EN_MASK		0x1
+#define E4_YSTORM_FCOE_CONN_AG_CTX_CF0EN_SHIFT		0
+#define E4_YSTORM_FCOE_CONN_AG_CTX_CF1EN_MASK		0x1
+#define E4_YSTORM_FCOE_CONN_AG_CTX_CF1EN_SHIFT		1
+#define E4_YSTORM_FCOE_CONN_AG_CTX_CF2EN_MASK		0x1
+#define E4_YSTORM_FCOE_CONN_AG_CTX_CF2EN_SHIFT		2
+#define E4_YSTORM_FCOE_CONN_AG_CTX_RULE0EN_MASK		0x1
+#define E4_YSTORM_FCOE_CONN_AG_CTX_RULE0EN_SHIFT	3
+#define E4_YSTORM_FCOE_CONN_AG_CTX_RULE1EN_MASK		0x1
+#define E4_YSTORM_FCOE_CONN_AG_CTX_RULE1EN_SHIFT	4
+#define E4_YSTORM_FCOE_CONN_AG_CTX_RULE2EN_MASK		0x1
+#define E4_YSTORM_FCOE_CONN_AG_CTX_RULE2EN_SHIFT	5
+#define E4_YSTORM_FCOE_CONN_AG_CTX_RULE3EN_MASK		0x1
+#define E4_YSTORM_FCOE_CONN_AG_CTX_RULE3EN_SHIFT	6
+#define E4_YSTORM_FCOE_CONN_AG_CTX_RULE4EN_MASK		0x1
+#define E4_YSTORM_FCOE_CONN_AG_CTX_RULE4EN_SHIFT	7
 	u8 byte2;
 	u8 byte3;
 	__le16 word0;
@@ -10190,230 +10846,233 @@ struct ystorm_fcoe_conn_ag_ctx {
 	__le32 reg3;
 };
 
+/* The iscsi storm connection context of Ystorm */
 struct ystorm_iscsi_conn_st_ctx {
-	__le32 reserved[4];
+	__le32 reserved[8];
 };
 
+/* Combined iSCSI and TCP storm connection of Pstorm */
 struct pstorm_iscsi_tcp_conn_st_ctx {
 	__le32 tcp[32];
 	__le32 iscsi[4];
 };
 
+/* The combined TCP and iSCSI storm context of Xstorm */
 struct xstorm_iscsi_tcp_conn_st_ctx {
-	__le32 reserved_iscsi[40];
 	__le32 reserved_tcp[4];
+	__le32 reserved_iscsi[44];
 };
 
-struct xstorm_iscsi_conn_ag_ctx {
+struct e4_xstorm_iscsi_conn_ag_ctx {
 	u8 cdu_validation;
 	u8 state;
 	u8 flags0;
-#define XSTORM_ISCSI_CONN_AG_CTX_EXIST_IN_QM0_MASK                0x1
-#define XSTORM_ISCSI_CONN_AG_CTX_EXIST_IN_QM0_SHIFT               0
-#define XSTORM_ISCSI_CONN_AG_CTX_EXIST_IN_QM1_MASK                0x1
-#define XSTORM_ISCSI_CONN_AG_CTX_EXIST_IN_QM1_SHIFT               1
-#define XSTORM_ISCSI_CONN_AG_CTX_RESERVED1_MASK                   0x1
-#define XSTORM_ISCSI_CONN_AG_CTX_RESERVED1_SHIFT                  2
-#define XSTORM_ISCSI_CONN_AG_CTX_EXIST_IN_QM3_MASK                0x1
-#define XSTORM_ISCSI_CONN_AG_CTX_EXIST_IN_QM3_SHIFT               3
-#define XSTORM_ISCSI_CONN_AG_CTX_BIT4_MASK                        0x1
-#define XSTORM_ISCSI_CONN_AG_CTX_BIT4_SHIFT                       4
-#define XSTORM_ISCSI_CONN_AG_CTX_RESERVED2_MASK                   0x1
-#define XSTORM_ISCSI_CONN_AG_CTX_RESERVED2_SHIFT                  5
-#define XSTORM_ISCSI_CONN_AG_CTX_BIT6_MASK                        0x1
-#define XSTORM_ISCSI_CONN_AG_CTX_BIT6_SHIFT                       6
-#define XSTORM_ISCSI_CONN_AG_CTX_BIT7_MASK                        0x1
-#define XSTORM_ISCSI_CONN_AG_CTX_BIT7_SHIFT                       7
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_EXIST_IN_QM0_MASK	0x1
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_EXIST_IN_QM0_SHIFT	0
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_EXIST_IN_QM1_MASK	0x1
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_EXIST_IN_QM1_SHIFT	1
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_RESERVED1_MASK	0x1
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_RESERVED1_SHIFT	2
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_EXIST_IN_QM3_MASK	0x1
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_EXIST_IN_QM3_SHIFT	3
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_BIT4_MASK		0x1
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_BIT4_SHIFT		4
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_RESERVED2_MASK	0x1
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_RESERVED2_SHIFT	5
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_BIT6_MASK		0x1
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_BIT6_SHIFT		6
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_BIT7_MASK		0x1
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_BIT7_SHIFT		7
 	u8 flags1;
-#define XSTORM_ISCSI_CONN_AG_CTX_BIT8_MASK                        0x1
-#define XSTORM_ISCSI_CONN_AG_CTX_BIT8_SHIFT                       0
-#define XSTORM_ISCSI_CONN_AG_CTX_BIT9_MASK                        0x1
-#define XSTORM_ISCSI_CONN_AG_CTX_BIT9_SHIFT                       1
-#define XSTORM_ISCSI_CONN_AG_CTX_BIT10_MASK                       0x1
-#define XSTORM_ISCSI_CONN_AG_CTX_BIT10_SHIFT                      2
-#define XSTORM_ISCSI_CONN_AG_CTX_BIT11_MASK                       0x1
-#define XSTORM_ISCSI_CONN_AG_CTX_BIT11_SHIFT                      3
-#define XSTORM_ISCSI_CONN_AG_CTX_BIT12_MASK                       0x1
-#define XSTORM_ISCSI_CONN_AG_CTX_BIT12_SHIFT                      4
-#define XSTORM_ISCSI_CONN_AG_CTX_BIT13_MASK                       0x1
-#define XSTORM_ISCSI_CONN_AG_CTX_BIT13_SHIFT                      5
-#define XSTORM_ISCSI_CONN_AG_CTX_BIT14_MASK                       0x1
-#define XSTORM_ISCSI_CONN_AG_CTX_BIT14_SHIFT                      6
-#define XSTORM_ISCSI_CONN_AG_CTX_TX_TRUNCATE_MASK                 0x1
-#define XSTORM_ISCSI_CONN_AG_CTX_TX_TRUNCATE_SHIFT                7
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_BIT8_MASK		0x1
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_BIT8_SHIFT		0
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_BIT9_MASK		0x1
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_BIT9_SHIFT		1
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_BIT10_MASK		0x1
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_BIT10_SHIFT		2
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_BIT11_MASK		0x1
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_BIT11_SHIFT		3
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_BIT12_MASK		0x1
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_BIT12_SHIFT		4
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_BIT13_MASK		0x1
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_BIT13_SHIFT		5
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_BIT14_MASK		0x1
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_BIT14_SHIFT		6
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_TX_TRUNCATE_MASK	0x1
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_TX_TRUNCATE_SHIFT	7
 	u8 flags2;
-#define XSTORM_ISCSI_CONN_AG_CTX_CF0_MASK                         0x3
-#define XSTORM_ISCSI_CONN_AG_CTX_CF0_SHIFT                        0
-#define XSTORM_ISCSI_CONN_AG_CTX_CF1_MASK                         0x3
-#define XSTORM_ISCSI_CONN_AG_CTX_CF1_SHIFT                        2
-#define XSTORM_ISCSI_CONN_AG_CTX_CF2_MASK                         0x3
-#define XSTORM_ISCSI_CONN_AG_CTX_CF2_SHIFT                        4
-#define XSTORM_ISCSI_CONN_AG_CTX_TIMER_STOP_ALL_MASK              0x3
-#define XSTORM_ISCSI_CONN_AG_CTX_TIMER_STOP_ALL_SHIFT             6
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF0_MASK			0x3
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF0_SHIFT			0
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF1_MASK			0x3
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF1_SHIFT			2
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF2_MASK			0x3
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF2_SHIFT			4
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_TIMER_STOP_ALL_MASK		0x3
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_TIMER_STOP_ALL_SHIFT	6
 	u8 flags3;
-#define XSTORM_ISCSI_CONN_AG_CTX_CF4_MASK                         0x3
-#define XSTORM_ISCSI_CONN_AG_CTX_CF4_SHIFT                        0
-#define XSTORM_ISCSI_CONN_AG_CTX_CF5_MASK                         0x3
-#define XSTORM_ISCSI_CONN_AG_CTX_CF5_SHIFT                        2
-#define XSTORM_ISCSI_CONN_AG_CTX_CF6_MASK                         0x3
-#define XSTORM_ISCSI_CONN_AG_CTX_CF6_SHIFT                        4
-#define XSTORM_ISCSI_CONN_AG_CTX_CF7_MASK                         0x3
-#define XSTORM_ISCSI_CONN_AG_CTX_CF7_SHIFT                        6
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF4_MASK	0x3
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF4_SHIFT	0
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF5_MASK	0x3
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF5_SHIFT	2
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF6_MASK	0x3
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF6_SHIFT	4
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF7_MASK	0x3
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF7_SHIFT	6
 	u8 flags4;
-#define XSTORM_ISCSI_CONN_AG_CTX_CF8_MASK                         0x3
-#define XSTORM_ISCSI_CONN_AG_CTX_CF8_SHIFT                        0
-#define XSTORM_ISCSI_CONN_AG_CTX_CF9_MASK                         0x3
-#define XSTORM_ISCSI_CONN_AG_CTX_CF9_SHIFT                        2
-#define XSTORM_ISCSI_CONN_AG_CTX_CF10_MASK                        0x3
-#define XSTORM_ISCSI_CONN_AG_CTX_CF10_SHIFT                       4
-#define XSTORM_ISCSI_CONN_AG_CTX_CF11_MASK                        0x3
-#define XSTORM_ISCSI_CONN_AG_CTX_CF11_SHIFT                       6
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF8_MASK	0x3
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF8_SHIFT	0
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF9_MASK	0x3
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF9_SHIFT	2
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF10_MASK	0x3
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF10_SHIFT	4
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF11_MASK	0x3
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF11_SHIFT	6
 	u8 flags5;
-#define XSTORM_ISCSI_CONN_AG_CTX_CF12_MASK                        0x3
-#define XSTORM_ISCSI_CONN_AG_CTX_CF12_SHIFT                       0
-#define XSTORM_ISCSI_CONN_AG_CTX_CF13_MASK                        0x3
-#define XSTORM_ISCSI_CONN_AG_CTX_CF13_SHIFT                       2
-#define XSTORM_ISCSI_CONN_AG_CTX_CF14_MASK                        0x3
-#define XSTORM_ISCSI_CONN_AG_CTX_CF14_SHIFT                       4
-#define XSTORM_ISCSI_CONN_AG_CTX_UPDATE_STATE_TO_BASE_CF_MASK     0x3
-#define XSTORM_ISCSI_CONN_AG_CTX_UPDATE_STATE_TO_BASE_CF_SHIFT    6
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF12_MASK				0x3
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF12_SHIFT				0
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF13_MASK				0x3
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF13_SHIFT				2
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF14_MASK				0x3
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF14_SHIFT				4
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_UPDATE_STATE_TO_BASE_CF_MASK	0x3
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_UPDATE_STATE_TO_BASE_CF_SHIFT	6
 	u8 flags6;
-#define XSTORM_ISCSI_CONN_AG_CTX_CF16_MASK                        0x3
-#define XSTORM_ISCSI_CONN_AG_CTX_CF16_SHIFT                       0
-#define XSTORM_ISCSI_CONN_AG_CTX_CF17_MASK                        0x3
-#define XSTORM_ISCSI_CONN_AG_CTX_CF17_SHIFT                       2
-#define XSTORM_ISCSI_CONN_AG_CTX_CF18_MASK                        0x3
-#define XSTORM_ISCSI_CONN_AG_CTX_CF18_SHIFT                       4
-#define XSTORM_ISCSI_CONN_AG_CTX_DQ_FLUSH_MASK                    0x3
-#define XSTORM_ISCSI_CONN_AG_CTX_DQ_FLUSH_SHIFT                   6
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF16_MASK		0x3
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF16_SHIFT		0
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF17_MASK		0x3
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF17_SHIFT		2
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF18_MASK		0x3
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF18_SHIFT		4
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_DQ_FLUSH_MASK	0x3
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_DQ_FLUSH_SHIFT	6
 	u8 flags7;
-#define XSTORM_ISCSI_CONN_AG_CTX_MST_XCM_Q0_FLUSH_CF_MASK	0x3
-#define XSTORM_ISCSI_CONN_AG_CTX_MST_XCM_Q0_FLUSH_CF_SHIFT        0
-#define XSTORM_ISCSI_CONN_AG_CTX_UST_XCM_Q1_FLUSH_CF_MASK	0x3
-#define XSTORM_ISCSI_CONN_AG_CTX_UST_XCM_Q1_FLUSH_CF_SHIFT        2
-#define XSTORM_ISCSI_CONN_AG_CTX_SLOW_PATH_MASK                   0x3
-#define XSTORM_ISCSI_CONN_AG_CTX_SLOW_PATH_SHIFT                  4
-#define XSTORM_ISCSI_CONN_AG_CTX_CF0EN_MASK                       0x1
-#define XSTORM_ISCSI_CONN_AG_CTX_CF0EN_SHIFT                      6
-#define XSTORM_ISCSI_CONN_AG_CTX_CF1EN_MASK                       0x1
-#define XSTORM_ISCSI_CONN_AG_CTX_CF1EN_SHIFT                      7
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_MST_XCM_Q0_FLUSH_CF_MASK	0x3
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_MST_XCM_Q0_FLUSH_CF_SHIFT	0
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_UST_XCM_Q1_FLUSH_CF_MASK	0x3
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_UST_XCM_Q1_FLUSH_CF_SHIFT	2
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_SLOW_PATH_MASK		0x3
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_SLOW_PATH_SHIFT		4
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF0EN_MASK			0x1
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF0EN_SHIFT			6
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF1EN_MASK			0x1
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF1EN_SHIFT			7
 	u8 flags8;
-#define XSTORM_ISCSI_CONN_AG_CTX_CF2EN_MASK                       0x1
-#define XSTORM_ISCSI_CONN_AG_CTX_CF2EN_SHIFT                      0
-#define XSTORM_ISCSI_CONN_AG_CTX_TIMER_STOP_ALL_EN_MASK           0x1
-#define XSTORM_ISCSI_CONN_AG_CTX_TIMER_STOP_ALL_EN_SHIFT          1
-#define XSTORM_ISCSI_CONN_AG_CTX_CF4EN_MASK                       0x1
-#define XSTORM_ISCSI_CONN_AG_CTX_CF4EN_SHIFT                      2
-#define XSTORM_ISCSI_CONN_AG_CTX_CF5EN_MASK                       0x1
-#define XSTORM_ISCSI_CONN_AG_CTX_CF5EN_SHIFT                      3
-#define XSTORM_ISCSI_CONN_AG_CTX_CF6EN_MASK                       0x1
-#define XSTORM_ISCSI_CONN_AG_CTX_CF6EN_SHIFT                      4
-#define XSTORM_ISCSI_CONN_AG_CTX_CF7EN_MASK                       0x1
-#define XSTORM_ISCSI_CONN_AG_CTX_CF7EN_SHIFT                      5
-#define XSTORM_ISCSI_CONN_AG_CTX_CF8EN_MASK                       0x1
-#define XSTORM_ISCSI_CONN_AG_CTX_CF8EN_SHIFT                      6
-#define XSTORM_ISCSI_CONN_AG_CTX_CF9EN_MASK                       0x1
-#define XSTORM_ISCSI_CONN_AG_CTX_CF9EN_SHIFT                      7
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF2EN_MASK			0x1
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF2EN_SHIFT			0
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_TIMER_STOP_ALL_EN_MASK	0x1
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_TIMER_STOP_ALL_EN_SHIFT	1
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF4EN_MASK			0x1
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF4EN_SHIFT			2
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF5EN_MASK			0x1
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF5EN_SHIFT			3
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF6EN_MASK			0x1
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF6EN_SHIFT			4
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF7EN_MASK			0x1
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF7EN_SHIFT			5
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF8EN_MASK			0x1
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF8EN_SHIFT			6
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF9EN_MASK			0x1
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF9EN_SHIFT			7
 	u8 flags9;
-#define XSTORM_ISCSI_CONN_AG_CTX_CF10EN_MASK                      0x1
-#define XSTORM_ISCSI_CONN_AG_CTX_CF10EN_SHIFT                     0
-#define XSTORM_ISCSI_CONN_AG_CTX_CF11EN_MASK                      0x1
-#define XSTORM_ISCSI_CONN_AG_CTX_CF11EN_SHIFT                     1
-#define XSTORM_ISCSI_CONN_AG_CTX_CF12EN_MASK                      0x1
-#define XSTORM_ISCSI_CONN_AG_CTX_CF12EN_SHIFT                     2
-#define XSTORM_ISCSI_CONN_AG_CTX_CF13EN_MASK                      0x1
-#define XSTORM_ISCSI_CONN_AG_CTX_CF13EN_SHIFT                     3
-#define XSTORM_ISCSI_CONN_AG_CTX_CF14EN_MASK                      0x1
-#define XSTORM_ISCSI_CONN_AG_CTX_CF14EN_SHIFT                     4
-#define XSTORM_ISCSI_CONN_AG_CTX_UPDATE_STATE_TO_BASE_CF_EN_MASK  0x1
-#define XSTORM_ISCSI_CONN_AG_CTX_UPDATE_STATE_TO_BASE_CF_EN_SHIFT 5
-#define XSTORM_ISCSI_CONN_AG_CTX_CF16EN_MASK                      0x1
-#define XSTORM_ISCSI_CONN_AG_CTX_CF16EN_SHIFT                     6
-#define XSTORM_ISCSI_CONN_AG_CTX_CF17EN_MASK                      0x1
-#define XSTORM_ISCSI_CONN_AG_CTX_CF17EN_SHIFT                     7
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF10EN_MASK				0x1
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF10EN_SHIFT			0
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF11EN_MASK				0x1
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF11EN_SHIFT			1
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF12EN_MASK				0x1
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF12EN_SHIFT			2
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF13EN_MASK				0x1
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF13EN_SHIFT			3
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF14EN_MASK				0x1
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF14EN_SHIFT			4
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_UPDATE_STATE_TO_BASE_CF_EN_MASK	0x1
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_UPDATE_STATE_TO_BASE_CF_EN_SHIFT	5
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF16EN_MASK				0x1
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF16EN_SHIFT			6
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF17EN_MASK				0x1
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF17EN_SHIFT			7
 	u8 flags10;
-#define XSTORM_ISCSI_CONN_AG_CTX_CF18EN_MASK                      0x1
-#define XSTORM_ISCSI_CONN_AG_CTX_CF18EN_SHIFT                     0
-#define XSTORM_ISCSI_CONN_AG_CTX_DQ_FLUSH_EN_MASK                 0x1
-#define XSTORM_ISCSI_CONN_AG_CTX_DQ_FLUSH_EN_SHIFT                1
-#define XSTORM_ISCSI_CONN_AG_CTX_MST_XCM_Q0_FLUSH_CF_EN_MASK	0x1
-#define XSTORM_ISCSI_CONN_AG_CTX_MST_XCM_Q0_FLUSH_CF_EN_SHIFT     2
-#define XSTORM_ISCSI_CONN_AG_CTX_UST_XCM_Q1_FLUSH_CF_EN_MASK	0x1
-#define XSTORM_ISCSI_CONN_AG_CTX_UST_XCM_Q1_FLUSH_CF_EN_SHIFT     3
-#define XSTORM_ISCSI_CONN_AG_CTX_SLOW_PATH_EN_MASK                0x1
-#define XSTORM_ISCSI_CONN_AG_CTX_SLOW_PATH_EN_SHIFT               4
-#define XSTORM_ISCSI_CONN_AG_CTX_PROC_ONLY_CLEANUP_EN_MASK        0x1
-#define XSTORM_ISCSI_CONN_AG_CTX_PROC_ONLY_CLEANUP_EN_SHIFT       5
-#define XSTORM_ISCSI_CONN_AG_CTX_RULE0EN_MASK                     0x1
-#define XSTORM_ISCSI_CONN_AG_CTX_RULE0EN_SHIFT                    6
-#define XSTORM_ISCSI_CONN_AG_CTX_MORE_TO_SEND_DEC_RULE_EN_MASK    0x1
-#define XSTORM_ISCSI_CONN_AG_CTX_MORE_TO_SEND_DEC_RULE_EN_SHIFT   7
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF18EN_MASK				0x1
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF18EN_SHIFT			0
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_DQ_FLUSH_EN_MASK			0x1
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_DQ_FLUSH_EN_SHIFT			1
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_MST_XCM_Q0_FLUSH_CF_EN_MASK		0x1
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_MST_XCM_Q0_FLUSH_CF_EN_SHIFT	2
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_UST_XCM_Q1_FLUSH_CF_EN_MASK		0x1
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_UST_XCM_Q1_FLUSH_CF_EN_SHIFT	3
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_SLOW_PATH_EN_MASK			0x1
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_SLOW_PATH_EN_SHIFT			4
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_PROC_ONLY_CLEANUP_EN_MASK		0x1
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_PROC_ONLY_CLEANUP_EN_SHIFT		5
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_RULE0EN_MASK			0x1
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_RULE0EN_SHIFT			6
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_MORE_TO_SEND_DEC_RULE_EN_MASK	0x1
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_MORE_TO_SEND_DEC_RULE_EN_SHIFT	7
 	u8 flags11;
-#define XSTORM_ISCSI_CONN_AG_CTX_TX_BLOCKED_EN_MASK	0x1
-#define XSTORM_ISCSI_CONN_AG_CTX_TX_BLOCKED_EN_SHIFT              0
-#define XSTORM_ISCSI_CONN_AG_CTX_RULE3EN_MASK                     0x1
-#define XSTORM_ISCSI_CONN_AG_CTX_RULE3EN_SHIFT                    1
-#define XSTORM_ISCSI_CONN_AG_CTX_RESERVED3_MASK                   0x1
-#define XSTORM_ISCSI_CONN_AG_CTX_RESERVED3_SHIFT                  2
-#define XSTORM_ISCSI_CONN_AG_CTX_RULE5EN_MASK                     0x1
-#define XSTORM_ISCSI_CONN_AG_CTX_RULE5EN_SHIFT                    3
-#define XSTORM_ISCSI_CONN_AG_CTX_RULE6EN_MASK                     0x1
-#define XSTORM_ISCSI_CONN_AG_CTX_RULE6EN_SHIFT                    4
-#define XSTORM_ISCSI_CONN_AG_CTX_RULE7EN_MASK                     0x1
-#define XSTORM_ISCSI_CONN_AG_CTX_RULE7EN_SHIFT                    5
-#define XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED1_MASK                0x1
-#define XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED1_SHIFT               6
-#define XSTORM_ISCSI_CONN_AG_CTX_RULE9EN_MASK                     0x1
-#define XSTORM_ISCSI_CONN_AG_CTX_RULE9EN_SHIFT                    7
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_TX_BLOCKED_EN_MASK	0x1
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_TX_BLOCKED_EN_SHIFT	0
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_RULE3EN_MASK	0x1
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_RULE3EN_SHIFT	1
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_RESERVED3_MASK	0x1
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_RESERVED3_SHIFT	2
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_RULE5EN_MASK	0x1
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_RULE5EN_SHIFT	3
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_RULE6EN_MASK	0x1
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_RULE6EN_SHIFT	4
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_RULE7EN_MASK	0x1
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_RULE7EN_SHIFT	5
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED1_MASK	0x1
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED1_SHIFT	6
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_RULE9EN_MASK	0x1
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_RULE9EN_SHIFT	7
 	u8 flags12;
-#define XSTORM_ISCSI_CONN_AG_CTX_SQ_DEC_RULE_EN_MASK              0x1
-#define XSTORM_ISCSI_CONN_AG_CTX_SQ_DEC_RULE_EN_SHIFT             0
-#define XSTORM_ISCSI_CONN_AG_CTX_RULE11EN_MASK                    0x1
-#define XSTORM_ISCSI_CONN_AG_CTX_RULE11EN_SHIFT                   1
-#define XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED2_MASK                0x1
-#define XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED2_SHIFT               2
-#define XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED3_MASK                0x1
-#define XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED3_SHIFT               3
-#define XSTORM_ISCSI_CONN_AG_CTX_RULE14EN_MASK                    0x1
-#define XSTORM_ISCSI_CONN_AG_CTX_RULE14EN_SHIFT                   4
-#define XSTORM_ISCSI_CONN_AG_CTX_RULE15EN_MASK                    0x1
-#define XSTORM_ISCSI_CONN_AG_CTX_RULE15EN_SHIFT                   5
-#define XSTORM_ISCSI_CONN_AG_CTX_RULE16EN_MASK                    0x1
-#define XSTORM_ISCSI_CONN_AG_CTX_RULE16EN_SHIFT                   6
-#define XSTORM_ISCSI_CONN_AG_CTX_RULE17EN_MASK                    0x1
-#define XSTORM_ISCSI_CONN_AG_CTX_RULE17EN_SHIFT                   7
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_SQ_DEC_RULE_EN_MASK		0x1
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_SQ_DEC_RULE_EN_SHIFT	0
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_RULE11EN_MASK		0x1
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_RULE11EN_SHIFT		1
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED2_MASK		0x1
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED2_SHIFT		2
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED3_MASK		0x1
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED3_SHIFT		3
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_RULE14EN_MASK		0x1
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_RULE14EN_SHIFT		4
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_RULE15EN_MASK		0x1
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_RULE15EN_SHIFT		5
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_RULE16EN_MASK		0x1
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_RULE16EN_SHIFT		6
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_RULE17EN_MASK		0x1
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_RULE17EN_SHIFT		7
 	u8 flags13;
-#define XSTORM_ISCSI_CONN_AG_CTX_R2TQ_DEC_RULE_EN_MASK            0x1
-#define XSTORM_ISCSI_CONN_AG_CTX_R2TQ_DEC_RULE_EN_SHIFT           0
-#define XSTORM_ISCSI_CONN_AG_CTX_HQ_DEC_RULE_EN_MASK              0x1
-#define XSTORM_ISCSI_CONN_AG_CTX_HQ_DEC_RULE_EN_SHIFT             1
-#define XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED4_MASK                0x1
-#define XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED4_SHIFT               2
-#define XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED5_MASK                0x1
-#define XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED5_SHIFT               3
-#define XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED6_MASK                0x1
-#define XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED6_SHIFT               4
-#define XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED7_MASK                0x1
-#define XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED7_SHIFT               5
-#define XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED8_MASK                0x1
-#define XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED8_SHIFT               6
-#define XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED9_MASK                0x1
-#define XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED9_SHIFT               7
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_R2TQ_DEC_RULE_EN_MASK	0x1
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_R2TQ_DEC_RULE_EN_SHIFT	0
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_HQ_DEC_RULE_EN_MASK		0x1
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_HQ_DEC_RULE_EN_SHIFT	1
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED4_MASK		0x1
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED4_SHIFT		2
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED5_MASK		0x1
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED5_SHIFT		3
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED6_MASK		0x1
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED6_SHIFT		4
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED7_MASK		0x1
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED7_SHIFT		5
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED8_MASK		0x1
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED8_SHIFT		6
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED9_MASK		0x1
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED9_SHIFT		7
 	u8 flags14;
-#define XSTORM_ISCSI_CONN_AG_CTX_BIT16_MASK                       0x1
-#define XSTORM_ISCSI_CONN_AG_CTX_BIT16_SHIFT                      0
-#define XSTORM_ISCSI_CONN_AG_CTX_BIT17_MASK                       0x1
-#define XSTORM_ISCSI_CONN_AG_CTX_BIT17_SHIFT                      1
-#define XSTORM_ISCSI_CONN_AG_CTX_BIT18_MASK                       0x1
-#define XSTORM_ISCSI_CONN_AG_CTX_BIT18_SHIFT                      2
-#define XSTORM_ISCSI_CONN_AG_CTX_BIT19_MASK                       0x1
-#define XSTORM_ISCSI_CONN_AG_CTX_BIT19_SHIFT                      3
-#define XSTORM_ISCSI_CONN_AG_CTX_BIT20_MASK                       0x1
-#define XSTORM_ISCSI_CONN_AG_CTX_BIT20_SHIFT                      4
-#define XSTORM_ISCSI_CONN_AG_CTX_DUMMY_READ_DONE_MASK             0x1
-#define XSTORM_ISCSI_CONN_AG_CTX_DUMMY_READ_DONE_SHIFT            5
-#define XSTORM_ISCSI_CONN_AG_CTX_PROC_ONLY_CLEANUP_MASK           0x3
-#define XSTORM_ISCSI_CONN_AG_CTX_PROC_ONLY_CLEANUP_SHIFT          6
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_BIT16_MASK			0x1
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_BIT16_SHIFT			0
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_BIT17_MASK			0x1
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_BIT17_SHIFT			1
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_BIT18_MASK			0x1
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_BIT18_SHIFT			2
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_BIT19_MASK			0x1
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_BIT19_SHIFT			3
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_BIT20_MASK			0x1
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_BIT20_SHIFT			4
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_DUMMY_READ_DONE_MASK	0x1
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_DUMMY_READ_DONE_SHIFT	5
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_PROC_ONLY_CLEANUP_MASK	0x3
+#define E4_XSTORM_ISCSI_CONN_AG_CTX_PROC_ONLY_CLEANUP_SHIFT	6
 	u8 byte2;
 	__le16 physical_q0;
 	__le16 physical_q1;
@@ -10449,7 +11108,7 @@ struct xstorm_iscsi_conn_ag_ctx {
 	u8 byte13;
 	u8 byte14;
 	u8 byte15;
-	u8 ereserved;
+	u8 e5_reserved;
 	__le16 word11;
 	__le32 reg10;
 	__le32 reg11;
@@ -10461,89 +11120,89 @@ struct xstorm_iscsi_conn_ag_ctx {
 	__le32 reg17;
 };
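/*
 * Editor's sketch, not part of the patch: each *_MASK/*_SHIFT pair above
 * describes one sub-field of the preceding flags byte -- the mask gives
 * the field width and the shift its bit position. Minimal accessors in
 * this style (the driver's own GET_FIELD()/SET_FIELD() helpers follow
 * the same shape):
 */
#define EX_GET_FIELD(val, name) \
	(((val) >> name##_SHIFT) & name##_MASK)
#define EX_SET_FIELD(val, name, v) \
	do { \
		(val) &= ~(name##_MASK << name##_SHIFT); \
		(val) |= ((v) & name##_MASK) << name##_SHIFT; \
	} while (0)
/* e.g. EX_SET_FIELD(ctx.flags9, E4_XSTORM_ISCSI_CONN_AG_CTX_CF10EN, 1); */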
 
-struct tstorm_iscsi_conn_ag_ctx {
+struct e4_tstorm_iscsi_conn_ag_ctx {
 	u8 reserved0;
 	u8 state;
 	u8 flags0;
-#define TSTORM_ISCSI_CONN_AG_CTX_EXIST_IN_QM0_MASK       0x1
-#define TSTORM_ISCSI_CONN_AG_CTX_EXIST_IN_QM0_SHIFT      0
-#define TSTORM_ISCSI_CONN_AG_CTX_BIT1_MASK               0x1
-#define TSTORM_ISCSI_CONN_AG_CTX_BIT1_SHIFT              1
-#define TSTORM_ISCSI_CONN_AG_CTX_BIT2_MASK               0x1
-#define TSTORM_ISCSI_CONN_AG_CTX_BIT2_SHIFT              2
-#define TSTORM_ISCSI_CONN_AG_CTX_BIT3_MASK               0x1
-#define TSTORM_ISCSI_CONN_AG_CTX_BIT3_SHIFT              3
-#define TSTORM_ISCSI_CONN_AG_CTX_BIT4_MASK               0x1
-#define TSTORM_ISCSI_CONN_AG_CTX_BIT4_SHIFT              4
-#define TSTORM_ISCSI_CONN_AG_CTX_BIT5_MASK               0x1
-#define TSTORM_ISCSI_CONN_AG_CTX_BIT5_SHIFT              5
-#define TSTORM_ISCSI_CONN_AG_CTX_CF0_MASK                0x3
-#define TSTORM_ISCSI_CONN_AG_CTX_CF0_SHIFT               6
+#define E4_TSTORM_ISCSI_CONN_AG_CTX_EXIST_IN_QM0_MASK	0x1
+#define E4_TSTORM_ISCSI_CONN_AG_CTX_EXIST_IN_QM0_SHIFT	0
+#define E4_TSTORM_ISCSI_CONN_AG_CTX_BIT1_MASK		0x1
+#define E4_TSTORM_ISCSI_CONN_AG_CTX_BIT1_SHIFT		1
+#define E4_TSTORM_ISCSI_CONN_AG_CTX_BIT2_MASK		0x1
+#define E4_TSTORM_ISCSI_CONN_AG_CTX_BIT2_SHIFT		2
+#define E4_TSTORM_ISCSI_CONN_AG_CTX_BIT3_MASK		0x1
+#define E4_TSTORM_ISCSI_CONN_AG_CTX_BIT3_SHIFT		3
+#define E4_TSTORM_ISCSI_CONN_AG_CTX_BIT4_MASK		0x1
+#define E4_TSTORM_ISCSI_CONN_AG_CTX_BIT4_SHIFT		4
+#define E4_TSTORM_ISCSI_CONN_AG_CTX_BIT5_MASK		0x1
+#define E4_TSTORM_ISCSI_CONN_AG_CTX_BIT5_SHIFT		5
+#define E4_TSTORM_ISCSI_CONN_AG_CTX_CF0_MASK		0x3
+#define E4_TSTORM_ISCSI_CONN_AG_CTX_CF0_SHIFT		6
 	u8 flags1;
-#define TSTORM_ISCSI_CONN_AG_CTX_P2T_FLUSH_CF_MASK	0x3
-#define TSTORM_ISCSI_CONN_AG_CTX_P2T_FLUSH_CF_SHIFT      0
-#define TSTORM_ISCSI_CONN_AG_CTX_M2T_FLUSH_CF_MASK	0x3
-#define TSTORM_ISCSI_CONN_AG_CTX_M2T_FLUSH_CF_SHIFT      2
-#define TSTORM_ISCSI_CONN_AG_CTX_TIMER_STOP_ALL_MASK     0x3
-#define TSTORM_ISCSI_CONN_AG_CTX_TIMER_STOP_ALL_SHIFT    4
-#define TSTORM_ISCSI_CONN_AG_CTX_CF4_MASK                0x3
-#define TSTORM_ISCSI_CONN_AG_CTX_CF4_SHIFT               6
+#define E4_TSTORM_ISCSI_CONN_AG_CTX_P2T_FLUSH_CF_MASK		0x3
+#define E4_TSTORM_ISCSI_CONN_AG_CTX_P2T_FLUSH_CF_SHIFT		0
+#define E4_TSTORM_ISCSI_CONN_AG_CTX_M2T_FLUSH_CF_MASK		0x3
+#define E4_TSTORM_ISCSI_CONN_AG_CTX_M2T_FLUSH_CF_SHIFT		2
+#define E4_TSTORM_ISCSI_CONN_AG_CTX_TIMER_STOP_ALL_MASK		0x3
+#define E4_TSTORM_ISCSI_CONN_AG_CTX_TIMER_STOP_ALL_SHIFT	4
+#define E4_TSTORM_ISCSI_CONN_AG_CTX_CF4_MASK			0x3
+#define E4_TSTORM_ISCSI_CONN_AG_CTX_CF4_SHIFT			6
 	u8 flags2;
-#define TSTORM_ISCSI_CONN_AG_CTX_CF5_MASK                0x3
-#define TSTORM_ISCSI_CONN_AG_CTX_CF5_SHIFT               0
-#define TSTORM_ISCSI_CONN_AG_CTX_CF6_MASK                0x3
-#define TSTORM_ISCSI_CONN_AG_CTX_CF6_SHIFT               2
-#define TSTORM_ISCSI_CONN_AG_CTX_CF7_MASK                0x3
-#define TSTORM_ISCSI_CONN_AG_CTX_CF7_SHIFT               4
-#define TSTORM_ISCSI_CONN_AG_CTX_CF8_MASK                0x3
-#define TSTORM_ISCSI_CONN_AG_CTX_CF8_SHIFT               6
+#define E4_TSTORM_ISCSI_CONN_AG_CTX_CF5_MASK	0x3
+#define E4_TSTORM_ISCSI_CONN_AG_CTX_CF5_SHIFT	0
+#define E4_TSTORM_ISCSI_CONN_AG_CTX_CF6_MASK	0x3
+#define E4_TSTORM_ISCSI_CONN_AG_CTX_CF6_SHIFT	2
+#define E4_TSTORM_ISCSI_CONN_AG_CTX_CF7_MASK	0x3
+#define E4_TSTORM_ISCSI_CONN_AG_CTX_CF7_SHIFT	4
+#define E4_TSTORM_ISCSI_CONN_AG_CTX_CF8_MASK	0x3
+#define E4_TSTORM_ISCSI_CONN_AG_CTX_CF8_SHIFT	6
 	u8 flags3;
-#define TSTORM_ISCSI_CONN_AG_CTX_FLUSH_Q0_MASK           0x3
-#define TSTORM_ISCSI_CONN_AG_CTX_FLUSH_Q0_SHIFT          0
-#define TSTORM_ISCSI_CONN_AG_CTX_CF10_MASK               0x3
-#define TSTORM_ISCSI_CONN_AG_CTX_CF10_SHIFT              2
-#define TSTORM_ISCSI_CONN_AG_CTX_CF0EN_MASK              0x1
-#define TSTORM_ISCSI_CONN_AG_CTX_CF0EN_SHIFT             4
-#define TSTORM_ISCSI_CONN_AG_CTX_P2T_FLUSH_CF_EN_MASK	0x1
-#define TSTORM_ISCSI_CONN_AG_CTX_P2T_FLUSH_CF_EN_SHIFT   5
-#define TSTORM_ISCSI_CONN_AG_CTX_M2T_FLUSH_CF_EN_MASK	0x1
-#define TSTORM_ISCSI_CONN_AG_CTX_M2T_FLUSH_CF_EN_SHIFT   6
-#define TSTORM_ISCSI_CONN_AG_CTX_TIMER_STOP_ALL_EN_MASK  0x1
-#define TSTORM_ISCSI_CONN_AG_CTX_TIMER_STOP_ALL_EN_SHIFT 7
+#define E4_TSTORM_ISCSI_CONN_AG_CTX_FLUSH_Q0_MASK		0x3
+#define E4_TSTORM_ISCSI_CONN_AG_CTX_FLUSH_Q0_SHIFT		0
+#define E4_TSTORM_ISCSI_CONN_AG_CTX_CF10_MASK			0x3
+#define E4_TSTORM_ISCSI_CONN_AG_CTX_CF10_SHIFT			2
+#define E4_TSTORM_ISCSI_CONN_AG_CTX_CF0EN_MASK			0x1
+#define E4_TSTORM_ISCSI_CONN_AG_CTX_CF0EN_SHIFT			4
+#define E4_TSTORM_ISCSI_CONN_AG_CTX_P2T_FLUSH_CF_EN_MASK	0x1
+#define E4_TSTORM_ISCSI_CONN_AG_CTX_P2T_FLUSH_CF_EN_SHIFT	5
+#define E4_TSTORM_ISCSI_CONN_AG_CTX_M2T_FLUSH_CF_EN_MASK	0x1
+#define E4_TSTORM_ISCSI_CONN_AG_CTX_M2T_FLUSH_CF_EN_SHIFT	6
+#define E4_TSTORM_ISCSI_CONN_AG_CTX_TIMER_STOP_ALL_EN_MASK	0x1
+#define E4_TSTORM_ISCSI_CONN_AG_CTX_TIMER_STOP_ALL_EN_SHIFT	7
 	u8 flags4;
-#define TSTORM_ISCSI_CONN_AG_CTX_CF4EN_MASK              0x1
-#define TSTORM_ISCSI_CONN_AG_CTX_CF4EN_SHIFT             0
-#define TSTORM_ISCSI_CONN_AG_CTX_CF5EN_MASK              0x1
-#define TSTORM_ISCSI_CONN_AG_CTX_CF5EN_SHIFT             1
-#define TSTORM_ISCSI_CONN_AG_CTX_CF6EN_MASK              0x1
-#define TSTORM_ISCSI_CONN_AG_CTX_CF6EN_SHIFT             2
-#define TSTORM_ISCSI_CONN_AG_CTX_CF7EN_MASK              0x1
-#define TSTORM_ISCSI_CONN_AG_CTX_CF7EN_SHIFT             3
-#define TSTORM_ISCSI_CONN_AG_CTX_CF8EN_MASK              0x1
-#define TSTORM_ISCSI_CONN_AG_CTX_CF8EN_SHIFT             4
-#define TSTORM_ISCSI_CONN_AG_CTX_FLUSH_Q0_EN_MASK        0x1
-#define TSTORM_ISCSI_CONN_AG_CTX_FLUSH_Q0_EN_SHIFT       5
-#define TSTORM_ISCSI_CONN_AG_CTX_CF10EN_MASK             0x1
-#define TSTORM_ISCSI_CONN_AG_CTX_CF10EN_SHIFT            6
-#define TSTORM_ISCSI_CONN_AG_CTX_RULE0EN_MASK            0x1
-#define TSTORM_ISCSI_CONN_AG_CTX_RULE0EN_SHIFT           7
+#define E4_TSTORM_ISCSI_CONN_AG_CTX_CF4EN_MASK		0x1
+#define E4_TSTORM_ISCSI_CONN_AG_CTX_CF4EN_SHIFT		0
+#define E4_TSTORM_ISCSI_CONN_AG_CTX_CF5EN_MASK		0x1
+#define E4_TSTORM_ISCSI_CONN_AG_CTX_CF5EN_SHIFT		1
+#define E4_TSTORM_ISCSI_CONN_AG_CTX_CF6EN_MASK		0x1
+#define E4_TSTORM_ISCSI_CONN_AG_CTX_CF6EN_SHIFT		2
+#define E4_TSTORM_ISCSI_CONN_AG_CTX_CF7EN_MASK		0x1
+#define E4_TSTORM_ISCSI_CONN_AG_CTX_CF7EN_SHIFT		3
+#define E4_TSTORM_ISCSI_CONN_AG_CTX_CF8EN_MASK		0x1
+#define E4_TSTORM_ISCSI_CONN_AG_CTX_CF8EN_SHIFT		4
+#define E4_TSTORM_ISCSI_CONN_AG_CTX_FLUSH_Q0_EN_MASK	0x1
+#define E4_TSTORM_ISCSI_CONN_AG_CTX_FLUSH_Q0_EN_SHIFT	5
+#define E4_TSTORM_ISCSI_CONN_AG_CTX_CF10EN_MASK		0x1
+#define E4_TSTORM_ISCSI_CONN_AG_CTX_CF10EN_SHIFT	6
+#define E4_TSTORM_ISCSI_CONN_AG_CTX_RULE0EN_MASK	0x1
+#define E4_TSTORM_ISCSI_CONN_AG_CTX_RULE0EN_SHIFT	7
 	u8 flags5;
-#define TSTORM_ISCSI_CONN_AG_CTX_RULE1EN_MASK            0x1
-#define TSTORM_ISCSI_CONN_AG_CTX_RULE1EN_SHIFT           0
-#define TSTORM_ISCSI_CONN_AG_CTX_RULE2EN_MASK            0x1
-#define TSTORM_ISCSI_CONN_AG_CTX_RULE2EN_SHIFT           1
-#define TSTORM_ISCSI_CONN_AG_CTX_RULE3EN_MASK            0x1
-#define TSTORM_ISCSI_CONN_AG_CTX_RULE3EN_SHIFT           2
-#define TSTORM_ISCSI_CONN_AG_CTX_RULE4EN_MASK            0x1
-#define TSTORM_ISCSI_CONN_AG_CTX_RULE4EN_SHIFT           3
-#define TSTORM_ISCSI_CONN_AG_CTX_RULE5EN_MASK            0x1
-#define TSTORM_ISCSI_CONN_AG_CTX_RULE5EN_SHIFT           4
-#define TSTORM_ISCSI_CONN_AG_CTX_RULE6EN_MASK            0x1
-#define TSTORM_ISCSI_CONN_AG_CTX_RULE6EN_SHIFT           5
-#define TSTORM_ISCSI_CONN_AG_CTX_RULE7EN_MASK            0x1
-#define TSTORM_ISCSI_CONN_AG_CTX_RULE7EN_SHIFT           6
-#define TSTORM_ISCSI_CONN_AG_CTX_RULE8EN_MASK            0x1
-#define TSTORM_ISCSI_CONN_AG_CTX_RULE8EN_SHIFT           7
+#define E4_TSTORM_ISCSI_CONN_AG_CTX_RULE1EN_MASK	0x1
+#define E4_TSTORM_ISCSI_CONN_AG_CTX_RULE1EN_SHIFT	0
+#define E4_TSTORM_ISCSI_CONN_AG_CTX_RULE2EN_MASK	0x1
+#define E4_TSTORM_ISCSI_CONN_AG_CTX_RULE2EN_SHIFT	1
+#define E4_TSTORM_ISCSI_CONN_AG_CTX_RULE3EN_MASK	0x1
+#define E4_TSTORM_ISCSI_CONN_AG_CTX_RULE3EN_SHIFT	2
+#define E4_TSTORM_ISCSI_CONN_AG_CTX_RULE4EN_MASK	0x1
+#define E4_TSTORM_ISCSI_CONN_AG_CTX_RULE4EN_SHIFT	3
+#define E4_TSTORM_ISCSI_CONN_AG_CTX_RULE5EN_MASK	0x1
+#define E4_TSTORM_ISCSI_CONN_AG_CTX_RULE5EN_SHIFT	4
+#define E4_TSTORM_ISCSI_CONN_AG_CTX_RULE6EN_MASK	0x1
+#define E4_TSTORM_ISCSI_CONN_AG_CTX_RULE6EN_SHIFT	5
+#define E4_TSTORM_ISCSI_CONN_AG_CTX_RULE7EN_MASK	0x1
+#define E4_TSTORM_ISCSI_CONN_AG_CTX_RULE7EN_SHIFT	6
+#define E4_TSTORM_ISCSI_CONN_AG_CTX_RULE8EN_MASK	0x1
+#define E4_TSTORM_ISCSI_CONN_AG_CTX_RULE8EN_SHIFT	7
 	__le32 reg0;
 	__le32 reg1;
 	__le32 reg2;
@@ -10558,63 +11217,63 @@ struct tstorm_iscsi_conn_ag_ctx {
 	__le16 word0;
 };
 
-struct ustorm_iscsi_conn_ag_ctx {
+struct e4_ustorm_iscsi_conn_ag_ctx {
 	u8 byte0;
 	u8 byte1;
 	u8 flags0;
-#define USTORM_ISCSI_CONN_AG_CTX_BIT0_MASK     0x1
-#define USTORM_ISCSI_CONN_AG_CTX_BIT0_SHIFT    0
-#define USTORM_ISCSI_CONN_AG_CTX_BIT1_MASK     0x1
-#define USTORM_ISCSI_CONN_AG_CTX_BIT1_SHIFT    1
-#define USTORM_ISCSI_CONN_AG_CTX_CF0_MASK      0x3
-#define USTORM_ISCSI_CONN_AG_CTX_CF0_SHIFT     2
-#define USTORM_ISCSI_CONN_AG_CTX_CF1_MASK      0x3
-#define USTORM_ISCSI_CONN_AG_CTX_CF1_SHIFT     4
-#define USTORM_ISCSI_CONN_AG_CTX_CF2_MASK      0x3
-#define USTORM_ISCSI_CONN_AG_CTX_CF2_SHIFT     6
+#define E4_USTORM_ISCSI_CONN_AG_CTX_BIT0_MASK	0x1
+#define E4_USTORM_ISCSI_CONN_AG_CTX_BIT0_SHIFT	0
+#define E4_USTORM_ISCSI_CONN_AG_CTX_BIT1_MASK	0x1
+#define E4_USTORM_ISCSI_CONN_AG_CTX_BIT1_SHIFT	1
+#define E4_USTORM_ISCSI_CONN_AG_CTX_CF0_MASK	0x3
+#define E4_USTORM_ISCSI_CONN_AG_CTX_CF0_SHIFT	2
+#define E4_USTORM_ISCSI_CONN_AG_CTX_CF1_MASK	0x3
+#define E4_USTORM_ISCSI_CONN_AG_CTX_CF1_SHIFT	4
+#define E4_USTORM_ISCSI_CONN_AG_CTX_CF2_MASK	0x3
+#define E4_USTORM_ISCSI_CONN_AG_CTX_CF2_SHIFT	6
 	u8 flags1;
-#define USTORM_ISCSI_CONN_AG_CTX_CF3_MASK      0x3
-#define USTORM_ISCSI_CONN_AG_CTX_CF3_SHIFT     0
-#define USTORM_ISCSI_CONN_AG_CTX_CF4_MASK      0x3
-#define USTORM_ISCSI_CONN_AG_CTX_CF4_SHIFT     2
-#define USTORM_ISCSI_CONN_AG_CTX_CF5_MASK      0x3
-#define USTORM_ISCSI_CONN_AG_CTX_CF5_SHIFT     4
-#define USTORM_ISCSI_CONN_AG_CTX_CF6_MASK      0x3
-#define USTORM_ISCSI_CONN_AG_CTX_CF6_SHIFT     6
+#define E4_USTORM_ISCSI_CONN_AG_CTX_CF3_MASK	0x3
+#define E4_USTORM_ISCSI_CONN_AG_CTX_CF3_SHIFT	0
+#define E4_USTORM_ISCSI_CONN_AG_CTX_CF4_MASK	0x3
+#define E4_USTORM_ISCSI_CONN_AG_CTX_CF4_SHIFT	2
+#define E4_USTORM_ISCSI_CONN_AG_CTX_CF5_MASK	0x3
+#define E4_USTORM_ISCSI_CONN_AG_CTX_CF5_SHIFT	4
+#define E4_USTORM_ISCSI_CONN_AG_CTX_CF6_MASK	0x3
+#define E4_USTORM_ISCSI_CONN_AG_CTX_CF6_SHIFT	6
 	u8 flags2;
-#define USTORM_ISCSI_CONN_AG_CTX_CF0EN_MASK    0x1
-#define USTORM_ISCSI_CONN_AG_CTX_CF0EN_SHIFT   0
-#define USTORM_ISCSI_CONN_AG_CTX_CF1EN_MASK    0x1
-#define USTORM_ISCSI_CONN_AG_CTX_CF1EN_SHIFT   1
-#define USTORM_ISCSI_CONN_AG_CTX_CF2EN_MASK    0x1
-#define USTORM_ISCSI_CONN_AG_CTX_CF2EN_SHIFT   2
-#define USTORM_ISCSI_CONN_AG_CTX_CF3EN_MASK    0x1
-#define USTORM_ISCSI_CONN_AG_CTX_CF3EN_SHIFT   3
-#define USTORM_ISCSI_CONN_AG_CTX_CF4EN_MASK    0x1
-#define USTORM_ISCSI_CONN_AG_CTX_CF4EN_SHIFT   4
-#define USTORM_ISCSI_CONN_AG_CTX_CF5EN_MASK    0x1
-#define USTORM_ISCSI_CONN_AG_CTX_CF5EN_SHIFT   5
-#define USTORM_ISCSI_CONN_AG_CTX_CF6EN_MASK    0x1
-#define USTORM_ISCSI_CONN_AG_CTX_CF6EN_SHIFT   6
-#define USTORM_ISCSI_CONN_AG_CTX_RULE0EN_MASK  0x1
-#define USTORM_ISCSI_CONN_AG_CTX_RULE0EN_SHIFT 7
+#define E4_USTORM_ISCSI_CONN_AG_CTX_CF0EN_MASK		0x1
+#define E4_USTORM_ISCSI_CONN_AG_CTX_CF0EN_SHIFT		0
+#define E4_USTORM_ISCSI_CONN_AG_CTX_CF1EN_MASK		0x1
+#define E4_USTORM_ISCSI_CONN_AG_CTX_CF1EN_SHIFT		1
+#define E4_USTORM_ISCSI_CONN_AG_CTX_CF2EN_MASK		0x1
+#define E4_USTORM_ISCSI_CONN_AG_CTX_CF2EN_SHIFT		2
+#define E4_USTORM_ISCSI_CONN_AG_CTX_CF3EN_MASK		0x1
+#define E4_USTORM_ISCSI_CONN_AG_CTX_CF3EN_SHIFT		3
+#define E4_USTORM_ISCSI_CONN_AG_CTX_CF4EN_MASK		0x1
+#define E4_USTORM_ISCSI_CONN_AG_CTX_CF4EN_SHIFT		4
+#define E4_USTORM_ISCSI_CONN_AG_CTX_CF5EN_MASK		0x1
+#define E4_USTORM_ISCSI_CONN_AG_CTX_CF5EN_SHIFT		5
+#define E4_USTORM_ISCSI_CONN_AG_CTX_CF6EN_MASK		0x1
+#define E4_USTORM_ISCSI_CONN_AG_CTX_CF6EN_SHIFT		6
+#define E4_USTORM_ISCSI_CONN_AG_CTX_RULE0EN_MASK	0x1
+#define E4_USTORM_ISCSI_CONN_AG_CTX_RULE0EN_SHIFT	7
 	u8 flags3;
-#define USTORM_ISCSI_CONN_AG_CTX_RULE1EN_MASK  0x1
-#define USTORM_ISCSI_CONN_AG_CTX_RULE1EN_SHIFT 0
-#define USTORM_ISCSI_CONN_AG_CTX_RULE2EN_MASK  0x1
-#define USTORM_ISCSI_CONN_AG_CTX_RULE2EN_SHIFT 1
-#define USTORM_ISCSI_CONN_AG_CTX_RULE3EN_MASK  0x1
-#define USTORM_ISCSI_CONN_AG_CTX_RULE3EN_SHIFT 2
-#define USTORM_ISCSI_CONN_AG_CTX_RULE4EN_MASK  0x1
-#define USTORM_ISCSI_CONN_AG_CTX_RULE4EN_SHIFT 3
-#define USTORM_ISCSI_CONN_AG_CTX_RULE5EN_MASK  0x1
-#define USTORM_ISCSI_CONN_AG_CTX_RULE5EN_SHIFT 4
-#define USTORM_ISCSI_CONN_AG_CTX_RULE6EN_MASK  0x1
-#define USTORM_ISCSI_CONN_AG_CTX_RULE6EN_SHIFT 5
-#define USTORM_ISCSI_CONN_AG_CTX_RULE7EN_MASK  0x1
-#define USTORM_ISCSI_CONN_AG_CTX_RULE7EN_SHIFT 6
-#define USTORM_ISCSI_CONN_AG_CTX_RULE8EN_MASK  0x1
-#define USTORM_ISCSI_CONN_AG_CTX_RULE8EN_SHIFT 7
+#define E4_USTORM_ISCSI_CONN_AG_CTX_RULE1EN_MASK	0x1
+#define E4_USTORM_ISCSI_CONN_AG_CTX_RULE1EN_SHIFT	0
+#define E4_USTORM_ISCSI_CONN_AG_CTX_RULE2EN_MASK	0x1
+#define E4_USTORM_ISCSI_CONN_AG_CTX_RULE2EN_SHIFT	1
+#define E4_USTORM_ISCSI_CONN_AG_CTX_RULE3EN_MASK	0x1
+#define E4_USTORM_ISCSI_CONN_AG_CTX_RULE3EN_SHIFT	2
+#define E4_USTORM_ISCSI_CONN_AG_CTX_RULE4EN_MASK	0x1
+#define E4_USTORM_ISCSI_CONN_AG_CTX_RULE4EN_SHIFT	3
+#define E4_USTORM_ISCSI_CONN_AG_CTX_RULE5EN_MASK	0x1
+#define E4_USTORM_ISCSI_CONN_AG_CTX_RULE5EN_SHIFT	4
+#define E4_USTORM_ISCSI_CONN_AG_CTX_RULE6EN_MASK	0x1
+#define E4_USTORM_ISCSI_CONN_AG_CTX_RULE6EN_SHIFT	5
+#define E4_USTORM_ISCSI_CONN_AG_CTX_RULE7EN_MASK	0x1
+#define E4_USTORM_ISCSI_CONN_AG_CTX_RULE7EN_SHIFT	6
+#define E4_USTORM_ISCSI_CONN_AG_CTX_RULE8EN_MASK	0x1
+#define E4_USTORM_ISCSI_CONN_AG_CTX_RULE8EN_SHIFT	7
 	u8 byte2;
 	u8 byte3;
 	__le16 word0;
@@ -10627,113 +11286,117 @@ struct ustorm_iscsi_conn_ag_ctx {
 	__le16 word3;
 };
 
+/* The iSCSI storm connection context of Tstorm */
 struct tstorm_iscsi_conn_st_ctx {
-	__le32 reserved[40];
+	__le32 reserved[44];
 };
 
-struct mstorm_iscsi_conn_ag_ctx {
+struct e4_mstorm_iscsi_conn_ag_ctx {
 	u8 reserved;
 	u8 state;
 	u8 flags0;
-#define MSTORM_ISCSI_CONN_AG_CTX_BIT0_MASK     0x1
-#define MSTORM_ISCSI_CONN_AG_CTX_BIT0_SHIFT    0
-#define MSTORM_ISCSI_CONN_AG_CTX_BIT1_MASK     0x1
-#define MSTORM_ISCSI_CONN_AG_CTX_BIT1_SHIFT    1
-#define MSTORM_ISCSI_CONN_AG_CTX_CF0_MASK      0x3
-#define MSTORM_ISCSI_CONN_AG_CTX_CF0_SHIFT     2
-#define MSTORM_ISCSI_CONN_AG_CTX_CF1_MASK      0x3
-#define MSTORM_ISCSI_CONN_AG_CTX_CF1_SHIFT     4
-#define MSTORM_ISCSI_CONN_AG_CTX_CF2_MASK      0x3
-#define MSTORM_ISCSI_CONN_AG_CTX_CF2_SHIFT     6
+#define E4_MSTORM_ISCSI_CONN_AG_CTX_BIT0_MASK	0x1
+#define E4_MSTORM_ISCSI_CONN_AG_CTX_BIT0_SHIFT	0
+#define E4_MSTORM_ISCSI_CONN_AG_CTX_BIT1_MASK	0x1
+#define E4_MSTORM_ISCSI_CONN_AG_CTX_BIT1_SHIFT	1
+#define E4_MSTORM_ISCSI_CONN_AG_CTX_CF0_MASK	0x3
+#define E4_MSTORM_ISCSI_CONN_AG_CTX_CF0_SHIFT	2
+#define E4_MSTORM_ISCSI_CONN_AG_CTX_CF1_MASK	0x3
+#define E4_MSTORM_ISCSI_CONN_AG_CTX_CF1_SHIFT	4
+#define E4_MSTORM_ISCSI_CONN_AG_CTX_CF2_MASK	0x3
+#define E4_MSTORM_ISCSI_CONN_AG_CTX_CF2_SHIFT	6
 	u8 flags1;
-#define MSTORM_ISCSI_CONN_AG_CTX_CF0EN_MASK    0x1
-#define MSTORM_ISCSI_CONN_AG_CTX_CF0EN_SHIFT   0
-#define MSTORM_ISCSI_CONN_AG_CTX_CF1EN_MASK    0x1
-#define MSTORM_ISCSI_CONN_AG_CTX_CF1EN_SHIFT   1
-#define MSTORM_ISCSI_CONN_AG_CTX_CF2EN_MASK    0x1
-#define MSTORM_ISCSI_CONN_AG_CTX_CF2EN_SHIFT   2
-#define MSTORM_ISCSI_CONN_AG_CTX_RULE0EN_MASK  0x1
-#define MSTORM_ISCSI_CONN_AG_CTX_RULE0EN_SHIFT 3
-#define MSTORM_ISCSI_CONN_AG_CTX_RULE1EN_MASK  0x1
-#define MSTORM_ISCSI_CONN_AG_CTX_RULE1EN_SHIFT 4
-#define MSTORM_ISCSI_CONN_AG_CTX_RULE2EN_MASK  0x1
-#define MSTORM_ISCSI_CONN_AG_CTX_RULE2EN_SHIFT 5
-#define MSTORM_ISCSI_CONN_AG_CTX_RULE3EN_MASK  0x1
-#define MSTORM_ISCSI_CONN_AG_CTX_RULE3EN_SHIFT 6
-#define MSTORM_ISCSI_CONN_AG_CTX_RULE4EN_MASK  0x1
-#define MSTORM_ISCSI_CONN_AG_CTX_RULE4EN_SHIFT 7
+#define E4_MSTORM_ISCSI_CONN_AG_CTX_CF0EN_MASK		0x1
+#define E4_MSTORM_ISCSI_CONN_AG_CTX_CF0EN_SHIFT		0
+#define E4_MSTORM_ISCSI_CONN_AG_CTX_CF1EN_MASK		0x1
+#define E4_MSTORM_ISCSI_CONN_AG_CTX_CF1EN_SHIFT		1
+#define E4_MSTORM_ISCSI_CONN_AG_CTX_CF2EN_MASK		0x1
+#define E4_MSTORM_ISCSI_CONN_AG_CTX_CF2EN_SHIFT		2
+#define E4_MSTORM_ISCSI_CONN_AG_CTX_RULE0EN_MASK	0x1
+#define E4_MSTORM_ISCSI_CONN_AG_CTX_RULE0EN_SHIFT	3
+#define E4_MSTORM_ISCSI_CONN_AG_CTX_RULE1EN_MASK	0x1
+#define E4_MSTORM_ISCSI_CONN_AG_CTX_RULE1EN_SHIFT	4
+#define E4_MSTORM_ISCSI_CONN_AG_CTX_RULE2EN_MASK	0x1
+#define E4_MSTORM_ISCSI_CONN_AG_CTX_RULE2EN_SHIFT	5
+#define E4_MSTORM_ISCSI_CONN_AG_CTX_RULE3EN_MASK	0x1
+#define E4_MSTORM_ISCSI_CONN_AG_CTX_RULE3EN_SHIFT	6
+#define E4_MSTORM_ISCSI_CONN_AG_CTX_RULE4EN_MASK	0x1
+#define E4_MSTORM_ISCSI_CONN_AG_CTX_RULE4EN_SHIFT	7
 	__le16 word0;
 	__le16 word1;
 	__le32 reg0;
 	__le32 reg1;
 };
 
+/* Combined iSCSI and TCP storm connection context of Mstorm */
 struct mstorm_iscsi_tcp_conn_st_ctx {
 	__le32 reserved_tcp[20];
-	__le32 reserved_iscsi[8];
+	__le32 reserved_iscsi[12];
 };
 
+/* The iSCSI storm context of Ustorm */
 struct ustorm_iscsi_conn_st_ctx {
 	__le32 reserved[52];
 };
 
-struct iscsi_conn_context {
+/* iSCSI connection context */
+struct e4_iscsi_conn_context {
 	struct ystorm_iscsi_conn_st_ctx ystorm_st_context;
-	struct regpair ystorm_st_padding[2];
 	struct pstorm_iscsi_tcp_conn_st_ctx pstorm_st_context;
 	struct regpair pstorm_st_padding[2];
 	struct pb_context xpb2_context;
 	struct xstorm_iscsi_tcp_conn_st_ctx xstorm_st_context;
 	struct regpair xstorm_st_padding[2];
-	struct xstorm_iscsi_conn_ag_ctx xstorm_ag_context;
-	struct tstorm_iscsi_conn_ag_ctx tstorm_ag_context;
+	struct e4_xstorm_iscsi_conn_ag_ctx xstorm_ag_context;
+	struct e4_tstorm_iscsi_conn_ag_ctx tstorm_ag_context;
 	struct regpair tstorm_ag_padding[2];
 	struct timers_context timer_context;
-	struct ustorm_iscsi_conn_ag_ctx ustorm_ag_context;
+	struct e4_ustorm_iscsi_conn_ag_ctx ustorm_ag_context;
 	struct pb_context upb_context;
 	struct tstorm_iscsi_conn_st_ctx tstorm_st_context;
 	struct regpair tstorm_st_padding[2];
-	struct mstorm_iscsi_conn_ag_ctx mstorm_ag_context;
+	struct e4_mstorm_iscsi_conn_ag_ctx mstorm_ag_context;
 	struct mstorm_iscsi_tcp_conn_st_ctx mstorm_st_context;
 	struct ustorm_iscsi_conn_st_ctx ustorm_st_context;
 };
 
+/* iSCSI init params passed by driver to FW in iSCSI init ramrod */
 struct iscsi_init_ramrod_params {
 	struct iscsi_spe_func_init iscsi_init_spe;
 	struct tcp_init_params tcp_init;
 };
 
-struct ystorm_iscsi_conn_ag_ctx {
+struct e4_ystorm_iscsi_conn_ag_ctx {
 	u8 byte0;
 	u8 byte1;
 	u8 flags0;
-#define YSTORM_ISCSI_CONN_AG_CTX_BIT0_MASK     0x1
-#define YSTORM_ISCSI_CONN_AG_CTX_BIT0_SHIFT    0
-#define YSTORM_ISCSI_CONN_AG_CTX_BIT1_MASK     0x1
-#define YSTORM_ISCSI_CONN_AG_CTX_BIT1_SHIFT    1
-#define YSTORM_ISCSI_CONN_AG_CTX_CF0_MASK      0x3
-#define YSTORM_ISCSI_CONN_AG_CTX_CF0_SHIFT     2
-#define YSTORM_ISCSI_CONN_AG_CTX_CF1_MASK      0x3
-#define YSTORM_ISCSI_CONN_AG_CTX_CF1_SHIFT     4
-#define YSTORM_ISCSI_CONN_AG_CTX_CF2_MASK      0x3
-#define YSTORM_ISCSI_CONN_AG_CTX_CF2_SHIFT     6
+#define E4_YSTORM_ISCSI_CONN_AG_CTX_BIT0_MASK	0x1
+#define E4_YSTORM_ISCSI_CONN_AG_CTX_BIT0_SHIFT	0
+#define E4_YSTORM_ISCSI_CONN_AG_CTX_BIT1_MASK	0x1
+#define E4_YSTORM_ISCSI_CONN_AG_CTX_BIT1_SHIFT	1
+#define E4_YSTORM_ISCSI_CONN_AG_CTX_CF0_MASK	0x3
+#define E4_YSTORM_ISCSI_CONN_AG_CTX_CF0_SHIFT	2
+#define E4_YSTORM_ISCSI_CONN_AG_CTX_CF1_MASK	0x3
+#define E4_YSTORM_ISCSI_CONN_AG_CTX_CF1_SHIFT	4
+#define E4_YSTORM_ISCSI_CONN_AG_CTX_CF2_MASK	0x3
+#define E4_YSTORM_ISCSI_CONN_AG_CTX_CF2_SHIFT	6
 	u8 flags1;
-#define YSTORM_ISCSI_CONN_AG_CTX_CF0EN_MASK    0x1
-#define YSTORM_ISCSI_CONN_AG_CTX_CF0EN_SHIFT   0
-#define YSTORM_ISCSI_CONN_AG_CTX_CF1EN_MASK    0x1
-#define YSTORM_ISCSI_CONN_AG_CTX_CF1EN_SHIFT   1
-#define YSTORM_ISCSI_CONN_AG_CTX_CF2EN_MASK    0x1
-#define YSTORM_ISCSI_CONN_AG_CTX_CF2EN_SHIFT   2
-#define YSTORM_ISCSI_CONN_AG_CTX_RULE0EN_MASK  0x1
-#define YSTORM_ISCSI_CONN_AG_CTX_RULE0EN_SHIFT 3
-#define YSTORM_ISCSI_CONN_AG_CTX_RULE1EN_MASK  0x1
-#define YSTORM_ISCSI_CONN_AG_CTX_RULE1EN_SHIFT 4
-#define YSTORM_ISCSI_CONN_AG_CTX_RULE2EN_MASK  0x1
-#define YSTORM_ISCSI_CONN_AG_CTX_RULE2EN_SHIFT 5
-#define YSTORM_ISCSI_CONN_AG_CTX_RULE3EN_MASK  0x1
-#define YSTORM_ISCSI_CONN_AG_CTX_RULE3EN_SHIFT 6
-#define YSTORM_ISCSI_CONN_AG_CTX_RULE4EN_MASK  0x1
-#define YSTORM_ISCSI_CONN_AG_CTX_RULE4EN_SHIFT 7
+#define E4_YSTORM_ISCSI_CONN_AG_CTX_CF0EN_MASK		0x1
+#define E4_YSTORM_ISCSI_CONN_AG_CTX_CF0EN_SHIFT		0
+#define E4_YSTORM_ISCSI_CONN_AG_CTX_CF1EN_MASK		0x1
+#define E4_YSTORM_ISCSI_CONN_AG_CTX_CF1EN_SHIFT		1
+#define E4_YSTORM_ISCSI_CONN_AG_CTX_CF2EN_MASK		0x1
+#define E4_YSTORM_ISCSI_CONN_AG_CTX_CF2EN_SHIFT		2
+#define E4_YSTORM_ISCSI_CONN_AG_CTX_RULE0EN_MASK	0x1
+#define E4_YSTORM_ISCSI_CONN_AG_CTX_RULE0EN_SHIFT	3
+#define E4_YSTORM_ISCSI_CONN_AG_CTX_RULE1EN_MASK	0x1
+#define E4_YSTORM_ISCSI_CONN_AG_CTX_RULE1EN_SHIFT	4
+#define E4_YSTORM_ISCSI_CONN_AG_CTX_RULE2EN_MASK	0x1
+#define E4_YSTORM_ISCSI_CONN_AG_CTX_RULE2EN_SHIFT	5
+#define E4_YSTORM_ISCSI_CONN_AG_CTX_RULE3EN_MASK	0x1
+#define E4_YSTORM_ISCSI_CONN_AG_CTX_RULE3EN_SHIFT	6
+#define E4_YSTORM_ISCSI_CONN_AG_CTX_RULE4EN_MASK	0x1
+#define E4_YSTORM_ISCSI_CONN_AG_CTX_RULE4EN_SHIFT	7
 	u8 byte2;
 	u8 byte3;
 	__le16 word0;
@@ -11613,7 +12276,7 @@ struct public_drv_mb {
 #define DRV_MB_PARAM_DCBX_NOTIFY_MASK		0x000000FF
 #define DRV_MB_PARAM_DCBX_NOTIFY_SHIFT		3
 
-#define DRV_MB_PARAM_NVM_LEN_SHIFT		24
+#define DRV_MB_PARAM_NVM_LEN_OFFSET		24
 
 #define DRV_MB_PARAM_CFG_VF_MSIX_VF_ID_SHIFT	0
 #define DRV_MB_PARAM_CFG_VF_MSIX_VF_ID_MASK	0x000000FF
diff --git a/drivers/net/ethernet/qlogic/qed/qed_hw.c b/drivers/net/ethernet/qlogic/qed/qed_hw.c
index a05feb3..fca2dbd 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_hw.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_hw.c
@@ -807,3 +807,71 @@ int qed_dmae_host2host(struct qed_hwfn *p_hwfn,
 	return rc;
 }
 
+int qed_dmae_sanity(struct qed_hwfn *p_hwfn,
+		    struct qed_ptt *p_ptt, const char *phase)
+{
+	u32 size = PAGE_SIZE / 2, val;
+	struct qed_dmae_params params;
+	int rc = 0;
+	dma_addr_t p_phys;
+	void *p_virt;
+	u32 *p_tmp;
+
+	p_virt = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
+				    2 * size, &p_phys, GFP_KERNEL);
+	if (!p_virt) {
+		DP_NOTICE(p_hwfn,
+			  "DMAE sanity [%s]: failed to allocate memory\n",
+			  phase);
+		return -ENOMEM;
+	}
+
+	/* Fill the bottom half of the allocated memory with a known pattern */
+	for (p_tmp = (u32 *)p_virt;
+	     p_tmp < (u32 *)((u8 *)p_virt + size); p_tmp++) {
+		/* Save the address itself as the value */
+		val = (u32)(uintptr_t)p_tmp;
+		*p_tmp = val;
+	}
+
+	/* Zero the top half of the allocated memory */
+	memset((u8 *)p_virt + size, 0, size);
+
+	DP_VERBOSE(p_hwfn,
+		   QED_MSG_SP,
+		   "DMAE sanity [%s]: src_addr={phys 0x%llx, virt %p}, dst_addr={phys 0x%llx, virt %p}, size 0x%x\n",
+		   phase,
+		   (u64)p_phys,
+		   p_virt, (u64)(p_phys + size), (u8 *)p_virt + size, size);
+
+	memset(&params, 0, sizeof(params));
+	rc = qed_dmae_host2host(p_hwfn, p_ptt, p_phys, p_phys + size,
+				size / 4 /* size_in_dwords */, &params);
+	if (rc) {
+		DP_NOTICE(p_hwfn,
+			  "DMAE sanity [%s]: qed_dmae_host2host() failed. rc = %d.\n",
+			  phase, rc);
+		goto out;
+	}
+
+	/* Verify that the top half of the allocated memory has the pattern */
+	for (p_tmp = (u32 *)((u8 *)p_virt + size);
+	     p_tmp < (u32 *)((u8 *)p_virt + (2 * size)); p_tmp++) {
+		/* The corresponding address in the bottom half */
+		val = (u32)(uintptr_t)p_tmp - size;
+
+		if (*p_tmp != val) {
+			DP_NOTICE(p_hwfn,
+				  "DMAE sanity [%s]: addr={phys 0x%llx, virt %p}, read_val 0x%08x, expected_val 0x%08x\n",
+				  phase,
+				  (u64)p_phys + ((u8 *)p_tmp - (u8 *)p_virt),
+				  p_tmp, *p_tmp, val);
+			rc = -EINVAL;
+			goto out;
+		}
+	}
+
+out:
+	dma_free_coherent(&p_hwfn->cdev->pdev->dev, 2 * size, p_virt, p_phys);
+	return rc;
+}
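/*
 * Editor's usage sketch, not part of the patch: the sanity test fills
 * each dword of the buffer's bottom half with its own (truncated)
 * virtual address, DMAs that half onto the top half, and verifies the
 * copy. The caller and phase string below are illustrative only.
 */
static int example_run_dmae_sanity(struct qed_hwfn *p_hwfn,
				   struct qed_ptt *p_ptt)
{
	int rc = qed_dmae_sanity(p_hwfn, p_ptt, "example-load-phase");

	if (rc)
		DP_NOTICE(p_hwfn, "DMAE sanity failed, rc = %d\n", rc);
	return rc;
}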
diff --git a/drivers/net/ethernet/qlogic/qed/qed_hw.h b/drivers/net/ethernet/qlogic/qed/qed_hw.h
index f2505c6..8db2839 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_hw.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_hw.h
@@ -299,4 +299,8 @@ union qed_qm_pq_params {
 
 int qed_init_fw_data(struct qed_dev *cdev,
 		     const u8 *fw_data);
+
+int qed_dmae_sanity(struct qed_hwfn *p_hwfn,
+		    struct qed_ptt *p_ptt, const char *phase);
+
 #endif
diff --git a/drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c b/drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c
index b069ad0..18fb506 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c
@@ -31,6 +31,7 @@
  */
 
 #include <linux/types.h>
+#include <linux/crc8.h>
 #include <linux/delay.h>
 #include <linux/kernel.h>
 #include <linux/slab.h>
@@ -40,102 +41,197 @@
 #include "qed_init_ops.h"
 #include "qed_reg_addr.h"
 
+#define CDU_VALIDATION_DEFAULT_CFG	61
+
+static u16 con_region_offsets[3][NUM_OF_CONNECTION_TYPES_E4] = {
+	{400, 336, 352, 304, 304, 384, 416, 352},	/* region 3 offsets */
+	{528, 496, 416, 448, 448, 512, 544, 480},	/* region 4 offsets */
+	{608, 544, 496, 512, 576, 592, 624, 560}	/* region 5 offsets */
+};
+
+static u16 task_region_offsets[1][NUM_OF_CONNECTION_TYPES_E4] = {
+	{240, 240, 112, 0, 0, 0, 0, 96}	/* region 1 offsets */
+};
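/*
 * Editor's note (hedged): these per-connection-type offsets appear to
 * locate the type-protected regions within the CDU context, consumed by
 * the context-validation logic this patch introduces (hence the crc8
 * include added above).
 */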
+
 /* General constants */
 #define QM_PQ_MEM_4KB(pq_size)	(pq_size ? DIV_ROUND_UP((pq_size + 1) *	\
 							QM_PQ_ELEMENT_SIZE, \
 							0x1000) : 0)
 #define QM_PQ_SIZE_256B(pq_size)	(pq_size ? DIV_ROUND_UP(pq_size, \
 								0x100) - 1 : 0)
-#define QM_INVALID_PQ_ID                        0xffff
+#define QM_INVALID_PQ_ID		0xffff
+
 /* Feature enable */
-#define QM_BYPASS_EN                            1
-#define QM_BYTE_CRD_EN                          1
+#define QM_BYPASS_EN	1
+#define QM_BYTE_CRD_EN	1
+
 /* Other PQ constants */
-#define QM_OTHER_PQS_PER_PF                     4
+#define QM_OTHER_PQS_PER_PF	4
+
 /* WFQ constants */
-#define QM_WFQ_UPPER_BOUND		62500000
-#define QM_WFQ_VP_PQ_VOQ_SHIFT          0
-#define QM_WFQ_VP_PQ_PF_SHIFT           5
-#define QM_WFQ_INC_VAL(weight)          ((weight) * 0x9000)
-#define QM_WFQ_MAX_INC_VAL                      43750000
+
+/* Upper bound in MB, 10 * burst size of 1ms in 50Gbps */
+#define QM_WFQ_UPPER_BOUND	62500000
+
+/* Bit of VOQ in WFQ VP PQ map */
+#define QM_WFQ_VP_PQ_VOQ_SHIFT	0
+
+/* Bit of PF in WFQ VP PQ map */
+#define QM_WFQ_VP_PQ_PF_E4_SHIFT	5
+
+/* 0x9000 = 4*9*1024 */
+#define QM_WFQ_INC_VAL(weight)	((weight) * 0x9000)
+
+/* Max WFQ increment value is 0.7 * upper bound */
+#define QM_WFQ_MAX_INC_VAL	((QM_WFQ_UPPER_BOUND * 7) / 10)
 
 /* RL constants */
-#define QM_RL_UPPER_BOUND                       62500000
-#define QM_RL_PERIOD                            5               /* in us */
-#define QM_RL_PERIOD_CLK_25M            (25 * QM_RL_PERIOD)
-#define QM_RL_MAX_INC_VAL                       43750000
-#define QM_RL_INC_VAL(rate)		max_t(u32,	\
-					      (u32)(((rate ? rate : \
-						      1000000) *    \
-						     QM_RL_PERIOD * \
-						     101) / (8 * 100)), 1)
+
+/* Period in us */
+#define QM_RL_PERIOD	5
+
+/* Period in 25MHz cycles */
+#define QM_RL_PERIOD_CLK_25M	(25 * QM_RL_PERIOD)
+
+/* RL increment value - rate is specified in Mbps */
+#define QM_RL_INC_VAL(rate) ({ \
+	typeof(rate) __rate = (rate); \
+	max_t(u32, \
+	      (u32)(((__rate ? __rate : 1000000) * QM_RL_PERIOD * 101) / \
+		    (8 * 100)), \
+	      1); })
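/*
 * Worked example (editor's arithmetic, not from the patch): with
 * QM_RL_PERIOD = 5 us, a 25000 Mbps rate yields
 *   (25000 * 5 * 101) / (8 * 100) = 12625000 / 800 = 15781
 * credits per period (the 101/100 factor adds a ~1% margin), while a
 * zero rate falls back to the 1000000 Mbps default, giving 631250.
 */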
+
+/* PF RL Upper bound is set to 10 * burst size of 1ms in 50Gbps */
+#define QM_PF_RL_UPPER_BOUND	62500000
+
+/* Max PF RL increment value is 0.7 * upper bound */
+#define QM_PF_RL_MAX_INC_VAL	((QM_PF_RL_UPPER_BOUND * 7) / 10)
+
+/* Vport RL Upper bound, link speed is in Mbps */
+#define QM_VP_RL_UPPER_BOUND(speed)	((u32)max_t(u32, \
+						    QM_RL_INC_VAL(speed), \
+						    9700 + 1000))
+
+/* Max Vport RL increment value is the Vport RL upper bound */
+#define QM_VP_RL_MAX_INC_VAL(speed)	QM_VP_RL_UPPER_BOUND(speed)
+
+/* Vport RL credit threshold in case of QM bypass */
+#define QM_VP_RL_BYPASS_THRESH_SPEED	(QM_VP_RL_UPPER_BOUND(10000) - 1)
+
 /* AFullOprtnstcCrdMask constants */
-#define QM_OPPOR_LINE_VOQ_DEF           1
-#define QM_OPPOR_FW_STOP_DEF            0
-#define QM_OPPOR_PQ_EMPTY_DEF           1
+#define QM_OPPOR_LINE_VOQ_DEF	1
+#define QM_OPPOR_FW_STOP_DEF	0
+#define QM_OPPOR_PQ_EMPTY_DEF	1
+
 /* Command Queue constants */
-#define PBF_CMDQ_PURE_LB_LINES                          150
-#define PBF_CMDQ_LINES_RT_OFFSET(voq)           (		 \
-		PBF_REG_YCMD_QS_NUM_LINES_VOQ0_RT_OFFSET + voq * \
-		(PBF_REG_YCMD_QS_NUM_LINES_VOQ1_RT_OFFSET -	 \
-		 PBF_REG_YCMD_QS_NUM_LINES_VOQ0_RT_OFFSET))
-#define PBF_BTB_GUARANTEED_RT_OFFSET(voq)       (	      \
-		PBF_REG_BTB_GUARANTEED_VOQ0_RT_OFFSET + voq * \
-		(PBF_REG_BTB_GUARANTEED_VOQ1_RT_OFFSET -      \
-		 PBF_REG_BTB_GUARANTEED_VOQ0_RT_OFFSET))
-#define QM_VOQ_LINE_CRD(pbf_cmd_lines)          ((((pbf_cmd_lines) - \
-						   4) *		     \
-						  2) | QM_LINE_CRD_REG_SIGN_BIT)
+
+/* Pure LB CmdQ lines (+spare) */
+#define PBF_CMDQ_PURE_LB_LINES	150
+
+#define PBF_CMDQ_LINES_E5_RSVD_RATIO	8
+
+#define PBF_CMDQ_LINES_RT_OFFSET(ext_voq) \
+	(PBF_REG_YCMD_QS_NUM_LINES_VOQ0_RT_OFFSET + \
+	 (ext_voq) * (PBF_REG_YCMD_QS_NUM_LINES_VOQ1_RT_OFFSET - \
+		PBF_REG_YCMD_QS_NUM_LINES_VOQ0_RT_OFFSET))
+
+#define PBF_BTB_GUARANTEED_RT_OFFSET(ext_voq) \
+	(PBF_REG_BTB_GUARANTEED_VOQ0_RT_OFFSET + \
+	 (ext_voq) * (PBF_REG_BTB_GUARANTEED_VOQ1_RT_OFFSET - \
+		PBF_REG_BTB_GUARANTEED_VOQ0_RT_OFFSET))
+
+#define QM_VOQ_LINE_CRD(pbf_cmd_lines) \
+	((((pbf_cmd_lines) - 4) * 2) | QM_LINE_CRD_REG_SIGN_BIT)
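/*
 * Worked example (editor's arithmetic): for the 150 pure-LB lines above,
 * QM_VOQ_LINE_CRD(150) yields (150 - 4) * 2 = 292 line credits, ORed
 * with the credit register's sign bit.
 */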
+
 /* BTB: blocks constants (block size = 256B) */
-#define BTB_JUMBO_PKT_BLOCKS            38
-#define BTB_HEADROOM_BLOCKS                     BTB_JUMBO_PKT_BLOCKS
-#define BTB_PURE_LB_FACTOR                      10
-#define BTB_PURE_LB_RATIO                       7
+
+/* 256B blocks in 9700B packet */
+#define BTB_JUMBO_PKT_BLOCKS	38
+
+/* Headroom per port */
+#define BTB_HEADROOM_BLOCKS	BTB_JUMBO_PKT_BLOCKS
+#define BTB_PURE_LB_FACTOR	10
+
+/* Factored (hence really 0.7) */
+#define BTB_PURE_LB_RATIO	7
+
 /* QM stop command constants */
-#define QM_STOP_PQ_MASK_WIDTH           32
-#define QM_STOP_CMD_ADDR                2
-#define QM_STOP_CMD_STRUCT_SIZE         2
-#define QM_STOP_CMD_PAUSE_MASK_OFFSET   0
-#define QM_STOP_CMD_PAUSE_MASK_SHIFT    0
-#define QM_STOP_CMD_PAUSE_MASK_MASK     -1
-#define QM_STOP_CMD_GROUP_ID_OFFSET     1
-#define QM_STOP_CMD_GROUP_ID_SHIFT      16
-#define QM_STOP_CMD_GROUP_ID_MASK       15
-#define QM_STOP_CMD_PQ_TYPE_OFFSET      1
-#define QM_STOP_CMD_PQ_TYPE_SHIFT       24
-#define QM_STOP_CMD_PQ_TYPE_MASK        1
-#define QM_STOP_CMD_MAX_POLL_COUNT      100
-#define QM_STOP_CMD_POLL_PERIOD_US      500
+#define QM_STOP_PQ_MASK_WIDTH		32
+#define QM_STOP_CMD_ADDR		2
+#define QM_STOP_CMD_STRUCT_SIZE		2
+#define QM_STOP_CMD_PAUSE_MASK_OFFSET	0
+#define QM_STOP_CMD_PAUSE_MASK_SHIFT	0
+#define QM_STOP_CMD_PAUSE_MASK_MASK	-1
+#define QM_STOP_CMD_GROUP_ID_OFFSET	1
+#define QM_STOP_CMD_GROUP_ID_SHIFT	16
+#define QM_STOP_CMD_GROUP_ID_MASK	15
+#define QM_STOP_CMD_PQ_TYPE_OFFSET	1
+#define QM_STOP_CMD_PQ_TYPE_SHIFT	24
+#define QM_STOP_CMD_PQ_TYPE_MASK	1
+#define QM_STOP_CMD_MAX_POLL_COUNT	100
+#define QM_STOP_CMD_POLL_PERIOD_US	500
 
 /* QM command macros */
-#define QM_CMD_STRUCT_SIZE(cmd)			cmd ## \
-	_STRUCT_SIZE
-#define QM_CMD_SET_FIELD(var, cmd, field,				  \
-			 value)        SET_FIELD(var[cmd ## _ ## field ## \
-						     _OFFSET],		  \
-						 cmd ## _ ## field,	  \
-						 value)
-/* QM: VOQ macros */
-#define PHYS_VOQ(port, tc, max_phys_tcs_per_port) ((port) *	\
-						   (max_phys_tcs_per_port) + \
-						   (tc))
-#define LB_VOQ(port)				( \
-		MAX_PHYS_VOQS + (port))
-#define VOQ(port, tc, max_phy_tcs_pr_port)	\
-	((tc) <		\
-	 LB_TC ? PHYS_VOQ(port,		\
-			  tc,			 \
-			  max_phy_tcs_pr_port) \
-		: LB_VOQ(port))
+#define QM_CMD_STRUCT_SIZE(cmd)	cmd ## _STRUCT_SIZE
+#define QM_CMD_SET_FIELD(var, cmd, field, value) \
+	SET_FIELD(var[cmd ## _ ## field ## _OFFSET], \
+		  cmd ## _ ## field, \
+		  value)
+
+#define QM_INIT_TX_PQ_MAP(p_hwfn, map, chip, pq_id, rl_valid, vp_pq_id, rl_id, \
+			  ext_voq, wrr) \
+	do { \
+		typeof(map) __map; \
+		memset(&__map, 0, sizeof(__map)); \
+		SET_FIELD(__map.reg, QM_RF_PQ_MAP_ ## chip ## _PQ_VALID, 1); \
+		SET_FIELD(__map.reg, QM_RF_PQ_MAP_ ## chip ## _RL_VALID, \
+			  rl_valid); \
+		SET_FIELD(__map.reg, QM_RF_PQ_MAP_ ## chip ## _VP_PQ_ID, \
+			  vp_pq_id); \
+		SET_FIELD(__map.reg, QM_RF_PQ_MAP_ ## chip ## _RL_ID, rl_id); \
+		SET_FIELD(__map.reg, QM_RF_PQ_MAP_ ## chip ## _VOQ, ext_voq); \
+		SET_FIELD(__map.reg, \
+			  QM_RF_PQ_MAP_ ## chip ## _WRR_WEIGHT_GROUP, wrr); \
+		STORE_RT_REG(p_hwfn, QM_REG_TXPQMAP_RT_OFFSET + (pq_id), \
+			     *((u32 *)&__map)); \
+		(map) = __map; \
+	} while (0)
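/*
 * Editor's note: the 'chip' argument is token-pasted into the field
 * names, so an E4 invocation resolves SET_FIELD() against
 * QM_RF_PQ_MAP_E4_PQ_VALID, QM_RF_PQ_MAP_E4_RL_VALID, and so on, before
 * the assembled map word is stored to the PQ's runtime register.
 */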
+
+#define WRITE_PQ_INFO_TO_RAM	1
+#define PQ_INFO_ELEMENT(vp, pf, tc, port, rl_valid, rl) \
+	(((vp) << 0) | ((pf) << 12) | ((tc) << 16) | ((port) << 20) | \
+	((rl_valid) << 22) | ((rl) << 24))
+#define PQ_INFO_RAM_GRC_ADDRESS(pq_id) \
+	(XSEM_REG_FAST_MEMORY + SEM_FAST_REG_INT_RAM + 21776 + (pq_id) * 4)
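/*
 * Worked packing example (editor's arithmetic): PQ_INFO_ELEMENT(5, 1, 3,
 * 0, 1, 2) -- vport 5, PF 1, TC 3, port 0, rl_valid 1, RL 2 -- yields
 * 0x5 | (1 << 12) | (3 << 16) | (1 << 22) | (2 << 24) = 0x02431005.
 */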
+
 /******************** INTERNAL IMPLEMENTATION *********************/
+
+/* Returns the external VOQ number */
+static u8 qed_get_ext_voq(struct qed_hwfn *p_hwfn,
+			  u8 port_id, u8 tc, u8 max_phys_tcs_per_port)
+{
+	if (tc == PURE_LB_TC)
+		return NUM_OF_PHYS_TCS * MAX_NUM_PORTS_BB + port_id;
+	else
+		return port_id * max_phys_tcs_per_port + tc;
+}
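/*
 * Illustration with assumed constants (NUM_OF_PHYS_TCS = 8,
 * MAX_NUM_PORTS_BB = 2; neither value is shown in this hunk): port 1,
 * TC 2 with max_phys_tcs_per_port = 4 maps to VOQ 1 * 4 + 2 = 6, while
 * port 1's pure-LB TC maps past all physical VOQs, to 8 * 2 + 1 = 17.
 */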
+
 /* Prepare PF RL enable/disable runtime init values */
 static void qed_enable_pf_rl(struct qed_hwfn *p_hwfn, bool pf_rl_en)
 {
 	STORE_RT_REG(p_hwfn, QM_REG_RLPFENABLE_RT_OFFSET, pf_rl_en ? 1 : 0);
 	if (pf_rl_en) {
+		u8 num_ext_voqs = MAX_NUM_VOQS_E4;
+		u64 voq_bit_mask = ((u64)1 << num_ext_voqs) - 1;
+
 		/* Enable RLs for all VOQs */
-		STORE_RT_REG(p_hwfn, QM_REG_RLPFVOQENABLE_RT_OFFSET,
-			     (1 << MAX_NUM_VOQS) - 1);
+		STORE_RT_REG(p_hwfn,
+			     QM_REG_RLPFVOQENABLE_RT_OFFSET,
+			     (u32)voq_bit_mask);
+		if (num_ext_voqs >= 32)
+			STORE_RT_REG(p_hwfn, QM_REG_RLPFVOQENABLE_MSB_RT_OFFSET,
+				     (u32)(voq_bit_mask >> 32));
+
 		/* Write RL period */
 		STORE_RT_REG(p_hwfn,
 			     QM_REG_RLPFPERIOD_RT_OFFSET, QM_RL_PERIOD_CLK_25M);
@@ -147,7 +243,7 @@ static void qed_enable_pf_rl(struct qed_hwfn *p_hwfn, bool pf_rl_en)
 		if (QM_BYPASS_EN)
 			STORE_RT_REG(p_hwfn,
 				     QM_REG_AFULLQMBYPTHRPFRL_RT_OFFSET,
-				     QM_RL_UPPER_BOUND);
+				     QM_PF_RL_UPPER_BOUND);
 	}
 }
 
@@ -181,7 +277,7 @@ static void qed_enable_vport_rl(struct qed_hwfn *p_hwfn, bool vport_rl_en)
 		if (QM_BYPASS_EN)
 			STORE_RT_REG(p_hwfn,
 				     QM_REG_AFULLQMBYPTHRGLBLRL_RT_OFFSET,
-				     QM_RL_UPPER_BOUND);
+				     QM_VP_RL_BYPASS_THRESH_SPEED);
 	}
 }
 
@@ -202,15 +298,15 @@ static void qed_enable_vport_wfq(struct qed_hwfn *p_hwfn, bool vport_wfq_en)
  * the specified VOQ.
  */
 static void qed_cmdq_lines_voq_rt_init(struct qed_hwfn *p_hwfn,
-				       u8 voq, u16 cmdq_lines)
+				       u8 ext_voq, u16 cmdq_lines)
 {
-	u32 qm_line_crd;
+	u32 qm_line_crd = QM_VOQ_LINE_CRD(cmdq_lines);
 
-	qm_line_crd = QM_VOQ_LINE_CRD(cmdq_lines);
-	OVERWRITE_RT_REG(p_hwfn, PBF_CMDQ_LINES_RT_OFFSET(voq),
+	OVERWRITE_RT_REG(p_hwfn, PBF_CMDQ_LINES_RT_OFFSET(ext_voq),
 			 (u32)cmdq_lines);
-	STORE_RT_REG(p_hwfn, QM_REG_VOQCRDLINE_RT_OFFSET + voq, qm_line_crd);
-	STORE_RT_REG(p_hwfn, QM_REG_VOQINITCRDLINE_RT_OFFSET + voq,
+	STORE_RT_REG(p_hwfn, QM_REG_VOQCRDLINE_RT_OFFSET + ext_voq,
+		     qm_line_crd);
+	STORE_RT_REG(p_hwfn, QM_REG_VOQINITCRDLINE_RT_OFFSET + ext_voq,
 		     qm_line_crd);
 }
 
@@ -221,43 +317,52 @@ static void qed_cmdq_lines_rt_init(
 	u8 max_phys_tcs_per_port,
 	struct init_qm_port_params port_params[MAX_NUM_PORTS])
 {
-	u8 tc, voq, port_id, num_tcs_in_port;
+	u8 tc, ext_voq, port_id, num_tcs_in_port;
+	u8 num_ext_voqs = MAX_NUM_VOQS_E4;
 
-	/* Clear PBF lines for all VOQs */
-	for (voq = 0; voq < MAX_NUM_VOQS; voq++)
-		STORE_RT_REG(p_hwfn, PBF_CMDQ_LINES_RT_OFFSET(voq), 0);
+	/* Clear PBF lines of all VOQs */
+	for (ext_voq = 0; ext_voq < num_ext_voqs; ext_voq++)
+		STORE_RT_REG(p_hwfn, PBF_CMDQ_LINES_RT_OFFSET(ext_voq), 0);
+
 	for (port_id = 0; port_id < max_ports_per_engine; port_id++) {
-		if (port_params[port_id].active) {
-			u16 phys_lines, phys_lines_per_tc;
+		u16 phys_lines, phys_lines_per_tc;
 
-			/* find #lines to divide between active phys TCs */
-			phys_lines = port_params[port_id].num_pbf_cmd_lines -
-				     PBF_CMDQ_PURE_LB_LINES;
-			/* find #lines per active physical TC */
-			num_tcs_in_port = 0;
-			for (tc = 0; tc < NUM_OF_PHYS_TCS; tc++) {
-				if (((port_params[port_id].active_phys_tcs >>
-				      tc) & 0x1) == 1)
-					num_tcs_in_port++;
-			}
+		if (!port_params[port_id].active)
+			continue;
 
-			phys_lines_per_tc = phys_lines / num_tcs_in_port;
-			/* init registers per active TC */
-			for (tc = 0; tc < NUM_OF_PHYS_TCS; tc++) {
-				if (((port_params[port_id].active_phys_tcs >>
-				      tc) & 0x1) != 1)
-					continue;
+		/* Find the number of command queue lines to divide between
+		 * the active physical TCs. In E5, 1/8 of the lines are
+		 * reserved. The lines for the pure LB TC are subtracted
+		 * first.
+		 */
+		phys_lines = port_params[port_id].num_pbf_cmd_lines;
+		phys_lines -= PBF_CMDQ_PURE_LB_LINES;
 
-				voq = PHYS_VOQ(port_id, tc,
-					       max_phys_tcs_per_port);
-				qed_cmdq_lines_voq_rt_init(p_hwfn, voq,
+		/* Find #lines per active physical TC */
+		num_tcs_in_port = 0;
+		for (tc = 0; tc < max_phys_tcs_per_port; tc++)
+			if (((port_params[port_id].active_phys_tcs >>
+			      tc) & 0x1) == 1)
+				num_tcs_in_port++;
+		phys_lines_per_tc = phys_lines / num_tcs_in_port;
+
+		/* Init registers per active TC */
+		for (tc = 0; tc < max_phys_tcs_per_port; tc++) {
+			ext_voq = qed_get_ext_voq(p_hwfn,
+						  port_id,
+						  tc, max_phys_tcs_per_port);
+			if (((port_params[port_id].active_phys_tcs >>
+			      tc) & 0x1) == 1)
+				qed_cmdq_lines_voq_rt_init(p_hwfn,
+							   ext_voq,
 							   phys_lines_per_tc);
-			}
-
-			/* init registers for pure LB TC */
-			qed_cmdq_lines_voq_rt_init(p_hwfn, LB_VOQ(port_id),
-						   PBF_CMDQ_PURE_LB_LINES);
 		}
+
+		/* Init registers for pure LB TC */
+		ext_voq = qed_get_ext_voq(p_hwfn,
+					  port_id,
+					  PURE_LB_TC, max_phys_tcs_per_port);
+		qed_cmdq_lines_voq_rt_init(p_hwfn,
+					   ext_voq, PBF_CMDQ_PURE_LB_LINES);
 	}
 }
 
@@ -268,11 +373,9 @@ static void qed_btb_blocks_rt_init(
 	struct init_qm_port_params port_params[MAX_NUM_PORTS])
 {
 	u32 usable_blocks, pure_lb_blocks, phys_blocks;
-	u8 tc, voq, port_id, num_tcs_in_port;
+	u8 tc, ext_voq, port_id, num_tcs_in_port;
 
 	for (port_id = 0; port_id < max_ports_per_engine; port_id++) {
-		u32 temp;
-
 		if (!port_params[port_id].active)
 			continue;
 
@@ -280,13 +383,14 @@ static void qed_btb_blocks_rt_init(
 		usable_blocks = port_params[port_id].num_btb_blocks -
 				BTB_HEADROOM_BLOCKS;
 
-		/* find blocks per physical TC */
+		/* Find blocks per physical TC. Use a factor to avoid
+		 * floating-point arithmetic.
+		 */
 		num_tcs_in_port = 0;
-		for (tc = 0; tc < NUM_OF_PHYS_TCS; tc++) {
+		for (tc = 0; tc < NUM_OF_PHYS_TCS; tc++)
 			if (((port_params[port_id].active_phys_tcs >>
 			      tc) & 0x1) == 1)
 				num_tcs_in_port++;
-		}
 
 		pure_lb_blocks = (usable_blocks * BTB_PURE_LB_FACTOR) /
 				 (num_tcs_in_port * BTB_PURE_LB_FACTOR +
@@ -299,47 +403,55 @@ static void qed_btb_blocks_rt_init(
 		/* Init physical TCs */
 		for (tc = 0; tc < NUM_OF_PHYS_TCS; tc++) {
 			if (((port_params[port_id].active_phys_tcs >>
-			      tc) & 0x1) != 1)
-				continue;
-
-			voq = PHYS_VOQ(port_id, tc,
-				       max_phys_tcs_per_port);
-			STORE_RT_REG(p_hwfn, PBF_BTB_GUARANTEED_RT_OFFSET(voq),
-				     phys_blocks);
+			      tc) & 0x1) == 1) {
+				ext_voq =
+					qed_get_ext_voq(p_hwfn,
+							port_id,
+							tc,
+							max_phys_tcs_per_port);
+				STORE_RT_REG(p_hwfn,
+					     PBF_BTB_GUARANTEED_RT_OFFSET
+					     (ext_voq), phys_blocks);
+			}
 		}
 
 		/* Init pure LB TC */
-		temp = LB_VOQ(port_id);
-		STORE_RT_REG(p_hwfn, PBF_BTB_GUARANTEED_RT_OFFSET(temp),
+		ext_voq = qed_get_ext_voq(p_hwfn,
+					  port_id,
+					  PURE_LB_TC, max_phys_tcs_per_port);
+		STORE_RT_REG(p_hwfn, PBF_BTB_GUARANTEED_RT_OFFSET(ext_voq),
 			     pure_lb_blocks);
 	}
 }
 
 /* Prepare Tx PQ mapping runtime init values for the specified PF */
-static void qed_tx_pq_map_rt_init(
-	struct qed_hwfn *p_hwfn,
-	struct qed_ptt *p_ptt,
-	struct qed_qm_pf_rt_init_params *p_params,
-	u32 base_mem_addr_4kb)
+static void qed_tx_pq_map_rt_init(struct qed_hwfn *p_hwfn,
+				  struct qed_ptt *p_ptt,
+				  struct qed_qm_pf_rt_init_params *p_params,
+				  u32 base_mem_addr_4kb)
 {
-	struct init_qm_vport_params *vport_params = p_params->vport_params;
-	u16 num_pqs = p_params->num_pf_pqs + p_params->num_vf_pqs;
-	u16 first_pq_group = p_params->start_pq / QM_PF_QUEUE_GROUP_SIZE;
-	u16 last_pq_group = (p_params->start_pq + num_pqs - 1) /
-			    QM_PF_QUEUE_GROUP_SIZE;
-	u16 i, pq_id, pq_group;
-
-	/* A bit per Tx PQ indicating if the PQ is associated with a VF */
 	u32 tx_pq_vf_mask[MAX_QM_TX_QUEUES / QM_PF_QUEUE_GROUP_SIZE] = { 0 };
+	struct init_qm_vport_params *vport_params = p_params->vport_params;
 	u32 num_tx_pq_vf_masks = MAX_QM_TX_QUEUES / QM_PF_QUEUE_GROUP_SIZE;
-	u32 pq_mem_4kb = QM_PQ_MEM_4KB(p_params->num_pf_cids);
-	u32 vport_pq_mem_4kb = QM_PQ_MEM_4KB(p_params->num_vf_cids);
-	u32 mem_addr_4kb = base_mem_addr_4kb;
+	u16 num_pqs, first_pq_group, last_pq_group, i, j, pq_id, pq_group;
+	struct init_qm_pq_params *pq_params = p_params->pq_params;
+	u32 pq_mem_4kb, vport_pq_mem_4kb, mem_addr_4kb;
+
+	num_pqs = p_params->num_pf_pqs + p_params->num_vf_pqs;
+
+	first_pq_group = p_params->start_pq / QM_PF_QUEUE_GROUP_SIZE;
+	last_pq_group = (p_params->start_pq + num_pqs - 1) /
+			QM_PF_QUEUE_GROUP_SIZE;
+
+	pq_mem_4kb = QM_PQ_MEM_4KB(p_params->num_pf_cids);
+	vport_pq_mem_4kb = QM_PQ_MEM_4KB(p_params->num_vf_cids);
+	mem_addr_4kb = base_mem_addr_4kb;
 
 	/* Set mapping from PQ group to PF */
 	for (pq_group = first_pq_group; pq_group <= last_pq_group; pq_group++)
 		STORE_RT_REG(p_hwfn, QM_REG_PQTX2PF_0_RT_OFFSET + pq_group,
 			     (u32)(p_params->pf_id));
+
 	/* Set PQ sizes */
 	STORE_RT_REG(p_hwfn, QM_REG_MAXPQSIZE_0_RT_OFFSET,
 		     QM_PQ_SIZE_256B(p_params->num_pf_cids));
@@ -348,58 +460,82 @@ static void qed_tx_pq_map_rt_init(
 
 	/* Go over all Tx PQs */
 	for (i = 0, pq_id = p_params->start_pq; i < num_pqs; i++, pq_id++) {
-		u8 voq = VOQ(p_params->port_id, p_params->pq_params[i].tc_id,
-			     p_params->max_phys_tcs_per_port);
-		bool is_vf_pq = (i >= p_params->num_pf_pqs);
-		struct qm_rf_pq_map tx_pq_map;
+		u8 ext_voq, vport_id_in_pf, tc_id = pq_params[i].tc_id;
+		u32 max_qm_global_rls = MAX_QM_GLOBAL_RLS;
+		struct qm_rf_pq_map_e4 tx_pq_map;
+		bool is_vf_pq, rl_valid;
+		u16 *p_first_tx_pq_id;
 
-		bool rl_valid = p_params->pq_params[i].rl_valid &&
-				(p_params->pq_params[i].vport_id <
-				 MAX_QM_GLOBAL_RLS);
+		ext_voq = qed_get_ext_voq(p_hwfn,
+					  p_params->port_id,
+					  tc_id,
+					  p_params->max_phys_tcs_per_port);
+		is_vf_pq = (i >= p_params->num_pf_pqs);
+		rl_valid = pq_params[i].rl_valid &&
+			   pq_params[i].vport_id < max_qm_global_rls;
 
 		/* Update first Tx PQ of VPORT/TC */
-		u8 vport_id_in_pf = p_params->pq_params[i].vport_id -
-				    p_params->start_vport;
-		u16 *pq_ids = &vport_params[vport_id_in_pf].first_tx_pq_id[0];
-		u16 first_tx_pq_id = pq_ids[p_params->pq_params[i].tc_id];
+		vport_id_in_pf = pq_params[i].vport_id - p_params->start_vport;
+		p_first_tx_pq_id =
+		    &vport_params[vport_id_in_pf].first_tx_pq_id[tc_id];
+		if (*p_first_tx_pq_id == QM_INVALID_PQ_ID) {
+			u32 map_val =
+				(ext_voq << QM_WFQ_VP_PQ_VOQ_SHIFT) |
+				(p_params->pf_id << QM_WFQ_VP_PQ_PF_E4_SHIFT);
 
-		if (first_tx_pq_id == QM_INVALID_PQ_ID) {
 			/* Create new VP PQ */
-			pq_ids[p_params->pq_params[i].tc_id] = pq_id;
-			first_tx_pq_id = pq_id;
+			*p_first_tx_pq_id = pq_id;
 
 			/* Map VP PQ to VOQ and PF */
 			STORE_RT_REG(p_hwfn,
 				     QM_REG_WFQVPMAP_RT_OFFSET +
-				     first_tx_pq_id,
-				     (voq << QM_WFQ_VP_PQ_VOQ_SHIFT) |
-				     (p_params->pf_id <<
-				      QM_WFQ_VP_PQ_PF_SHIFT));
+				     *p_first_tx_pq_id,
+				     map_val);
 		}
 
-		if (p_params->pq_params[i].rl_valid && !rl_valid)
+		/* Check RL ID */
+		if (pq_params[i].rl_valid && pq_params[i].vport_id >=
+		    max_qm_global_rls)
 			DP_NOTICE(p_hwfn,
-				  "Invalid VPORT ID for rate limiter configuration");
-		/* Fill PQ map entry */
-		memset(&tx_pq_map, 0, sizeof(tx_pq_map));
-		SET_FIELD(tx_pq_map.reg, QM_RF_PQ_MAP_PQ_VALID, 1);
-		SET_FIELD(tx_pq_map.reg,
-			  QM_RF_PQ_MAP_RL_VALID, rl_valid ? 1 : 0);
-		SET_FIELD(tx_pq_map.reg, QM_RF_PQ_MAP_VP_PQ_ID, first_tx_pq_id);
-		SET_FIELD(tx_pq_map.reg, QM_RF_PQ_MAP_RL_ID,
-			  rl_valid ?
-			  p_params->pq_params[i].vport_id : 0);
-		SET_FIELD(tx_pq_map.reg, QM_RF_PQ_MAP_VOQ, voq);
-		SET_FIELD(tx_pq_map.reg, QM_RF_PQ_MAP_WRR_WEIGHT_GROUP,
-			  p_params->pq_params[i].wrr_group);
-		/* Write PQ map entry to CAM */
-		STORE_RT_REG(p_hwfn, QM_REG_TXPQMAP_RT_OFFSET + pq_id,
-			     *((u32 *)&tx_pq_map));
-		/* Set base address */
+				  "Invalid VPORT ID for rate limiter configuration\n");
+
+		/* Prepare PQ map entry */
+		QM_INIT_TX_PQ_MAP(p_hwfn,
+				  tx_pq_map,
+				  E4,
+				  pq_id,
+				  rl_valid ? 1 : 0,
+				  *p_first_tx_pq_id,
+				  rl_valid ? pq_params[i].vport_id : 0,
+				  ext_voq, pq_params[i].wrr_group);
+
+		/* Set PQ base address */
 		STORE_RT_REG(p_hwfn,
 			     QM_REG_BASEADDRTXPQ_RT_OFFSET + pq_id,
 			     mem_addr_4kb);
 
+		/* Clear PQ pointer table entry (64 bit) */
+		if (p_params->is_pf_loading)
+			for (j = 0; j < 2; j++)
+				STORE_RT_REG(p_hwfn,
+					     QM_REG_PTRTBLTX_RT_OFFSET +
+					     (pq_id * 2) + j, 0);
+
+		/* Write PQ info to RAM */
+		if (WRITE_PQ_INFO_TO_RAM != 0) {
+			u32 pq_info = 0;
+
+			pq_info = PQ_INFO_ELEMENT(*p_first_tx_pq_id,
+						  p_params->pf_id,
+						  tc_id,
+						  p_params->port_id,
+						  rl_valid ? 1 : 0,
+						  rl_valid ?
+						  pq_params[i].vport_id : 0);
+			qed_wr(p_hwfn, p_ptt, PQ_INFO_RAM_GRC_ADDRESS(pq_id),
+			       pq_info);
+		}
+
 		/* If VF PQ, add indication to PQ VF mask */
 		if (is_vf_pq) {
 			tx_pq_vf_mask[pq_id /
@@ -421,16 +557,16 @@ static void qed_tx_pq_map_rt_init(
 
 /* Prepare Other PQ mapping runtime init values for the specified PF */
 static void qed_other_pq_map_rt_init(struct qed_hwfn *p_hwfn,
-				     u8 port_id,
 				     u8 pf_id,
+				     bool is_pf_loading,
 				     u32 num_pf_cids,
 				     u32 num_tids, u32 base_mem_addr_4kb)
 {
 	u32 pq_size, pq_mem_4kb, mem_addr_4kb;
-	u16 i, pq_id, pq_group;
+	u16 i, j, pq_id, pq_group;
 
-	/* a single other PQ group is used in each PF,
-	 * where PQ group i is used in PF i.
+	/* A single other PQ group is used in each PF, where PQ group i is used
+	 * in PF i.
 	 */
 	pq_group = pf_id;
 	pq_size = num_pf_cids + num_tids;
@@ -440,16 +576,25 @@ static void qed_other_pq_map_rt_init(struct qed_hwfn *p_hwfn,
 	/* Map PQ group to PF */
 	STORE_RT_REG(p_hwfn, QM_REG_PQOTHER2PF_0_RT_OFFSET + pq_group,
 		     (u32)(pf_id));
+
 	/* Set PQ sizes */
 	STORE_RT_REG(p_hwfn, QM_REG_MAXPQSIZE_2_RT_OFFSET,
 		     QM_PQ_SIZE_256B(pq_size));
 
-	/* Set base address */
 	for (i = 0, pq_id = pf_id * QM_PF_QUEUE_GROUP_SIZE;
 	     i < QM_OTHER_PQS_PER_PF; i++, pq_id++) {
+		/* Set PQ base address */
 		STORE_RT_REG(p_hwfn,
 			     QM_REG_BASEADDROTHERPQ_RT_OFFSET + pq_id,
 			     mem_addr_4kb);
+
+		/* Clear PQ pointer table entry */
+		if (is_pf_loading)
+			for (j = 0; j < 2; j++)
+				STORE_RT_REG(p_hwfn,
+					     QM_REG_PTRTBLOTHER_RT_OFFSET +
+					     (pq_id * 2) + j, 0);
+
 		mem_addr_4kb += pq_mem_4kb;
 	}
 }
@@ -461,16 +606,11 @@ static int qed_pf_wfq_rt_init(struct qed_hwfn *p_hwfn,
 			      struct qed_qm_pf_rt_init_params *p_params)
 {
 	u16 num_tx_pqs = p_params->num_pf_pqs + p_params->num_vf_pqs;
-	u32 crd_reg_offset;
-	u32 inc_val;
+	struct init_qm_pq_params *pq_params = p_params->pq_params;
+	u32 inc_val, crd_reg_offset;
+	u8 ext_voq;
 	u16 i;
 
-	if (p_params->pf_id < MAX_NUM_PFS_BB)
-		crd_reg_offset = QM_REG_WFQPFCRD_RT_OFFSET;
-	else
-		crd_reg_offset = QM_REG_WFQPFCRD_MSB_RT_OFFSET;
-	crd_reg_offset += p_params->pf_id % MAX_NUM_PFS_BB;
-
 	inc_val = QM_WFQ_INC_VAL(p_params->pf_wfq);
 	if (!inc_val || inc_val > QM_WFQ_MAX_INC_VAL) {
 		DP_NOTICE(p_hwfn, "Invalid PF WFQ weight configuration\n");
@@ -478,19 +618,26 @@ static int qed_pf_wfq_rt_init(struct qed_hwfn *p_hwfn,
 	}
 
 	for (i = 0; i < num_tx_pqs; i++) {
-		u8 voq = VOQ(p_params->port_id, p_params->pq_params[i].tc_id,
-			     p_params->max_phys_tcs_per_port);
-
+		ext_voq = qed_get_ext_voq(p_hwfn,
+					  p_params->port_id,
+					  pq_params[i].tc_id,
+					  p_params->max_phys_tcs_per_port);
+		crd_reg_offset =
+			(p_params->pf_id < MAX_NUM_PFS_BB ?
+			 QM_REG_WFQPFCRD_RT_OFFSET :
+			 QM_REG_WFQPFCRD_MSB_RT_OFFSET) +
+			ext_voq * MAX_NUM_PFS_BB +
+			(p_params->pf_id % MAX_NUM_PFS_BB);
 		OVERWRITE_RT_REG(p_hwfn,
-				 crd_reg_offset + voq * MAX_NUM_PFS_BB,
-				 QM_WFQ_CRD_REG_SIGN_BIT);
+				 crd_reg_offset, (u32)QM_WFQ_CRD_REG_SIGN_BIT);
 	}
 
 	STORE_RT_REG(p_hwfn,
 		     QM_REG_WFQPFUPPERBOUND_RT_OFFSET + p_params->pf_id,
-		     QM_WFQ_UPPER_BOUND | QM_WFQ_CRD_REG_SIGN_BIT);
+		     QM_WFQ_UPPER_BOUND | (u32)QM_WFQ_CRD_REG_SIGN_BIT);
 	STORE_RT_REG(p_hwfn, QM_REG_WFQPFWEIGHT_RT_OFFSET + p_params->pf_id,
 		     inc_val);
+
 	return 0;
 }
 
@@ -501,15 +648,19 @@ static int qed_pf_rl_rt_init(struct qed_hwfn *p_hwfn, u8 pf_id, u32 pf_rl)
 {
 	u32 inc_val = QM_RL_INC_VAL(pf_rl);
 
-	if (inc_val > QM_RL_MAX_INC_VAL) {
+	if (inc_val > QM_PF_RL_MAX_INC_VAL) {
 		DP_NOTICE(p_hwfn, "Invalid PF rate limit configuration\n");
 		return -1;
 	}
-	STORE_RT_REG(p_hwfn, QM_REG_RLPFCRD_RT_OFFSET + pf_id,
-		     QM_RL_CRD_REG_SIGN_BIT);
-	STORE_RT_REG(p_hwfn, QM_REG_RLPFUPPERBOUND_RT_OFFSET + pf_id,
-		     QM_RL_UPPER_BOUND | QM_RL_CRD_REG_SIGN_BIT);
+
+	STORE_RT_REG(p_hwfn,
+		     QM_REG_RLPFCRD_RT_OFFSET + pf_id,
+		     (u32)QM_RL_CRD_REG_SIGN_BIT);
+	STORE_RT_REG(p_hwfn,
+		     QM_REG_RLPFUPPERBOUND_RT_OFFSET + pf_id,
+		     QM_PF_RL_UPPER_BOUND | (u32)QM_RL_CRD_REG_SIGN_BIT);
 	STORE_RT_REG(p_hwfn, QM_REG_RLPFINCVAL_RT_OFFSET + pf_id, inc_val);
+
 	return 0;
 }
 
@@ -520,12 +671,12 @@ static int qed_vp_wfq_rt_init(struct qed_hwfn *p_hwfn,
 			      u8 num_vports,
 			      struct init_qm_vport_params *vport_params)
 {
+	u16 vport_pq_id;
 	u32 inc_val;
 	u8 tc, i;
 
 	/* Go over all PF VPORTs */
 	for (i = 0; i < num_vports; i++) {
-
 		if (!vport_params[i].vport_wfq)
 			continue;
 
@@ -536,17 +687,14 @@ static int qed_vp_wfq_rt_init(struct qed_hwfn *p_hwfn,
 			return -1;
 		}
 
-		/* each VPORT can have several VPORT PQ IDs for
-		 * different TCs
-		 */
+		/* Each VPORT can have several VPORT PQ IDs for various TCs */
 		for (tc = 0; tc < NUM_OF_TCS; tc++) {
-			u16 vport_pq_id = vport_params[i].first_tx_pq_id[tc];
-
+			vport_pq_id = vport_params[i].first_tx_pq_id[tc];
 			if (vport_pq_id != QM_INVALID_PQ_ID) {
 				STORE_RT_REG(p_hwfn,
 					     QM_REG_WFQVPCRD_RT_OFFSET +
 					     vport_pq_id,
-					     QM_WFQ_CRD_REG_SIGN_BIT);
+					     (u32)QM_WFQ_CRD_REG_SIGN_BIT);
 				STORE_RT_REG(p_hwfn,
 					     QM_REG_WFQVPWEIGHT_RT_OFFSET +
 					     vport_pq_id, inc_val);
@@ -557,12 +705,17 @@ static int qed_vp_wfq_rt_init(struct qed_hwfn *p_hwfn,
 	return 0;
 }
 
+/* Prepare VPORT RL runtime init values for the specified VPORTs.
+ * Return -1 on error.
+ */
 static int qed_vport_rl_rt_init(struct qed_hwfn *p_hwfn,
 				u8 start_vport,
 				u8 num_vports,
+				u32 link_speed,
 				struct init_qm_vport_params *vport_params)
 {
 	u8 i, vport_id;
+	u32 inc_val;
 
 	if (start_vport + num_vports >= MAX_QM_GLOBAL_RLS) {
 		DP_NOTICE(p_hwfn,
@@ -572,22 +725,22 @@ static int qed_vport_rl_rt_init(struct qed_hwfn *p_hwfn,
 
 	/* Go over all PF VPORTs */
 	for (i = 0, vport_id = start_vport; i < num_vports; i++, vport_id++) {
-		u32 inc_val = QM_RL_INC_VAL(vport_params[i].vport_rl);
-
-		if (inc_val > QM_RL_MAX_INC_VAL) {
+		inc_val = QM_RL_INC_VAL(vport_params[i].vport_rl ?
+			  vport_params[i].vport_rl :
+			  link_speed);
+		if (inc_val > QM_VP_RL_MAX_INC_VAL(link_speed)) {
 			DP_NOTICE(p_hwfn,
 				  "Invalid VPORT rate-limit configuration\n");
 			return -1;
 		}
 
-		STORE_RT_REG(p_hwfn,
-			     QM_REG_RLGLBLCRD_RT_OFFSET + vport_id,
-			     QM_RL_CRD_REG_SIGN_BIT);
+		STORE_RT_REG(p_hwfn, QM_REG_RLGLBLCRD_RT_OFFSET + vport_id,
+			     (u32)QM_RL_CRD_REG_SIGN_BIT);
 		STORE_RT_REG(p_hwfn,
 			     QM_REG_RLGLBLUPPERBOUND_RT_OFFSET + vport_id,
-			     QM_RL_UPPER_BOUND | QM_RL_CRD_REG_SIGN_BIT);
-		STORE_RT_REG(p_hwfn,
-			     QM_REG_RLGLBLINCVAL_RT_OFFSET + vport_id,
+			     QM_VP_RL_UPPER_BOUND(link_speed) |
+			     (u32)QM_RL_CRD_REG_SIGN_BIT);
+		STORE_RT_REG(p_hwfn, QM_REG_RLGLBLINCVAL_RT_OFFSET + vport_id,
 			     inc_val);
 	}
 
@@ -599,7 +752,7 @@ static bool qed_poll_on_qm_cmd_ready(struct qed_hwfn *p_hwfn,
 {
 	u32 reg_val, i;
 
-	for (i = 0, reg_val = 0; i < QM_STOP_CMD_MAX_POLL_COUNT && reg_val == 0;
+	for (i = 0, reg_val = 0; i < QM_STOP_CMD_MAX_POLL_COUNT && !reg_val;
 	     i++) {
 		udelay(QM_STOP_CMD_POLL_PERIOD_US);
 		reg_val = qed_rd(p_hwfn, p_ptt, QM_REG_SDMCMDREADY);
@@ -632,8 +785,8 @@ static bool qed_send_qm_cmd(struct qed_hwfn *p_hwfn,
 }
 
 /******************** INTERFACE IMPLEMENTATION *********************/
-u32 qed_qm_pf_mem_size(u8 pf_id,
-		       u32 num_pf_cids,
+
+u32 qed_qm_pf_mem_size(u32 num_pf_cids,
 		       u32 num_vf_cids,
 		       u32 num_tids, u16 num_pf_pqs, u16 num_vf_pqs)
 {
@@ -642,11 +795,10 @@ u32 qed_qm_pf_mem_size(u8 pf_id,
 	       QM_PQ_MEM_4KB(num_pf_cids + num_tids) * QM_OTHER_PQS_PER_PF;
 }
 
-int qed_qm_common_rt_init(
-	struct qed_hwfn *p_hwfn,
-	struct qed_qm_common_rt_init_params *p_params)
+int qed_qm_common_rt_init(struct qed_hwfn *p_hwfn,
+			  struct qed_qm_common_rt_init_params *p_params)
 {
-	/* init AFullOprtnstcCrdMask */
+	/* Init AFullOprtnstcCrdMask */
 	u32 mask = (QM_OPPOR_LINE_VOQ_DEF <<
 		    QM_RF_OPPORTUNISTIC_MASK_LINEVOQ_SHIFT) |
 		   (QM_BYTE_CRD_EN << QM_RF_OPPORTUNISTIC_MASK_BYTEVOQ_SHIFT) |
@@ -664,18 +816,31 @@ int qed_qm_common_rt_init(
 		    QM_RF_OPPORTUNISTIC_MASK_QUEUEEMPTY_SHIFT);
 
 	STORE_RT_REG(p_hwfn, QM_REG_AFULLOPRTNSTCCRDMASK_RT_OFFSET, mask);
+
+	/* Enable/disable PF RL */
 	qed_enable_pf_rl(p_hwfn, p_params->pf_rl_en);
+
+	/* Enable/disable PF WFQ */
 	qed_enable_pf_wfq(p_hwfn, p_params->pf_wfq_en);
+
+	/* Enable/disable VPORT RL */
 	qed_enable_vport_rl(p_hwfn, p_params->vport_rl_en);
+
+	/* Enable/disable VPORT WFQ */
 	qed_enable_vport_wfq(p_hwfn, p_params->vport_wfq_en);
+
+	/* Init PBF CMDQ line credit */
 	qed_cmdq_lines_rt_init(p_hwfn,
 			       p_params->max_ports_per_engine,
 			       p_params->max_phys_tcs_per_port,
 			       p_params->port_params);
+
+	/* Init BTB blocks in PBF */
 	qed_btb_blocks_rt_init(p_hwfn,
 			       p_params->max_ports_per_engine,
 			       p_params->max_phys_tcs_per_port,
 			       p_params->port_params);
+
 	return 0;
 }
 
@@ -695,24 +860,31 @@ int qed_qm_pf_rt_init(struct qed_hwfn *p_hwfn,
 			vport_params[i].first_tx_pq_id[tc] = QM_INVALID_PQ_ID;
 
 	/* Map Other PQs (if any) */
-	qed_other_pq_map_rt_init(p_hwfn, p_params->port_id, p_params->pf_id,
-				 p_params->num_pf_cids, p_params->num_tids, 0);
+	qed_other_pq_map_rt_init(p_hwfn,
+				 p_params->pf_id,
+				 p_params->is_pf_loading, p_params->num_pf_cids,
+				 p_params->num_tids, 0);
 
 	/* Map Tx PQs */
 	qed_tx_pq_map_rt_init(p_hwfn, p_ptt, p_params, other_mem_size_4kb);
 
+	/* Init PF WFQ */
 	if (p_params->pf_wfq)
 		if (qed_pf_wfq_rt_init(p_hwfn, p_params))
 			return -1;
 
+	/* Init PF RL */
 	if (qed_pf_rl_rt_init(p_hwfn, p_params->pf_id, p_params->pf_rl))
 		return -1;
 
+	/* Set VPORT WFQ */
 	if (qed_vp_wfq_rt_init(p_hwfn, p_params->num_vports, vport_params))
 		return -1;
 
+	/* Set VPORT RL */
 	if (qed_vport_rl_rt_init(p_hwfn, p_params->start_vport,
-				 p_params->num_vports, vport_params))
+				 p_params->num_vports, p_params->link_speed,
+				 vport_params))
 		return -1;
 
 	return 0;
@@ -729,6 +901,7 @@ int qed_init_pf_wfq(struct qed_hwfn *p_hwfn,
 	}
 
 	qed_wr(p_hwfn, p_ptt, QM_REG_WFQPFWEIGHT + pf_id * 4, inc_val);
+
 	return 0;
 }
 
@@ -737,14 +910,13 @@ int qed_init_pf_rl(struct qed_hwfn *p_hwfn,
 {
 	u32 inc_val = QM_RL_INC_VAL(pf_rl);
 
-	if (inc_val > QM_RL_MAX_INC_VAL) {
+	if (inc_val > QM_PF_RL_MAX_INC_VAL) {
 		DP_NOTICE(p_hwfn, "Invalid PF rate limit configuration\n");
 		return -1;
 	}
 
-	qed_wr(p_hwfn, p_ptt,
-	       QM_REG_RLPFCRD + pf_id * 4,
-	       QM_RL_CRD_REG_SIGN_BIT);
+	qed_wr(p_hwfn,
+	       p_ptt, QM_REG_RLPFCRD + pf_id * 4, (u32)QM_RL_CRD_REG_SIGN_BIT);
 	qed_wr(p_hwfn, p_ptt, QM_REG_RLPFINCVAL + pf_id * 4, inc_val);
 
 	return 0;
@@ -767,33 +939,35 @@ int qed_init_vport_wfq(struct qed_hwfn *p_hwfn,
 	for (tc = 0; tc < NUM_OF_TCS; tc++) {
 		vport_pq_id = first_tx_pq_id[tc];
 		if (vport_pq_id != QM_INVALID_PQ_ID)
-			qed_wr(p_hwfn, p_ptt,
-			       QM_REG_WFQVPWEIGHT + vport_pq_id * 4,
-			       inc_val);
+			qed_wr(p_hwfn,
+			       p_ptt,
+			       QM_REG_WFQVPWEIGHT + vport_pq_id * 4, inc_val);
 	}
 
 	return 0;
 }
 
 int qed_init_vport_rl(struct qed_hwfn *p_hwfn,
-		      struct qed_ptt *p_ptt, u8 vport_id, u32 vport_rl)
+		      struct qed_ptt *p_ptt,
+		      u8 vport_id, u32 vport_rl, u32 link_speed)
 {
-	u32 inc_val = QM_RL_INC_VAL(vport_rl);
+	u32 inc_val, max_qm_global_rls = MAX_QM_GLOBAL_RLS;
 
-	if (vport_id >= MAX_QM_GLOBAL_RLS) {
+	if (vport_id >= max_qm_global_rls) {
 		DP_NOTICE(p_hwfn,
 			  "Invalid VPORT ID for rate limiter configuration\n");
 		return -1;
 	}
 
-	if (inc_val > QM_RL_MAX_INC_VAL) {
+	inc_val = QM_RL_INC_VAL(vport_rl ? vport_rl : link_speed);
+	if (inc_val > QM_VP_RL_MAX_INC_VAL(link_speed)) {
 		DP_NOTICE(p_hwfn, "Invalid VPORT rate-limit configuration\n");
 		return -1;
 	}
 
-	qed_wr(p_hwfn, p_ptt,
-	       QM_REG_RLGLBLCRD + vport_id * 4,
-	       QM_RL_CRD_REG_SIGN_BIT);
+	qed_wr(p_hwfn,
+	       p_ptt,
+	       QM_REG_RLGLBLCRD + vport_id * 4, (u32)QM_RL_CRD_REG_SIGN_BIT);
 	qed_wr(p_hwfn, p_ptt, QM_REG_RLGLBLINCVAL + vport_id * 4, inc_val);
 
 	return 0;
@@ -805,23 +979,27 @@ bool qed_send_qm_stop_cmd(struct qed_hwfn *p_hwfn,
 			  bool is_tx_pq, u16 start_pq, u16 num_pqs)
 {
 	u32 cmd_arr[QM_CMD_STRUCT_SIZE(QM_STOP_CMD)] = { 0 };
-	u32 pq_mask = 0, last_pq = start_pq + num_pqs - 1, pq_id;
+	u32 pq_mask = 0, last_pq, pq_id;
+
+	last_pq = start_pq + num_pqs - 1;
 
 	/* Set command's PQ type */
 	QM_CMD_SET_FIELD(cmd_arr, QM_STOP_CMD, PQ_TYPE, is_tx_pq ? 0 : 1);
 
+	/* Go over requested PQs */
 	for (pq_id = start_pq; pq_id <= last_pq; pq_id++) {
 		/* Set PQ bit in mask (stop command only) */
 		if (!is_release_cmd)
-			pq_mask |= (1 << (pq_id % QM_STOP_PQ_MASK_WIDTH));
+			pq_mask |= BIT(pq_id % QM_STOP_PQ_MASK_WIDTH);
 
 		/* If last PQ or end of PQ mask, write command */
 		if ((pq_id == last_pq) ||
 		    (pq_id % QM_STOP_PQ_MASK_WIDTH ==
 		     (QM_STOP_PQ_MASK_WIDTH - 1))) {
-			QM_CMD_SET_FIELD(cmd_arr, QM_STOP_CMD,
-					 PAUSE_MASK, pq_mask);
-			QM_CMD_SET_FIELD(cmd_arr, QM_STOP_CMD,
+			QM_CMD_SET_FIELD(cmd_arr,
+					 QM_STOP_CMD, PAUSE_MASK, pq_mask);
+			QM_CMD_SET_FIELD(cmd_arr,
+					 QM_STOP_CMD,
 					 GROUP_ID,
 					 pq_id / QM_STOP_PQ_MASK_WIDTH);
 			if (!qed_send_qm_cmd(p_hwfn, p_ptt, QM_STOP_CMD_ADDR,
@@ -834,87 +1012,103 @@ bool qed_send_qm_stop_cmd(struct qed_hwfn *p_hwfn,
 	return true;
 }
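
The loop above batches PQ IDs into a 32-bit pause mask and flushes one
command per QM_STOP_PQ_MASK_WIDTH-sized group. A minimal standalone sketch
of the same batching pattern (not driver code; GROUP_WIDTH and send_cmd()
are illustrative stand-ins):

	#include <stdint.h>
	#include <stdio.h>

	#define GROUP_WIDTH 32	/* stands in for QM_STOP_PQ_MASK_WIDTH */

	static void send_cmd(uint32_t group_id, uint32_t mask)
	{
		printf("group %u: mask 0x%08x\n", group_id, mask);
	}

	static void stop_range(uint32_t start, uint32_t num)
	{
		uint32_t last = start + num - 1, mask = 0, id;

		for (id = start; id <= last; id++) {
			mask |= 1u << (id % GROUP_WIDTH);

			/* Flush on range end or when the 32-bit window fills */
			if (id == last ||
			    id % GROUP_WIDTH == GROUP_WIDTH - 1) {
				send_cmd(id / GROUP_WIDTH, mask);
				mask = 0;
			}
		}
	}

	int main(void)
	{
		stop_range(30, 5);	/* spans two groups: two commands */
		return 0;
	}
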
 
-static void
-qed_set_tunnel_type_enable_bit(unsigned long *var, int bit, bool enable)
-{
-	if (enable)
-		set_bit(bit, var);
-	else
-		clear_bit(bit, var);
-}
 
+#define SET_TUNNEL_TYPE_ENABLE_BIT(var, offset, enable) \
+	do { \
+		typeof(var) *__p_var = &(var); \
+		typeof(offset) __offset = offset; \
+		*__p_var = (*__p_var & ~BIT(__offset)) | \
+			   ((enable) ? BIT(__offset) : 0); \
+	} while (0)
 #define PRS_ETH_TUNN_FIC_FORMAT	-188897008
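
The macro above replaces the old set_bit()/clear_bit() helper (which needed
an unsigned long) with a plain read-modify-write on a u32: clear the target
bit, then OR it back in only when enable is set. A minimal sketch of the
expansion (values illustrative, not driver code):

	#include <assert.h>
	#include <stdint.h>

	static uint32_t set_enable_bit(uint32_t val, unsigned int offset,
				       int enable)
	{
		val &= ~(1u << offset);			/* clear the bit */
		val |= enable ? 1u << offset : 0;	/* re-set if enabled */
		return val;
	}

	int main(void)
	{
		assert(set_enable_bit(0x00, 3, 1) == 0x08);
		assert(set_enable_bit(0xff, 3, 0) == 0xf7);
		return 0;
	}
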
 
 void qed_set_vxlan_dest_port(struct qed_hwfn *p_hwfn,
 			     struct qed_ptt *p_ptt, u16 dest_port)
 {
+	/* Update PRS register */
 	qed_wr(p_hwfn, p_ptt, PRS_REG_VXLAN_PORT, dest_port);
+
+	/* Update NIG register */
 	qed_wr(p_hwfn, p_ptt, NIG_REG_VXLAN_CTRL, dest_port);
+
+	/* Update PBF register */
 	qed_wr(p_hwfn, p_ptt, PBF_REG_VXLAN_PORT, dest_port);
 }
 
 void qed_set_vxlan_enable(struct qed_hwfn *p_hwfn,
 			  struct qed_ptt *p_ptt, bool vxlan_enable)
 {
-	unsigned long reg_val = 0;
+	u32 reg_val;
 	u8 shift;
 
+	/* Update PRS register */
 	reg_val = qed_rd(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN);
 	shift = PRS_REG_ENCAPSULATION_TYPE_EN_VXLAN_ENABLE_SHIFT;
-	qed_set_tunnel_type_enable_bit(&reg_val, shift, vxlan_enable);
-
+	SET_TUNNEL_TYPE_ENABLE_BIT(reg_val, shift, vxlan_enable);
 	qed_wr(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN, reg_val);
-
 	if (reg_val)
-		qed_wr(p_hwfn, p_ptt, PRS_REG_OUTPUT_FORMAT_4_0,
-		       PRS_ETH_TUNN_FIC_FORMAT);
+		qed_wr(p_hwfn,
+		       p_ptt,
+		       PRS_REG_OUTPUT_FORMAT_4_0_BB_K2,
+		       (u32)PRS_ETH_TUNN_FIC_FORMAT);
 
+	/* Update NIG register */
 	reg_val = qed_rd(p_hwfn, p_ptt, NIG_REG_ENC_TYPE_ENABLE);
 	shift = NIG_REG_ENC_TYPE_ENABLE_VXLAN_ENABLE_SHIFT;
-	qed_set_tunnel_type_enable_bit(&reg_val, shift, vxlan_enable);
-
+	SET_TUNNEL_TYPE_ENABLE_BIT(reg_val, shift, vxlan_enable);
 	qed_wr(p_hwfn, p_ptt, NIG_REG_ENC_TYPE_ENABLE, reg_val);
 
-	qed_wr(p_hwfn, p_ptt, DORQ_REG_L2_EDPM_TUNNEL_VXLAN_EN,
-	       vxlan_enable ? 1 : 0);
+	/* Update DORQ register */
+	qed_wr(p_hwfn,
+	       p_ptt, DORQ_REG_L2_EDPM_TUNNEL_VXLAN_EN, vxlan_enable ? 1 : 0);
 }
 
-void qed_set_gre_enable(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
+void qed_set_gre_enable(struct qed_hwfn *p_hwfn,
+			struct qed_ptt *p_ptt,
 			bool eth_gre_enable, bool ip_gre_enable)
 {
-	unsigned long reg_val = 0;
+	u32 reg_val;
 	u8 shift;
 
+	/* Update PRS register */
 	reg_val = qed_rd(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN);
 	shift = PRS_REG_ENCAPSULATION_TYPE_EN_ETH_OVER_GRE_ENABLE_SHIFT;
-	qed_set_tunnel_type_enable_bit(&reg_val, shift, eth_gre_enable);
-
+	SET_TUNNEL_TYPE_ENABLE_BIT(reg_val, shift, eth_gre_enable);
 	shift = PRS_REG_ENCAPSULATION_TYPE_EN_IP_OVER_GRE_ENABLE_SHIFT;
-	qed_set_tunnel_type_enable_bit(&reg_val, shift, ip_gre_enable);
+	SET_TUNNEL_TYPE_ENABLE_BIT(reg_val, shift, ip_gre_enable);
 	qed_wr(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN, reg_val);
 	if (reg_val)
-		qed_wr(p_hwfn, p_ptt, PRS_REG_OUTPUT_FORMAT_4_0,
-		       PRS_ETH_TUNN_FIC_FORMAT);
+		qed_wr(p_hwfn,
+		       p_ptt,
+		       PRS_REG_OUTPUT_FORMAT_4_0_BB_K2,
+		       (u32)PRS_ETH_TUNN_FIC_FORMAT);
 
+	/* Update NIG register */
 	reg_val = qed_rd(p_hwfn, p_ptt, NIG_REG_ENC_TYPE_ENABLE);
 	shift = NIG_REG_ENC_TYPE_ENABLE_ETH_OVER_GRE_ENABLE_SHIFT;
-	qed_set_tunnel_type_enable_bit(&reg_val, shift, eth_gre_enable);
-
+	SET_TUNNEL_TYPE_ENABLE_BIT(reg_val, shift, eth_gre_enable);
 	shift = NIG_REG_ENC_TYPE_ENABLE_IP_OVER_GRE_ENABLE_SHIFT;
-	qed_set_tunnel_type_enable_bit(&reg_val, shift, ip_gre_enable);
+	SET_TUNNEL_TYPE_ENABLE_BIT(reg_val, shift, ip_gre_enable);
 	qed_wr(p_hwfn, p_ptt, NIG_REG_ENC_TYPE_ENABLE, reg_val);
 
-	qed_wr(p_hwfn, p_ptt, DORQ_REG_L2_EDPM_TUNNEL_GRE_ETH_EN,
-	       eth_gre_enable ? 1 : 0);
-	qed_wr(p_hwfn, p_ptt, DORQ_REG_L2_EDPM_TUNNEL_GRE_IP_EN,
-	       ip_gre_enable ? 1 : 0);
+	/* Update DORQ registers */
+	qed_wr(p_hwfn,
+	       p_ptt,
+	       DORQ_REG_L2_EDPM_TUNNEL_GRE_ETH_EN, eth_gre_enable ? 1 : 0);
+	qed_wr(p_hwfn,
+	       p_ptt, DORQ_REG_L2_EDPM_TUNNEL_GRE_IP_EN, ip_gre_enable ? 1 : 0);
 }
 
 void qed_set_geneve_dest_port(struct qed_hwfn *p_hwfn,
 			      struct qed_ptt *p_ptt, u16 dest_port)
 {
+	/* Update PRS register */
 	qed_wr(p_hwfn, p_ptt, PRS_REG_NGE_PORT, dest_port);
+
+	/* Update NIG register */
 	qed_wr(p_hwfn, p_ptt, NIG_REG_NGE_PORT, dest_port);
+
+	/* Update PBF register */
 	qed_wr(p_hwfn, p_ptt, PBF_REG_NGE_PORT, dest_port);
 }
 
@@ -922,32 +1116,39 @@ void qed_set_geneve_enable(struct qed_hwfn *p_hwfn,
 			   struct qed_ptt *p_ptt,
 			   bool eth_geneve_enable, bool ip_geneve_enable)
 {
-	unsigned long reg_val = 0;
+	u32 reg_val;
 	u8 shift;
 
+	/* Update PRS register */
 	reg_val = qed_rd(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN);
 	shift = PRS_REG_ENCAPSULATION_TYPE_EN_ETH_OVER_GENEVE_ENABLE_SHIFT;
-	qed_set_tunnel_type_enable_bit(&reg_val, shift, eth_geneve_enable);
-
+	SET_TUNNEL_TYPE_ENABLE_BIT(reg_val, shift, eth_geneve_enable);
 	shift = PRS_REG_ENCAPSULATION_TYPE_EN_IP_OVER_GENEVE_ENABLE_SHIFT;
-	qed_set_tunnel_type_enable_bit(&reg_val, shift, ip_geneve_enable);
-
+	SET_TUNNEL_TYPE_ENABLE_BIT(reg_val, shift, ip_geneve_enable);
 	qed_wr(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN, reg_val);
 	if (reg_val)
-		qed_wr(p_hwfn, p_ptt, PRS_REG_OUTPUT_FORMAT_4_0,
-		       PRS_ETH_TUNN_FIC_FORMAT);
+		qed_wr(p_hwfn,
+		       p_ptt,
+		       PRS_REG_OUTPUT_FORMAT_4_0_BB_K2,
+		       (u32)PRS_ETH_TUNN_FIC_FORMAT);
 
+	/* Update NIG register */
 	qed_wr(p_hwfn, p_ptt, NIG_REG_NGE_ETH_ENABLE,
 	       eth_geneve_enable ? 1 : 0);
 	qed_wr(p_hwfn, p_ptt, NIG_REG_NGE_IP_ENABLE, ip_geneve_enable ? 1 : 0);
 
-	/* EDPM with geneve tunnel not supported in BB_B0 */
+	/* EDPM with geneve tunnel not supported in BB */
 	if (QED_IS_BB_B0(p_hwfn->cdev))
 		return;
 
-	qed_wr(p_hwfn, p_ptt, DORQ_REG_L2_EDPM_TUNNEL_NGE_ETH_EN,
+	/* Update DORQ registers */
+	qed_wr(p_hwfn,
+	       p_ptt,
+	       DORQ_REG_L2_EDPM_TUNNEL_NGE_ETH_EN_K2_E5,
 	       eth_geneve_enable ? 1 : 0);
-	qed_wr(p_hwfn, p_ptt, DORQ_REG_L2_EDPM_TUNNEL_NGE_IP_EN,
+	qed_wr(p_hwfn,
+	       p_ptt,
+	       DORQ_REG_L2_EDPM_TUNNEL_NGE_IP_EN_K2_E5,
 	       ip_geneve_enable ? 1 : 0);
 }
 
@@ -959,117 +1160,297 @@ void qed_set_geneve_enable(struct qed_hwfn *p_hwfn,
 #define RAM_LINE_SIZE sizeof(u64)
 #define REG_SIZE sizeof(u32)
 
-void qed_set_rfs_mode_disable(struct qed_hwfn *p_hwfn,
-			      struct qed_ptt *p_ptt, u16 pf_id)
+void qed_gft_disable(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u16 pf_id)
 {
-	u32 hw_addr = PRS_REG_GFT_PROFILE_MASK_RAM +
-		      pf_id * RAM_LINE_SIZE;
-
-	/*stop using gft logic */
+	/* Disable gft search for PF */
 	qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_GFT, 0);
-	qed_wr(p_hwfn, p_ptt, PRS_REG_CM_HDR_GFT, 0x0);
+
+	/* Clean ram & cam for next gft session */
+
+	/* Zero camline */
 	qed_wr(p_hwfn, p_ptt, PRS_REG_GFT_CAM + CAM_LINE_SIZE * pf_id, 0);
-	qed_wr(p_hwfn, p_ptt, hw_addr, 0);
-	qed_wr(p_hwfn, p_ptt, hw_addr + 4, 0);
+
+	/* Zero ramline */
+	qed_wr(p_hwfn,
+	       p_ptt, PRS_REG_GFT_PROFILE_MASK_RAM + RAM_LINE_SIZE * pf_id, 0);
+	qed_wr(p_hwfn,
+	       p_ptt,
+	       PRS_REG_GFT_PROFILE_MASK_RAM + RAM_LINE_SIZE * pf_id + REG_SIZE,
+	       0);
 }
 
-void qed_set_rfs_mode_enable(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
-			     u16 pf_id, bool tcp, bool udp,
-			     bool ipv4, bool ipv6)
+void qed_set_gft_event_id_cm_hdr(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
 {
-	union gft_cam_line_union camline;
-	struct gft_ram_line ramline;
 	u32 rfs_cm_hdr_event_id;
 
+	/* Set RFS event ID to be awakened in Tstorm by PRS */
 	rfs_cm_hdr_event_id = qed_rd(p_hwfn, p_ptt, PRS_REG_CM_HDR_GFT);
+	rfs_cm_hdr_event_id |= T_ETH_PACKET_ACTION_GFT_EVENTID <<
+			       PRS_REG_CM_HDR_GFT_EVENT_ID_SHIFT;
+	rfs_cm_hdr_event_id |= PARSER_ETH_CONN_GFT_ACTION_CM_HDR <<
+			       PRS_REG_CM_HDR_GFT_CM_HDR_SHIFT;
+	qed_wr(p_hwfn, p_ptt, PRS_REG_CM_HDR_GFT, rfs_cm_hdr_event_id);
+}
+
+void qed_gft_config(struct qed_hwfn *p_hwfn,
+		    struct qed_ptt *p_ptt,
+		    u16 pf_id,
+		    bool tcp,
+		    bool udp,
+		    bool ipv4, bool ipv6, enum gft_profile_type profile_type)
+{
+	u32 reg_val, cam_line, ram_line_lo, ram_line_hi;
 
 	if (!ipv6 && !ipv4)
 		DP_NOTICE(p_hwfn,
-			  "set_rfs_mode_enable: must accept at least on of - ipv4 or ipv6");
+			  "gft_config: must accept at least on of - ipv4 or ipv6'\n");
 	if (!tcp && !udp)
 		DP_NOTICE(p_hwfn,
-			  "set_rfs_mode_enable: must accept at least on of - udp or tcp");
+			  "gft_config: must accept at least on of - udp or tcp\n");
+	if (profile_type >= MAX_GFT_PROFILE_TYPE)
+		DP_NOTICE(p_hwfn, "gft_config: unsupported gft_profile_type\n");
 
-	rfs_cm_hdr_event_id |= T_ETH_PACKET_MATCH_RFS_EVENTID <<
-					PRS_REG_CM_HDR_GFT_EVENT_ID_SHIFT;
-	rfs_cm_hdr_event_id |= PARSER_ETH_CONN_CM_HDR <<
-					PRS_REG_CM_HDR_GFT_CM_HDR_SHIFT;
-	qed_wr(p_hwfn, p_ptt, PRS_REG_CM_HDR_GFT, rfs_cm_hdr_event_id);
+	/* Set RFS event ID to be awakened in Tstorm by PRS */
+	reg_val = T_ETH_PACKET_MATCH_RFS_EVENTID <<
+		  PRS_REG_CM_HDR_GFT_EVENT_ID_SHIFT;
+	reg_val |= PARSER_ETH_CONN_CM_HDR << PRS_REG_CM_HDR_GFT_CM_HDR_SHIFT;
+	qed_wr(p_hwfn, p_ptt, PRS_REG_CM_HDR_GFT, reg_val);
 
-	/* Configure Registers for RFS mode */
-	qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_GFT, 1);
+	/* Do not load context, only cid, in PRS on match */
 	qed_wr(p_hwfn, p_ptt, PRS_REG_LOAD_L2_FILTER, 0);
-	camline.cam_line_mapped.camline = 0;
 
-	/* Cam line is now valid!! */
-	SET_FIELD(camline.cam_line_mapped.camline,
-		  GFT_CAM_LINE_MAPPED_VALID, 1);
+	/* Do not use tenant ID exist bit for gft search */
+	qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_TENANT_ID, 0);
 
-	/* filters are per PF!! */
-	SET_FIELD(camline.cam_line_mapped.camline,
+	/* Set Cam */
+	cam_line = 0;
+	SET_FIELD(cam_line, GFT_CAM_LINE_MAPPED_VALID, 1);
+
+	/* Filters are per PF!! */
+	SET_FIELD(cam_line,
 		  GFT_CAM_LINE_MAPPED_PF_ID_MASK,
 		  GFT_CAM_LINE_MAPPED_PF_ID_MASK_MASK);
-	SET_FIELD(camline.cam_line_mapped.camline,
-		  GFT_CAM_LINE_MAPPED_PF_ID, pf_id);
+	SET_FIELD(cam_line, GFT_CAM_LINE_MAPPED_PF_ID, pf_id);
+
 	if (!(tcp && udp)) {
-		SET_FIELD(camline.cam_line_mapped.camline,
+		SET_FIELD(cam_line,
 			  GFT_CAM_LINE_MAPPED_UPPER_PROTOCOL_TYPE_MASK,
 			  GFT_CAM_LINE_MAPPED_UPPER_PROTOCOL_TYPE_MASK_MASK);
 		if (tcp)
-			SET_FIELD(camline.cam_line_mapped.camline,
+			SET_FIELD(cam_line,
 				  GFT_CAM_LINE_MAPPED_UPPER_PROTOCOL_TYPE,
 				  GFT_PROFILE_TCP_PROTOCOL);
 		else
-			SET_FIELD(camline.cam_line_mapped.camline,
+			SET_FIELD(cam_line,
 				  GFT_CAM_LINE_MAPPED_UPPER_PROTOCOL_TYPE,
 				  GFT_PROFILE_UDP_PROTOCOL);
 	}
 
 	if (!(ipv4 && ipv6)) {
-		SET_FIELD(camline.cam_line_mapped.camline,
-			  GFT_CAM_LINE_MAPPED_IP_VERSION_MASK, 1);
+		SET_FIELD(cam_line, GFT_CAM_LINE_MAPPED_IP_VERSION_MASK, 1);
 		if (ipv4)
-			SET_FIELD(camline.cam_line_mapped.camline,
+			SET_FIELD(cam_line,
 				  GFT_CAM_LINE_MAPPED_IP_VERSION,
 				  GFT_PROFILE_IPV4);
 		else
-			SET_FIELD(camline.cam_line_mapped.camline,
+			SET_FIELD(cam_line,
 				  GFT_CAM_LINE_MAPPED_IP_VERSION,
 				  GFT_PROFILE_IPV6);
 	}
 
 	/* Write characteristics to cam */
 	qed_wr(p_hwfn, p_ptt, PRS_REG_GFT_CAM + CAM_LINE_SIZE * pf_id,
-	       camline.cam_line_mapped.camline);
-	camline.cam_line_mapped.camline = qed_rd(p_hwfn, p_ptt,
-						 PRS_REG_GFT_CAM +
-						 CAM_LINE_SIZE * pf_id);
+	       cam_line);
+	cam_line =
+	    qed_rd(p_hwfn, p_ptt, PRS_REG_GFT_CAM + CAM_LINE_SIZE * pf_id);
 
 	/* Write line to RAM - compare to filter 4 tuple */
-	ramline.lo = 0;
-	ramline.hi = 0;
-	SET_FIELD(ramline.hi, GFT_RAM_LINE_DST_IP, 1);
-	SET_FIELD(ramline.hi, GFT_RAM_LINE_SRC_IP, 1);
-	SET_FIELD(ramline.hi, GFT_RAM_LINE_OVER_IP_PROTOCOL, 1);
-	SET_FIELD(ramline.lo, GFT_RAM_LINE_ETHERTYPE, 1);
-	SET_FIELD(ramline.lo, GFT_RAM_LINE_SRC_PORT, 1);
-	SET_FIELD(ramline.lo, GFT_RAM_LINE_DST_PORT, 1);
+	ram_line_lo = 0;
+	ram_line_hi = 0;
 
-	/* Each iteration write to reg */
-	qed_wr(p_hwfn, p_ptt,
+	if (profile_type == GFT_PROFILE_TYPE_4_TUPLE) {
+		SET_FIELD(ram_line_hi, GFT_RAM_LINE_DST_IP, 1);
+		SET_FIELD(ram_line_hi, GFT_RAM_LINE_SRC_IP, 1);
+		SET_FIELD(ram_line_hi, GFT_RAM_LINE_OVER_IP_PROTOCOL, 1);
+		SET_FIELD(ram_line_lo, GFT_RAM_LINE_ETHERTYPE, 1);
+		SET_FIELD(ram_line_lo, GFT_RAM_LINE_SRC_PORT, 1);
+		SET_FIELD(ram_line_lo, GFT_RAM_LINE_DST_PORT, 1);
+	} else if (profile_type == GFT_PROFILE_TYPE_L4_DST_PORT) {
+		SET_FIELD(ram_line_hi, GFT_RAM_LINE_OVER_IP_PROTOCOL, 1);
+		SET_FIELD(ram_line_lo, GFT_RAM_LINE_ETHERTYPE, 1);
+		SET_FIELD(ram_line_lo, GFT_RAM_LINE_DST_PORT, 1);
+	} else if (profile_type == GFT_PROFILE_TYPE_IP_DST_PORT) {
+		SET_FIELD(ram_line_hi, GFT_RAM_LINE_DST_IP, 1);
+		SET_FIELD(ram_line_lo, GFT_RAM_LINE_ETHERTYPE, 1);
+	}
+
+	qed_wr(p_hwfn,
+	       p_ptt,
 	       PRS_REG_GFT_PROFILE_MASK_RAM + RAM_LINE_SIZE * pf_id,
-	       ramline.lo);
-	qed_wr(p_hwfn, p_ptt,
-	       PRS_REG_GFT_PROFILE_MASK_RAM + RAM_LINE_SIZE * pf_id + 4,
-	       ramline.hi);
+	       ram_line_lo);
+	qed_wr(p_hwfn,
+	       p_ptt,
+	       PRS_REG_GFT_PROFILE_MASK_RAM + RAM_LINE_SIZE * pf_id + REG_SIZE,
+	       ram_line_hi);
 
 	/* Set default profile so that no filter match will happen */
-	qed_wr(p_hwfn, p_ptt,
-	       PRS_REG_GFT_PROFILE_MASK_RAM +
-	       RAM_LINE_SIZE * PRS_GFT_CAM_LINES_NO_MATCH,
-	       ramline.lo);
-	qed_wr(p_hwfn, p_ptt,
-	       PRS_REG_GFT_PROFILE_MASK_RAM +
-	       RAM_LINE_SIZE * PRS_GFT_CAM_LINES_NO_MATCH + 4,
-	       ramline.hi);
+	qed_wr(p_hwfn,
+	       p_ptt,
+	       PRS_REG_GFT_PROFILE_MASK_RAM + RAM_LINE_SIZE *
+	       PRS_GFT_CAM_LINES_NO_MATCH, 0xffffffff);
+	qed_wr(p_hwfn,
+	       p_ptt,
+	       PRS_REG_GFT_PROFILE_MASK_RAM + RAM_LINE_SIZE *
+	       PRS_GFT_CAM_LINES_NO_MATCH + REG_SIZE, 0x3ff);
+
+	/* Enable gft search */
+	qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_GFT, 1);
+}
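
qed_gft_config() programs a different set of RAM-line match fields per
profile type. The table below is an illustrative standalone summary of that
mapping (not driver code; the bitfield names mirror the GFT_RAM_LINE_* flags
set above):

	#include <stdio.h>

	enum profile { FT_4_TUPLE, FT_L4_DST_PORT, FT_IP_DST_PORT, FT_MAX };

	struct ram_line_fields {
		unsigned dst_ip:1, src_ip:1, over_ip_proto:1;
		unsigned ethertype:1, src_port:1, dst_port:1;
	};

	static const struct ram_line_fields profile_fields[FT_MAX] = {
		[FT_4_TUPLE]     = { 1, 1, 1, 1, 1, 1 }, /* full 4-tuple */
		[FT_L4_DST_PORT] = { 0, 0, 1, 1, 0, 1 }, /* proto + dst port */
		[FT_IP_DST_PORT] = { 1, 0, 0, 1, 0, 0 }, /* dst IP + ethtype */
	};

	int main(void)
	{
		const struct ram_line_fields *f =
			&profile_fields[FT_IP_DST_PORT];

		printf("IP_DST_PORT matches: dst_ip=%u ethertype=%u\n",
		       f->dst_ip, f->ethertype);
		return 0;
	}
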
+
+DECLARE_CRC8_TABLE(cdu_crc8_table);
+
+/* Calculate and return CDU validation byte per connection type/region/cid */
+static u8 qed_calc_cdu_validation_byte(u8 conn_type, u8 region, u32 cid)
+{
+	const u8 validation_cfg = CDU_VALIDATION_DEFAULT_CFG;
+	u8 crc, validation_byte = 0;
+	static u8 crc8_table_valid; /* automatically initialized to 0 */
+	u32 validation_string = 0;
+	u32 data_to_crc;
+
+	if (!crc8_table_valid) {
+		crc8_populate_msb(cdu_crc8_table, 0x07);
+		crc8_table_valid = 1;
+	}
+
+	/* The CRC is calculated on the String-to-compress:
+	 * [31:8]  = {CID[31:20],CID[11:0]}
+	 * [7:4]   = Region
+	 * [3:0]   = Type
+	 */
+	if ((validation_cfg >> CDU_CONTEXT_VALIDATION_CFG_USE_CID) & 1)
+		validation_string |= (cid & 0xFFF00000) | ((cid & 0xFFF) << 8);
+
+	if ((validation_cfg >> CDU_CONTEXT_VALIDATION_CFG_USE_REGION) & 1)
+		validation_string |= ((region & 0xF) << 4);
+
+	if ((validation_cfg >> CDU_CONTEXT_VALIDATION_CFG_USE_TYPE) & 1)
+		validation_string |= (conn_type & 0xF);
+
+	/* Convert to big-endian and calculate CRC8 */
+	data_to_crc = be32_to_cpu(validation_string);
+
+	crc = crc8(cdu_crc8_table,
+		   (u8 *)&data_to_crc, sizeof(data_to_crc), CRC8_INIT_VALUE);
+
+	/* The validation byte [7:0] is composed:
+	 * for type A validation
+	 * [7]          = active configuration bit
+	 * [6:0]        = crc[6:0]
+	 *
+	 * for type B validation
+	 * [7]          = active configuration bit
+	 * [6:3]        = connection_type[3:0]
+	 * [2:0]        = crc[2:0]
+	 */
+	validation_byte |=
+	    ((validation_cfg >>
+	      CDU_CONTEXT_VALIDATION_CFG_USE_ACTIVE) & 1) << 7;
+
+	if ((validation_cfg >>
+	     CDU_CONTEXT_VALIDATION_CFG_VALIDATION_TYPE_SHIFT) & 1)
+		validation_byte |= ((conn_type & 0xF) << 3) | (crc & 0x7);
+	else
+		validation_byte |= crc & 0x7F;
+
+	return validation_byte;
+}
+
+/* Calculate and set validation bytes for session context */
+void qed_calc_session_ctx_validation(void *p_ctx_mem,
+				     u16 ctx_size, u8 ctx_type, u32 cid)
+{
+	u8 *x_val_ptr, *t_val_ptr, *u_val_ptr, *p_ctx;
+
+	p_ctx = (u8 * const)p_ctx_mem;
+	x_val_ptr = &p_ctx[con_region_offsets[0][ctx_type]];
+	t_val_ptr = &p_ctx[con_region_offsets[1][ctx_type]];
+	u_val_ptr = &p_ctx[con_region_offsets[2][ctx_type]];
+
+	memset(p_ctx, 0, ctx_size);
+
+	*x_val_ptr = qed_calc_cdu_validation_byte(ctx_type, 3, cid);
+	*t_val_ptr = qed_calc_cdu_validation_byte(ctx_type, 4, cid);
+	*u_val_ptr = qed_calc_cdu_validation_byte(ctx_type, 5, cid);
+}
+
+/* Calculate and set validation bytes for task context */
+void qed_calc_task_ctx_validation(void *p_ctx_mem,
+				  u16 ctx_size, u8 ctx_type, u32 tid)
+{
+	u8 *p_ctx, *region1_val_ptr;
+
+	p_ctx = (u8 * const)p_ctx_mem;
+	region1_val_ptr = &p_ctx[task_region_offsets[0][ctx_type]];
+
+	memset(p_ctx, 0, ctx_size);
+
+	*region1_val_ptr = qed_calc_cdu_validation_byte(ctx_type, 1, tid);
+}
+
+/* Memset session context to 0 while preserving validation bytes */
+void qed_memset_session_ctx(void *p_ctx_mem, u32 ctx_size, u8 ctx_type)
+{
+	u8 *x_val_ptr, *t_val_ptr, *u_val_ptr, *p_ctx;
+	u8 x_val, t_val, u_val;
+
+	p_ctx = (u8 * const)p_ctx_mem;
+	x_val_ptr = &p_ctx[con_region_offsets[0][ctx_type]];
+	t_val_ptr = &p_ctx[con_region_offsets[1][ctx_type]];
+	u_val_ptr = &p_ctx[con_region_offsets[2][ctx_type]];
+
+	x_val = *x_val_ptr;
+	t_val = *t_val_ptr;
+	u_val = *u_val_ptr;
+
+	memset(p_ctx, 0, ctx_size);
+
+	*x_val_ptr = x_val;
+	*t_val_ptr = t_val;
+	*u_val_ptr = u_val;
+}
+
+/* Memset task context to 0 while preserving validation bytes */
+void qed_memset_task_ctx(void *p_ctx_mem, u32 ctx_size, u8 ctx_type)
+{
+	u8 *p_ctx, *region1_val_ptr;
+	u8 region1_val;
+
+	p_ctx = (u8 * const)p_ctx_mem;
+	region1_val_ptr = &p_ctx[task_region_offsets[0][ctx_type]];
+
+	region1_val = *region1_val_ptr;
+
+	memset(p_ctx, 0, ctx_size);
+
+	*region1_val_ptr = region1_val;
+}
+
+/* Enable and configure context validation */
+void qed_enable_context_validation(struct qed_hwfn *p_hwfn,
+				   struct qed_ptt *p_ptt)
+{
+	u32 ctx_validation;
+
+	/* Enable validation for connection region 3: CCFC_CTX_VALID0[31:24] */
+	ctx_validation = CDU_VALIDATION_DEFAULT_CFG << 24;
+	qed_wr(p_hwfn, p_ptt, CDU_REG_CCFC_CTX_VALID0, ctx_validation);
+
+	/* Enable validation for connection region 5: CCFC_CTX_VALID1[15:8] */
+	ctx_validation = CDU_VALIDATION_DEFAULT_CFG << 8;
+	qed_wr(p_hwfn, p_ptt, CDU_REG_CCFC_CTX_VALID1, ctx_validation);
+
+	/* Enable validation for connection region 1: TCFC_CTX_VALID0[15:8] */
+	ctx_validation = CDU_VALIDATION_DEFAULT_CFG << 8;
+	qed_wr(p_hwfn, p_ptt, CDU_REG_TCFC_CTX_VALID0, ctx_validation);
 }
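
qed_calc_cdu_validation_byte() documents its string-to-compress layout
precisely, so the computation can be mirrored in a standalone sketch. The
sketch below assumes all of the USE_CID/USE_REGION/USE_TYPE config bits are
set, type-A validation with the active bit enabled, and a CRC8 init value of
0xFF as in linux/crc8.h; it is not the driver's code:

	#include <stdint.h>
	#include <stdio.h>

	static uint8_t crc8_table[256];

	/* MSB-first table for polynomial 0x07, as crc8_populate_msb() builds */
	static void populate_msb(uint8_t poly)
	{
		for (int i = 0; i < 256; i++) {
			uint8_t crc = (uint8_t)i;

			for (int bit = 0; bit < 8; bit++)
				crc = (crc & 0x80) ? (uint8_t)(crc << 1) ^ poly
						   : (uint8_t)(crc << 1);
			crc8_table[i] = crc;
		}
	}

	static uint8_t crc8(const uint8_t *buf, size_t len, uint8_t crc)
	{
		while (len--)
			crc = crc8_table[crc ^ *buf++];
		return crc;
	}

	/* [31:8] = {CID[31:20],CID[11:0]}, [7:4] = region, [3:0] = type */
	static uint8_t calc_validation_byte(uint8_t type, uint8_t region,
					    uint32_t cid)
	{
		uint32_t s = (cid & 0xFFF00000) | ((cid & 0xFFF) << 8) |
			     ((region & 0xF) << 4) | (type & 0xF);
		/* The CRC runs over the big-endian byte order of s */
		uint8_t be[4] = { s >> 24, s >> 16, s >> 8, s };
		uint8_t crc = crc8(be, 4, 0xFF);

		/* type A: [7] = active bit, [6:0] = crc[6:0] */
		return 0x80 | (crc & 0x7F);
	}

	int main(void)
	{
		populate_msb(0x07);
		/* regions 3/4/5 = X/T/U, as in the session-ctx helper */
		printf("X: 0x%02x\n", calc_validation_byte(1, 3, 0x1234));
		return 0;
	}
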
diff --git a/drivers/net/ethernet/qlogic/qed/qed_init_ops.c b/drivers/net/ethernet/qlogic/qed/qed_init_ops.c
index e3f3688..3bb76da 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_init_ops.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_init_ops.c
@@ -414,11 +414,23 @@ static void qed_init_cmd_rd(struct qed_hwfn *p_hwfn,
 }
 
 /* init_ops callbacks entry point */
-static void qed_init_cmd_cb(struct qed_hwfn *p_hwfn,
-			    struct qed_ptt *p_ptt,
-			    struct init_callback_op *p_cmd)
+static int qed_init_cmd_cb(struct qed_hwfn *p_hwfn,
+			   struct qed_ptt *p_ptt,
+			   struct init_callback_op *p_cmd)
 {
-	DP_NOTICE(p_hwfn, "Currently init values have no need of callbacks\n");
+	int rc;
+
+	switch (p_cmd->callback_id) {
+	case DMAE_READY_CB:
+		rc = qed_dmae_sanity(p_hwfn, p_ptt, "engine_phase");
+		break;
+	default:
+		DP_NOTICE(p_hwfn, "Unexpected init op callback ID %d\n",
+			  p_cmd->callback_id);
+		return -EINVAL;
+	}
+
+	return rc;
 }
 
 static u8 qed_init_cmd_mode_match(struct qed_hwfn *p_hwfn,
@@ -519,7 +531,7 @@ int qed_init_run(struct qed_hwfn *p_hwfn,
 			break;
 
 		case INIT_OP_CALLBACK:
-			qed_init_cmd_cb(p_hwfn, p_ptt, &cmd->callback);
+			rc = qed_init_cmd_cb(p_hwfn, p_ptt, &cmd->callback);
 			break;
 		}
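
With this change the init-ops interpreter propagates the callback's return
code instead of dropping it. A hedged sketch of the dispatch-and-propagate
pattern (the callback ID and handler here are hypothetical stand-ins):

	#include <errno.h>
	#include <stdio.h>

	enum cb_id { CB_DMAE_READY };

	static int dmae_ready_cb(void)
	{
		return 0;	/* stand-in for qed_dmae_sanity() */
	}

	static int run_callback(int callback_id)
	{
		switch (callback_id) {
		case CB_DMAE_READY:
			return dmae_ready_cb();
		default:
			fprintf(stderr,
				"Unexpected init op callback ID %d\n",
				callback_id);
			return -EINVAL;
		}
	}

	int main(void)
	{
		return run_callback(CB_DMAE_READY) ? 1 : 0;
	}
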
 
diff --git a/drivers/net/ethernet/qlogic/qed/qed_int.c b/drivers/net/ethernet/qlogic/qed/qed_int.c
index 719cdbf..d3eabcf 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_int.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_int.c
@@ -59,10 +59,10 @@ struct qed_pi_info {
 };
 
 struct qed_sb_sp_info {
-	struct qed_sb_info	sb_info;
+	struct qed_sb_info sb_info;
 
 	/* per protocol index data */
-	struct qed_pi_info	pi_info_arr[PIS_PER_SB];
+	struct qed_pi_info pi_info_arr[PIS_PER_SB_E4];
 };
 
 enum qed_attention_type {
@@ -82,7 +82,7 @@ struct aeu_invert_reg_bit {
 #define ATTENTION_LENGTH_SHIFT          (4)
 #define ATTENTION_LENGTH(flags)         (((flags) & ATTENTION_LENGTH_MASK) >> \
 					 ATTENTION_LENGTH_SHIFT)
-#define ATTENTION_SINGLE                (1 << ATTENTION_LENGTH_SHIFT)
+#define ATTENTION_SINGLE                BIT(ATTENTION_LENGTH_SHIFT)
 #define ATTENTION_PAR                   (ATTENTION_SINGLE | ATTENTION_PARITY)
 #define ATTENTION_PAR_INT               ((2 << ATTENTION_LENGTH_SHIFT) | \
 					 ATTENTION_PARITY)
@@ -1313,7 +1313,7 @@ static void qed_int_cau_conf_pi(struct qed_hwfn *p_hwfn,
 	if (IS_VF(p_hwfn->cdev))
 		return;
 
-	sb_offset = igu_sb_id * PIS_PER_SB;
+	sb_offset = igu_sb_id * PIS_PER_SB_E4;
 	memset(&pi_entry, 0, sizeof(struct cau_pi_entry));
 
 	SET_FIELD(pi_entry.prod, CAU_PI_ENTRY_PI_TIMESET, timeset);
diff --git a/drivers/net/ethernet/qlogic/qed/qed_int.h b/drivers/net/ethernet/qlogic/qed/qed_int.h
index 5199634..54b4ee0 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_int.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_int.h
@@ -197,7 +197,7 @@ void qed_int_disable_post_isr_release(struct qed_dev *cdev);
 #define QED_SB_EVENT_MASK       0x0003
 
 #define SB_ALIGNED_SIZE(p_hwfn)	\
-	ALIGNED_TYPE_SIZE(struct status_block, p_hwfn)
+	ALIGNED_TYPE_SIZE(struct status_block_e4, p_hwfn)
 
 #define QED_SB_INVALID_IDX      0xffff
 
diff --git a/drivers/net/ethernet/qlogic/qed/qed_iscsi.c b/drivers/net/ethernet/qlogic/qed/qed_iscsi.c
index 813c77c..c0d4a54 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_iscsi.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_iscsi.c
@@ -62,22 +62,6 @@
 #include "qed_sriov.h"
 #include "qed_reg_addr.h"
 
-static int
-qed_iscsi_async_event(struct qed_hwfn *p_hwfn,
-		      u8 fw_event_code,
-		      u16 echo, union event_ring_data *data, u8 fw_return_code)
-{
-	if (p_hwfn->p_iscsi_info->event_cb) {
-		struct qed_iscsi_info *p_iscsi = p_hwfn->p_iscsi_info;
-
-		return p_iscsi->event_cb(p_iscsi->event_context,
-					 fw_event_code, data);
-	} else {
-		DP_NOTICE(p_hwfn, "iSCSI async completion is not set\n");
-		return -EINVAL;
-	}
-}
-
 struct qed_iscsi_conn {
 	struct list_head list_entry;
 	bool free_on_delete;
@@ -105,7 +89,7 @@ struct qed_iscsi_conn {
 	u8 local_mac[6];
 	u8 remote_mac[6];
 	u16 vlan_id;
-	u8 tcp_flags;
+	u16 tcp_flags;
 	u8 ip_version;
 	u32 remote_ip[4];
 	u32 local_ip[4];
@@ -122,7 +106,6 @@ struct qed_iscsi_conn {
 	u32 ss_thresh;
 	u16 srtt;
 	u16 rtt_var;
-	u32 ts_time;
 	u32 ts_recent;
 	u32 ts_recent_age;
 	u32 total_rt;
@@ -144,7 +127,6 @@ struct qed_iscsi_conn {
 	u16 mss;
 	u8 snd_wnd_scale;
 	u8 rcv_wnd_scale;
-	u32 ts_ticks_per_second;
 	u16 da_timeout_value;
 	u8 ack_frequency;
 
@@ -162,6 +144,22 @@ struct qed_iscsi_conn {
 };
 
 static int
+qed_iscsi_async_event(struct qed_hwfn *p_hwfn,
+		      u8 fw_event_code,
+		      u16 echo, union event_ring_data *data, u8 fw_return_code)
+{
+	if (p_hwfn->p_iscsi_info->event_cb) {
+		struct qed_iscsi_info *p_iscsi = p_hwfn->p_iscsi_info;
+
+		return p_iscsi->event_cb(p_iscsi->event_context,
+					 fw_event_code, data);
+	} else {
+		DP_NOTICE(p_hwfn, "iSCSI async completion is not set\n");
+		return -EINVAL;
+	}
+}
+
+static int
 qed_sp_iscsi_func_start(struct qed_hwfn *p_hwfn,
 			enum spq_mode comp_mode,
 			struct qed_spq_comp_cb *p_comp_addr,
@@ -214,9 +212,9 @@ qed_sp_iscsi_func_start(struct qed_hwfn *p_hwfn,
 	p_init->num_sq_pages_in_ring = p_params->num_sq_pages_in_ring;
 	p_init->num_r2tq_pages_in_ring = p_params->num_r2tq_pages_in_ring;
 	p_init->num_uhq_pages_in_ring = p_params->num_uhq_pages_in_ring;
-	p_init->ooo_enable = p_params->ooo_enable;
 	p_init->ll2_rx_queue_id = p_hwfn->hw_info.resc_start[QED_LL2_QUEUE] +
 				  p_params->ll2_ooo_queue_id;
+
 	p_init->func_params.log_page_size = p_params->log_page_size;
 	val = p_params->num_tasks;
 	p_init->func_params.num_tasks = cpu_to_le16(val);
@@ -276,7 +274,7 @@ qed_sp_iscsi_func_start(struct qed_hwfn *p_hwfn,
 	p_ramrod->tcp_init.two_msl_timer = cpu_to_le32(p_params->two_msl_timer);
 	val = p_params->tx_sws_timer;
 	p_ramrod->tcp_init.tx_sws_timer = cpu_to_le16(val);
-	p_ramrod->tcp_init.maxfinrt = p_params->max_fin_rt;
+	p_ramrod->tcp_init.max_fin_rt = p_params->max_fin_rt;
 
 	p_hwfn->p_iscsi_info->event_context = event_context;
 	p_hwfn->p_iscsi_info->event_cb = async_event_cb;
@@ -304,8 +302,8 @@ static int qed_sp_iscsi_conn_offload(struct qed_hwfn *p_hwfn,
 	int rc = 0;
 	u32 dval;
 	u16 wval;
-	u8 i;
 	u16 *p;
+	u8 i;
 
 	/* Get SPQ entry */
 	memset(&init_data, 0, sizeof(init_data));
@@ -371,7 +369,7 @@ static int qed_sp_iscsi_conn_offload(struct qed_hwfn *p_hwfn,
 
 		p_tcp->vlan_id = cpu_to_le16(p_conn->vlan_id);
 
-		p_tcp->flags = p_conn->tcp_flags;
+		p_tcp->flags = cpu_to_le16(p_conn->tcp_flags);
 		p_tcp->ip_version = p_conn->ip_version;
 		for (i = 0; i < 4; i++) {
 			dval = p_conn->remote_ip[i];
@@ -436,7 +434,7 @@ static int qed_sp_iscsi_conn_offload(struct qed_hwfn *p_hwfn,
 		p_tcp2->remote_mac_addr_lo = swab16(get_unaligned(p + 2));
 
 		p_tcp2->vlan_id = cpu_to_le16(p_conn->vlan_id);
-		p_tcp2->flags = p_conn->tcp_flags;
+		p_tcp2->flags = cpu_to_le16(p_conn->tcp_flags);
 
 		p_tcp2->ip_version = p_conn->ip_version;
 		for (i = 0; i < 4; i++) {
@@ -458,6 +456,11 @@ static int qed_sp_iscsi_conn_offload(struct qed_hwfn *p_hwfn,
 		p_tcp2->syn_ip_payload_length = cpu_to_le16(wval);
 		p_tcp2->syn_phy_addr_lo = DMA_LO_LE(p_conn->syn_phy_addr);
 		p_tcp2->syn_phy_addr_hi = DMA_HI_LE(p_conn->syn_phy_addr);
+		p_tcp2->cwnd = cpu_to_le32(p_conn->cwnd);
+		p_tcp2->ka_max_probe_cnt = p_conn->ka_probe_cnt;
+		p_tcp2->ka_timeout = cpu_to_le32(p_conn->ka_timeout);
+		p_tcp2->max_rt_time = cpu_to_le32(p_conn->max_rt_time);
+		p_tcp2->ka_interval = cpu_to_le32(p_conn->ka_interval);
 	}
 
 	return qed_spq_post(p_hwfn, p_ent, NULL);
@@ -692,8 +695,7 @@ static void __iomem *qed_iscsi_get_secondary_bdq_prod(struct qed_hwfn *p_hwfn,
 	}
 }
 
-static int qed_iscsi_setup_connection(struct qed_hwfn *p_hwfn,
-				      struct qed_iscsi_conn *p_conn)
+static int qed_iscsi_setup_connection(struct qed_iscsi_conn *p_conn)
 {
 	if (!p_conn->queue_cnts_virt_addr)
 		goto nomem;
@@ -844,7 +846,7 @@ static int qed_iscsi_acquire_connection(struct qed_hwfn *p_hwfn,
 		rc = qed_iscsi_allocate_connection(p_hwfn, &p_conn);
 
 	if (!rc)
-		rc = qed_iscsi_setup_connection(p_hwfn, p_conn);
+		rc = qed_iscsi_setup_connection(p_conn);
 
 	if (rc) {
 		spin_lock_bh(&p_hwfn->p_iscsi_info->lock);
@@ -1294,7 +1296,6 @@ static int qed_iscsi_offload_conn(struct qed_dev *cdev,
 	con->ss_thresh = conn_info->ss_thresh;
 	con->srtt = conn_info->srtt;
 	con->rtt_var = conn_info->rtt_var;
-	con->ts_time = conn_info->ts_time;
 	con->ts_recent = conn_info->ts_recent;
 	con->ts_recent_age = conn_info->ts_recent_age;
 	con->total_rt = conn_info->total_rt;
@@ -1316,7 +1317,6 @@ static int qed_iscsi_offload_conn(struct qed_dev *cdev,
 	con->mss = conn_info->mss;
 	con->snd_wnd_scale = conn_info->snd_wnd_scale;
 	con->rcv_wnd_scale = conn_info->rcv_wnd_scale;
-	con->ts_ticks_per_second = conn_info->ts_ticks_per_second;
 	con->da_timeout_value = conn_info->da_timeout_value;
 	con->ack_frequency = conn_info->ack_frequency;
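
The widened tcp_flags field (u8 to u16) now needs an explicit cpu_to_le16()
before landing in the ramrod, since the firmware structures are
little-endian. A minimal host-side sketch of that conversion (not the kernel
helper; the flags value is illustrative):

	#include <stdint.h>
	#include <stdio.h>

	static uint16_t cpu_to_le16_sketch(uint16_t v)
	{
	#if defined(__BYTE_ORDER__) && __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
		return (uint16_t)((v << 8) | (v >> 8));	/* swap on BE hosts */
	#else
		return v;				/* LE host: identity */
	#endif
	}

	int main(void)
	{
		uint16_t tcp_flags = 0x0102;	/* illustrative value */

		printf("wire value: 0x%04x\n", cpu_to_le16_sketch(tcp_flags));
		return 0;
	}
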
 
diff --git a/drivers/net/ethernet/qlogic/qed/qed_iwarp.c b/drivers/net/ethernet/qlogic/qed/qed_iwarp.c
index 409041e..ca4a81d 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_iwarp.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_iwarp.c
@@ -64,14 +64,21 @@ struct mpa_v2_hdr {
 
 #define QED_IWARP_INVALID_TCP_CID	0xffffffff
 #define QED_IWARP_RCV_WND_SIZE_DEF	(256 * 1024)
-#define QED_IWARP_RCV_WND_SIZE_MIN	(64 * 1024)
+#define QED_IWARP_RCV_WND_SIZE_MIN	(0xffff)
 #define TIMESTAMP_HEADER_SIZE		(12)
+#define QED_IWARP_MAX_FIN_RT_DEFAULT	(2)
 
 #define QED_IWARP_TS_EN			BIT(0)
 #define QED_IWARP_DA_EN			BIT(1)
 #define QED_IWARP_PARAM_CRC_NEEDED	(1)
 #define QED_IWARP_PARAM_P2P		(1)
 
+#define QED_IWARP_DEF_MAX_RT_TIME	(0)
+#define QED_IWARP_DEF_CWND_FACTOR	(4)
+#define QED_IWARP_DEF_KA_MAX_PROBE_CNT	(5)
+#define QED_IWARP_DEF_KA_TIMEOUT	(1200000)	/* 20 min */
+#define QED_IWARP_DEF_KA_INTERVAL	(1000)		/* 1 sec */
+
 static int qed_iwarp_async_event(struct qed_hwfn *p_hwfn,
 				 u8 fw_event_code, u16 echo,
 				 union event_ring_data *data,
@@ -120,11 +127,17 @@ static void qed_iwarp_cid_cleaned(struct qed_hwfn *p_hwfn, u32 cid)
 	spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
 }
 
-void qed_iwarp_init_fw_ramrod(struct qed_hwfn *p_hwfn,
-			      struct iwarp_init_func_params *p_ramrod)
+void
+qed_iwarp_init_fw_ramrod(struct qed_hwfn *p_hwfn,
+			 struct iwarp_init_func_ramrod_data *p_ramrod)
 {
-	p_ramrod->ll2_ooo_q_index = RESC_START(p_hwfn, QED_LL2_QUEUE) +
-				    p_hwfn->p_rdma_info->iwarp.ll2_ooo_handle;
+	p_ramrod->iwarp.ll2_ooo_q_index =
+		RESC_START(p_hwfn, QED_LL2_QUEUE) +
+		p_hwfn->p_rdma_info->iwarp.ll2_ooo_handle;
+
+	p_ramrod->tcp.max_fin_rt = QED_IWARP_MAX_FIN_RT_DEFAULT;
 }
 
 static int qed_iwarp_alloc_cid(struct qed_hwfn *p_hwfn, u32 *cid)
@@ -699,6 +712,12 @@ qed_iwarp_tcp_offload(struct qed_hwfn *p_hwfn, struct qed_iwarp_ep *ep)
 	tcp->ttl = 0x40;
 	tcp->tos_or_tc = 0;
 
+	tcp->max_rt_time = QED_IWARP_DEF_MAX_RT_TIME;
+	tcp->cwnd = QED_IWARP_DEF_CWND_FACTOR * tcp->mss;
+	tcp->ka_max_probe_cnt = QED_IWARP_DEF_KA_MAX_PROBE_CNT;
+	tcp->ka_timeout = QED_IWARP_DEF_KA_TIMEOUT;
+	tcp->ka_interval = QED_IWARP_DEF_KA_INTERVAL;
+
 	tcp->rcv_wnd_scale = (u8)p_hwfn->p_rdma_info->iwarp.rcv_wnd_scale;
 	tcp->connect_mode = ep->connect_mode;
 
@@ -807,6 +826,7 @@ static int
 qed_iwarp_mpa_offload(struct qed_hwfn *p_hwfn, struct qed_iwarp_ep *ep)
 {
 	struct iwarp_mpa_offload_ramrod_data *p_mpa_ramrod;
+	struct qed_iwarp_info *iwarp_info;
 	struct qed_sp_init_data init_data;
 	dma_addr_t async_output_phys;
 	struct qed_spq_entry *p_ent;
@@ -874,6 +894,8 @@ qed_iwarp_mpa_offload(struct qed_hwfn *p_hwfn, struct qed_iwarp_ep *ep)
 		p_mpa_ramrod->common.reject = 1;
 	}
 
+	iwarp_info = &p_hwfn->p_rdma_info->iwarp;
+	p_mpa_ramrod->rcv_wnd = iwarp_info->rcv_wnd_size;
 	p_mpa_ramrod->mode = ep->mpa_rev;
 	SET_FIELD(p_mpa_ramrod->rtr_pref,
 		  IWARP_MPA_OFFLOAD_RAMROD_DATA_RTR_SUPPORTED, ep->rtr_type);
@@ -2745,6 +2767,7 @@ int qed_iwarp_setup(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
 	/* value 0 is used for ilog2(QED_IWARP_RCV_WND_SIZE_MIN) */
 	iwarp_info->rcv_wnd_scale = ilog2(rcv_wnd_size) -
 	    ilog2(QED_IWARP_RCV_WND_SIZE_MIN);
+	iwarp_info->rcv_wnd_size = rcv_wnd_size >> iwarp_info->rcv_wnd_scale;
 	iwarp_info->crc_needed = QED_IWARP_PARAM_CRC_NEEDED;
 	iwarp_info->mpa_rev = MPA_NEGOTIATION_TYPE_ENHANCED;
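
A worked example of the receive-window scaling above: the scale factor is
the log2 distance from the window-size minimum, and the value kept for the
MPA ramrod is the window right-shifted by that scale so it fits in 16 bits.
Standalone sketch (ilog2() reimplemented for a userspace build):

	#include <stdint.h>
	#include <stdio.h>

	#define RCV_WND_SIZE_MIN 0xffff	/* QED_IWARP_RCV_WND_SIZE_MIN */

	static unsigned int ilog2_u32(uint32_t v)
	{
		unsigned int l = 0;

		while (v >>= 1)
			l++;
		return l;
	}

	int main(void)
	{
		uint32_t rcv_wnd_size = 256 * 1024;	/* default window */
		unsigned int scale = ilog2_u32(rcv_wnd_size) -
				     ilog2_u32(RCV_WND_SIZE_MIN);

		/* 256KB window -> scale 3, 16-bit window 0x8000 */
		printf("scale=%u wnd=0x%04x\n", scale,
		       (unsigned int)(uint16_t)(rcv_wnd_size >> scale));
		return 0;
	}
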
 
diff --git a/drivers/net/ethernet/qlogic/qed/qed_iwarp.h b/drivers/net/ethernet/qlogic/qed/qed_iwarp.h
index c1ecd74..b8f612d 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_iwarp.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_iwarp.h
@@ -95,6 +95,7 @@ struct qed_iwarp_info {
 	spinlock_t iw_lock;	/* for iwarp resources */
 	spinlock_t qp_lock;	/* for teardown races */
 	u32 rcv_wnd_scale;
+	u16 rcv_wnd_size;
 	u16 max_mtu;
 	u8 mac_addr[ETH_ALEN];
 	u8 crc_needed;
@@ -187,7 +188,7 @@ int qed_iwarp_setup(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
 		    struct qed_rdma_start_in_params *params);
 
 void qed_iwarp_init_fw_ramrod(struct qed_hwfn *p_hwfn,
-			      struct iwarp_init_func_params *p_ramrod);
+			      struct iwarp_init_func_ramrod_data *p_ramrod);
 
 int qed_iwarp_stop(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);
 
diff --git a/drivers/net/ethernet/qlogic/qed/qed_l2.c b/drivers/net/ethernet/qlogic/qed/qed_l2.c
index 0853389..893ef08 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_l2.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_l2.c
@@ -223,10 +223,9 @@ _qed_eth_queue_to_cid(struct qed_hwfn *p_hwfn,
 	struct qed_queue_cid *p_cid;
 	int rc;
 
-	p_cid = vmalloc(sizeof(*p_cid));
+	p_cid = vzalloc(sizeof(*p_cid));
 	if (!p_cid)
 		return NULL;
-	memset(p_cid, 0, sizeof(*p_cid));
 
 	p_cid->opaque_fid = opaque_fid;
 	p_cid->cid = cid;
@@ -1969,33 +1968,45 @@ void qed_reset_vport_stats(struct qed_dev *cdev)
 		_qed_get_vport_stats(cdev, cdev->reset_stats);
 }
 
-static void
-qed_arfs_mode_configure(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
-			struct qed_arfs_config_params *p_cfg_params)
+static enum gft_profile_type
+qed_arfs_mode_to_hsi(enum qed_filter_config_mode mode)
 {
-	if (p_cfg_params->arfs_enable) {
-		qed_set_rfs_mode_enable(p_hwfn, p_ptt, p_hwfn->rel_pf_id,
-					p_cfg_params->tcp, p_cfg_params->udp,
-					p_cfg_params->ipv4, p_cfg_params->ipv6);
-		DP_VERBOSE(p_hwfn, QED_MSG_SP,
-			   "tcp = %s, udp = %s, ipv4 = %s, ipv6 =%s\n",
+	if (mode == QED_FILTER_CONFIG_MODE_5_TUPLE)
+		return GFT_PROFILE_TYPE_4_TUPLE;
+	if (mode == QED_FILTER_CONFIG_MODE_IP_DEST)
+		return GFT_PROFILE_TYPE_IP_DST_PORT;
+	return GFT_PROFILE_TYPE_L4_DST_PORT;
+}
+
+void qed_arfs_mode_configure(struct qed_hwfn *p_hwfn,
+			     struct qed_ptt *p_ptt,
+			     struct qed_arfs_config_params *p_cfg_params)
+{
+	if (p_cfg_params->mode != QED_FILTER_CONFIG_MODE_DISABLE) {
+		qed_gft_config(p_hwfn, p_ptt, p_hwfn->rel_pf_id,
+			       p_cfg_params->tcp,
+			       p_cfg_params->udp,
+			       p_cfg_params->ipv4,
+			       p_cfg_params->ipv6,
+			       qed_arfs_mode_to_hsi(p_cfg_params->mode));
+		DP_VERBOSE(p_hwfn,
+			   QED_MSG_SP,
+			   "Configured Filtering: tcp = %s, udp = %s, ipv4 = %s, ipv6 =%s mode=%08x\n",
 			   p_cfg_params->tcp ? "Enable" : "Disable",
 			   p_cfg_params->udp ? "Enable" : "Disable",
 			   p_cfg_params->ipv4 ? "Enable" : "Disable",
-			   p_cfg_params->ipv6 ? "Enable" : "Disable");
+			   p_cfg_params->ipv6 ? "Enable" : "Disable",
+			   (u32)p_cfg_params->mode);
 	} else {
-		qed_set_rfs_mode_disable(p_hwfn, p_ptt, p_hwfn->rel_pf_id);
+		DP_VERBOSE(p_hwfn, QED_MSG_SP, "Disabled Filtering\n");
+		qed_gft_disable(p_hwfn, p_ptt, p_hwfn->rel_pf_id);
 	}
-
-	DP_VERBOSE(p_hwfn, QED_MSG_SP, "Configured ARFS mode : %s\n",
-		   p_cfg_params->arfs_enable ? "Enable" : "Disable");
 }
 
-static int
-qed_configure_rfs_ntuple_filter(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
+int
+qed_configure_rfs_ntuple_filter(struct qed_hwfn *p_hwfn,
 				struct qed_spq_comp_cb *p_cb,
-				dma_addr_t p_addr, u16 length, u16 qid,
-				u8 vport_id, bool b_is_add)
+				struct qed_ntuple_filter_params *p_params)
 {
 	struct rx_update_gft_filter_data *p_ramrod = NULL;
 	struct qed_spq_entry *p_ent = NULL;
@@ -2004,13 +2015,15 @@ qed_configure_rfs_ntuple_filter(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
 	u8 abs_vport_id = 0;
 	int rc = -EINVAL;
 
-	rc = qed_fw_vport(p_hwfn, vport_id, &abs_vport_id);
+	rc = qed_fw_vport(p_hwfn, p_params->vport_id, &abs_vport_id);
 	if (rc)
 		return rc;
 
-	rc = qed_fw_l2_queue(p_hwfn, qid, &abs_rx_q_id);
-	if (rc)
-		return rc;
+	if (p_params->qid != QED_RFS_NTUPLE_QID_RSS) {
+		rc = qed_fw_l2_queue(p_hwfn, p_params->qid, &abs_rx_q_id);
+		if (rc)
+			return rc;
+	}
 
 	/* Get SPQ entry */
 	memset(&init_data, 0, sizeof(init_data));
@@ -2032,17 +2045,27 @@ qed_configure_rfs_ntuple_filter(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
 		return rc;
 
 	p_ramrod = &p_ent->ramrod.rx_update_gft;
-	DMA_REGPAIR_LE(p_ramrod->pkt_hdr_addr, p_addr);
-	p_ramrod->pkt_hdr_length = cpu_to_le16(length);
-	p_ramrod->rx_qid_or_action_icid = cpu_to_le16(abs_rx_q_id);
-	p_ramrod->vport_id = abs_vport_id;
-	p_ramrod->filter_type = RFS_FILTER_TYPE;
-	p_ramrod->filter_action = b_is_add ? GFT_ADD_FILTER : GFT_DELETE_FILTER;
+
+	DMA_REGPAIR_LE(p_ramrod->pkt_hdr_addr, p_params->addr);
+	p_ramrod->pkt_hdr_length = cpu_to_le16(p_params->length);
+
+	if (p_params->qid != QED_RFS_NTUPLE_QID_RSS) {
+		p_ramrod->rx_qid_valid = 1;
+		p_ramrod->rx_qid = cpu_to_le16(abs_rx_q_id);
+	}
+
+	p_ramrod->flow_id_valid = 0;
+	p_ramrod->flow_id = 0;
+
+	p_ramrod->vport_id = cpu_to_le16((u16)abs_vport_id);
+	p_ramrod->filter_action = p_params->b_is_add ? GFT_ADD_FILTER
+	    : GFT_DELETE_FILTER;
 
 	DP_VERBOSE(p_hwfn, QED_MSG_SP,
 		   "V[%0x], Q[%04x] - %s filter from 0x%llx [length %04xb]\n",
 		   abs_vport_id, abs_rx_q_id,
-		   b_is_add ? "Adding" : "Removing", (u64)p_addr, length);
+		   p_params->b_is_add ? "Adding" : "Removing",
+		   (u64)p_params->addr, p_params->length);
 
 	return qed_spq_post(p_hwfn, p_ent, NULL);
 }
@@ -2743,7 +2766,8 @@ static int qed_configure_filter(struct qed_dev *cdev,
 	}
 }
 
-static int qed_configure_arfs_searcher(struct qed_dev *cdev, bool en_searcher)
+static int qed_configure_arfs_searcher(struct qed_dev *cdev,
+				       enum qed_filter_config_mode mode)
 {
 	struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
 	struct qed_arfs_config_params arfs_config_params;
@@ -2753,8 +2777,7 @@ static int qed_configure_arfs_searcher(struct qed_dev *cdev, bool en_searcher)
 	arfs_config_params.udp = true;
 	arfs_config_params.ipv4 = true;
 	arfs_config_params.ipv6 = true;
-	arfs_config_params.arfs_enable = en_searcher;
-
+	arfs_config_params.mode = mode;
 	qed_arfs_mode_configure(p_hwfn, p_hwfn->p_arfs_ptt,
 				&arfs_config_params);
 	return 0;
@@ -2762,8 +2785,8 @@ static int qed_configure_arfs_searcher(struct qed_dev *cdev, bool en_searcher)
 
 static void
 qed_arfs_sp_response_handler(struct qed_hwfn *p_hwfn,
-			     void *cookie, union event_ring_data *data,
-			     u8 fw_return_code)
+			     void *cookie,
+			     union event_ring_data *data, u8 fw_return_code)
 {
 	struct qed_common_cb_ops *op = p_hwfn->cdev->protocol_ops.common;
 	void *dev = p_hwfn->cdev->ops_cookie;
@@ -2771,10 +2794,10 @@ qed_arfs_sp_response_handler(struct qed_hwfn *p_hwfn,
 	op->arfs_filter_op(dev, cookie, fw_return_code);
 }
 
-static int qed_ntuple_arfs_filter_config(struct qed_dev *cdev, void *cookie,
-					 dma_addr_t mapping, u16 length,
-					 u16 vport_id, u16 rx_queue_id,
-					 bool add_filter)
+static int
+qed_ntuple_arfs_filter_config(struct qed_dev *cdev,
+			      void *cookie,
+			      struct qed_ntuple_filter_params *params)
 {
 	struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
 	struct qed_spq_comp_cb cb;
@@ -2783,9 +2806,19 @@ static int qed_ntuple_arfs_filter_config(struct qed_dev *cdev, void *cookie,
 	cb.function = qed_arfs_sp_response_handler;
 	cb.cookie = cookie;
 
-	rc = qed_configure_rfs_ntuple_filter(p_hwfn, p_hwfn->p_arfs_ptt,
-					     &cb, mapping, length, rx_queue_id,
-					     vport_id, add_filter);
+	if (params->b_is_vf) {
+		if (!qed_iov_is_valid_vfid(p_hwfn, params->vf_id, false,
+					   false)) {
+			DP_INFO(p_hwfn, "vfid 0x%02x is out of bounds\n",
+				params->vf_id);
+			return rc;
+		}
+
+		params->vport_id = params->vf_id + 1;
+		params->qid = QED_RFS_NTUPLE_QID_RSS;
+	}
+
+	rc = qed_configure_rfs_ntuple_filter(p_hwfn, &cb, params);
 	if (rc)
 		DP_NOTICE(p_hwfn,
 			  "Failed to issue a-RFS filter configuration\n");
diff --git a/drivers/net/ethernet/qlogic/qed/qed_l2.h b/drivers/net/ethernet/qlogic/qed/qed_l2.h
index cc1f248..c4030e9 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_l2.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_l2.h
@@ -190,7 +190,7 @@ struct qed_arfs_config_params {
 	bool udp;
 	bool ipv4;
 	bool ipv6;
-	bool arfs_enable;
+	enum qed_filter_config_mode mode;
 };
 
 struct qed_sp_vport_update_params {
@@ -277,6 +277,37 @@ void qed_get_vport_stats(struct qed_dev *cdev, struct qed_eth_stats *stats);
 
 void qed_reset_vport_stats(struct qed_dev *cdev);
 
+/**
+ * @brief qed_arfs_mode_configure -
+ *
+ * Enable or disable RFS mode. It must accept at least one of tcp or udp true
+ * and at least one of ipv4 or ipv6 true to enable RFS mode.
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param p_cfg_params - aRFS mode configuration parameters.
+ *
+ */
+void qed_arfs_mode_configure(struct qed_hwfn *p_hwfn,
+			     struct qed_ptt *p_ptt,
+			     struct qed_arfs_config_params *p_cfg_params);
+
+/**
+ * @brief - qed_configure_rfs_ntuple_filter
+ *
+ * This ramrod should be used to add or remove an aRFS HW filter.
+ *
+ * @param p_hwfn
+ * @param p_cb - Used for QED_SPQ_MODE_CB, where the client would initialize
+ *		  it with a cookie and callback function address; if not
+ *		  using this mode, the client must pass NULL.
+ * @param p_params
+ */
+int
+qed_configure_rfs_ntuple_filter(struct qed_hwfn *p_hwfn,
+				struct qed_spq_comp_cb *p_cb,
+				struct qed_ntuple_filter_params *p_params);
+
 #define MAX_QUEUES_PER_QZONE    (sizeof(unsigned long) * 8)
 #define QED_QUEUE_CID_SELF	(0xff)
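
The refactor above collapses the old scalar argument list into a single
qed_ntuple_filter_params. A standalone mock of that parameter-struct pattern
(not the driver API; the field names mirror those used at the call sites:
addr, length, qid, vport_id, b_is_add):

	#include <stdbool.h>
	#include <stdint.h>
	#include <stdio.h>

	#define QID_RSS 0xffff	/* stand-in for QED_RFS_NTUPLE_QID_RSS */

	struct ntuple_filter_params {
		uint64_t addr;		/* DMA address of the header buffer */
		uint16_t length;
		uint16_t qid;		/* QID_RSS: let RSS pick the queue */
		uint8_t vport_id;
		bool b_is_add;
	};

	static int configure_filter(const struct ntuple_filter_params *p)
	{
		printf("%s filter: vport %u, qid 0x%04x, %u bytes @ 0x%llx\n",
		       p->b_is_add ? "add" : "remove", p->vport_id, p->qid,
		       p->length, (unsigned long long)p->addr);
		return 0;
	}

	int main(void)
	{
		struct ntuple_filter_params params = {
			.addr = 0x1000, .length = 64, .qid = QID_RSS,
			.vport_id = 1, .b_is_add = true,
		};

		return configure_filter(&params);
	}
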
 
diff --git a/drivers/net/ethernet/qlogic/qed/qed_ll2.c b/drivers/net/ethernet/qlogic/qed/qed_ll2.c
index 047f556..c4f14fd 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_ll2.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_ll2.c
@@ -406,6 +406,9 @@ static void qed_ll2_rxq_parse_gsi(struct qed_hwfn *p_hwfn,
 	data->opaque_data_0 = le32_to_cpu(p_cqe->rx_cqe_gsi.src_mac_addrhi);
 	data->opaque_data_1 = le16_to_cpu(p_cqe->rx_cqe_gsi.src_mac_addrlo);
 	data->u.data_length_error = p_cqe->rx_cqe_gsi.data_length_error;
+	data->qp_id = le16_to_cpu(p_cqe->rx_cqe_gsi.qp_id);
+
+	data->src_qp = le32_to_cpu(p_cqe->rx_cqe_gsi.src_qp);
 }
 
 static void qed_ll2_rxq_parse_reg(struct qed_hwfn *p_hwfn,
@@ -927,7 +930,8 @@ static int qed_sp_ll2_rx_queue_start(struct qed_hwfn *p_hwfn,
 		       qed_chain_get_pbl_phys(&p_rx->rcq_chain));
 
 	p_ramrod->drop_ttl0_flg = p_ll2_conn->input.rx_drop_ttl0_flg;
-	p_ramrod->inner_vlan_removal_en = p_ll2_conn->input.rx_vlan_removal_en;
+	p_ramrod->inner_vlan_stripping_en =
+		p_ll2_conn->input.rx_vlan_removal_en;
 	p_ramrod->queue_id = p_ll2_conn->queue_id;
 	p_ramrod->main_func_queue = p_ll2_conn->main_func_queue ? 1 : 0;
 
@@ -1299,8 +1303,20 @@ int qed_ll2_acquire_connection(void *cxt, struct qed_ll2_acquire_data *data)
 
 	memcpy(&p_ll2_info->input, &data->input, sizeof(p_ll2_info->input));
 
-	p_ll2_info->tx_dest = (data->input.tx_dest == QED_LL2_TX_DEST_NW) ?
-			      CORE_TX_DEST_NW : CORE_TX_DEST_LB;
+	switch (data->input.tx_dest) {
+	case QED_LL2_TX_DEST_NW:
+		p_ll2_info->tx_dest = CORE_TX_DEST_NW;
+		break;
+	case QED_LL2_TX_DEST_LB:
+		p_ll2_info->tx_dest = CORE_TX_DEST_LB;
+		break;
+	case QED_LL2_TX_DEST_DROP:
+		p_ll2_info->tx_dest = CORE_TX_DEST_DROP;
+		break;
+	default:
+		return -EINVAL;
+	}
+
 	if (data->input.conn_type == QED_LL2_TYPE_OOO ||
 	    data->input.secondary_queue)
 		p_ll2_info->main_func_queue = false;
@@ -2281,8 +2297,7 @@ static int qed_ll2_start(struct qed_dev *cdev, struct qed_ll2_params *params)
 		goto release_terminate;
 	}
 
-	if (cdev->hwfns[0].hw_info.personality == QED_PCI_ISCSI &&
-	    cdev->hwfns[0].pf_params.iscsi_pf_params.ooo_enable) {
+	if (QED_LEADING_HWFN(cdev)->hw_info.personality == QED_PCI_ISCSI) {
 		DP_VERBOSE(cdev, QED_MSG_STORAGE, "Starting OOO LL2 queue\n");
 		rc = qed_ll2_start_ooo(cdev, params);
 		if (rc) {
@@ -2340,8 +2355,7 @@ static int qed_ll2_stop(struct qed_dev *cdev)
 	qed_ptt_release(QED_LEADING_HWFN(cdev), p_ptt);
 	eth_zero_addr(cdev->ll2_mac_address);
 
-	if (cdev->hwfns[0].hw_info.personality == QED_PCI_ISCSI &&
-	    cdev->hwfns[0].pf_params.iscsi_pf_params.ooo_enable)
+	if (QED_LEADING_HWFN(cdev)->hw_info.personality == QED_PCI_ISCSI)
 		qed_ll2_stop_ooo(cdev);
 
 	rc = qed_ll2_terminate_connection(QED_LEADING_HWFN(cdev),
diff --git a/drivers/net/ethernet/qlogic/qed/qed_mcp.c b/drivers/net/ethernet/qlogic/qed/qed_mcp.c
index 8b99c7d..6f46cb1 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_mcp.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_mcp.c
@@ -2234,7 +2234,7 @@ int qed_mcp_nvm_read(struct qed_dev *cdev, u32 addr, u8 *p_buf, u32 len)
 					DRV_MSG_CODE_NVM_READ_NVRAM,
 					addr + offset +
 					(bytes_to_copy <<
-					 DRV_MB_PARAM_NVM_LEN_SHIFT),
+					 DRV_MB_PARAM_NVM_LEN_OFFSET),
 					&resp, &resp_param,
 					&read_len,
 					(u32 *)(p_buf + offset));
diff --git a/drivers/net/ethernet/qlogic/qed/qed_rdma.c b/drivers/net/ethernet/qlogic/qed/qed_rdma.c
index c8c4b39..bdc46f1 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_rdma.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_rdma.c
@@ -553,7 +553,7 @@ static int qed_rdma_start_fw(struct qed_hwfn *p_hwfn,
 
 	if (QED_IS_IWARP_PERSONALITY(p_hwfn)) {
 		qed_iwarp_init_fw_ramrod(p_hwfn,
-					 &p_ent->ramrod.iwarp_init_func.iwarp);
+					 &p_ent->ramrod.iwarp_init_func);
 		p_ramrod = &p_ent->ramrod.iwarp_init_func.rdma;
 	} else {
 		p_ramrod = &p_ent->ramrod.roce_init_func.rdma;
diff --git a/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h b/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h
index 0cdb433..f712205 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h
@@ -124,6 +124,8 @@
 	0x1f0434UL
 #define PRS_REG_SEARCH_TAG1 \
 	0x1f0444UL
+#define PRS_REG_SEARCH_TENANT_ID \
+	0x1f044cUL
 #define PRS_REG_PKT_LEN_STAT_TAGS_NOT_COUNTED_FIRST \
 	0x1f0a0cUL
 #define PRS_REG_SEARCH_TCP_FIRST_FRAG \
@@ -200,7 +202,13 @@
 	0x2e8800UL
 #define CCFC_REG_STRONG_ENABLE_VF \
 	0x2e070cUL
-#define  CDU_REG_CID_ADDR_PARAMS	\
+#define CDU_REG_CCFC_CTX_VALID0 \
+	0x580400UL
+#define CDU_REG_CCFC_CTX_VALID1 \
+	0x580404UL
+#define CDU_REG_TCFC_CTX_VALID0 \
+	0x580408UL
+#define  CDU_REG_CID_ADDR_PARAMS \
 	0x580900UL
 #define  DBG_REG_CLIENT_ENABLE \
 	0x010004UL
@@ -564,7 +572,7 @@
 #define PRS_REG_ENCAPSULATION_TYPE_EN	0x1f0730UL
 #define PRS_REG_GRE_PROTOCOL		0x1f0734UL
 #define PRS_REG_VXLAN_PORT		0x1f0738UL
-#define PRS_REG_OUTPUT_FORMAT_4_0	0x1f099cUL
+#define PRS_REG_OUTPUT_FORMAT_4_0_BB_K2	0x1f099cUL
 #define NIG_REG_ENC_TYPE_ENABLE		0x501058UL
 
 #define NIG_REG_ENC_TYPE_ENABLE_ETH_OVER_GRE_ENABLE		(0x1 << 0)
@@ -580,11 +588,11 @@
 #define PRS_REG_NGE_PORT		0x1f086cUL
 #define NIG_REG_NGE_PORT		0x508b38UL
 
-#define DORQ_REG_L2_EDPM_TUNNEL_GRE_ETH_EN	0x10090cUL
-#define DORQ_REG_L2_EDPM_TUNNEL_GRE_IP_EN	0x100910UL
-#define DORQ_REG_L2_EDPM_TUNNEL_VXLAN_EN	0x100914UL
-#define DORQ_REG_L2_EDPM_TUNNEL_NGE_IP_EN	0x10092cUL
-#define DORQ_REG_L2_EDPM_TUNNEL_NGE_ETH_EN	0x100930UL
+#define DORQ_REG_L2_EDPM_TUNNEL_GRE_ETH_EN		0x10090cUL
+#define DORQ_REG_L2_EDPM_TUNNEL_GRE_IP_EN		0x100910UL
+#define DORQ_REG_L2_EDPM_TUNNEL_VXLAN_EN		0x100914UL
+#define DORQ_REG_L2_EDPM_TUNNEL_NGE_IP_EN_K2_E5		0x10092cUL
+#define DORQ_REG_L2_EDPM_TUNNEL_NGE_ETH_EN_K2_E5	0x100930UL
 
 #define NIG_REG_NGE_IP_ENABLE			0x508b28UL
 #define NIG_REG_NGE_ETH_ENABLE			0x508b2cUL
@@ -595,15 +603,15 @@
 #define QM_REG_WFQPFWEIGHT	0x2f4e80UL
 #define QM_REG_WFQVPWEIGHT	0x2fa000UL
 
-#define PGLCS_REG_DBG_SELECT_K2 \
+#define PGLCS_REG_DBG_SELECT_K2_E5 \
 	0x001d14UL
-#define PGLCS_REG_DBG_DWORD_ENABLE_K2 \
+#define PGLCS_REG_DBG_DWORD_ENABLE_K2_E5 \
 	0x001d18UL
-#define PGLCS_REG_DBG_SHIFT_K2 \
+#define PGLCS_REG_DBG_SHIFT_K2_E5 \
 	0x001d1cUL
-#define PGLCS_REG_DBG_FORCE_VALID_K2 \
+#define PGLCS_REG_DBG_FORCE_VALID_K2_E5 \
 	0x001d20UL
-#define PGLCS_REG_DBG_FORCE_FRAME_K2 \
+#define PGLCS_REG_DBG_FORCE_FRAME_K2_E5 \
 	0x001d24UL
 #define MISC_REG_RESET_PL_PDA_VMAIN_1 \
 	0x008070UL
@@ -615,7 +623,7 @@
 	0x009050UL
 #define MISCS_REG_RESET_PL_HV \
 	0x009060UL
-#define MISCS_REG_RESET_PL_HV_2_K2	\
+#define MISCS_REG_RESET_PL_HV_2_K2_E5 \
 	0x009150UL
 #define DMAE_REG_DBG_SELECT \
 	0x00c510UL
@@ -647,15 +655,15 @@
 	0x0500b0UL
 #define GRC_REG_DBG_FORCE_FRAME	\
 	0x0500b4UL
-#define UMAC_REG_DBG_SELECT_K2 \
+#define UMAC_REG_DBG_SELECT_K2_E5 \
 	0x051094UL
-#define UMAC_REG_DBG_DWORD_ENABLE_K2 \
+#define UMAC_REG_DBG_DWORD_ENABLE_K2_E5 \
 	0x051098UL
-#define UMAC_REG_DBG_SHIFT_K2 \
+#define UMAC_REG_DBG_SHIFT_K2_E5 \
 	0x05109cUL
-#define UMAC_REG_DBG_FORCE_VALID_K2 \
+#define UMAC_REG_DBG_FORCE_VALID_K2_E5 \
 	0x0510a0UL
-#define UMAC_REG_DBG_FORCE_FRAME_K2 \
+#define UMAC_REG_DBG_FORCE_FRAME_K2_E5 \
 	0x0510a4UL
 #define MCP2_REG_DBG_SELECT \
 	0x052400UL
@@ -717,15 +725,15 @@
 	0x1f0ba0UL
 #define PRS_REG_DBG_FORCE_FRAME	\
 	0x1f0ba4UL
-#define CNIG_REG_DBG_SELECT_K2 \
+#define CNIG_REG_DBG_SELECT_K2_E5 \
 	0x218254UL
-#define CNIG_REG_DBG_DWORD_ENABLE_K2 \
+#define CNIG_REG_DBG_DWORD_ENABLE_K2_E5 \
 	0x218258UL
-#define CNIG_REG_DBG_SHIFT_K2 \
+#define CNIG_REG_DBG_SHIFT_K2_E5 \
 	0x21825cUL
-#define CNIG_REG_DBG_FORCE_VALID_K2 \
+#define CNIG_REG_DBG_FORCE_VALID_K2_E5 \
 	0x218260UL
-#define CNIG_REG_DBG_FORCE_FRAME_K2 \
+#define CNIG_REG_DBG_FORCE_FRAME_K2_E5 \
 	0x218264UL
 #define PRM_REG_DBG_SELECT \
 	0x2306a8UL
@@ -997,35 +1005,35 @@
 	0x580710UL
 #define CDU_REG_DBG_FORCE_FRAME	\
 	0x580714UL
-#define WOL_REG_DBG_SELECT_K2 \
+#define WOL_REG_DBG_SELECT_K2_E5 \
 	0x600140UL
-#define WOL_REG_DBG_DWORD_ENABLE_K2 \
+#define WOL_REG_DBG_DWORD_ENABLE_K2_E5 \
 	0x600144UL
-#define WOL_REG_DBG_SHIFT_K2 \
+#define WOL_REG_DBG_SHIFT_K2_E5 \
 	0x600148UL
-#define WOL_REG_DBG_FORCE_VALID_K2 \
+#define WOL_REG_DBG_FORCE_VALID_K2_E5 \
 	0x60014cUL
-#define WOL_REG_DBG_FORCE_FRAME_K2 \
+#define WOL_REG_DBG_FORCE_FRAME_K2_E5 \
 	0x600150UL
-#define BMBN_REG_DBG_SELECT_K2 \
+#define BMBN_REG_DBG_SELECT_K2_E5 \
 	0x610140UL
-#define BMBN_REG_DBG_DWORD_ENABLE_K2 \
+#define BMBN_REG_DBG_DWORD_ENABLE_K2_E5 \
 	0x610144UL
-#define BMBN_REG_DBG_SHIFT_K2 \
+#define BMBN_REG_DBG_SHIFT_K2_E5 \
 	0x610148UL
-#define BMBN_REG_DBG_FORCE_VALID_K2 \
+#define BMBN_REG_DBG_FORCE_VALID_K2_E5 \
 	0x61014cUL
-#define BMBN_REG_DBG_FORCE_FRAME_K2 \
+#define BMBN_REG_DBG_FORCE_FRAME_K2_E5 \
 	0x610150UL
-#define NWM_REG_DBG_SELECT_K2 \
+#define NWM_REG_DBG_SELECT_K2_E5 \
 	0x8000ecUL
-#define NWM_REG_DBG_DWORD_ENABLE_K2 \
+#define NWM_REG_DBG_DWORD_ENABLE_K2_E5 \
 	0x8000f0UL
-#define NWM_REG_DBG_SHIFT_K2 \
+#define NWM_REG_DBG_SHIFT_K2_E5 \
 	0x8000f4UL
-#define NWM_REG_DBG_FORCE_VALID_K2 \
+#define NWM_REG_DBG_FORCE_VALID_K2_E5 \
 	0x8000f8UL
-#define NWM_REG_DBG_FORCE_FRAME_K2\
+#define NWM_REG_DBG_FORCE_FRAME_K2_E5 \
 	0x8000fcUL
 #define PBF_REG_DBG_SELECT \
 	0xd80060UL
@@ -1247,36 +1255,76 @@
 	0x1901534UL
 #define USEM_REG_DBG_FORCE_FRAME \
 	0x1901538UL
-#define NWS_REG_DBG_SELECT_K2 \
+#define NWS_REG_DBG_SELECT_K2_E5 \
 	0x700128UL
-#define NWS_REG_DBG_DWORD_ENABLE_K2 \
+#define NWS_REG_DBG_DWORD_ENABLE_K2_E5 \
 	0x70012cUL
-#define NWS_REG_DBG_SHIFT_K2 \
+#define NWS_REG_DBG_SHIFT_K2_E5 \
 	0x700130UL
-#define NWS_REG_DBG_FORCE_VALID_K2 \
+#define NWS_REG_DBG_FORCE_VALID_K2_E5 \
 	0x700134UL
-#define NWS_REG_DBG_FORCE_FRAME_K2 \
+#define NWS_REG_DBG_FORCE_FRAME_K2_E5 \
 	0x700138UL
-#define MS_REG_DBG_SELECT_K2 \
+#define MS_REG_DBG_SELECT_K2_E5 \
 	0x6a0228UL
-#define MS_REG_DBG_DWORD_ENABLE_K2 \
+#define MS_REG_DBG_DWORD_ENABLE_K2_E5 \
 	0x6a022cUL
-#define MS_REG_DBG_SHIFT_K2 \
+#define MS_REG_DBG_SHIFT_K2_E5 \
 	0x6a0230UL
-#define MS_REG_DBG_FORCE_VALID_K2 \
+#define MS_REG_DBG_FORCE_VALID_K2_E5 \
 	0x6a0234UL
-#define MS_REG_DBG_FORCE_FRAME_K2 \
+#define MS_REG_DBG_FORCE_FRAME_K2_E5 \
 	0x6a0238UL
-#define PCIE_REG_DBG_COMMON_SELECT_K2 \
+#define PCIE_REG_DBG_COMMON_SELECT_K2_E5 \
 	0x054398UL
-#define PCIE_REG_DBG_COMMON_DWORD_ENABLE_K2 \
+#define PCIE_REG_DBG_COMMON_DWORD_ENABLE_K2_E5 \
 	0x05439cUL
-#define PCIE_REG_DBG_COMMON_SHIFT_K2 \
+#define PCIE_REG_DBG_COMMON_SHIFT_K2_E5 \
 	0x0543a0UL
-#define PCIE_REG_DBG_COMMON_FORCE_VALID_K2 \
+#define PCIE_REG_DBG_COMMON_FORCE_VALID_K2_E5 \
 	0x0543a4UL
-#define PCIE_REG_DBG_COMMON_FORCE_FRAME_K2 \
+#define PCIE_REG_DBG_COMMON_FORCE_FRAME_K2_E5 \
 	0x0543a8UL
+#define PTLD_REG_DBG_SELECT_E5 \
+	0x5a1600UL
+#define PTLD_REG_DBG_DWORD_ENABLE_E5 \
+	0x5a1604UL
+#define PTLD_REG_DBG_SHIFT_E5 \
+	0x5a1608UL
+#define PTLD_REG_DBG_FORCE_VALID_E5 \
+	0x5a160cUL
+#define PTLD_REG_DBG_FORCE_FRAME_E5 \
+	0x5a1610UL
+#define YPLD_REG_DBG_SELECT_E5 \
+	0x5c1600UL
+#define YPLD_REG_DBG_DWORD_ENABLE_E5 \
+	0x5c1604UL
+#define YPLD_REG_DBG_SHIFT_E5 \
+	0x5c1608UL
+#define YPLD_REG_DBG_FORCE_VALID_E5 \
+	0x5c160cUL
+#define YPLD_REG_DBG_FORCE_FRAME_E5 \
+	0x5c1610UL
+#define RGSRC_REG_DBG_SELECT_E5	\
+	0x320040UL
+#define RGSRC_REG_DBG_DWORD_ENABLE_E5 \
+	0x320044UL
+#define RGSRC_REG_DBG_SHIFT_E5 \
+	0x320048UL
+#define RGSRC_REG_DBG_FORCE_VALID_E5 \
+	0x32004cUL
+#define RGSRC_REG_DBG_FORCE_FRAME_E5 \
+	0x320050UL
+#define TGSRC_REG_DBG_SELECT_E5	\
+	0x322040UL
+#define TGSRC_REG_DBG_DWORD_ENABLE_E5 \
+	0x322044UL
+#define TGSRC_REG_DBG_SHIFT_E5 \
+	0x322048UL
+#define TGSRC_REG_DBG_FORCE_VALID_E5 \
+	0x32204cUL
+#define TGSRC_REG_DBG_FORCE_FRAME_E5 \
+	0x322050UL
 #define MISC_REG_RESET_PL_UA \
 	0x008050UL
 #define MISC_REG_RESET_PL_HV \
@@ -1415,7 +1463,7 @@
 	0x1940000UL
 #define SEM_FAST_REG_INT_RAM \
 	0x020000UL
-#define SEM_FAST_REG_INT_RAM_SIZE \
+#define SEM_FAST_REG_INT_RAM_SIZE_BB_K2 \
 	20480
 #define GRC_REG_TRACE_FIFO_VALID_DATA \
 	0x050064UL
@@ -1433,6 +1481,8 @@
 	0x340800UL
 #define BRB_REG_BIG_RAM_DATA \
 	0x341500UL
+#define BRB_REG_BIG_RAM_DATA_SIZE \
+	64
 #define SEM_FAST_REG_STALL_0_BB_K2 \
 	0x000488UL
 #define SEM_FAST_REG_STALLED \
@@ -1451,7 +1501,7 @@
 	0x238c30UL
 #define MISCS_REG_BLOCK_256B_EN \
 	0x009074UL
-#define MCP_REG_SCRATCH_SIZE \
+#define MCP_REG_SCRATCH_SIZE_BB_K2 \
 	57344
 #define MCP_REG_CPU_REG_FILE \
 	0xe05200UL
@@ -1485,35 +1535,35 @@
 	0x008c14UL
 #define NWS_REG_NWS_CMU_K2	\
 	0x720000UL
-#define PHY_NW_IP_REG_PHY0_TOP_TBUS_ADDR_7_0_K2 \
+#define PHY_NW_IP_REG_PHY0_TOP_TBUS_ADDR_7_0_K2_E5 \
 	0x000680UL
-#define PHY_NW_IP_REG_PHY0_TOP_TBUS_ADDR_15_8_K2 \
+#define PHY_NW_IP_REG_PHY0_TOP_TBUS_ADDR_15_8_K2_E5 \
 	0x000684UL
-#define PHY_NW_IP_REG_PHY0_TOP_TBUS_DATA_7_0_K2 \
+#define PHY_NW_IP_REG_PHY0_TOP_TBUS_DATA_7_0_K2_E5 \
 	0x0006c0UL
-#define PHY_NW_IP_REG_PHY0_TOP_TBUS_DATA_11_8_K2 \
+#define PHY_NW_IP_REG_PHY0_TOP_TBUS_DATA_11_8_K2_E5 \
 	0x0006c4UL
-#define MS_REG_MS_CMU_K2 \
+#define MS_REG_MS_CMU_K2_E5 \
 	0x6a4000UL
-#define PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X130_K2 \
+#define PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X130_K2_E5 \
 	0x000208UL
-#define PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X131_K2 \
+#define PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X131_K2_E5 \
 	0x00020cUL
-#define PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X132_K2 \
+#define PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X132_K2_E5 \
 	0x000210UL
-#define PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X133_K2 \
+#define PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X133_K2_E5 \
 	0x000214UL
-#define PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X130_K2 \
+#define PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X130_K2_E5 \
 	0x000208UL
-#define PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X131_K2 \
+#define PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X131_K2_E5 \
 	0x00020cUL
-#define PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X132_K2 \
+#define PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X132_K2_E5 \
 	0x000210UL
-#define PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X133_K2 \
+#define PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X133_K2_E5 \
 	0x000214UL
-#define PHY_PCIE_REG_PHY0_K2 \
+#define PHY_PCIE_REG_PHY0_K2_E5 \
 	0x620000UL
-#define PHY_PCIE_REG_PHY1_K2 \
+#define PHY_PCIE_REG_PHY1_K2_E5 \
 	0x624000UL
 #define NIG_REG_ROCE_DUPLICATE_TO_HOST 0x5088f0UL
 #define PRS_REG_LIGHT_L2_ETHERTYPE_EN 0x1f0968UL
diff --git a/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c b/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c
index a1d33f3..5e927b6 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c
@@ -351,7 +351,9 @@ int qed_sp_pf_start(struct qed_hwfn *p_hwfn,
 		DP_NOTICE(p_hwfn, "Unsupported MF mode, init as DEFAULT\n");
 		p_ramrod->mf_mode = MF_NPAR;
 	}
-	p_ramrod->outer_tag = p_hwfn->hw_info.ovlan;
+
+	p_ramrod->outer_tag_config.outer_tag.tci =
+		cpu_to_le16(p_hwfn->hw_info.ovlan);
 
 	/* Place EQ address in RAMROD */
 	DMA_REGPAIR_LE(p_ramrod->event_ring_pbl_addr,
@@ -396,8 +398,8 @@ int qed_sp_pf_start(struct qed_hwfn *p_hwfn,
 	p_ramrod->hsi_fp_ver.minor_ver_arr[ETH_VER_KEY] = ETH_HSI_VER_MINOR;
 
 	DP_VERBOSE(p_hwfn, QED_MSG_SPQ,
-		   "Setting event_ring_sb [id %04x index %02x], outer_tag [%d]\n",
-		   sb, sb_index, p_ramrod->outer_tag);
+		   "Setting event_ring_sb [id %04x index %02x], outer_tag.tci [%d]\n",
+		   sb, sb_index, p_ramrod->outer_tag_config.outer_tag.tci);
 
 	rc = qed_spq_post(p_hwfn, p_ent, NULL);
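
A note on the outer_tag change above: ramrod structures are consumed by
the device firmware in little-endian layout, so 16-bit fields such as
the VLAN TCI have to be written with cpu_to_le16() (and read back with
le16_to_cpu(), e.g. for logging). A minimal sketch of the convention;
the struct and field names below are illustrative, not the real qed HSI
definitions:

#include <linux/types.h>
#include <asm/byteorder.h>

/* Illustrative stand-in for a firmware-facing struct; using __le16
 * lets sparse flag any missing endianness conversion.
 */
struct sketch_outer_tag {
	__le16 tci;				/* VLAN tag control info */
};

static void sketch_set_tci(struct sketch_outer_tag *tag, u16 host_tci)
{
	tag->tci = cpu_to_le16(host_tci);	/* host -> firmware order */
}

static u16 sketch_get_tci(const struct sketch_outer_tag *tag)
{
	return le16_to_cpu(tag->tci);		/* firmware -> host order */
}
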
 
diff --git a/drivers/net/ethernet/qlogic/qed/qed_spq.c b/drivers/net/ethernet/qlogic/qed/qed_spq.c
index be48d9a..217b62a 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_spq.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_spq.c
@@ -215,7 +215,7 @@ static int qed_spq_fill_entry(struct qed_hwfn *p_hwfn,
 static void qed_spq_hw_initialize(struct qed_hwfn *p_hwfn,
 				  struct qed_spq *p_spq)
 {
-	struct core_conn_context *p_cxt;
+	struct e4_core_conn_context *p_cxt;
 	struct qed_cxt_info cxt_info;
 	u16 physical_q;
 	int rc;
@@ -233,11 +233,11 @@ static void qed_spq_hw_initialize(struct qed_hwfn *p_hwfn,
 	p_cxt = cxt_info.p_cxt;
 
 	SET_FIELD(p_cxt->xstorm_ag_context.flags10,
-		  XSTORM_CORE_CONN_AG_CTX_DQ_CF_EN, 1);
+		  E4_XSTORM_CORE_CONN_AG_CTX_DQ_CF_EN, 1);
 	SET_FIELD(p_cxt->xstorm_ag_context.flags1,
-		  XSTORM_CORE_CONN_AG_CTX_DQ_CF_ACTIVE, 1);
+		  E4_XSTORM_CORE_CONN_AG_CTX_DQ_CF_ACTIVE, 1);
 	SET_FIELD(p_cxt->xstorm_ag_context.flags9,
-		  XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_EN, 1);
+		  E4_XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_EN, 1);
 
 	/* QM physical queue */
 	physical_q = qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_LB);
diff --git a/drivers/net/ethernet/qlogic/qed/qed_sriov.c b/drivers/net/ethernet/qlogic/qed/qed_sriov.c
index 3f40b1d..5acb91b 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_sriov.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_sriov.c
@@ -153,9 +153,9 @@ static int qed_sp_vf_stop(struct qed_hwfn *p_hwfn,
 	return qed_spq_post(p_hwfn, p_ent, NULL);
 }
 
-static bool qed_iov_is_valid_vfid(struct qed_hwfn *p_hwfn,
-				  int rel_vf_id,
-				  bool b_enabled_only, bool b_non_malicious)
+bool qed_iov_is_valid_vfid(struct qed_hwfn *p_hwfn,
+			   int rel_vf_id,
+			   bool b_enabled_only, bool b_non_malicious)
 {
 	if (!p_hwfn->pf_iov_info) {
 		DP_NOTICE(p_hwfn->cdev, "No iov info\n");
@@ -1621,7 +1621,7 @@ static void qed_iov_vf_mbx_acquire(struct qed_hwfn *p_hwfn,
 	/* fill in pfdev info */
 	pfdev_info->chip_num = p_hwfn->cdev->chip_num;
 	pfdev_info->db_size = 0;
-	pfdev_info->indices_per_sb = PIS_PER_SB;
+	pfdev_info->indices_per_sb = PIS_PER_SB_E4;
 
 	pfdev_info->capabilities = PFVF_ACQUIRE_CAP_DEFAULT_UNTAGGED |
 				   PFVF_ACQUIRE_CAP_POST_FW_OVERRIDE;
@@ -3582,11 +3582,11 @@ static int
 qed_iov_vf_flr_poll_pbf(struct qed_hwfn *p_hwfn,
 			struct qed_vf_info *p_vf, struct qed_ptt *p_ptt)
 {
-	u32 cons[MAX_NUM_VOQS], distance[MAX_NUM_VOQS];
+	u32 cons[MAX_NUM_VOQS_E4], distance[MAX_NUM_VOQS_E4];
 	int i, cnt;
 
 	/* Read initial consumers & producers */
-	for (i = 0; i < MAX_NUM_VOQS; i++) {
+	for (i = 0; i < MAX_NUM_VOQS_E4; i++) {
 		u32 prod;
 
 		cons[i] = qed_rd(p_hwfn, p_ptt,
@@ -3601,7 +3601,7 @@ qed_iov_vf_flr_poll_pbf(struct qed_hwfn *p_hwfn,
 	/* Wait for consumers to pass the producers */
 	i = 0;
 	for (cnt = 0; cnt < 50; cnt++) {
-		for (; i < MAX_NUM_VOQS; i++) {
+		for (; i < MAX_NUM_VOQS_E4; i++) {
 			u32 tmp;
 
 			tmp = qed_rd(p_hwfn, p_ptt,
@@ -3611,7 +3611,7 @@ qed_iov_vf_flr_poll_pbf(struct qed_hwfn *p_hwfn,
 				break;
 		}
 
-		if (i == MAX_NUM_VOQS)
+		if (i == MAX_NUM_VOQS_E4)
 			break;
 
 		msleep(20);
@@ -4237,6 +4237,7 @@ qed_iov_bulletin_get_forced_vlan(struct qed_hwfn *p_hwfn, u16 rel_vf_id)
 static int qed_iov_configure_tx_rate(struct qed_hwfn *p_hwfn,
 				     struct qed_ptt *p_ptt, int vfid, int val)
 {
+	struct qed_mcp_link_state *p_link;
 	struct qed_vf_info *vf;
 	u8 abs_vp_id = 0;
 	int rc;
@@ -4249,7 +4250,10 @@ static int qed_iov_configure_tx_rate(struct qed_hwfn *p_hwfn,
 	if (rc)
 		return rc;
 
-	return qed_init_vport_rl(p_hwfn, p_ptt, abs_vp_id, (u32)val);
+	p_link = &QED_LEADING_HWFN(p_hwfn->cdev)->mcp_info->link_output;
+
+	return qed_init_vport_rl(p_hwfn, p_ptt, abs_vp_id, (u32)val,
+				 p_link->speed);
 }
 
 static int
diff --git a/drivers/net/ethernet/qlogic/qed/qed_sriov.h b/drivers/net/ethernet/qlogic/qed/qed_sriov.h
index 3955929..9a8fd79 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_sriov.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_sriov.h
@@ -274,6 +274,23 @@ enum qed_iov_wq_flag {
 
 #ifdef CONFIG_QED_SRIOV
 /**
+ * @brief Check whether the given VF ID @rel_vf_id is valid
+ *        w.r.t. @b_enabled_only:
+ *        if b_enabled_only is true, only an enabled VF ID is valid;
+ *        otherwise any VF ID below max_vfs is valid.
+ *
+ * @param p_hwfn
+ * @param rel_vf_id - Relative VF ID
+ * @param b_enabled_only - consider only enabled VF
+ * @param b_non_malicious - when true, additionally require that the
+ *			    VF is not marked malicious.
+ *
+ * @return bool - true for valid VF ID
+ */
+bool qed_iov_is_valid_vfid(struct qed_hwfn *p_hwfn,
+			   int rel_vf_id,
+			   bool b_enabled_only, bool b_non_malicious);
+
+/**
  * @brief - Given a VF index, return index of next [including that] active VF.
  *
  * @param p_hwfn
@@ -376,6 +393,13 @@ void qed_vf_start_iov_wq(struct qed_dev *cdev);
 int qed_sriov_disable(struct qed_dev *cdev, bool pci_enabled);
 void qed_inform_vf_link_state(struct qed_hwfn *hwfn);
 #else
+static inline bool
+qed_iov_is_valid_vfid(struct qed_hwfn *p_hwfn,
+		      int rel_vf_id, bool b_enabled_only, bool b_non_malicious)
+{
+	return false;
+}
+
 static inline u16 qed_iov_get_next_active_vf(struct qed_hwfn *p_hwfn,
 					     u16 rel_vf_id)
 {
diff --git a/drivers/net/ethernet/qlogic/qede/qede.h b/drivers/net/ethernet/qlogic/qede/qede.h
index a3a70ad..9935978c 100644
--- a/drivers/net/ethernet/qlogic/qede/qede.h
+++ b/drivers/net/ethernet/qlogic/qede/qede.h
@@ -40,6 +40,7 @@
 #include <linux/kernel.h>
 #include <linux/mutex.h>
 #include <linux/bpf.h>
+#include <net/xdp.h>
 #include <linux/qed/qede_rdma.h>
 #include <linux/io.h>
 #ifdef CONFIG_RFS_ACCEL
@@ -52,9 +53,9 @@
 #include <linux/qed/qed_eth_if.h>
 
 #define QEDE_MAJOR_VERSION		8
-#define QEDE_MINOR_VERSION		10
-#define QEDE_REVISION_VERSION		10
-#define QEDE_ENGINEERING_VERSION	21
+#define QEDE_MINOR_VERSION		33
+#define QEDE_REVISION_VERSION		0
+#define QEDE_ENGINEERING_VERSION	20
 #define DRV_MODULE_VERSION __stringify(QEDE_MAJOR_VERSION) "."	\
 		__stringify(QEDE_MINOR_VERSION) "."		\
 		__stringify(QEDE_REVISION_VERSION) "."		\
@@ -345,6 +346,7 @@ struct qede_rx_queue {
 	u64 xdp_no_pass;
 
 	void *handle;
+	struct xdp_rxq_info xdp_rxq;
 };
 
 union db_prod {
@@ -494,6 +496,8 @@ int qede_vlan_rx_kill_vid(struct net_device *dev, __be16 proto, u16 vid);
 void qede_vlan_mark_nonconfigured(struct qede_dev *edev);
 int qede_configure_vlan_filters(struct qede_dev *edev);
 
+netdev_features_t qede_fix_features(struct net_device *dev,
+				    netdev_features_t features);
 int qede_set_features(struct net_device *dev, netdev_features_t features);
 void qede_set_rx_mode(struct net_device *ndev);
 void qede_config_rx_mode(struct net_device *ndev);
diff --git a/drivers/net/ethernet/qlogic/qede/qede_ethtool.c b/drivers/net/ethernet/qlogic/qede/qede_ethtool.c
index dae7412..4ca3847 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_ethtool.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_ethtool.c
@@ -940,6 +940,9 @@ int qede_change_mtu(struct net_device *ndev, int new_mtu)
 	DP_VERBOSE(edev, (NETIF_MSG_IFUP | NETIF_MSG_IFDOWN),
 		   "Configuring MTU size of %d\n", new_mtu);
 
+	if (new_mtu > PAGE_SIZE)
+		ndev->features &= ~NETIF_F_GRO_HW;
+
 	/* Set the mtu field and re-start the interface if needed */
 	args.u.mtu = new_mtu;
 	args.func = &qede_update_mtu;
diff --git a/drivers/net/ethernet/qlogic/qede/qede_filter.c b/drivers/net/ethernet/qlogic/qede/qede_filter.c
index c1a0708..6687e04 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_filter.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_filter.c
@@ -98,10 +98,18 @@ static void qede_configure_arfs_fltr(struct qede_dev *edev,
 				     u16 rxq_id, bool add_fltr)
 {
 	const struct qed_eth_ops *op = edev->ops;
+	struct qed_ntuple_filter_params params;
 
 	if (n->used)
 		return;
 
+	memset(&params, 0, sizeof(params));
+
+	params.addr = n->mapping;
+	params.length = n->buf_len;
+	params.qid = rxq_id;
+	params.b_is_add = add_fltr;
+
 	DP_VERBOSE(edev, NETIF_MSG_RX_STATUS,
 		   "%s arfs filter flow_id=%d, sw_id=%d, src_port=%d, dst_port=%d, rxq=%d\n",
 		   add_fltr ? "Adding" : "Deleting",
@@ -110,8 +118,7 @@ static void qede_configure_arfs_fltr(struct qede_dev *edev,
 
 	n->used = true;
 	n->filter_op = add_fltr;
-	op->ntuple_filter_config(edev->cdev, n, n->mapping, n->buf_len, 0,
-				 rxq_id, add_fltr);
+	op->ntuple_filter_config(edev->cdev, n, &params);
 }
 
 static void
@@ -141,7 +148,10 @@ qede_enqueue_fltr_and_config_searcher(struct qede_dev *edev,
 	edev->arfs->filter_count++;
 
 	if (edev->arfs->filter_count == 1 && !edev->arfs->enable) {
-		edev->ops->configure_arfs_searcher(edev->cdev, true);
+		enum qed_filter_config_mode mode;
+
+		mode = QED_FILTER_CONFIG_MODE_5_TUPLE;
+		edev->ops->configure_arfs_searcher(edev->cdev, mode);
 		edev->arfs->enable = true;
 	}
 
@@ -160,8 +170,11 @@ qede_dequeue_fltr_and_config_searcher(struct qede_dev *edev,
 	edev->arfs->filter_count--;
 
 	if (!edev->arfs->filter_count && edev->arfs->enable) {
+		enum qed_filter_config_mode mode;
+
+		mode = QED_FILTER_CONFIG_MODE_DISABLE;
 		edev->arfs->enable = false;
-		edev->ops->configure_arfs_searcher(edev->cdev, false);
+		edev->ops->configure_arfs_searcher(edev->cdev, mode);
 	}
 }
 
@@ -255,8 +268,11 @@ void qede_process_arfs_filters(struct qede_dev *edev, bool free_fltr)
 
 	if (!edev->arfs->filter_count) {
 		if (edev->arfs->enable) {
+			enum qed_filter_config_mode mode;
+
+			mode = QED_FILTER_CONFIG_MODE_DISABLE;
 			edev->arfs->enable = false;
-			edev->ops->configure_arfs_searcher(edev->cdev, false);
+			edev->ops->configure_arfs_searcher(edev->cdev, mode);
 		}
 #ifdef CONFIG_RFS_ACCEL
 	} else {
@@ -895,19 +911,26 @@ static void qede_set_features_reload(struct qede_dev *edev,
 	edev->ndev->features = args->u.features;
 }
 
+netdev_features_t qede_fix_features(struct net_device *dev,
+				    netdev_features_t features)
+{
+	struct qede_dev *edev = netdev_priv(dev);
+
+	if (edev->xdp_prog || edev->ndev->mtu > PAGE_SIZE ||
+	    !(features & NETIF_F_GRO))
+		features &= ~NETIF_F_GRO_HW;
+
+	return features;
+}
+
 int qede_set_features(struct net_device *dev, netdev_features_t features)
 {
 	struct qede_dev *edev = netdev_priv(dev);
 	netdev_features_t changes = features ^ dev->features;
 	bool need_reload = false;
 
-	/* No action needed if hardware GRO is disabled during driver load */
-	if (changes & NETIF_F_GRO) {
-		if (dev->features & NETIF_F_GRO)
-			need_reload = !edev->gro_disable;
-		else
-			need_reload = edev->gro_disable;
-	}
+	if (changes & NETIF_F_GRO_HW)
+		need_reload = true;
 
 	if (need_reload) {
 		struct qede_reload_args args;
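
The qede_fix_features()/qede_set_features() split above follows the
standard netdev feature negotiation: the core calls ndo_fix_features to
let the driver mask out bits it cannot honour in the current
configuration (here NETIF_F_GRO_HW when XDP is attached, the MTU
exceeds a page, or software GRO is off), and only the surviving bits
reach ndo_set_features. After a configuration change the driver can ask
the core to re-run that negotiation; a hedged sketch, with qede names
reused for illustration only and the real reload logic omitted:

#include <linux/netdevice.h>
#include <linux/bpf.h>

/* Sketch: after attaching an XDP program, re-evaluate features so
 * ndo_fix_features gets a chance to drop NETIF_F_GRO_HW.
 */
static void sketch_xdp_attached(struct net_device *dev,
				struct bpf_prog *prog)
{
	struct qede_dev *edev = netdev_priv(dev);

	ASSERT_RTNL();			/* feature updates need rtnl */
	edev->xdp_prog = prog;
	netdev_update_features(dev);	/* calls ndo_fix_features, then
					 * ndo_set_features if bits changed
					 */
}
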
diff --git a/drivers/net/ethernet/qlogic/qede/qede_fp.c b/drivers/net/ethernet/qlogic/qede/qede_fp.c
index 48ec4c5..dafc079 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_fp.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_fp.c
@@ -1006,6 +1006,7 @@ static bool qede_rx_xdp(struct qede_dev *edev,
 	xdp.data = xdp.data_hard_start + *data_offset;
 	xdp_set_data_meta_invalid(&xdp);
 	xdp.data_end = xdp.data + *len;
+	xdp.rxq = &rxq->xdp_rxq;
 
 	/* Queues always have a full reset currently, so for the time
 	 * being until there's atomic program replace just mark read
diff --git a/drivers/net/ethernet/qlogic/qede/qede_main.c b/drivers/net/ethernet/qlogic/qede/qede_main.c
index 8f9b3eb..2db70ea 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_main.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_main.c
@@ -545,6 +545,7 @@ static const struct net_device_ops qede_netdev_ops = {
 #endif
 	.ndo_vlan_rx_add_vid = qede_vlan_rx_add_vid,
 	.ndo_vlan_rx_kill_vid = qede_vlan_rx_kill_vid,
+	.ndo_fix_features = qede_fix_features,
 	.ndo_set_features = qede_set_features,
 	.ndo_get_stats64 = qede_get_stats64,
 #ifdef CONFIG_QED_SRIOV
@@ -572,6 +573,7 @@ static const struct net_device_ops qede_netdev_vf_ops = {
 	.ndo_change_mtu = qede_change_mtu,
 	.ndo_vlan_rx_add_vid = qede_vlan_rx_add_vid,
 	.ndo_vlan_rx_kill_vid = qede_vlan_rx_kill_vid,
+	.ndo_fix_features = qede_fix_features,
 	.ndo_set_features = qede_set_features,
 	.ndo_get_stats64 = qede_get_stats64,
 	.ndo_udp_tunnel_add = qede_udp_tunnel_add,
@@ -589,6 +591,7 @@ static const struct net_device_ops qede_netdev_vf_xdp_ops = {
 	.ndo_change_mtu = qede_change_mtu,
 	.ndo_vlan_rx_add_vid = qede_vlan_rx_add_vid,
 	.ndo_vlan_rx_kill_vid = qede_vlan_rx_kill_vid,
+	.ndo_fix_features = qede_fix_features,
 	.ndo_set_features = qede_set_features,
 	.ndo_get_stats64 = qede_get_stats64,
 	.ndo_udp_tunnel_add = qede_udp_tunnel_add,
@@ -676,7 +679,7 @@ static void qede_init_ndev(struct qede_dev *edev)
 	ndev->priv_flags |= IFF_UNICAST_FLT;
 
 	/* user-changeable features */
-	hw_features = NETIF_F_GRO | NETIF_F_SG |
+	hw_features = NETIF_F_GRO | NETIF_F_GRO_HW | NETIF_F_SG |
 		      NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
 		      NETIF_F_TSO | NETIF_F_TSO6;
 
@@ -762,6 +765,12 @@ static void qede_free_fp_array(struct qede_dev *edev)
 			fp = &edev->fp_array[i];
 
 			kfree(fp->sb_info);
+			/* Handle the mem-alloc failure case where
+			 * qede_init_fp() didn't register xdp_rxq_info yet.
+			 * The fp->rxq check implicitly limits this to
+			 * (fp->type & QEDE_FASTPATH_RX).
+			 */
+			if (fp->rxq && xdp_rxq_info_is_reg(&fp->rxq->xdp_rxq))
+				xdp_rxq_info_unreg(&fp->rxq->xdp_rxq);
 			kfree(fp->rxq);
 			kfree(fp->xdp_tx);
 			kfree(fp->txq);
@@ -1068,10 +1077,6 @@ static void __qede_remove(struct pci_dev *pdev, enum qede_remove_mode mode)
 
 	pci_set_drvdata(pdev, NULL);
 
-	/* Release edev's reference to XDP's bpf if such exist */
-	if (edev->xdp_prog)
-		bpf_prog_put(edev->xdp_prog);
-
 	/* Use global ops since we've freed edev */
 	qed_ops->common->slowpath_stop(cdev);
 	if (system_state == SYSTEM_POWER_OFF)
@@ -1148,7 +1153,7 @@ static void qede_free_mem_sb(struct qede_dev *edev, struct qed_sb_info *sb_info,
 static int qede_alloc_mem_sb(struct qede_dev *edev,
 			     struct qed_sb_info *sb_info, u16 sb_id)
 {
-	struct status_block *sb_virt;
+	struct status_block_e4 *sb_virt;
 	dma_addr_t sb_phys;
 	int rc;
 
@@ -1232,18 +1237,9 @@ static int qede_alloc_sge_mem(struct qede_dev *edev, struct qede_rx_queue *rxq)
 	dma_addr_t mapping;
 	int i;
 
-	/* Don't perform FW aggregations in case of XDP */
-	if (edev->xdp_prog)
-		edev->gro_disable = 1;
-
 	if (edev->gro_disable)
 		return 0;
 
-	if (edev->ndev->mtu > PAGE_SIZE) {
-		edev->gro_disable = 1;
-		return 0;
-	}
-
 	for (i = 0; i < ETH_TPA_MAX_AGGS_NUM; i++) {
 		struct qede_agg_info *tpa_info = &rxq->tpa_info[i];
 		struct sw_rx_data *replace_buf = &tpa_info->buffer;
@@ -1273,6 +1269,7 @@ static int qede_alloc_sge_mem(struct qede_dev *edev, struct qede_rx_queue *rxq)
 err:
 	qede_free_sge_mem(edev, rxq);
 	edev->gro_disable = 1;
+	edev->ndev->features &= ~NETIF_F_GRO_HW;
 	return -ENOMEM;
 }
 
@@ -1502,6 +1499,10 @@ static void qede_init_fp(struct qede_dev *edev)
 			else
 				fp->rxq->data_direction = DMA_FROM_DEVICE;
 			fp->rxq->dev = &edev->pdev->dev;
+
+			/* The driver has no error path from here */
+			WARN_ON(xdp_rxq_info_reg(&fp->rxq->xdp_rxq, edev->ndev,
+						 fp->rxq->rxq_id) < 0);
 		}
 
 		if (fp->type & QEDE_FASTPATH_TX) {
@@ -1515,7 +1516,7 @@ static void qede_init_fp(struct qede_dev *edev)
 			 edev->ndev->name, queue_id);
 	}
 
-	edev->gro_disable = !(edev->ndev->features & NETIF_F_GRO);
+	edev->gro_disable = !(edev->ndev->features & NETIF_F_GRO_HW);
 }
 
 static int qede_set_real_num_queues(struct qede_dev *edev)
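
For reference, the xdp_rxq_info handling added above follows the
lifecycle that comes with <net/xdp.h>: register once per RX queue
before traffic flows, point each xdp_buff at it in the fast path (see
the qede_fp.c hunk), and unregister at teardown, where
xdp_rxq_info_is_reg() guards against queues whose setup never ran. A
condensed sketch with error handling trimmed:

#include <net/xdp.h>

/* Setup: one registration per RX queue (qede does this in
 * qede_init_fp()).
 */
static int sketch_rxq_setup(struct qede_rx_queue *rxq,
			    struct net_device *ndev, u16 qid)
{
	return xdp_rxq_info_reg(&rxq->xdp_rxq, ndev, qid);
}

/* Teardown: safe even for a queue that was never registered. */
static void sketch_rxq_free(struct qede_rx_queue *rxq)
{
	if (xdp_rxq_info_is_reg(&rxq->xdp_rxq))
		xdp_rxq_info_unreg(&rxq->xdp_rxq);
}
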
diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c b/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c
index df21e90..7e7704d 100644
--- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c
+++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c
@@ -143,11 +143,7 @@ static int rmnet_newlink(struct net *src_net, struct net_device *dev,
 			 struct nlattr *tb[], struct nlattr *data[],
 			 struct netlink_ext_ack *extack)
 {
-	int ingress_format = RMNET_INGRESS_FORMAT_DEMUXING |
-			     RMNET_INGRESS_FORMAT_DEAGGREGATION |
-			     RMNET_INGRESS_FORMAT_MAP;
-	int egress_format = RMNET_EGRESS_FORMAT_MUXING |
-			    RMNET_EGRESS_FORMAT_MAP;
+	u32 data_format = RMNET_INGRESS_FORMAT_DEAGGREGATION;
 	struct net_device *real_dev;
 	int mode = RMNET_EPMODE_VND;
 	struct rmnet_endpoint *ep;
@@ -181,13 +177,20 @@ static int rmnet_newlink(struct net *src_net, struct net_device *dev,
 	if (err)
 		goto err2;
 
-	netdev_dbg(dev, "data format [ingress 0x%08X] [egress 0x%08X]\n",
-		   ingress_format, egress_format);
-	port->egress_data_format = egress_format;
-	port->ingress_data_format = ingress_format;
 	port->rmnet_mode = mode;
 
 	hlist_add_head_rcu(&ep->hlnode, &port->muxed_ep[mux_id]);
+
+	if (data[IFLA_VLAN_FLAGS]) {
+		struct ifla_vlan_flags *flags;
+
+		flags = nla_data(data[IFLA_VLAN_FLAGS]);
+		data_format = flags->flags & flags->mask;
+	}
+
+	netdev_dbg(dev, "data format [0x%08X]\n", data_format);
+	port->data_format = data_format;
+
 	return 0;
 
 err2:
@@ -317,9 +320,49 @@ static int rmnet_rtnl_validate(struct nlattr *tb[], struct nlattr *data[],
 	return 0;
 }
 
+static int rmnet_changelink(struct net_device *dev, struct nlattr *tb[],
+			    struct nlattr *data[],
+			    struct netlink_ext_ack *extack)
+{
+	struct rmnet_priv *priv = netdev_priv(dev);
+	struct net_device *real_dev;
+	struct rmnet_endpoint *ep;
+	struct rmnet_port *port;
+	u16 mux_id;
+
+	real_dev = __dev_get_by_index(dev_net(dev),
+				      nla_get_u32(tb[IFLA_LINK]));
+
+	if (!real_dev || !dev || !rmnet_is_real_dev_registered(real_dev))
+		return -ENODEV;
+
+	port = rmnet_get_port_rtnl(real_dev);
+
+	if (data[IFLA_VLAN_ID]) {
+		mux_id = nla_get_u16(data[IFLA_VLAN_ID]);
+		ep = rmnet_get_endpoint(port, priv->mux_id);
+
+		hlist_del_init_rcu(&ep->hlnode);
+		hlist_add_head_rcu(&ep->hlnode, &port->muxed_ep[mux_id]);
+
+		ep->mux_id = mux_id;
+		priv->mux_id = mux_id;
+	}
+
+	if (data[IFLA_VLAN_FLAGS]) {
+		struct ifla_vlan_flags *flags;
+
+		flags = nla_data(data[IFLA_VLAN_FLAGS]);
+		port->data_format = flags->flags & flags->mask;
+	}
+
+	return 0;
+}
+
 static size_t rmnet_get_size(const struct net_device *dev)
 {
-	return nla_total_size(2); /* IFLA_VLAN_ID */
+	return nla_total_size(2) /* IFLA_VLAN_ID */ +
+	       nla_total_size(sizeof(struct ifla_vlan_flags)); /* IFLA_VLAN_FLAGS */
 }
 
 struct rtnl_link_ops rmnet_link_ops __read_mostly = {
@@ -331,6 +374,7 @@ struct rtnl_link_ops rmnet_link_ops __read_mostly = {
 	.newlink	= rmnet_newlink,
 	.dellink	= rmnet_dellink,
 	.get_size	= rmnet_get_size,
+	.changelink     = rmnet_changelink,
 };
 
 /* Needs either rcu_read_lock() or rtnl lock */
diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.h b/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.h
index c19259e..00e4634 100644
--- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.h
+++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.h
@@ -32,8 +32,7 @@ struct rmnet_endpoint {
  */
 struct rmnet_port {
 	struct net_device *dev;
-	u32 ingress_data_format;
-	u32 egress_data_format;
+	u32 data_format;
 	u8 nr_rmnet_devs;
 	u8 rmnet_mode;
 	struct hlist_head muxed_ep[RMNET_MAX_LOGICAL_EP];
diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.c b/drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.c
index 08e4afc..601edec 100644
--- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.c
+++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.c
@@ -15,6 +15,8 @@
 
 #include <linux/netdevice.h>
 #include <linux/netdev_features.h>
+#include <linux/if_arp.h>
+#include <net/sock.h>
 #include "rmnet_private.h"
 #include "rmnet_config.h"
 #include "rmnet_vnd.h"
@@ -64,19 +66,19 @@ __rmnet_map_ingress_handler(struct sk_buff *skb,
 			    struct rmnet_port *port)
 {
 	struct rmnet_endpoint *ep;
+	u16 len, pad;
 	u8 mux_id;
-	u16 len;
 
 	if (RMNET_MAP_GET_CD_BIT(skb)) {
-		if (port->ingress_data_format
-		    & RMNET_INGRESS_FORMAT_MAP_COMMANDS)
+		if (port->data_format & RMNET_INGRESS_FORMAT_MAP_COMMANDS)
 			return rmnet_map_command(skb, port);
 
 		goto free_skb;
 	}
 
 	mux_id = RMNET_MAP_GET_MUX_ID(skb);
-	len = RMNET_MAP_GET_LENGTH(skb) - RMNET_MAP_GET_PAD(skb);
+	pad = RMNET_MAP_GET_PAD(skb);
+	len = RMNET_MAP_GET_LENGTH(skb) - pad;
 
 	if (mux_id >= RMNET_MAX_LOGICAL_EP)
 		goto free_skb;
@@ -89,8 +91,14 @@ __rmnet_map_ingress_handler(struct sk_buff *skb,
 
 	/* Subtract MAP header */
 	skb_pull(skb, sizeof(struct rmnet_map_header));
-	skb_trim(skb, len);
 	rmnet_set_skb_proto(skb);
+
+	if (port->data_format & RMNET_INGRESS_FORMAT_MAP_CKSUMV4) {
+		if (!rmnet_map_checksum_downlink_packet(skb, len + pad))
+			skb->ip_summed = CHECKSUM_UNNECESSARY;
+	}
+
+	skb_trim(skb, len);
 	rmnet_deliver_skb(skb);
 	return;
 
@@ -104,8 +112,17 @@ rmnet_map_ingress_handler(struct sk_buff *skb,
 {
 	struct sk_buff *skbn;
 
-	if (port->ingress_data_format & RMNET_INGRESS_FORMAT_DEAGGREGATION) {
-		while ((skbn = rmnet_map_deaggregate(skb)) != NULL)
+	if (skb->dev->type == ARPHRD_ETHER) {
+		if (pskb_expand_head(skb, ETH_HLEN, 0, GFP_KERNEL)) {
+			kfree_skb(skb);
+			return;
+		}
+
+		skb_push(skb, ETH_HLEN);
+	}
+
+	if (port->data_format & RMNET_INGRESS_FORMAT_DEAGGREGATION) {
+		while ((skbn = rmnet_map_deaggregate(skb, port)) != NULL)
 			__rmnet_map_ingress_handler(skbn, port);
 
 		consume_skb(skb);
@@ -124,29 +141,32 @@ static int rmnet_map_egress_handler(struct sk_buff *skb,
 	additional_header_len = 0;
 	required_headroom = sizeof(struct rmnet_map_header);
 
+	if (port->data_format & RMNET_EGRESS_FORMAT_MAP_CKSUMV4) {
+		additional_header_len = sizeof(struct rmnet_map_ul_csum_header);
+		required_headroom += additional_header_len;
+	}
+
 	if (skb_headroom(skb) < required_headroom) {
 		if (pskb_expand_head(skb, required_headroom, 0, GFP_KERNEL))
 			goto fail;
 	}
 
+	if (port->data_format & RMNET_EGRESS_FORMAT_MAP_CKSUMV4)
+		rmnet_map_checksum_uplink_packet(skb, orig_dev);
+
 	map_header = rmnet_map_add_map_header(skb, additional_header_len, 0);
 	if (!map_header)
 		goto fail;
 
-	if (port->egress_data_format & RMNET_EGRESS_FORMAT_MUXING) {
-		if (mux_id == 0xff)
-			map_header->mux_id = 0;
-		else
-			map_header->mux_id = mux_id;
-	}
+	map_header->mux_id = mux_id;
 
 	skb->protocol = htons(ETH_P_MAP);
 
-	return RMNET_MAP_SUCCESS;
+	return 0;
 
 fail:
 	kfree_skb(skb);
-	return RMNET_MAP_CONSUMED;
+	return -ENOMEM;
 }
 
 static void
@@ -178,8 +198,7 @@ rx_handler_result_t rmnet_rx_handler(struct sk_buff **pskb)
 
 	switch (port->rmnet_mode) {
 	case RMNET_EPMODE_VND:
-		if (port->ingress_data_format & RMNET_INGRESS_FORMAT_MAP)
-			rmnet_map_ingress_handler(skb, port);
+		rmnet_map_ingress_handler(skb, port);
 		break;
 	case RMNET_EPMODE_BRIDGE:
 		rmnet_bridge_handler(skb, port->bridge_ep);
@@ -201,6 +220,8 @@ void rmnet_egress_handler(struct sk_buff *skb)
 	struct rmnet_priv *priv;
 	u8 mux_id;
 
+	sk_pacing_shift_update(skb->sk, 8);
+
 	orig_dev = skb->dev;
 	priv = netdev_priv(orig_dev);
 	skb->dev = priv->real_dev;
@@ -212,19 +233,8 @@ void rmnet_egress_handler(struct sk_buff *skb)
 		return;
 	}
 
-	if (port->egress_data_format & RMNET_EGRESS_FORMAT_MAP) {
-		switch (rmnet_map_egress_handler(skb, port, mux_id, orig_dev)) {
-		case RMNET_MAP_CONSUMED:
-			return;
-
-		case RMNET_MAP_SUCCESS:
-			break;
-
-		default:
-			kfree_skb(skb);
-			return;
-		}
-	}
+	if (rmnet_map_egress_handler(skb, port, mux_id, orig_dev))
+		return;
 
 	rmnet_vnd_tx_fixup(skb, orig_dev);
 
diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_map.h b/drivers/net/ethernet/qualcomm/rmnet/rmnet_map.h
index 3af3fe7..6ce31e2 100644
--- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_map.h
+++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_map.h
@@ -30,15 +30,6 @@ struct rmnet_map_control_command {
 	};
 }  __aligned(1);
 
-enum rmnet_map_results {
-	RMNET_MAP_SUCCESS,
-	RMNET_MAP_CONSUMED,
-	RMNET_MAP_GENERAL_FAILURE,
-	RMNET_MAP_NOT_ENABLED,
-	RMNET_MAP_FAILED_AGGREGATION,
-	RMNET_MAP_FAILED_MUX
-};
-
 enum rmnet_map_commands {
 	RMNET_MAP_COMMAND_NONE,
 	RMNET_MAP_COMMAND_FLOW_DISABLE,
@@ -56,6 +47,22 @@ struct rmnet_map_header {
 	u16 pkt_len;
 }  __aligned(1);
 
+struct rmnet_map_dl_csum_trailer {
+	u8  reserved1;
+	u8  valid:1;
+	u8  reserved2:7;
+	u16 csum_start_offset;
+	u16 csum_length;
+	__be16 csum_value;
+} __aligned(1);
+
+struct rmnet_map_ul_csum_header {
+	__be16 csum_start_offset;
+	u16 csum_insert_offset:14;
+	u16 udp_ip4_ind:1;
+	u16 csum_enabled:1;
+} __aligned(1);
+
 #define RMNET_MAP_GET_MUX_ID(Y) (((struct rmnet_map_header *) \
 				 (Y)->data)->mux_id)
 #define RMNET_MAP_GET_CD_BIT(Y) (((struct rmnet_map_header *) \
@@ -76,10 +83,13 @@ struct rmnet_map_header {
 #define RMNET_MAP_NO_PAD_BYTES        0
 #define RMNET_MAP_ADD_PAD_BYTES       1
 
-u8 rmnet_map_demultiplex(struct sk_buff *skb);
-struct sk_buff *rmnet_map_deaggregate(struct sk_buff *skb);
+struct sk_buff *rmnet_map_deaggregate(struct sk_buff *skb,
+				      struct rmnet_port *port);
 struct rmnet_map_header *rmnet_map_add_map_header(struct sk_buff *skb,
 						  int hdrlen, int pad);
 void rmnet_map_command(struct sk_buff *skb, struct rmnet_port *port);
+int rmnet_map_checksum_downlink_packet(struct sk_buff *skb, u16 len);
+void rmnet_map_checksum_uplink_packet(struct sk_buff *skb,
+				      struct net_device *orig_dev);
 
 #endif /* _RMNET_MAP_H_ */
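
The accessor macros above are what the ingress path uses to walk an
aggregated buffer: each packet is a MAP header followed by pkt_len
bytes of payload plus padding, and with MAP_CKSUMV4 enabled a checksum
trailer follows each packet. A minimal sizing sketch using only the
structs defined here (the real loop is rmnet_map_deaggregate()):

#include <linux/skbuff.h>

static u32 sketch_map_packet_len(struct sk_buff *skb, bool csumv4)
{
	struct rmnet_map_header *maph = (struct rmnet_map_header *)skb->data;
	u32 packet_len;

	/* pkt_len counts payload + pad; add the MAP header itself. */
	packet_len = ntohs(maph->pkt_len) + sizeof(struct rmnet_map_header);

	/* With DL checksum offload, a trailer follows each packet. */
	if (csumv4)
		packet_len += sizeof(struct rmnet_map_dl_csum_trailer);

	return packet_len;
}
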
diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_map_command.c b/drivers/net/ethernet/qualcomm/rmnet/rmnet_map_command.c
index 51e6049..6bc328f 100644
--- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_map_command.c
+++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_map_command.c
@@ -58,11 +58,24 @@ static u8 rmnet_map_do_flow_control(struct sk_buff *skb,
 }
 
 static void rmnet_map_send_ack(struct sk_buff *skb,
-			       unsigned char type)
+			       unsigned char type,
+			       struct rmnet_port *port)
 {
 	struct rmnet_map_control_command *cmd;
 	int xmit_status;
 
+	if (port->data_format & RMNET_INGRESS_FORMAT_MAP_CKSUMV4) {
+		if (skb->len < sizeof(struct rmnet_map_header) +
+		    RMNET_MAP_GET_LENGTH(skb) +
+		    sizeof(struct rmnet_map_dl_csum_trailer)) {
+			kfree_skb(skb);
+			return;
+		}
+
+		skb_trim(skb, skb->len -
+			 sizeof(struct rmnet_map_dl_csum_trailer));
+	}
+
 	skb->protocol = htons(ETH_P_MAP);
 
 	cmd = RMNET_MAP_GET_CMD_START(skb);
@@ -100,5 +113,5 @@ void rmnet_map_command(struct sk_buff *skb, struct rmnet_port *port)
 		break;
 	}
 	if (rc == RMNET_MAP_COMMAND_ACK)
-		rmnet_map_send_ack(skb, rc);
+		rmnet_map_send_ack(skb, rc, port);
 }
diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_map_data.c b/drivers/net/ethernet/qualcomm/rmnet/rmnet_map_data.c
index 86b8c75..c74a6c56 100644
--- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_map_data.c
+++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_map_data.c
@@ -14,6 +14,9 @@
  */
 
 #include <linux/netdevice.h>
+#include <linux/ip.h>
+#include <linux/ipv6.h>
+#include <net/ip6_checksum.h>
 #include "rmnet_config.h"
 #include "rmnet_map.h"
 #include "rmnet_private.h"
@@ -21,6 +24,233 @@
 #define RMNET_MAP_DEAGGR_SPACING  64
 #define RMNET_MAP_DEAGGR_HEADROOM (RMNET_MAP_DEAGGR_SPACING / 2)
 
+static __sum16 *rmnet_map_get_csum_field(unsigned char protocol,
+					 const void *txporthdr)
+{
+	__sum16 *check = NULL;
+
+	switch (protocol) {
+	case IPPROTO_TCP:
+		check = &(((struct tcphdr *)txporthdr)->check);
+		break;
+
+	case IPPROTO_UDP:
+		check = &(((struct udphdr *)txporthdr)->check);
+		break;
+
+	default:
+		check = NULL;
+		break;
+	}
+
+	return check;
+}
+
+static int
+rmnet_map_ipv4_dl_csum_trailer(struct sk_buff *skb,
+			       struct rmnet_map_dl_csum_trailer *csum_trailer)
+{
+	__sum16 *csum_field, csum_temp, pseudo_csum, hdr_csum, ip_payload_csum;
+	u16 csum_value, csum_value_final;
+	struct iphdr *ip4h;
+	void *txporthdr;
+	__be16 addend;
+
+	ip4h = (struct iphdr *)(skb->data);
+	if ((ntohs(ip4h->frag_off) & IP_MF) ||
+	    ((ntohs(ip4h->frag_off) & IP_OFFSET) > 0))
+		return -EOPNOTSUPP;
+
+	txporthdr = skb->data + ip4h->ihl * 4;
+
+	csum_field = rmnet_map_get_csum_field(ip4h->protocol, txporthdr);
+
+	if (!csum_field)
+		return -EPROTONOSUPPORT;
+
+	/* RFC 768 - Skip IPv4 UDP packets where sender checksum field is 0 */
+	if (*csum_field == 0 && ip4h->protocol == IPPROTO_UDP)
+		return 0;
+
+	csum_value = ~ntohs(csum_trailer->csum_value);
+	hdr_csum = ~ip_fast_csum(ip4h, (int)ip4h->ihl);
+	ip_payload_csum = csum16_sub((__force __sum16)csum_value,
+				     (__force __be16)hdr_csum);
+
+	pseudo_csum = ~csum_tcpudp_magic(ip4h->saddr, ip4h->daddr,
+					 ntohs(ip4h->tot_len) - ip4h->ihl * 4,
+					 ip4h->protocol, 0);
+	addend = (__force __be16)ntohs((__force __be16)pseudo_csum);
+	pseudo_csum = csum16_add(ip_payload_csum, addend);
+
+	addend = (__force __be16)ntohs((__force __be16)*csum_field);
+	csum_temp = ~csum16_sub(pseudo_csum, addend);
+	csum_value_final = (__force u16)csum_temp;
+
+	if (unlikely(csum_value_final == 0)) {
+		switch (ip4h->protocol) {
+		case IPPROTO_UDP:
+			/* RFC 768 - DL4 1's complement rule for UDP csum 0 */
+			csum_value_final = ~csum_value_final;
+			break;
+
+		case IPPROTO_TCP:
+			/* DL4 Non-RFC compliant TCP checksum found */
+			if (*csum_field == (__force __sum16)0xFFFF)
+				csum_value_final = ~csum_value_final;
+			break;
+		}
+	}
+
+	if (csum_value_final == ntohs((__force __be16)*csum_field))
+		return 0;
+	else
+		return -EINVAL;
+}
+
+#if IS_ENABLED(CONFIG_IPV6)
+static int
+rmnet_map_ipv6_dl_csum_trailer(struct sk_buff *skb,
+			       struct rmnet_map_dl_csum_trailer *csum_trailer)
+{
+	__sum16 *csum_field, ip6_payload_csum, pseudo_csum, csum_temp;
+	u16 csum_value, csum_value_final;
+	__be16 ip6_hdr_csum, addend;
+	struct ipv6hdr *ip6h;
+	void *txporthdr;
+	u32 length;
+
+	ip6h = (struct ipv6hdr *)(skb->data);
+
+	txporthdr = skb->data + sizeof(struct ipv6hdr);
+	csum_field = rmnet_map_get_csum_field(ip6h->nexthdr, txporthdr);
+
+	if (!csum_field)
+		return -EPROTONOSUPPORT;
+
+	csum_value = ~ntohs(csum_trailer->csum_value);
+	ip6_hdr_csum = (__force __be16)
+			~ntohs((__force __be16)ip_compute_csum(ip6h,
+			       (int)(txporthdr - (void *)(skb->data))));
+	ip6_payload_csum = csum16_sub((__force __sum16)csum_value,
+				      ip6_hdr_csum);
+
+	length = (ip6h->nexthdr == IPPROTO_UDP) ?
+		 ntohs(((struct udphdr *)txporthdr)->len) :
+		 ntohs(ip6h->payload_len);
+	pseudo_csum = ~(csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr,
+			     length, ip6h->nexthdr, 0));
+	addend = (__force __be16)ntohs((__force __be16)pseudo_csum);
+	pseudo_csum = csum16_add(ip6_payload_csum, addend);
+
+	addend = (__force __be16)ntohs((__force __be16)*csum_field);
+	csum_temp = ~csum16_sub(pseudo_csum, addend);
+	csum_value_final = (__force u16)csum_temp;
+
+	if (unlikely(csum_value_final == 0)) {
+		switch (ip6h->nexthdr) {
+		case IPPROTO_UDP:
+			/* RFC 2460 section 8.1
+			 * DL6 One's complement rule for UDP checksum 0
+			 */
+			csum_value_final = ~csum_value_final;
+			break;
+
+		case IPPROTO_TCP:
+			/* DL6 Non-RFC compliant TCP checksum found */
+			if (*csum_field == (__force __sum16)0xFFFF)
+				csum_value_final = ~csum_value_final;
+			break;
+		}
+	}
+
+	if (csum_value_final == ntohs((__force __be16)*csum_field))
+		return 0;
+	else
+		return -EINVAL;
+}
+#endif
+
+static void rmnet_map_complement_ipv4_txporthdr_csum_field(void *iphdr)
+{
+	struct iphdr *ip4h = (struct iphdr *)iphdr;
+	void *txphdr;
+	u16 *csum;
+
+	txphdr = iphdr + ip4h->ihl * 4;
+
+	if (ip4h->protocol == IPPROTO_TCP || ip4h->protocol == IPPROTO_UDP) {
+		csum = (u16 *)rmnet_map_get_csum_field(ip4h->protocol, txphdr);
+		*csum = ~(*csum);
+	}
+}
+
+static void
+rmnet_map_ipv4_ul_csum_header(void *iphdr,
+			      struct rmnet_map_ul_csum_header *ul_header,
+			      struct sk_buff *skb)
+{
+	struct iphdr *ip4h = (struct iphdr *)iphdr;
+	__be16 *hdr = (__be16 *)ul_header, offset;
+
+	offset = htons((__force u16)(skb_transport_header(skb) -
+				     (unsigned char *)iphdr));
+	ul_header->csum_start_offset = offset;
+	ul_header->csum_insert_offset = skb->csum_offset;
+	ul_header->csum_enabled = 1;
+	if (ip4h->protocol == IPPROTO_UDP)
+		ul_header->udp_ip4_ind = 1;
+	else
+		ul_header->udp_ip4_ind = 0;
+
+	/* Changing remaining fields to network order */
+	hdr++;
+	*hdr = htons((__force u16)*hdr);
+
+	skb->ip_summed = CHECKSUM_NONE;
+
+	rmnet_map_complement_ipv4_txporthdr_csum_field(iphdr);
+}
+
+#if IS_ENABLED(CONFIG_IPV6)
+static void rmnet_map_complement_ipv6_txporthdr_csum_field(void *ip6hdr)
+{
+	struct ipv6hdr *ip6h = (struct ipv6hdr *)ip6hdr;
+	void *txphdr;
+	u16 *csum;
+
+	txphdr = ip6hdr + sizeof(struct ipv6hdr);
+
+	if (ip6h->nexthdr == IPPROTO_TCP || ip6h->nexthdr == IPPROTO_UDP) {
+		csum = (u16 *)rmnet_map_get_csum_field(ip6h->nexthdr, txphdr);
+		*csum = ~(*csum);
+	}
+}
+
+static void
+rmnet_map_ipv6_ul_csum_header(void *ip6hdr,
+			      struct rmnet_map_ul_csum_header *ul_header,
+			      struct sk_buff *skb)
+{
+	__be16 *hdr = (__be16 *)ul_header, offset;
+
+	offset = htons((__force u16)(skb_transport_header(skb) -
+				     (unsigned char *)ip6hdr));
+	ul_header->csum_start_offset = offset;
+	ul_header->csum_insert_offset = skb->csum_offset;
+	ul_header->csum_enabled = 1;
+	ul_header->udp_ip4_ind = 0;
+
+	/* Changing remaining fields to network order */
+	hdr++;
+	*hdr = htons((__force u16)*hdr);
+
+	skb->ip_summed = CHECKSUM_NONE;
+
+	rmnet_map_complement_ipv6_txporthdr_csum_field(ip6hdr);
+}
+#endif
+
 /* Adds MAP header to front of skb->data
  * Padding is calculated and set appropriately in MAP header. Mux ID is
  * initialized to 0.
@@ -32,9 +262,6 @@ struct rmnet_map_header *rmnet_map_add_map_header(struct sk_buff *skb,
 	u32 padding, map_datalen;
 	u8 *padbytes;
 
-	if (skb_headroom(skb) < sizeof(struct rmnet_map_header))
-		return NULL;
-
 	map_datalen = skb->len - hdrlen;
 	map_header = (struct rmnet_map_header *)
 			skb_push(skb, sizeof(struct rmnet_map_header));
@@ -69,7 +296,8 @@ struct rmnet_map_header *rmnet_map_add_map_header(struct sk_buff *skb,
  * returned, indicating that there are no more packets to deaggregate. Caller
  * is responsible for freeing the original skb.
  */
-struct sk_buff *rmnet_map_deaggregate(struct sk_buff *skb)
+struct sk_buff *rmnet_map_deaggregate(struct sk_buff *skb,
+				      struct rmnet_port *port)
 {
 	struct rmnet_map_header *maph;
 	struct sk_buff *skbn;
@@ -81,6 +309,9 @@ struct sk_buff *rmnet_map_deaggregate(struct sk_buff *skb)
 	maph = (struct rmnet_map_header *)skb->data;
 	packet_len = ntohs(maph->pkt_len) + sizeof(struct rmnet_map_header);
 
+	if (port->data_format & RMNET_INGRESS_FORMAT_MAP_CKSUMV4)
+		packet_len += sizeof(struct rmnet_map_dl_csum_trailer);
+
 	if (((int)skb->len - (int)packet_len) < 0)
 		return NULL;
 
@@ -100,3 +331,73 @@ struct sk_buff *rmnet_map_deaggregate(struct sk_buff *skb)
 
 	return skbn;
 }
+
+/* Validates packet checksums. Expects skb->data to point to the
+ * beginning of a buffer that holds the IP packet, followed by the
+ * padding and the checksum trailer.
+ * Only IPv4 and IPv6 are supported along with TCP & UDP.
+ * Fragmented or tunneled packets are not supported.
+ */
+int rmnet_map_checksum_downlink_packet(struct sk_buff *skb, u16 len)
+{
+	struct rmnet_map_dl_csum_trailer *csum_trailer;
+
+	if (unlikely(!(skb->dev->features & NETIF_F_RXCSUM)))
+		return -EOPNOTSUPP;
+
+	csum_trailer = (struct rmnet_map_dl_csum_trailer *)(skb->data + len);
+
+	if (!csum_trailer->valid)
+		return -EINVAL;
+
+	if (skb->protocol == htons(ETH_P_IP))
+		return rmnet_map_ipv4_dl_csum_trailer(skb, csum_trailer);
+	else if (skb->protocol == htons(ETH_P_IPV6))
+#if IS_ENABLED(CONFIG_IPV6)
+		return rmnet_map_ipv6_dl_csum_trailer(skb, csum_trailer);
+#else
+		return -EPROTONOSUPPORT;
+#endif
+
+	return 0;
+}
+
+/* Generates UL checksum meta info header for IPv4 and IPv6 over TCP and UDP
+ * packets that are supported for UL checksum offload.
+ */
+void rmnet_map_checksum_uplink_packet(struct sk_buff *skb,
+				      struct net_device *orig_dev)
+{
+	struct rmnet_map_ul_csum_header *ul_header;
+	void *iphdr;
+
+	ul_header = (struct rmnet_map_ul_csum_header *)
+		    skb_push(skb, sizeof(struct rmnet_map_ul_csum_header));
+
+	if (unlikely(!(orig_dev->features &
+		     (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM))))
+		goto sw_csum;
+
+	if (skb->ip_summed == CHECKSUM_PARTIAL) {
+		iphdr = (char *)ul_header +
+			sizeof(struct rmnet_map_ul_csum_header);
+
+		if (skb->protocol == htons(ETH_P_IP)) {
+			rmnet_map_ipv4_ul_csum_header(iphdr, ul_header, skb);
+			return;
+		} else if (skb->protocol == htons(ETH_P_IPV6)) {
+#if IS_ENABLED(CONFIG_IPV6)
+			rmnet_map_ipv6_ul_csum_header(iphdr, ul_header, skb);
+			return;
+#else
+			goto sw_csum;
+#endif
+		}
+	}
+
+sw_csum:
+	ul_header->csum_start_offset = 0;
+	ul_header->csum_insert_offset = 0;
+	ul_header->csum_enabled = 0;
+	ul_header->udp_ip4_ind = 0;
+}
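
The downlink trailer validation above is all 16-bit one's-complement
algebra: the hardware reports a checksum over the whole IP packet, the
driver strips the IP header's contribution with csum16_sub(), folds in
the pseudo-header with csum16_add(), and compares the result against
the transport checksum field. A plain-C model of that arithmetic (it
mirrors what the csum16_* helpers in include/net/checksum.h do; it is
a sketch, not kernel API):

/* Fold a 32-bit accumulator into 16 bits with end-around carry. */
static u16 fold16(u32 sum)
{
	while (sum >> 16)
		sum = (sum & 0xffff) + (sum >> 16);
	return (u16)sum;
}

static u16 ones_add(u16 a, u16 b)	/* csum16_add() analogue */
{
	return fold16((u32)a + b);
}

static u16 ones_sub(u16 a, u16 b)	/* csum16_sub() analogue */
{
	return ones_add(a, (u16)~b);	/* subtract = add complement */
}

/* If 'total' is the checksum over IP header + payload, then
 * ones_sub(total, hdr) yields the payload-only checksum; that is the
 * first step rmnet_map_ipv4_dl_csum_trailer() performs.
 */
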
diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_private.h b/drivers/net/ethernet/qualcomm/rmnet/rmnet_private.h
index 49102f9..de0143e 100644
--- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_private.h
+++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_private.h
@@ -19,14 +19,10 @@
 #define RMNET_TX_QUEUE_LEN         1000
 
 /* Constants */
-#define RMNET_EGRESS_FORMAT_MAP                 BIT(1)
-#define RMNET_EGRESS_FORMAT_AGGREGATION         BIT(2)
-#define RMNET_EGRESS_FORMAT_MUXING              BIT(3)
-
-#define RMNET_INGRESS_FORMAT_MAP                BIT(1)
-#define RMNET_INGRESS_FORMAT_DEAGGREGATION      BIT(2)
-#define RMNET_INGRESS_FORMAT_DEMUXING           BIT(3)
-#define RMNET_INGRESS_FORMAT_MAP_COMMANDS       BIT(4)
+#define RMNET_INGRESS_FORMAT_DEAGGREGATION      BIT(0)
+#define RMNET_INGRESS_FORMAT_MAP_COMMANDS       BIT(1)
+#define RMNET_INGRESS_FORMAT_MAP_CKSUMV4        BIT(2)
+#define RMNET_EGRESS_FORMAT_MAP_CKSUMV4         BIT(3)
 
 /* Replace skb->dev to a virtual rmnet device and pass up the stack */
 #define RMNET_EPMODE_VND (1)
diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c b/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c
index 9caa5e3..570a227 100644
--- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c
+++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c
@@ -185,6 +185,13 @@ int rmnet_vnd_newlink(u8 id, struct net_device *rmnet_dev,
 	if (ep->egress_dev)
 		return -EINVAL;
 
+	if (rmnet_get_endpoint(port, id))
+		return -EBUSY;
+
+	rmnet_dev->hw_features = NETIF_F_RXCSUM;
+	rmnet_dev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
+	rmnet_dev->hw_features |= NETIF_F_SG;
+
 	rc = register_netdevice(rmnet_dev);
 	if (!rc) {
 		ep->egress_dev = rmnet_dev;
diff --git a/drivers/net/ethernet/realtek/8139cp.c b/drivers/net/ethernet/realtek/8139cp.c
index e7ab23e..81045df 100644
--- a/drivers/net/ethernet/realtek/8139cp.c
+++ b/drivers/net/ethernet/realtek/8139cp.c
@@ -748,8 +748,8 @@ static netdev_tx_t cp_start_xmit (struct sk_buff *skb,
 	mss = skb_shinfo(skb)->gso_size;
 
 	if (mss > MSSMask) {
-		WARN_ONCE(1, "Net bug: GSO size %d too large for 8139CP\n",
-			  mss);
+		netdev_WARN_ONCE(dev, "Net bug: GSO size %d too large for 8139CP\n",
+				 mss);
 		goto out_dma_error;
 	}
 
diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
index fc0d5fa..272c596 100644
--- a/drivers/net/ethernet/realtek/r8169.c
+++ b/drivers/net/ethernet/realtek/r8169.c
@@ -1675,33 +1675,24 @@ static void rtl_link_chg_patch(struct rtl8169_private *tp)
 	}
 }
 
-static void __rtl8169_check_link_status(struct net_device *dev,
-					struct rtl8169_private *tp,
-					void __iomem *ioaddr, bool pm)
+static void rtl8169_check_link_status(struct net_device *dev,
+				      struct rtl8169_private *tp,
+				      void __iomem *ioaddr)
 {
 	if (tp->link_ok(ioaddr)) {
 		rtl_link_chg_patch(tp);
 		/* This is to cancel a scheduled suspend if there's one. */
-		if (pm)
-			pm_request_resume(&tp->pci_dev->dev);
+		pm_request_resume(&tp->pci_dev->dev);
 		netif_carrier_on(dev);
 		if (net_ratelimit())
 			netif_info(tp, ifup, dev, "link up\n");
 	} else {
 		netif_carrier_off(dev);
 		netif_info(tp, ifdown, dev, "link down\n");
-		if (pm)
-			pm_schedule_suspend(&tp->pci_dev->dev, 5000);
+		pm_runtime_idle(&tp->pci_dev->dev);
 	}
 }
 
-static void rtl8169_check_link_status(struct net_device *dev,
-				      struct rtl8169_private *tp,
-				      void __iomem *ioaddr)
-{
-	__rtl8169_check_link_status(dev, tp, ioaddr, false);
-}
-
 #define WAKE_ANY (WAKE_PHY | WAKE_MAGIC | WAKE_UCAST | WAKE_BCAST | WAKE_MCAST)
 
 static u32 __rtl8169_get_wol(struct rtl8169_private *tp)
@@ -4643,16 +4634,6 @@ static void rtl8169_phy_timer(struct timer_list *t)
 	rtl_schedule_task(tp, RTL_FLAG_TASK_PHY_PENDING);
 }
 
-static void rtl8169_release_board(struct pci_dev *pdev, struct net_device *dev,
-				  void __iomem *ioaddr)
-{
-	iounmap(ioaddr);
-	pci_release_regions(pdev);
-	pci_clear_mwi(pdev);
-	pci_disable_device(pdev);
-	free_netdev(dev);
-}
-
 DECLARE_RTL_COND(rtl_phy_reset_cond)
 {
 	return tp->phy_reset_pending(tp);
@@ -4784,14 +4765,6 @@ static int rtl_tbi_ioctl(struct rtl8169_private *tp, struct mii_ioctl_data *data
 	return -EOPNOTSUPP;
 }
 
-static void rtl_disable_msi(struct pci_dev *pdev, struct rtl8169_private *tp)
-{
-	if (tp->features & RTL_FEATURE_MSI) {
-		pci_disable_msi(pdev);
-		tp->features &= ~RTL_FEATURE_MSI;
-	}
-}
-
 static void rtl_init_mdio_ops(struct rtl8169_private *tp)
 {
 	struct mdio_ops *ops = &tp->mdio_ops;
@@ -7764,7 +7737,7 @@ static void rtl_slow_event_work(struct rtl8169_private *tp)
 		rtl8169_pcierr_interrupt(dev);
 
 	if (status & LinkChg)
-		__rtl8169_check_link_status(dev, tp, tp->mmio_addr, true);
+		rtl8169_check_link_status(dev, tp, tp->mmio_addr);
 
 	rtl_irq_enable_all(tp);
 }
@@ -7977,7 +7950,7 @@ static int rtl_open(struct net_device *dev)
 	rtl_unlock_work(tp);
 
 	tp->saved_wolopts = 0;
-	pm_runtime_put_noidle(&pdev->dev);
+	pm_runtime_put_sync(&pdev->dev);
 
 	rtl8169_check_link_status(dev, tp, ioaddr);
 out:
@@ -8121,8 +8094,10 @@ static int rtl8169_runtime_suspend(struct device *device)
 	struct net_device *dev = pci_get_drvdata(pdev);
 	struct rtl8169_private *tp = netdev_priv(dev);
 
-	if (!tp->TxDescArray)
+	if (!tp->TxDescArray) {
+		rtl_pll_power_down(tp);
 		return 0;
+	}
 
 	rtl_lock_work(tp);
 	tp->saved_wolopts = __rtl8169_get_wol(tp);
@@ -8164,9 +8139,11 @@ static int rtl8169_runtime_idle(struct device *device)
 {
 	struct pci_dev *pdev = to_pci_dev(device);
 	struct net_device *dev = pci_get_drvdata(pdev);
-	struct rtl8169_private *tp = netdev_priv(dev);
 
-	return tp->TxDescArray ? -EBUSY : 0;
+	if (!netif_running(dev) || !netif_carrier_ok(dev))
+		pm_schedule_suspend(device, 10000);
+
+	return -EBUSY;
 }
 
 static const struct dev_pm_ops rtl8169_pm_ops = {
@@ -8213,9 +8190,6 @@ static void rtl_shutdown(struct pci_dev *pdev)
 {
 	struct net_device *dev = pci_get_drvdata(pdev);
 	struct rtl8169_private *tp = netdev_priv(dev);
-	struct device *d = &pdev->dev;
-
-	pm_runtime_get_sync(d);
 
 	rtl8169_net_suspend(dev);
 
@@ -8233,8 +8207,6 @@ static void rtl_shutdown(struct pci_dev *pdev)
 		pci_wake_from_d3(pdev, true);
 		pci_set_power_state(pdev, PCI_D3hot);
 	}
-
-	pm_runtime_put_noidle(d);
 }
 
 static void rtl_remove_one(struct pci_dev *pdev)
@@ -8256,9 +8228,6 @@ static void rtl_remove_one(struct pci_dev *pdev)
 
 	unregister_netdev(dev);
 
-	dma_free_coherent(&tp->pci_dev->dev, sizeof(*tp->counters),
-			  tp->counters, tp->counters_phys_addr);
-
 	rtl_release_firmware(tp);
 
 	if (pci_dev_run_wake(pdev))
@@ -8266,9 +8235,6 @@ static void rtl_remove_one(struct pci_dev *pdev)
 
 	/* restore original MAC address */
 	rtl_rar_set(tp, dev->perm_addr);
-
-	rtl_disable_msi(pdev, tp);
-	rtl8169_release_board(pdev, dev, tp->mmio_addr);
 }
 
 static const struct net_device_ops rtl_netdev_ops = {
@@ -8445,11 +8411,9 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 		       MODULENAME, RTL8169_VERSION);
 	}
 
-	dev = alloc_etherdev(sizeof (*tp));
-	if (!dev) {
-		rc = -ENOMEM;
-		goto out;
-	}
+	dev = devm_alloc_etherdev(&pdev->dev, sizeof(*tp));
+	if (!dev)
+		return -ENOMEM;
 
 	SET_NETDEV_DEV(dev, &pdev->dev);
 	dev->netdev_ops = &rtl_netdev_ops;
@@ -8472,13 +8436,13 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 				     PCIE_LINK_STATE_CLKPM);
 
 	/* enable device (incl. PCI PM wakeup and hotplug setup) */
-	rc = pci_enable_device(pdev);
+	rc = pcim_enable_device(pdev);
 	if (rc < 0) {
 		netif_err(tp, probe, dev, "enable failure\n");
-		goto err_out_free_dev_1;
+		return rc;
 	}
 
-	if (pci_set_mwi(pdev) < 0)
+	if (pcim_set_mwi(pdev) < 0)
 		netif_info(tp, probe, dev, "Mem-Wr-Inval unavailable\n");
 
 	/* make sure PCI base addr 1 is MMIO */
@@ -8486,30 +8450,28 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 		netif_err(tp, probe, dev,
 			  "region #%d not an MMIO resource, aborting\n",
 			  region);
-		rc = -ENODEV;
-		goto err_out_mwi_2;
+		return -ENODEV;
 	}
 
 	/* check for weird/broken PCI region reporting */
 	if (pci_resource_len(pdev, region) < R8169_REGS_SIZE) {
 		netif_err(tp, probe, dev,
 			  "Invalid PCI region size(s), aborting\n");
-		rc = -ENODEV;
-		goto err_out_mwi_2;
+		return -ENODEV;
 	}
 
 	rc = pci_request_regions(pdev, MODULENAME);
 	if (rc < 0) {
 		netif_err(tp, probe, dev, "could not request regions\n");
-		goto err_out_mwi_2;
+		return rc;
 	}
 
 	/* ioremap MMIO region */
-	ioaddr = ioremap(pci_resource_start(pdev, region), R8169_REGS_SIZE);
+	ioaddr = devm_ioremap(&pdev->dev, pci_resource_start(pdev, region),
+			      R8169_REGS_SIZE);
 	if (!ioaddr) {
 		netif_err(tp, probe, dev, "cannot remap MMIO, aborting\n");
-		rc = -EIO;
-		goto err_out_free_res_3;
+		return -EIO;
 	}
 	tp->mmio_addr = ioaddr;
 
@@ -8535,7 +8497,7 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 		rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
 		if (rc < 0) {
 			netif_err(tp, probe, dev, "DMA configuration failed\n");
-			goto err_out_unmap_4;
+			return rc;
 		}
 	}
 
@@ -8697,16 +8659,15 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 
 	tp->rtl_fw = RTL_FIRMWARE_UNKNOWN;
 
-	tp->counters = dma_alloc_coherent (&pdev->dev, sizeof(*tp->counters),
-					   &tp->counters_phys_addr, GFP_KERNEL);
-	if (!tp->counters) {
-		rc = -ENOMEM;
-		goto err_out_msi_5;
-	}
+	tp->counters = dmam_alloc_coherent(&pdev->dev, sizeof(*tp->counters),
+					   &tp->counters_phys_addr,
+					   GFP_KERNEL);
+	if (!tp->counters)
+		return -ENOMEM;
 
 	rc = register_netdev(dev);
 	if (rc < 0)
-		goto err_out_cnt_6;
+		return rc;
 
 	pci_set_drvdata(pdev, dev);
 
@@ -8730,30 +8691,12 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 		rtl8168_driver_start(tp);
 	}
 
-	if (pci_dev_run_wake(pdev))
-		pm_runtime_put_noidle(&pdev->dev);
-
 	netif_carrier_off(dev);
 
-out:
-	return rc;
+	if (pci_dev_run_wake(pdev))
+		pm_runtime_put_sync(&pdev->dev);
 
-err_out_cnt_6:
-	dma_free_coherent(&pdev->dev, sizeof(*tp->counters), tp->counters,
-			  tp->counters_phys_addr);
-err_out_msi_5:
-	netif_napi_del(&tp->napi);
-	rtl_disable_msi(pdev, tp);
-err_out_unmap_4:
-	iounmap(ioaddr);
-err_out_free_res_3:
-	pci_release_regions(pdev);
-err_out_mwi_2:
-	pci_clear_mwi(pdev);
-	pci_disable_device(pdev);
-err_out_free_dev_1:
-	free_netdev(dev);
-	goto out;
+	return 0;
 }
 
 static struct pci_driver rtl8169_pci_driver = {
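
The conversion above trades rtl_init_one()'s hand-rolled unwind chain for
device-managed (devm_*/pcim_*) resources, which the driver core releases
automatically on probe failure and on unbind. A minimal sketch of the
pattern, with example_probe, struct example_priv and EXAMPLE_REGS_SIZE as
hypothetical names:

	/* Sketch only: the devm probe idiom, not the r8169 code itself. */
	struct example_priv { void __iomem *mmio; };	/* hypothetical */

	static int example_probe(struct pci_dev *pdev,
				 const struct pci_device_id *ent)
	{
		void __iomem *ioaddr;
		struct net_device *dev;
		int rc;

		/* Freed by devres on probe failure or device removal. */
		dev = devm_alloc_etherdev(&pdev->dev,
					  sizeof(struct example_priv));
		if (!dev)
			return -ENOMEM;

		/* pcim_enable_device() makes the enable state managed, and
		 * later region requests become managed too, so the error
		 * paths below may simply return.
		 */
		rc = pcim_enable_device(pdev);
		if (rc < 0)
			return rc;

		ioaddr = devm_ioremap(&pdev->dev,
				      pci_resource_start(pdev, 2),
				      EXAMPLE_REGS_SIZE);
		if (!ioaddr)
			return -EIO;

		return register_netdev(dev);
	}

Note that register_netdev() stays last: once it succeeds the netdev is
live, and remove still has to call unregister_netdev() explicitly, which
is why rtl_remove_one() keeps that call.
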
diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c
index 009780d..c87f57c 100644
--- a/drivers/net/ethernet/renesas/ravb_main.c
+++ b/drivers/net/ethernet/renesas/ravb_main.c
@@ -2205,8 +2205,7 @@ static int ravb_probe(struct platform_device *pdev)
 	if (chip_id != RCAR_GEN2)
 		ravb_ptp_stop(ndev);
 out_release:
-	if (ndev)
-		free_netdev(ndev);
+	free_netdev(ndev);
 
 	pm_runtime_put(&pdev->dev);
 	pm_runtime_disable(&pdev->dev);
diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c
index b9e2846..7aa1c12 100644
--- a/drivers/net/ethernet/renesas/sh_eth.c
+++ b/drivers/net/ethernet/renesas/sh_eth.c
@@ -3301,8 +3301,7 @@ static int sh_eth_drv_probe(struct platform_device *pdev)
 
 out_release:
 	/* net_dev free */
-	if (ndev)
-		free_netdev(ndev);
+	free_netdev(ndev);
 
 	pm_runtime_put(&pdev->dev);
 	pm_runtime_disable(&pdev->dev);
diff --git a/drivers/net/ethernet/sfc/ef10.c b/drivers/net/ethernet/sfc/ef10.c
index e566dbb..8ae467d 100644
--- a/drivers/net/ethernet/sfc/ef10.c
+++ b/drivers/net/ethernet/sfc/ef10.c
@@ -160,11 +160,31 @@ static int efx_ef10_get_warm_boot_count(struct efx_nic *efx)
 		EFX_DWORD_FIELD(reg, EFX_WORD_0) : -EIO;
 }
 
+/* On all EF10s up to and including SFC9220 (Medford1), all PFs use BAR 0 for
+ * I/O space and BAR 2(&3) for memory.  On SFC9250 (Medford2), there is no I/O
+ * bar; PFs use BAR 0/1 for memory.
+ */
+static unsigned int efx_ef10_pf_mem_bar(struct efx_nic *efx)
+{
+	switch (efx->pci_dev->device) {
+	case 0x0b03: /* SFC9250 PF */
+		return 0;
+	default:
+		return 2;
+	}
+}
+
+/* All VFs use BAR 0/1 for memory */
+static unsigned int efx_ef10_vf_mem_bar(struct efx_nic *efx)
+{
+	return 0;
+}
+
 static unsigned int efx_ef10_mem_map_size(struct efx_nic *efx)
 {
 	int bar;
 
-	bar = efx->type->mem_bar;
+	bar = efx->type->mem_bar(efx);
 	return resource_size(&efx->pci_dev->resource[bar]);
 }
 
@@ -213,7 +233,7 @@ static int efx_ef10_get_vf_index(struct efx_nic *efx)
 
 static int efx_ef10_init_datapath_caps(struct efx_nic *efx)
 {
-	MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_CAPABILITIES_V2_OUT_LEN);
+	MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_CAPABILITIES_V4_OUT_LEN);
 	struct efx_ef10_nic_data *nic_data = efx->nic_data;
 	size_t outlen;
 	int rc;
@@ -257,6 +277,48 @@ static int efx_ef10_init_datapath_caps(struct efx_nic *efx)
 		return -ENODEV;
 	}
 
+	if (outlen >= MC_CMD_GET_CAPABILITIES_V3_OUT_LEN) {
+		u8 vi_window_mode = MCDI_BYTE(outbuf,
+				GET_CAPABILITIES_V3_OUT_VI_WINDOW_MODE);
+
+		switch (vi_window_mode) {
+		case MC_CMD_GET_CAPABILITIES_V3_OUT_VI_WINDOW_MODE_8K:
+			efx->vi_stride = 8192;
+			break;
+		case MC_CMD_GET_CAPABILITIES_V3_OUT_VI_WINDOW_MODE_16K:
+			efx->vi_stride = 16384;
+			break;
+		case MC_CMD_GET_CAPABILITIES_V3_OUT_VI_WINDOW_MODE_64K:
+			efx->vi_stride = 65536;
+			break;
+		default:
+			netif_err(efx, probe, efx->net_dev,
+				  "Unrecognised VI window mode %d\n",
+				  vi_window_mode);
+			return -EIO;
+		}
+		netif_dbg(efx, probe, efx->net_dev, "vi_stride = %u\n",
+			  efx->vi_stride);
+	} else {
+		/* keep default VI stride */
+		netif_dbg(efx, probe, efx->net_dev,
+			  "firmware did not report VI window mode, assuming vi_stride = %u\n",
+			  efx->vi_stride);
+	}
+
+	if (outlen >= MC_CMD_GET_CAPABILITIES_V4_OUT_LEN) {
+		efx->num_mac_stats = MCDI_WORD(outbuf,
+				GET_CAPABILITIES_V4_OUT_MAC_STATS_NUM_STATS);
+		netif_dbg(efx, probe, efx->net_dev,
+			  "firmware reports num_mac_stats = %u\n",
+			  efx->num_mac_stats);
+	} else {
+		/* leave num_mac_stats as the default value, MC_CMD_MAC_NSTATS */
+		netif_dbg(efx, probe, efx->net_dev,
+			  "firmware did not report num_mac_stats, assuming %u\n",
+			  efx->num_mac_stats);
+	}
+
 	return 0;
 }
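
The new parsing above follows the usual MCDI compatibility pattern:
declare a buffer sized for the newest response version, then trust a field
only if outlen shows the firmware actually returned that version. A
condensed sketch of the idiom (parse_vi_window_mode() is a hypothetical
helper for the switch shown in the hunk):

	/* Sketch: version-gated parsing of a variable-length MCDI response. */
	if (outlen >= MC_CMD_GET_CAPABILITIES_V3_OUT_LEN)
		parse_vi_window_mode(efx, outbuf);	/* field new in V3 */
	/* else: efx->vi_stride keeps the EFX_DEFAULT_VI_STRIDE set at init */

	if (outlen >= MC_CMD_GET_CAPABILITIES_V4_OUT_LEN)
		efx->num_mac_stats = MCDI_WORD(outbuf,
				GET_CAPABILITIES_V4_OUT_MAC_STATS_NUM_STATS);
	/* else: num_mac_stats keeps the MC_CMD_MAC_NSTATS default */
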
 
@@ -589,17 +651,6 @@ static int efx_ef10_probe(struct efx_nic *efx)
 	struct efx_ef10_nic_data *nic_data;
 	int i, rc;
 
-	/* We can have one VI for each 8K region.  However, until we
-	 * use TX option descriptors we need two TX queues per channel.
-	 */
-	efx->max_channels = min_t(unsigned int,
-				  EFX_MAX_CHANNELS,
-				  efx_ef10_mem_map_size(efx) /
-				  (EFX_VI_PAGE_SIZE * EFX_TXQ_TYPES));
-	efx->max_tx_channels = efx->max_channels;
-	if (WARN_ON(efx->max_channels == 0))
-		return -EIO;
-
 	nic_data = kzalloc(sizeof(*nic_data), GFP_KERNEL);
 	if (!nic_data)
 		return -ENOMEM;
@@ -671,6 +722,20 @@ static int efx_ef10_probe(struct efx_nic *efx)
 	if (rc < 0)
 		goto fail5;
 
+	/* We can have one VI for each vi_stride-byte region.
+	 * However, until we use TX option descriptors we need two TX queues
+	 * per channel.
+	 */
+	efx->max_channels = min_t(unsigned int,
+				  EFX_MAX_CHANNELS,
+				  efx_ef10_mem_map_size(efx) /
+				  (efx->vi_stride * EFX_TXQ_TYPES));
+	efx->max_tx_channels = efx->max_channels;
+	if (WARN_ON(efx->max_channels == 0)) {
+		rc = -EIO;
+		goto fail5;
+	}
+
 	efx->rx_packet_len_offset =
 		ES_DZ_RX_PREFIX_PKTLEN_OFST - ES_DZ_RX_PREFIX_SIZE;
 
@@ -695,7 +760,14 @@ static int efx_ef10_probe(struct efx_nic *efx)
 	if (rc && rc != -EPERM)
 		goto fail5;
 
-	efx_ptp_probe(efx, NULL);
+	rc = efx_ptp_probe(efx, NULL);
+	/* Failure to probe PTP is not fatal.
+	 * In the case of EPERM, efx_ptp_probe will print its own message (in
+	 * efx_ptp_get_attributes()), so we don't need to.
+	 */
+	if (rc && rc != -EPERM)
+		netif_warn(efx, drv, efx->net_dev,
+			   "Failed to probe PTP, rc=%d\n", rc);
 
 #ifdef CONFIG_SFC_SRIOV
 	if ((efx->pci_dev->physfn) && (!efx->pci_dev->is_physfn)) {
@@ -907,7 +979,7 @@ static int efx_ef10_link_piobufs(struct efx_nic *efx)
 			} else {
 				tx_queue->piobuf =
 					nic_data->pio_write_base +
-					index * EFX_VI_PAGE_SIZE + offset;
+					index * efx->vi_stride + offset;
 				tx_queue->piobuf_offset = offset;
 				netif_dbg(efx, probe, efx->net_dev,
 					  "linked VI %u to PIO buffer %u offset %x addr %p\n",
@@ -1253,19 +1325,19 @@ static int efx_ef10_dimension_resources(struct efx_nic *efx)
 	 * for writing PIO buffers through.
 	 *
 	 * The UC mapping contains (channel_vis - 1) complete VIs and the
-	 * first half of the next VI.  Then the WC mapping begins with
-	 * the second half of this last VI.
+	 * first 4K of the next VI.  Then the WC mapping begins with
+	 * the remainder of this last VI.
 	 */
-	uc_mem_map_size = PAGE_ALIGN((channel_vis - 1) * EFX_VI_PAGE_SIZE +
+	uc_mem_map_size = PAGE_ALIGN((channel_vis - 1) * efx->vi_stride +
 				     ER_DZ_TX_PIOBUF);
 	if (nic_data->n_piobufs) {
 		/* pio_write_vi_base rounds down to give the number of complete
 		 * VIs inside the UC mapping.
 		 */
-		pio_write_vi_base = uc_mem_map_size / EFX_VI_PAGE_SIZE;
+		pio_write_vi_base = uc_mem_map_size / efx->vi_stride;
 		wc_mem_map_size = (PAGE_ALIGN((pio_write_vi_base +
 					       nic_data->n_piobufs) *
-					      EFX_VI_PAGE_SIZE) -
+					      efx->vi_stride) -
 				   uc_mem_map_size);
 		max_vis = pio_write_vi_base + nic_data->n_piobufs;
 	} else {
@@ -1337,7 +1409,7 @@ static int efx_ef10_dimension_resources(struct efx_nic *efx)
 		nic_data->pio_write_vi_base = pio_write_vi_base;
 		nic_data->pio_write_base =
 			nic_data->wc_membase +
-			(pio_write_vi_base * EFX_VI_PAGE_SIZE + ER_DZ_TX_PIOBUF -
+			(pio_write_vi_base * efx->vi_stride + ER_DZ_TX_PIOBUF -
 			 uc_mem_map_size);
 
 		rc = efx_ef10_link_piobufs(efx);
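
As a worked example of the UC/WC split described in the previous hunk,
under assumed numbers: with vi_stride = 8192, 4K pages, channel_vis = 10
and ER_DZ_TX_PIOBUF = 0x1000, uc_mem_map_size = PAGE_ALIGN(9 * 8192 +
4096) = 77824 (already page aligned), so pio_write_vi_base = 77824 / 8192
= 9 complete VIs fall inside the UC mapping, matching the "(channel_vis -
1) complete VIs" comment; with n_piobufs = 2, wc_mem_map_size =
PAGE_ALIGN(11 * 8192) - 77824 = 12288.
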
@@ -1571,6 +1643,29 @@ static const struct efx_hw_stat_desc efx_ef10_stat_desc[EF10_STAT_COUNT] = {
 	EF10_DMA_STAT(tx_bad, VADAPTER_TX_BAD_PACKETS),
 	EF10_DMA_STAT(tx_bad_bytes, VADAPTER_TX_BAD_BYTES),
 	EF10_DMA_STAT(tx_overflow, VADAPTER_TX_OVERFLOW),
+	EF10_DMA_STAT(fec_uncorrected_errors, FEC_UNCORRECTED_ERRORS),
+	EF10_DMA_STAT(fec_corrected_errors, FEC_CORRECTED_ERRORS),
+	EF10_DMA_STAT(fec_corrected_symbols_lane0, FEC_CORRECTED_SYMBOLS_LANE0),
+	EF10_DMA_STAT(fec_corrected_symbols_lane1, FEC_CORRECTED_SYMBOLS_LANE1),
+	EF10_DMA_STAT(fec_corrected_symbols_lane2, FEC_CORRECTED_SYMBOLS_LANE2),
+	EF10_DMA_STAT(fec_corrected_symbols_lane3, FEC_CORRECTED_SYMBOLS_LANE3),
+	EF10_DMA_STAT(ctpio_dmabuf_start, CTPIO_DMABUF_START),
+	EF10_DMA_STAT(ctpio_vi_busy_fallback, CTPIO_VI_BUSY_FALLBACK),
+	EF10_DMA_STAT(ctpio_long_write_success, CTPIO_LONG_WRITE_SUCCESS),
+	EF10_DMA_STAT(ctpio_missing_dbell_fail, CTPIO_MISSING_DBELL_FAIL),
+	EF10_DMA_STAT(ctpio_overflow_fail, CTPIO_OVERFLOW_FAIL),
+	EF10_DMA_STAT(ctpio_underflow_fail, CTPIO_UNDERFLOW_FAIL),
+	EF10_DMA_STAT(ctpio_timeout_fail, CTPIO_TIMEOUT_FAIL),
+	EF10_DMA_STAT(ctpio_noncontig_wr_fail, CTPIO_NONCONTIG_WR_FAIL),
+	EF10_DMA_STAT(ctpio_frm_clobber_fail, CTPIO_FRM_CLOBBER_FAIL),
+	EF10_DMA_STAT(ctpio_invalid_wr_fail, CTPIO_INVALID_WR_FAIL),
+	EF10_DMA_STAT(ctpio_vi_clobber_fallback, CTPIO_VI_CLOBBER_FALLBACK),
+	EF10_DMA_STAT(ctpio_unqualified_fallback, CTPIO_UNQUALIFIED_FALLBACK),
+	EF10_DMA_STAT(ctpio_runt_fallback, CTPIO_RUNT_FALLBACK),
+	EF10_DMA_STAT(ctpio_success, CTPIO_SUCCESS),
+	EF10_DMA_STAT(ctpio_fallback, CTPIO_FALLBACK),
+	EF10_DMA_STAT(ctpio_poison, CTPIO_POISON),
+	EF10_DMA_STAT(ctpio_erase, CTPIO_ERASE),
 };
 
 #define HUNT_COMMON_STAT_MASK ((1ULL << EF10_STAT_port_tx_bytes) |	\
@@ -1646,6 +1741,43 @@ static const struct efx_hw_stat_desc efx_ef10_stat_desc[EF10_STAT_COUNT] = {
 	(1ULL << EF10_STAT_port_rx_dp_hlb_fetch) |			\
 	(1ULL << EF10_STAT_port_rx_dp_hlb_wait))
 
+/* These statistics are only provided if the NIC supports MC_CMD_MAC_STATS_V2,
+ * indicated by returning a value >= MC_CMD_MAC_NSTATS_V2 in
+ * MC_CMD_GET_CAPABILITIES_V4_OUT_MAC_STATS_NUM_STATS.
+ * These bits are in the second u64 of the raw mask.
+ */
+#define EF10_FEC_STAT_MASK (						\
+	(1ULL << (EF10_STAT_fec_uncorrected_errors - 64)) |		\
+	(1ULL << (EF10_STAT_fec_corrected_errors - 64)) |		\
+	(1ULL << (EF10_STAT_fec_corrected_symbols_lane0 - 64)) |	\
+	(1ULL << (EF10_STAT_fec_corrected_symbols_lane1 - 64)) |	\
+	(1ULL << (EF10_STAT_fec_corrected_symbols_lane2 - 64)) |	\
+	(1ULL << (EF10_STAT_fec_corrected_symbols_lane3 - 64)))
+
+/* These statistics are only provided if the NIC supports MC_CMD_MAC_STATS_V3,
+ * indicated by returning a value >= MC_CMD_MAC_NSTATS_V3 in
+ * MC_CMD_GET_CAPABILITIES_V4_OUT_MAC_STATS_NUM_STATS.
+ * These bits are in the second u64 of the raw mask.
+ */
+#define EF10_CTPIO_STAT_MASK (						\
+	(1ULL << (EF10_STAT_ctpio_dmabuf_start - 64)) |			\
+	(1ULL << (EF10_STAT_ctpio_vi_busy_fallback - 64)) |		\
+	(1ULL << (EF10_STAT_ctpio_long_write_success - 64)) |		\
+	(1ULL << (EF10_STAT_ctpio_missing_dbell_fail - 64)) |		\
+	(1ULL << (EF10_STAT_ctpio_overflow_fail - 64)) |		\
+	(1ULL << (EF10_STAT_ctpio_underflow_fail - 64)) |		\
+	(1ULL << (EF10_STAT_ctpio_timeout_fail - 64)) |			\
+	(1ULL << (EF10_STAT_ctpio_noncontig_wr_fail - 64)) |		\
+	(1ULL << (EF10_STAT_ctpio_frm_clobber_fail - 64)) |		\
+	(1ULL << (EF10_STAT_ctpio_invalid_wr_fail - 64)) |		\
+	(1ULL << (EF10_STAT_ctpio_vi_clobber_fallback - 64)) |		\
+	(1ULL << (EF10_STAT_ctpio_unqualified_fallback - 64)) |		\
+	(1ULL << (EF10_STAT_ctpio_runt_fallback - 64)) |		\
+	(1ULL << (EF10_STAT_ctpio_success - 64)) |			\
+	(1ULL << (EF10_STAT_ctpio_fallback - 64)) |			\
+	(1ULL << (EF10_STAT_ctpio_poison - 64)) |			\
+	(1ULL << (EF10_STAT_ctpio_erase - 64)))
+
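
EF10_STAT_COUNT now exceeds 64, so the raw stat mask spans two u64 words
and the "- 64" in the masks above relocates each stat index into the
second word. Testing an arbitrary bit of such a split mask, as a sketch:

	/* Sketch: stat bit i of a two-word mask (u64 raw_mask[2]). */
	static inline bool stat_enabled(const u64 *raw_mask, unsigned int i)
	{
		return raw_mask[i / 64] & (1ULL << (i % 64));
	}
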
 static u64 efx_ef10_raw_stat_mask(struct efx_nic *efx)
 {
 	u64 raw_mask = HUNT_COMMON_STAT_MASK;
@@ -1684,10 +1816,22 @@ static void efx_ef10_get_stat_mask(struct efx_nic *efx, unsigned long *mask)
 	if (nic_data->datapath_caps &
 	    (1 << MC_CMD_GET_CAPABILITIES_OUT_EVB_LBN)) {
 		raw_mask[0] |= ~((1ULL << EF10_STAT_rx_unicast) - 1);
-		raw_mask[1] = (1ULL << (EF10_STAT_COUNT - 63)) - 1;
+		raw_mask[1] = (1ULL << (EF10_STAT_V1_COUNT - 64)) - 1;
 	} else {
 		raw_mask[1] = 0;
 	}
+	/* Only show FEC stats when NIC supports MC_CMD_MAC_STATS_V2 */
+	if (efx->num_mac_stats >= MC_CMD_MAC_NSTATS_V2)
+		raw_mask[1] |= EF10_FEC_STAT_MASK;
+
+	/* CTPIO stats appear in V3. Only show them on devices that actually
+	 * support CTPIO. Although this driver doesn't use CTPIO, others might,
+	 * and we may be reporting the stats for the underlying port.
+	 */
+	if (efx->num_mac_stats >= MC_CMD_MAC_NSTATS_V3 &&
+	    (nic_data->datapath_caps2 &
+	     (1 << MC_CMD_GET_CAPABILITIES_V4_OUT_CTPIO_LBN)))
+		raw_mask[1] |= EF10_CTPIO_STAT_MASK;
 
 #if BITS_PER_LONG == 64
 	BUILD_BUG_ON(BITS_TO_LONGS(EF10_STAT_COUNT) != 2);
@@ -1791,7 +1935,7 @@ static int efx_ef10_try_update_nic_stats_pf(struct efx_nic *efx)
 
 	dma_stats = efx->stats_buffer.addr;
 
-	generation_end = dma_stats[MC_CMD_MAC_GENERATION_END];
+	generation_end = dma_stats[efx->num_mac_stats - 1];
 	if (generation_end == EFX_MC_STATS_GENERATION_INVALID)
 		return 0;
 	rmb();
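
The trailing qword of the MAC stats DMA buffer is a generation count,
which is why it is now addressed as dma_stats[efx->num_mac_stats - 1]
rather than at a fixed index: the buffer length varies with the firmware.
The consistency protocol around it, sketched (copy_stats() is a
hypothetical stand-in for the update helper):

	/* Sketch: only accept stats when both generation words agree. */
	generation_end = dma_stats[efx->num_mac_stats - 1];
	if (generation_end == EFX_MC_STATS_GENERATION_INVALID)
		return 0;		/* MC has not written stats yet */
	rmb();				/* generation word before the stats */
	copy_stats(stats, dma_stats);
	rmb();				/* stats before the leading word */
	if (generation_end != dma_stats[MC_CMD_MAC_GENERATION_START])
		return -EAGAIN;		/* MC rewrote the buffer; retry */
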
@@ -1839,7 +1983,7 @@ static int efx_ef10_try_update_nic_stats_vf(struct efx_nic *efx)
 	DECLARE_BITMAP(mask, EF10_STAT_COUNT);
 	__le64 generation_start, generation_end;
 	u64 *stats = nic_data->stats;
-	u32 dma_len = MC_CMD_MAC_NSTATS * sizeof(u64);
+	u32 dma_len = efx->num_mac_stats * sizeof(u64);
 	struct efx_buffer stats_buf;
 	__le64 *dma_stats;
 	int rc;
@@ -1864,7 +2008,7 @@ static int efx_ef10_try_update_nic_stats_vf(struct efx_nic *efx)
 	}
 
 	dma_stats = stats_buf.addr;
-	dma_stats[MC_CMD_MAC_GENERATION_END] = EFX_MC_STATS_GENERATION_INVALID;
+	dma_stats[efx->num_mac_stats - 1] = EFX_MC_STATS_GENERATION_INVALID;
 
 	MCDI_SET_QWORD(inbuf, MAC_STATS_IN_DMA_ADDR, stats_buf.dma_addr);
 	MCDI_POPULATE_DWORD_1(inbuf, MAC_STATS_IN_CMD,
@@ -1883,7 +2027,7 @@ static int efx_ef10_try_update_nic_stats_vf(struct efx_nic *efx)
 		goto out;
 	}
 
-	generation_end = dma_stats[MC_CMD_MAC_GENERATION_END];
+	generation_end = dma_stats[efx->num_mac_stats - 1];
 	if (generation_end == EFX_MC_STATS_GENERATION_INVALID) {
 		WARN_ON_ONCE(1);
 		goto out;
@@ -1951,8 +2095,9 @@ static void efx_ef10_push_irq_moderation(struct efx_channel *channel)
 	} else {
 		unsigned int ticks = efx_usecs_to_ticks(efx, usecs);
 
-		EFX_POPULATE_DWORD_2(timer_cmd, ERF_DZ_TC_TIMER_MODE, mode,
-				     ERF_DZ_TC_TIMER_VAL, ticks);
+		EFX_POPULATE_DWORD_3(timer_cmd, ERF_DZ_TC_TIMER_MODE, mode,
+				     ERF_DZ_TC_TIMER_VAL, ticks,
+				     ERF_FZ_TC_TMR_REL_VAL, ticks);
 		efx_writed_page(efx, &timer_cmd, ER_DZ_EVQ_TMR,
 				channel->channel);
 	}
@@ -3233,8 +3378,8 @@ static u16 efx_ef10_handle_rx_event_errors(struct efx_channel *channel,
 		if (unlikely(rx_encap_hdr != ESE_EZ_ENCAP_HDR_VXLAN &&
 			     ((rx_l3_class != ESE_DZ_L3_CLASS_IP4 &&
 			       rx_l3_class != ESE_DZ_L3_CLASS_IP6) ||
-			      (rx_l4_class != ESE_DZ_L4_CLASS_TCP &&
-			       rx_l4_class != ESE_DZ_L4_CLASS_UDP))))
+			      (rx_l4_class != ESE_FZ_L4_CLASS_TCP &&
+			       rx_l4_class != ESE_FZ_L4_CLASS_UDP))))
 			netdev_WARN(efx->net_dev,
 				    "invalid class for RX_TCPUDP_CKSUM_ERR: event="
 				    EFX_QWORD_FMT "\n",
@@ -3271,8 +3416,8 @@ static u16 efx_ef10_handle_rx_event_errors(struct efx_channel *channel,
 				    EFX_QWORD_VAL(*event));
 		else if (unlikely((rx_l3_class != ESE_DZ_L3_CLASS_IP4 &&
 				   rx_l3_class != ESE_DZ_L3_CLASS_IP6) ||
-				  (rx_l4_class != ESE_DZ_L4_CLASS_TCP &&
-				   rx_l4_class != ESE_DZ_L4_CLASS_UDP)))
+				  (rx_l4_class != ESE_FZ_L4_CLASS_TCP &&
+				   rx_l4_class != ESE_FZ_L4_CLASS_UDP)))
 			netdev_WARN(efx->net_dev,
 				    "invalid class for RX_TCP_UDP_INNER_CHKSUM_ERR: event="
 				    EFX_QWORD_FMT "\n",
@@ -3307,7 +3452,7 @@ static int efx_ef10_handle_rx_event(struct efx_channel *channel,
 	next_ptr_lbits = EFX_QWORD_FIELD(*event, ESF_DZ_RX_DSC_PTR_LBITS);
 	rx_queue_label = EFX_QWORD_FIELD(*event, ESF_DZ_RX_QLABEL);
 	rx_l3_class = EFX_QWORD_FIELD(*event, ESF_DZ_RX_L3_CLASS);
-	rx_l4_class = EFX_QWORD_FIELD(*event, ESF_DZ_RX_L4_CLASS);
+	rx_l4_class = EFX_QWORD_FIELD(*event, ESF_FZ_RX_L4_CLASS);
 	rx_cont = EFX_QWORD_FIELD(*event, ESF_DZ_RX_CONT);
 	rx_encap_hdr =
 		nic_data->datapath_caps &
@@ -3385,8 +3530,8 @@ static int efx_ef10_handle_rx_event(struct efx_channel *channel,
 							 rx_l3_class, rx_l4_class,
 							 event);
 	} else {
-		bool tcpudp = rx_l4_class == ESE_DZ_L4_CLASS_TCP ||
-			      rx_l4_class == ESE_DZ_L4_CLASS_UDP;
+		bool tcpudp = rx_l4_class == ESE_FZ_L4_CLASS_TCP ||
+			      rx_l4_class == ESE_FZ_L4_CLASS_UDP;
 
 		switch (rx_encap_hdr) {
 		case ESE_EZ_ENCAP_HDR_VXLAN: /* VxLAN or GENEVE */
@@ -3407,7 +3552,7 @@ static int efx_ef10_handle_rx_event(struct efx_channel *channel,
 		}
 	}
 
-	if (rx_l4_class == ESE_DZ_L4_CLASS_TCP)
+	if (rx_l4_class == ESE_FZ_L4_CLASS_TCP)
 		flags |= EFX_RX_PKT_TCP;
 
 	channel->irq_mod_score += 2 * n_packets;
@@ -6392,7 +6537,7 @@ static int efx_ef10_udp_tnl_del_port(struct efx_nic *efx,
 
 const struct efx_nic_type efx_hunt_a0_vf_nic_type = {
 	.is_vf = true,
-	.mem_bar = EFX_MEM_VF_BAR,
+	.mem_bar = efx_ef10_vf_mem_bar,
 	.mem_map_size = efx_ef10_mem_map_size,
 	.probe = efx_ef10_probe_vf,
 	.remove = efx_ef10_remove,
@@ -6500,7 +6645,7 @@ const struct efx_nic_type efx_hunt_a0_vf_nic_type = {
 
 const struct efx_nic_type efx_hunt_a0_nic_type = {
 	.is_vf = false,
-	.mem_bar = EFX_MEM_BAR,
+	.mem_bar = efx_ef10_pf_mem_bar,
 	.mem_map_size = efx_ef10_mem_map_size,
 	.probe = efx_ef10_probe_pf,
 	.remove = efx_ef10_remove,
diff --git a/drivers/net/ethernet/sfc/ef10_regs.h b/drivers/net/ethernet/sfc/ef10_regs.h
index 2c4bf94..6a56778 100644
--- a/drivers/net/ethernet/sfc/ef10_regs.h
+++ b/drivers/net/ethernet/sfc/ef10_regs.h
@@ -1,6 +1,6 @@
 /****************************************************************************
  * Driver for Solarflare network controllers and boards
- * Copyright 2012-2015 Solarflare Communications Inc.
+ * Copyright 2012-2017 Solarflare Communications Inc.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms of the GNU General Public License version 2 as published
@@ -79,6 +79,8 @@
 #define	ER_DZ_EVQ_TMR 0x00000420
 #define	ER_DZ_EVQ_TMR_STEP 8192
 #define	ER_DZ_EVQ_TMR_ROWS 2048
+#define	ERF_FZ_TC_TMR_REL_VAL_LBN 16
+#define	ERF_FZ_TC_TMR_REL_VAL_WIDTH 14
 #define	ERF_DZ_TC_TIMER_MODE_LBN 14
 #define	ERF_DZ_TC_TIMER_MODE_WIDTH 2
 #define	ERF_DZ_TC_TIMER_VAL_LBN 0
@@ -159,16 +161,24 @@
 #define	ESF_DZ_RX_EV_SOFT2_WIDTH 2
 #define	ESF_DZ_RX_DSC_PTR_LBITS_LBN 48
 #define	ESF_DZ_RX_DSC_PTR_LBITS_WIDTH 4
-#define	ESF_DZ_RX_L4_CLASS_LBN 45
-#define	ESF_DZ_RX_L4_CLASS_WIDTH 3
-#define	ESE_DZ_L4_CLASS_RSVD7 7
-#define	ESE_DZ_L4_CLASS_RSVD6 6
-#define	ESE_DZ_L4_CLASS_RSVD5 5
-#define	ESE_DZ_L4_CLASS_RSVD4 4
-#define	ESE_DZ_L4_CLASS_RSVD3 3
-#define	ESE_DZ_L4_CLASS_UDP 2
-#define	ESE_DZ_L4_CLASS_TCP 1
-#define	ESE_DZ_L4_CLASS_UNKNOWN 0
+#define	ESF_DE_RX_L4_CLASS_LBN 45
+#define	ESF_DE_RX_L4_CLASS_WIDTH 3
+#define	ESE_DE_L4_CLASS_RSVD7 7
+#define	ESE_DE_L4_CLASS_RSVD6 6
+#define	ESE_DE_L4_CLASS_RSVD5 5
+#define	ESE_DE_L4_CLASS_RSVD4 4
+#define	ESE_DE_L4_CLASS_RSVD3 3
+#define	ESE_DE_L4_CLASS_UDP 2
+#define	ESE_DE_L4_CLASS_TCP 1
+#define	ESE_DE_L4_CLASS_UNKNOWN 0
+#define	ESF_FZ_RX_FASTPD_INDCTR_LBN 47
+#define	ESF_FZ_RX_FASTPD_INDCTR_WIDTH 1
+#define	ESF_FZ_RX_L4_CLASS_LBN 45
+#define	ESF_FZ_RX_L4_CLASS_WIDTH 2
+#define	ESE_FZ_L4_CLASS_RSVD3 3
+#define	ESE_FZ_L4_CLASS_UDP 2
+#define	ESE_FZ_L4_CLASS_TCP 1
+#define	ESE_FZ_L4_CLASS_UNKNOWN 0
 #define	ESF_DZ_RX_L3_CLASS_LBN 42
 #define	ESF_DZ_RX_L3_CLASS_WIDTH 3
 #define	ESE_DZ_L3_CLASS_RSVD7 7
@@ -215,6 +225,8 @@
 #define	ESF_EZ_RX_ABORT_WIDTH 1
 #define	ESF_DZ_RX_ECC_ERR_LBN 29
 #define	ESF_DZ_RX_ECC_ERR_WIDTH 1
+#define	ESF_DZ_RX_TRUNC_ERR_LBN 29
+#define	ESF_DZ_RX_TRUNC_ERR_WIDTH 1
 #define	ESF_DZ_RX_CRC1_ERR_LBN 28
 #define	ESF_DZ_RX_CRC1_ERR_WIDTH 1
 #define	ESF_DZ_RX_CRC0_ERR_LBN 27
@@ -332,6 +344,8 @@
 #define	ESE_DZ_TX_OPTION_DESC_CRC_CSUM 0
 #define	ESF_DZ_TX_TSO_OPTION_TYPE_LBN 56
 #define	ESF_DZ_TX_TSO_OPTION_TYPE_WIDTH 4
+#define	ESE_DZ_TX_TSO_OPTION_DESC_FATSO2B 3
+#define	ESE_DZ_TX_TSO_OPTION_DESC_FATSO2A 2
 #define	ESE_DZ_TX_TSO_OPTION_DESC_ENCAP 1
 #define	ESE_DZ_TX_TSO_OPTION_DESC_NORMAL 0
 #define	ESF_DZ_TX_TSO_TCP_FLAGS_LBN 48
@@ -341,7 +355,7 @@
 #define	ESF_DZ_TX_TSO_TCP_SEQNO_LBN 0
 #define	ESF_DZ_TX_TSO_TCP_SEQNO_WIDTH 32
 
-/* TX_TSO_FATSO2A_DESC */
+/* TX_TSO_V2_DESC_A */
 #define	ESF_DZ_TX_DESC_IS_OPT_LBN 63
 #define	ESF_DZ_TX_DESC_IS_OPT_WIDTH 1
 #define	ESF_DZ_TX_OPTION_TYPE_LBN 60
@@ -360,8 +374,7 @@
 #define	ESF_DZ_TX_TSO_TCP_SEQNO_LBN 0
 #define	ESF_DZ_TX_TSO_TCP_SEQNO_WIDTH 32
 
-
-/* TX_TSO_FATSO2B_DESC */
+/* TX_TSO_V2_DESC_B */
 #define	ESF_DZ_TX_DESC_IS_OPT_LBN 63
 #define	ESF_DZ_TX_DESC_IS_OPT_WIDTH 1
 #define	ESF_DZ_TX_OPTION_TYPE_LBN 60
@@ -375,11 +388,10 @@
 #define	ESE_DZ_TX_TSO_OPTION_DESC_FATSO2A 2
 #define	ESE_DZ_TX_TSO_OPTION_DESC_ENCAP 1
 #define	ESE_DZ_TX_TSO_OPTION_DESC_NORMAL 0
-#define	ESF_DZ_TX_TSO_OUTER_IP_ID_LBN 0
-#define	ESF_DZ_TX_TSO_OUTER_IP_ID_WIDTH 16
 #define	ESF_DZ_TX_TSO_TCP_MSS_LBN 32
 #define	ESF_DZ_TX_TSO_TCP_MSS_WIDTH 16
-
+#define	ESF_DZ_TX_TSO_OUTER_IPID_LBN 0
+#define	ESF_DZ_TX_TSO_OUTER_IPID_WIDTH 16
 
 /*************************************************************************/
 
diff --git a/drivers/net/ethernet/sfc/efx.c b/drivers/net/ethernet/sfc/efx.c
index e3c492f..12f0abc 100644
--- a/drivers/net/ethernet/sfc/efx.c
+++ b/drivers/net/ethernet/sfc/efx.c
@@ -27,6 +27,7 @@
 #include <net/udp_tunnel.h>
 #include "efx.h"
 #include "nic.h"
+#include "io.h"
 #include "selftest.h"
 #include "sriov.h"
 
@@ -952,31 +953,42 @@ void efx_link_status_changed(struct efx_nic *efx)
 		netif_info(efx, link, efx->net_dev, "link down\n");
 }
 
-void efx_link_set_advertising(struct efx_nic *efx, u32 advertising)
+void efx_link_set_advertising(struct efx_nic *efx,
+			      const unsigned long *advertising)
 {
-	efx->link_advertising = advertising;
-	if (advertising) {
-		if (advertising & ADVERTISED_Pause)
-			efx->wanted_fc |= (EFX_FC_TX | EFX_FC_RX);
-		else
-			efx->wanted_fc &= ~(EFX_FC_TX | EFX_FC_RX);
-		if (advertising & ADVERTISED_Asym_Pause)
-			efx->wanted_fc ^= EFX_FC_TX;
-	}
+	memcpy(efx->link_advertising, advertising,
+	       sizeof(__ETHTOOL_DECLARE_LINK_MODE_MASK()));
+
+	efx->link_advertising[0] |= ADVERTISED_Autoneg;
+	if (advertising[0] & ADVERTISED_Pause)
+		efx->wanted_fc |= (EFX_FC_TX | EFX_FC_RX);
+	else
+		efx->wanted_fc &= ~(EFX_FC_TX | EFX_FC_RX);
+	if (advertising[0] & ADVERTISED_Asym_Pause)
+		efx->wanted_fc ^= EFX_FC_TX;
+}
+
+/* Equivalent to efx_link_set_advertising with all-zeroes, except does not
+ * force the Autoneg bit on.
+ */
+void efx_link_clear_advertising(struct efx_nic *efx)
+{
+	bitmap_zero(efx->link_advertising, __ETHTOOL_LINK_MODE_MASK_NBITS);
+	efx->wanted_fc &= ~(EFX_FC_TX | EFX_FC_RX);
 }
 
 void efx_link_set_wanted_fc(struct efx_nic *efx, u8 wanted_fc)
 {
 	efx->wanted_fc = wanted_fc;
-	if (efx->link_advertising) {
+	if (efx->link_advertising[0]) {
 		if (wanted_fc & EFX_FC_RX)
-			efx->link_advertising |= (ADVERTISED_Pause |
-						  ADVERTISED_Asym_Pause);
+			efx->link_advertising[0] |= (ADVERTISED_Pause |
+						     ADVERTISED_Asym_Pause);
 		else
-			efx->link_advertising &= ~(ADVERTISED_Pause |
-						   ADVERTISED_Asym_Pause);
+			efx->link_advertising[0] &= ~(ADVERTISED_Pause |
+						      ADVERTISED_Asym_Pause);
 		if (wanted_fc & EFX_FC_TX)
-			efx->link_advertising ^= ADVERTISED_Asym_Pause;
+			efx->link_advertising[0] ^= ADVERTISED_Asym_Pause;
 	}
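
link_advertising is now an ethtool link-mode bitmap rather than a u32,
which is what makes room for link modes past bit 31; the legacy
ADVERTISED_* flags continue to occupy word 0, hence the
link_advertising[0] accesses above. Declaring and probing such a mask, as
a sketch:

	/* Sketch: an ethtool link-mode bitmap and a legacy-flag test. */
	__ETHTOOL_DECLARE_LINK_MODE_MASK(advertising);
	bool pause;

	bitmap_zero(advertising, __ETHTOOL_LINK_MODE_MASK_NBITS);
	__set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, advertising);

	/* Legacy ADVERTISED_* bits are 0..31, i.e. word 0 of the bitmap. */
	pause = advertising[0] & ADVERTISED_Pause;
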
 }
 
@@ -1248,7 +1260,7 @@ static int efx_init_io(struct efx_nic *efx)
 
 	netif_dbg(efx, probe, efx->net_dev, "initialising I/O\n");
 
-	bar = efx->type->mem_bar;
+	bar = efx->type->mem_bar(efx);
 
 	rc = pci_enable_device(pci_dev);
 	if (rc) {
@@ -1323,7 +1335,7 @@ static void efx_fini_io(struct efx_nic *efx)
 	}
 
 	if (efx->membase_phys) {
-		bar = efx->type->mem_bar;
+		bar = efx->type->mem_bar(efx);
 		pci_release_region(efx->pci_dev, bar);
 		efx->membase_phys = 0;
 	}
@@ -2909,6 +2921,10 @@ static const struct pci_device_id efx_pci_table[] = {
 	 .driver_data = (unsigned long) &efx_hunt_a0_nic_type},
 	{PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE, 0x1a03),  /* SFC9220 VF */
 	 .driver_data = (unsigned long) &efx_hunt_a0_vf_nic_type},
+	{PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE, 0x0b03),  /* SFC9250 PF */
+	 .driver_data = (unsigned long) &efx_hunt_a0_nic_type},
+	{PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE, 0x1b03),  /* SFC9250 VF */
+	 .driver_data = (unsigned long) &efx_hunt_a0_vf_nic_type},
 	{0}			/* end of list */
 };
 
@@ -2977,6 +2993,9 @@ static int efx_init_struct(struct efx_nic *efx,
 	efx->rx_packet_ts_offset =
 		efx->type->rx_ts_offset - efx->type->rx_prefix_size;
 	spin_lock_init(&efx->stats_lock);
+	efx->vi_stride = EFX_DEFAULT_VI_STRIDE;
+	efx->num_mac_stats = MC_CMD_MAC_NSTATS;
+	BUILD_BUG_ON(MC_CMD_MAC_NSTATS - 1 != MC_CMD_MAC_GENERATION_END);
 	mutex_init(&efx->mac_lock);
 	efx->phy_op = &efx_dummy_phy_operations;
 	efx->mdio.dev = net_dev;
diff --git a/drivers/net/ethernet/sfc/efx.h b/drivers/net/ethernet/sfc/efx.h
index 52c84b7..0cddc5a 100644
--- a/drivers/net/ethernet/sfc/efx.h
+++ b/drivers/net/ethernet/sfc/efx.h
@@ -14,11 +14,6 @@
 #include "net_driver.h"
 #include "filter.h"
 
-/* All controllers use BAR 0 for I/O space and BAR 2(&3) for memory */
-/* All VFs use BAR 0/1 for memory */
-#define EFX_MEM_BAR 2
-#define EFX_MEM_VF_BAR 0
-
 int efx_net_open(struct net_device *net_dev);
 int efx_net_stop(struct net_device *net_dev);
 
@@ -263,7 +258,9 @@ static inline void efx_schedule_channel_irq(struct efx_channel *channel)
 }
 
 void efx_link_status_changed(struct efx_nic *efx);
-void efx_link_set_advertising(struct efx_nic *efx, u32);
+void efx_link_set_advertising(struct efx_nic *efx,
+			      const unsigned long *advertising);
+void efx_link_clear_advertising(struct efx_nic *efx);
 void efx_link_set_wanted_fc(struct efx_nic *efx, u8);
 
 static inline void efx_device_detach_sync(struct efx_nic *efx)
diff --git a/drivers/net/ethernet/sfc/ethtool.c b/drivers/net/ethernet/sfc/ethtool.c
index 3747b56..4db2dc2 100644
--- a/drivers/net/ethernet/sfc/ethtool.c
+++ b/drivers/net/ethernet/sfc/ethtool.c
@@ -720,7 +720,7 @@ static int efx_ethtool_set_pauseparam(struct net_device *net_dev,
 		goto out;
 	}
 
-	if ((wanted_fc & EFX_FC_AUTO) && !efx->link_advertising) {
+	if ((wanted_fc & EFX_FC_AUTO) && !efx->link_advertising[0]) {
 		netif_dbg(efx, drv, efx->net_dev,
 			  "Autonegotiation is disabled\n");
 		rc = -EINVAL;
@@ -732,10 +732,10 @@ static int efx_ethtool_set_pauseparam(struct net_device *net_dev,
 	    (wanted_fc & EFX_FC_TX) && !(efx->wanted_fc & EFX_FC_TX))
 		efx->type->prepare_enable_fc_tx(efx);
 
-	old_adv = efx->link_advertising;
+	old_adv = efx->link_advertising[0];
 	old_fc = efx->wanted_fc;
 	efx_link_set_wanted_fc(efx, wanted_fc);
-	if (efx->link_advertising != old_adv ||
+	if (efx->link_advertising[0] != old_adv ||
 	    (efx->wanted_fc ^ old_fc) & EFX_FC_AUTO) {
 		rc = efx->phy_op->reconfigure(efx);
 		if (rc) {
diff --git a/drivers/net/ethernet/sfc/io.h b/drivers/net/ethernet/sfc/io.h
index afb94aa..8956317 100644
--- a/drivers/net/ethernet/sfc/io.h
+++ b/drivers/net/ethernet/sfc/io.h
@@ -222,18 +222,21 @@ static inline void efx_reado_table(struct efx_nic *efx, efx_oword_t *value,
 	efx_reado(efx, value, reg + index * sizeof(efx_oword_t));
 }
 
-/* Page size used as step between per-VI registers */
-#define EFX_VI_PAGE_SIZE 0x2000
+/* default VI stride (step between per-VI registers) is 8K */
+#define EFX_DEFAULT_VI_STRIDE 0x2000
 
 /* Calculate offset to page-mapped register */
-#define EFX_PAGED_REG(page, reg) \
-	((page) * EFX_VI_PAGE_SIZE + (reg))
+static inline unsigned int efx_paged_reg(struct efx_nic *efx, unsigned int page,
+					 unsigned int reg)
+{
+	return page * efx->vi_stride + reg;
+}
 
 /* Write the whole of RX_DESC_UPD or TX_DESC_UPD */
 static inline void _efx_writeo_page(struct efx_nic *efx, efx_oword_t *value,
 				    unsigned int reg, unsigned int page)
 {
-	reg = EFX_PAGED_REG(page, reg);
+	reg = efx_paged_reg(efx, page, reg);
 
 	netif_vdbg(efx, hw, efx->net_dev,
 		   "writing register %x with " EFX_OWORD_FMT "\n", reg,
@@ -262,7 +265,7 @@ static inline void
 _efx_writed_page(struct efx_nic *efx, const efx_dword_t *value,
 		 unsigned int reg, unsigned int page)
 {
-	efx_writed(efx, value, EFX_PAGED_REG(page, reg));
+	efx_writed(efx, value, efx_paged_reg(efx, page, reg));
 }
 #define efx_writed_page(efx, value, reg, page)				\
 	_efx_writed_page(efx, value,					\
@@ -288,10 +291,10 @@ static inline void _efx_writed_page_locked(struct efx_nic *efx,
 
 	if (page == 0) {
 		spin_lock_irqsave(&efx->biu_lock, flags);
-		efx_writed(efx, value, EFX_PAGED_REG(page, reg));
+		efx_writed(efx, value, efx_paged_reg(efx, page, reg));
 		spin_unlock_irqrestore(&efx->biu_lock, flags);
 	} else {
-		efx_writed(efx, value, EFX_PAGED_REG(page, reg));
+		efx_writed(efx, value, efx_paged_reg(efx, page, reg));
 	}
 }
 #define efx_writed_page_locked(efx, value, reg, page)			\
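
Turning EFX_PAGED_REG() into efx_paged_reg() moves the per-VI step from
the compile-time EFX_VI_PAGE_SIZE constant to the run-time efx->vi_stride
probed from firmware; callers are otherwise unchanged. For instance, under
an assumed stride:

	/* Sketch: offsets now scale with the probed stride (e.g. 8K or 16K). */
	unsigned int evq0_tmr = efx_paged_reg(efx, 0, ER_DZ_EVQ_TMR);
	unsigned int evq1_tmr = efx_paged_reg(efx, 1, ER_DZ_EVQ_TMR);
	/* evq1_tmr - evq0_tmr == efx->vi_stride */
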
diff --git a/drivers/net/ethernet/sfc/mcdi.h b/drivers/net/ethernet/sfc/mcdi.h
index 154ef41..ebd9597 100644
--- a/drivers/net/ethernet/sfc/mcdi.h
+++ b/drivers/net/ethernet/sfc/mcdi.h
@@ -208,6 +208,9 @@ void efx_mcdi_sensor_event(struct efx_nic *efx, efx_qword_t *ev);
 #define _MCDI_DWORD(_buf, _field)					\
 	((_buf) + (_MCDI_CHECK_ALIGN(MC_CMD_ ## _field ## _OFST, 4) >> 2))
 
+#define MCDI_BYTE(_buf, _field)						\
+	((void)BUILD_BUG_ON_ZERO(MC_CMD_ ## _field ## _LEN != 1),	\
+	 *MCDI_PTR(_buf, _field))
 #define MCDI_WORD(_buf, _field)						\
 	((u16)BUILD_BUG_ON_ZERO(MC_CMD_ ## _field ## _LEN != 2) +	\
 	 le16_to_cpu(*(__force const __le16 *)MCDI_PTR(_buf, _field)))
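
MCDI_BYTE reuses the guard already visible in MCDI_WORD:
BUILD_BUG_ON_ZERO() is a zero-valued expression that fails to compile when
its condition is true, so an accessor can assert a field's width inline at
no run-time cost. The trick in isolation, as a sketch (READ_U16 is a
hypothetical macro, not part of the driver):

	/* Sketch: compile-time width check folded into an accessor macro. */
	#define READ_U16(buf, off, len)					\
		((u16)BUILD_BUG_ON_ZERO((len) != 2) +			\
		 le16_to_cpu(*(const __le16 *)((buf) + (off))))
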
diff --git a/drivers/net/ethernet/sfc/mcdi_pcol.h b/drivers/net/ethernet/sfc/mcdi_pcol.h
index 91fb54f..869d76f 100644
--- a/drivers/net/ethernet/sfc/mcdi_pcol.h
+++ b/drivers/net/ethernet/sfc/mcdi_pcol.h
@@ -114,6 +114,8 @@
 #define MCDI_HEADER_XFLAGS_WIDTH 8
 /* Request response using event */
 #define MCDI_HEADER_XFLAGS_EVREQ 0x01
+/* Request (and signal) early doorbell return */
+#define MCDI_HEADER_XFLAGS_DBRET 0x02
 
 /* Maximum number of payload bytes */
 #define MCDI_CTL_SDU_LEN_MAX_V1 0xfc
@@ -123,7 +125,7 @@
 
 
 /* The MC can generate events for two reasons:
- *   - To complete a shared memory request if XFLAGS_EVREQ was set
+ *   - To advance a shared memory request if XFLAGS_EVREQ was set
  *   - As a notification (link state, i2c event), controlled
  *     via MC_CMD_LOG_CTRL
  *
@@ -279,6 +281,17 @@
 /* Returned by MC_CMD_TESTASSERT if the action that should
  * have caused an assertion failed to do so.  */
 #define MC_CMD_ERR_UNREACHABLE 0x1016
+/* This command needs to be processed in the background but there were no
+ * resources to do so. Send it again after a command has completed. */
+#define MC_CMD_ERR_QUEUE_FULL 0x1017
+/* The operation could not be completed because the PCIe link has gone
+ * away.  This error code is never expected to be returned over the TLP
+ * transport. */
+#define MC_CMD_ERR_NO_PCIE 0x1018
+/* The operation could not be completed because the datapath has gone
+ * away.  This is distinct from MC_CMD_ERR_DATAPATH_DISABLED in that the
+ * datapath absence may be temporary. */
+#define MC_CMD_ERR_NO_DATAPATH 0x1019
 
 #define MC_CMD_ERR_CODE_OFST 0
 
@@ -360,6 +373,7 @@
 /* enum: Fatal. */
 #define          MCDI_EVENT_LEVEL_FATAL 0x3
 #define       MCDI_EVENT_DATA_OFST 0
+#define       MCDI_EVENT_DATA_LEN 4
 #define        MCDI_EVENT_CMDDONE_SEQ_LBN 0
 #define        MCDI_EVENT_CMDDONE_SEQ_WIDTH 8
 #define        MCDI_EVENT_CMDDONE_DATALEN_LBN 8
@@ -370,6 +384,8 @@
 #define        MCDI_EVENT_LINKCHANGE_LP_CAP_WIDTH 16
 #define        MCDI_EVENT_LINKCHANGE_SPEED_LBN 16
 #define        MCDI_EVENT_LINKCHANGE_SPEED_WIDTH 4
+/* enum: Link is down or link speed could not be determined */
+#define          MCDI_EVENT_LINKCHANGE_SPEED_UNKNOWN  0x0
 /* enum: 100Mbs */
 #define          MCDI_EVENT_LINKCHANGE_SPEED_100M  0x1
 /* enum: 1Gbs */
@@ -378,6 +394,12 @@
 #define          MCDI_EVENT_LINKCHANGE_SPEED_10G  0x3
 /* enum: 40Gbs */
 #define          MCDI_EVENT_LINKCHANGE_SPEED_40G  0x4
+/* enum: 25Gbs */
+#define          MCDI_EVENT_LINKCHANGE_SPEED_25G  0x5
+/* enum: 50Gbs */
+#define          MCDI_EVENT_LINKCHANGE_SPEED_50G  0x6
+/* enum: 100Gbs */
+#define          MCDI_EVENT_LINKCHANGE_SPEED_100G  0x7
 #define        MCDI_EVENT_LINKCHANGE_FCNTL_LBN 20
 #define        MCDI_EVENT_LINKCHANGE_FCNTL_WIDTH 4
 #define        MCDI_EVENT_LINKCHANGE_LINK_FLAGS_LBN 24
@@ -456,8 +478,63 @@
 #define          MCDI_EVENT_AOE_DDR_ECC_STATUS 0xa
 /* enum: PTP status update */
 #define          MCDI_EVENT_AOE_PTP_STATUS 0xb
+/* enum: FPGA header incorrect */
+#define          MCDI_EVENT_AOE_FPGA_LOAD_HEADER_ERR 0xc
+/* enum: FPGA Powered Off due to error in powering up FPGA */
+#define          MCDI_EVENT_AOE_FPGA_POWER_OFF 0xd
+/* enum: AOE FPGA load failed due to MC to MUM communication failure */
+#define          MCDI_EVENT_AOE_FPGA_LOAD_FAILED 0xe
+/* enum: Notify that an invalid flash type was detected */
+#define          MCDI_EVENT_AOE_INVALID_FPGA_FLASH_TYPE 0xf
+/* enum: Notify that the attempt to run FPGA Controller firmware timed out */
+#define          MCDI_EVENT_AOE_FC_RUN_TIMEDOUT 0x10
+/* enum: Failure to probe one or more FPGA boot flash chips */
+#define          MCDI_EVENT_AOE_FPGA_BOOT_FLASH_INVALID 0x11
+/* enum: FPGA boot-flash contains an invalid image header */
+#define          MCDI_EVENT_AOE_FPGA_BOOT_FLASH_HDR_INVALID 0x12
+/* enum: Failed to program clocks required by the FPGA */
+#define          MCDI_EVENT_AOE_FPGA_CLOCKS_PROGRAM_FAILED 0x13
+/* enum: Notify that FPGA Controller is alive to serve MCDI requests */
+#define          MCDI_EVENT_AOE_FC_RUNNING 0x14
 #define        MCDI_EVENT_AOE_ERR_DATA_LBN 8
 #define        MCDI_EVENT_AOE_ERR_DATA_WIDTH 8
+#define        MCDI_EVENT_AOE_ERR_FC_ASSERT_INFO_LBN 8
+#define        MCDI_EVENT_AOE_ERR_FC_ASSERT_INFO_WIDTH 8
+/* enum: FC Assert happened, but the register information is not available */
+#define          MCDI_EVENT_AOE_ERR_FC_ASSERT_SEEN 0x0
+/* enum: The register information for FC Assert is ready for reading by the
+ * driver
+ */
+#define          MCDI_EVENT_AOE_ERR_FC_ASSERT_DATA_READY 0x1
+#define        MCDI_EVENT_AOE_ERR_CODE_FPGA_HEADER_VERIFY_FAILED_LBN 8
+#define        MCDI_EVENT_AOE_ERR_CODE_FPGA_HEADER_VERIFY_FAILED_WIDTH 8
+/* enum: Reading from NV failed */
+#define          MCDI_EVENT_AOE_ERR_FPGA_HEADER_NV_READ_FAIL 0x0
+/* enum: Invalid Magic Number in FPGA header */
+#define          MCDI_EVENT_AOE_ERR_FPGA_HEADER_MAGIC_FAIL 0x1
+/* enum: Invalid Silicon type detected in header */
+#define          MCDI_EVENT_AOE_ERR_FPGA_HEADER_SILICON_TYPE 0x2
+/* enum: Unsupported VRatio */
+#define          MCDI_EVENT_AOE_ERR_FPGA_HEADER_VRATIO 0x3
+/* enum: Unsupported DDR Type */
+#define          MCDI_EVENT_AOE_ERR_FPGA_HEADER_DDR_TYPE 0x4
+/* enum: DDR Voltage out of supported range */
+#define          MCDI_EVENT_AOE_ERR_FPGA_HEADER_DDR_VOLTAGE 0x5
+/* enum: Unsupported DDR speed */
+#define          MCDI_EVENT_AOE_ERR_FPGA_HEADER_DDR_SPEED 0x6
+/* enum: Unsupported DDR size */
+#define          MCDI_EVENT_AOE_ERR_FPGA_HEADER_DDR_SIZE 0x7
+/* enum: Unsupported DDR rank */
+#define          MCDI_EVENT_AOE_ERR_FPGA_HEADER_DDR_RANK 0x8
+#define        MCDI_EVENT_AOE_ERR_CODE_INVALID_FPGA_FLASH_TYPE_INFO_LBN 8
+#define        MCDI_EVENT_AOE_ERR_CODE_INVALID_FPGA_FLASH_TYPE_INFO_WIDTH 8
+/* enum: Primary boot flash */
+#define          MCDI_EVENT_AOE_FLASH_TYPE_BOOT_PRIMARY 0x0
+/* enum: Secondary boot flash */
+#define          MCDI_EVENT_AOE_FLASH_TYPE_BOOT_SECONDARY 0x1
+#define        MCDI_EVENT_AOE_ERR_CODE_FPGA_POWER_OFF_LBN 8
+#define        MCDI_EVENT_AOE_ERR_CODE_FPGA_POWER_OFF_WIDTH 8
+#define        MCDI_EVENT_AOE_ERR_CODE_FPGA_LOAD_FAILED_LBN 8
+#define        MCDI_EVENT_AOE_ERR_CODE_FPGA_LOAD_FAILED_WIDTH 8
 #define        MCDI_EVENT_RX_ERR_RXQ_LBN 0
 #define        MCDI_EVENT_RX_ERR_RXQ_WIDTH 12
 #define        MCDI_EVENT_RX_ERR_TYPE_LBN 12
@@ -480,6 +557,22 @@
 #define          MCDI_EVENT_MUM_WATCHDOG 0x3
 #define        MCDI_EVENT_MUM_ERR_DATA_LBN 8
 #define        MCDI_EVENT_MUM_ERR_DATA_WIDTH 8
+#define        MCDI_EVENT_DBRET_SEQ_LBN 0
+#define        MCDI_EVENT_DBRET_SEQ_WIDTH 8
+#define        MCDI_EVENT_SUC_ERR_TYPE_LBN 0
+#define        MCDI_EVENT_SUC_ERR_TYPE_WIDTH 8
+/* enum: Corrupted or bad SUC application. */
+#define          MCDI_EVENT_SUC_BAD_APP 0x1
+/* enum: SUC application reported an assert. */
+#define          MCDI_EVENT_SUC_ASSERT 0x2
+/* enum: SUC application reported an exception. */
+#define          MCDI_EVENT_SUC_EXCEPTION 0x3
+/* enum: SUC watchdog timer expired. */
+#define          MCDI_EVENT_SUC_WATCHDOG 0x4
+#define        MCDI_EVENT_SUC_ERR_ADDRESS_LBN 8
+#define        MCDI_EVENT_SUC_ERR_ADDRESS_WIDTH 24
+#define        MCDI_EVENT_SUC_ERR_DATA_LBN 8
+#define        MCDI_EVENT_SUC_ERR_DATA_WIDTH 24
 #define       MCDI_EVENT_DATA_LBN 0
 #define       MCDI_EVENT_DATA_WIDTH 32
 #define       MCDI_EVENT_SRC_LBN 36
@@ -552,73 +645,99 @@
  * been processed and it may now resend the command
  */
 #define          MCDI_EVENT_CODE_PROXY_RESPONSE 0x1d
+/* enum: MCDI command accepted. New commands can be issued but this command is
+ * not done yet.
+ */
+#define          MCDI_EVENT_CODE_DBRET 0x1e
+/* enum: The MC has detected a fault on the SUC */
+#define          MCDI_EVENT_CODE_SUC 0x1f
 /* enum: Artificial event generated by host and posted via MC for test
  * purposes.
  */
 #define          MCDI_EVENT_CODE_TESTGEN  0xfa
 #define       MCDI_EVENT_CMDDONE_DATA_OFST 0
+#define       MCDI_EVENT_CMDDONE_DATA_LEN 4
 #define       MCDI_EVENT_CMDDONE_DATA_LBN 0
 #define       MCDI_EVENT_CMDDONE_DATA_WIDTH 32
 #define       MCDI_EVENT_LINKCHANGE_DATA_OFST 0
+#define       MCDI_EVENT_LINKCHANGE_DATA_LEN 4
 #define       MCDI_EVENT_LINKCHANGE_DATA_LBN 0
 #define       MCDI_EVENT_LINKCHANGE_DATA_WIDTH 32
 #define       MCDI_EVENT_SENSOREVT_DATA_OFST 0
+#define       MCDI_EVENT_SENSOREVT_DATA_LEN 4
 #define       MCDI_EVENT_SENSOREVT_DATA_LBN 0
 #define       MCDI_EVENT_SENSOREVT_DATA_WIDTH 32
 #define       MCDI_EVENT_MAC_STATS_DMA_GENERATION_OFST 0
+#define       MCDI_EVENT_MAC_STATS_DMA_GENERATION_LEN 4
 #define       MCDI_EVENT_MAC_STATS_DMA_GENERATION_LBN 0
 #define       MCDI_EVENT_MAC_STATS_DMA_GENERATION_WIDTH 32
 #define       MCDI_EVENT_TX_ERR_DATA_OFST 0
+#define       MCDI_EVENT_TX_ERR_DATA_LEN 4
 #define       MCDI_EVENT_TX_ERR_DATA_LBN 0
 #define       MCDI_EVENT_TX_ERR_DATA_WIDTH 32
 /* For CODE_PTP_RX, CODE_PTP_PPS and CODE_HW_PPS events the seconds field of
  * timestamp
  */
 #define       MCDI_EVENT_PTP_SECONDS_OFST 0
+#define       MCDI_EVENT_PTP_SECONDS_LEN 4
 #define       MCDI_EVENT_PTP_SECONDS_LBN 0
 #define       MCDI_EVENT_PTP_SECONDS_WIDTH 32
 /* For CODE_PTP_RX, CODE_PTP_PPS and CODE_HW_PPS events the major field of
  * timestamp
  */
 #define       MCDI_EVENT_PTP_MAJOR_OFST 0
+#define       MCDI_EVENT_PTP_MAJOR_LEN 4
 #define       MCDI_EVENT_PTP_MAJOR_LBN 0
 #define       MCDI_EVENT_PTP_MAJOR_WIDTH 32
 /* For CODE_PTP_RX, CODE_PTP_PPS and CODE_HW_PPS events the nanoseconds field
  * of timestamp
  */
 #define       MCDI_EVENT_PTP_NANOSECONDS_OFST 0
+#define       MCDI_EVENT_PTP_NANOSECONDS_LEN 4
 #define       MCDI_EVENT_PTP_NANOSECONDS_LBN 0
 #define       MCDI_EVENT_PTP_NANOSECONDS_WIDTH 32
 /* For CODE_PTP_RX, CODE_PTP_PPS and CODE_HW_PPS events the minor field of
  * timestamp
  */
 #define       MCDI_EVENT_PTP_MINOR_OFST 0
+#define       MCDI_EVENT_PTP_MINOR_LEN 4
 #define       MCDI_EVENT_PTP_MINOR_LBN 0
 #define       MCDI_EVENT_PTP_MINOR_WIDTH 32
 /* For CODE_PTP_RX events, the lowest four bytes of sourceUUID from PTP packet
  */
 #define       MCDI_EVENT_PTP_UUID_OFST 0
+#define       MCDI_EVENT_PTP_UUID_LEN 4
 #define       MCDI_EVENT_PTP_UUID_LBN 0
 #define       MCDI_EVENT_PTP_UUID_WIDTH 32
 #define       MCDI_EVENT_RX_ERR_DATA_OFST 0
+#define       MCDI_EVENT_RX_ERR_DATA_LEN 4
 #define       MCDI_EVENT_RX_ERR_DATA_LBN 0
 #define       MCDI_EVENT_RX_ERR_DATA_WIDTH 32
 #define       MCDI_EVENT_PAR_ERR_DATA_OFST 0
+#define       MCDI_EVENT_PAR_ERR_DATA_LEN 4
 #define       MCDI_EVENT_PAR_ERR_DATA_LBN 0
 #define       MCDI_EVENT_PAR_ERR_DATA_WIDTH 32
 #define       MCDI_EVENT_ECC_CORR_ERR_DATA_OFST 0
+#define       MCDI_EVENT_ECC_CORR_ERR_DATA_LEN 4
 #define       MCDI_EVENT_ECC_CORR_ERR_DATA_LBN 0
 #define       MCDI_EVENT_ECC_CORR_ERR_DATA_WIDTH 32
 #define       MCDI_EVENT_ECC_FATAL_ERR_DATA_OFST 0
+#define       MCDI_EVENT_ECC_FATAL_ERR_DATA_LEN 4
 #define       MCDI_EVENT_ECC_FATAL_ERR_DATA_LBN 0
 #define       MCDI_EVENT_ECC_FATAL_ERR_DATA_WIDTH 32
 /* For CODE_PTP_TIME events, the major value of the PTP clock */
 #define       MCDI_EVENT_PTP_TIME_MAJOR_OFST 0
+#define       MCDI_EVENT_PTP_TIME_MAJOR_LEN 4
 #define       MCDI_EVENT_PTP_TIME_MAJOR_LBN 0
 #define       MCDI_EVENT_PTP_TIME_MAJOR_WIDTH 32
 /* For CODE_PTP_TIME events, bits 19-26 of the minor value of the PTP clock */
 #define       MCDI_EVENT_PTP_TIME_MINOR_26_19_LBN 36
 #define       MCDI_EVENT_PTP_TIME_MINOR_26_19_WIDTH 8
+/* For CODE_PTP_TIME events, most significant bits of the minor value of the
+ * PTP clock. This is a more generic equivalent of PTP_TIME_MINOR_26_19.
+ */
+#define       MCDI_EVENT_PTP_TIME_MINOR_MS_8BITS_LBN 36
+#define       MCDI_EVENT_PTP_TIME_MINOR_MS_8BITS_WIDTH 8
 /* For CODE_PTP_TIME events where report sync status is enabled, indicates
  * whether the NIC clock has ever been set
  */
@@ -634,10 +753,17 @@
  */
 #define       MCDI_EVENT_PTP_TIME_MINOR_26_21_LBN 38
 #define       MCDI_EVENT_PTP_TIME_MINOR_26_21_WIDTH 6
+/* For CODE_PTP_TIME events, most significant bits of the minor value of the
+ * PTP clock. This is a more generic equivalent of PTP_TIME_MINOR_26_21.
+ */
+#define       MCDI_EVENT_PTP_TIME_MINOR_MS_6BITS_LBN 38
+#define       MCDI_EVENT_PTP_TIME_MINOR_MS_6BITS_WIDTH 6
 #define       MCDI_EVENT_PROXY_REQUEST_BUFF_INDEX_OFST 0
+#define       MCDI_EVENT_PROXY_REQUEST_BUFF_INDEX_LEN 4
 #define       MCDI_EVENT_PROXY_REQUEST_BUFF_INDEX_LBN 0
 #define       MCDI_EVENT_PROXY_REQUEST_BUFF_INDEX_WIDTH 32
 #define       MCDI_EVENT_PROXY_RESPONSE_HANDLE_OFST 0
+#define       MCDI_EVENT_PROXY_RESPONSE_HANDLE_LEN 4
 #define       MCDI_EVENT_PROXY_RESPONSE_HANDLE_LBN 0
 #define       MCDI_EVENT_PROXY_RESPONSE_HANDLE_WIDTH 32
 /* Zero means that the request has been completed or authorized, and the driver
@@ -646,6 +772,10 @@
  */
 #define       MCDI_EVENT_PROXY_RESPONSE_RC_LBN 36
 #define       MCDI_EVENT_PROXY_RESPONSE_RC_WIDTH 8
+#define       MCDI_EVENT_DBRET_DATA_OFST 0
+#define       MCDI_EVENT_DBRET_DATA_LEN 4
+#define       MCDI_EVENT_DBRET_DATA_LBN 0
+#define       MCDI_EVENT_DBRET_DATA_WIDTH 32
 
 /* FCDI_EVENT structuredef */
 #define    FCDI_EVENT_LEN 8
@@ -662,6 +792,7 @@
 /* enum: Fatal. */
 #define          FCDI_EVENT_LEVEL_FATAL 0x3
 #define       FCDI_EVENT_DATA_OFST 0
+#define       FCDI_EVENT_DATA_LEN 4
 #define        FCDI_EVENT_LINK_STATE_STATUS_LBN 0
 #define        FCDI_EVENT_LINK_STATE_STATUS_WIDTH 1
 #define          FCDI_EVENT_LINK_DOWN 0x0 /* enum */
@@ -701,6 +832,7 @@
 #define          FCDI_EVENT_REBOOT_FC_FW 0x0 /* enum */
 #define          FCDI_EVENT_REBOOT_FC_BOOTLOADER 0x1 /* enum */
 #define       FCDI_EVENT_ASSERT_INSTR_ADDRESS_OFST 0
+#define       FCDI_EVENT_ASSERT_INSTR_ADDRESS_LEN 4
 #define       FCDI_EVENT_ASSERT_INSTR_ADDRESS_LBN 0
 #define       FCDI_EVENT_ASSERT_INSTR_ADDRESS_WIDTH 32
 #define       FCDI_EVENT_ASSERT_TYPE_LBN 36
@@ -708,12 +840,15 @@
 #define       FCDI_EVENT_DDR_TEST_RESULT_STATUS_CODE_LBN 36
 #define       FCDI_EVENT_DDR_TEST_RESULT_STATUS_CODE_WIDTH 8
 #define       FCDI_EVENT_DDR_TEST_RESULT_RESULT_OFST 0
+#define       FCDI_EVENT_DDR_TEST_RESULT_RESULT_LEN 4
 #define       FCDI_EVENT_DDR_TEST_RESULT_RESULT_LBN 0
 #define       FCDI_EVENT_DDR_TEST_RESULT_RESULT_WIDTH 32
 #define       FCDI_EVENT_LINK_STATE_DATA_OFST 0
+#define       FCDI_EVENT_LINK_STATE_DATA_LEN 4
 #define       FCDI_EVENT_LINK_STATE_DATA_LBN 0
 #define       FCDI_EVENT_LINK_STATE_DATA_WIDTH 32
 #define       FCDI_EVENT_PTP_STATE_OFST 0
+#define       FCDI_EVENT_PTP_STATE_LEN 4
 #define          FCDI_EVENT_PTP_UNDEFINED 0x0 /* enum */
 #define          FCDI_EVENT_PTP_SETUP_FAILED 0x1 /* enum */
 #define          FCDI_EVENT_PTP_OPERATIONAL 0x2 /* enum */
@@ -722,6 +857,7 @@
 #define       FCDI_EVENT_DDR_ECC_STATUS_BANK_ID_LBN 36
 #define       FCDI_EVENT_DDR_ECC_STATUS_BANK_ID_WIDTH 8
 #define       FCDI_EVENT_DDR_ECC_STATUS_STATUS_OFST 0
+#define       FCDI_EVENT_DDR_ECC_STATUS_STATUS_LEN 4
 #define       FCDI_EVENT_DDR_ECC_STATUS_STATUS_LBN 0
 #define       FCDI_EVENT_DDR_ECC_STATUS_STATUS_WIDTH 32
 /* Index of MC port being referred to */
@@ -729,9 +865,11 @@
 #define       FCDI_EVENT_PORT_CONFIG_SRC_WIDTH 8
 /* FC Port index that matches the MC port index in SRC */
 #define       FCDI_EVENT_PORT_CONFIG_DATA_OFST 0
+#define       FCDI_EVENT_PORT_CONFIG_DATA_LEN 4
 #define       FCDI_EVENT_PORT_CONFIG_DATA_LBN 0
 #define       FCDI_EVENT_PORT_CONFIG_DATA_WIDTH 32
 #define       FCDI_EVENT_BOOT_RESULT_OFST 0
+#define       FCDI_EVENT_BOOT_RESULT_LEN 4
 /*            Enum values, see field(s): */
 /*               MC_CMD_AOE/MC_CMD_AOE_OUT_INFO/FC_BOOT_RESULT */
 #define       FCDI_EVENT_BOOT_RESULT_LBN 0
@@ -748,14 +886,17 @@
 #define    FCDI_EXTENDED_EVENT_PPS_LEN(num) (8+8*(num))
 /* Number of timestamps following */
 #define       FCDI_EXTENDED_EVENT_PPS_COUNT_OFST 0
+#define       FCDI_EXTENDED_EVENT_PPS_COUNT_LEN 4
 #define       FCDI_EXTENDED_EVENT_PPS_COUNT_LBN 0
 #define       FCDI_EXTENDED_EVENT_PPS_COUNT_WIDTH 32
 /* Seconds field of a timestamp record */
 #define       FCDI_EXTENDED_EVENT_PPS_SECONDS_OFST 8
+#define       FCDI_EXTENDED_EVENT_PPS_SECONDS_LEN 4
 #define       FCDI_EXTENDED_EVENT_PPS_SECONDS_LBN 64
 #define       FCDI_EXTENDED_EVENT_PPS_SECONDS_WIDTH 32
 /* Nanoseconds field of a timestamp record */
 #define       FCDI_EXTENDED_EVENT_PPS_NANOSECONDS_OFST 12
+#define       FCDI_EXTENDED_EVENT_PPS_NANOSECONDS_LEN 4
 #define       FCDI_EXTENDED_EVENT_PPS_NANOSECONDS_LBN 96
 #define       FCDI_EXTENDED_EVENT_PPS_NANOSECONDS_WIDTH 32
 /* Timestamp records comprising the event */
@@ -783,6 +924,7 @@
 /* enum: Fatal. */
 #define          MUM_EVENT_LEVEL_FATAL 0x3
 #define       MUM_EVENT_DATA_OFST 0
+#define       MUM_EVENT_DATA_LEN 4
 #define        MUM_EVENT_SENSOR_ID_LBN 0
 #define        MUM_EVENT_SENSOR_ID_WIDTH 8
 /*             Enum values, see field(s): */
@@ -820,18 +962,23 @@
 /* enum: Link fault has been asserted, or has cleared. */
 #define          MUM_EVENT_CODE_QSFP_LASI_INTERRUPT 0x4
 #define       MUM_EVENT_SENSOR_DATA_OFST 0
+#define       MUM_EVENT_SENSOR_DATA_LEN 4
 #define       MUM_EVENT_SENSOR_DATA_LBN 0
 #define       MUM_EVENT_SENSOR_DATA_WIDTH 32
 #define       MUM_EVENT_PORT_PHY_FLAGS_OFST 0
+#define       MUM_EVENT_PORT_PHY_FLAGS_LEN 4
 #define       MUM_EVENT_PORT_PHY_FLAGS_LBN 0
 #define       MUM_EVENT_PORT_PHY_FLAGS_WIDTH 32
 #define       MUM_EVENT_PORT_PHY_COPPER_LEN_OFST 0
+#define       MUM_EVENT_PORT_PHY_COPPER_LEN_LEN 4
 #define       MUM_EVENT_PORT_PHY_COPPER_LEN_LBN 0
 #define       MUM_EVENT_PORT_PHY_COPPER_LEN_WIDTH 32
 #define       MUM_EVENT_PORT_PHY_CAPS_OFST 0
+#define       MUM_EVENT_PORT_PHY_CAPS_LEN 4
 #define       MUM_EVENT_PORT_PHY_CAPS_LBN 0
 #define       MUM_EVENT_PORT_PHY_CAPS_WIDTH 32
 #define       MUM_EVENT_PORT_PHY_TECH_OFST 0
+#define       MUM_EVENT_PORT_PHY_TECH_LEN 4
 #define          MUM_EVENT_PORT_PHY_STATE_QSFP_MODULE_TECH_UNKNOWN 0x0 /* enum */
 #define          MUM_EVENT_PORT_PHY_STATE_QSFP_MODULE_TECH_OPTICAL 0x1 /* enum */
 #define          MUM_EVENT_PORT_PHY_STATE_QSFP_MODULE_TECH_COPPER_PASSIVE 0x2 /* enum */
@@ -864,7 +1011,9 @@
 /* MC_CMD_READ32_IN msgrequest */
 #define    MC_CMD_READ32_IN_LEN 8
 #define       MC_CMD_READ32_IN_ADDR_OFST 0
+#define       MC_CMD_READ32_IN_ADDR_LEN 4
 #define       MC_CMD_READ32_IN_NUMWORDS_OFST 4
+#define       MC_CMD_READ32_IN_NUMWORDS_LEN 4
 
 /* MC_CMD_READ32_OUT msgresponse */
 #define    MC_CMD_READ32_OUT_LENMIN 4
@@ -882,13 +1031,14 @@
  */
 #define MC_CMD_WRITE32 0x2
 
-#define MC_CMD_0x2_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+#define MC_CMD_0x2_PRIVILEGE_CTG SRIOV_CTG_INSECURE
 
 /* MC_CMD_WRITE32_IN msgrequest */
 #define    MC_CMD_WRITE32_IN_LENMIN 8
 #define    MC_CMD_WRITE32_IN_LENMAX 252
 #define    MC_CMD_WRITE32_IN_LEN(num) (4+4*(num))
 #define       MC_CMD_WRITE32_IN_ADDR_OFST 0
+#define       MC_CMD_WRITE32_IN_ADDR_LEN 4
 #define       MC_CMD_WRITE32_IN_BUFFER_OFST 4
 #define       MC_CMD_WRITE32_IN_BUFFER_LEN 4
 #define       MC_CMD_WRITE32_IN_BUFFER_MINNUM 1
@@ -915,6 +1065,7 @@
  * is a bitfield, with each bit as documented below.
  */
 #define       MC_CMD_COPYCODE_IN_SRC_ADDR_OFST 0
+#define       MC_CMD_COPYCODE_IN_SRC_ADDR_LEN 4
 /* enum: Deprecated; equivalent to setting BOOT_MAGIC_PRESENT (see below) */
 #define          MC_CMD_COPYCODE_HUNT_NO_MAGIC_ADDR 0x10000
 /* enum: Deprecated; equivalent to setting BOOT_MAGIC_PRESENT and
@@ -940,9 +1091,12 @@
 #define        MC_CMD_COPYCODE_IN_BOOT_MAGIC_DISABLE_XIP_WIDTH 1
 /* Destination address */
 #define       MC_CMD_COPYCODE_IN_DEST_ADDR_OFST 4
+#define       MC_CMD_COPYCODE_IN_DEST_ADDR_LEN 4
 #define       MC_CMD_COPYCODE_IN_NUMWORDS_OFST 8
+#define       MC_CMD_COPYCODE_IN_NUMWORDS_LEN 4
 /* Address of where to jump after copy. */
 #define       MC_CMD_COPYCODE_IN_JUMP_OFST 12
+#define       MC_CMD_COPYCODE_IN_JUMP_LEN 4
 /* enum: Control should return to the caller rather than jumping */
 #define          MC_CMD_COPYCODE_JUMP_NONE 0x1
 
@@ -956,12 +1110,13 @@
  */
 #define MC_CMD_SET_FUNC 0x4
 
-#define MC_CMD_0x4_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+#define MC_CMD_0x4_PRIVILEGE_CTG SRIOV_CTG_INSECURE
 
 /* MC_CMD_SET_FUNC_IN msgrequest */
 #define    MC_CMD_SET_FUNC_IN_LEN 4
 /* Set function */
 #define       MC_CMD_SET_FUNC_IN_FUNC_OFST 0
+#define       MC_CMD_SET_FUNC_IN_FUNC_LEN 4
 
 /* MC_CMD_SET_FUNC_OUT msgresponse */
 #define    MC_CMD_SET_FUNC_OUT_LEN 0
@@ -973,7 +1128,7 @@
  */
 #define MC_CMD_GET_BOOT_STATUS 0x5
 
-#define MC_CMD_0x5_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+#define MC_CMD_0x5_PRIVILEGE_CTG SRIOV_CTG_GENERAL
 
 /* MC_CMD_GET_BOOT_STATUS_IN msgrequest */
 #define    MC_CMD_GET_BOOT_STATUS_IN_LEN 0
@@ -982,9 +1137,11 @@
 #define    MC_CMD_GET_BOOT_STATUS_OUT_LEN 8
 /* ?? */
 #define       MC_CMD_GET_BOOT_STATUS_OUT_BOOT_OFFSET_OFST 0
+#define       MC_CMD_GET_BOOT_STATUS_OUT_BOOT_OFFSET_LEN 4
 /* enum: indicates that the MC wasn't flash booted */
 #define          MC_CMD_GET_BOOT_STATUS_OUT_BOOT_OFFSET_NULL  0xdeadbeef
 #define       MC_CMD_GET_BOOT_STATUS_OUT_FLAGS_OFST 4
+#define       MC_CMD_GET_BOOT_STATUS_OUT_FLAGS_LEN 4
 #define        MC_CMD_GET_BOOT_STATUS_OUT_FLAGS_WATCHDOG_LBN 0
 #define        MC_CMD_GET_BOOT_STATUS_OUT_FLAGS_WATCHDOG_WIDTH 1
 #define        MC_CMD_GET_BOOT_STATUS_OUT_FLAGS_PRIMARY_LBN 1
@@ -1007,11 +1164,13 @@
 #define    MC_CMD_GET_ASSERTS_IN_LEN 4
 /* Set to clear assertion */
 #define       MC_CMD_GET_ASSERTS_IN_CLEAR_OFST 0
+#define       MC_CMD_GET_ASSERTS_IN_CLEAR_LEN 4
 
 /* MC_CMD_GET_ASSERTS_OUT msgresponse */
 #define    MC_CMD_GET_ASSERTS_OUT_LEN 140
 /* Assertion status flag. */
 #define       MC_CMD_GET_ASSERTS_OUT_GLOBAL_FLAGS_OFST 0
+#define       MC_CMD_GET_ASSERTS_OUT_GLOBAL_FLAGS_LEN 4
 /* enum: No assertions have failed. */
 #define          MC_CMD_GET_ASSERTS_FLAGS_NO_FAILS 0x1
 /* enum: A system-level assertion has failed. */
@@ -1024,6 +1183,7 @@
 #define          MC_CMD_GET_ASSERTS_FLAGS_ADDR_TRAP 0x5
 /* Failing PC value */
 #define       MC_CMD_GET_ASSERTS_OUT_SAVED_PC_OFFS_OFST 4
+#define       MC_CMD_GET_ASSERTS_OUT_SAVED_PC_OFFS_LEN 4
 /* Saved GP regs */
 #define       MC_CMD_GET_ASSERTS_OUT_GP_REGS_OFFS_OFST 8
 #define       MC_CMD_GET_ASSERTS_OUT_GP_REGS_OFFS_LEN 4
@@ -1034,7 +1194,9 @@
 #define          MC_CMD_GET_ASSERTS_REG_NO_DATA 0xda7a1057
 /* Failing thread address */
 #define       MC_CMD_GET_ASSERTS_OUT_THREAD_OFFS_OFST 132
+#define       MC_CMD_GET_ASSERTS_OUT_THREAD_OFFS_LEN 4
 #define       MC_CMD_GET_ASSERTS_OUT_RESERVED_OFST 136
+#define       MC_CMD_GET_ASSERTS_OUT_RESERVED_LEN 4
 
 
 /***********************************/
@@ -1050,12 +1212,14 @@
 #define    MC_CMD_LOG_CTRL_IN_LEN 8
 /* Log destination */
 #define       MC_CMD_LOG_CTRL_IN_LOG_DEST_OFST 0
+#define       MC_CMD_LOG_CTRL_IN_LOG_DEST_LEN 4
 /* enum: UART. */
 #define          MC_CMD_LOG_CTRL_IN_LOG_DEST_UART 0x1
 /* enum: Event queue. */
 #define          MC_CMD_LOG_CTRL_IN_LOG_DEST_EVQ 0x2
 /* Legacy argument. Must be zero. */
 #define       MC_CMD_LOG_CTRL_IN_LOG_DEST_EVQ_OFST 4
+#define       MC_CMD_LOG_CTRL_IN_LOG_DEST_EVQ_LEN 4
 
 /* MC_CMD_LOG_CTRL_OUT msgresponse */
 #define    MC_CMD_LOG_CTRL_OUT_LEN 0
@@ -1076,23 +1240,29 @@
 #define    MC_CMD_GET_VERSION_EXT_IN_LEN 4
 /* placeholder, set to 0 */
 #define       MC_CMD_GET_VERSION_EXT_IN_EXT_FLAGS_OFST 0
+#define       MC_CMD_GET_VERSION_EXT_IN_EXT_FLAGS_LEN 4
 
 /* MC_CMD_GET_VERSION_V0_OUT msgresponse: deprecated version format */
 #define    MC_CMD_GET_VERSION_V0_OUT_LEN 4
 #define       MC_CMD_GET_VERSION_OUT_FIRMWARE_OFST 0
+#define       MC_CMD_GET_VERSION_OUT_FIRMWARE_LEN 4
 /* enum: Reserved version number to indicate "any" version. */
 #define          MC_CMD_GET_VERSION_OUT_FIRMWARE_ANY 0xffffffff
 /* enum: Bootrom version value for Siena. */
 #define          MC_CMD_GET_VERSION_OUT_FIRMWARE_SIENA_BOOTROM 0xb0070000
 /* enum: Bootrom version value for Huntington. */
 #define          MC_CMD_GET_VERSION_OUT_FIRMWARE_HUNT_BOOTROM 0xb0070001
+/* enum: Bootrom version value for Medford2. */
+#define          MC_CMD_GET_VERSION_OUT_FIRMWARE_MEDFORD2_BOOTROM 0xb0070002
 
 /* MC_CMD_GET_VERSION_OUT msgresponse */
 #define    MC_CMD_GET_VERSION_OUT_LEN 32
 /*            MC_CMD_GET_VERSION_OUT_FIRMWARE_OFST 0 */
+/*            MC_CMD_GET_VERSION_OUT_FIRMWARE_LEN 4 */
 /*            Enum values, see field(s): */
 /*               MC_CMD_GET_VERSION_V0_OUT/MC_CMD_GET_VERSION_OUT_FIRMWARE */
 #define       MC_CMD_GET_VERSION_OUT_PCOL_OFST 4
+#define       MC_CMD_GET_VERSION_OUT_PCOL_LEN 4
 /* 128bit mask of functions supported by the current firmware */
 #define       MC_CMD_GET_VERSION_OUT_SUPPORTED_FUNCS_OFST 8
 #define       MC_CMD_GET_VERSION_OUT_SUPPORTED_FUNCS_LEN 16
@@ -1104,9 +1274,11 @@
 /* MC_CMD_GET_VERSION_EXT_OUT msgresponse */
 #define    MC_CMD_GET_VERSION_EXT_OUT_LEN 48
 /*            MC_CMD_GET_VERSION_OUT_FIRMWARE_OFST 0 */
+/*            MC_CMD_GET_VERSION_OUT_FIRMWARE_LEN 4 */
 /*            Enum values, see field(s): */
 /*               MC_CMD_GET_VERSION_V0_OUT/MC_CMD_GET_VERSION_OUT_FIRMWARE */
 #define       MC_CMD_GET_VERSION_EXT_OUT_PCOL_OFST 4
+#define       MC_CMD_GET_VERSION_EXT_OUT_PCOL_LEN 4
 /* 128bit mask of functions supported by the current firmware */
 #define       MC_CMD_GET_VERSION_EXT_OUT_SUPPORTED_FUNCS_OFST 8
 #define       MC_CMD_GET_VERSION_EXT_OUT_SUPPORTED_FUNCS_LEN 16
@@ -1136,41 +1308,54 @@
 #define          MC_CMD_PTP_OP_ENABLE 0x1
 /* enum: Disable PTP packet timestamping operation. */
 #define          MC_CMD_PTP_OP_DISABLE 0x2
-/* enum: Send a PTP packet. */
+/* enum: Send a PTP packet. This operation is used on Siena and Huntington.
+ * From Medford onwards it is not supported: on those platforms PTP transmit
+ * timestamping is done using the fast path.
+ */
 #define          MC_CMD_PTP_OP_TRANSMIT 0x3
 /* enum: Read the current NIC time. */
 #define          MC_CMD_PTP_OP_READ_NIC_TIME 0x4
-/* enum: Get the current PTP status. */
+/* enum: Get the current PTP status. Note that the clock frequency returned (in
+ * Hz) is rounded to the nearest MHz (e.g. 666000000 for 666666666).
+ */
 #define          MC_CMD_PTP_OP_STATUS 0x5
 /* enum: Adjust the PTP NIC's time. */
 #define          MC_CMD_PTP_OP_ADJUST 0x6
 /* enum: Synchronize host and NIC time. */
 #define          MC_CMD_PTP_OP_SYNCHRONIZE 0x7
-/* enum: Basic manufacturing tests. */
+/* enum: Basic manufacturing tests. Siena PTP adapters only. */
 #define          MC_CMD_PTP_OP_MANFTEST_BASIC 0x8
-/* enum: Packet based manufacturing tests. */
+/* enum: Packet based manufacturing tests. Siena PTP adapters only. */
 #define          MC_CMD_PTP_OP_MANFTEST_PACKET 0x9
 /* enum: Reset some of the PTP related statistics */
 #define          MC_CMD_PTP_OP_RESET_STATS 0xa
 /* enum: Debug operations to MC. */
 #define          MC_CMD_PTP_OP_DEBUG 0xb
-/* enum: Read an FPGA register */
+/* enum: Read an FPGA register. Siena PTP adapters only. */
 #define          MC_CMD_PTP_OP_FPGAREAD 0xc
-/* enum: Write an FPGA register */
+/* enum: Write an FPGA register. Siena PTP adapters only. */
 #define          MC_CMD_PTP_OP_FPGAWRITE 0xd
 /* enum: Apply an offset to the NIC clock */
 #define          MC_CMD_PTP_OP_CLOCK_OFFSET_ADJUST 0xe
-/* enum: Change Apply an offset to the NIC clock */
+/* enum: Change the frequency correction applied to the NIC clock */
 #define          MC_CMD_PTP_OP_CLOCK_FREQ_ADJUST 0xf
-/* enum: Set the MC packet filter VLAN tags for received PTP packets */
+/* enum: Set the MC packet filter VLAN tags for received PTP packets.
+ * Deprecated for Huntington onwards.
+ */
 #define          MC_CMD_PTP_OP_RX_SET_VLAN_FILTER 0x10
-/* enum: Set the MC packet filter UUID for received PTP packets */
+/* enum: Set the MC packet filter UUID for received PTP packets. Deprecated for
+ * Huntington onwards.
+ */
 #define          MC_CMD_PTP_OP_RX_SET_UUID_FILTER 0x11
-/* enum: Set the MC packet filter Domain for received PTP packets */
+/* enum: Set the MC packet filter Domain for received PTP packets. Deprecated
+ * for Huntington onwards.
+ */
 #define          MC_CMD_PTP_OP_RX_SET_DOMAIN_FILTER 0x12
-/* enum: Set the clock source */
+/* enum: Set the clock source. Required for snapper tests on Huntington and
+ * Medford. Not implemented for Siena or Medford2.
+ */
 #define          MC_CMD_PTP_OP_SET_CLK_SRC 0x13
-/* enum: Reset value of Timer Reg. */
+/* enum: Reset value of Timer Reg. Not implemented. */
 #define          MC_CMD_PTP_OP_RST_CLK 0x14
 /* enum: Enable the forwarding of PPS events to the host */
 #define          MC_CMD_PTP_OP_PPS_ENABLE 0x15
@@ -1191,7 +1376,7 @@
 /* enum: Unsubscribe to stop receiving time events */
 #define          MC_CMD_PTP_OP_TIME_EVENT_UNSUBSCRIBE 0x19
 /* enum: PPS based manufacturing tests. Requires PPS output to be looped to PPS
- * input on the same NIC.
+ * input on the same NIC. Siena PTP adapters only.
  */
 #define          MC_CMD_PTP_OP_MANFTEST_PPS 0x1a
 /* enum: Set the PTP sync status. Status is used by firmware to report to event
@@ -1204,11 +1389,15 @@
 /* MC_CMD_PTP_IN_ENABLE msgrequest */
 #define    MC_CMD_PTP_IN_ENABLE_LEN 16
 #define       MC_CMD_PTP_IN_CMD_OFST 0
+#define       MC_CMD_PTP_IN_CMD_LEN 4
 #define       MC_CMD_PTP_IN_PERIPH_ID_OFST 4
-/* Event queue for PTP events */
+#define       MC_CMD_PTP_IN_PERIPH_ID_LEN 4
+/* Not used. Events are always sent to function-relative queue 0. */
 #define       MC_CMD_PTP_IN_ENABLE_QUEUE_OFST 8
-/* PTP timestamping mode */
+#define       MC_CMD_PTP_IN_ENABLE_QUEUE_LEN 4
+/* PTP timestamping mode. Not used from Huntington onwards. */
 #define       MC_CMD_PTP_IN_ENABLE_MODE_OFST 12
+#define       MC_CMD_PTP_IN_ENABLE_MODE_LEN 4
 /* enum: PTP, version 1 */
 #define          MC_CMD_PTP_MODE_V1 0x0
 /* enum: PTP, version 1, with VLAN headers - deprecated */
@@ -1225,16 +1414,21 @@
 /* MC_CMD_PTP_IN_DISABLE msgrequest */
 #define    MC_CMD_PTP_IN_DISABLE_LEN 8
 /*            MC_CMD_PTP_IN_CMD_OFST 0 */
+/*            MC_CMD_PTP_IN_CMD_LEN 4 */
 /*            MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/*            MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */
 
 /* MC_CMD_PTP_IN_TRANSMIT msgrequest */
 #define    MC_CMD_PTP_IN_TRANSMIT_LENMIN 13
 #define    MC_CMD_PTP_IN_TRANSMIT_LENMAX 252
 #define    MC_CMD_PTP_IN_TRANSMIT_LEN(num) (12+1*(num))
 /*            MC_CMD_PTP_IN_CMD_OFST 0 */
+/*            MC_CMD_PTP_IN_CMD_LEN 4 */
 /*            MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/*            MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */
 /* Transmit packet length */
 #define       MC_CMD_PTP_IN_TRANSMIT_LENGTH_OFST 8
+#define       MC_CMD_PTP_IN_TRANSMIT_LENGTH_LEN 4
 /* Transmit packet data */
 #define       MC_CMD_PTP_IN_TRANSMIT_PACKET_OFST 12
 #define       MC_CMD_PTP_IN_TRANSMIT_PACKET_LEN 1
@@ -1244,17 +1438,30 @@
 /* MC_CMD_PTP_IN_READ_NIC_TIME msgrequest */
 #define    MC_CMD_PTP_IN_READ_NIC_TIME_LEN 8
 /*            MC_CMD_PTP_IN_CMD_OFST 0 */
+/*            MC_CMD_PTP_IN_CMD_LEN 4 */
 /*            MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/*            MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */
+
+/* MC_CMD_PTP_IN_READ_NIC_TIME_V2 msgrequest */
+#define    MC_CMD_PTP_IN_READ_NIC_TIME_V2_LEN 8
+/*            MC_CMD_PTP_IN_CMD_OFST 0 */
+/*            MC_CMD_PTP_IN_CMD_LEN 4 */
+/*            MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/*            MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */
 
 /* MC_CMD_PTP_IN_STATUS msgrequest */
 #define    MC_CMD_PTP_IN_STATUS_LEN 8
 /*            MC_CMD_PTP_IN_CMD_OFST 0 */
+/*            MC_CMD_PTP_IN_CMD_LEN 4 */
 /*            MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/*            MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */
 
 /* MC_CMD_PTP_IN_ADJUST msgrequest */
 #define    MC_CMD_PTP_IN_ADJUST_LEN 24
 /*            MC_CMD_PTP_IN_CMD_OFST 0 */
+/*            MC_CMD_PTP_IN_CMD_LEN 4 */
 /*            MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/*            MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */
 /* Frequency adjustment 40 bit fixed point ns */
 #define       MC_CMD_PTP_IN_ADJUST_FREQ_OFST 8
 #define       MC_CMD_PTP_IN_ADJUST_FREQ_LEN 8
@@ -1262,21 +1469,67 @@
 #define       MC_CMD_PTP_IN_ADJUST_FREQ_HI_OFST 12
 /* enum: Number of fractional bits in frequency adjustment */
 #define          MC_CMD_PTP_IN_ADJUST_BITS 0x28
+/* enum: Number of fractional bits in frequency adjustment when FP44_FREQ_ADJ
+ * is indicated in the MC_CMD_PTP_OUT_GET_ATTRIBUTES command CAPABILITIES
+ * field.
+ */
+#define          MC_CMD_PTP_IN_ADJUST_BITS_FP44 0x2c
 /* Time adjustment in seconds */
 #define       MC_CMD_PTP_IN_ADJUST_SECONDS_OFST 16
+#define       MC_CMD_PTP_IN_ADJUST_SECONDS_LEN 4
 /* Time adjustment major value */
 #define       MC_CMD_PTP_IN_ADJUST_MAJOR_OFST 16
+#define       MC_CMD_PTP_IN_ADJUST_MAJOR_LEN 4
 /* Time adjustment in nanoseconds */
 #define       MC_CMD_PTP_IN_ADJUST_NANOSECONDS_OFST 20
+#define       MC_CMD_PTP_IN_ADJUST_NANOSECONDS_LEN 4
 /* Time adjustment minor value */
 #define       MC_CMD_PTP_IN_ADJUST_MINOR_OFST 20
+#define       MC_CMD_PTP_IN_ADJUST_MINOR_LEN 4
+
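
The FREQ field above is a signed fixed-point frequency offset in ns per ns:
40 fractional bits normally (MC_CMD_PTP_IN_ADJUST_BITS), or 44 when
FP44_FREQ_ADJ is indicated in the MC_CMD_PTP_OUT_GET_ATTRIBUTES
CAPABILITIES field (MC_CMD_PTP_IN_ADJUST_BITS_FP44). A hedged sketch of
converting a parts-per-billion adjustment into this representation, as a
PTP adjfreq path would need to; the helper name is illustrative:

#include <stdint.h>

/* Convert a frequency adjustment in parts-per-billion to the MCDI
 * fixed-point format: ppb / 1e9 scaled by 2^bits, where bits is 40
 * (MC_CMD_PTP_IN_ADJUST_BITS) or 44 (..._BITS_FP44). The 128-bit
 * intermediate (a GCC/Clang extension) keeps the shift exact for any
 * plausible ppb. The result is written to FREQ_LO/FREQ_HI (offsets 8
 * and 12).
 */
static int64_t ptp_ppb_to_fp(int64_t ppb, unsigned int bits)
{
        return (int64_t)(((__int128)ppb << bits) / 1000000000);
}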
+/* MC_CMD_PTP_IN_ADJUST_V2 msgrequest */
+#define    MC_CMD_PTP_IN_ADJUST_V2_LEN 28
+/*            MC_CMD_PTP_IN_CMD_OFST 0 */
+/*            MC_CMD_PTP_IN_CMD_LEN 4 */
+/*            MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/*            MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */
+/* Frequency adjustment 40 bit fixed point ns */
+#define       MC_CMD_PTP_IN_ADJUST_V2_FREQ_OFST 8
+#define       MC_CMD_PTP_IN_ADJUST_V2_FREQ_LEN 8
+#define       MC_CMD_PTP_IN_ADJUST_V2_FREQ_LO_OFST 8
+#define       MC_CMD_PTP_IN_ADJUST_V2_FREQ_HI_OFST 12
+/* enum: Number of fractional bits in frequency adjustment */
+/*               MC_CMD_PTP_IN_ADJUST_BITS 0x28 */
+/* enum: Number of fractional bits in frequency adjustment when FP44_FREQ_ADJ
+ * is indicated in the MC_CMD_PTP_OUT_GET_ATTRIBUTES command CAPABILITIES
+ * field.
+ */
+/*               MC_CMD_PTP_IN_ADJUST_BITS_FP44 0x2c */
+/* Time adjustment in seconds */
+#define       MC_CMD_PTP_IN_ADJUST_V2_SECONDS_OFST 16
+#define       MC_CMD_PTP_IN_ADJUST_V2_SECONDS_LEN 4
+/* Time adjustment major value */
+#define       MC_CMD_PTP_IN_ADJUST_V2_MAJOR_OFST 16
+#define       MC_CMD_PTP_IN_ADJUST_V2_MAJOR_LEN 4
+/* Time adjustment in nanoseconds */
+#define       MC_CMD_PTP_IN_ADJUST_V2_NANOSECONDS_OFST 20
+#define       MC_CMD_PTP_IN_ADJUST_V2_NANOSECONDS_LEN 4
+/* Time adjustment minor value */
+#define       MC_CMD_PTP_IN_ADJUST_V2_MINOR_OFST 20
+#define       MC_CMD_PTP_IN_ADJUST_V2_MINOR_LEN 4
+/* Upper 32bits of major time offset adjustment */
+#define       MC_CMD_PTP_IN_ADJUST_V2_MAJOR_HI_OFST 24
+#define       MC_CMD_PTP_IN_ADJUST_V2_MAJOR_HI_LEN 4
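
ADJUST_V2 and the other _V2 variants below widen the major (seconds) value
to 64 bits by carrying its upper half in a trailing MAJOR_HI dword, leaving
the original layout intact at the front. A sketch of packing it under that
assumption:

#include <stdint.h>

/* Store a 32-bit value little-endian at a given _OFST. */
static void put_dword(uint8_t *buf, unsigned int ofst, uint32_t v)
{
        buf[ofst + 0] = v & 0xff;
        buf[ofst + 1] = (v >> 8) & 0xff;
        buf[ofst + 2] = (v >> 16) & 0xff;
        buf[ofst + 3] = (v >> 24) & 0xff;
}

/* Split a signed 64-bit major time across MAJOR (offset 16) and
 * MAJOR_HI (offset 24) in an MC_CMD_PTP_IN_ADJUST_V2 request. Sketch
 * only; the cast avoids implementation-defined right shift of a
 * negative value.
 */
static void ptp_adjust_v2_set_major(uint8_t *req, int64_t major)
{
        put_dword(req, 16, (uint32_t)major);
        put_dword(req, 24, (uint32_t)((uint64_t)major >> 32));
}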
 
 /* MC_CMD_PTP_IN_SYNCHRONIZE msgrequest */
 #define    MC_CMD_PTP_IN_SYNCHRONIZE_LEN 20
 /*            MC_CMD_PTP_IN_CMD_OFST 0 */
+/*            MC_CMD_PTP_IN_CMD_LEN 4 */
 /*            MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/*            MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */
 /* Number of time readings to capture */
 #define       MC_CMD_PTP_IN_SYNCHRONIZE_NUMTIMESETS_OFST 8
+#define       MC_CMD_PTP_IN_SYNCHRONIZE_NUMTIMESETS_LEN 4
 /* Host address in which to write "synchronization started" indication (64
  * bits)
  */
@@ -1288,42 +1541,59 @@
 /* MC_CMD_PTP_IN_MANFTEST_BASIC msgrequest */
 #define    MC_CMD_PTP_IN_MANFTEST_BASIC_LEN 8
 /*            MC_CMD_PTP_IN_CMD_OFST 0 */
+/*            MC_CMD_PTP_IN_CMD_LEN 4 */
 /*            MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/*            MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */
 
 /* MC_CMD_PTP_IN_MANFTEST_PACKET msgrequest */
 #define    MC_CMD_PTP_IN_MANFTEST_PACKET_LEN 12
 /*            MC_CMD_PTP_IN_CMD_OFST 0 */
+/*            MC_CMD_PTP_IN_CMD_LEN 4 */
 /*            MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/*            MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */
 /* Enable or disable packet testing */
 #define       MC_CMD_PTP_IN_MANFTEST_PACKET_TEST_ENABLE_OFST 8
+#define       MC_CMD_PTP_IN_MANFTEST_PACKET_TEST_ENABLE_LEN 4
 
 /* MC_CMD_PTP_IN_RESET_STATS msgrequest */
 #define    MC_CMD_PTP_IN_RESET_STATS_LEN 8
 /*            MC_CMD_PTP_IN_CMD_OFST 0 */
+/*            MC_CMD_PTP_IN_CMD_LEN 4 */
 /* Reset PTP statistics */
 /*            MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/*            MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */
 
 /* MC_CMD_PTP_IN_DEBUG msgrequest */
 #define    MC_CMD_PTP_IN_DEBUG_LEN 12
 /*            MC_CMD_PTP_IN_CMD_OFST 0 */
+/*            MC_CMD_PTP_IN_CMD_LEN 4 */
 /*            MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/*            MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */
 /* Debug operations */
 #define       MC_CMD_PTP_IN_DEBUG_DEBUG_PARAM_OFST 8
+#define       MC_CMD_PTP_IN_DEBUG_DEBUG_PARAM_LEN 4
 
 /* MC_CMD_PTP_IN_FPGAREAD msgrequest */
 #define    MC_CMD_PTP_IN_FPGAREAD_LEN 16
 /*            MC_CMD_PTP_IN_CMD_OFST 0 */
+/*            MC_CMD_PTP_IN_CMD_LEN 4 */
 /*            MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/*            MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */
 #define       MC_CMD_PTP_IN_FPGAREAD_ADDR_OFST 8
+#define       MC_CMD_PTP_IN_FPGAREAD_ADDR_LEN 4
 #define       MC_CMD_PTP_IN_FPGAREAD_NUMBYTES_OFST 12
+#define       MC_CMD_PTP_IN_FPGAREAD_NUMBYTES_LEN 4
 
 /* MC_CMD_PTP_IN_FPGAWRITE msgrequest */
 #define    MC_CMD_PTP_IN_FPGAWRITE_LENMIN 13
 #define    MC_CMD_PTP_IN_FPGAWRITE_LENMAX 252
 #define    MC_CMD_PTP_IN_FPGAWRITE_LEN(num) (12+1*(num))
 /*            MC_CMD_PTP_IN_CMD_OFST 0 */
+/*            MC_CMD_PTP_IN_CMD_LEN 4 */
 /*            MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/*            MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */
 #define       MC_CMD_PTP_IN_FPGAWRITE_ADDR_OFST 8
+#define       MC_CMD_PTP_IN_FPGAWRITE_ADDR_LEN 4
 #define       MC_CMD_PTP_IN_FPGAWRITE_BUFFER_OFST 12
 #define       MC_CMD_PTP_IN_FPGAWRITE_BUFFER_LEN 1
 #define       MC_CMD_PTP_IN_FPGAWRITE_BUFFER_MINNUM 1
@@ -1332,34 +1602,67 @@
 /* MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST msgrequest */
 #define    MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_LEN 16
 /*            MC_CMD_PTP_IN_CMD_OFST 0 */
+/*            MC_CMD_PTP_IN_CMD_LEN 4 */
 /*            MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/*            MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */
 /* Time adjustment in seconds */
 #define       MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_SECONDS_OFST 8
+#define       MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_SECONDS_LEN 4
 /* Time adjustment major value */
 #define       MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_MAJOR_OFST 8
+#define       MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_MAJOR_LEN 4
 /* Time adjustment in nanoseconds */
 #define       MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_NANOSECONDS_OFST 12
+#define       MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_NANOSECONDS_LEN 4
 /* Time adjustment minor value */
 #define       MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_MINOR_OFST 12
+#define       MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_MINOR_LEN 4
+
+/* MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_V2 msgrequest */
+#define    MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_V2_LEN 20
+/*            MC_CMD_PTP_IN_CMD_OFST 0 */
+/*            MC_CMD_PTP_IN_CMD_LEN 4 */
+/*            MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/*            MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */
+/* Time adjustment in seconds */
+#define       MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_V2_SECONDS_OFST 8
+#define       MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_V2_SECONDS_LEN 4
+/* Time adjustment major value */
+#define       MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_V2_MAJOR_OFST 8
+#define       MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_V2_MAJOR_LEN 4
+/* Time adjustment in nanoseconds */
+#define       MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_V2_NANOSECONDS_OFST 12
+#define       MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_V2_NANOSECONDS_LEN 4
+/* Time adjustment minor value */
+#define       MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_V2_MINOR_OFST 12
+#define       MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_V2_MINOR_LEN 4
+/* Upper 32bits of major time offset adjustment */
+#define       MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_V2_MAJOR_HI_OFST 16
+#define       MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_V2_MAJOR_HI_LEN 4
 
 /* MC_CMD_PTP_IN_CLOCK_FREQ_ADJUST msgrequest */
 #define    MC_CMD_PTP_IN_CLOCK_FREQ_ADJUST_LEN 16
 /*            MC_CMD_PTP_IN_CMD_OFST 0 */
+/*            MC_CMD_PTP_IN_CMD_LEN 4 */
 /*            MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/*            MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */
 /* Frequency adjustment 40 bit fixed point ns */
 #define       MC_CMD_PTP_IN_CLOCK_FREQ_ADJUST_FREQ_OFST 8
 #define       MC_CMD_PTP_IN_CLOCK_FREQ_ADJUST_FREQ_LEN 8
 #define       MC_CMD_PTP_IN_CLOCK_FREQ_ADJUST_FREQ_LO_OFST 8
 #define       MC_CMD_PTP_IN_CLOCK_FREQ_ADJUST_FREQ_HI_OFST 12
-/* enum: Number of fractional bits in frequency adjustment */
-/*               MC_CMD_PTP_IN_ADJUST_BITS 0x28 */
+/*            Enum values, see field(s): */
+/*               MC_CMD_PTP/MC_CMD_PTP_IN_ADJUST/FREQ */
 
 /* MC_CMD_PTP_IN_RX_SET_VLAN_FILTER msgrequest */
 #define    MC_CMD_PTP_IN_RX_SET_VLAN_FILTER_LEN 24
 /*            MC_CMD_PTP_IN_CMD_OFST 0 */
+/*            MC_CMD_PTP_IN_CMD_LEN 4 */
 /*            MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/*            MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */
 /* Number of VLAN tags, 0 if not VLAN */
 #define       MC_CMD_PTP_IN_RX_SET_VLAN_FILTER_NUM_VLAN_TAGS_OFST 8
+#define       MC_CMD_PTP_IN_RX_SET_VLAN_FILTER_NUM_VLAN_TAGS_LEN 4
 /* Set of VLAN tags to filter against */
 #define       MC_CMD_PTP_IN_RX_SET_VLAN_FILTER_VLAN_TAG_OFST 12
 #define       MC_CMD_PTP_IN_RX_SET_VLAN_FILTER_VLAN_TAG_LEN 4
@@ -1368,9 +1671,12 @@
 /* MC_CMD_PTP_IN_RX_SET_UUID_FILTER msgrequest */
 #define    MC_CMD_PTP_IN_RX_SET_UUID_FILTER_LEN 20
 /*            MC_CMD_PTP_IN_CMD_OFST 0 */
+/*            MC_CMD_PTP_IN_CMD_LEN 4 */
 /*            MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/*            MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */
 /* 1 to enable UUID filtering, 0 to disable */
 #define       MC_CMD_PTP_IN_RX_SET_UUID_FILTER_ENABLE_OFST 8
+#define       MC_CMD_PTP_IN_RX_SET_UUID_FILTER_ENABLE_LEN 4
 /* UUID to filter against */
 #define       MC_CMD_PTP_IN_RX_SET_UUID_FILTER_UUID_OFST 12
 #define       MC_CMD_PTP_IN_RX_SET_UUID_FILTER_UUID_LEN 8
@@ -1380,18 +1686,25 @@
 /* MC_CMD_PTP_IN_RX_SET_DOMAIN_FILTER msgrequest */
 #define    MC_CMD_PTP_IN_RX_SET_DOMAIN_FILTER_LEN 16
 /*            MC_CMD_PTP_IN_CMD_OFST 0 */
+/*            MC_CMD_PTP_IN_CMD_LEN 4 */
 /*            MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/*            MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */
 /* 1 to enable Domain filtering, 0 to disable */
 #define       MC_CMD_PTP_IN_RX_SET_DOMAIN_FILTER_ENABLE_OFST 8
+#define       MC_CMD_PTP_IN_RX_SET_DOMAIN_FILTER_ENABLE_LEN 4
 /* Domain number to filter against */
 #define       MC_CMD_PTP_IN_RX_SET_DOMAIN_FILTER_DOMAIN_OFST 12
+#define       MC_CMD_PTP_IN_RX_SET_DOMAIN_FILTER_DOMAIN_LEN 4
 
 /* MC_CMD_PTP_IN_SET_CLK_SRC msgrequest */
 #define    MC_CMD_PTP_IN_SET_CLK_SRC_LEN 12
 /*            MC_CMD_PTP_IN_CMD_OFST 0 */
+/*            MC_CMD_PTP_IN_CMD_LEN 4 */
 /*            MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/*            MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */
 /* Set the clock source. */
 #define       MC_CMD_PTP_IN_SET_CLK_SRC_CLK_OFST 8
+#define       MC_CMD_PTP_IN_SET_CLK_SRC_CLK_LEN 4
 /* enum: Internal. */
 #define          MC_CMD_PTP_CLK_SRC_INTERNAL 0x0
 /* enum: External. */
@@ -1400,42 +1713,56 @@
 /* MC_CMD_PTP_IN_RST_CLK msgrequest */
 #define    MC_CMD_PTP_IN_RST_CLK_LEN 8
 /*            MC_CMD_PTP_IN_CMD_OFST 0 */
+/*            MC_CMD_PTP_IN_CMD_LEN 4 */
 /* Reset value of Timer Reg. */
 /*            MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/*            MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */
 
 /* MC_CMD_PTP_IN_PPS_ENABLE msgrequest */
 #define    MC_CMD_PTP_IN_PPS_ENABLE_LEN 12
 /*            MC_CMD_PTP_IN_CMD_OFST 0 */
+/*            MC_CMD_PTP_IN_CMD_LEN 4 */
 /* Enable or disable */
 #define       MC_CMD_PTP_IN_PPS_ENABLE_OP_OFST 4
+#define       MC_CMD_PTP_IN_PPS_ENABLE_OP_LEN 4
 /* enum: Enable */
 #define          MC_CMD_PTP_ENABLE_PPS 0x0
 /* enum: Disable */
 #define          MC_CMD_PTP_DISABLE_PPS 0x1
-/* Queue id to send events back */
+/* Not used. Events are always sent to function-relative queue 0. */
 #define       MC_CMD_PTP_IN_PPS_ENABLE_QUEUE_ID_OFST 8
+#define       MC_CMD_PTP_IN_PPS_ENABLE_QUEUE_ID_LEN 4
 
 /* MC_CMD_PTP_IN_GET_TIME_FORMAT msgrequest */
 #define    MC_CMD_PTP_IN_GET_TIME_FORMAT_LEN 8
 /*            MC_CMD_PTP_IN_CMD_OFST 0 */
+/*            MC_CMD_PTP_IN_CMD_LEN 4 */
 /*            MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/*            MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */
 
 /* MC_CMD_PTP_IN_GET_ATTRIBUTES msgrequest */
 #define    MC_CMD_PTP_IN_GET_ATTRIBUTES_LEN 8
 /*            MC_CMD_PTP_IN_CMD_OFST 0 */
+/*            MC_CMD_PTP_IN_CMD_LEN 4 */
 /*            MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/*            MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */
 
 /* MC_CMD_PTP_IN_GET_TIMESTAMP_CORRECTIONS msgrequest */
 #define    MC_CMD_PTP_IN_GET_TIMESTAMP_CORRECTIONS_LEN 8
 /*            MC_CMD_PTP_IN_CMD_OFST 0 */
+/*            MC_CMD_PTP_IN_CMD_LEN 4 */
 /*            MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/*            MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */
 
 /* MC_CMD_PTP_IN_TIME_EVENT_SUBSCRIBE msgrequest */
 #define    MC_CMD_PTP_IN_TIME_EVENT_SUBSCRIBE_LEN 12
 /*            MC_CMD_PTP_IN_CMD_OFST 0 */
+/*            MC_CMD_PTP_IN_CMD_LEN 4 */
 /*            MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/*            MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */
 /* Original field containing queue ID. Now extended to include flags. */
 #define       MC_CMD_PTP_IN_TIME_EVENT_SUBSCRIBE_QUEUE_OFST 8
+#define       MC_CMD_PTP_IN_TIME_EVENT_SUBSCRIBE_QUEUE_LEN 4
 #define        MC_CMD_PTP_IN_TIME_EVENT_SUBSCRIBE_QUEUE_ID_LBN 0
 #define        MC_CMD_PTP_IN_TIME_EVENT_SUBSCRIBE_QUEUE_ID_WIDTH 16
 #define        MC_CMD_PTP_IN_TIME_EVENT_SUBSCRIBE_REPORT_SYNC_STATUS_LBN 31
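
The QUEUE dword is now a bitfield: QUEUE_ID occupies bits 0-15 and
REPORT_SYNC_STATUS sits at bit 31, requesting that sync status be reported
in time events. A sketch of composing the word from LBN/WIDTH pairs as used
throughout this header (the macro name is illustrative):

#include <stdint.h>

/* Place a value into a field described by LBN (least-significant bit
 * number) and WIDTH, the convention used for all bitfields here.
 * Valid for width < 32; sketch only.
 */
#define MCDI_FIELD_PREP(lbn, width, val) \
        ((((uint32_t)(val)) & ((1u << (width)) - 1u)) << (lbn))

static uint32_t time_event_subscribe_queue(uint16_t evq_id, int report_sync)
{
        return MCDI_FIELD_PREP(0, 16, evq_id) |         /* QUEUE_ID */
               MCDI_FIELD_PREP(31, 1, !!report_sync);   /* REPORT_SYNC_STATUS */
}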
@@ -1444,29 +1771,39 @@
 /* MC_CMD_PTP_IN_TIME_EVENT_UNSUBSCRIBE msgrequest */
 #define    MC_CMD_PTP_IN_TIME_EVENT_UNSUBSCRIBE_LEN 16
 /*            MC_CMD_PTP_IN_CMD_OFST 0 */
+/*            MC_CMD_PTP_IN_CMD_LEN 4 */
 /*            MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/*            MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */
 /* Unsubscribe options */
 #define       MC_CMD_PTP_IN_TIME_EVENT_UNSUBSCRIBE_CONTROL_OFST 8
+#define       MC_CMD_PTP_IN_TIME_EVENT_UNSUBSCRIBE_CONTROL_LEN 4
 /* enum: Unsubscribe a single queue */
 #define          MC_CMD_PTP_IN_TIME_EVENT_UNSUBSCRIBE_SINGLE 0x0
 /* enum: Unsubscribe all queues */
 #define          MC_CMD_PTP_IN_TIME_EVENT_UNSUBSCRIBE_ALL 0x1
 /* Event queue ID */
 #define       MC_CMD_PTP_IN_TIME_EVENT_UNSUBSCRIBE_QUEUE_OFST 12
+#define       MC_CMD_PTP_IN_TIME_EVENT_UNSUBSCRIBE_QUEUE_LEN 4
 
 /* MC_CMD_PTP_IN_MANFTEST_PPS msgrequest */
 #define    MC_CMD_PTP_IN_MANFTEST_PPS_LEN 12
 /*            MC_CMD_PTP_IN_CMD_OFST 0 */
+/*            MC_CMD_PTP_IN_CMD_LEN 4 */
 /*            MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/*            MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */
 /* 1 to enable PPS test mode, 0 to disable and return result. */
 #define       MC_CMD_PTP_IN_MANFTEST_PPS_TEST_ENABLE_OFST 8
+#define       MC_CMD_PTP_IN_MANFTEST_PPS_TEST_ENABLE_LEN 4
 
 /* MC_CMD_PTP_IN_SET_SYNC_STATUS msgrequest */
 #define    MC_CMD_PTP_IN_SET_SYNC_STATUS_LEN 24
 /*            MC_CMD_PTP_IN_CMD_OFST 0 */
+/*            MC_CMD_PTP_IN_CMD_LEN 4 */
 /*            MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/*            MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */
 /* NIC - Host System Clock Synchronization status */
 #define       MC_CMD_PTP_IN_SET_SYNC_STATUS_STATUS_OFST 8
+#define       MC_CMD_PTP_IN_SET_SYNC_STATUS_STATUS_LEN 4
 /* enum: Host System clock and NIC clock are not in sync */
 #define          MC_CMD_PTP_IN_SET_SYNC_STATUS_NOT_IN_SYNC 0x0
 /* enum: Host System clock and NIC clock are synchronized */
@@ -1475,8 +1812,11 @@
  * no longer in sync.
  */
 #define       MC_CMD_PTP_IN_SET_SYNC_STATUS_TIMEOUT_OFST 12
+#define       MC_CMD_PTP_IN_SET_SYNC_STATUS_TIMEOUT_LEN 4
 #define       MC_CMD_PTP_IN_SET_SYNC_STATUS_RESERVED0_OFST 16
+#define       MC_CMD_PTP_IN_SET_SYNC_STATUS_RESERVED0_LEN 4
 #define       MC_CMD_PTP_IN_SET_SYNC_STATUS_RESERVED1_OFST 20
+#define       MC_CMD_PTP_IN_SET_SYNC_STATUS_RESERVED1_LEN 4
 
 /* MC_CMD_PTP_OUT msgresponse */
 #define    MC_CMD_PTP_OUT_LEN 0
@@ -1485,12 +1825,16 @@
 #define    MC_CMD_PTP_OUT_TRANSMIT_LEN 8
 /* Value of seconds timestamp */
 #define       MC_CMD_PTP_OUT_TRANSMIT_SECONDS_OFST 0
+#define       MC_CMD_PTP_OUT_TRANSMIT_SECONDS_LEN 4
 /* Timestamp major value */
 #define       MC_CMD_PTP_OUT_TRANSMIT_MAJOR_OFST 0
+#define       MC_CMD_PTP_OUT_TRANSMIT_MAJOR_LEN 4
 /* Value of nanoseconds timestamp */
 #define       MC_CMD_PTP_OUT_TRANSMIT_NANOSECONDS_OFST 4
+#define       MC_CMD_PTP_OUT_TRANSMIT_NANOSECONDS_LEN 4
 /* Timestamp minor value */
 #define       MC_CMD_PTP_OUT_TRANSMIT_MINOR_OFST 4
+#define       MC_CMD_PTP_OUT_TRANSMIT_MINOR_LEN 4
 
 /* MC_CMD_PTP_OUT_TIME_EVENT_SUBSCRIBE msgresponse */
 #define    MC_CMD_PTP_OUT_TIME_EVENT_SUBSCRIBE_LEN 0
@@ -1502,47 +1846,85 @@
 #define    MC_CMD_PTP_OUT_READ_NIC_TIME_LEN 8
 /* Value of seconds timestamp */
 #define       MC_CMD_PTP_OUT_READ_NIC_TIME_SECONDS_OFST 0
+#define       MC_CMD_PTP_OUT_READ_NIC_TIME_SECONDS_LEN 4
 /* Timestamp major value */
 #define       MC_CMD_PTP_OUT_READ_NIC_TIME_MAJOR_OFST 0
+#define       MC_CMD_PTP_OUT_READ_NIC_TIME_MAJOR_LEN 4
 /* Value of nanoseconds timestamp */
 #define       MC_CMD_PTP_OUT_READ_NIC_TIME_NANOSECONDS_OFST 4
+#define       MC_CMD_PTP_OUT_READ_NIC_TIME_NANOSECONDS_LEN 4
 /* Timestamp minor value */
 #define       MC_CMD_PTP_OUT_READ_NIC_TIME_MINOR_OFST 4
+#define       MC_CMD_PTP_OUT_READ_NIC_TIME_MINOR_LEN 4
+
+/* MC_CMD_PTP_OUT_READ_NIC_TIME_V2 msgresponse */
+#define    MC_CMD_PTP_OUT_READ_NIC_TIME_V2_LEN 12
+/* Value of seconds timestamp */
+#define       MC_CMD_PTP_OUT_READ_NIC_TIME_V2_SECONDS_OFST 0
+#define       MC_CMD_PTP_OUT_READ_NIC_TIME_V2_SECONDS_LEN 4
+/* Timestamp major value */
+#define       MC_CMD_PTP_OUT_READ_NIC_TIME_V2_MAJOR_OFST 0
+#define       MC_CMD_PTP_OUT_READ_NIC_TIME_V2_MAJOR_LEN 4
+/* Value of nanoseconds timestamp */
+#define       MC_CMD_PTP_OUT_READ_NIC_TIME_V2_NANOSECONDS_OFST 4
+#define       MC_CMD_PTP_OUT_READ_NIC_TIME_V2_NANOSECONDS_LEN 4
+/* Timestamp minor value */
+#define       MC_CMD_PTP_OUT_READ_NIC_TIME_V2_MINOR_OFST 4
+#define       MC_CMD_PTP_OUT_READ_NIC_TIME_V2_MINOR_LEN 4
+/* Upper 32bits of major timestamp value */
+#define       MC_CMD_PTP_OUT_READ_NIC_TIME_V2_MAJOR_HI_OFST 8
+#define       MC_CMD_PTP_OUT_READ_NIC_TIME_V2_MAJOR_HI_LEN 4
 
 /* MC_CMD_PTP_OUT_STATUS msgresponse */
 #define    MC_CMD_PTP_OUT_STATUS_LEN 64
 /* Frequency of NIC's hardware clock */
 #define       MC_CMD_PTP_OUT_STATUS_CLOCK_FREQ_OFST 0
+#define       MC_CMD_PTP_OUT_STATUS_CLOCK_FREQ_LEN 4
 /* Number of packets transmitted and timestamped */
 #define       MC_CMD_PTP_OUT_STATUS_STATS_TX_OFST 4
+#define       MC_CMD_PTP_OUT_STATUS_STATS_TX_LEN 4
 /* Number of packets received and timestamped */
 #define       MC_CMD_PTP_OUT_STATUS_STATS_RX_OFST 8
+#define       MC_CMD_PTP_OUT_STATUS_STATS_RX_LEN 4
 /* Number of packets timestamped by the FPGA */
 #define       MC_CMD_PTP_OUT_STATUS_STATS_TS_OFST 12
+#define       MC_CMD_PTP_OUT_STATUS_STATS_TS_LEN 4
 /* Number of packets filter matched */
 #define       MC_CMD_PTP_OUT_STATUS_STATS_FM_OFST 16
+#define       MC_CMD_PTP_OUT_STATUS_STATS_FM_LEN 4
 /* Number of packets not filter matched */
 #define       MC_CMD_PTP_OUT_STATUS_STATS_NFM_OFST 20
+#define       MC_CMD_PTP_OUT_STATUS_STATS_NFM_LEN 4
 /* Number of PPS overflows (noise on input?) */
 #define       MC_CMD_PTP_OUT_STATUS_STATS_PPS_OFLOW_OFST 24
+#define       MC_CMD_PTP_OUT_STATUS_STATS_PPS_OFLOW_LEN 4
 /* Number of PPS bad periods */
 #define       MC_CMD_PTP_OUT_STATUS_STATS_PPS_BAD_OFST 28
+#define       MC_CMD_PTP_OUT_STATUS_STATS_PPS_BAD_LEN 4
 /* Minimum period of PPS pulse in nanoseconds */
 #define       MC_CMD_PTP_OUT_STATUS_STATS_PPS_PER_MIN_OFST 32
+#define       MC_CMD_PTP_OUT_STATUS_STATS_PPS_PER_MIN_LEN 4
 /* Maximum period of PPS pulse in nanoseconds */
 #define       MC_CMD_PTP_OUT_STATUS_STATS_PPS_PER_MAX_OFST 36
+#define       MC_CMD_PTP_OUT_STATUS_STATS_PPS_PER_MAX_LEN 4
 /* Last period of PPS pulse in nanoseconds */
 #define       MC_CMD_PTP_OUT_STATUS_STATS_PPS_PER_LAST_OFST 40
+#define       MC_CMD_PTP_OUT_STATUS_STATS_PPS_PER_LAST_LEN 4
 /* Mean period of PPS pulse in nanoseconds */
 #define       MC_CMD_PTP_OUT_STATUS_STATS_PPS_PER_MEAN_OFST 44
+#define       MC_CMD_PTP_OUT_STATUS_STATS_PPS_PER_MEAN_LEN 4
 /* Minimum offset of PPS pulse in nanoseconds (signed) */
 #define       MC_CMD_PTP_OUT_STATUS_STATS_PPS_OFF_MIN_OFST 48
+#define       MC_CMD_PTP_OUT_STATUS_STATS_PPS_OFF_MIN_LEN 4
 /* Maximum offset of PPS pulse in nanoseconds (signed) */
 #define       MC_CMD_PTP_OUT_STATUS_STATS_PPS_OFF_MAX_OFST 52
+#define       MC_CMD_PTP_OUT_STATUS_STATS_PPS_OFF_MAX_LEN 4
 /* Last offset of PPS pulse in nanoseconds (signed) */
 #define       MC_CMD_PTP_OUT_STATUS_STATS_PPS_OFF_LAST_OFST 56
+#define       MC_CMD_PTP_OUT_STATUS_STATS_PPS_OFF_LAST_LEN 4
 /* Mean offset of PPS pulse in nanoseconds (signed) */
 #define       MC_CMD_PTP_OUT_STATUS_STATS_PPS_OFF_MEAN_OFST 60
+#define       MC_CMD_PTP_OUT_STATUS_STATS_PPS_OFF_MEAN_LEN 4
 
 /* MC_CMD_PTP_OUT_SYNCHRONIZE msgresponse */
 #define    MC_CMD_PTP_OUT_SYNCHRONIZE_LENMIN 20
@@ -1555,23 +1937,31 @@
 #define       MC_CMD_PTP_OUT_SYNCHRONIZE_TIMESET_MAXNUM 12
 /* Host time immediately before NIC's hardware clock read */
 #define       MC_CMD_PTP_OUT_SYNCHRONIZE_HOSTSTART_OFST 0
+#define       MC_CMD_PTP_OUT_SYNCHRONIZE_HOSTSTART_LEN 4
 /* Value of seconds timestamp */
 #define       MC_CMD_PTP_OUT_SYNCHRONIZE_SECONDS_OFST 4
+#define       MC_CMD_PTP_OUT_SYNCHRONIZE_SECONDS_LEN 4
 /* Timestamp major value */
 #define       MC_CMD_PTP_OUT_SYNCHRONIZE_MAJOR_OFST 4
+#define       MC_CMD_PTP_OUT_SYNCHRONIZE_MAJOR_LEN 4
 /* Value of nanoseconds timestamp */
 #define       MC_CMD_PTP_OUT_SYNCHRONIZE_NANOSECONDS_OFST 8
+#define       MC_CMD_PTP_OUT_SYNCHRONIZE_NANOSECONDS_LEN 4
 /* Timestamp minor value */
 #define       MC_CMD_PTP_OUT_SYNCHRONIZE_MINOR_OFST 8
+#define       MC_CMD_PTP_OUT_SYNCHRONIZE_MINOR_LEN 4
 /* Host time immediately after NIC's hardware clock read */
 #define       MC_CMD_PTP_OUT_SYNCHRONIZE_HOSTEND_OFST 12
+#define       MC_CMD_PTP_OUT_SYNCHRONIZE_HOSTEND_LEN 4
 /* Number of nanoseconds waited after reading NIC's hardware clock */
 #define       MC_CMD_PTP_OUT_SYNCHRONIZE_WAITNS_OFST 16
+#define       MC_CMD_PTP_OUT_SYNCHRONIZE_WAITNS_LEN 4
 
 /* MC_CMD_PTP_OUT_MANFTEST_BASIC msgresponse */
 #define    MC_CMD_PTP_OUT_MANFTEST_BASIC_LEN 8
 /* Results of testing */
 #define       MC_CMD_PTP_OUT_MANFTEST_BASIC_TEST_RESULT_OFST 0
+#define       MC_CMD_PTP_OUT_MANFTEST_BASIC_TEST_RESULT_LEN 4
 /* enum: Successful test */
 #define          MC_CMD_PTP_MANF_SUCCESS 0x0
 /* enum: FPGA load failed */
@@ -1604,15 +1994,19 @@
 #define          MC_CMD_PTP_MANF_CLOCK_READ 0xe
 /* Presence of external oscillator */
 #define       MC_CMD_PTP_OUT_MANFTEST_BASIC_TEST_EXTOSC_OFST 4
+#define       MC_CMD_PTP_OUT_MANFTEST_BASIC_TEST_EXTOSC_LEN 4
 
 /* MC_CMD_PTP_OUT_MANFTEST_PACKET msgresponse */
 #define    MC_CMD_PTP_OUT_MANFTEST_PACKET_LEN 12
 /* Results of testing */
 #define       MC_CMD_PTP_OUT_MANFTEST_PACKET_TEST_RESULT_OFST 0
+#define       MC_CMD_PTP_OUT_MANFTEST_PACKET_TEST_RESULT_LEN 4
 /* Number of packets received by FPGA */
 #define       MC_CMD_PTP_OUT_MANFTEST_PACKET_TEST_FPGACOUNT_OFST 4
+#define       MC_CMD_PTP_OUT_MANFTEST_PACKET_TEST_FPGACOUNT_LEN 4
 /* Number of packets received by Siena filters */
 #define       MC_CMD_PTP_OUT_MANFTEST_PACKET_TEST_FILTERCOUNT_OFST 8
+#define       MC_CMD_PTP_OUT_MANFTEST_PACKET_TEST_FILTERCOUNT_LEN 4
 
 /* MC_CMD_PTP_OUT_FPGAREAD msgresponse */
 #define    MC_CMD_PTP_OUT_FPGAREAD_LENMIN 1
@@ -1628,9 +2022,11 @@
 /* Time format required/used by this NIC. Applies to all PTP MCDI
  * operations that pass times between the host and firmware. If this operation
  * is not supported (older firmware) a format of seconds and nanoseconds should
- * be assumed.
+ * be assumed. Note this enum is deprecated. Do not add to it; use the
+ * TIME_FORMAT field in MC_CMD_PTP_OUT_GET_ATTRIBUTES instead.
  */
 #define       MC_CMD_PTP_OUT_GET_TIME_FORMAT_FORMAT_OFST 0
+#define       MC_CMD_PTP_OUT_GET_TIME_FORMAT_FORMAT_LEN 4
 /* enum: Times are in seconds and nanoseconds */
 #define          MC_CMD_PTP_OUT_GET_TIME_FORMAT_SECONDS_NANOSECONDS 0x0
 /* enum: Major register has units of 16 seconds per tick, minor 8 ns per tick */
@@ -1646,12 +2042,16 @@
  * be assumed.
  */
 #define       MC_CMD_PTP_OUT_GET_ATTRIBUTES_TIME_FORMAT_OFST 0
+#define       MC_CMD_PTP_OUT_GET_ATTRIBUTES_TIME_FORMAT_LEN 4
 /* enum: Times are in seconds and nanoseconds */
 #define          MC_CMD_PTP_OUT_GET_ATTRIBUTES_SECONDS_NANOSECONDS 0x0
 /* enum: Major register has units of 16 seconds per tick, minor 8 ns per tick */
 #define          MC_CMD_PTP_OUT_GET_ATTRIBUTES_16SECONDS_8NANOSECONDS 0x1
 /* enum: Major register has units of seconds, minor 2^-27s per tick */
 #define          MC_CMD_PTP_OUT_GET_ATTRIBUTES_SECONDS_27FRACTION 0x2
+/* enum: Major register units are seconds, minor units are quarter nanoseconds
+ */
+#define          MC_CMD_PTP_OUT_GET_ATTRIBUTES_SECONDS_QTR_NANOSECONDS 0x3
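
Converting a (MAJOR, MINOR) reading to nanoseconds therefore depends on
which of these formats the firmware advertises. A hedged sketch covering
the four enum values above (helper name illustrative):

#include <stdint.h>

/* Convert a (major, minor) NIC time reading to nanoseconds according to
 * the TIME_FORMAT advertised by MC_CMD_PTP_OUT_GET_ATTRIBUTES. Sketch;
 * enum values as defined above.
 */
static uint64_t nic_time_to_ns(uint32_t fmt, uint64_t major, uint32_t minor)
{
        switch (fmt) {
        case 0x0: /* SECONDS_NANOSECONDS */
                return major * 1000000000ULL + minor;
        case 0x1: /* 16SECONDS_8NANOSECONDS */
                return major * 16ULL * 1000000000ULL + minor * 8ULL;
        case 0x2: /* SECONDS_27FRACTION: minor is 2^-27 s per tick */
                return major * 1000000000ULL +
                       (((uint64_t)minor * 1000000000ULL) >> 27);
        case 0x3: /* SECONDS_QTR_NANOSECONDS */
                return major * 1000000000ULL + minor / 4;
        default:
                return 0; /* unknown format */
        }
}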
 /* Minimum acceptable value for a corrected synchronization timeset. When
  * comparing host and NIC clock times, the MC returns a set of samples that
  * contain the host start and end time, the MC time when the host start was
@@ -1660,46 +2060,66 @@
  * end and start times minus the time that the MC waited for host end.
  */
 #define       MC_CMD_PTP_OUT_GET_ATTRIBUTES_SYNC_WINDOW_MIN_OFST 4
+#define       MC_CMD_PTP_OUT_GET_ATTRIBUTES_SYNC_WINDOW_MIN_LEN 4
 /* Various PTP capabilities */
 #define       MC_CMD_PTP_OUT_GET_ATTRIBUTES_CAPABILITIES_OFST 8
+#define       MC_CMD_PTP_OUT_GET_ATTRIBUTES_CAPABILITIES_LEN 4
 #define        MC_CMD_PTP_OUT_GET_ATTRIBUTES_REPORT_SYNC_STATUS_LBN 0
 #define        MC_CMD_PTP_OUT_GET_ATTRIBUTES_REPORT_SYNC_STATUS_WIDTH 1
 #define        MC_CMD_PTP_OUT_GET_ATTRIBUTES_RX_TSTAMP_OOB_LBN 1
 #define        MC_CMD_PTP_OUT_GET_ATTRIBUTES_RX_TSTAMP_OOB_WIDTH 1
+#define        MC_CMD_PTP_OUT_GET_ATTRIBUTES_64BIT_SECONDS_LBN 2
+#define        MC_CMD_PTP_OUT_GET_ATTRIBUTES_64BIT_SECONDS_WIDTH 1
+#define        MC_CMD_PTP_OUT_GET_ATTRIBUTES_FP44_FREQ_ADJ_LBN 3
+#define        MC_CMD_PTP_OUT_GET_ATTRIBUTES_FP44_FREQ_ADJ_WIDTH 1
 #define       MC_CMD_PTP_OUT_GET_ATTRIBUTES_RESERVED0_OFST 12
+#define       MC_CMD_PTP_OUT_GET_ATTRIBUTES_RESERVED0_LEN 4
 #define       MC_CMD_PTP_OUT_GET_ATTRIBUTES_RESERVED1_OFST 16
+#define       MC_CMD_PTP_OUT_GET_ATTRIBUTES_RESERVED1_LEN 4
 #define       MC_CMD_PTP_OUT_GET_ATTRIBUTES_RESERVED2_OFST 20
+#define       MC_CMD_PTP_OUT_GET_ATTRIBUTES_RESERVED2_LEN 4
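
Putting the SYNC_WINDOW_MIN rule above into code: for each SYNCHRONIZE
timeset, the corrected window is HOSTEND - HOSTSTART minus WAITNS, and a
sample is only usable if that window is at least the advertised minimum.
A sketch (field names refer to MC_CMD_PTP_OUT_SYNCHRONIZE above; the
helper name is illustrative):

#include <stdbool.h>
#include <stdint.h>

/* Apply the corrected-window test to one SYNCHRONIZE timeset sample:
 * hoststart/hostend are the host timestamps bracketing the NIC clock
 * read, waitns the MC's reported wait after reading it. Sketch only;
 * the 32-bit subtraction wraps safely.
 */
static bool timeset_acceptable(uint32_t hoststart, uint32_t hostend,
                               uint32_t waitns, uint32_t window_min)
{
        uint32_t window = hostend - hoststart;

        return window >= waitns && window - waitns >= window_min;
}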
 
 /* MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS msgresponse */
 #define    MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_LEN 16
 /* Uncorrected error on PTP transmit timestamps in NIC clock format */
 #define       MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_TRANSMIT_OFST 0
+#define       MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_TRANSMIT_LEN 4
 /* Uncorrected error on PTP receive timestamps in NIC clock format */
 #define       MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_RECEIVE_OFST 4
+#define       MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_RECEIVE_LEN 4
 /* Uncorrected error on PPS output in NIC clock format */
 #define       MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_PPS_OUT_OFST 8
+#define       MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_PPS_OUT_LEN 4
 /* Uncorrected error on PPS input in NIC clock format */
 #define       MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_PPS_IN_OFST 12
+#define       MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_PPS_IN_LEN 4
 
 /* MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_V2 msgresponse */
 #define    MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_V2_LEN 24
 /* Uncorrected error on PTP transmit timestamps in NIC clock format */
 #define       MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_V2_PTP_TX_OFST 0
+#define       MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_V2_PTP_TX_LEN 4
 /* Uncorrected error on PTP receive timestamps in NIC clock format */
 #define       MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_V2_PTP_RX_OFST 4
+#define       MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_V2_PTP_RX_LEN 4
 /* Uncorrected error on PPS output in NIC clock format */
 #define       MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_V2_PPS_OUT_OFST 8
+#define       MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_V2_PPS_OUT_LEN 4
 /* Uncorrected error on PPS input in NIC clock format */
 #define       MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_V2_PPS_IN_OFST 12
+#define       MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_V2_PPS_IN_LEN 4
 /* Uncorrected error on non-PTP transmit timestamps in NIC clock format */
 #define       MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_V2_GENERAL_TX_OFST 16
+#define       MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_V2_GENERAL_TX_LEN 4
 /* Uncorrected error on non-PTP receive timestamps in NIC clock format */
 #define       MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_V2_GENERAL_RX_OFST 20
+#define       MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_V2_GENERAL_RX_LEN 4
 
 /* MC_CMD_PTP_OUT_MANFTEST_PPS msgresponse */
 #define    MC_CMD_PTP_OUT_MANFTEST_PPS_LEN 4
 /* Results of testing */
 #define       MC_CMD_PTP_OUT_MANFTEST_PPS_TEST_RESULT_OFST 0
+#define       MC_CMD_PTP_OUT_MANFTEST_PPS_TEST_RESULT_LEN 4
 /*            Enum values, see field(s): */
 /*               MC_CMD_PTP_OUT_MANFTEST_BASIC/TEST_RESULT */
 
@@ -1713,14 +2133,17 @@
  */
 #define MC_CMD_CSR_READ32 0xc
 
-#define MC_CMD_0xc_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+#define MC_CMD_0xc_PRIVILEGE_CTG SRIOV_CTG_INSECURE
 
 /* MC_CMD_CSR_READ32_IN msgrequest */
 #define    MC_CMD_CSR_READ32_IN_LEN 12
 /* Address */
 #define       MC_CMD_CSR_READ32_IN_ADDR_OFST 0
+#define       MC_CMD_CSR_READ32_IN_ADDR_LEN 4
 #define       MC_CMD_CSR_READ32_IN_STEP_OFST 4
+#define       MC_CMD_CSR_READ32_IN_STEP_LEN 4
 #define       MC_CMD_CSR_READ32_IN_NUMWORDS_OFST 8
+#define       MC_CMD_CSR_READ32_IN_NUMWORDS_LEN 4
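
A request therefore names a base ADDR, a STEP added between successive
reads, and NUMWORDS, so one command can walk a register window. A sketch
of packing the 12-byte request with the put_dword() helper sketched
earlier (debug use only: note the INSECURE privilege category):

/* Fill an MC_CMD_CSR_READ32 request: base address, per-word stride,
 * and word count, at offsets 0, 4 and 8 as defined above. Sketch only.
 */
static void csr_read32_req(uint8_t req[12], uint32_t addr,
                           uint32_t step, uint32_t numwords)
{
        put_dword(req, 0, addr);     /* MC_CMD_CSR_READ32_IN_ADDR_OFST */
        put_dword(req, 4, step);     /* MC_CMD_CSR_READ32_IN_STEP_OFST */
        put_dword(req, 8, numwords); /* MC_CMD_CSR_READ32_IN_NUMWORDS_OFST */
}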
 
 /* MC_CMD_CSR_READ32_OUT msgresponse */
 #define    MC_CMD_CSR_READ32_OUT_LENMIN 4
@@ -1739,7 +2162,7 @@
  */
 #define MC_CMD_CSR_WRITE32 0xd
 
-#define MC_CMD_0xd_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+#define MC_CMD_0xd_PRIVILEGE_CTG SRIOV_CTG_INSECURE
 
 /* MC_CMD_CSR_WRITE32_IN msgrequest */
 #define    MC_CMD_CSR_WRITE32_IN_LENMIN 12
@@ -1747,7 +2170,9 @@
 #define    MC_CMD_CSR_WRITE32_IN_LEN(num) (8+4*(num))
 /* Address */
 #define       MC_CMD_CSR_WRITE32_IN_ADDR_OFST 0
+#define       MC_CMD_CSR_WRITE32_IN_ADDR_LEN 4
 #define       MC_CMD_CSR_WRITE32_IN_STEP_OFST 4
+#define       MC_CMD_CSR_WRITE32_IN_STEP_LEN 4
 #define       MC_CMD_CSR_WRITE32_IN_BUFFER_OFST 8
 #define       MC_CMD_CSR_WRITE32_IN_BUFFER_LEN 4
 #define       MC_CMD_CSR_WRITE32_IN_BUFFER_MINNUM 1
@@ -1756,6 +2181,7 @@
 /* MC_CMD_CSR_WRITE32_OUT msgresponse */
 #define    MC_CMD_CSR_WRITE32_OUT_LEN 4
 #define       MC_CMD_CSR_WRITE32_OUT_STATUS_OFST 0
+#define       MC_CMD_CSR_WRITE32_OUT_STATUS_LEN 4
 
 
 /***********************************/
@@ -1776,6 +2202,7 @@
  * sensors.
  */
 #define       MC_CMD_HP_IN_SUBCMD_OFST 0
+#define       MC_CMD_HP_IN_SUBCMD_LEN 4
 /* enum: OCSD (Option Card Sensor Data) sub-command. */
 #define          MC_CMD_HP_IN_OCSD_SUBCMD 0x0
 /* enum: Last known valid HP sub-command. */
@@ -1790,10 +2217,12 @@
  * NULL.)
  */
 #define       MC_CMD_HP_IN_OCSD_INTERVAL_OFST 12
+#define       MC_CMD_HP_IN_OCSD_INTERVAL_LEN 4
 
 /* MC_CMD_HP_OUT msgresponse */
 #define    MC_CMD_HP_OUT_LEN 4
 #define       MC_CMD_HP_OUT_OCSD_STATUS_OFST 0
+#define       MC_CMD_HP_OUT_OCSD_STATUS_LEN 4
 /* enum: OCSD stopped for this card. */
 #define          MC_CMD_HP_OUT_OCSD_STOPPED 0x1
 /* enum: OCSD was successfully started with the address provided. */
@@ -1838,29 +2267,35 @@
  * external devices.
  */
 #define       MC_CMD_MDIO_READ_IN_BUS_OFST 0
+#define       MC_CMD_MDIO_READ_IN_BUS_LEN 4
 /* enum: Internal. */
 #define          MC_CMD_MDIO_BUS_INTERNAL 0x0
 /* enum: External. */
 #define          MC_CMD_MDIO_BUS_EXTERNAL 0x1
 /* Port address */
 #define       MC_CMD_MDIO_READ_IN_PRTAD_OFST 4
+#define       MC_CMD_MDIO_READ_IN_PRTAD_LEN 4
 /* Device Address or clause 22. */
 #define       MC_CMD_MDIO_READ_IN_DEVAD_OFST 8
+#define       MC_CMD_MDIO_READ_IN_DEVAD_LEN 4
 /* enum: By default all the MCDI MDIO operations are performed in clause 45
  * mode. If you want to use clause 22, set DEVAD = MC_CMD_MDIO_CLAUSE22.
  */
 #define          MC_CMD_MDIO_CLAUSE22 0x20
 /* Address */
 #define       MC_CMD_MDIO_READ_IN_ADDR_OFST 12
+#define       MC_CMD_MDIO_READ_IN_ADDR_LEN 4
 
 /* MC_CMD_MDIO_READ_OUT msgresponse */
 #define    MC_CMD_MDIO_READ_OUT_LEN 8
 /* Value */
 #define       MC_CMD_MDIO_READ_OUT_VALUE_OFST 0
+#define       MC_CMD_MDIO_READ_OUT_VALUE_LEN 4
 /* Status the MDIO commands return the raw status bits from the MDIO block. A
  * "good" transaction should have the DONE bit set and all other bits clear.
  */
 #define       MC_CMD_MDIO_READ_OUT_STATUS_OFST 4
+#define       MC_CMD_MDIO_READ_OUT_STATUS_LEN 4
 /* enum: Good. */
 #define          MC_CMD_MDIO_STATUS_GOOD 0x8
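
So a clause 22 access is just a clause 45 request with DEVAD set to the
magic value 0x20. A sketch of building such a read, reusing the
put_dword() helper from earlier; on completion the response STATUS should
equal MC_CMD_MDIO_STATUS_GOOD (DONE set, all other bits clear):

/* Build an MC_CMD_MDIO_READ request for a clause 22 register read:
 * BUS selects internal/external, PRTAD is the port address, DEVAD =
 * 0x20 (MC_CMD_MDIO_CLAUSE22) selects clause 22, ADDR is the register
 * number. Offsets are the _OFST values above; sketch only.
 */
static void mdio_read22_req(uint8_t req[16], uint32_t bus,
                            uint32_t prtad, uint32_t regnum)
{
        put_dword(req, 0, bus);     /* MC_CMD_MDIO_READ_IN_BUS_OFST */
        put_dword(req, 4, prtad);   /* MC_CMD_MDIO_READ_IN_PRTAD_OFST */
        put_dword(req, 8, 0x20);    /* DEVAD = MC_CMD_MDIO_CLAUSE22 */
        put_dword(req, 12, regnum); /* MC_CMD_MDIO_READ_IN_ADDR_OFST */
}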
 
@@ -1879,22 +2314,27 @@
  * external devices.
  */
 #define       MC_CMD_MDIO_WRITE_IN_BUS_OFST 0
+#define       MC_CMD_MDIO_WRITE_IN_BUS_LEN 4
 /* enum: Internal. */
 /*               MC_CMD_MDIO_BUS_INTERNAL 0x0 */
 /* enum: External. */
 /*               MC_CMD_MDIO_BUS_EXTERNAL 0x1 */
 /* Port address */
 #define       MC_CMD_MDIO_WRITE_IN_PRTAD_OFST 4
+#define       MC_CMD_MDIO_WRITE_IN_PRTAD_LEN 4
 /* Device Address or clause 22. */
 #define       MC_CMD_MDIO_WRITE_IN_DEVAD_OFST 8
+#define       MC_CMD_MDIO_WRITE_IN_DEVAD_LEN 4
 /* enum: By default all the MCDI MDIO operations are performed in clause 45
  * mode. If you want to use clause 22, set DEVAD = MC_CMD_MDIO_CLAUSE22.
  */
 /*               MC_CMD_MDIO_CLAUSE22 0x20 */
 /* Address */
 #define       MC_CMD_MDIO_WRITE_IN_ADDR_OFST 12
+#define       MC_CMD_MDIO_WRITE_IN_ADDR_LEN 4
 /* Value */
 #define       MC_CMD_MDIO_WRITE_IN_VALUE_OFST 16
+#define       MC_CMD_MDIO_WRITE_IN_VALUE_LEN 4
 
 /* MC_CMD_MDIO_WRITE_OUT msgresponse */
 #define    MC_CMD_MDIO_WRITE_OUT_LEN 4
@@ -1902,6 +2342,7 @@
  * "good" transaction should have the DONE bit set and all other bits clear.
  */
 #define       MC_CMD_MDIO_WRITE_OUT_STATUS_OFST 0
+#define       MC_CMD_MDIO_WRITE_OUT_STATUS_LEN 4
 /* enum: Good. */
 /*               MC_CMD_MDIO_STATUS_GOOD 0x8 */
 
@@ -1912,7 +2353,7 @@
  */
 #define MC_CMD_DBI_WRITE 0x12
 
-#define MC_CMD_0x12_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+#define MC_CMD_0x12_PRIVILEGE_CTG SRIOV_CTG_INSECURE
 
 /* MC_CMD_DBI_WRITE_IN msgrequest */
 #define    MC_CMD_DBI_WRITE_IN_LENMIN 12
@@ -1932,9 +2373,11 @@
 /* MC_CMD_DBIWROP_TYPEDEF structuredef */
 #define    MC_CMD_DBIWROP_TYPEDEF_LEN 12
 #define       MC_CMD_DBIWROP_TYPEDEF_ADDRESS_OFST 0
+#define       MC_CMD_DBIWROP_TYPEDEF_ADDRESS_LEN 4
 #define       MC_CMD_DBIWROP_TYPEDEF_ADDRESS_LBN 0
 #define       MC_CMD_DBIWROP_TYPEDEF_ADDRESS_WIDTH 32
 #define       MC_CMD_DBIWROP_TYPEDEF_PARMS_OFST 4
+#define       MC_CMD_DBIWROP_TYPEDEF_PARMS_LEN 4
 #define        MC_CMD_DBIWROP_TYPEDEF_VF_NUM_LBN 16
 #define        MC_CMD_DBIWROP_TYPEDEF_VF_NUM_WIDTH 16
 #define        MC_CMD_DBIWROP_TYPEDEF_VF_ACTIVE_LBN 15
@@ -1944,6 +2387,7 @@
 #define       MC_CMD_DBIWROP_TYPEDEF_PARMS_LBN 32
 #define       MC_CMD_DBIWROP_TYPEDEF_PARMS_WIDTH 32
 #define       MC_CMD_DBIWROP_TYPEDEF_VALUE_OFST 8
+#define       MC_CMD_DBIWROP_TYPEDEF_VALUE_LEN 4
 #define       MC_CMD_DBIWROP_TYPEDEF_VALUE_LBN 64
 #define       MC_CMD_DBIWROP_TYPEDEF_VALUE_WIDTH 32
 
@@ -1959,13 +2403,16 @@
 #define    MC_CMD_PORT_READ32_IN_LEN 4
 /* Address */
 #define       MC_CMD_PORT_READ32_IN_ADDR_OFST 0
+#define       MC_CMD_PORT_READ32_IN_ADDR_LEN 4
 
 /* MC_CMD_PORT_READ32_OUT msgresponse */
 #define    MC_CMD_PORT_READ32_OUT_LEN 8
 /* Value */
 #define       MC_CMD_PORT_READ32_OUT_VALUE_OFST 0
+#define       MC_CMD_PORT_READ32_OUT_VALUE_LEN 4
 /* Status */
 #define       MC_CMD_PORT_READ32_OUT_STATUS_OFST 4
+#define       MC_CMD_PORT_READ32_OUT_STATUS_LEN 4
 
 
 /***********************************/
@@ -1979,13 +2426,16 @@
 #define    MC_CMD_PORT_WRITE32_IN_LEN 8
 /* Address */
 #define       MC_CMD_PORT_WRITE32_IN_ADDR_OFST 0
+#define       MC_CMD_PORT_WRITE32_IN_ADDR_LEN 4
 /* Value */
 #define       MC_CMD_PORT_WRITE32_IN_VALUE_OFST 4
+#define       MC_CMD_PORT_WRITE32_IN_VALUE_LEN 4
 
 /* MC_CMD_PORT_WRITE32_OUT msgresponse */
 #define    MC_CMD_PORT_WRITE32_OUT_LEN 4
 /* Status */
 #define       MC_CMD_PORT_WRITE32_OUT_STATUS_OFST 0
+#define       MC_CMD_PORT_WRITE32_OUT_STATUS_LEN 4
 
 
 /***********************************/
@@ -1999,6 +2449,7 @@
 #define    MC_CMD_PORT_READ128_IN_LEN 4
 /* Address */
 #define       MC_CMD_PORT_READ128_IN_ADDR_OFST 0
+#define       MC_CMD_PORT_READ128_IN_ADDR_LEN 4
 
 /* MC_CMD_PORT_READ128_OUT msgresponse */
 #define    MC_CMD_PORT_READ128_OUT_LEN 20
@@ -2007,6 +2458,7 @@
 #define       MC_CMD_PORT_READ128_OUT_VALUE_LEN 16
 /* Status */
 #define       MC_CMD_PORT_READ128_OUT_STATUS_OFST 16
+#define       MC_CMD_PORT_READ128_OUT_STATUS_LEN 4
 
 
 /***********************************/
@@ -2020,6 +2472,7 @@
 #define    MC_CMD_PORT_WRITE128_IN_LEN 20
 /* Address */
 #define       MC_CMD_PORT_WRITE128_IN_ADDR_OFST 0
+#define       MC_CMD_PORT_WRITE128_IN_ADDR_LEN 4
 /* Value */
 #define       MC_CMD_PORT_WRITE128_IN_VALUE_OFST 4
 #define       MC_CMD_PORT_WRITE128_IN_VALUE_LEN 16
@@ -2028,6 +2481,7 @@
 #define    MC_CMD_PORT_WRITE128_OUT_LEN 4
 /* Status */
 #define       MC_CMD_PORT_WRITE128_OUT_STATUS_OFST 0
+#define       MC_CMD_PORT_WRITE128_OUT_STATUS_LEN 4
 
 /* MC_CMD_CAPABILITIES structuredef */
 #define    MC_CMD_CAPABILITIES_LEN 4
@@ -2072,24 +2526,54 @@
 #define    MC_CMD_GET_BOARD_CFG_OUT_LENMAX 136
 #define    MC_CMD_GET_BOARD_CFG_OUT_LEN(num) (72+2*(num))
 #define       MC_CMD_GET_BOARD_CFG_OUT_BOARD_TYPE_OFST 0
+#define       MC_CMD_GET_BOARD_CFG_OUT_BOARD_TYPE_LEN 4
 #define       MC_CMD_GET_BOARD_CFG_OUT_BOARD_NAME_OFST 4
 #define       MC_CMD_GET_BOARD_CFG_OUT_BOARD_NAME_LEN 32
-/* See MC_CMD_CAPABILITIES */
+/* Capabilities for Siena Port0 (see struct MC_CMD_CAPABILITIES). Unused on
+ * EF10 and later (use MC_CMD_GET_CAPABILITIES).
+ */
 #define       MC_CMD_GET_BOARD_CFG_OUT_CAPABILITIES_PORT0_OFST 36
-/* See MC_CMD_CAPABILITIES */
+#define       MC_CMD_GET_BOARD_CFG_OUT_CAPABILITIES_PORT0_LEN 4
+/* Capabilities for Siena Port1 (see struct MC_CMD_CAPABILITIES). Unused on
+ * EF10 and later (use MC_CMD_GET_CAPABILITIES).
+ */
 #define       MC_CMD_GET_BOARD_CFG_OUT_CAPABILITIES_PORT1_OFST 40
+#define       MC_CMD_GET_BOARD_CFG_OUT_CAPABILITIES_PORT1_LEN 4
+/* Base MAC address for Siena Port0. Unused on EF10 and later (use
+ * MC_CMD_GET_MAC_ADDRESSES).
+ */
 #define       MC_CMD_GET_BOARD_CFG_OUT_MAC_ADDR_BASE_PORT0_OFST 44
 #define       MC_CMD_GET_BOARD_CFG_OUT_MAC_ADDR_BASE_PORT0_LEN 6
+/* Base MAC address for Siena Port1. Unused on EF10 and later (use
+ * MC_CMD_GET_MAC_ADDRESSES).
+ */
 #define       MC_CMD_GET_BOARD_CFG_OUT_MAC_ADDR_BASE_PORT1_OFST 50
 #define       MC_CMD_GET_BOARD_CFG_OUT_MAC_ADDR_BASE_PORT1_LEN 6
+/* Size of MAC address pool for Siena Port0. Unused on EF10 and later (use
+ * MC_CMD_GET_MAC_ADDRESSES).
+ */
 #define       MC_CMD_GET_BOARD_CFG_OUT_MAC_COUNT_PORT0_OFST 56
+#define       MC_CMD_GET_BOARD_CFG_OUT_MAC_COUNT_PORT0_LEN 4
+/* Size of MAC address pool for Siena Port1. Unused on EF10 and later (use
+ * MC_CMD_GET_MAC_ADDRESSES).
+ */
 #define       MC_CMD_GET_BOARD_CFG_OUT_MAC_COUNT_PORT1_OFST 60
+#define       MC_CMD_GET_BOARD_CFG_OUT_MAC_COUNT_PORT1_LEN 4
+/* Increment between addresses in MAC address pool for Siena Port0. Unused on
+ * EF10 and later (use MC_CMD_GET_MAC_ADDRESSES).
+ */
 #define       MC_CMD_GET_BOARD_CFG_OUT_MAC_STRIDE_PORT0_OFST 64
+#define       MC_CMD_GET_BOARD_CFG_OUT_MAC_STRIDE_PORT0_LEN 4
+/* Increment between addresses in MAC address pool for Siena Port1. Unused on
+ * EF10 and later (use MC_CMD_GET_MAC_ADDRESSES).
+ */
 #define       MC_CMD_GET_BOARD_CFG_OUT_MAC_STRIDE_PORT1_OFST 68
-/* This field contains a 16-bit value for each of the types of NVRAM area. The
- * values are defined in the firmware/mc/platform/.c file for a specific board
- * type, but otherwise have no meaning to the MC; they are used by the driver
- * to manage selection of appropriate firmware updates.
+#define       MC_CMD_GET_BOARD_CFG_OUT_MAC_STRIDE_PORT1_LEN 4
+/* Siena only. This field contains a 16-bit value for each of the types of
+ * NVRAM area. The values are defined in the firmware/mc/platform/.c file for a
+ * specific board type, but otherwise have no meaning to the MC; they are used
+ * by the driver to manage selection of appropriate firmware updates. Unused on
+ * EF10 and later (use MC_CMD_NVRAM_METADATA).
  */
 #define       MC_CMD_GET_BOARD_CFG_OUT_FW_SUBTYPE_LIST_OFST 72
 #define       MC_CMD_GET_BOARD_CFG_OUT_FW_SUBTYPE_LIST_LEN 2
@@ -2103,7 +2587,7 @@
  */
 #define MC_CMD_DBI_READX 0x19
 
-#define MC_CMD_0x19_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+#define MC_CMD_0x19_PRIVILEGE_CTG SRIOV_CTG_INSECURE
 
 /* MC_CMD_DBI_READX_IN msgrequest */
 #define    MC_CMD_DBI_READX_IN_LENMIN 8
@@ -2130,9 +2614,11 @@
 /* MC_CMD_DBIRDOP_TYPEDEF structuredef */
 #define    MC_CMD_DBIRDOP_TYPEDEF_LEN 8
 #define       MC_CMD_DBIRDOP_TYPEDEF_ADDRESS_OFST 0
+#define       MC_CMD_DBIRDOP_TYPEDEF_ADDRESS_LEN 4
 #define       MC_CMD_DBIRDOP_TYPEDEF_ADDRESS_LBN 0
 #define       MC_CMD_DBIRDOP_TYPEDEF_ADDRESS_WIDTH 32
 #define       MC_CMD_DBIRDOP_TYPEDEF_PARMS_OFST 4
+#define       MC_CMD_DBIRDOP_TYPEDEF_PARMS_LEN 4
 #define        MC_CMD_DBIRDOP_TYPEDEF_VF_NUM_LBN 16
 #define        MC_CMD_DBIRDOP_TYPEDEF_VF_NUM_WIDTH 16
 #define        MC_CMD_DBIRDOP_TYPEDEF_VF_ACTIVE_LBN 15
@@ -2149,7 +2635,7 @@
  */
 #define MC_CMD_SET_RAND_SEED 0x1a
 
-#define MC_CMD_0x1a_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+#define MC_CMD_0x1a_PRIVILEGE_CTG SRIOV_CTG_INSECURE
 
 /* MC_CMD_SET_RAND_SEED_IN msgrequest */
 #define    MC_CMD_SET_RAND_SEED_IN_LEN 16
@@ -2198,14 +2684,17 @@
 #define    MC_CMD_DRV_ATTACH_IN_LEN 12
 /* new state to set if UPDATE=1 */
 #define       MC_CMD_DRV_ATTACH_IN_NEW_STATE_OFST 0
+#define       MC_CMD_DRV_ATTACH_IN_NEW_STATE_LEN 4
 #define        MC_CMD_DRV_ATTACH_LBN 0
 #define        MC_CMD_DRV_ATTACH_WIDTH 1
 #define        MC_CMD_DRV_PREBOOT_LBN 1
 #define        MC_CMD_DRV_PREBOOT_WIDTH 1
 /* 1 to set new state, or 0 to just report the existing state */
 #define       MC_CMD_DRV_ATTACH_IN_UPDATE_OFST 4
+#define       MC_CMD_DRV_ATTACH_IN_UPDATE_LEN 4
 /* preferred datapath firmware (for Huntington; ignored for Siena) */
 #define       MC_CMD_DRV_ATTACH_IN_FIRMWARE_ID_OFST 8
+#define       MC_CMD_DRV_ATTACH_IN_FIRMWARE_ID_LEN 4
 /* enum: Prefer to use full featured firmware */
 #define          MC_CMD_FW_FULL_FEATURED 0x0
 /* enum: Prefer to use firmware with fewer features but lower latency */
@@ -2229,13 +2718,16 @@
 #define    MC_CMD_DRV_ATTACH_OUT_LEN 4
 /* previous or existing state, see the bitmask at NEW_STATE */
 #define       MC_CMD_DRV_ATTACH_OUT_OLD_STATE_OFST 0
+#define       MC_CMD_DRV_ATTACH_OUT_OLD_STATE_LEN 4
 
 /* MC_CMD_DRV_ATTACH_EXT_OUT msgresponse */
 #define    MC_CMD_DRV_ATTACH_EXT_OUT_LEN 8
 /* previous or existing state, see the bitmask at NEW_STATE */
 #define       MC_CMD_DRV_ATTACH_EXT_OUT_OLD_STATE_OFST 0
+#define       MC_CMD_DRV_ATTACH_EXT_OUT_OLD_STATE_LEN 4
 /* Flags associated with this function */
 #define       MC_CMD_DRV_ATTACH_EXT_OUT_FUNC_FLAGS_OFST 4
+#define       MC_CMD_DRV_ATTACH_EXT_OUT_FUNC_FLAGS_LEN 4
 /* enum: Labels the lowest-numbered function visible to the OS */
 #define          MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_PRIMARY 0x0
 /* enum: The function can control the link state of the physical port it is
@@ -2260,6 +2752,7 @@
 #define    MC_CMD_SHMUART_IN_LEN 4
 /* ??? */
 #define       MC_CMD_SHMUART_IN_FLAG_OFST 0
+#define       MC_CMD_SHMUART_IN_FLAG_LEN 4
 
 /* MC_CMD_SHMUART_OUT msgresponse */
 #define    MC_CMD_SHMUART_OUT_LEN 0
@@ -2297,6 +2790,7 @@
  * (TBD).
  */
 #define       MC_CMD_ENTITY_RESET_IN_FLAG_OFST 0
+#define       MC_CMD_ENTITY_RESET_IN_FLAG_LEN 4
 #define        MC_CMD_ENTITY_RESET_IN_FUNCTION_RESOURCE_RESET_LBN 0
 #define        MC_CMD_ENTITY_RESET_IN_FUNCTION_RESOURCE_RESET_WIDTH 1
 
@@ -2314,8 +2808,10 @@
 #define    MC_CMD_PCIE_CREDITS_IN_LEN 8
 /* poll period. 0 is disabled */
 #define       MC_CMD_PCIE_CREDITS_IN_POLL_PERIOD_OFST 0
+#define       MC_CMD_PCIE_CREDITS_IN_POLL_PERIOD_LEN 4
 /* wipe statistics */
 #define       MC_CMD_PCIE_CREDITS_IN_WIPE_OFST 4
+#define       MC_CMD_PCIE_CREDITS_IN_WIPE_LEN 4
 
 /* MC_CMD_PCIE_CREDITS_OUT msgresponse */
 #define    MC_CMD_PCIE_CREDITS_OUT_LEN 16
@@ -2346,31 +2842,54 @@
 /* MC_CMD_RXD_MONITOR_IN msgrequest */
 #define    MC_CMD_RXD_MONITOR_IN_LEN 12
 #define       MC_CMD_RXD_MONITOR_IN_QID_OFST 0
+#define       MC_CMD_RXD_MONITOR_IN_QID_LEN 4
 #define       MC_CMD_RXD_MONITOR_IN_POLL_PERIOD_OFST 4
+#define       MC_CMD_RXD_MONITOR_IN_POLL_PERIOD_LEN 4
 #define       MC_CMD_RXD_MONITOR_IN_WIPE_OFST 8
+#define       MC_CMD_RXD_MONITOR_IN_WIPE_LEN 4
 
 /* MC_CMD_RXD_MONITOR_OUT msgresponse */
 #define    MC_CMD_RXD_MONITOR_OUT_LEN 80
 #define       MC_CMD_RXD_MONITOR_OUT_QID_OFST 0
+#define       MC_CMD_RXD_MONITOR_OUT_QID_LEN 4
 #define       MC_CMD_RXD_MONITOR_OUT_RING_FILL_OFST 4
+#define       MC_CMD_RXD_MONITOR_OUT_RING_FILL_LEN 4
 #define       MC_CMD_RXD_MONITOR_OUT_CACHE_FILL_OFST 8
+#define       MC_CMD_RXD_MONITOR_OUT_CACHE_FILL_LEN 4
 #define       MC_CMD_RXD_MONITOR_OUT_RING_LT_1_OFST 12
+#define       MC_CMD_RXD_MONITOR_OUT_RING_LT_1_LEN 4
 #define       MC_CMD_RXD_MONITOR_OUT_RING_LT_2_OFST 16
+#define       MC_CMD_RXD_MONITOR_OUT_RING_LT_2_LEN 4
 #define       MC_CMD_RXD_MONITOR_OUT_RING_LT_4_OFST 20
+#define       MC_CMD_RXD_MONITOR_OUT_RING_LT_4_LEN 4
 #define       MC_CMD_RXD_MONITOR_OUT_RING_LT_8_OFST 24
+#define       MC_CMD_RXD_MONITOR_OUT_RING_LT_8_LEN 4
 #define       MC_CMD_RXD_MONITOR_OUT_RING_LT_16_OFST 28
+#define       MC_CMD_RXD_MONITOR_OUT_RING_LT_16_LEN 4
 #define       MC_CMD_RXD_MONITOR_OUT_RING_LT_32_OFST 32
+#define       MC_CMD_RXD_MONITOR_OUT_RING_LT_32_LEN 4
 #define       MC_CMD_RXD_MONITOR_OUT_RING_LT_64_OFST 36
+#define       MC_CMD_RXD_MONITOR_OUT_RING_LT_64_LEN 4
 #define       MC_CMD_RXD_MONITOR_OUT_RING_LT_128_OFST 40
+#define       MC_CMD_RXD_MONITOR_OUT_RING_LT_128_LEN 4
 #define       MC_CMD_RXD_MONITOR_OUT_RING_LT_256_OFST 44
+#define       MC_CMD_RXD_MONITOR_OUT_RING_LT_256_LEN 4
 #define       MC_CMD_RXD_MONITOR_OUT_RING_GE_256_OFST 48
+#define       MC_CMD_RXD_MONITOR_OUT_RING_GE_256_LEN 4
 #define       MC_CMD_RXD_MONITOR_OUT_CACHE_LT_1_OFST 52
+#define       MC_CMD_RXD_MONITOR_OUT_CACHE_LT_1_LEN 4
 #define       MC_CMD_RXD_MONITOR_OUT_CACHE_LT_2_OFST 56
+#define       MC_CMD_RXD_MONITOR_OUT_CACHE_LT_2_LEN 4
 #define       MC_CMD_RXD_MONITOR_OUT_CACHE_LT_4_OFST 60
+#define       MC_CMD_RXD_MONITOR_OUT_CACHE_LT_4_LEN 4
 #define       MC_CMD_RXD_MONITOR_OUT_CACHE_LT_8_OFST 64
+#define       MC_CMD_RXD_MONITOR_OUT_CACHE_LT_8_LEN 4
 #define       MC_CMD_RXD_MONITOR_OUT_CACHE_LT_16_OFST 68
+#define       MC_CMD_RXD_MONITOR_OUT_CACHE_LT_16_LEN 4
 #define       MC_CMD_RXD_MONITOR_OUT_CACHE_LT_32_OFST 72
+#define       MC_CMD_RXD_MONITOR_OUT_CACHE_LT_32_LEN 4
 #define       MC_CMD_RXD_MONITOR_OUT_CACHE_GE_32_OFST 76
+#define       MC_CMD_RXD_MONITOR_OUT_CACHE_GE_32_LEN 4
 
 
 /***********************************/
@@ -2379,13 +2898,14 @@
  */
 #define MC_CMD_PUTS 0x23
 
-#define MC_CMD_0x23_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+#define MC_CMD_0x23_PRIVILEGE_CTG SRIOV_CTG_INSECURE
 
 /* MC_CMD_PUTS_IN msgrequest */
 #define    MC_CMD_PUTS_IN_LENMIN 13
 #define    MC_CMD_PUTS_IN_LENMAX 252
 #define    MC_CMD_PUTS_IN_LEN(num) (12+1*(num))
 #define       MC_CMD_PUTS_IN_DEST_OFST 0
+#define       MC_CMD_PUTS_IN_DEST_LEN 4
 #define        MC_CMD_PUTS_IN_UART_LBN 0
 #define        MC_CMD_PUTS_IN_UART_WIDTH 1
 #define        MC_CMD_PUTS_IN_PORT_LBN 1
@@ -2417,6 +2937,7 @@
 #define    MC_CMD_GET_PHY_CFG_OUT_LEN 72
 /* flags */
 #define       MC_CMD_GET_PHY_CFG_OUT_FLAGS_OFST 0
+#define       MC_CMD_GET_PHY_CFG_OUT_FLAGS_LEN 4
 #define        MC_CMD_GET_PHY_CFG_OUT_PRESENT_LBN 0
 #define        MC_CMD_GET_PHY_CFG_OUT_PRESENT_WIDTH 1
 #define        MC_CMD_GET_PHY_CFG_OUT_BIST_CABLE_SHORT_LBN 1
@@ -2433,8 +2954,10 @@
 #define        MC_CMD_GET_PHY_CFG_OUT_BIST_WIDTH 1
 /* ?? */
 #define       MC_CMD_GET_PHY_CFG_OUT_TYPE_OFST 4
+#define       MC_CMD_GET_PHY_CFG_OUT_TYPE_LEN 4
 /* Bitmask of supported capabilities */
 #define       MC_CMD_GET_PHY_CFG_OUT_SUPPORTED_CAP_OFST 8
+#define       MC_CMD_GET_PHY_CFG_OUT_SUPPORTED_CAP_LEN 4
 #define        MC_CMD_PHY_CAP_10HDX_LBN 1
 #define        MC_CMD_PHY_CAP_10HDX_WIDTH 1
 #define        MC_CMD_PHY_CAP_10FDX_LBN 2
@@ -2459,17 +2982,39 @@
 #define        MC_CMD_PHY_CAP_40000FDX_WIDTH 1
 #define        MC_CMD_PHY_CAP_DDM_LBN 12
 #define        MC_CMD_PHY_CAP_DDM_WIDTH 1
+#define        MC_CMD_PHY_CAP_100000FDX_LBN 13
+#define        MC_CMD_PHY_CAP_100000FDX_WIDTH 1
+#define        MC_CMD_PHY_CAP_25000FDX_LBN 14
+#define        MC_CMD_PHY_CAP_25000FDX_WIDTH 1
+#define        MC_CMD_PHY_CAP_50000FDX_LBN 15
+#define        MC_CMD_PHY_CAP_50000FDX_WIDTH 1
+#define        MC_CMD_PHY_CAP_BASER_FEC_LBN 16
+#define        MC_CMD_PHY_CAP_BASER_FEC_WIDTH 1
+#define        MC_CMD_PHY_CAP_BASER_FEC_REQUESTED_LBN 17
+#define        MC_CMD_PHY_CAP_BASER_FEC_REQUESTED_WIDTH 1
+#define        MC_CMD_PHY_CAP_RS_FEC_LBN 18
+#define        MC_CMD_PHY_CAP_RS_FEC_WIDTH 1
+#define        MC_CMD_PHY_CAP_RS_FEC_REQUESTED_LBN 19
+#define        MC_CMD_PHY_CAP_RS_FEC_REQUESTED_WIDTH 1
+#define        MC_CMD_PHY_CAP_25G_BASER_FEC_LBN 20
+#define        MC_CMD_PHY_CAP_25G_BASER_FEC_WIDTH 1
+#define        MC_CMD_PHY_CAP_25G_BASER_FEC_REQUESTED_LBN 21
+#define        MC_CMD_PHY_CAP_25G_BASER_FEC_REQUESTED_WIDTH 1
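
These new bits split FEC ability into BASE-R and RS variants, each paired
with a _REQUESTED bit that asks for that FEC mode to be negotiated. A
sketch of working with the SUPPORTED_CAP mask using the _LBN values above
(helper names illustrative):

#include <stdbool.h>
#include <stdint.h>

/* Test and set FEC-related bits in the capability mask. LBN values:
 * BASER_FEC 16, BASER_FEC_REQUESTED 17, RS_FEC 18. Sketch only.
 */
static bool phy_supports_rs_fec(uint32_t cap)
{
        return cap & (1u << 18);        /* MC_CMD_PHY_CAP_RS_FEC_LBN */
}

static uint32_t phy_request_baser_fec(uint32_t cap)
{
        /* Request BASE-R FEC only if the PHY advertises it. */
        if (cap & (1u << 16))           /* MC_CMD_PHY_CAP_BASER_FEC_LBN */
                cap |= 1u << 17;        /* ..._BASER_FEC_REQUESTED_LBN */
        return cap;
}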
 /* ?? */
 #define       MC_CMD_GET_PHY_CFG_OUT_CHANNEL_OFST 12
+#define       MC_CMD_GET_PHY_CFG_OUT_CHANNEL_LEN 4
 /* ?? */
 #define       MC_CMD_GET_PHY_CFG_OUT_PRT_OFST 16
+#define       MC_CMD_GET_PHY_CFG_OUT_PRT_LEN 4
 /* ?? */
 #define       MC_CMD_GET_PHY_CFG_OUT_STATS_MASK_OFST 20
+#define       MC_CMD_GET_PHY_CFG_OUT_STATS_MASK_LEN 4
 /* ?? */
 #define       MC_CMD_GET_PHY_CFG_OUT_NAME_OFST 24
 #define       MC_CMD_GET_PHY_CFG_OUT_NAME_LEN 20
 /* ?? */
 #define       MC_CMD_GET_PHY_CFG_OUT_MEDIA_TYPE_OFST 44
+#define       MC_CMD_GET_PHY_CFG_OUT_MEDIA_TYPE_LEN 4
 /* enum: Xaui. */
 #define          MC_CMD_MEDIA_XAUI 0x1
 /* enum: CX4. */
@@ -2485,6 +3030,7 @@
 /* enum: QSFP+. */
 #define          MC_CMD_MEDIA_QSFP_PLUS 0x7
 #define       MC_CMD_GET_PHY_CFG_OUT_MMD_MASK_OFST 48
+#define       MC_CMD_GET_PHY_CFG_OUT_MMD_MASK_LEN 4
 /* enum: Native clause 22 */
 #define          MC_CMD_MMD_CLAUSE22 0x0
 #define          MC_CMD_MMD_CLAUSE45_PMAPMD 0x1 /* enum */
@@ -2515,6 +3061,7 @@
 #define    MC_CMD_START_BIST_IN_LEN 4
 /* Type of test. */
 #define       MC_CMD_START_BIST_IN_TYPE_OFST 0
+#define       MC_CMD_START_BIST_IN_TYPE_LEN 4
 /* enum: Run the PHY's short cable BIST. */
 #define          MC_CMD_PHY_BIST_CABLE_SHORT 0x1
 /* enum: Run the PHY's long cable BIST. */
@@ -2556,6 +3103,7 @@
 #define    MC_CMD_POLL_BIST_OUT_LEN 8
 /* result */
 #define       MC_CMD_POLL_BIST_OUT_RESULT_OFST 0
+#define       MC_CMD_POLL_BIST_OUT_RESULT_LEN 4
 /* enum: Running. */
 #define          MC_CMD_POLL_BIST_RUNNING 0x1
 /* enum: Passed. */
@@ -2565,19 +3113,26 @@
 /* enum: Timed-out. */
 #define          MC_CMD_POLL_BIST_TIMEOUT 0x4
 #define       MC_CMD_POLL_BIST_OUT_PRIVATE_OFST 4
+#define       MC_CMD_POLL_BIST_OUT_PRIVATE_LEN 4
 
 /* MC_CMD_POLL_BIST_OUT_SFT9001 msgresponse */
 #define    MC_CMD_POLL_BIST_OUT_SFT9001_LEN 36
 /* result */
 /*            MC_CMD_POLL_BIST_OUT_RESULT_OFST 0 */
+/*            MC_CMD_POLL_BIST_OUT_RESULT_LEN 4 */
 /*            Enum values, see field(s): */
 /*               MC_CMD_POLL_BIST_OUT/MC_CMD_POLL_BIST_OUT_RESULT */
 #define       MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_LENGTH_A_OFST 4
+#define       MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_LENGTH_A_LEN 4
 #define       MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_LENGTH_B_OFST 8
+#define       MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_LENGTH_B_LEN 4
 #define       MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_LENGTH_C_OFST 12
+#define       MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_LENGTH_C_LEN 4
 #define       MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_LENGTH_D_OFST 16
+#define       MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_LENGTH_D_LEN 4
 /* Status of each channel A */
 #define       MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_STATUS_A_OFST 20
+#define       MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_STATUS_A_LEN 4
 /* enum: Ok. */
 #define          MC_CMD_POLL_BIST_SFT9001_PAIR_OK 0x1
 /* enum: Open. */
@@ -2590,14 +3145,17 @@
 #define          MC_CMD_POLL_BIST_SFT9001_PAIR_BUSY 0x9
 /* Status of each channel B */
 #define       MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_STATUS_B_OFST 24
+#define       MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_STATUS_B_LEN 4
 /*            Enum values, see field(s): */
 /*               CABLE_STATUS_A */
 /* Status of each channel C */
 #define       MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_STATUS_C_OFST 28
+#define       MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_STATUS_C_LEN 4
 /*            Enum values, see field(s): */
 /*               CABLE_STATUS_A */
 /* Status of each channel D */
 #define       MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_STATUS_D_OFST 32
+#define       MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_STATUS_D_LEN 4
 /*            Enum values, see field(s): */
 /*               CABLE_STATUS_A */
 
@@ -2605,9 +3163,11 @@
 #define    MC_CMD_POLL_BIST_OUT_MRSFP_LEN 8
 /* result */
 /*            MC_CMD_POLL_BIST_OUT_RESULT_OFST 0 */
+/*            MC_CMD_POLL_BIST_OUT_RESULT_LEN 4 */
 /*            Enum values, see field(s): */
 /*               MC_CMD_POLL_BIST_OUT/MC_CMD_POLL_BIST_OUT_RESULT */
 #define       MC_CMD_POLL_BIST_OUT_MRSFP_TEST_OFST 4
+#define       MC_CMD_POLL_BIST_OUT_MRSFP_TEST_LEN 4
 /* enum: Complete. */
 #define          MC_CMD_POLL_BIST_MRSFP_TEST_COMPLETE 0x0
 /* enum: Bus switch off I2C write. */
@@ -2631,9 +3191,11 @@
 #define    MC_CMD_POLL_BIST_OUT_MEM_LEN 36
 /* result */
 /*            MC_CMD_POLL_BIST_OUT_RESULT_OFST 0 */
+/*            MC_CMD_POLL_BIST_OUT_RESULT_LEN 4 */
 /*            Enum values, see field(s): */
 /*               MC_CMD_POLL_BIST_OUT/MC_CMD_POLL_BIST_OUT_RESULT */
 #define       MC_CMD_POLL_BIST_OUT_MEM_TEST_OFST 4
+#define       MC_CMD_POLL_BIST_OUT_MEM_TEST_LEN 4
 /* enum: Test has completed. */
 #define          MC_CMD_POLL_BIST_MEM_COMPLETE 0x0
 /* enum: RAM test - walk ones. */
@@ -2650,8 +3212,10 @@
 #define          MC_CMD_POLL_BIST_MEM_ECC 0x6
 /* Failure address, only valid if result is POLL_BIST_FAILED */
 #define       MC_CMD_POLL_BIST_OUT_MEM_ADDR_OFST 8
+#define       MC_CMD_POLL_BIST_OUT_MEM_ADDR_LEN 4
 /* Bus or address space to which the failure address corresponds */
 #define       MC_CMD_POLL_BIST_OUT_MEM_BUS_OFST 12
+#define       MC_CMD_POLL_BIST_OUT_MEM_BUS_LEN 4
 /* enum: MC MIPS bus. */
 #define          MC_CMD_POLL_BIST_MEM_BUS_MC 0x0
 /* enum: CSR IREG bus. */
@@ -2672,14 +3236,19 @@
 #define          MC_CMD_POLL_BIST_MEM_BUS_DICPU_RX1 0x8
 /* Pattern written to RAM / register */
 #define       MC_CMD_POLL_BIST_OUT_MEM_EXPECT_OFST 16
+#define       MC_CMD_POLL_BIST_OUT_MEM_EXPECT_LEN 4
 /* Actual value read from RAM / register */
 #define       MC_CMD_POLL_BIST_OUT_MEM_ACTUAL_OFST 20
+#define       MC_CMD_POLL_BIST_OUT_MEM_ACTUAL_LEN 4
 /* ECC error mask */
 #define       MC_CMD_POLL_BIST_OUT_MEM_ECC_OFST 24
+#define       MC_CMD_POLL_BIST_OUT_MEM_ECC_LEN 4
 /* ECC parity error mask */
 #define       MC_CMD_POLL_BIST_OUT_MEM_ECC_PARITY_OFST 28
+#define       MC_CMD_POLL_BIST_OUT_MEM_ECC_PARITY_LEN 4
 /* ECC fatal error mask */
 #define       MC_CMD_POLL_BIST_OUT_MEM_ECC_FATAL_OFST 32
+#define       MC_CMD_POLL_BIST_OUT_MEM_ECC_FATAL_LEN 4
 
 
 /***********************************/
@@ -2831,6 +3400,143 @@
 /*            Enum values, see field(s): */
 /*               100M */
 
+/* MC_CMD_GET_LOOPBACK_MODES_OUT_V2 msgresponse: Supported loopback modes for
+ * newer NICs with 25G/50G/100G support
+ */
+#define    MC_CMD_GET_LOOPBACK_MODES_OUT_V2_LEN 64
+/* Supported loopbacks. */
+#define       MC_CMD_GET_LOOPBACK_MODES_OUT_V2_100M_OFST 0
+#define       MC_CMD_GET_LOOPBACK_MODES_OUT_V2_100M_LEN 8
+#define       MC_CMD_GET_LOOPBACK_MODES_OUT_V2_100M_LO_OFST 0
+#define       MC_CMD_GET_LOOPBACK_MODES_OUT_V2_100M_HI_OFST 4
+/* enum: None. */
+/*               MC_CMD_LOOPBACK_NONE  0x0 */
+/* enum: Data. */
+/*               MC_CMD_LOOPBACK_DATA  0x1 */
+/* enum: GMAC. */
+/*               MC_CMD_LOOPBACK_GMAC  0x2 */
+/* enum: XGMII. */
+/*               MC_CMD_LOOPBACK_XGMII 0x3 */
+/* enum: XGXS. */
+/*               MC_CMD_LOOPBACK_XGXS  0x4 */
+/* enum: XAUI. */
+/*               MC_CMD_LOOPBACK_XAUI  0x5 */
+/* enum: GMII. */
+/*               MC_CMD_LOOPBACK_GMII  0x6 */
+/* enum: SGMII. */
+/*               MC_CMD_LOOPBACK_SGMII  0x7 */
+/* enum: XGBR. */
+/*               MC_CMD_LOOPBACK_XGBR  0x8 */
+/* enum: XFI. */
+/*               MC_CMD_LOOPBACK_XFI  0x9 */
+/* enum: XAUI Far. */
+/*               MC_CMD_LOOPBACK_XAUI_FAR  0xa */
+/* enum: GMII Far. */
+/*               MC_CMD_LOOPBACK_GMII_FAR  0xb */
+/* enum: SGMII Far. */
+/*               MC_CMD_LOOPBACK_SGMII_FAR  0xc */
+/* enum: XFI Far. */
+/*               MC_CMD_LOOPBACK_XFI_FAR  0xd */
+/* enum: GPhy. */
+/*               MC_CMD_LOOPBACK_GPHY  0xe */
+/* enum: PhyXS. */
+/*               MC_CMD_LOOPBACK_PHYXS  0xf */
+/* enum: PCS. */
+/*               MC_CMD_LOOPBACK_PCS  0x10 */
+/* enum: PMA-PMD. */
+/*               MC_CMD_LOOPBACK_PMAPMD  0x11 */
+/* enum: Cross-Port. */
+/*               MC_CMD_LOOPBACK_XPORT  0x12 */
+/* enum: XGMII-Wireside. */
+/*               MC_CMD_LOOPBACK_XGMII_WS  0x13 */
+/* enum: XAUI Wireside. */
+/*               MC_CMD_LOOPBACK_XAUI_WS  0x14 */
+/* enum: XAUI Wireside Far. */
+/*               MC_CMD_LOOPBACK_XAUI_WS_FAR  0x15 */
+/* enum: XAUI Wireside near. */
+/*               MC_CMD_LOOPBACK_XAUI_WS_NEAR  0x16 */
+/* enum: GMII Wireside. */
+/*               MC_CMD_LOOPBACK_GMII_WS  0x17 */
+/* enum: XFI Wireside. */
+/*               MC_CMD_LOOPBACK_XFI_WS  0x18 */
+/* enum: XFI Wireside Far. */
+/*               MC_CMD_LOOPBACK_XFI_WS_FAR  0x19 */
+/* enum: PhyXS Wireside. */
+/*               MC_CMD_LOOPBACK_PHYXS_WS  0x1a */
+/* enum: PMA lanes MAC-Serdes. */
+/*               MC_CMD_LOOPBACK_PMA_INT  0x1b */
+/* enum: KR Serdes Parallel (Encoder). */
+/*               MC_CMD_LOOPBACK_SD_NEAR  0x1c */
+/* enum: KR Serdes Serial. */
+/*               MC_CMD_LOOPBACK_SD_FAR  0x1d */
+/* enum: PMA lanes MAC-Serdes Wireside. */
+/*               MC_CMD_LOOPBACK_PMA_INT_WS  0x1e */
+/* enum: KR Serdes Parallel Wireside (Full PCS). */
+/*               MC_CMD_LOOPBACK_SD_FEP2_WS  0x1f */
+/* enum: KR Serdes Parallel Wireside (Sym Aligner to TX). */
+/*               MC_CMD_LOOPBACK_SD_FEP1_5_WS  0x20 */
+/* enum: KR Serdes Parallel Wireside (Deserializer to Serializer). */
+/*               MC_CMD_LOOPBACK_SD_FEP_WS  0x21 */
+/* enum: KR Serdes Serial Wireside. */
+/*               MC_CMD_LOOPBACK_SD_FES_WS  0x22 */
+/* enum: Near side of AOE Siena side port */
+/*               MC_CMD_LOOPBACK_AOE_INT_NEAR  0x23 */
+/* enum: Medford Wireside datapath loopback */
+/*               MC_CMD_LOOPBACK_DATA_WS  0x24 */
+/* enum: Force link up without setting up any physical loopback (snapper use
+ * only)
+ */
+/*               MC_CMD_LOOPBACK_FORCE_EXT_LINK  0x25 */
+/* Supported loopbacks. */
+#define       MC_CMD_GET_LOOPBACK_MODES_OUT_V2_1G_OFST 8
+#define       MC_CMD_GET_LOOPBACK_MODES_OUT_V2_1G_LEN 8
+#define       MC_CMD_GET_LOOPBACK_MODES_OUT_V2_1G_LO_OFST 8
+#define       MC_CMD_GET_LOOPBACK_MODES_OUT_V2_1G_HI_OFST 12
+/*            Enum values, see field(s): */
+/*               100M */
+/* Supported loopbacks. */
+#define       MC_CMD_GET_LOOPBACK_MODES_OUT_V2_10G_OFST 16
+#define       MC_CMD_GET_LOOPBACK_MODES_OUT_V2_10G_LEN 8
+#define       MC_CMD_GET_LOOPBACK_MODES_OUT_V2_10G_LO_OFST 16
+#define       MC_CMD_GET_LOOPBACK_MODES_OUT_V2_10G_HI_OFST 20
+/*            Enum values, see field(s): */
+/*               100M */
+/* Supported loopbacks. */
+#define       MC_CMD_GET_LOOPBACK_MODES_OUT_V2_SUGGESTED_OFST 24
+#define       MC_CMD_GET_LOOPBACK_MODES_OUT_V2_SUGGESTED_LEN 8
+#define       MC_CMD_GET_LOOPBACK_MODES_OUT_V2_SUGGESTED_LO_OFST 24
+#define       MC_CMD_GET_LOOPBACK_MODES_OUT_V2_SUGGESTED_HI_OFST 28
+/*            Enum values, see field(s): */
+/*               100M */
+/* Supported loopbacks. */
+#define       MC_CMD_GET_LOOPBACK_MODES_OUT_V2_40G_OFST 32
+#define       MC_CMD_GET_LOOPBACK_MODES_OUT_V2_40G_LEN 8
+#define       MC_CMD_GET_LOOPBACK_MODES_OUT_V2_40G_LO_OFST 32
+#define       MC_CMD_GET_LOOPBACK_MODES_OUT_V2_40G_HI_OFST 36
+/*            Enum values, see field(s): */
+/*               100M */
+/* Supported 25G loopbacks. */
+#define       MC_CMD_GET_LOOPBACK_MODES_OUT_V2_25G_OFST 40
+#define       MC_CMD_GET_LOOPBACK_MODES_OUT_V2_25G_LEN 8
+#define       MC_CMD_GET_LOOPBACK_MODES_OUT_V2_25G_LO_OFST 40
+#define       MC_CMD_GET_LOOPBACK_MODES_OUT_V2_25G_HI_OFST 44
+/*            Enum values, see field(s): */
+/*               100M */
+/* Supported 50G loopbacks. */
+#define       MC_CMD_GET_LOOPBACK_MODES_OUT_V2_50G_OFST 48
+#define       MC_CMD_GET_LOOPBACK_MODES_OUT_V2_50G_LEN 8
+#define       MC_CMD_GET_LOOPBACK_MODES_OUT_V2_50G_LO_OFST 48
+#define       MC_CMD_GET_LOOPBACK_MODES_OUT_V2_50G_HI_OFST 52
+/*            Enum values, see field(s): */
+/*               100M */
+/* Supported 100G loopbacks. */
+#define       MC_CMD_GET_LOOPBACK_MODES_OUT_V2_100G_OFST 56
+#define       MC_CMD_GET_LOOPBACK_MODES_OUT_V2_100G_LEN 8
+#define       MC_CMD_GET_LOOPBACK_MODES_OUT_V2_100G_LO_OFST 56
+#define       MC_CMD_GET_LOOPBACK_MODES_OUT_V2_100G_HI_OFST 60
+/*            Enum values, see field(s): */
+/*               100M */
+
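Each supported-loopback mask in the V2 response above is a 64-bit field exposed
as two 32-bit words at the _LO_OFST/_HI_OFST offsets. A minimal sketch of
reassembling one mask from a raw response buffer, assuming a little-endian host
(a real driver would go through its own MCDI accessors):

#include <stdint.h>
#include <string.h>

/* Reassemble the 25G loopback mask from an
 * MC_CMD_GET_LOOPBACK_MODES_OUT_V2 response buffer. */
static uint64_t loopback_modes_25g(const uint8_t *outbuf)
{
	uint32_t lo, hi;

	memcpy(&lo, outbuf + MC_CMD_GET_LOOPBACK_MODES_OUT_V2_25G_LO_OFST, 4);
	memcpy(&hi, outbuf + MC_CMD_GET_LOOPBACK_MODES_OUT_V2_25G_HI_OFST, 4);
	return (uint64_t)hi << 32 | lo;
}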
 
 /***********************************/
 /* MC_CMD_GET_LINK
@@ -2848,17 +3554,22 @@
 #define    MC_CMD_GET_LINK_OUT_LEN 28
 /* near-side advertised capabilities */
 #define       MC_CMD_GET_LINK_OUT_CAP_OFST 0
+#define       MC_CMD_GET_LINK_OUT_CAP_LEN 4
 /* link-partner advertised capabilities */
 #define       MC_CMD_GET_LINK_OUT_LP_CAP_OFST 4
+#define       MC_CMD_GET_LINK_OUT_LP_CAP_LEN 4
 /* Autonegotiated speed in mbit/s. The link may still be down even if this
  * reads non-zero.
  */
 #define       MC_CMD_GET_LINK_OUT_LINK_SPEED_OFST 8
+#define       MC_CMD_GET_LINK_OUT_LINK_SPEED_LEN 4
 /* Current loopback setting. */
 #define       MC_CMD_GET_LINK_OUT_LOOPBACK_MODE_OFST 12
+#define       MC_CMD_GET_LINK_OUT_LOOPBACK_MODE_LEN 4
 /*            Enum values, see field(s): */
 /*               MC_CMD_GET_LOOPBACK_MODES/MC_CMD_GET_LOOPBACK_MODES_OUT/100M */
 #define       MC_CMD_GET_LINK_OUT_FLAGS_OFST 16
+#define       MC_CMD_GET_LINK_OUT_FLAGS_LEN 4
 #define        MC_CMD_GET_LINK_OUT_LINK_UP_LBN 0
 #define        MC_CMD_GET_LINK_OUT_LINK_UP_WIDTH 1
 #define        MC_CMD_GET_LINK_OUT_FULL_DUPLEX_LBN 1
@@ -2873,9 +3584,11 @@
 #define        MC_CMD_GET_LINK_OUT_LINK_FAULT_TX_WIDTH 1
 /* This returns the negotiated flow control value. */
 #define       MC_CMD_GET_LINK_OUT_FCNTL_OFST 20
+#define       MC_CMD_GET_LINK_OUT_FCNTL_LEN 4
 /*            Enum values, see field(s): */
 /*               MC_CMD_SET_MAC/MC_CMD_SET_MAC_IN/FCNTL */
 #define       MC_CMD_GET_LINK_OUT_MAC_FAULT_OFST 24
+#define       MC_CMD_GET_LINK_OUT_MAC_FAULT_LEN 4
 #define        MC_CMD_MAC_FAULT_XGMII_LOCAL_LBN 0
 #define        MC_CMD_MAC_FAULT_XGMII_LOCAL_WIDTH 1
 #define        MC_CMD_MAC_FAULT_XGMII_REMOTE_LBN 1
@@ -2899,8 +3612,10 @@
 #define    MC_CMD_SET_LINK_IN_LEN 16
 /* ??? */
 #define       MC_CMD_SET_LINK_IN_CAP_OFST 0
+#define       MC_CMD_SET_LINK_IN_CAP_LEN 4
 /* Flags */
 #define       MC_CMD_SET_LINK_IN_FLAGS_OFST 4
+#define       MC_CMD_SET_LINK_IN_FLAGS_LEN 4
 #define        MC_CMD_SET_LINK_IN_LOWPOWER_LBN 0
 #define        MC_CMD_SET_LINK_IN_LOWPOWER_WIDTH 1
 #define        MC_CMD_SET_LINK_IN_POWEROFF_LBN 1
@@ -2909,12 +3624,14 @@
 #define        MC_CMD_SET_LINK_IN_TXDIS_WIDTH 1
 /* Loopback mode. */
 #define       MC_CMD_SET_LINK_IN_LOOPBACK_MODE_OFST 8
+#define       MC_CMD_SET_LINK_IN_LOOPBACK_MODE_LEN 4
 /*            Enum values, see field(s): */
 /*               MC_CMD_GET_LOOPBACK_MODES/MC_CMD_GET_LOOPBACK_MODES_OUT/100M */
 /* A loopback speed of "0" is supported, and means "choose any available
  * speed".
  */
 #define       MC_CMD_SET_LINK_IN_LOOPBACK_SPEED_OFST 12
+#define       MC_CMD_SET_LINK_IN_LOOPBACK_SPEED_LEN 4
 
 /* MC_CMD_SET_LINK_OUT msgresponse */
 #define    MC_CMD_SET_LINK_OUT_LEN 0
@@ -2932,6 +3649,7 @@
 #define    MC_CMD_SET_ID_LED_IN_LEN 4
 /* Set LED state. */
 #define       MC_CMD_SET_ID_LED_IN_STATE_OFST 0
+#define       MC_CMD_SET_ID_LED_IN_STATE_LEN 4
 #define          MC_CMD_LED_OFF  0x0 /* enum */
 #define          MC_CMD_LED_ON  0x1 /* enum */
 #define          MC_CMD_LED_DEFAULT  0x2 /* enum */
@@ -2954,17 +3672,21 @@
  * EtherII, VLAN, bug16011 padding).
  */
 #define       MC_CMD_SET_MAC_IN_MTU_OFST 0
+#define       MC_CMD_SET_MAC_IN_MTU_LEN 4
 #define       MC_CMD_SET_MAC_IN_DRAIN_OFST 4
+#define       MC_CMD_SET_MAC_IN_DRAIN_LEN 4
 #define       MC_CMD_SET_MAC_IN_ADDR_OFST 8
 #define       MC_CMD_SET_MAC_IN_ADDR_LEN 8
 #define       MC_CMD_SET_MAC_IN_ADDR_LO_OFST 8
 #define       MC_CMD_SET_MAC_IN_ADDR_HI_OFST 12
 #define       MC_CMD_SET_MAC_IN_REJECT_OFST 16
+#define       MC_CMD_SET_MAC_IN_REJECT_LEN 4
 #define        MC_CMD_SET_MAC_IN_REJECT_UNCST_LBN 0
 #define        MC_CMD_SET_MAC_IN_REJECT_UNCST_WIDTH 1
 #define        MC_CMD_SET_MAC_IN_REJECT_BRDCST_LBN 1
 #define        MC_CMD_SET_MAC_IN_REJECT_BRDCST_WIDTH 1
 #define       MC_CMD_SET_MAC_IN_FCNTL_OFST 20
+#define       MC_CMD_SET_MAC_IN_FCNTL_LEN 4
 /* enum: Flow control is off. */
 #define          MC_CMD_FCNTL_OFF 0x0
 /* enum: Respond to flow control. */
@@ -2978,6 +3700,7 @@
 /* enum: Issue flow control. */
 #define          MC_CMD_FCNTL_GENERATE 0x5
 #define       MC_CMD_SET_MAC_IN_FLAGS_OFST 24
+#define       MC_CMD_SET_MAC_IN_FLAGS_LEN 4
 #define        MC_CMD_SET_MAC_IN_FLAG_INCLUDE_FCS_LBN 0
 #define        MC_CMD_SET_MAC_IN_FLAG_INCLUDE_FCS_WIDTH 1
 
@@ -2987,17 +3710,21 @@
  * EtherII, VLAN, bug16011 padding).
  */
 #define       MC_CMD_SET_MAC_EXT_IN_MTU_OFST 0
+#define       MC_CMD_SET_MAC_EXT_IN_MTU_LEN 4
 #define       MC_CMD_SET_MAC_EXT_IN_DRAIN_OFST 4
+#define       MC_CMD_SET_MAC_EXT_IN_DRAIN_LEN 4
 #define       MC_CMD_SET_MAC_EXT_IN_ADDR_OFST 8
 #define       MC_CMD_SET_MAC_EXT_IN_ADDR_LEN 8
 #define       MC_CMD_SET_MAC_EXT_IN_ADDR_LO_OFST 8
 #define       MC_CMD_SET_MAC_EXT_IN_ADDR_HI_OFST 12
 #define       MC_CMD_SET_MAC_EXT_IN_REJECT_OFST 16
+#define       MC_CMD_SET_MAC_EXT_IN_REJECT_LEN 4
 #define        MC_CMD_SET_MAC_EXT_IN_REJECT_UNCST_LBN 0
 #define        MC_CMD_SET_MAC_EXT_IN_REJECT_UNCST_WIDTH 1
 #define        MC_CMD_SET_MAC_EXT_IN_REJECT_BRDCST_LBN 1
 #define        MC_CMD_SET_MAC_EXT_IN_REJECT_BRDCST_WIDTH 1
 #define       MC_CMD_SET_MAC_EXT_IN_FCNTL_OFST 20
+#define       MC_CMD_SET_MAC_EXT_IN_FCNTL_LEN 4
 /* enum: Flow control is off. */
 /*               MC_CMD_FCNTL_OFF 0x0 */
 /* enum: Respond to flow control. */
@@ -3011,6 +3738,7 @@
 /* enum: Issue flow control. */
 /*               MC_CMD_FCNTL_GENERATE 0x5 */
 #define       MC_CMD_SET_MAC_EXT_IN_FLAGS_OFST 24
+#define       MC_CMD_SET_MAC_EXT_IN_FLAGS_LEN 4
 #define        MC_CMD_SET_MAC_EXT_IN_FLAG_INCLUDE_FCS_LBN 0
 #define        MC_CMD_SET_MAC_EXT_IN_FLAG_INCLUDE_FCS_WIDTH 1
 /* Select which parameters to configure. A parameter will only be modified if
@@ -3019,6 +3747,7 @@
  * set).
  */
 #define       MC_CMD_SET_MAC_EXT_IN_CONTROL_OFST 28
+#define       MC_CMD_SET_MAC_EXT_IN_CONTROL_LEN 4
 #define        MC_CMD_SET_MAC_EXT_IN_CFG_MTU_LBN 0
 #define        MC_CMD_SET_MAC_EXT_IN_CFG_MTU_WIDTH 1
 #define        MC_CMD_SET_MAC_EXT_IN_CFG_DRAIN_LBN 1
@@ -3040,6 +3769,7 @@
  * to 0.
  */
 #define       MC_CMD_SET_MAC_V2_OUT_MTU_OFST 0
+#define       MC_CMD_SET_MAC_V2_OUT_MTU_LEN 4
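The CONTROL mask described above lets a caller update individual MAC parameters
without disturbing the rest. A hedged sketch framing a SET_MAC_EXT request that
changes only the MTU (MCDI framing and endian handling omitted; field writes
assume a little-endian host):

#include <stdint.h>
#include <string.h>

static void build_set_mac_ext_mtu(uint8_t *inbuf, size_t inlen, uint32_t mtu)
{
	uint32_t control = 1u << MC_CMD_SET_MAC_EXT_IN_CFG_MTU_LBN;

	memset(inbuf, 0, inlen);
	memcpy(inbuf + MC_CMD_SET_MAC_EXT_IN_MTU_OFST, &mtu, 4);
	/* Only CFG_MTU is set in CONTROL, so firmware leaves the other
	 * (zeroed) fields unmodified instead of applying them. */
	memcpy(inbuf + MC_CMD_SET_MAC_EXT_IN_CONTROL_OFST, &control, 4);
}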
 
 
 /***********************************/
@@ -3144,6 +3874,7 @@
 #define       MC_CMD_MAC_STATS_IN_DMA_ADDR_LO_OFST 0
 #define       MC_CMD_MAC_STATS_IN_DMA_ADDR_HI_OFST 4
 #define       MC_CMD_MAC_STATS_IN_CMD_OFST 8
+#define       MC_CMD_MAC_STATS_IN_CMD_LEN 4
 #define        MC_CMD_MAC_STATS_IN_DMA_LBN 0
 #define        MC_CMD_MAC_STATS_IN_DMA_WIDTH 1
 #define        MC_CMD_MAC_STATS_IN_CLEAR_LBN 1
@@ -3158,9 +3889,16 @@
 #define        MC_CMD_MAC_STATS_IN_PERIODIC_NOEVENT_WIDTH 1
 #define        MC_CMD_MAC_STATS_IN_PERIOD_MS_LBN 16
 #define        MC_CMD_MAC_STATS_IN_PERIOD_MS_WIDTH 16
+/* DMA length. Should be set to MAC_STATS_NUM_STATS * sizeof(uint64_t), as
+ * returned by MC_CMD_GET_CAPABILITIES_V4_OUT. For legacy firmware not
+ * supporting MC_CMD_GET_CAPABILITIES_V4_OUT, DMA_LEN should be set to
+ * MC_CMD_MAC_NSTATS * sizeof(uint64_t)
+ */
 #define       MC_CMD_MAC_STATS_IN_DMA_LEN_OFST 12
+#define       MC_CMD_MAC_STATS_IN_DMA_LEN_LEN 4
 /* port id so vadapter stats can be provided */
 #define       MC_CMD_MAC_STATS_IN_PORT_ID_OFST 16
+#define       MC_CMD_MAC_STATS_IN_PORT_ID_LEN 4
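The DMA_LEN rule in the comment above depends on firmware generation. A small
sketch of the intended sizing, where mac_stats_num_stats is the
MAC_STATS_NUM_STATS value from MC_CMD_GET_CAPABILITIES_V4_OUT, or 0 when the
firmware predates that command:

#include <stdint.h>

static uint32_t mac_stats_dma_len(uint32_t mac_stats_num_stats)
{
	/* Legacy firmware without GET_CAPABILITIES_V4: fixed stats count. */
	if (mac_stats_num_stats == 0)
		return MC_CMD_MAC_NSTATS * sizeof(uint64_t);
	return mac_stats_num_stats * sizeof(uint64_t);
}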
 
 /* MC_CMD_MAC_STATS_OUT_DMA msgresponse */
 #define    MC_CMD_MAC_STATS_OUT_DMA_LEN 0
@@ -3305,9 +4043,126 @@
 #define          MC_CMD_GMAC_DMABUF_START  0x40
 /* enum: End of GMAC stats buffer space, for Siena only. */
 #define          MC_CMD_GMAC_DMABUF_END    0x5f
-#define          MC_CMD_MAC_GENERATION_END 0x60 /* enum */
+/* enum: GENERATION_END value, used together with GENERATION_START to verify
+ * consistency of DMAd data. For legacy firmware / drivers without extended
+ * stats (more precisely, when DMA_LEN == MC_CMD_MAC_NSTATS *
+ * sizeof(uint64_t)), this entry holds the GENERATION_END value. Otherwise,
+ * this value is invalid/reserved and GENERATION_END is written as the last
+ * 64-bit word of the DMA buffer (at DMA_LEN - sizeof(uint64_t)). Note that
+ * this is consistent with the legacy behaviour, in the sense that entry 96 is
+ * the last 64-bit word in the buffer when DMA_LEN == MC_CMD_MAC_NSTATS *
+ * sizeof(uint64_t). See SF-109306-TC, Section 9.2 for details.
+ */
+#define          MC_CMD_MAC_GENERATION_END 0x60
 #define          MC_CMD_MAC_NSTATS  0x61 /* enum */
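The GENERATION_START/GENERATION_END scheme described above is a seqlock-style
consistency check over the DMA'd stats buffer. A hedged sketch of the retry
loop (read_stats_snapshot() is a hypothetical helper; GENERATION_START is
conventionally entry 0 of the buffer):

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

void read_stats_snapshot(uint64_t *dst, size_t nstats);	/* hypothetical */

static bool read_stats_consistent(uint64_t *stats, size_t nstats)
{
	int retries = 3;

	while (retries--) {
		read_stats_snapshot(stats, nstats);
		/* GENERATION_END is the last 64-bit word of the buffer;
		 * equal markers mean firmware did not rewrite the stats
		 * while we were copying them. */
		if (stats[nstats - 1] == stats[0])
			return true;
	}
	return false;
}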
 
+/* MC_CMD_MAC_STATS_V2_OUT_DMA msgresponse */
+#define    MC_CMD_MAC_STATS_V2_OUT_DMA_LEN 0
+
+/* MC_CMD_MAC_STATS_V2_OUT_NO_DMA msgresponse */
+#define    MC_CMD_MAC_STATS_V2_OUT_NO_DMA_LEN (((MC_CMD_MAC_NSTATS_V2*64))>>3)
+#define       MC_CMD_MAC_STATS_V2_OUT_NO_DMA_STATISTICS_OFST 0
+#define       MC_CMD_MAC_STATS_V2_OUT_NO_DMA_STATISTICS_LEN 8
+#define       MC_CMD_MAC_STATS_V2_OUT_NO_DMA_STATISTICS_LO_OFST 0
+#define       MC_CMD_MAC_STATS_V2_OUT_NO_DMA_STATISTICS_HI_OFST 4
+#define       MC_CMD_MAC_STATS_V2_OUT_NO_DMA_STATISTICS_NUM MC_CMD_MAC_NSTATS_V2
+/* enum: Start of FEC stats buffer space, Medford2 and up */
+#define          MC_CMD_MAC_FEC_DMABUF_START  0x61
+/* enum: Number of uncorrected FEC codewords on link (RS-FEC only for Medford2)
+ */
+#define          MC_CMD_MAC_FEC_UNCORRECTED_ERRORS  0x61
+/* enum: Number of corrected FEC codewords on link (RS-FEC only for Medford2)
+ */
+#define          MC_CMD_MAC_FEC_CORRECTED_ERRORS  0x62
+/* enum: Number of corrected 10-bit symbol errors, lane 0 (RS-FEC only) */
+#define          MC_CMD_MAC_FEC_CORRECTED_SYMBOLS_LANE0  0x63
+/* enum: Number of corrected 10-bit symbol errors, lane 1 (RS-FEC only) */
+#define          MC_CMD_MAC_FEC_CORRECTED_SYMBOLS_LANE1  0x64
+/* enum: Number of corrected 10-bit symbol errors, lane 2 (RS-FEC only) */
+#define          MC_CMD_MAC_FEC_CORRECTED_SYMBOLS_LANE2  0x65
+/* enum: Number of corrected 10-bit symbol errors, lane 3 (RS-FEC only) */
+#define          MC_CMD_MAC_FEC_CORRECTED_SYMBOLS_LANE3  0x66
+/* enum: This includes the space at offset 103 which is the final
+ * GENERATION_END in a MAC_STATS_V2 response and otherwise unused.
+ */
+#define          MC_CMD_MAC_NSTATS_V2  0x68
+/*            Other enum values, see field(s): */
+/*               MC_CMD_MAC_STATS_OUT_NO_DMA/STATISTICS */
+
+/* MC_CMD_MAC_STATS_V3_OUT_DMA msgresponse */
+#define    MC_CMD_MAC_STATS_V3_OUT_DMA_LEN 0
+
+/* MC_CMD_MAC_STATS_V3_OUT_NO_DMA msgresponse */
+#define    MC_CMD_MAC_STATS_V3_OUT_NO_DMA_LEN (((MC_CMD_MAC_NSTATS_V3*64))>>3)
+#define       MC_CMD_MAC_STATS_V3_OUT_NO_DMA_STATISTICS_OFST 0
+#define       MC_CMD_MAC_STATS_V3_OUT_NO_DMA_STATISTICS_LEN 8
+#define       MC_CMD_MAC_STATS_V3_OUT_NO_DMA_STATISTICS_LO_OFST 0
+#define       MC_CMD_MAC_STATS_V3_OUT_NO_DMA_STATISTICS_HI_OFST 4
+#define       MC_CMD_MAC_STATS_V3_OUT_NO_DMA_STATISTICS_NUM MC_CMD_MAC_NSTATS_V3
+/* enum: Start of CTPIO stats buffer space, Medford2 and up */
+#define          MC_CMD_MAC_CTPIO_DMABUF_START  0x68
+/* enum: Number of CTPIO fallbacks because a DMA packet was in progress on the
+ * target VI
+ */
+#define          MC_CMD_MAC_CTPIO_VI_BUSY_FALLBACK  0x68
+/* enum: Number of times a CTPIO send wrote beyond frame end (informational
+ * only)
+ */
+#define          MC_CMD_MAC_CTPIO_LONG_WRITE_SUCCESS  0x69
+/* enum: Number of CTPIO failures because the TX doorbell was written before
+ * the end of the frame data
+ */
+#define          MC_CMD_MAC_CTPIO_MISSING_DBELL_FAIL  0x6a
+/* enum: Number of CTPIO failures because the internal FIFO overflowed */
+#define          MC_CMD_MAC_CTPIO_OVERFLOW_FAIL  0x6b
+/* enum: Number of CTPIO failures because the host did not deliver data fast
+ * enough to avoid MAC underflow
+ */
+#define          MC_CMD_MAC_CTPIO_UNDERFLOW_FAIL  0x6c
+/* enum: Number of CTPIO failures because the host did not deliver all the
+ * frame data within the timeout
+ */
+#define          MC_CMD_MAC_CTPIO_TIMEOUT_FAIL  0x6d
+/* enum: Number of CTPIO failures because the frame data arrived out of order
+ * or with gaps
+ */
+#define          MC_CMD_MAC_CTPIO_NONCONTIG_WR_FAIL  0x6e
+/* enum: Number of CTPIO failures because the host started a new frame before
+ * completing the previous one
+ */
+#define          MC_CMD_MAC_CTPIO_FRM_CLOBBER_FAIL  0x6f
+/* enum: Number of CTPIO failures because a write was not a multiple of 32 bits
+ * or not 32-bit aligned
+ */
+#define          MC_CMD_MAC_CTPIO_INVALID_WR_FAIL  0x70
+/* enum: Number of CTPIO fallbacks because another VI on the same port was
+ * sending a CTPIO frame
+ */
+#define          MC_CMD_MAC_CTPIO_VI_CLOBBER_FALLBACK  0x71
+/* enum: Number of CTPIO fallbacks because target VI did not have CTPIO enabled
+ */
+#define          MC_CMD_MAC_CTPIO_UNQUALIFIED_FALLBACK  0x72
+/* enum: Number of CTPIO fallbacks because length in header was less than 29
+ * bytes
+ */
+#define          MC_CMD_MAC_CTPIO_RUNT_FALLBACK  0x73
+/* enum: Total number of successful CTPIO sends on this port */
+#define          MC_CMD_MAC_CTPIO_SUCCESS  0x74
+/* enum: Total number of CTPIO fallbacks on this port */
+#define          MC_CMD_MAC_CTPIO_FALLBACK  0x75
+/* enum: Total number of CTPIO poisoned frames on this port, whether erased or
+ * not
+ */
+#define          MC_CMD_MAC_CTPIO_POISON  0x76
+/* enum: Total number of CTPIO erased frames on this port */
+#define          MC_CMD_MAC_CTPIO_ERASE  0x77
+/* enum: This includes the space at offset 120 which is the final
+ * GENERATION_END in a MAC_STATS_V3 response and otherwise unused.
+ */
+#define          MC_CMD_MAC_NSTATS_V3  0x79
+/*            Other enum values, see field(s): */
+/*               MC_CMD_MAC_STATS_V2_OUT_NO_DMA/STATISTICS */
+
 
 /***********************************/
 /* MC_CMD_SRIOV
@@ -3318,21 +4173,28 @@
 /* MC_CMD_SRIOV_IN msgrequest */
 #define    MC_CMD_SRIOV_IN_LEN 12
 #define       MC_CMD_SRIOV_IN_ENABLE_OFST 0
+#define       MC_CMD_SRIOV_IN_ENABLE_LEN 4
 #define       MC_CMD_SRIOV_IN_VI_BASE_OFST 4
+#define       MC_CMD_SRIOV_IN_VI_BASE_LEN 4
 #define       MC_CMD_SRIOV_IN_VF_COUNT_OFST 8
+#define       MC_CMD_SRIOV_IN_VF_COUNT_LEN 4
 
 /* MC_CMD_SRIOV_OUT msgresponse */
 #define    MC_CMD_SRIOV_OUT_LEN 8
 #define       MC_CMD_SRIOV_OUT_VI_SCALE_OFST 0
+#define       MC_CMD_SRIOV_OUT_VI_SCALE_LEN 4
 #define       MC_CMD_SRIOV_OUT_VF_TOTAL_OFST 4
+#define       MC_CMD_SRIOV_OUT_VF_TOTAL_LEN 4
 
 /* MC_CMD_MEMCPY_RECORD_TYPEDEF structuredef */
 #define    MC_CMD_MEMCPY_RECORD_TYPEDEF_LEN 32
 /* this is only used for the first record */
 #define       MC_CMD_MEMCPY_RECORD_TYPEDEF_NUM_RECORDS_OFST 0
+#define       MC_CMD_MEMCPY_RECORD_TYPEDEF_NUM_RECORDS_LEN 4
 #define       MC_CMD_MEMCPY_RECORD_TYPEDEF_NUM_RECORDS_LBN 0
 #define       MC_CMD_MEMCPY_RECORD_TYPEDEF_NUM_RECORDS_WIDTH 32
 #define       MC_CMD_MEMCPY_RECORD_TYPEDEF_TO_RID_OFST 4
+#define       MC_CMD_MEMCPY_RECORD_TYPEDEF_TO_RID_LEN 4
 #define       MC_CMD_MEMCPY_RECORD_TYPEDEF_TO_RID_LBN 32
 #define       MC_CMD_MEMCPY_RECORD_TYPEDEF_TO_RID_WIDTH 32
 #define       MC_CMD_MEMCPY_RECORD_TYPEDEF_TO_ADDR_OFST 8
@@ -3342,6 +4204,7 @@
 #define       MC_CMD_MEMCPY_RECORD_TYPEDEF_TO_ADDR_LBN 64
 #define       MC_CMD_MEMCPY_RECORD_TYPEDEF_TO_ADDR_WIDTH 64
 #define       MC_CMD_MEMCPY_RECORD_TYPEDEF_FROM_RID_OFST 16
+#define       MC_CMD_MEMCPY_RECORD_TYPEDEF_FROM_RID_LEN 4
 #define          MC_CMD_MEMCPY_RECORD_TYPEDEF_RID_INLINE 0x100 /* enum */
 #define       MC_CMD_MEMCPY_RECORD_TYPEDEF_FROM_RID_LBN 128
 #define       MC_CMD_MEMCPY_RECORD_TYPEDEF_FROM_RID_WIDTH 32
@@ -3352,6 +4215,7 @@
 #define       MC_CMD_MEMCPY_RECORD_TYPEDEF_FROM_ADDR_LBN 160
 #define       MC_CMD_MEMCPY_RECORD_TYPEDEF_FROM_ADDR_WIDTH 64
 #define       MC_CMD_MEMCPY_RECORD_TYPEDEF_LENGTH_OFST 28
+#define       MC_CMD_MEMCPY_RECORD_TYPEDEF_LENGTH_LEN 4
 #define       MC_CMD_MEMCPY_RECORD_TYPEDEF_LENGTH_LBN 224
 #define       MC_CMD_MEMCPY_RECORD_TYPEDEF_LENGTH_WIDTH 32
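Since MEMCPY records are fixed 32-byte structures laid out back to back (the
_LBN values above are bit positions within the whole record), the byte address
of a field in record i follows directly; a one-line sketch:

#include <stdint.h>

/* Address of a field inside record 'rec' of a MEMCPY record array. */
static uint8_t *memcpy_rec_field(uint8_t *base, unsigned int rec,
				 unsigned int field_ofst)
{
	return base + rec * MC_CMD_MEMCPY_RECORD_TYPEDEF_LEN + field_ofst;
}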
 
@@ -3403,10 +4267,12 @@
 /* MC_CMD_WOL_FILTER_SET_IN msgrequest */
 #define    MC_CMD_WOL_FILTER_SET_IN_LEN 192
 #define       MC_CMD_WOL_FILTER_SET_IN_FILTER_MODE_OFST 0
+#define       MC_CMD_WOL_FILTER_SET_IN_FILTER_MODE_LEN 4
 #define          MC_CMD_FILTER_MODE_SIMPLE    0x0 /* enum */
 #define          MC_CMD_FILTER_MODE_STRUCTURED 0xffffffff /* enum */
 /* A type value of 1 is unused. */
 #define       MC_CMD_WOL_FILTER_SET_IN_WOL_TYPE_OFST 4
+#define       MC_CMD_WOL_FILTER_SET_IN_WOL_TYPE_LEN 4
 /* enum: Magic */
 #define          MC_CMD_WOL_TYPE_MAGIC      0x0
 /* enum: MS Windows Magic */
@@ -3428,7 +4294,9 @@
 /* MC_CMD_WOL_FILTER_SET_IN_MAGIC msgrequest */
 #define    MC_CMD_WOL_FILTER_SET_IN_MAGIC_LEN 16
 /*            MC_CMD_WOL_FILTER_SET_IN_FILTER_MODE_OFST 0 */
+/*            MC_CMD_WOL_FILTER_SET_IN_FILTER_MODE_LEN 4 */
 /*            MC_CMD_WOL_FILTER_SET_IN_WOL_TYPE_OFST 4 */
+/*            MC_CMD_WOL_FILTER_SET_IN_WOL_TYPE_LEN 4 */
 #define       MC_CMD_WOL_FILTER_SET_IN_MAGIC_MAC_OFST 8
 #define       MC_CMD_WOL_FILTER_SET_IN_MAGIC_MAC_LEN 8
 #define       MC_CMD_WOL_FILTER_SET_IN_MAGIC_MAC_LO_OFST 8
@@ -3437,9 +4305,13 @@
 /* MC_CMD_WOL_FILTER_SET_IN_IPV4_SYN msgrequest */
 #define    MC_CMD_WOL_FILTER_SET_IN_IPV4_SYN_LEN 20
 /*            MC_CMD_WOL_FILTER_SET_IN_FILTER_MODE_OFST 0 */
+/*            MC_CMD_WOL_FILTER_SET_IN_FILTER_MODE_LEN 4 */
 /*            MC_CMD_WOL_FILTER_SET_IN_WOL_TYPE_OFST 4 */
+/*            MC_CMD_WOL_FILTER_SET_IN_WOL_TYPE_LEN 4 */
 #define       MC_CMD_WOL_FILTER_SET_IN_IPV4_SYN_SRC_IP_OFST 8
+#define       MC_CMD_WOL_FILTER_SET_IN_IPV4_SYN_SRC_IP_LEN 4
 #define       MC_CMD_WOL_FILTER_SET_IN_IPV4_SYN_DST_IP_OFST 12
+#define       MC_CMD_WOL_FILTER_SET_IN_IPV4_SYN_DST_IP_LEN 4
 #define       MC_CMD_WOL_FILTER_SET_IN_IPV4_SYN_SRC_PORT_OFST 16
 #define       MC_CMD_WOL_FILTER_SET_IN_IPV4_SYN_SRC_PORT_LEN 2
 #define       MC_CMD_WOL_FILTER_SET_IN_IPV4_SYN_DST_PORT_OFST 18
@@ -3448,7 +4320,9 @@
 /* MC_CMD_WOL_FILTER_SET_IN_IPV6_SYN msgrequest */
 #define    MC_CMD_WOL_FILTER_SET_IN_IPV6_SYN_LEN 44
 /*            MC_CMD_WOL_FILTER_SET_IN_FILTER_MODE_OFST 0 */
+/*            MC_CMD_WOL_FILTER_SET_IN_FILTER_MODE_LEN 4 */
 /*            MC_CMD_WOL_FILTER_SET_IN_WOL_TYPE_OFST 4 */
+/*            MC_CMD_WOL_FILTER_SET_IN_WOL_TYPE_LEN 4 */
 #define       MC_CMD_WOL_FILTER_SET_IN_IPV6_SYN_SRC_IP_OFST 8
 #define       MC_CMD_WOL_FILTER_SET_IN_IPV6_SYN_SRC_IP_LEN 16
 #define       MC_CMD_WOL_FILTER_SET_IN_IPV6_SYN_DST_IP_OFST 24
@@ -3461,7 +4335,9 @@
 /* MC_CMD_WOL_FILTER_SET_IN_BITMAP msgrequest */
 #define    MC_CMD_WOL_FILTER_SET_IN_BITMAP_LEN 187
 /*            MC_CMD_WOL_FILTER_SET_IN_FILTER_MODE_OFST 0 */
+/*            MC_CMD_WOL_FILTER_SET_IN_FILTER_MODE_LEN 4 */
 /*            MC_CMD_WOL_FILTER_SET_IN_WOL_TYPE_OFST 4 */
+/*            MC_CMD_WOL_FILTER_SET_IN_WOL_TYPE_LEN 4 */
 #define       MC_CMD_WOL_FILTER_SET_IN_BITMAP_MASK_OFST 8
 #define       MC_CMD_WOL_FILTER_SET_IN_BITMAP_MASK_LEN 48
 #define       MC_CMD_WOL_FILTER_SET_IN_BITMAP_BITMAP_OFST 56
@@ -3476,8 +4352,11 @@
 /* MC_CMD_WOL_FILTER_SET_IN_LINK msgrequest */
 #define    MC_CMD_WOL_FILTER_SET_IN_LINK_LEN 12
 /*            MC_CMD_WOL_FILTER_SET_IN_FILTER_MODE_OFST 0 */
+/*            MC_CMD_WOL_FILTER_SET_IN_FILTER_MODE_LEN 4 */
 /*            MC_CMD_WOL_FILTER_SET_IN_WOL_TYPE_OFST 4 */
+/*            MC_CMD_WOL_FILTER_SET_IN_WOL_TYPE_LEN 4 */
 #define       MC_CMD_WOL_FILTER_SET_IN_LINK_MASK_OFST 8
+#define       MC_CMD_WOL_FILTER_SET_IN_LINK_MASK_LEN 4
 #define        MC_CMD_WOL_FILTER_SET_IN_LINK_UP_LBN 0
 #define        MC_CMD_WOL_FILTER_SET_IN_LINK_UP_WIDTH 1
 #define        MC_CMD_WOL_FILTER_SET_IN_LINK_DOWN_LBN 1
@@ -3486,6 +4365,7 @@
 /* MC_CMD_WOL_FILTER_SET_OUT msgresponse */
 #define    MC_CMD_WOL_FILTER_SET_OUT_LEN 4
 #define       MC_CMD_WOL_FILTER_SET_OUT_FILTER_ID_OFST 0
+#define       MC_CMD_WOL_FILTER_SET_OUT_FILTER_ID_LEN 4
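The WOL_FILTER_SET request is effectively a tagged union: FILTER_MODE and
WOL_TYPE select which payload layout follows. A hedged sketch for an IPv4 SYN
wake filter (the WOL_TYPE value 0x3 is assumed from the full enum list, which
this hunk elides; little-endian host assumed):

#include <stdint.h>
#include <string.h>

static void build_wol_ipv4_syn(uint8_t *inbuf, uint32_t src_ip,
			       uint32_t dst_ip, uint16_t src_port,
			       uint16_t dst_port)
{
	uint32_t mode = MC_CMD_FILTER_MODE_SIMPLE;
	uint32_t type = 0x3;	/* MC_CMD_WOL_TYPE_IPV4_SYN (assumed) */

	memcpy(inbuf + MC_CMD_WOL_FILTER_SET_IN_FILTER_MODE_OFST, &mode, 4);
	memcpy(inbuf + MC_CMD_WOL_FILTER_SET_IN_WOL_TYPE_OFST, &type, 4);
	memcpy(inbuf + MC_CMD_WOL_FILTER_SET_IN_IPV4_SYN_SRC_IP_OFST,
	       &src_ip, 4);
	memcpy(inbuf + MC_CMD_WOL_FILTER_SET_IN_IPV4_SYN_DST_IP_OFST,
	       &dst_ip, 4);
	memcpy(inbuf + MC_CMD_WOL_FILTER_SET_IN_IPV4_SYN_SRC_PORT_OFST,
	       &src_port, 2);
	memcpy(inbuf + MC_CMD_WOL_FILTER_SET_IN_IPV4_SYN_DST_PORT_OFST,
	       &dst_port, 2);
}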
 
 
 /***********************************/
@@ -3499,6 +4379,7 @@
 /* MC_CMD_WOL_FILTER_REMOVE_IN msgrequest */
 #define    MC_CMD_WOL_FILTER_REMOVE_IN_LEN 4
 #define       MC_CMD_WOL_FILTER_REMOVE_IN_FILTER_ID_OFST 0
+#define       MC_CMD_WOL_FILTER_REMOVE_IN_FILTER_ID_LEN 4
 
 /* MC_CMD_WOL_FILTER_REMOVE_OUT msgresponse */
 #define    MC_CMD_WOL_FILTER_REMOVE_OUT_LEN 0
@@ -3516,6 +4397,7 @@
 /* MC_CMD_WOL_FILTER_RESET_IN msgrequest */
 #define    MC_CMD_WOL_FILTER_RESET_IN_LEN 4
 #define       MC_CMD_WOL_FILTER_RESET_IN_MASK_OFST 0
+#define       MC_CMD_WOL_FILTER_RESET_IN_MASK_LEN 4
 #define          MC_CMD_WOL_FILTER_RESET_IN_WAKE_FILTERS 0x1 /* enum */
 #define          MC_CMD_WOL_FILTER_RESET_IN_LIGHTSOUT_OFFLOADS 0x2 /* enum */
 
@@ -3556,6 +4438,7 @@
 #define    MC_CMD_NVRAM_TYPES_OUT_LEN 4
 /* Bit mask of supported types. */
 #define       MC_CMD_NVRAM_TYPES_OUT_TYPES_OFST 0
+#define       MC_CMD_NVRAM_TYPES_OUT_TYPES_LEN 4
 /* enum: Disabled callisto. */
 #define          MC_CMD_NVRAM_TYPE_DISABLED_CALLISTO 0x0
 /* enum: MC firmware. */
@@ -3612,47 +4495,65 @@
 /* MC_CMD_NVRAM_INFO_IN msgrequest */
 #define    MC_CMD_NVRAM_INFO_IN_LEN 4
 #define       MC_CMD_NVRAM_INFO_IN_TYPE_OFST 0
+#define       MC_CMD_NVRAM_INFO_IN_TYPE_LEN 4
 /*            Enum values, see field(s): */
 /*               MC_CMD_NVRAM_TYPES/MC_CMD_NVRAM_TYPES_OUT/TYPES */
 
 /* MC_CMD_NVRAM_INFO_OUT msgresponse */
 #define    MC_CMD_NVRAM_INFO_OUT_LEN 24
 #define       MC_CMD_NVRAM_INFO_OUT_TYPE_OFST 0
+#define       MC_CMD_NVRAM_INFO_OUT_TYPE_LEN 4
 /*            Enum values, see field(s): */
 /*               MC_CMD_NVRAM_TYPES/MC_CMD_NVRAM_TYPES_OUT/TYPES */
 #define       MC_CMD_NVRAM_INFO_OUT_SIZE_OFST 4
+#define       MC_CMD_NVRAM_INFO_OUT_SIZE_LEN 4
 #define       MC_CMD_NVRAM_INFO_OUT_ERASESIZE_OFST 8
+#define       MC_CMD_NVRAM_INFO_OUT_ERASESIZE_LEN 4
 #define       MC_CMD_NVRAM_INFO_OUT_FLAGS_OFST 12
+#define       MC_CMD_NVRAM_INFO_OUT_FLAGS_LEN 4
 #define        MC_CMD_NVRAM_INFO_OUT_PROTECTED_LBN 0
 #define        MC_CMD_NVRAM_INFO_OUT_PROTECTED_WIDTH 1
 #define        MC_CMD_NVRAM_INFO_OUT_TLV_LBN 1
 #define        MC_CMD_NVRAM_INFO_OUT_TLV_WIDTH 1
+#define        MC_CMD_NVRAM_INFO_OUT_READ_ONLY_LBN 5
+#define        MC_CMD_NVRAM_INFO_OUT_READ_ONLY_WIDTH 1
 #define        MC_CMD_NVRAM_INFO_OUT_CMAC_LBN 6
 #define        MC_CMD_NVRAM_INFO_OUT_CMAC_WIDTH 1
 #define        MC_CMD_NVRAM_INFO_OUT_A_B_LBN 7
 #define        MC_CMD_NVRAM_INFO_OUT_A_B_WIDTH 1
 #define       MC_CMD_NVRAM_INFO_OUT_PHYSDEV_OFST 16
+#define       MC_CMD_NVRAM_INFO_OUT_PHYSDEV_LEN 4
 #define       MC_CMD_NVRAM_INFO_OUT_PHYSADDR_OFST 20
+#define       MC_CMD_NVRAM_INFO_OUT_PHYSADDR_LEN 4
 
 /* MC_CMD_NVRAM_INFO_V2_OUT msgresponse */
 #define    MC_CMD_NVRAM_INFO_V2_OUT_LEN 28
 #define       MC_CMD_NVRAM_INFO_V2_OUT_TYPE_OFST 0
+#define       MC_CMD_NVRAM_INFO_V2_OUT_TYPE_LEN 4
 /*            Enum values, see field(s): */
 /*               MC_CMD_NVRAM_TYPES/MC_CMD_NVRAM_TYPES_OUT/TYPES */
 #define       MC_CMD_NVRAM_INFO_V2_OUT_SIZE_OFST 4
+#define       MC_CMD_NVRAM_INFO_V2_OUT_SIZE_LEN 4
 #define       MC_CMD_NVRAM_INFO_V2_OUT_ERASESIZE_OFST 8
+#define       MC_CMD_NVRAM_INFO_V2_OUT_ERASESIZE_LEN 4
 #define       MC_CMD_NVRAM_INFO_V2_OUT_FLAGS_OFST 12
+#define       MC_CMD_NVRAM_INFO_V2_OUT_FLAGS_LEN 4
 #define        MC_CMD_NVRAM_INFO_V2_OUT_PROTECTED_LBN 0
 #define        MC_CMD_NVRAM_INFO_V2_OUT_PROTECTED_WIDTH 1
 #define        MC_CMD_NVRAM_INFO_V2_OUT_TLV_LBN 1
 #define        MC_CMD_NVRAM_INFO_V2_OUT_TLV_WIDTH 1
+#define        MC_CMD_NVRAM_INFO_V2_OUT_READ_ONLY_LBN 5
+#define        MC_CMD_NVRAM_INFO_V2_OUT_READ_ONLY_WIDTH 1
 #define        MC_CMD_NVRAM_INFO_V2_OUT_A_B_LBN 7
 #define        MC_CMD_NVRAM_INFO_V2_OUT_A_B_WIDTH 1
 #define       MC_CMD_NVRAM_INFO_V2_OUT_PHYSDEV_OFST 16
+#define       MC_CMD_NVRAM_INFO_V2_OUT_PHYSDEV_LEN 4
 #define       MC_CMD_NVRAM_INFO_V2_OUT_PHYSADDR_OFST 20
+#define       MC_CMD_NVRAM_INFO_V2_OUT_PHYSADDR_LEN 4
 /* Writes must be multiples of this size. Added to support the MUM on Sorrento.
  */
 #define       MC_CMD_NVRAM_INFO_V2_OUT_WRITESIZE_OFST 24
+#define       MC_CMD_NVRAM_INFO_V2_OUT_WRITESIZE_LEN 4
 
 
 /***********************************/
@@ -3670,6 +4571,7 @@
  */
 #define    MC_CMD_NVRAM_UPDATE_START_IN_LEN 4
 #define       MC_CMD_NVRAM_UPDATE_START_IN_TYPE_OFST 0
+#define       MC_CMD_NVRAM_UPDATE_START_IN_TYPE_LEN 4
 /*            Enum values, see field(s): */
 /*               MC_CMD_NVRAM_TYPES/MC_CMD_NVRAM_TYPES_OUT/TYPES */
 
@@ -3680,9 +4582,11 @@
  */
 #define    MC_CMD_NVRAM_UPDATE_START_V2_IN_LEN 8
 #define       MC_CMD_NVRAM_UPDATE_START_V2_IN_TYPE_OFST 0
+#define       MC_CMD_NVRAM_UPDATE_START_V2_IN_TYPE_LEN 4
 /*            Enum values, see field(s): */
 /*               MC_CMD_NVRAM_TYPES/MC_CMD_NVRAM_TYPES_OUT/TYPES */
 #define       MC_CMD_NVRAM_UPDATE_START_V2_IN_FLAGS_OFST 4
+#define       MC_CMD_NVRAM_UPDATE_START_V2_IN_FLAGS_LEN 4
 #define        MC_CMD_NVRAM_UPDATE_START_V2_IN_FLAG_REPORT_VERIFY_RESULT_LBN 0
 #define        MC_CMD_NVRAM_UPDATE_START_V2_IN_FLAG_REPORT_VERIFY_RESULT_WIDTH 1
 
@@ -3703,20 +4607,26 @@
 /* MC_CMD_NVRAM_READ_IN msgrequest */
 #define    MC_CMD_NVRAM_READ_IN_LEN 12
 #define       MC_CMD_NVRAM_READ_IN_TYPE_OFST 0
+#define       MC_CMD_NVRAM_READ_IN_TYPE_LEN 4
 /*            Enum values, see field(s): */
 /*               MC_CMD_NVRAM_TYPES/MC_CMD_NVRAM_TYPES_OUT/TYPES */
 #define       MC_CMD_NVRAM_READ_IN_OFFSET_OFST 4
+#define       MC_CMD_NVRAM_READ_IN_OFFSET_LEN 4
 /* amount to read in bytes */
 #define       MC_CMD_NVRAM_READ_IN_LENGTH_OFST 8
+#define       MC_CMD_NVRAM_READ_IN_LENGTH_LEN 4
 
 /* MC_CMD_NVRAM_READ_IN_V2 msgrequest */
 #define    MC_CMD_NVRAM_READ_IN_V2_LEN 16
 #define       MC_CMD_NVRAM_READ_IN_V2_TYPE_OFST 0
+#define       MC_CMD_NVRAM_READ_IN_V2_TYPE_LEN 4
 /*            Enum values, see field(s): */
 /*               MC_CMD_NVRAM_TYPES/MC_CMD_NVRAM_TYPES_OUT/TYPES */
 #define       MC_CMD_NVRAM_READ_IN_V2_OFFSET_OFST 4
+#define       MC_CMD_NVRAM_READ_IN_V2_OFFSET_LEN 4
 /* amount to read in bytes */
 #define       MC_CMD_NVRAM_READ_IN_V2_LENGTH_OFST 8
+#define       MC_CMD_NVRAM_READ_IN_V2_LENGTH_LEN 4
 /* Optional control info. If a partition is stored with an A/B versioning
  * scheme (i.e. in more than one physical partition in NVRAM) the host can set
  * this to control which underlying physical partition is used to read data
@@ -3726,6 +4636,7 @@
  * verifying by reading with MODE=TARGET_BACKUP.
  */
 #define       MC_CMD_NVRAM_READ_IN_V2_MODE_OFST 12
+#define       MC_CMD_NVRAM_READ_IN_V2_MODE_LEN 4
 /* enum: Same as omitting MODE: caller sees data in current partition unless it
  * holds the write lock in which case it sees data in the partition it is
  * updating.
@@ -3765,10 +4676,13 @@
 #define    MC_CMD_NVRAM_WRITE_IN_LENMAX 252
 #define    MC_CMD_NVRAM_WRITE_IN_LEN(num) (12+1*(num))
 #define       MC_CMD_NVRAM_WRITE_IN_TYPE_OFST 0
+#define       MC_CMD_NVRAM_WRITE_IN_TYPE_LEN 4
 /*            Enum values, see field(s): */
 /*               MC_CMD_NVRAM_TYPES/MC_CMD_NVRAM_TYPES_OUT/TYPES */
 #define       MC_CMD_NVRAM_WRITE_IN_OFFSET_OFST 4
+#define       MC_CMD_NVRAM_WRITE_IN_OFFSET_LEN 4
 #define       MC_CMD_NVRAM_WRITE_IN_LENGTH_OFST 8
+#define       MC_CMD_NVRAM_WRITE_IN_LENGTH_LEN 4
 #define       MC_CMD_NVRAM_WRITE_IN_WRITE_BUFFER_OFST 12
 #define       MC_CMD_NVRAM_WRITE_IN_WRITE_BUFFER_LEN 1
 #define       MC_CMD_NVRAM_WRITE_IN_WRITE_BUFFER_MINNUM 1
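The LEN(num) macro above gives the total request size for a variable-length
write: a 12-byte fixed header plus num payload bytes. A hedged sketch framing
one write chunk (splitting into LENMAX-sized, erase-aligned chunks is the
caller's job; little-endian host assumed):

#include <stddef.h>
#include <stdint.h>
#include <string.h>

static size_t build_nvram_write(uint8_t *inbuf, uint32_t type,
				uint32_t offset, const uint8_t *data,
				uint32_t len)
{
	memcpy(inbuf + MC_CMD_NVRAM_WRITE_IN_TYPE_OFST, &type, 4);
	memcpy(inbuf + MC_CMD_NVRAM_WRITE_IN_OFFSET_OFST, &offset, 4);
	memcpy(inbuf + MC_CMD_NVRAM_WRITE_IN_LENGTH_OFST, &len, 4);
	memcpy(inbuf + MC_CMD_NVRAM_WRITE_IN_WRITE_BUFFER_OFST, data, len);
	return MC_CMD_NVRAM_WRITE_IN_LEN(len);	/* 12 + 1*len */
}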
@@ -3791,10 +4705,13 @@
 /* MC_CMD_NVRAM_ERASE_IN msgrequest */
 #define    MC_CMD_NVRAM_ERASE_IN_LEN 12
 #define       MC_CMD_NVRAM_ERASE_IN_TYPE_OFST 0
+#define       MC_CMD_NVRAM_ERASE_IN_TYPE_LEN 4
 /*            Enum values, see field(s): */
 /*               MC_CMD_NVRAM_TYPES/MC_CMD_NVRAM_TYPES_OUT/TYPES */
 #define       MC_CMD_NVRAM_ERASE_IN_OFFSET_OFST 4
+#define       MC_CMD_NVRAM_ERASE_IN_OFFSET_LEN 4
 #define       MC_CMD_NVRAM_ERASE_IN_LENGTH_OFST 8
+#define       MC_CMD_NVRAM_ERASE_IN_LENGTH_LEN 4
 
 /* MC_CMD_NVRAM_ERASE_OUT msgresponse */
 #define    MC_CMD_NVRAM_ERASE_OUT_LEN 0
@@ -3815,9 +4732,11 @@
  */
 #define    MC_CMD_NVRAM_UPDATE_FINISH_IN_LEN 8
 #define       MC_CMD_NVRAM_UPDATE_FINISH_IN_TYPE_OFST 0
+#define       MC_CMD_NVRAM_UPDATE_FINISH_IN_TYPE_LEN 4
 /*            Enum values, see field(s): */
 /*               MC_CMD_NVRAM_TYPES/MC_CMD_NVRAM_TYPES_OUT/TYPES */
 #define       MC_CMD_NVRAM_UPDATE_FINISH_IN_REBOOT_OFST 4
+#define       MC_CMD_NVRAM_UPDATE_FINISH_IN_REBOOT_LEN 4
 
 /* MC_CMD_NVRAM_UPDATE_FINISH_V2_IN msgrequest: Extended NVRAM_UPDATE_FINISH
  * request with additional flags indicating version of NVRAM_UPDATE commands in
@@ -3826,10 +4745,13 @@
  */
 #define    MC_CMD_NVRAM_UPDATE_FINISH_V2_IN_LEN 12
 #define       MC_CMD_NVRAM_UPDATE_FINISH_V2_IN_TYPE_OFST 0
+#define       MC_CMD_NVRAM_UPDATE_FINISH_V2_IN_TYPE_LEN 4
 /*            Enum values, see field(s): */
 /*               MC_CMD_NVRAM_TYPES/MC_CMD_NVRAM_TYPES_OUT/TYPES */
 #define       MC_CMD_NVRAM_UPDATE_FINISH_V2_IN_REBOOT_OFST 4
+#define       MC_CMD_NVRAM_UPDATE_FINISH_V2_IN_REBOOT_LEN 4
 #define       MC_CMD_NVRAM_UPDATE_FINISH_V2_IN_FLAGS_OFST 8
+#define       MC_CMD_NVRAM_UPDATE_FINISH_V2_IN_FLAGS_LEN 4
 #define        MC_CMD_NVRAM_UPDATE_FINISH_V2_IN_FLAG_REPORT_VERIFY_RESULT_LBN 0
 #define        MC_CMD_NVRAM_UPDATE_FINISH_V2_IN_FLAG_REPORT_VERIFY_RESULT_WIDTH 1
 
@@ -3848,16 +4770,19 @@
  * This process takes a few seconds to complete, so it is likely to take more
  * than the MCDI timeout. Hence signature verification is initiated when
  * MC_CMD_NVRAM_UPDATE_FINISH_V2_IN is received by the firmware; however, the
- * MCDI command returns immediately with error code EAGAIN. Subsequent
- * NVRAM_UPDATE_FINISH_V2_IN requests also return EAGAIN if the verification is
- * in progress. Once the verification has completed, this response payload
- * includes the results of the signature verification. Note that the nvram lock
- * in firmware is only released after the verification has completed and the
- * host has read back the result code from firmware.
+ * MCDI command is run in a background MCDI processing thread. This response
+ * payload includes the results of the signature verification. Note that the
+ * per-partition nvram lock in firmware is only released after the verification
+ * has completed.
  */
 #define    MC_CMD_NVRAM_UPDATE_FINISH_V2_OUT_LEN 4
 /* Result of nvram update completion processing */
 #define       MC_CMD_NVRAM_UPDATE_FINISH_V2_OUT_RESULT_CODE_OFST 0
+#define       MC_CMD_NVRAM_UPDATE_FINISH_V2_OUT_RESULT_CODE_LEN 4
+/* enum: Invalid return code; only non-zero values are defined. Defined as
+ * unknown for backwards compatibility with NVRAM_UPDATE_FINISH_OUT.
+ */
+#define          MC_CMD_NVRAM_VERIFY_RC_UNKNOWN 0x0
 /* enum: Verify succeeded without any errors. */
 #define          MC_CMD_NVRAM_VERIFY_RC_SUCCESS 0x1
 /* enum: CMS format verification failed due to an internal error. */
@@ -3884,6 +4809,12 @@
  * Trusted approver's list.
  */
 #define          MC_CMD_NVRAM_VERIFY_RC_NO_SIGNATURE_MATCH 0xb
+/* enum: The image contains a test-signed certificate, but the adapter accepts
+ * only production signed images.
+ */
+#define          MC_CMD_NVRAM_VERIFY_RC_REJECT_TEST_SIGNED 0xc
+/* enum: The image has a lower security level than the current firmware. */
+#define          MC_CMD_NVRAM_VERIFY_RC_SECURITY_LEVEL_DOWNGRADE 0xd
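How a driver reacts to these result codes is policy; a hedged sketch of one
plausible errno mapping (illustrative only, not the sfc driver's actual
handling):

#include <errno.h>
#include <stdint.h>

static int nvram_verify_rc_to_errno(uint32_t rc)
{
	switch (rc) {
	case MC_CMD_NVRAM_VERIFY_RC_UNKNOWN:	/* legacy firmware */
	case MC_CMD_NVRAM_VERIFY_RC_SUCCESS:
		return 0;
	case MC_CMD_NVRAM_VERIFY_RC_REJECT_TEST_SIGNED:
	case MC_CMD_NVRAM_VERIFY_RC_SECURITY_LEVEL_DOWNGRADE:
		return -EPERM;
	default:
		return -EIO;	/* any other verification failure */
	}
}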
 
 
 /***********************************/
@@ -3911,6 +4842,7 @@
 /* MC_CMD_REBOOT_IN msgrequest */
 #define    MC_CMD_REBOOT_IN_LEN 4
 #define       MC_CMD_REBOOT_IN_FLAGS_OFST 0
+#define       MC_CMD_REBOOT_IN_FLAGS_LEN 4
 #define          MC_CMD_REBOOT_FLAGS_AFTER_ASSERTION 0x1 /* enum */
 
 /* MC_CMD_REBOOT_OUT msgresponse */
@@ -3947,11 +4879,12 @@
  */
 #define MC_CMD_REBOOT_MODE 0x3f
 
-#define MC_CMD_0x3f_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+#define MC_CMD_0x3f_PRIVILEGE_CTG SRIOV_CTG_INSECURE
 
 /* MC_CMD_REBOOT_MODE_IN msgrequest */
 #define    MC_CMD_REBOOT_MODE_IN_LEN 4
 #define       MC_CMD_REBOOT_MODE_IN_VALUE_OFST 0
+#define       MC_CMD_REBOOT_MODE_IN_VALUE_LEN 4
 /* enum: Normal. */
 #define          MC_CMD_REBOOT_MODE_NORMAL 0x0
 /* enum: Power-on Reset. */
@@ -3966,6 +4899,7 @@
 /* MC_CMD_REBOOT_MODE_OUT msgresponse */
 #define    MC_CMD_REBOOT_MODE_OUT_LEN 4
 #define       MC_CMD_REBOOT_MODE_OUT_VALUE_OFST 0
+#define       MC_CMD_REBOOT_MODE_OUT_VALUE_LEN 4
 
 
 /***********************************/
@@ -4001,7 +4935,7 @@
  */
 #define MC_CMD_SENSOR_INFO 0x41
 
-#define MC_CMD_0x41_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+#define MC_CMD_0x41_PRIVILEGE_CTG SRIOV_CTG_GENERAL
 
 /* MC_CMD_SENSOR_INFO_IN msgrequest */
 #define    MC_CMD_SENSOR_INFO_IN_LEN 0
@@ -4015,12 +4949,14 @@
  * Page 1 contains sensors 32 to 62 (sensor 63 is the next page bit), etc.
  */
 #define       MC_CMD_SENSOR_INFO_EXT_IN_PAGE_OFST 0
+#define       MC_CMD_SENSOR_INFO_EXT_IN_PAGE_LEN 4
 
 /* MC_CMD_SENSOR_INFO_OUT msgresponse */
 #define    MC_CMD_SENSOR_INFO_OUT_LENMIN 4
 #define    MC_CMD_SENSOR_INFO_OUT_LENMAX 252
 #define    MC_CMD_SENSOR_INFO_OUT_LEN(num) (4+8*(num))
 #define       MC_CMD_SENSOR_INFO_OUT_MASK_OFST 0
+#define       MC_CMD_SENSOR_INFO_OUT_MASK_LEN 4
 /* enum: Controller temperature: degC */
 #define          MC_CMD_SENSOR_CONTROLLER_TEMP  0x0
 /* enum: Phy common temperature: degC */
@@ -4183,6 +5119,20 @@
 #define          MC_CMD_SENSOR_BOARD_FRONT_TEMP  0x4f
 /* enum: Board temperature (back): degC */
 #define          MC_CMD_SENSOR_BOARD_BACK_TEMP  0x50
+/* enum: 1.8v power current: mA */
+#define          MC_CMD_SENSOR_IN_I1V8  0x51
+/* enum: 2.5v power current: mA */
+#define          MC_CMD_SENSOR_IN_I2V5  0x52
+/* enum: 3.3v power current: mA */
+#define          MC_CMD_SENSOR_IN_I3V3  0x53
+/* enum: 12v power current: mA */
+#define          MC_CMD_SENSOR_IN_I12V0  0x54
+/* enum: 1.3v power: mV */
+#define          MC_CMD_SENSOR_IN_1V3  0x55
+/* enum: 1.3v power current: mA */
+#define          MC_CMD_SENSOR_IN_I1V3  0x56
+/* enum: Not a sensor: reserved for the next page flag */
+#define          MC_CMD_SENSOR_PAGE2_NEXT  0x5f
 /* MC_CMD_SENSOR_INFO_ENTRY_TYPEDEF */
 #define       MC_CMD_SENSOR_ENTRY_OFST 4
 #define       MC_CMD_SENSOR_ENTRY_LEN 8
@@ -4196,6 +5146,7 @@
 #define    MC_CMD_SENSOR_INFO_EXT_OUT_LENMAX 252
 #define    MC_CMD_SENSOR_INFO_EXT_OUT_LEN(num) (4+8*(num))
 #define       MC_CMD_SENSOR_INFO_EXT_OUT_MASK_OFST 0
+#define       MC_CMD_SENSOR_INFO_EXT_OUT_MASK_LEN 4
 /*            Enum values, see field(s): */
 /*               MC_CMD_SENSOR_INFO_OUT */
 #define        MC_CMD_SENSOR_INFO_EXT_OUT_NEXT_PAGE_LBN 31
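The NEXT_PAGE bit makes the sensor mask a paged bitmap: bit 31 of each returned
page says another page follows. A hedged sketch of the walk
(mcdi_sensor_info_ext() is a hypothetical helper returning the MASK dword for a
given page):

#include <stdint.h>

uint32_t mcdi_sensor_info_ext(uint32_t page);	/* hypothetical */

static void walk_sensor_pages(void)
{
	uint32_t page = 0;
	uint32_t mask;

	do {
		mask = mcdi_sensor_info_ext(page++);
		/* Bits 0..30 describe sensors on this page; bit 31 only
		 * signals that another page follows. */
	} while (mask & (1u << MC_CMD_SENSOR_INFO_EXT_OUT_NEXT_PAGE_LBN));
}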
@@ -4247,7 +5198,7 @@
  */
 #define MC_CMD_READ_SENSORS 0x42
 
-#define MC_CMD_0x42_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+#define MC_CMD_0x42_PRIVILEGE_CTG SRIOV_CTG_GENERAL
 
 /* MC_CMD_READ_SENSORS_IN msgrequest */
 #define    MC_CMD_READ_SENSORS_IN_LEN 8
@@ -4266,6 +5217,7 @@
 #define       MC_CMD_READ_SENSORS_EXT_IN_DMA_ADDR_HI_OFST 4
 /* Size in bytes of host buffer. */
 #define       MC_CMD_READ_SENSORS_EXT_IN_LENGTH_OFST 8
+#define       MC_CMD_READ_SENSORS_EXT_IN_LENGTH_LEN 4
 
 /* MC_CMD_READ_SENSORS_OUT msgresponse */
 #define    MC_CMD_READ_SENSORS_OUT_LEN 0
@@ -4319,6 +5271,7 @@
 /* MC_CMD_GET_PHY_STATE_OUT msgresponse */
 #define    MC_CMD_GET_PHY_STATE_OUT_LEN 4
 #define       MC_CMD_GET_PHY_STATE_OUT_STATE_OFST 0
+#define       MC_CMD_GET_PHY_STATE_OUT_STATE_LEN 4
 /* enum: Ok. */
 #define          MC_CMD_PHY_STATE_OK 0x1
 /* enum: Faulty. */
@@ -4355,6 +5308,7 @@
 /* MC_CMD_WOL_FILTER_GET_OUT msgresponse */
 #define    MC_CMD_WOL_FILTER_GET_OUT_LEN 4
 #define       MC_CMD_WOL_FILTER_GET_OUT_FILTER_ID_OFST 0
+#define       MC_CMD_WOL_FILTER_GET_OUT_FILTER_ID_LEN 4
 
 
 /***********************************/
@@ -4371,6 +5325,7 @@
 #define    MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_LENMAX 252
 #define    MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_LEN(num) (4+4*(num))
 #define       MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_PROTOCOL_OFST 0
+#define       MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_PROTOCOL_LEN 4
 #define          MC_CMD_LIGHTSOUT_OFFLOAD_PROTOCOL_ARP 0x1 /* enum */
 #define          MC_CMD_LIGHTSOUT_OFFLOAD_PROTOCOL_NS  0x2 /* enum */
 #define       MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_DATA_OFST 4
@@ -4381,13 +5336,16 @@
 /* MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_ARP msgrequest */
 #define    MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_ARP_LEN 14
 /*            MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_PROTOCOL_OFST 0 */
+/*            MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_PROTOCOL_LEN 4 */
 #define       MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_ARP_MAC_OFST 4
 #define       MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_ARP_MAC_LEN 6
 #define       MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_ARP_IP_OFST 10
+#define       MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_ARP_IP_LEN 4
 
 /* MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_NS msgrequest */
 #define    MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_NS_LEN 42
 /*            MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_PROTOCOL_OFST 0 */
+/*            MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_PROTOCOL_LEN 4 */
 #define       MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_NS_MAC_OFST 4
 #define       MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_NS_MAC_LEN 6
 #define       MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_NS_SNIPV6_OFST 10
@@ -4398,6 +5356,7 @@
 /* MC_CMD_ADD_LIGHTSOUT_OFFLOAD_OUT msgresponse */
 #define    MC_CMD_ADD_LIGHTSOUT_OFFLOAD_OUT_LEN 4
 #define       MC_CMD_ADD_LIGHTSOUT_OFFLOAD_OUT_FILTER_ID_OFST 0
+#define       MC_CMD_ADD_LIGHTSOUT_OFFLOAD_OUT_FILTER_ID_LEN 4
 
 
 /***********************************/
@@ -4412,7 +5371,9 @@
 /* MC_CMD_REMOVE_LIGHTSOUT_OFFLOAD_IN msgrequest */
 #define    MC_CMD_REMOVE_LIGHTSOUT_OFFLOAD_IN_LEN 8
 #define       MC_CMD_REMOVE_LIGHTSOUT_OFFLOAD_IN_PROTOCOL_OFST 0
+#define       MC_CMD_REMOVE_LIGHTSOUT_OFFLOAD_IN_PROTOCOL_LEN 4
 #define       MC_CMD_REMOVE_LIGHTSOUT_OFFLOAD_IN_FILTER_ID_OFST 4
+#define       MC_CMD_REMOVE_LIGHTSOUT_OFFLOAD_IN_FILTER_ID_LEN 4
 
 /* MC_CMD_REMOVE_LIGHTSOUT_OFFLOAD_OUT msgresponse */
 #define    MC_CMD_REMOVE_LIGHTSOUT_OFFLOAD_OUT_LEN 0
@@ -4451,6 +5412,7 @@
 #define    MC_CMD_TESTASSERT_V2_IN_LEN 4
 /* How to provoke the assertion */
 #define       MC_CMD_TESTASSERT_V2_IN_TYPE_OFST 0
+#define       MC_CMD_TESTASSERT_V2_IN_TYPE_LEN 4
 /* enum: Assert using the FAIL_ASSERTION_WITH_USEFUL_VALUES macro. Unless
  * you're testing firmware, this is what you want.
  */
@@ -4486,6 +5448,7 @@
 #define    MC_CMD_WORKAROUND_IN_LEN 8
 /* The enums here must correspond with those in MC_CMD_GET_WORKAROUND. */
 #define       MC_CMD_WORKAROUND_IN_TYPE_OFST 0
+#define       MC_CMD_WORKAROUND_IN_TYPE_LEN 4
 /* enum: Bug 17230 work around. */
 #define          MC_CMD_WORKAROUND_BUG17230 0x1
 /* enum: Bug 35388 work around (unsafe EVQ writes). */
@@ -4514,6 +5477,7 @@
  * the workaround
  */
 #define       MC_CMD_WORKAROUND_IN_ENABLED_OFST 4
+#define       MC_CMD_WORKAROUND_IN_ENABLED_LEN 4
 
 /* MC_CMD_WORKAROUND_OUT msgresponse */
 #define    MC_CMD_WORKAROUND_OUT_LEN 0
@@ -4523,6 +5487,7 @@
  */
 #define    MC_CMD_WORKAROUND_EXT_OUT_LEN 4
 #define       MC_CMD_WORKAROUND_EXT_OUT_FLAGS_OFST 0
+#define       MC_CMD_WORKAROUND_EXT_OUT_FLAGS_LEN 4
 #define        MC_CMD_WORKAROUND_EXT_OUT_FLR_DONE_LBN 0
 #define        MC_CMD_WORKAROUND_EXT_OUT_FLR_DONE_WIDTH 1
 
@@ -4543,6 +5508,7 @@
 /* MC_CMD_GET_PHY_MEDIA_INFO_IN msgrequest */
 #define    MC_CMD_GET_PHY_MEDIA_INFO_IN_LEN 4
 #define       MC_CMD_GET_PHY_MEDIA_INFO_IN_PAGE_OFST 0
+#define       MC_CMD_GET_PHY_MEDIA_INFO_IN_PAGE_LEN 4
 
 /* MC_CMD_GET_PHY_MEDIA_INFO_OUT msgresponse */
 #define    MC_CMD_GET_PHY_MEDIA_INFO_OUT_LENMIN 5
@@ -4550,6 +5516,7 @@
 #define    MC_CMD_GET_PHY_MEDIA_INFO_OUT_LEN(num) (4+1*(num))
 /* in bytes */
 #define       MC_CMD_GET_PHY_MEDIA_INFO_OUT_DATALEN_OFST 0
+#define       MC_CMD_GET_PHY_MEDIA_INFO_OUT_DATALEN_LEN 4
 #define       MC_CMD_GET_PHY_MEDIA_INFO_OUT_DATA_OFST 4
 #define       MC_CMD_GET_PHY_MEDIA_INFO_OUT_DATA_LEN 1
 #define       MC_CMD_GET_PHY_MEDIA_INFO_OUT_DATA_MINNUM 1
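DATALEN bounds the variable-length DATA array in the response; a hedged sketch
of copying it out defensively:

#include <stddef.h>
#include <stdint.h>
#include <string.h>

static size_t copy_media_info(const uint8_t *outbuf, size_t outlen,
			      uint8_t *dst, size_t dstlen)
{
	uint32_t datalen;

	if (outlen < MC_CMD_GET_PHY_MEDIA_INFO_OUT_LENMIN)
		return 0;
	memcpy(&datalen, outbuf + MC_CMD_GET_PHY_MEDIA_INFO_OUT_DATALEN_OFST, 4);
	if (datalen > outlen - MC_CMD_GET_PHY_MEDIA_INFO_OUT_DATA_OFST)
		return 0;	/* response shorter than it claims */
	if (datalen > dstlen)
		datalen = dstlen;
	memcpy(dst, outbuf + MC_CMD_GET_PHY_MEDIA_INFO_OUT_DATA_OFST, datalen);
	return datalen;
}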
@@ -4568,12 +5535,14 @@
 /* MC_CMD_NVRAM_TEST_IN msgrequest */
 #define    MC_CMD_NVRAM_TEST_IN_LEN 4
 #define       MC_CMD_NVRAM_TEST_IN_TYPE_OFST 0
+#define       MC_CMD_NVRAM_TEST_IN_TYPE_LEN 4
 /*            Enum values, see field(s): */
 /*               MC_CMD_NVRAM_TYPES/MC_CMD_NVRAM_TYPES_OUT/TYPES */
 
 /* MC_CMD_NVRAM_TEST_OUT msgresponse */
 #define    MC_CMD_NVRAM_TEST_OUT_LEN 4
 #define       MC_CMD_NVRAM_TEST_OUT_RESULT_OFST 0
+#define       MC_CMD_NVRAM_TEST_OUT_RESULT_LEN 4
 /* enum: Passed. */
 #define          MC_CMD_NVRAM_TEST_PASS 0x0
 /* enum: Failed. */
@@ -4594,12 +5563,16 @@
 #define    MC_CMD_MRSFP_TWEAK_IN_EQ_CONFIG_LEN 16
 /* 0-6 low->high de-emph. */
 #define       MC_CMD_MRSFP_TWEAK_IN_EQ_CONFIG_TXEQ_LEVEL_OFST 0
+#define       MC_CMD_MRSFP_TWEAK_IN_EQ_CONFIG_TXEQ_LEVEL_LEN 4
 /* 0-8 low->high ref.V */
 #define       MC_CMD_MRSFP_TWEAK_IN_EQ_CONFIG_TXEQ_DT_CFG_OFST 4
+#define       MC_CMD_MRSFP_TWEAK_IN_EQ_CONFIG_TXEQ_DT_CFG_LEN 4
 /* 0-8 0-8 low->high boost */
 #define       MC_CMD_MRSFP_TWEAK_IN_EQ_CONFIG_RXEQ_BOOST_OFST 8
+#define       MC_CMD_MRSFP_TWEAK_IN_EQ_CONFIG_RXEQ_BOOST_LEN 4
 /* 0-8 low->high ref.V */
 #define       MC_CMD_MRSFP_TWEAK_IN_EQ_CONFIG_RXEQ_DT_CFG_OFST 12
+#define       MC_CMD_MRSFP_TWEAK_IN_EQ_CONFIG_RXEQ_DT_CFG_LEN 4
 
 /* MC_CMD_MRSFP_TWEAK_IN_READ_ONLY msgrequest */
 #define    MC_CMD_MRSFP_TWEAK_IN_READ_ONLY_LEN 0
@@ -4608,10 +5581,13 @@
 #define    MC_CMD_MRSFP_TWEAK_OUT_LEN 12
 /* input bits */
 #define       MC_CMD_MRSFP_TWEAK_OUT_IOEXP_INPUTS_OFST 0
+#define       MC_CMD_MRSFP_TWEAK_OUT_IOEXP_INPUTS_LEN 4
 /* output bits */
 #define       MC_CMD_MRSFP_TWEAK_OUT_IOEXP_OUTPUTS_OFST 4
+#define       MC_CMD_MRSFP_TWEAK_OUT_IOEXP_OUTPUTS_LEN 4
 /* direction */
 #define       MC_CMD_MRSFP_TWEAK_OUT_IOEXP_DIRECTION_OFST 8
+#define       MC_CMD_MRSFP_TWEAK_OUT_IOEXP_DIRECTION_LEN 4
 /* enum: Out. */
 #define          MC_CMD_MRSFP_TWEAK_OUT_IOEXP_DIRECTION_OUT 0x0
 /* enum: In. */
@@ -4626,21 +5602,26 @@
  */
 #define MC_CMD_SENSOR_SET_LIMS 0x4e
 
-#define MC_CMD_0x4e_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+#define MC_CMD_0x4e_PRIVILEGE_CTG SRIOV_CTG_INSECURE
 
 /* MC_CMD_SENSOR_SET_LIMS_IN msgrequest */
 #define    MC_CMD_SENSOR_SET_LIMS_IN_LEN 20
 #define       MC_CMD_SENSOR_SET_LIMS_IN_SENSOR_OFST 0
+#define       MC_CMD_SENSOR_SET_LIMS_IN_SENSOR_LEN 4
 /*            Enum values, see field(s): */
 /*               MC_CMD_SENSOR_INFO/MC_CMD_SENSOR_INFO_OUT/MASK */
 /* interpretation is sensor-specific. */
 #define       MC_CMD_SENSOR_SET_LIMS_IN_LOW0_OFST 4
+#define       MC_CMD_SENSOR_SET_LIMS_IN_LOW0_LEN 4
 /* interpretation is sensor-specific. */
 #define       MC_CMD_SENSOR_SET_LIMS_IN_HI0_OFST 8
+#define       MC_CMD_SENSOR_SET_LIMS_IN_HI0_LEN 4
 /* interpretation is sensor-specific. */
 #define       MC_CMD_SENSOR_SET_LIMS_IN_LOW1_OFST 12
+#define       MC_CMD_SENSOR_SET_LIMS_IN_LOW1_LEN 4
 /* interpretation is sensor-specific. */
 #define       MC_CMD_SENSOR_SET_LIMS_IN_HI1_OFST 16
+#define       MC_CMD_SENSOR_SET_LIMS_IN_HI1_LEN 4
 
 /* MC_CMD_SENSOR_SET_LIMS_OUT msgresponse */
 #define    MC_CMD_SENSOR_SET_LIMS_OUT_LEN 0
@@ -4657,9 +5638,13 @@
 /* MC_CMD_GET_RESOURCE_LIMITS_OUT msgresponse */
 #define    MC_CMD_GET_RESOURCE_LIMITS_OUT_LEN 16
 #define       MC_CMD_GET_RESOURCE_LIMITS_OUT_BUFTBL_OFST 0
+#define       MC_CMD_GET_RESOURCE_LIMITS_OUT_BUFTBL_LEN 4
 #define       MC_CMD_GET_RESOURCE_LIMITS_OUT_EVQ_OFST 4
+#define       MC_CMD_GET_RESOURCE_LIMITS_OUT_EVQ_LEN 4
 #define       MC_CMD_GET_RESOURCE_LIMITS_OUT_RXQ_OFST 8
+#define       MC_CMD_GET_RESOURCE_LIMITS_OUT_RXQ_LEN 4
 #define       MC_CMD_GET_RESOURCE_LIMITS_OUT_TXQ_OFST 12
+#define       MC_CMD_GET_RESOURCE_LIMITS_OUT_TXQ_LEN 4
 
 
 /***********************************/
@@ -4680,6 +5665,7 @@
 #define    MC_CMD_NVRAM_PARTITIONS_OUT_LEN(num) (4+4*(num))
 /* total number of partitions */
 #define       MC_CMD_NVRAM_PARTITIONS_OUT_NUM_PARTITIONS_OFST 0
+#define       MC_CMD_NVRAM_PARTITIONS_OUT_NUM_PARTITIONS_LEN 4
 /* type ID code for each of NUM_PARTITIONS partitions */
 #define       MC_CMD_NVRAM_PARTITIONS_OUT_TYPE_ID_OFST 4
 #define       MC_CMD_NVRAM_PARTITIONS_OUT_TYPE_ID_LEN 4
@@ -4700,6 +5686,7 @@
 #define    MC_CMD_NVRAM_METADATA_IN_LEN 4
 /* Partition type ID code */
 #define       MC_CMD_NVRAM_METADATA_IN_TYPE_OFST 0
+#define       MC_CMD_NVRAM_METADATA_IN_TYPE_LEN 4
 
 /* MC_CMD_NVRAM_METADATA_OUT msgresponse */
 #define    MC_CMD_NVRAM_METADATA_OUT_LENMIN 20
@@ -4707,7 +5694,9 @@
 #define    MC_CMD_NVRAM_METADATA_OUT_LEN(num) (20+1*(num))
 /* Partition type ID code */
 #define       MC_CMD_NVRAM_METADATA_OUT_TYPE_OFST 0
+#define       MC_CMD_NVRAM_METADATA_OUT_TYPE_LEN 4
 #define       MC_CMD_NVRAM_METADATA_OUT_FLAGS_OFST 4
+#define       MC_CMD_NVRAM_METADATA_OUT_FLAGS_LEN 4
 #define        MC_CMD_NVRAM_METADATA_OUT_SUBTYPE_VALID_LBN 0
 #define        MC_CMD_NVRAM_METADATA_OUT_SUBTYPE_VALID_WIDTH 1
 #define        MC_CMD_NVRAM_METADATA_OUT_VERSION_VALID_LBN 1
@@ -4716,6 +5705,7 @@
 #define        MC_CMD_NVRAM_METADATA_OUT_DESCRIPTION_VALID_WIDTH 1
 /* Subtype ID code for content of this partition */
 #define       MC_CMD_NVRAM_METADATA_OUT_SUBTYPE_OFST 8
+#define       MC_CMD_NVRAM_METADATA_OUT_SUBTYPE_LEN 4
 /* 1st component of W.X.Y.Z version number for content of this partition */
 #define       MC_CMD_NVRAM_METADATA_OUT_VERSION_W_OFST 12
 #define       MC_CMD_NVRAM_METADATA_OUT_VERSION_W_LEN 2
@@ -4756,8 +5746,10 @@
 #define       MC_CMD_GET_MAC_ADDRESSES_OUT_RESERVED_LEN 2
 /* Number of allocated MAC addresses */
 #define       MC_CMD_GET_MAC_ADDRESSES_OUT_MAC_COUNT_OFST 8
+#define       MC_CMD_GET_MAC_ADDRESSES_OUT_MAC_COUNT_LEN 4
 /* Spacing of allocated MAC addresses */
 #define       MC_CMD_GET_MAC_ADDRESSES_OUT_MAC_STRIDE_OFST 12
+#define       MC_CMD_GET_MAC_ADDRESSES_OUT_MAC_STRIDE_LEN 4
 
 
 /***********************************/
@@ -4772,6 +5764,7 @@
 #define    MC_CMD_CLP_IN_LEN 4
 /* Sub operation */
 #define       MC_CMD_CLP_IN_OP_OFST 0
+#define       MC_CMD_CLP_IN_OP_LEN 4
 /* enum: Return to factory default settings */
 #define          MC_CMD_CLP_OP_DEFAULT 0x1
 /* enum: Set MAC address */
@@ -4789,6 +5782,7 @@
 /* MC_CMD_CLP_IN_DEFAULT msgrequest */
 #define    MC_CMD_CLP_IN_DEFAULT_LEN 4
 /*            MC_CMD_CLP_IN_OP_OFST 0 */
+/*            MC_CMD_CLP_IN_OP_LEN 4 */
 
 /* MC_CMD_CLP_OUT_DEFAULT msgresponse */
 #define    MC_CMD_CLP_OUT_DEFAULT_LEN 0
@@ -4796,6 +5790,7 @@
 /* MC_CMD_CLP_IN_SET_MAC msgrequest */
 #define    MC_CMD_CLP_IN_SET_MAC_LEN 12
 /*            MC_CMD_CLP_IN_OP_OFST 0 */
+/*            MC_CMD_CLP_IN_OP_LEN 4 */
 /* MAC address assigned to port */
 #define       MC_CMD_CLP_IN_SET_MAC_ADDR_OFST 4
 #define       MC_CMD_CLP_IN_SET_MAC_ADDR_LEN 6
@@ -4809,6 +5804,7 @@
 /* MC_CMD_CLP_IN_GET_MAC msgrequest */
 #define    MC_CMD_CLP_IN_GET_MAC_LEN 4
 /*            MC_CMD_CLP_IN_OP_OFST 0 */
+/*            MC_CMD_CLP_IN_OP_LEN 4 */
 
 /* MC_CMD_CLP_OUT_GET_MAC msgresponse */
 #define    MC_CMD_CLP_OUT_GET_MAC_LEN 8
@@ -4822,6 +5818,7 @@
 /* MC_CMD_CLP_IN_SET_BOOT msgrequest */
 #define    MC_CMD_CLP_IN_SET_BOOT_LEN 5
 /*            MC_CMD_CLP_IN_OP_OFST 0 */
+/*            MC_CMD_CLP_IN_OP_LEN 4 */
 /* Boot flag */
 #define       MC_CMD_CLP_IN_SET_BOOT_FLAG_OFST 4
 #define       MC_CMD_CLP_IN_SET_BOOT_FLAG_LEN 1
@@ -4832,6 +5829,7 @@
 /* MC_CMD_CLP_IN_GET_BOOT msgrequest */
 #define    MC_CMD_CLP_IN_GET_BOOT_LEN 4
 /*            MC_CMD_CLP_IN_OP_OFST 0 */
+/*            MC_CMD_CLP_IN_OP_LEN 4 */
 
 /* MC_CMD_CLP_OUT_GET_BOOT msgresponse */
 #define    MC_CMD_CLP_OUT_GET_BOOT_LEN 4
@@ -4849,11 +5847,12 @@
  */
 #define MC_CMD_MUM 0x57
 
-#define MC_CMD_0x57_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+#define MC_CMD_0x57_PRIVILEGE_CTG SRIOV_CTG_INSECURE
 
 /* MC_CMD_MUM_IN msgrequest */
 #define    MC_CMD_MUM_IN_LEN 4
 #define       MC_CMD_MUM_IN_OP_HDR_OFST 0
+#define       MC_CMD_MUM_IN_OP_HDR_LEN 4
 #define        MC_CMD_MUM_IN_OP_LBN 0
 #define        MC_CMD_MUM_IN_OP_WIDTH 8
 /* enum: NULL MCDI command to MUM */
@@ -4893,26 +5892,32 @@
 #define    MC_CMD_MUM_IN_NULL_LEN 4
 /* MUM cmd header */
 #define       MC_CMD_MUM_IN_CMD_OFST 0
+#define       MC_CMD_MUM_IN_CMD_LEN 4
 
 /* MC_CMD_MUM_IN_GET_VERSION msgrequest */
 #define    MC_CMD_MUM_IN_GET_VERSION_LEN 4
 /* MUM cmd header */
 /*            MC_CMD_MUM_IN_CMD_OFST 0 */
+/*            MC_CMD_MUM_IN_CMD_LEN 4 */
 
 /* MC_CMD_MUM_IN_READ msgrequest */
 #define    MC_CMD_MUM_IN_READ_LEN 16
 /* MUM cmd header */
 /*            MC_CMD_MUM_IN_CMD_OFST 0 */
+/*            MC_CMD_MUM_IN_CMD_LEN 4 */
 /* ID of the device (connected to the MUM) whose registers are to be read */
 #define       MC_CMD_MUM_IN_READ_DEVICE_OFST 4
+#define       MC_CMD_MUM_IN_READ_DEVICE_LEN 4
 /* enum: Hittite HMC1035 clock generator on Sorrento board */
 #define          MC_CMD_MUM_DEV_HITTITE 0x1
 /* enum: Hittite HMC1035 clock generator for NIC-side on Sorrento board */
 #define          MC_CMD_MUM_DEV_HITTITE_NIC 0x2
 /* 32-bit address to read from */
 #define       MC_CMD_MUM_IN_READ_ADDR_OFST 8
+#define       MC_CMD_MUM_IN_READ_ADDR_LEN 4
 /* Number of words to read. */
 #define       MC_CMD_MUM_IN_READ_NUMWORDS_OFST 12
+#define       MC_CMD_MUM_IN_READ_NUMWORDS_LEN 4
 
 /* MC_CMD_MUM_IN_WRITE msgrequest */
 #define    MC_CMD_MUM_IN_WRITE_LENMIN 16
@@ -4920,12 +5925,15 @@
 #define    MC_CMD_MUM_IN_WRITE_LEN(num) (12+4*(num))
 /* MUM cmd header */
 /*            MC_CMD_MUM_IN_CMD_OFST 0 */
+/*            MC_CMD_MUM_IN_CMD_LEN 4 */
 /* ID of the device (connected to the MUM) whose registers are to be written */
 #define       MC_CMD_MUM_IN_WRITE_DEVICE_OFST 4
+#define       MC_CMD_MUM_IN_WRITE_DEVICE_LEN 4
 /* enum: Hittite HMC1035 clock generator on Sorrento board */
 /*               MC_CMD_MUM_DEV_HITTITE 0x1 */
 /* 32-bit address to write to */
 #define       MC_CMD_MUM_IN_WRITE_ADDR_OFST 8
+#define       MC_CMD_MUM_IN_WRITE_ADDR_LEN 4
 /* Words to write */
 #define       MC_CMD_MUM_IN_WRITE_BUFFER_OFST 12
 #define       MC_CMD_MUM_IN_WRITE_BUFFER_LEN 4
@@ -4938,12 +5946,16 @@
 #define    MC_CMD_MUM_IN_RAW_CMD_LEN(num) (16+1*(num))
 /* MUM cmd header */
 /*            MC_CMD_MUM_IN_CMD_OFST 0 */
+/*            MC_CMD_MUM_IN_CMD_LEN 4 */
 /* MUM I2C cmd code */
 #define       MC_CMD_MUM_IN_RAW_CMD_CMD_CODE_OFST 4
+#define       MC_CMD_MUM_IN_RAW_CMD_CMD_CODE_LEN 4
 /* Number of bytes to write */
 #define       MC_CMD_MUM_IN_RAW_CMD_NUM_WRITE_OFST 8
+#define       MC_CMD_MUM_IN_RAW_CMD_NUM_WRITE_LEN 4
 /* Number of bytes to read */
 #define       MC_CMD_MUM_IN_RAW_CMD_NUM_READ_OFST 12
+#define       MC_CMD_MUM_IN_RAW_CMD_NUM_READ_LEN 4
 /* Bytes to write */
 #define       MC_CMD_MUM_IN_RAW_CMD_WRITE_DATA_OFST 16
 #define       MC_CMD_MUM_IN_RAW_CMD_WRITE_DATA_LEN 1
@@ -4954,21 +5966,28 @@
 #define    MC_CMD_MUM_IN_LOG_LEN 8
 /* MUM cmd header */
 /*            MC_CMD_MUM_IN_CMD_OFST 0 */
+/*            MC_CMD_MUM_IN_CMD_LEN 4 */
 #define       MC_CMD_MUM_IN_LOG_OP_OFST 4
+#define       MC_CMD_MUM_IN_LOG_OP_LEN 4
 #define          MC_CMD_MUM_IN_LOG_OP_UART  0x1 /* enum */
 
 /* MC_CMD_MUM_IN_LOG_OP_UART msgrequest */
 #define    MC_CMD_MUM_IN_LOG_OP_UART_LEN 12
 /*            MC_CMD_MUM_IN_CMD_OFST 0 */
+/*            MC_CMD_MUM_IN_CMD_LEN 4 */
 /*            MC_CMD_MUM_IN_LOG_OP_OFST 4 */
+/*            MC_CMD_MUM_IN_LOG_OP_LEN 4 */
 /* Enable/disable debug output to UART */
 #define       MC_CMD_MUM_IN_LOG_OP_UART_ENABLE_OFST 8
+#define       MC_CMD_MUM_IN_LOG_OP_UART_ENABLE_LEN 4
 
 /* MC_CMD_MUM_IN_GPIO msgrequest */
 #define    MC_CMD_MUM_IN_GPIO_LEN 8
 /* MUM cmd header */
 /*            MC_CMD_MUM_IN_CMD_OFST 0 */
+/*            MC_CMD_MUM_IN_CMD_LEN 4 */
 #define       MC_CMD_MUM_IN_GPIO_HDR_OFST 4
+#define       MC_CMD_MUM_IN_GPIO_HDR_LEN 4
 #define        MC_CMD_MUM_IN_GPIO_OPCODE_LBN 0
 #define        MC_CMD_MUM_IN_GPIO_OPCODE_WIDTH 8
 #define          MC_CMD_MUM_IN_GPIO_IN_READ 0x0 /* enum */
@@ -4981,40 +6000,56 @@
 /* MC_CMD_MUM_IN_GPIO_IN_READ msgrequest */
 #define    MC_CMD_MUM_IN_GPIO_IN_READ_LEN 8
 /*            MC_CMD_MUM_IN_CMD_OFST 0 */
+/*            MC_CMD_MUM_IN_CMD_LEN 4 */
 #define       MC_CMD_MUM_IN_GPIO_IN_READ_HDR_OFST 4
+#define       MC_CMD_MUM_IN_GPIO_IN_READ_HDR_LEN 4
 
 /* MC_CMD_MUM_IN_GPIO_OUT_WRITE msgrequest */
 #define    MC_CMD_MUM_IN_GPIO_OUT_WRITE_LEN 16
 /*            MC_CMD_MUM_IN_CMD_OFST 0 */
+/*            MC_CMD_MUM_IN_CMD_LEN 4 */
 #define       MC_CMD_MUM_IN_GPIO_OUT_WRITE_HDR_OFST 4
+#define       MC_CMD_MUM_IN_GPIO_OUT_WRITE_HDR_LEN 4
 /* The first 32-bit word to be written to the GPIO OUT register. */
 #define       MC_CMD_MUM_IN_GPIO_OUT_WRITE_GPIOMASK1_OFST 8
+#define       MC_CMD_MUM_IN_GPIO_OUT_WRITE_GPIOMASK1_LEN 4
 /* The second 32-bit word to be written to the GPIO OUT register. */
 #define       MC_CMD_MUM_IN_GPIO_OUT_WRITE_GPIOMASK2_OFST 12
+#define       MC_CMD_MUM_IN_GPIO_OUT_WRITE_GPIOMASK2_LEN 4
 
 /* MC_CMD_MUM_IN_GPIO_OUT_READ msgrequest */
 #define    MC_CMD_MUM_IN_GPIO_OUT_READ_LEN 8
 /*            MC_CMD_MUM_IN_CMD_OFST 0 */
+/*            MC_CMD_MUM_IN_CMD_LEN 4 */
 #define       MC_CMD_MUM_IN_GPIO_OUT_READ_HDR_OFST 4
+#define       MC_CMD_MUM_IN_GPIO_OUT_READ_HDR_LEN 4
 
 /* MC_CMD_MUM_IN_GPIO_OUT_ENABLE_WRITE msgrequest */
 #define    MC_CMD_MUM_IN_GPIO_OUT_ENABLE_WRITE_LEN 16
 /*            MC_CMD_MUM_IN_CMD_OFST 0 */
+/*            MC_CMD_MUM_IN_CMD_LEN 4 */
 #define       MC_CMD_MUM_IN_GPIO_OUT_ENABLE_WRITE_HDR_OFST 4
+#define       MC_CMD_MUM_IN_GPIO_OUT_ENABLE_WRITE_HDR_LEN 4
 /* The first 32-bit word to be written to the GPIO OUT ENABLE register. */
 #define       MC_CMD_MUM_IN_GPIO_OUT_ENABLE_WRITE_GPIOMASK1_OFST 8
+#define       MC_CMD_MUM_IN_GPIO_OUT_ENABLE_WRITE_GPIOMASK1_LEN 4
 /* The second 32-bit word to be written to the GPIO OUT ENABLE register. */
 #define       MC_CMD_MUM_IN_GPIO_OUT_ENABLE_WRITE_GPIOMASK2_OFST 12
+#define       MC_CMD_MUM_IN_GPIO_OUT_ENABLE_WRITE_GPIOMASK2_LEN 4
 
 /* MC_CMD_MUM_IN_GPIO_OUT_ENABLE_READ msgrequest */
 #define    MC_CMD_MUM_IN_GPIO_OUT_ENABLE_READ_LEN 8
 /*            MC_CMD_MUM_IN_CMD_OFST 0 */
+/*            MC_CMD_MUM_IN_CMD_LEN 4 */
 #define       MC_CMD_MUM_IN_GPIO_OUT_ENABLE_READ_HDR_OFST 4
+#define       MC_CMD_MUM_IN_GPIO_OUT_ENABLE_READ_HDR_LEN 4
 
 /* MC_CMD_MUM_IN_GPIO_OP msgrequest */
 #define    MC_CMD_MUM_IN_GPIO_OP_LEN 8
 /*            MC_CMD_MUM_IN_CMD_OFST 0 */
+/*            MC_CMD_MUM_IN_CMD_LEN 4 */
 #define       MC_CMD_MUM_IN_GPIO_OP_HDR_OFST 4
+#define       MC_CMD_MUM_IN_GPIO_OP_HDR_LEN 4
 #define        MC_CMD_MUM_IN_GPIO_OP_BITWISE_OP_LBN 8
 #define        MC_CMD_MUM_IN_GPIO_OP_BITWISE_OP_WIDTH 8
 #define          MC_CMD_MUM_IN_GPIO_OP_OUT_READ 0x0 /* enum */
@@ -5027,26 +6062,34 @@
 /* MC_CMD_MUM_IN_GPIO_OP_OUT_READ msgrequest */
 #define    MC_CMD_MUM_IN_GPIO_OP_OUT_READ_LEN 8
 /*            MC_CMD_MUM_IN_CMD_OFST 0 */
+/*            MC_CMD_MUM_IN_CMD_LEN 4 */
 #define       MC_CMD_MUM_IN_GPIO_OP_OUT_READ_HDR_OFST 4
+#define       MC_CMD_MUM_IN_GPIO_OP_OUT_READ_HDR_LEN 4
 
 /* MC_CMD_MUM_IN_GPIO_OP_OUT_WRITE msgrequest */
 #define    MC_CMD_MUM_IN_GPIO_OP_OUT_WRITE_LEN 8
 /*            MC_CMD_MUM_IN_CMD_OFST 0 */
+/*            MC_CMD_MUM_IN_CMD_LEN 4 */
 #define       MC_CMD_MUM_IN_GPIO_OP_OUT_WRITE_HDR_OFST 4
+#define       MC_CMD_MUM_IN_GPIO_OP_OUT_WRITE_HDR_LEN 4
 #define        MC_CMD_MUM_IN_GPIO_OP_OUT_WRITE_WRITEBIT_LBN 24
 #define        MC_CMD_MUM_IN_GPIO_OP_OUT_WRITE_WRITEBIT_WIDTH 8
 
 /* MC_CMD_MUM_IN_GPIO_OP_OUT_CONFIG msgrequest */
 #define    MC_CMD_MUM_IN_GPIO_OP_OUT_CONFIG_LEN 8
 /*            MC_CMD_MUM_IN_CMD_OFST 0 */
+/*            MC_CMD_MUM_IN_CMD_LEN 4 */
 #define       MC_CMD_MUM_IN_GPIO_OP_OUT_CONFIG_HDR_OFST 4
+#define       MC_CMD_MUM_IN_GPIO_OP_OUT_CONFIG_HDR_LEN 4
 #define        MC_CMD_MUM_IN_GPIO_OP_OUT_CONFIG_CFG_LBN 24
 #define        MC_CMD_MUM_IN_GPIO_OP_OUT_CONFIG_CFG_WIDTH 8
 
 /* MC_CMD_MUM_IN_GPIO_OP_OUT_ENABLE msgrequest */
 #define    MC_CMD_MUM_IN_GPIO_OP_OUT_ENABLE_LEN 8
 /*            MC_CMD_MUM_IN_CMD_OFST 0 */
+/*            MC_CMD_MUM_IN_CMD_LEN 4 */
 #define       MC_CMD_MUM_IN_GPIO_OP_OUT_ENABLE_HDR_OFST 4
+#define       MC_CMD_MUM_IN_GPIO_OP_OUT_ENABLE_HDR_LEN 4
 #define        MC_CMD_MUM_IN_GPIO_OP_OUT_ENABLE_ENABLEBIT_LBN 24
 #define        MC_CMD_MUM_IN_GPIO_OP_OUT_ENABLE_ENABLEBIT_WIDTH 8
 
@@ -5054,7 +6097,9 @@
 #define    MC_CMD_MUM_IN_READ_SENSORS_LEN 8
 /* MUM cmd header */
 /*            MC_CMD_MUM_IN_CMD_OFST 0 */
+/*            MC_CMD_MUM_IN_CMD_LEN 4 */
 #define       MC_CMD_MUM_IN_READ_SENSORS_PARAMS_OFST 4
+#define       MC_CMD_MUM_IN_READ_SENSORS_PARAMS_LEN 4
 #define        MC_CMD_MUM_IN_READ_SENSORS_SENSOR_ID_LBN 0
 #define        MC_CMD_MUM_IN_READ_SENSORS_SENSOR_ID_WIDTH 8
 #define        MC_CMD_MUM_IN_READ_SENSORS_NUM_SENSORS_LBN 8
@@ -5064,13 +6109,16 @@
 #define    MC_CMD_MUM_IN_PROGRAM_CLOCKS_LEN 12
 /* MUM cmd header */
 /*            MC_CMD_MUM_IN_CMD_OFST 0 */
+/*            MC_CMD_MUM_IN_CMD_LEN 4 */
 /* Bit-mask of clocks to be programmed */
 #define       MC_CMD_MUM_IN_PROGRAM_CLOCKS_MASK_OFST 4
+#define       MC_CMD_MUM_IN_PROGRAM_CLOCKS_MASK_LEN 4
 #define          MC_CMD_MUM_CLOCK_ID_FPGA 0x0 /* enum */
 #define          MC_CMD_MUM_CLOCK_ID_DDR 0x1 /* enum */
 #define          MC_CMD_MUM_CLOCK_ID_NIC 0x2 /* enum */
 /* Control flags for clock programming */
 #define       MC_CMD_MUM_IN_PROGRAM_CLOCKS_FLAGS_OFST 8
+#define       MC_CMD_MUM_IN_PROGRAM_CLOCKS_FLAGS_LEN 4
 #define        MC_CMD_MUM_IN_PROGRAM_CLOCKS_OVERCLOCK_110_LBN 0
 #define        MC_CMD_MUM_IN_PROGRAM_CLOCKS_OVERCLOCK_110_WIDTH 1
 #define        MC_CMD_MUM_IN_PROGRAM_CLOCKS_CLOCK_NIC_FROM_FPGA_LBN 1
@@ -5082,19 +6130,24 @@
 #define    MC_CMD_MUM_IN_FPGA_LOAD_LEN 8
 /* MUM cmd header */
 /*            MC_CMD_MUM_IN_CMD_OFST 0 */
+/*            MC_CMD_MUM_IN_CMD_LEN 4 */
 /* Enable/Disable FPGA config from flash */
 #define       MC_CMD_MUM_IN_FPGA_LOAD_ENABLE_OFST 4
+#define       MC_CMD_MUM_IN_FPGA_LOAD_ENABLE_LEN 4
 
 /* MC_CMD_MUM_IN_READ_ATB_SENSOR msgrequest */
 #define    MC_CMD_MUM_IN_READ_ATB_SENSOR_LEN 4
 /* MUM cmd header */
 /*            MC_CMD_MUM_IN_CMD_OFST 0 */
+/*            MC_CMD_MUM_IN_CMD_LEN 4 */
 
 /* MC_CMD_MUM_IN_QSFP msgrequest */
 #define    MC_CMD_MUM_IN_QSFP_LEN 12
 /* MUM cmd header */
 /*            MC_CMD_MUM_IN_CMD_OFST 0 */
+/*            MC_CMD_MUM_IN_CMD_LEN 4 */
 #define       MC_CMD_MUM_IN_QSFP_HDR_OFST 4
+#define       MC_CMD_MUM_IN_QSFP_HDR_LEN 4
 #define        MC_CMD_MUM_IN_QSFP_OPCODE_LBN 0
 #define        MC_CMD_MUM_IN_QSFP_OPCODE_WIDTH 4
 #define          MC_CMD_MUM_IN_QSFP_INIT 0x0 /* enum */
@@ -5104,52 +6157,77 @@
 #define          MC_CMD_MUM_IN_QSFP_FILL_STATS 0x4 /* enum */
 #define          MC_CMD_MUM_IN_QSFP_POLL_BIST 0x5 /* enum */
 #define       MC_CMD_MUM_IN_QSFP_IDX_OFST 8
+#define       MC_CMD_MUM_IN_QSFP_IDX_LEN 4
 
 /* MC_CMD_MUM_IN_QSFP_INIT msgrequest */
 #define    MC_CMD_MUM_IN_QSFP_INIT_LEN 16
 /*            MC_CMD_MUM_IN_CMD_OFST 0 */
+/*            MC_CMD_MUM_IN_CMD_LEN 4 */
 #define       MC_CMD_MUM_IN_QSFP_INIT_HDR_OFST 4
+#define       MC_CMD_MUM_IN_QSFP_INIT_HDR_LEN 4
 #define       MC_CMD_MUM_IN_QSFP_INIT_IDX_OFST 8
+#define       MC_CMD_MUM_IN_QSFP_INIT_IDX_LEN 4
 #define       MC_CMD_MUM_IN_QSFP_INIT_CAGE_OFST 12
+#define       MC_CMD_MUM_IN_QSFP_INIT_CAGE_LEN 4
 
 /* MC_CMD_MUM_IN_QSFP_RECONFIGURE msgrequest */
 #define    MC_CMD_MUM_IN_QSFP_RECONFIGURE_LEN 24
 /*            MC_CMD_MUM_IN_CMD_OFST 0 */
+/*            MC_CMD_MUM_IN_CMD_LEN 4 */
 #define       MC_CMD_MUM_IN_QSFP_RECONFIGURE_HDR_OFST 4
+#define       MC_CMD_MUM_IN_QSFP_RECONFIGURE_HDR_LEN 4
 #define       MC_CMD_MUM_IN_QSFP_RECONFIGURE_IDX_OFST 8
+#define       MC_CMD_MUM_IN_QSFP_RECONFIGURE_IDX_LEN 4
 #define       MC_CMD_MUM_IN_QSFP_RECONFIGURE_TX_DISABLE_OFST 12
+#define       MC_CMD_MUM_IN_QSFP_RECONFIGURE_TX_DISABLE_LEN 4
 #define       MC_CMD_MUM_IN_QSFP_RECONFIGURE_PORT_LANES_OFST 16
+#define       MC_CMD_MUM_IN_QSFP_RECONFIGURE_PORT_LANES_LEN 4
 #define       MC_CMD_MUM_IN_QSFP_RECONFIGURE_PORT_LINK_SPEED_OFST 20
+#define       MC_CMD_MUM_IN_QSFP_RECONFIGURE_PORT_LINK_SPEED_LEN 4
 
 /* MC_CMD_MUM_IN_QSFP_GET_SUPPORTED_CAP msgrequest */
 #define    MC_CMD_MUM_IN_QSFP_GET_SUPPORTED_CAP_LEN 12
 /*            MC_CMD_MUM_IN_CMD_OFST 0 */
+/*            MC_CMD_MUM_IN_CMD_LEN 4 */
 #define       MC_CMD_MUM_IN_QSFP_GET_SUPPORTED_CAP_HDR_OFST 4
+#define       MC_CMD_MUM_IN_QSFP_GET_SUPPORTED_CAP_HDR_LEN 4
 #define       MC_CMD_MUM_IN_QSFP_GET_SUPPORTED_CAP_IDX_OFST 8
+#define       MC_CMD_MUM_IN_QSFP_GET_SUPPORTED_CAP_IDX_LEN 4
 
 /* MC_CMD_MUM_IN_QSFP_GET_MEDIA_INFO msgrequest */
 #define    MC_CMD_MUM_IN_QSFP_GET_MEDIA_INFO_LEN 16
 /*            MC_CMD_MUM_IN_CMD_OFST 0 */
+/*            MC_CMD_MUM_IN_CMD_LEN 4 */
 #define       MC_CMD_MUM_IN_QSFP_GET_MEDIA_INFO_HDR_OFST 4
+#define       MC_CMD_MUM_IN_QSFP_GET_MEDIA_INFO_HDR_LEN 4
 #define       MC_CMD_MUM_IN_QSFP_GET_MEDIA_INFO_IDX_OFST 8
+#define       MC_CMD_MUM_IN_QSFP_GET_MEDIA_INFO_IDX_LEN 4
 #define       MC_CMD_MUM_IN_QSFP_GET_MEDIA_INFO_PAGE_OFST 12
+#define       MC_CMD_MUM_IN_QSFP_GET_MEDIA_INFO_PAGE_LEN 4
 
 /* MC_CMD_MUM_IN_QSFP_FILL_STATS msgrequest */
 #define    MC_CMD_MUM_IN_QSFP_FILL_STATS_LEN 12
 /*            MC_CMD_MUM_IN_CMD_OFST 0 */
+/*            MC_CMD_MUM_IN_CMD_LEN 4 */
 #define       MC_CMD_MUM_IN_QSFP_FILL_STATS_HDR_OFST 4
+#define       MC_CMD_MUM_IN_QSFP_FILL_STATS_HDR_LEN 4
 #define       MC_CMD_MUM_IN_QSFP_FILL_STATS_IDX_OFST 8
+#define       MC_CMD_MUM_IN_QSFP_FILL_STATS_IDX_LEN 4
 
 /* MC_CMD_MUM_IN_QSFP_POLL_BIST msgrequest */
 #define    MC_CMD_MUM_IN_QSFP_POLL_BIST_LEN 12
 /*            MC_CMD_MUM_IN_CMD_OFST 0 */
+/*            MC_CMD_MUM_IN_CMD_LEN 4 */
 #define       MC_CMD_MUM_IN_QSFP_POLL_BIST_HDR_OFST 4
+#define       MC_CMD_MUM_IN_QSFP_POLL_BIST_HDR_LEN 4
 #define       MC_CMD_MUM_IN_QSFP_POLL_BIST_IDX_OFST 8
+#define       MC_CMD_MUM_IN_QSFP_POLL_BIST_IDX_LEN 4
 
 /* MC_CMD_MUM_IN_READ_DDR_INFO msgrequest */
 #define    MC_CMD_MUM_IN_READ_DDR_INFO_LEN 4
 /* MUM cmd header */
 /*            MC_CMD_MUM_IN_CMD_OFST 0 */
+/*            MC_CMD_MUM_IN_CMD_LEN 4 */
 
 /* MC_CMD_MUM_OUT msgresponse */
 #define    MC_CMD_MUM_OUT_LEN 0
@@ -5160,6 +6238,7 @@
 /* MC_CMD_MUM_OUT_GET_VERSION msgresponse */
 #define    MC_CMD_MUM_OUT_GET_VERSION_LEN 12
 #define       MC_CMD_MUM_OUT_GET_VERSION_FIRMWARE_OFST 0
+#define       MC_CMD_MUM_OUT_GET_VERSION_FIRMWARE_LEN 4
 #define       MC_CMD_MUM_OUT_GET_VERSION_VERSION_OFST 4
 #define       MC_CMD_MUM_OUT_GET_VERSION_VERSION_LEN 8
 #define       MC_CMD_MUM_OUT_GET_VERSION_VERSION_LO_OFST 4
@@ -5197,8 +6276,10 @@
 #define    MC_CMD_MUM_OUT_GPIO_IN_READ_LEN 8
 /* The first 32-bit word read from the GPIO IN register. */
 #define       MC_CMD_MUM_OUT_GPIO_IN_READ_GPIOMASK1_OFST 0
+#define       MC_CMD_MUM_OUT_GPIO_IN_READ_GPIOMASK1_LEN 4
 /* The second 32-bit word read from the GPIO IN register. */
 #define       MC_CMD_MUM_OUT_GPIO_IN_READ_GPIOMASK2_OFST 4
+#define       MC_CMD_MUM_OUT_GPIO_IN_READ_GPIOMASK2_LEN 4
 
 /* MC_CMD_MUM_OUT_GPIO_OUT_WRITE msgresponse */
 #define    MC_CMD_MUM_OUT_GPIO_OUT_WRITE_LEN 0
@@ -5207,8 +6288,10 @@
 #define    MC_CMD_MUM_OUT_GPIO_OUT_READ_LEN 8
 /* The first 32-bit word read from the GPIO OUT register. */
 #define       MC_CMD_MUM_OUT_GPIO_OUT_READ_GPIOMASK1_OFST 0
+#define       MC_CMD_MUM_OUT_GPIO_OUT_READ_GPIOMASK1_LEN 4
 /* The second 32-bit word read from the GPIO OUT register. */
 #define       MC_CMD_MUM_OUT_GPIO_OUT_READ_GPIOMASK2_OFST 4
+#define       MC_CMD_MUM_OUT_GPIO_OUT_READ_GPIOMASK2_LEN 4
 
 /* MC_CMD_MUM_OUT_GPIO_OUT_ENABLE_WRITE msgresponse */
 #define    MC_CMD_MUM_OUT_GPIO_OUT_ENABLE_WRITE_LEN 0
@@ -5216,11 +6299,14 @@
 /* MC_CMD_MUM_OUT_GPIO_OUT_ENABLE_READ msgresponse */
 #define    MC_CMD_MUM_OUT_GPIO_OUT_ENABLE_READ_LEN 8
 #define       MC_CMD_MUM_OUT_GPIO_OUT_ENABLE_READ_GPIOMASK1_OFST 0
+#define       MC_CMD_MUM_OUT_GPIO_OUT_ENABLE_READ_GPIOMASK1_LEN 4
 #define       MC_CMD_MUM_OUT_GPIO_OUT_ENABLE_READ_GPIOMASK2_OFST 4
+#define       MC_CMD_MUM_OUT_GPIO_OUT_ENABLE_READ_GPIOMASK2_LEN 4
 
 /* MC_CMD_MUM_OUT_GPIO_OP_OUT_READ msgresponse */
 #define    MC_CMD_MUM_OUT_GPIO_OP_OUT_READ_LEN 4
 #define       MC_CMD_MUM_OUT_GPIO_OP_OUT_READ_BIT_READ_OFST 0
+#define       MC_CMD_MUM_OUT_GPIO_OP_OUT_READ_BIT_READ_LEN 4
 
 /* MC_CMD_MUM_OUT_GPIO_OP_OUT_WRITE msgresponse */
 #define    MC_CMD_MUM_OUT_GPIO_OP_OUT_WRITE_LEN 0
@@ -5249,6 +6335,7 @@
 /* MC_CMD_MUM_OUT_PROGRAM_CLOCKS msgresponse */
 #define    MC_CMD_MUM_OUT_PROGRAM_CLOCKS_LEN 4
 #define       MC_CMD_MUM_OUT_PROGRAM_CLOCKS_OK_MASK_OFST 0
+#define       MC_CMD_MUM_OUT_PROGRAM_CLOCKS_OK_MASK_LEN 4
 
 /* MC_CMD_MUM_OUT_FPGA_LOAD msgresponse */
 #define    MC_CMD_MUM_OUT_FPGA_LOAD_LEN 0
@@ -5256,6 +6343,7 @@
 /* MC_CMD_MUM_OUT_READ_ATB_SENSOR msgresponse */
 #define    MC_CMD_MUM_OUT_READ_ATB_SENSOR_LEN 4
 #define       MC_CMD_MUM_OUT_READ_ATB_SENSOR_RESULT_OFST 0
+#define       MC_CMD_MUM_OUT_READ_ATB_SENSOR_RESULT_LEN 4
 
 /* MC_CMD_MUM_OUT_QSFP_INIT msgresponse */
 #define    MC_CMD_MUM_OUT_QSFP_INIT_LEN 0
@@ -5263,7 +6351,9 @@
 /* MC_CMD_MUM_OUT_QSFP_RECONFIGURE msgresponse */
 #define    MC_CMD_MUM_OUT_QSFP_RECONFIGURE_LEN 8
 #define       MC_CMD_MUM_OUT_QSFP_RECONFIGURE_PORT_PHY_LP_CAP_OFST 0
+#define       MC_CMD_MUM_OUT_QSFP_RECONFIGURE_PORT_PHY_LP_CAP_LEN 4
 #define       MC_CMD_MUM_OUT_QSFP_RECONFIGURE_PORT_PHY_FLAGS_OFST 4
+#define       MC_CMD_MUM_OUT_QSFP_RECONFIGURE_PORT_PHY_FLAGS_LEN 4
 #define        MC_CMD_MUM_OUT_QSFP_RECONFIGURE_PORT_PHY_READY_LBN 0
 #define        MC_CMD_MUM_OUT_QSFP_RECONFIGURE_PORT_PHY_READY_WIDTH 1
 #define        MC_CMD_MUM_OUT_QSFP_RECONFIGURE_PORT_PHY_LINK_UP_LBN 1
@@ -5272,6 +6362,7 @@
 /* MC_CMD_MUM_OUT_QSFP_GET_SUPPORTED_CAP msgresponse */
 #define    MC_CMD_MUM_OUT_QSFP_GET_SUPPORTED_CAP_LEN 4
 #define       MC_CMD_MUM_OUT_QSFP_GET_SUPPORTED_CAP_PORT_PHY_LP_CAP_OFST 0
+#define       MC_CMD_MUM_OUT_QSFP_GET_SUPPORTED_CAP_PORT_PHY_LP_CAP_LEN 4
 
 /* MC_CMD_MUM_OUT_QSFP_GET_MEDIA_INFO msgresponse */
 #define    MC_CMD_MUM_OUT_QSFP_GET_MEDIA_INFO_LENMIN 5
@@ -5279,6 +6370,7 @@
 #define    MC_CMD_MUM_OUT_QSFP_GET_MEDIA_INFO_LEN(num) (4+1*(num))
 /* in bytes */
 #define       MC_CMD_MUM_OUT_QSFP_GET_MEDIA_INFO_DATALEN_OFST 0
+#define       MC_CMD_MUM_OUT_QSFP_GET_MEDIA_INFO_DATALEN_LEN 4
 #define       MC_CMD_MUM_OUT_QSFP_GET_MEDIA_INFO_DATA_OFST 4
 #define       MC_CMD_MUM_OUT_QSFP_GET_MEDIA_INFO_DATA_LEN 1
 #define       MC_CMD_MUM_OUT_QSFP_GET_MEDIA_INFO_DATA_MINNUM 1
@@ -5287,11 +6379,14 @@
 /* MC_CMD_MUM_OUT_QSFP_FILL_STATS msgresponse */
 #define    MC_CMD_MUM_OUT_QSFP_FILL_STATS_LEN 8
 #define       MC_CMD_MUM_OUT_QSFP_FILL_STATS_PORT_PHY_STATS_PMA_PMD_LINK_UP_OFST 0
+#define       MC_CMD_MUM_OUT_QSFP_FILL_STATS_PORT_PHY_STATS_PMA_PMD_LINK_UP_LEN 4
 #define       MC_CMD_MUM_OUT_QSFP_FILL_STATS_PORT_PHY_STATS_PCS_LINK_UP_OFST 4
+#define       MC_CMD_MUM_OUT_QSFP_FILL_STATS_PORT_PHY_STATS_PCS_LINK_UP_LEN 4
 
 /* MC_CMD_MUM_OUT_QSFP_POLL_BIST msgresponse */
 #define    MC_CMD_MUM_OUT_QSFP_POLL_BIST_LEN 4
 #define       MC_CMD_MUM_OUT_QSFP_POLL_BIST_TEST_OFST 0
+#define       MC_CMD_MUM_OUT_QSFP_POLL_BIST_TEST_LEN 4
 
 /* MC_CMD_MUM_OUT_READ_DDR_INFO msgresponse */
 #define    MC_CMD_MUM_OUT_READ_DDR_INFO_LENMIN 24
@@ -5299,12 +6394,14 @@
 #define    MC_CMD_MUM_OUT_READ_DDR_INFO_LEN(num) (8+8*(num))
 /* Discrete (soldered) DDR resistor strap info */
 #define       MC_CMD_MUM_OUT_READ_DDR_INFO_DISCRETE_DDR_INFO_OFST 0
+#define       MC_CMD_MUM_OUT_READ_DDR_INFO_DISCRETE_DDR_INFO_LEN 4
 #define        MC_CMD_MUM_OUT_READ_DDR_INFO_VRATIO_LBN 0
 #define        MC_CMD_MUM_OUT_READ_DDR_INFO_VRATIO_WIDTH 16
 #define        MC_CMD_MUM_OUT_READ_DDR_INFO_RESERVED1_LBN 16
 #define        MC_CMD_MUM_OUT_READ_DDR_INFO_RESERVED1_WIDTH 16
 /* Number of SODIMM info records */
 #define       MC_CMD_MUM_OUT_READ_DDR_INFO_NUM_RECORDS_OFST 4
+#define       MC_CMD_MUM_OUT_READ_DDR_INFO_NUM_RECORDS_LEN 4
 /* Array of SODIMM info records */
 #define       MC_CMD_MUM_OUT_READ_DDR_INFO_SODIMM_INFO_RECORD_OFST 8
 #define       MC_CMD_MUM_OUT_READ_DDR_INFO_SODIMM_INFO_RECORD_LEN 8
@@ -5365,6 +6462,7 @@
 /* EVB_PORT_ID structuredef */
 #define    EVB_PORT_ID_LEN 4
 #define       EVB_PORT_ID_PORT_ID_OFST 0
+#define       EVB_PORT_ID_PORT_ID_LEN 4
 /* enum: An invalid port handle. */
 #define          EVB_PORT_ID_NULL  0x0
 /* enum: The port assigned to this function. */
@@ -5460,6 +6558,10 @@
 #define          NVRAM_PARTITION_TYPE_FC_LOG               0xb04
 /* enum: MUM firmware partition */
 #define          NVRAM_PARTITION_TYPE_MUM_FIRMWARE         0xc00
+/* enum: SUC firmware partition (this is intentionally an alias of
+ * MUM_FIRMWARE)
+ */
+#define          NVRAM_PARTITION_TYPE_SUC_FIRMWARE         0xc00
 /* enum: MUM Non-volatile log output partition. */
 #define          NVRAM_PARTITION_TYPE_MUM_LOG              0xc01
 /* enum: MUM Application table partition. */
@@ -5474,8 +6576,8 @@
 #define          NVRAM_PARTITION_TYPE_MUM_FUSELOCK         0xc06
 /* enum: UEFI expansion ROM if separate from PXE */
 #define          NVRAM_PARTITION_TYPE_EXPANSION_UEFI       0xd00
-/* enum: Spare partition 0 */
-#define          NVRAM_PARTITION_TYPE_SPARE_0              0x1000
+/* enum: Used by the expansion ROM for logging */
+#define          NVRAM_PARTITION_TYPE_PXE_LOG              0x1000
 /* enum: Used for XIP code of shmbooted images */
 #define          NVRAM_PARTITION_TYPE_XIP_SCRATCH          0x1100
 /* enum: Spare partition 2 */
@@ -5488,6 +6590,27 @@
 #define          NVRAM_PARTITION_TYPE_SPARE_4              0x1400
 /* enum: Spare partition 5 */
 #define          NVRAM_PARTITION_TYPE_SPARE_5              0x1500
+/* enum: Partition for reporting MC status. See mc_flash_layout.h
+ * medford_mc_status_hdr_t for layout on Medford.
+ */
+#define          NVRAM_PARTITION_TYPE_STATUS               0x1600
+/* enum: Spare partition 13 */
+#define          NVRAM_PARTITION_TYPE_SPARE_13              0x1700
+/* enum: Spare partition 14 */
+#define          NVRAM_PARTITION_TYPE_SPARE_14              0x1800
+/* enum: Spare partition 15 */
+#define          NVRAM_PARTITION_TYPE_SPARE_15              0x1900
+/* enum: Spare partition 16 */
+#define          NVRAM_PARTITION_TYPE_SPARE_16              0x1a00
+/* enum: Factory defaults for dynamic configuration */
+#define          NVRAM_PARTITION_TYPE_DYNCONFIG_DEFAULTS    0x1b00
+/* enum: Factory defaults for expansion ROM configuration */
+#define          NVRAM_PARTITION_TYPE_ROMCONFIG_DEFAULTS    0x1c00
+/* enum: Field Replaceable Unit inventory information for use on IPMI
+ * platforms. See SF-119124-PS. The STATIC_CONFIG partition may contain a
+ * subset of the information stored in this partition.
+ */
+#define          NVRAM_PARTITION_TYPE_FRU_INFORMATION       0x1d00
 /* enum: Start of reserved value range (firmware may use for any purpose) */
 #define          NVRAM_PARTITION_TYPE_RESERVED_VALUES_MIN  0xff00
 /* enum: End of reserved value range (firmware may use for any purpose) */
@@ -5502,6 +6625,7 @@
 /* LICENSED_APP_ID structuredef */
 #define    LICENSED_APP_ID_LEN 4
 #define       LICENSED_APP_ID_ID_OFST 0
+#define       LICENSED_APP_ID_ID_LEN 4
 /* enum: OpenOnload */
 #define          LICENSED_APP_ID_ONLOAD                  0x1
 /* enum: PTP timestamping */
@@ -5526,6 +6650,14 @@
 #define          LICENSED_APP_ID_SOLARCAPTURE_TAP        0x400
 /* enum: Capture SolarSystem 40G */
 #define          LICENSED_APP_ID_CAPTURE_SOLARSYSTEM_40G 0x800
+/* enum: Capture SolarSystem 1G */
+#define          LICENSED_APP_ID_CAPTURE_SOLARSYSTEM_1G  0x1000
+/* enum: ScaleOut Onload */
+#define          LICENSED_APP_ID_SCALEOUT_ONLOAD         0x2000
+/* enum: SCS Network Analytics Dashboard */
+#define          LICENSED_APP_ID_DSHBRD                  0x4000
+/* enum: SolarCapture Trading Analytics */
+#define          LICENSED_APP_ID_SCATRD                  0x8000
 #define       LICENSED_APP_ID_ID_LBN 0
 #define       LICENSED_APP_ID_ID_WIDTH 32
 
@@ -5590,6 +6722,14 @@
 #define        LICENSED_V3_APPS_SOLARCAPTURE_TAP_WIDTH 1
 #define        LICENSED_V3_APPS_CAPTURE_SOLARSYSTEM_40G_LBN 11
 #define        LICENSED_V3_APPS_CAPTURE_SOLARSYSTEM_40G_WIDTH 1
+#define        LICENSED_V3_APPS_CAPTURE_SOLARSYSTEM_1G_LBN 12
+#define        LICENSED_V3_APPS_CAPTURE_SOLARSYSTEM_1G_WIDTH 1
+#define        LICENSED_V3_APPS_SCALEOUT_ONLOAD_LBN 13
+#define        LICENSED_V3_APPS_SCALEOUT_ONLOAD_WIDTH 1
+#define        LICENSED_V3_APPS_DSHBRD_LBN 14
+#define        LICENSED_V3_APPS_DSHBRD_WIDTH 1
+#define        LICENSED_V3_APPS_SCATRD_LBN 15
+#define        LICENSED_V3_APPS_SCATRD_WIDTH 1
 #define       LICENSED_V3_APPS_MASK_LBN 0
 #define       LICENSED_V3_APPS_MASK_WIDTH 64
 
@@ -5636,6 +6776,18 @@
 #define       TX_TIMESTAMP_EVENT_TX_EV_TYPE_LEN 1
 /* enum: This is a TX completion event, not a timestamp */
 #define          TX_TIMESTAMP_EVENT_TX_EV_COMPLETION  0x0
+/* enum: This is a TX completion event for a CTPIO transmit. The event format
+ * is the same as for TX_EV_COMPLETION.
+ */
+#define          TX_TIMESTAMP_EVENT_TX_EV_CTPIO_COMPLETION  0x11
+/* enum: This is the low part of a TX timestamp for a CTPIO transmission. The
+ * event format is the same as for TX_EV_TSTAMP_LO.
+ */
+#define          TX_TIMESTAMP_EVENT_TX_EV_CTPIO_TS_LO  0x12
+/* enum: This is the high part of a TX timestamp for a CTPIO transmission. The
+ * event format is the same as for TX_EV_TSTAMP_HI.
+ */
+#define          TX_TIMESTAMP_EVENT_TX_EV_CTPIO_TS_HI  0x13
 /* enum: This is the low part of a TX timestamp event */
 #define          TX_TIMESTAMP_EVENT_TX_EV_TSTAMP_LO  0x51
 /* enum: This is the high part of a TX timestamp event */
@@ -5669,6 +6821,19 @@
 #define       RSS_MODE_HASH_SELECTOR_LBN 0
 #define       RSS_MODE_HASH_SELECTOR_WIDTH 8
 
+/* CTPIO_STATS_MAP structuredef */
+#define    CTPIO_STATS_MAP_LEN 4
+/* The (function relative) VI number */
+#define       CTPIO_STATS_MAP_VI_OFST 0
+#define       CTPIO_STATS_MAP_VI_LEN 2
+#define       CTPIO_STATS_MAP_VI_LBN 0
+#define       CTPIO_STATS_MAP_VI_WIDTH 16
+/* The target bucket for the VI */
+#define       CTPIO_STATS_MAP_BUCKET_OFST 2
+#define       CTPIO_STATS_MAP_BUCKET_LEN 2
+#define       CTPIO_STATS_MAP_BUCKET_LBN 16
+#define       CTPIO_STATS_MAP_BUCKET_WIDTH 16
+
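
The _LBN/_WIDTH pairs describe sub-fields in bits: treating the 4-byte
structure as a little-endian dword, _LBN is the field's lowest bit number
and _WIDTH its size in bits. A sketch of extracting such a field follows;
mcdi_field() is a hypothetical helper, not a macro from this header:

	#include <stdint.h>

	static uint32_t mcdi_field(uint32_t dword, unsigned int lbn,
				   unsigned int width)
	{
		/* Guard the width == 32 case, where 1u << 32 is undefined. */
		uint32_t mask = (width < 32) ? ((1u << width) - 1) : 0xffffffffu;
		return (dword >> lbn) & mask;
	}

	/* e.g. bucket = mcdi_field(map, CTPIO_STATS_MAP_BUCKET_LBN,
	 *                          CTPIO_STATS_MAP_BUCKET_WIDTH);
	 */
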
 
 /***********************************/
 /* MC_CMD_READ_REGS
@@ -5676,7 +6841,7 @@
  */
 #define MC_CMD_READ_REGS 0x50
 
-#define MC_CMD_0x50_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+#define MC_CMD_0x50_PRIVILEGE_CTG SRIOV_CTG_INSECURE
 
 /* MC_CMD_READ_REGS_IN msgrequest */
 #define    MC_CMD_READ_REGS_IN_LEN 0
@@ -5709,17 +6874,22 @@
 #define    MC_CMD_INIT_EVQ_IN_LEN(num) (36+8*(num))
 /* Size, in entries */
 #define       MC_CMD_INIT_EVQ_IN_SIZE_OFST 0
+#define       MC_CMD_INIT_EVQ_IN_SIZE_LEN 4
 /* Desired instance. Must be set to a specific instance, which is a function
  * local queue index.
  */
 #define       MC_CMD_INIT_EVQ_IN_INSTANCE_OFST 4
+#define       MC_CMD_INIT_EVQ_IN_INSTANCE_LEN 4
 /* The initial timer value. The load value is ignored if the timer mode is DIS.
  */
 #define       MC_CMD_INIT_EVQ_IN_TMR_LOAD_OFST 8
+#define       MC_CMD_INIT_EVQ_IN_TMR_LOAD_LEN 4
 /* The reload value is ignored in one-shot modes */
 #define       MC_CMD_INIT_EVQ_IN_TMR_RELOAD_OFST 12
+#define       MC_CMD_INIT_EVQ_IN_TMR_RELOAD_LEN 4
 /* tbd */
 #define       MC_CMD_INIT_EVQ_IN_FLAGS_OFST 16
+#define       MC_CMD_INIT_EVQ_IN_FLAGS_LEN 4
 #define        MC_CMD_INIT_EVQ_IN_FLAG_INTERRUPTING_LBN 0
 #define        MC_CMD_INIT_EVQ_IN_FLAG_INTERRUPTING_WIDTH 1
 #define        MC_CMD_INIT_EVQ_IN_FLAG_RPTR_DOS_LBN 1
@@ -5735,6 +6905,7 @@
 #define        MC_CMD_INIT_EVQ_IN_FLAG_USE_TIMER_LBN 6
 #define        MC_CMD_INIT_EVQ_IN_FLAG_USE_TIMER_WIDTH 1
 #define       MC_CMD_INIT_EVQ_IN_TMR_MODE_OFST 20
+#define       MC_CMD_INIT_EVQ_IN_TMR_MODE_LEN 4
 /* enum: Disabled */
 #define          MC_CMD_INIT_EVQ_IN_TMR_MODE_DIS 0x0
 /* enum: Immediate */
@@ -5745,13 +6916,16 @@
 #define          MC_CMD_INIT_EVQ_IN_TMR_INT_HLDOFF 0x3
 /* Target EVQ for wakeups if in wakeup mode. */
 #define       MC_CMD_INIT_EVQ_IN_TARGET_EVQ_OFST 24
+#define       MC_CMD_INIT_EVQ_IN_TARGET_EVQ_LEN 4
 /* Target interrupt if in interrupting mode (note union with target EVQ). Use
  * MC_CMD_RESOURCE_INSTANCE_ANY unless a specific one is required for test
  * purposes.
  */
 #define       MC_CMD_INIT_EVQ_IN_IRQ_NUM_OFST 24
+#define       MC_CMD_INIT_EVQ_IN_IRQ_NUM_LEN 4
 /* Event Counter Mode. */
 #define       MC_CMD_INIT_EVQ_IN_COUNT_MODE_OFST 28
+#define       MC_CMD_INIT_EVQ_IN_COUNT_MODE_LEN 4
 /* enum: Disabled */
 #define          MC_CMD_INIT_EVQ_IN_COUNT_MODE_DIS 0x0
 /* enum: Disabled */
@@ -5762,6 +6936,7 @@
 #define          MC_CMD_INIT_EVQ_IN_COUNT_MODE_RXTX 0x3
 /* Event queue packet count threshold. */
 #define       MC_CMD_INIT_EVQ_IN_COUNT_THRSHLD_OFST 32
+#define       MC_CMD_INIT_EVQ_IN_COUNT_THRSHLD_LEN 4
 /* 64-bit address of 4k of 4k-aligned host memory buffer */
 #define       MC_CMD_INIT_EVQ_IN_DMA_ADDR_OFST 36
 #define       MC_CMD_INIT_EVQ_IN_DMA_ADDR_LEN 8
@@ -5774,6 +6949,7 @@
 #define    MC_CMD_INIT_EVQ_OUT_LEN 4
 /* Only valid if INTRFLAG was true */
 #define       MC_CMD_INIT_EVQ_OUT_IRQ_OFST 0
+#define       MC_CMD_INIT_EVQ_OUT_IRQ_LEN 4
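
The FLAG_* bits above compose into the single FLAGS dword at offset 16. A
sketch of building that dword; mcdi_flag() is hypothetical, and the example
assumes all of these flags have _WIDTH 1 as defined above:

	#include <stdint.h>

	static uint32_t mcdi_flag(unsigned int lbn)
	{
		return 1u << lbn;	/* single-bit flag at its _LBN position */
	}

	/* e.g. flags = mcdi_flag(MC_CMD_INIT_EVQ_IN_FLAG_INTERRUPTING_LBN) |
	 *              mcdi_flag(MC_CMD_INIT_EVQ_IN_FLAG_USE_TIMER_LBN);
	 */
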
 
 /* MC_CMD_INIT_EVQ_V2_IN msgrequest */
 #define    MC_CMD_INIT_EVQ_V2_IN_LENMIN 44
@@ -5781,17 +6957,22 @@
 #define    MC_CMD_INIT_EVQ_V2_IN_LEN(num) (36+8*(num))
 /* Size, in entries */
 #define       MC_CMD_INIT_EVQ_V2_IN_SIZE_OFST 0
+#define       MC_CMD_INIT_EVQ_V2_IN_SIZE_LEN 4
 /* Desired instance. Must be set to a specific instance, which is a function
  * local queue index.
  */
 #define       MC_CMD_INIT_EVQ_V2_IN_INSTANCE_OFST 4
+#define       MC_CMD_INIT_EVQ_V2_IN_INSTANCE_LEN 4
 /* The initial timer value. The load value is ignored if the timer mode is DIS.
  */
 #define       MC_CMD_INIT_EVQ_V2_IN_TMR_LOAD_OFST 8
+#define       MC_CMD_INIT_EVQ_V2_IN_TMR_LOAD_LEN 4
 /* The reload value is ignored in one-shot modes */
 #define       MC_CMD_INIT_EVQ_V2_IN_TMR_RELOAD_OFST 12
+#define       MC_CMD_INIT_EVQ_V2_IN_TMR_RELOAD_LEN 4
 /* tbd */
 #define       MC_CMD_INIT_EVQ_V2_IN_FLAGS_OFST 16
+#define       MC_CMD_INIT_EVQ_V2_IN_FLAGS_LEN 4
 #define        MC_CMD_INIT_EVQ_V2_IN_FLAG_INTERRUPTING_LBN 0
 #define        MC_CMD_INIT_EVQ_V2_IN_FLAG_INTERRUPTING_WIDTH 1
 #define        MC_CMD_INIT_EVQ_V2_IN_FLAG_RPTR_DOS_LBN 1
@@ -5828,6 +7009,7 @@
  */
 #define          MC_CMD_INIT_EVQ_V2_IN_FLAG_TYPE_AUTO 0x3
 #define       MC_CMD_INIT_EVQ_V2_IN_TMR_MODE_OFST 20
+#define       MC_CMD_INIT_EVQ_V2_IN_TMR_MODE_LEN 4
 /* enum: Disabled */
 #define          MC_CMD_INIT_EVQ_V2_IN_TMR_MODE_DIS 0x0
 /* enum: Immediate */
@@ -5838,13 +7020,16 @@
 #define          MC_CMD_INIT_EVQ_V2_IN_TMR_INT_HLDOFF 0x3
 /* Target EVQ for wakeups if in wakeup mode. */
 #define       MC_CMD_INIT_EVQ_V2_IN_TARGET_EVQ_OFST 24
+#define       MC_CMD_INIT_EVQ_V2_IN_TARGET_EVQ_LEN 4
 /* Target interrupt if in interrupting mode (note union with target EVQ). Use
  * MC_CMD_RESOURCE_INSTANCE_ANY unless a specific one is required for test
  * purposes.
  */
 #define       MC_CMD_INIT_EVQ_V2_IN_IRQ_NUM_OFST 24
+#define       MC_CMD_INIT_EVQ_V2_IN_IRQ_NUM_LEN 4
 /* Event Counter Mode. */
 #define       MC_CMD_INIT_EVQ_V2_IN_COUNT_MODE_OFST 28
+#define       MC_CMD_INIT_EVQ_V2_IN_COUNT_MODE_LEN 4
 /* enum: Disabled */
 #define          MC_CMD_INIT_EVQ_V2_IN_COUNT_MODE_DIS 0x0
 /* enum: Disabled */
@@ -5855,6 +7040,7 @@
 #define          MC_CMD_INIT_EVQ_V2_IN_COUNT_MODE_RXTX 0x3
 /* Event queue packet count threshold. */
 #define       MC_CMD_INIT_EVQ_V2_IN_COUNT_THRSHLD_OFST 32
+#define       MC_CMD_INIT_EVQ_V2_IN_COUNT_THRSHLD_LEN 4
 /* 64-bit address of 4k of 4k-aligned host memory buffer */
 #define       MC_CMD_INIT_EVQ_V2_IN_DMA_ADDR_OFST 36
 #define       MC_CMD_INIT_EVQ_V2_IN_DMA_ADDR_LEN 8
@@ -5867,8 +7053,10 @@
 #define    MC_CMD_INIT_EVQ_V2_OUT_LEN 8
 /* Only valid if INTRFLAG was true */
 #define       MC_CMD_INIT_EVQ_V2_OUT_IRQ_OFST 0
+#define       MC_CMD_INIT_EVQ_V2_OUT_IRQ_LEN 4
 /* Actual configuration applied on the card */
 #define       MC_CMD_INIT_EVQ_V2_OUT_FLAGS_OFST 4
+#define       MC_CMD_INIT_EVQ_V2_OUT_FLAGS_LEN 4
 #define        MC_CMD_INIT_EVQ_V2_OUT_FLAG_CUT_THRU_LBN 0
 #define        MC_CMD_INIT_EVQ_V2_OUT_FLAG_CUT_THRU_WIDTH 1
 #define        MC_CMD_INIT_EVQ_V2_OUT_FLAG_RX_MERGE_LBN 1
@@ -5916,17 +7104,22 @@
 #define    MC_CMD_INIT_RXQ_IN_LEN(num) (28+8*(num))
 /* Size, in entries */
 #define       MC_CMD_INIT_RXQ_IN_SIZE_OFST 0
+#define       MC_CMD_INIT_RXQ_IN_SIZE_LEN 4
 /* The EVQ to send events to. This is an index originally specified to INIT_EVQ
  */
 #define       MC_CMD_INIT_RXQ_IN_TARGET_EVQ_OFST 4
+#define       MC_CMD_INIT_RXQ_IN_TARGET_EVQ_LEN 4
 /* The value to put in the event data. Check hardware spec. for valid range. */
 #define       MC_CMD_INIT_RXQ_IN_LABEL_OFST 8
+#define       MC_CMD_INIT_RXQ_IN_LABEL_LEN 4
 /* Desired instance. Must be set to a specific instance, which is a function
  * local queue index.
  */
 #define       MC_CMD_INIT_RXQ_IN_INSTANCE_OFST 12
+#define       MC_CMD_INIT_RXQ_IN_INSTANCE_LEN 4
 /* There will be more flags here. */
 #define       MC_CMD_INIT_RXQ_IN_FLAGS_OFST 16
+#define       MC_CMD_INIT_RXQ_IN_FLAGS_LEN 4
 #define        MC_CMD_INIT_RXQ_IN_FLAG_BUFF_MODE_LBN 0
 #define        MC_CMD_INIT_RXQ_IN_FLAG_BUFF_MODE_WIDTH 1
 #define        MC_CMD_INIT_RXQ_IN_FLAG_HDR_SPLIT_LBN 1
@@ -5945,8 +7138,10 @@
 #define        MC_CMD_INIT_RXQ_IN_UNUSED_WIDTH 1
 /* Owner ID to use if in buffer mode (zero if physical) */
 #define       MC_CMD_INIT_RXQ_IN_OWNER_ID_OFST 20
+#define       MC_CMD_INIT_RXQ_IN_OWNER_ID_LEN 4
 /* The port ID associated with the v-adaptor which should contain this DMAQ. */
 #define       MC_CMD_INIT_RXQ_IN_PORT_ID_OFST 24
+#define       MC_CMD_INIT_RXQ_IN_PORT_ID_LEN 4
 /* 64-bit address of 4k of 4k-aligned host memory buffer */
 #define       MC_CMD_INIT_RXQ_IN_DMA_ADDR_OFST 28
 #define       MC_CMD_INIT_RXQ_IN_DMA_ADDR_LEN 8
@@ -5961,17 +7156,22 @@
 #define    MC_CMD_INIT_RXQ_EXT_IN_LEN 544
 /* Size, in entries */
 #define       MC_CMD_INIT_RXQ_EXT_IN_SIZE_OFST 0
+#define       MC_CMD_INIT_RXQ_EXT_IN_SIZE_LEN 4
 /* The EVQ to send events to. This is an index originally specified to INIT_EVQ
  */
 #define       MC_CMD_INIT_RXQ_EXT_IN_TARGET_EVQ_OFST 4
+#define       MC_CMD_INIT_RXQ_EXT_IN_TARGET_EVQ_LEN 4
 /* The value to put in the event data. Check hardware spec. for valid range. */
 #define       MC_CMD_INIT_RXQ_EXT_IN_LABEL_OFST 8
+#define       MC_CMD_INIT_RXQ_EXT_IN_LABEL_LEN 4
 /* Desired instance. Must be set to a specific instance, which is a function
  * local queue index.
  */
 #define       MC_CMD_INIT_RXQ_EXT_IN_INSTANCE_OFST 12
+#define       MC_CMD_INIT_RXQ_EXT_IN_INSTANCE_LEN 4
 /* There will be more flags here. */
 #define       MC_CMD_INIT_RXQ_EXT_IN_FLAGS_OFST 16
+#define       MC_CMD_INIT_RXQ_EXT_IN_FLAGS_LEN 4
 #define        MC_CMD_INIT_RXQ_EXT_IN_FLAG_BUFF_MODE_LBN 0
 #define        MC_CMD_INIT_RXQ_EXT_IN_FLAG_BUFF_MODE_WIDTH 1
 #define        MC_CMD_INIT_RXQ_EXT_IN_FLAG_HDR_SPLIT_LBN 1
@@ -6007,8 +7207,10 @@
 #define        MC_CMD_INIT_RXQ_EXT_IN_FLAG_FORCE_EV_MERGING_WIDTH 1
 /* Owner ID to use if in buffer mode (zero if physical) */
 #define       MC_CMD_INIT_RXQ_EXT_IN_OWNER_ID_OFST 20
+#define       MC_CMD_INIT_RXQ_EXT_IN_OWNER_ID_LEN 4
 /* The port ID associated with the v-adaptor which should contain this DMAQ. */
 #define       MC_CMD_INIT_RXQ_EXT_IN_PORT_ID_OFST 24
+#define       MC_CMD_INIT_RXQ_EXT_IN_PORT_ID_LEN 4
 /* 64-bit address of 4k of 4k-aligned host memory buffer */
 #define       MC_CMD_INIT_RXQ_EXT_IN_DMA_ADDR_OFST 28
 #define       MC_CMD_INIT_RXQ_EXT_IN_DMA_ADDR_LEN 8
@@ -6017,6 +7219,7 @@
 #define       MC_CMD_INIT_RXQ_EXT_IN_DMA_ADDR_NUM 64
 /* Maximum length of packet to receive, if SNAPSHOT_MODE flag is set */
 #define       MC_CMD_INIT_RXQ_EXT_IN_SNAPSHOT_LENGTH_OFST 540
+#define       MC_CMD_INIT_RXQ_EXT_IN_SNAPSHOT_LENGTH_LEN 4
 
 /* MC_CMD_INIT_RXQ_OUT msgresponse */
 #define    MC_CMD_INIT_RXQ_OUT_LEN 0
@@ -6040,18 +7243,23 @@
 #define    MC_CMD_INIT_TXQ_IN_LEN(num) (28+8*(num))
 /* Size, in entries */
 #define       MC_CMD_INIT_TXQ_IN_SIZE_OFST 0
+#define       MC_CMD_INIT_TXQ_IN_SIZE_LEN 4
 /* The EVQ to send events to. This is an index originally specified to
  * INIT_EVQ.
  */
 #define       MC_CMD_INIT_TXQ_IN_TARGET_EVQ_OFST 4
+#define       MC_CMD_INIT_TXQ_IN_TARGET_EVQ_LEN 4
 /* The value to put in the event data. Check hardware spec. for valid range. */
 #define       MC_CMD_INIT_TXQ_IN_LABEL_OFST 8
+#define       MC_CMD_INIT_TXQ_IN_LABEL_LEN 4
 /* Desired instance. Must be set to a specific instance, which is a function
  * local queue index.
  */
 #define       MC_CMD_INIT_TXQ_IN_INSTANCE_OFST 12
+#define       MC_CMD_INIT_TXQ_IN_INSTANCE_LEN 4
 /* There will be more flags here. */
 #define       MC_CMD_INIT_TXQ_IN_FLAGS_OFST 16
+#define       MC_CMD_INIT_TXQ_IN_FLAGS_LEN 4
 #define        MC_CMD_INIT_TXQ_IN_FLAG_BUFF_MODE_LBN 0
 #define        MC_CMD_INIT_TXQ_IN_FLAG_BUFF_MODE_WIDTH 1
 #define        MC_CMD_INIT_TXQ_IN_FLAG_IP_CSUM_DIS_LBN 1
@@ -6072,8 +7280,10 @@
 #define        MC_CMD_INIT_TXQ_IN_FLAG_INNER_TCP_CSUM_EN_WIDTH 1
 /* Owner ID to use if in buffer mode (zero if physical) */
 #define       MC_CMD_INIT_TXQ_IN_OWNER_ID_OFST 20
+#define       MC_CMD_INIT_TXQ_IN_OWNER_ID_LEN 4
 /* The port ID associated with the v-adaptor which should contain this DMAQ. */
 #define       MC_CMD_INIT_TXQ_IN_PORT_ID_OFST 24
+#define       MC_CMD_INIT_TXQ_IN_PORT_ID_LEN 4
 /* 64-bit address of 4k of 4k-aligned host memory buffer */
 #define       MC_CMD_INIT_TXQ_IN_DMA_ADDR_OFST 28
 #define       MC_CMD_INIT_TXQ_IN_DMA_ADDR_LEN 8
@@ -6088,18 +7298,23 @@
 #define    MC_CMD_INIT_TXQ_EXT_IN_LEN 544
 /* Size, in entries */
 #define       MC_CMD_INIT_TXQ_EXT_IN_SIZE_OFST 0
+#define       MC_CMD_INIT_TXQ_EXT_IN_SIZE_LEN 4
 /* The EVQ to send events to. This is an index originally specified to
  * INIT_EVQ.
  */
 #define       MC_CMD_INIT_TXQ_EXT_IN_TARGET_EVQ_OFST 4
+#define       MC_CMD_INIT_TXQ_EXT_IN_TARGET_EVQ_LEN 4
 /* The value to put in the event data. Check hardware spec. for valid range. */
 #define       MC_CMD_INIT_TXQ_EXT_IN_LABEL_OFST 8
+#define       MC_CMD_INIT_TXQ_EXT_IN_LABEL_LEN 4
 /* Desired instance. Must be set to a specific instance, which is a function
  * local queue index.
  */
 #define       MC_CMD_INIT_TXQ_EXT_IN_INSTANCE_OFST 12
+#define       MC_CMD_INIT_TXQ_EXT_IN_INSTANCE_LEN 4
 /* There will be more flags here. */
 #define       MC_CMD_INIT_TXQ_EXT_IN_FLAGS_OFST 16
+#define       MC_CMD_INIT_TXQ_EXT_IN_FLAGS_LEN 4
 #define        MC_CMD_INIT_TXQ_EXT_IN_FLAG_BUFF_MODE_LBN 0
 #define        MC_CMD_INIT_TXQ_EXT_IN_FLAG_BUFF_MODE_WIDTH 1
 #define        MC_CMD_INIT_TXQ_EXT_IN_FLAG_IP_CSUM_DIS_LBN 1
@@ -6122,10 +7337,14 @@
 #define        MC_CMD_INIT_TXQ_EXT_IN_FLAG_TSOV2_EN_WIDTH 1
 #define        MC_CMD_INIT_TXQ_EXT_IN_FLAG_CTPIO_LBN 13
 #define        MC_CMD_INIT_TXQ_EXT_IN_FLAG_CTPIO_WIDTH 1
+#define        MC_CMD_INIT_TXQ_EXT_IN_FLAG_CTPIO_UTHRESH_LBN 14
+#define        MC_CMD_INIT_TXQ_EXT_IN_FLAG_CTPIO_UTHRESH_WIDTH 1
 /* Owner ID to use if in buffer mode (zero if physical) */
 #define       MC_CMD_INIT_TXQ_EXT_IN_OWNER_ID_OFST 20
+#define       MC_CMD_INIT_TXQ_EXT_IN_OWNER_ID_LEN 4
 /* The port ID associated with the v-adaptor which should contain this DMAQ. */
 #define       MC_CMD_INIT_TXQ_EXT_IN_PORT_ID_OFST 24
+#define       MC_CMD_INIT_TXQ_EXT_IN_PORT_ID_LEN 4
 /* 64-bit address of 4k of 4k-aligned host memory buffer */
 #define       MC_CMD_INIT_TXQ_EXT_IN_DMA_ADDR_OFST 28
 #define       MC_CMD_INIT_TXQ_EXT_IN_DMA_ADDR_LEN 8
@@ -6135,6 +7354,7 @@
 #define       MC_CMD_INIT_TXQ_EXT_IN_DMA_ADDR_MAXNUM 64
 /* Flags related to Qbb flow control mode. */
 #define       MC_CMD_INIT_TXQ_EXT_IN_QBB_FLAGS_OFST 540
+#define       MC_CMD_INIT_TXQ_EXT_IN_QBB_FLAGS_LEN 4
 #define        MC_CMD_INIT_TXQ_EXT_IN_QBB_ENABLE_LBN 0
 #define        MC_CMD_INIT_TXQ_EXT_IN_QBB_ENABLE_WIDTH 1
 #define        MC_CMD_INIT_TXQ_EXT_IN_QBB_PRIORITY_LBN 1
@@ -6161,6 +7381,7 @@
  * passed to INIT_EVQ
  */
 #define       MC_CMD_FINI_EVQ_IN_INSTANCE_OFST 0
+#define       MC_CMD_FINI_EVQ_IN_INSTANCE_LEN 4
 
 /* MC_CMD_FINI_EVQ_OUT msgresponse */
 #define    MC_CMD_FINI_EVQ_OUT_LEN 0
@@ -6178,6 +7399,7 @@
 #define    MC_CMD_FINI_RXQ_IN_LEN 4
 /* Instance of RXQ to destroy */
 #define       MC_CMD_FINI_RXQ_IN_INSTANCE_OFST 0
+#define       MC_CMD_FINI_RXQ_IN_INSTANCE_LEN 4
 
 /* MC_CMD_FINI_RXQ_OUT msgresponse */
 #define    MC_CMD_FINI_RXQ_OUT_LEN 0
@@ -6195,6 +7417,7 @@
 #define    MC_CMD_FINI_TXQ_IN_LEN 4
 /* Instance of TXQ to destroy */
 #define       MC_CMD_FINI_TXQ_IN_INSTANCE_OFST 0
+#define       MC_CMD_FINI_TXQ_IN_INSTANCE_LEN 4
 
 /* MC_CMD_FINI_TXQ_OUT msgresponse */
 #define    MC_CMD_FINI_TXQ_OUT_LEN 0
@@ -6212,6 +7435,7 @@
 #define    MC_CMD_DRIVER_EVENT_IN_LEN 12
 /* Handle of target EVQ */
 #define       MC_CMD_DRIVER_EVENT_IN_EVQ_OFST 0
+#define       MC_CMD_DRIVER_EVENT_IN_EVQ_LEN 4
 /* Bits 0 - 63 of event */
 #define       MC_CMD_DRIVER_EVENT_IN_DATA_OFST 4
 #define       MC_CMD_DRIVER_EVENT_IN_DATA_LEN 8
@@ -6237,6 +7461,7 @@
 #define    MC_CMD_PROXY_CMD_IN_LEN 4
 /* The handle of the target function. */
 #define       MC_CMD_PROXY_CMD_IN_TARGET_OFST 0
+#define       MC_CMD_PROXY_CMD_IN_TARGET_LEN 4
 #define        MC_CMD_PROXY_CMD_IN_TARGET_PF_LBN 0
 #define        MC_CMD_PROXY_CMD_IN_TARGET_PF_WIDTH 16
 #define        MC_CMD_PROXY_CMD_IN_TARGET_VF_LBN 16
@@ -6252,6 +7477,7 @@
 #define    MC_PROXY_STATUS_BUFFER_LEN 16
 /* Handle allocated by the firmware for this proxy transaction */
 #define       MC_PROXY_STATUS_BUFFER_HANDLE_OFST 0
+#define       MC_PROXY_STATUS_BUFFER_HANDLE_LEN 4
 /* enum: An invalid handle. */
 #define          MC_PROXY_STATUS_BUFFER_HANDLE_INVALID  0x0
 #define       MC_PROXY_STATUS_BUFFER_HANDLE_LBN 0
@@ -6282,6 +7508,7 @@
  * elevated privilege mask granted to the requesting function.
  */
 #define       MC_PROXY_STATUS_BUFFER_GRANTED_PRIVILEGES_OFST 12
+#define       MC_PROXY_STATUS_BUFFER_GRANTED_PRIVILEGES_LEN 4
 #define       MC_PROXY_STATUS_BUFFER_GRANTED_PRIVILEGES_LBN 96
 #define       MC_PROXY_STATUS_BUFFER_GRANTED_PRIVILEGES_WIDTH 32
 
@@ -6298,6 +7525,7 @@
 /* MC_CMD_PROXY_CONFIGURE_IN msgrequest */
 #define    MC_CMD_PROXY_CONFIGURE_IN_LEN 108
 #define       MC_CMD_PROXY_CONFIGURE_IN_FLAGS_OFST 0
+#define       MC_CMD_PROXY_CONFIGURE_IN_FLAGS_LEN 4
 #define        MC_CMD_PROXY_CONFIGURE_IN_ENABLE_LBN 0
 #define        MC_CMD_PROXY_CONFIGURE_IN_ENABLE_WIDTH 1
 /* Host provides a contiguous memory buffer that contains at least NUM_BLOCKS
@@ -6309,6 +7537,7 @@
 #define       MC_CMD_PROXY_CONFIGURE_IN_STATUS_BUFF_ADDR_HI_OFST 8
 /* Must be a power of 2 */
 #define       MC_CMD_PROXY_CONFIGURE_IN_STATUS_BLOCK_SIZE_OFST 12
+#define       MC_CMD_PROXY_CONFIGURE_IN_STATUS_BLOCK_SIZE_LEN 4
 /* Host provides a contiguous memory buffer that contains at least NUM_BLOCKS
  * of blocks, each of the size REPLY_BLOCK_SIZE.
  */
@@ -6318,6 +7547,7 @@
 #define       MC_CMD_PROXY_CONFIGURE_IN_REQUEST_BUFF_ADDR_HI_OFST 20
 /* Must be a power of 2 */
 #define       MC_CMD_PROXY_CONFIGURE_IN_REQUEST_BLOCK_SIZE_OFST 24
+#define       MC_CMD_PROXY_CONFIGURE_IN_REQUEST_BLOCK_SIZE_LEN 4
 /* Host provides a contiguous memory buffer that contains at least NUM_BLOCKS
  * of blocks, each of the size STATUS_BLOCK_SIZE. This buffer is only needed if
  * the host intends to complete proxied operations by using MC_CMD_PROXY_CMD.
@@ -6328,8 +7558,10 @@
 #define       MC_CMD_PROXY_CONFIGURE_IN_REPLY_BUFF_ADDR_HI_OFST 32
 /* Must be a power of 2, or zero if this buffer is not provided */
 #define       MC_CMD_PROXY_CONFIGURE_IN_REPLY_BLOCK_SIZE_OFST 36
+#define       MC_CMD_PROXY_CONFIGURE_IN_REPLY_BLOCK_SIZE_LEN 4
 /* Applies to all three buffers */
 #define       MC_CMD_PROXY_CONFIGURE_IN_NUM_BLOCKS_OFST 40
+#define       MC_CMD_PROXY_CONFIGURE_IN_NUM_BLOCKS_LEN 4
 /* A bit mask defining which MCDI operations may be proxied */
 #define       MC_CMD_PROXY_CONFIGURE_IN_ALLOWED_MCDI_MASK_OFST 44
 #define       MC_CMD_PROXY_CONFIGURE_IN_ALLOWED_MCDI_MASK_LEN 64
@@ -6337,6 +7569,7 @@
 /* MC_CMD_PROXY_CONFIGURE_EXT_IN msgrequest */
 #define    MC_CMD_PROXY_CONFIGURE_EXT_IN_LEN 112
 #define       MC_CMD_PROXY_CONFIGURE_EXT_IN_FLAGS_OFST 0
+#define       MC_CMD_PROXY_CONFIGURE_EXT_IN_FLAGS_LEN 4
 #define        MC_CMD_PROXY_CONFIGURE_EXT_IN_ENABLE_LBN 0
 #define        MC_CMD_PROXY_CONFIGURE_EXT_IN_ENABLE_WIDTH 1
 /* Host provides a contiguous memory buffer that contains at least NUM_BLOCKS
@@ -6348,6 +7581,7 @@
 #define       MC_CMD_PROXY_CONFIGURE_EXT_IN_STATUS_BUFF_ADDR_HI_OFST 8
 /* Must be a power of 2 */
 #define       MC_CMD_PROXY_CONFIGURE_EXT_IN_STATUS_BLOCK_SIZE_OFST 12
+#define       MC_CMD_PROXY_CONFIGURE_EXT_IN_STATUS_BLOCK_SIZE_LEN 4
 /* Host provides a contiguous memory buffer that contains at least NUM_BLOCKS
  * of blocks, each of the size REPLY_BLOCK_SIZE.
  */
@@ -6357,6 +7591,7 @@
 #define       MC_CMD_PROXY_CONFIGURE_EXT_IN_REQUEST_BUFF_ADDR_HI_OFST 20
 /* Must be a power of 2 */
 #define       MC_CMD_PROXY_CONFIGURE_EXT_IN_REQUEST_BLOCK_SIZE_OFST 24
+#define       MC_CMD_PROXY_CONFIGURE_EXT_IN_REQUEST_BLOCK_SIZE_LEN 4
 /* Host provides a contiguous memory buffer that contains at least NUM_BLOCKS
  * of blocks, each of the size STATUS_BLOCK_SIZE. This buffer is only needed if
  * the host intends to complete proxied operations by using MC_CMD_PROXY_CMD.
@@ -6367,12 +7602,15 @@
 #define       MC_CMD_PROXY_CONFIGURE_EXT_IN_REPLY_BUFF_ADDR_HI_OFST 32
 /* Must be a power of 2, or zero if this buffer is not provided */
 #define       MC_CMD_PROXY_CONFIGURE_EXT_IN_REPLY_BLOCK_SIZE_OFST 36
+#define       MC_CMD_PROXY_CONFIGURE_EXT_IN_REPLY_BLOCK_SIZE_LEN 4
 /* Applies to all three buffers */
 #define       MC_CMD_PROXY_CONFIGURE_EXT_IN_NUM_BLOCKS_OFST 40
+#define       MC_CMD_PROXY_CONFIGURE_EXT_IN_NUM_BLOCKS_LEN 4
 /* A bit mask defining which MCDI operations may be proxied */
 #define       MC_CMD_PROXY_CONFIGURE_EXT_IN_ALLOWED_MCDI_MASK_OFST 44
 #define       MC_CMD_PROXY_CONFIGURE_EXT_IN_ALLOWED_MCDI_MASK_LEN 64
 #define       MC_CMD_PROXY_CONFIGURE_EXT_IN_RESERVED_OFST 108
+#define       MC_CMD_PROXY_CONFIGURE_EXT_IN_RESERVED_LEN 4
 
 /* MC_CMD_PROXY_CONFIGURE_OUT msgresponse */
 #define    MC_CMD_PROXY_CONFIGURE_OUT_LEN 0
@@ -6392,7 +7630,9 @@
 /* MC_CMD_PROXY_COMPLETE_IN msgrequest */
 #define    MC_CMD_PROXY_COMPLETE_IN_LEN 12
 #define       MC_CMD_PROXY_COMPLETE_IN_BLOCK_INDEX_OFST 0
+#define       MC_CMD_PROXY_COMPLETE_IN_BLOCK_INDEX_LEN 4
 #define       MC_CMD_PROXY_COMPLETE_IN_STATUS_OFST 4
+#define       MC_CMD_PROXY_COMPLETE_IN_STATUS_LEN 4
 /* enum: The operation has been completed by using MC_CMD_PROXY_CMD, the reply
  * is stored in the REPLY_BUFF.
  */
@@ -6408,6 +7648,7 @@
  */
 #define          MC_CMD_PROXY_COMPLETE_IN_TIMEDOUT 0x3
 #define       MC_CMD_PROXY_COMPLETE_IN_HANDLE_OFST 8
+#define       MC_CMD_PROXY_COMPLETE_IN_HANDLE_LEN 4
 
 /* MC_CMD_PROXY_COMPLETE_OUT msgresponse */
 #define    MC_CMD_PROXY_COMPLETE_OUT_LEN 0
@@ -6427,17 +7668,22 @@
 #define    MC_CMD_ALLOC_BUFTBL_CHUNK_IN_LEN 8
 /* Owner ID to use */
 #define       MC_CMD_ALLOC_BUFTBL_CHUNK_IN_OWNER_OFST 0
+#define       MC_CMD_ALLOC_BUFTBL_CHUNK_IN_OWNER_LEN 4
 /* Size of buffer table pages to use, in bytes (note that only a few values are
  * legal on any specific hardware).
  */
 #define       MC_CMD_ALLOC_BUFTBL_CHUNK_IN_PAGE_SIZE_OFST 4
+#define       MC_CMD_ALLOC_BUFTBL_CHUNK_IN_PAGE_SIZE_LEN 4
 
 /* MC_CMD_ALLOC_BUFTBL_CHUNK_OUT msgresponse */
 #define    MC_CMD_ALLOC_BUFTBL_CHUNK_OUT_LEN 12
 #define       MC_CMD_ALLOC_BUFTBL_CHUNK_OUT_HANDLE_OFST 0
+#define       MC_CMD_ALLOC_BUFTBL_CHUNK_OUT_HANDLE_LEN 4
 #define       MC_CMD_ALLOC_BUFTBL_CHUNK_OUT_NUMENTRIES_OFST 4
+#define       MC_CMD_ALLOC_BUFTBL_CHUNK_OUT_NUMENTRIES_LEN 4
 /* Buffer table IDs for use in DMA descriptors. */
 #define       MC_CMD_ALLOC_BUFTBL_CHUNK_OUT_ID_OFST 8
+#define       MC_CMD_ALLOC_BUFTBL_CHUNK_OUT_ID_LEN 4
 
 
 /***********************************/
@@ -6453,10 +7699,13 @@
 #define    MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_LENMAX 268
 #define    MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_LEN(num) (12+8*(num))
 #define       MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_HANDLE_OFST 0
+#define       MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_HANDLE_LEN 4
 /* ID */
 #define       MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_FIRSTID_OFST 4
+#define       MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_FIRSTID_LEN 4
 /* Num entries */
 #define       MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_NUMENTRIES_OFST 8
+#define       MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_NUMENTRIES_LEN 4
 /* Buffer table entry address */
 #define       MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_ENTRY_OFST 12
 #define       MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_ENTRY_LEN 8
@@ -6479,48 +7728,11 @@
 /* MC_CMD_FREE_BUFTBL_CHUNK_IN msgrequest */
 #define    MC_CMD_FREE_BUFTBL_CHUNK_IN_LEN 4
 #define       MC_CMD_FREE_BUFTBL_CHUNK_IN_HANDLE_OFST 0
+#define       MC_CMD_FREE_BUFTBL_CHUNK_IN_HANDLE_LEN 4
 
 /* MC_CMD_FREE_BUFTBL_CHUNK_OUT msgresponse */
 #define    MC_CMD_FREE_BUFTBL_CHUNK_OUT_LEN 0
 
-/* PORT_CONFIG_ENTRY structuredef */
-#define    PORT_CONFIG_ENTRY_LEN 16
-/* External port number (label) */
-#define       PORT_CONFIG_ENTRY_EXT_NUMBER_OFST 0
-#define       PORT_CONFIG_ENTRY_EXT_NUMBER_LEN 1
-#define       PORT_CONFIG_ENTRY_EXT_NUMBER_LBN 0
-#define       PORT_CONFIG_ENTRY_EXT_NUMBER_WIDTH 8
-/* Port core location */
-#define       PORT_CONFIG_ENTRY_CORE_OFST 1
-#define       PORT_CONFIG_ENTRY_CORE_LEN 1
-#define          PORT_CONFIG_ENTRY_STANDALONE  0x0 /* enum */
-#define          PORT_CONFIG_ENTRY_MASTER  0x1 /* enum */
-#define          PORT_CONFIG_ENTRY_SLAVE  0x2 /* enum */
-#define       PORT_CONFIG_ENTRY_CORE_LBN 8
-#define       PORT_CONFIG_ENTRY_CORE_WIDTH 8
-/* Internal number (HW resource) relative to the core */
-#define       PORT_CONFIG_ENTRY_INT_NUMBER_OFST 2
-#define       PORT_CONFIG_ENTRY_INT_NUMBER_LEN 1
-#define       PORT_CONFIG_ENTRY_INT_NUMBER_LBN 16
-#define       PORT_CONFIG_ENTRY_INT_NUMBER_WIDTH 8
-/* Reserved */
-#define       PORT_CONFIG_ENTRY_RSVD_OFST 3
-#define       PORT_CONFIG_ENTRY_RSVD_LEN 1
-#define       PORT_CONFIG_ENTRY_RSVD_LBN 24
-#define       PORT_CONFIG_ENTRY_RSVD_WIDTH 8
-/* Bitmask of KR lanes used by the port */
-#define       PORT_CONFIG_ENTRY_LANES_OFST 4
-#define       PORT_CONFIG_ENTRY_LANES_LBN 32
-#define       PORT_CONFIG_ENTRY_LANES_WIDTH 32
-/* Port capabilities (MC_CMD_PHY_CAP_*) */
-#define       PORT_CONFIG_ENTRY_SUPPORTED_CAPS_OFST 8
-#define       PORT_CONFIG_ENTRY_SUPPORTED_CAPS_LBN 64
-#define       PORT_CONFIG_ENTRY_SUPPORTED_CAPS_WIDTH 32
-/* Reserved (align to 16 bytes) */
-#define       PORT_CONFIG_ENTRY_RSVD2_OFST 12
-#define       PORT_CONFIG_ENTRY_RSVD2_LBN 96
-#define       PORT_CONFIG_ENTRY_RSVD2_WIDTH 32
-
 
 /***********************************/
 /* MC_CMD_FILTER_OP
@@ -6534,6 +7746,7 @@
 #define    MC_CMD_FILTER_OP_IN_LEN 108
 /* identifies the type of operation requested */
 #define       MC_CMD_FILTER_OP_IN_OP_OFST 0
+#define       MC_CMD_FILTER_OP_IN_OP_LEN 4
 /* enum: single-recipient filter insert */
 #define          MC_CMD_FILTER_OP_IN_OP_INSERT  0x0
 /* enum: single-recipient filter remove */
@@ -6554,8 +7767,10 @@
 /* The port ID associated with the v-adaptor which should contain this filter.
  */
 #define       MC_CMD_FILTER_OP_IN_PORT_ID_OFST 12
+#define       MC_CMD_FILTER_OP_IN_PORT_ID_LEN 4
 /* fields to include in match criteria */
 #define       MC_CMD_FILTER_OP_IN_MATCH_FIELDS_OFST 16
+#define       MC_CMD_FILTER_OP_IN_MATCH_FIELDS_LEN 4
 #define        MC_CMD_FILTER_OP_IN_MATCH_SRC_IP_LBN 0
 #define        MC_CMD_FILTER_OP_IN_MATCH_SRC_IP_WIDTH 1
 #define        MC_CMD_FILTER_OP_IN_MATCH_DST_IP_LBN 1
@@ -6586,6 +7801,7 @@
 #define        MC_CMD_FILTER_OP_IN_MATCH_UNKNOWN_UCAST_DST_WIDTH 1
 /* receive destination */
 #define       MC_CMD_FILTER_OP_IN_RX_DEST_OFST 20
+#define       MC_CMD_FILTER_OP_IN_RX_DEST_LEN 4
 /* enum: drop packets */
 #define          MC_CMD_FILTER_OP_IN_RX_DEST_DROP  0x0
 /* enum: receive to host */
@@ -6598,8 +7814,10 @@
 #define          MC_CMD_FILTER_OP_IN_RX_DEST_TX1  0x4
 /* receive queue handle (for multiple queue modes, this is the base queue) */
 #define       MC_CMD_FILTER_OP_IN_RX_QUEUE_OFST 24
+#define       MC_CMD_FILTER_OP_IN_RX_QUEUE_LEN 4
 /* receive mode */
 #define       MC_CMD_FILTER_OP_IN_RX_MODE_OFST 28
+#define       MC_CMD_FILTER_OP_IN_RX_MODE_LEN 4
 /* enum: receive to just the specified queue */
 #define          MC_CMD_FILTER_OP_IN_RX_MODE_SIMPLE  0x0
 /* enum: receive to multiple queues using RSS context */
@@ -6614,13 +7832,16 @@
  * MC_CMD_DOT1P_MAPPING_ALLOC.
  */
 #define       MC_CMD_FILTER_OP_IN_RX_CONTEXT_OFST 32
+#define       MC_CMD_FILTER_OP_IN_RX_CONTEXT_LEN 4
 /* transmit domain (reserved; set to 0) */
 #define       MC_CMD_FILTER_OP_IN_TX_DOMAIN_OFST 36
+#define       MC_CMD_FILTER_OP_IN_TX_DOMAIN_LEN 4
 /* transmit destination (either set the MAC and/or PM bits for explicit
  * control, or set this field to TX_DEST_DEFAULT for sensible default
  * behaviour)
  */
 #define       MC_CMD_FILTER_OP_IN_TX_DEST_OFST 40
+#define       MC_CMD_FILTER_OP_IN_TX_DEST_LEN 4
 /* enum: request default behaviour (based on filter type) */
 #define          MC_CMD_FILTER_OP_IN_TX_DEST_DEFAULT  0xffffffff
 #define        MC_CMD_FILTER_OP_IN_TX_DEST_MAC_LBN 0
@@ -6653,8 +7874,10 @@
 #define       MC_CMD_FILTER_OP_IN_IP_PROTO_LEN 2
 /* Firmware defined register 0 to match (reserved; set to 0) */
 #define       MC_CMD_FILTER_OP_IN_FWDEF0_OFST 68
+#define       MC_CMD_FILTER_OP_IN_FWDEF0_LEN 4
 /* Firmware defined register 1 to match (reserved; set to 0) */
 #define       MC_CMD_FILTER_OP_IN_FWDEF1_OFST 72
+#define       MC_CMD_FILTER_OP_IN_FWDEF1_LEN 4
 /* source IP address to match (as bytes in network order; set last 12 bytes to
  * 0 for IPv4 address)
  */
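
The FILTER_OP request layout above is consumed dword-by-dword: a filter insert writes OP_INSERT at offset 0, a MATCH_FIELDS bitmask assembled from the *_MATCH_*_LBN bits, the receive destination, and then the selected match fields. A minimal sketch, assuming a hypothetical mcdi_put_dword() helper (the sfc driver has its own MCDI buffer macros) and taking the RX_DEST_HOST value from the full header:

#include <stdint.h>
#include <string.h>

/* Hypothetical little-endian dword writer for an MCDI request buffer. */
static inline void mcdi_put_dword(uint8_t *buf, unsigned int ofst, uint32_t v)
{
	buf[ofst + 0] = v & 0xff;
	buf[ofst + 1] = (v >> 8) & 0xff;
	buf[ofst + 2] = (v >> 16) & 0xff;
	buf[ofst + 3] = (v >> 24) & 0xff;
}

/* Sketch: insert a single-recipient filter matching on destination IP,
 * delivered to the host. RX_DEST_HOST is assumed from the full header.
 */
static void build_filter_insert(uint8_t buf[MC_CMD_FILTER_OP_IN_LEN])
{
	memset(buf, 0, MC_CMD_FILTER_OP_IN_LEN);
	mcdi_put_dword(buf, MC_CMD_FILTER_OP_IN_OP_OFST,
		       MC_CMD_FILTER_OP_IN_OP_INSERT);
	mcdi_put_dword(buf, MC_CMD_FILTER_OP_IN_MATCH_FIELDS_OFST,
		       1u << MC_CMD_FILTER_OP_IN_MATCH_DST_IP_LBN);
	mcdi_put_dword(buf, MC_CMD_FILTER_OP_IN_RX_DEST_OFST,
		       MC_CMD_FILTER_OP_IN_RX_DEST_HOST);
	/* DST_IP, RX_QUEUE, PORT_ID etc. would be filled in the same way */
}
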
@@ -6673,6 +7896,7 @@
 #define    MC_CMD_FILTER_OP_EXT_IN_LEN 172
 /* identifies the type of operation requested */
 #define       MC_CMD_FILTER_OP_EXT_IN_OP_OFST 0
+#define       MC_CMD_FILTER_OP_EXT_IN_OP_LEN 4
 /*            Enum values, see field(s): */
 /*               MC_CMD_FILTER_OP_IN/OP */
 /* filter handle (for remove / unsubscribe operations) */
@@ -6683,8 +7907,10 @@
 /* The port ID associated with the v-adaptor which should contain this filter.
  */
 #define       MC_CMD_FILTER_OP_EXT_IN_PORT_ID_OFST 12
+#define       MC_CMD_FILTER_OP_EXT_IN_PORT_ID_LEN 4
 /* fields to include in match criteria */
 #define       MC_CMD_FILTER_OP_EXT_IN_MATCH_FIELDS_OFST 16
+#define       MC_CMD_FILTER_OP_EXT_IN_MATCH_FIELDS_LEN 4
 #define        MC_CMD_FILTER_OP_EXT_IN_MATCH_SRC_IP_LBN 0
 #define        MC_CMD_FILTER_OP_EXT_IN_MATCH_SRC_IP_WIDTH 1
 #define        MC_CMD_FILTER_OP_EXT_IN_MATCH_DST_IP_LBN 1
@@ -6743,6 +7969,7 @@
 #define        MC_CMD_FILTER_OP_EXT_IN_MATCH_UNKNOWN_UCAST_DST_WIDTH 1
 /* receive destination */
 #define       MC_CMD_FILTER_OP_EXT_IN_RX_DEST_OFST 20
+#define       MC_CMD_FILTER_OP_EXT_IN_RX_DEST_LEN 4
 /* enum: drop packets */
 #define          MC_CMD_FILTER_OP_EXT_IN_RX_DEST_DROP  0x0
 /* enum: receive to host */
@@ -6755,8 +7982,10 @@
 #define          MC_CMD_FILTER_OP_EXT_IN_RX_DEST_TX1  0x4
 /* receive queue handle (for multiple queue modes, this is the base queue) */
 #define       MC_CMD_FILTER_OP_EXT_IN_RX_QUEUE_OFST 24
+#define       MC_CMD_FILTER_OP_EXT_IN_RX_QUEUE_LEN 4
 /* receive mode */
 #define       MC_CMD_FILTER_OP_EXT_IN_RX_MODE_OFST 28
+#define       MC_CMD_FILTER_OP_EXT_IN_RX_MODE_LEN 4
 /* enum: receive to just the specified queue */
 #define          MC_CMD_FILTER_OP_EXT_IN_RX_MODE_SIMPLE  0x0
 /* enum: receive to multiple queues using RSS context */
@@ -6771,13 +8000,16 @@
  * MC_CMD_DOT1P_MAPPING_ALLOC.
  */
 #define       MC_CMD_FILTER_OP_EXT_IN_RX_CONTEXT_OFST 32
+#define       MC_CMD_FILTER_OP_EXT_IN_RX_CONTEXT_LEN 4
 /* transmit domain (reserved; set to 0) */
 #define       MC_CMD_FILTER_OP_EXT_IN_TX_DOMAIN_OFST 36
+#define       MC_CMD_FILTER_OP_EXT_IN_TX_DOMAIN_LEN 4
 /* transmit destination (either set the MAC and/or PM bits for explicit
  * control, or set this field to TX_DEST_DEFAULT for sensible default
  * behaviour)
  */
 #define       MC_CMD_FILTER_OP_EXT_IN_TX_DEST_OFST 40
+#define       MC_CMD_FILTER_OP_EXT_IN_TX_DEST_LEN 4
 /* enum: request default behaviour (based on filter type) */
 #define          MC_CMD_FILTER_OP_EXT_IN_TX_DEST_DEFAULT  0xffffffff
 #define        MC_CMD_FILTER_OP_EXT_IN_TX_DEST_MAC_LBN 0
@@ -6810,11 +8042,13 @@
 #define       MC_CMD_FILTER_OP_EXT_IN_IP_PROTO_LEN 2
 /* Firmware defined register 0 to match (reserved; set to 0) */
 #define       MC_CMD_FILTER_OP_EXT_IN_FWDEF0_OFST 68
+#define       MC_CMD_FILTER_OP_EXT_IN_FWDEF0_LEN 4
 /* VNI (for VXLAN/Geneve, when IP protocol is UDP) or VSID (for NVGRE, when IP
  * protocol is GRE) to match (as bytes in network order; set last byte to 0 for
  * VXLAN/NVGRE, or 1 for Geneve)
  */
 #define       MC_CMD_FILTER_OP_EXT_IN_VNI_OR_VSID_OFST 72
+#define       MC_CMD_FILTER_OP_EXT_IN_VNI_OR_VSID_LEN 4
 #define        MC_CMD_FILTER_OP_EXT_IN_VNI_VALUE_LBN 0
 #define        MC_CMD_FILTER_OP_EXT_IN_VNI_VALUE_WIDTH 24
 #define        MC_CMD_FILTER_OP_EXT_IN_VNI_TYPE_LBN 24
@@ -6880,10 +8114,12 @@
  * to 0)
  */
 #define       MC_CMD_FILTER_OP_EXT_IN_IFRM_FWDEF0_OFST 132
+#define       MC_CMD_FILTER_OP_EXT_IN_IFRM_FWDEF0_LEN 4
 /* VXLAN/NVGRE inner frame Firmware defined register 1 to match (reserved; set
  * to 0)
  */
 #define       MC_CMD_FILTER_OP_EXT_IN_IFRM_FWDEF1_OFST 136
+#define       MC_CMD_FILTER_OP_EXT_IN_IFRM_FWDEF1_LEN 4
 /* VXLAN/NVGRE inner frame source IP address to match (as bytes in network
  * order; set last 12 bytes to 0 for IPv4 address)
  */
@@ -6899,6 +8135,7 @@
 #define    MC_CMD_FILTER_OP_OUT_LEN 12
 /* identifies the type of operation requested */
 #define       MC_CMD_FILTER_OP_OUT_OP_OFST 0
+#define       MC_CMD_FILTER_OP_OUT_OP_LEN 4
 /*            Enum values, see field(s): */
 /*               MC_CMD_FILTER_OP_IN/OP */
 /* Returned filter handle (for insert / subscribe operations). Note that these
@@ -6918,6 +8155,7 @@
 #define    MC_CMD_FILTER_OP_EXT_OUT_LEN 12
 /* identifies the type of operation requested */
 #define       MC_CMD_FILTER_OP_EXT_OUT_OP_OFST 0
+#define       MC_CMD_FILTER_OP_EXT_OUT_OP_LEN 4
 /*            Enum values, see field(s): */
 /*               MC_CMD_FILTER_OP_EXT_IN/OP */
 /* Returned filter handle (for insert / subscribe operations). Note that these
@@ -6944,6 +8182,7 @@
 #define    MC_CMD_GET_PARSER_DISP_INFO_IN_LEN 4
 /* identifies the type of operation requested */
 #define       MC_CMD_GET_PARSER_DISP_INFO_IN_OP_OFST 0
+#define       MC_CMD_GET_PARSER_DISP_INFO_IN_OP_LEN 4
 /* enum: read the list of supported RX filter matches */
 #define          MC_CMD_GET_PARSER_DISP_INFO_IN_OP_GET_SUPPORTED_RX_MATCHES  0x1
 /* enum: read flags indicating restrictions on filter insertion for the calling
@@ -6966,10 +8205,12 @@
 #define    MC_CMD_GET_PARSER_DISP_INFO_OUT_LEN(num) (8+4*(num))
 /* identifies the type of operation requested */
 #define       MC_CMD_GET_PARSER_DISP_INFO_OUT_OP_OFST 0
+#define       MC_CMD_GET_PARSER_DISP_INFO_OUT_OP_LEN 4
 /*            Enum values, see field(s): */
 /*               MC_CMD_GET_PARSER_DISP_INFO_IN/OP */
 /* number of supported match types */
 #define       MC_CMD_GET_PARSER_DISP_INFO_OUT_NUM_SUPPORTED_MATCHES_OFST 4
+#define       MC_CMD_GET_PARSER_DISP_INFO_OUT_NUM_SUPPORTED_MATCHES_LEN 4
 /* array of supported match types (valid MATCH_FIELDS values for
  * MC_CMD_FILTER_OP) sorted in decreasing priority order
  */
@@ -6982,10 +8223,12 @@
 #define    MC_CMD_GET_PARSER_DISP_RESTRICTIONS_OUT_LEN 8
 /* identifies the type of operation requested */
 #define       MC_CMD_GET_PARSER_DISP_RESTRICTIONS_OUT_OP_OFST 0
+#define       MC_CMD_GET_PARSER_DISP_RESTRICTIONS_OUT_OP_LEN 4
 /*            Enum values, see field(s): */
 /*               MC_CMD_GET_PARSER_DISP_INFO_IN/OP */
 /* bitfield of filter insertion restrictions */
 #define       MC_CMD_GET_PARSER_DISP_RESTRICTIONS_OUT_RESTRICTION_FLAGS_OFST 4
+#define       MC_CMD_GET_PARSER_DISP_RESTRICTIONS_OUT_RESTRICTION_FLAGS_LEN 4
 #define        MC_CMD_GET_PARSER_DISP_RESTRICTIONS_OUT_DST_IP_MCAST_ONLY_LBN 0
 #define        MC_CMD_GET_PARSER_DISP_RESTRICTIONS_OUT_DST_IP_MCAST_ONLY_WIDTH 1
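
Throughout this header, _OFST/_LEN give the byte offset and size of a field, while the more deeply indented _LBN/_WIDTH pairs name bits within it. A rough sketch of pulling one such flag out of a response buffer (this stands in for the driver's real MCDI accessor macros; it assumes the flag lies within the dword at OFST, as RESTRICTION_FLAGS does, and that WIDTH < 32):

#include <stdint.h>

/* Extract an LBN/WIDTH bitfield from the little-endian dword at 'ofst'. */
static inline uint32_t mcdi_field(const uint8_t *buf, unsigned int ofst,
				  unsigned int lbn, unsigned int width)
{
	uint32_t dword = 0;
	unsigned int i;

	for (i = 0; i < 4; i++)		/* LE decode, host-endian safe */
		dword |= (uint32_t)buf[ofst + i] << (8 * i);
	return (dword >> lbn) & ((1u << width) - 1);
}

/* e.g. mcdi_field(outbuf,
 *   MC_CMD_GET_PARSER_DISP_RESTRICTIONS_OUT_RESTRICTION_FLAGS_OFST,
 *   MC_CMD_GET_PARSER_DISP_RESTRICTIONS_OUT_DST_IP_MCAST_ONLY_LBN,
 *   MC_CMD_GET_PARSER_DISP_RESTRICTIONS_OUT_DST_IP_MCAST_ONLY_WIDTH);
 */
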
 
@@ -7005,11 +8248,16 @@
 #define    MC_CMD_PARSER_DISP_RW_IN_LEN 32
 /* identifies the target of the operation */
 #define       MC_CMD_PARSER_DISP_RW_IN_TARGET_OFST 0
+#define       MC_CMD_PARSER_DISP_RW_IN_TARGET_LEN 4
 /* enum: RX dispatcher CPU */
 #define          MC_CMD_PARSER_DISP_RW_IN_RX_DICPU  0x0
 /* enum: TX dispatcher CPU */
 #define          MC_CMD_PARSER_DISP_RW_IN_TX_DICPU  0x1
-/* enum: Lookup engine (with original metadata format) */
+/* enum: Lookup engine (with original metadata format). Deprecated; used only
+ * by cmdclient as a fallback for very old Huntington firmware, and not
+ * supported in firmware beyond v6.4.0.1005. Use LUE_VERSIONED_METADATA
+ * instead.
+ */
 #define          MC_CMD_PARSER_DISP_RW_IN_LUE  0x2
 /* enum: Lookup engine (with requested metadata format) */
 #define          MC_CMD_PARSER_DISP_RW_IN_LUE_VERSIONED_METADATA  0x3
@@ -7021,26 +8269,33 @@
 #define          MC_CMD_PARSER_DISP_RW_IN_MISC_STATE  0x5
 /* identifies the type of operation requested */
 #define       MC_CMD_PARSER_DISP_RW_IN_OP_OFST 4
-/* enum: read a word of DICPU DMEM or a LUE entry */
+#define       MC_CMD_PARSER_DISP_RW_IN_OP_LEN 4
+/* enum: Read a word of DICPU DMEM or a LUE entry. */
 #define          MC_CMD_PARSER_DISP_RW_IN_READ  0x0
-/* enum: write a word of DICPU DMEM or a LUE entry */
+/* enum: Write a word of DICPU DMEM or a LUE entry. */
 #define          MC_CMD_PARSER_DISP_RW_IN_WRITE  0x1
-/* enum: read-modify-write a word of DICPU DMEM (not valid for LUE) */
+/* enum: Read-modify-write a word of DICPU DMEM (not valid for LUE). */
 #define          MC_CMD_PARSER_DISP_RW_IN_RMW  0x2
 /* data memory address (DICPU targets) or LUE index (LUE targets) */
 #define       MC_CMD_PARSER_DISP_RW_IN_ADDRESS_OFST 8
+#define       MC_CMD_PARSER_DISP_RW_IN_ADDRESS_LEN 4
 /* selector (for MISC_STATE target) */
 #define       MC_CMD_PARSER_DISP_RW_IN_SELECTOR_OFST 8
+#define       MC_CMD_PARSER_DISP_RW_IN_SELECTOR_LEN 4
 /* enum: Port to datapath mapping */
 #define          MC_CMD_PARSER_DISP_RW_IN_PORT_DP_MAPPING  0x1
 /* value to write (for DMEM writes) */
 #define       MC_CMD_PARSER_DISP_RW_IN_DMEM_WRITE_VALUE_OFST 12
+#define       MC_CMD_PARSER_DISP_RW_IN_DMEM_WRITE_VALUE_LEN 4
 /* XOR value (for DMEM read-modify-writes: new = (old & mask) ^ value) */
 #define       MC_CMD_PARSER_DISP_RW_IN_DMEM_RMW_XOR_VALUE_OFST 12
+#define       MC_CMD_PARSER_DISP_RW_IN_DMEM_RMW_XOR_VALUE_LEN 4
 /* AND mask (for DMEM read-modify-writes: new = (old & mask) ^ value) */
 #define       MC_CMD_PARSER_DISP_RW_IN_DMEM_RMW_AND_MASK_OFST 16
+#define       MC_CMD_PARSER_DISP_RW_IN_DMEM_RMW_AND_MASK_LEN 4
 /* metadata format (for LUE reads using LUE_VERSIONED_METADATA) */
 #define       MC_CMD_PARSER_DISP_RW_IN_LUE_READ_METADATA_VERSION_OFST 12
+#define       MC_CMD_PARSER_DISP_RW_IN_LUE_READ_METADATA_VERSION_LEN 4
 /* value to write (for LUE writes) */
 #define       MC_CMD_PARSER_DISP_RW_IN_LUE_WRITE_VALUE_OFST 12
 #define       MC_CMD_PARSER_DISP_RW_IN_LUE_WRITE_VALUE_LEN 20
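
The read-modify-write semantics documented above (new = (old & mask) ^ value) cover clear, set, and toggle in a single operation; for example:

#include <stdint.h>

/* DMEM RMW as documented: new = (old & mask) ^ value.
 * Illustrative uses (bit 3):
 *   clear:  mask = ~(1u << 3), value = 0
 *   set:    mask = ~(1u << 3), value = 1u << 3
 *   toggle: mask = 0xffffffff, value = 1u << 3
 */
static inline uint32_t dmem_rmw(uint32_t old, uint32_t mask, uint32_t value)
{
	return (old & mask) ^ value;
}
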
@@ -7049,6 +8304,7 @@
 #define    MC_CMD_PARSER_DISP_RW_OUT_LEN 52
 /* value read (for DMEM reads) */
 #define       MC_CMD_PARSER_DISP_RW_OUT_DMEM_READ_VALUE_OFST 0
+#define       MC_CMD_PARSER_DISP_RW_OUT_DMEM_READ_VALUE_LEN 4
 /* value read (for LUE reads) */
 #define       MC_CMD_PARSER_DISP_RW_OUT_LUE_READ_VALUE_OFST 0
 #define       MC_CMD_PARSER_DISP_RW_OUT_LUE_READ_VALUE_LEN 20
@@ -7093,6 +8349,7 @@
 #define    MC_CMD_SET_PF_COUNT_IN_LEN 4
 /* New number of PFs on the device. */
 #define       MC_CMD_SET_PF_COUNT_IN_PF_COUNT_OFST 0
+#define       MC_CMD_SET_PF_COUNT_IN_PF_COUNT_LEN 4
 
 /* MC_CMD_SET_PF_COUNT_OUT msgresponse */
 #define    MC_CMD_SET_PF_COUNT_OUT_LEN 0
@@ -7113,6 +8370,7 @@
 #define    MC_CMD_GET_PORT_ASSIGNMENT_OUT_LEN 4
 /* Identifies the port assignment for this function. */
 #define       MC_CMD_GET_PORT_ASSIGNMENT_OUT_PORT_OFST 0
+#define       MC_CMD_GET_PORT_ASSIGNMENT_OUT_PORT_LEN 4
 
 
 /***********************************/
@@ -7127,6 +8385,7 @@
 #define    MC_CMD_SET_PORT_ASSIGNMENT_IN_LEN 4
 /* Identifies the port assignment for this function. */
 #define       MC_CMD_SET_PORT_ASSIGNMENT_IN_PORT_OFST 0
+#define       MC_CMD_SET_PORT_ASSIGNMENT_IN_PORT_LEN 4
 
 /* MC_CMD_SET_PORT_ASSIGNMENT_OUT msgresponse */
 #define    MC_CMD_SET_PORT_ASSIGNMENT_OUT_LEN 0
@@ -7144,8 +8403,10 @@
 #define    MC_CMD_ALLOC_VIS_IN_LEN 8
 /* The minimum number of VIs that is acceptable */
 #define       MC_CMD_ALLOC_VIS_IN_MIN_VI_COUNT_OFST 0
+#define       MC_CMD_ALLOC_VIS_IN_MIN_VI_COUNT_LEN 4
 /* The maximum number of VIs that would be useful */
 #define       MC_CMD_ALLOC_VIS_IN_MAX_VI_COUNT_OFST 4
+#define       MC_CMD_ALLOC_VIS_IN_MAX_VI_COUNT_LEN 4
 
 /* MC_CMD_ALLOC_VIS_OUT msgresponse: Huntington-compatible VI_ALLOC request.
  * Use extended version in new code.
@@ -7153,21 +8414,26 @@
 #define    MC_CMD_ALLOC_VIS_OUT_LEN 8
 /* The number of VIs allocated on this function */
 #define       MC_CMD_ALLOC_VIS_OUT_VI_COUNT_OFST 0
+#define       MC_CMD_ALLOC_VIS_OUT_VI_COUNT_LEN 4
 /* The base absolute VI number allocated to this function. Required to
  * correctly interpret wakeup events.
  */
 #define       MC_CMD_ALLOC_VIS_OUT_VI_BASE_OFST 4
+#define       MC_CMD_ALLOC_VIS_OUT_VI_BASE_LEN 4
 
 /* MC_CMD_ALLOC_VIS_EXT_OUT msgresponse */
 #define    MC_CMD_ALLOC_VIS_EXT_OUT_LEN 12
 /* The number of VIs allocated on this function */
 #define       MC_CMD_ALLOC_VIS_EXT_OUT_VI_COUNT_OFST 0
+#define       MC_CMD_ALLOC_VIS_EXT_OUT_VI_COUNT_LEN 4
 /* The base absolute VI number allocated to this function. Required to
  * correctly interpret wakeup events.
  */
 #define       MC_CMD_ALLOC_VIS_EXT_OUT_VI_BASE_OFST 4
+#define       MC_CMD_ALLOC_VIS_EXT_OUT_VI_BASE_LEN 4
 /* Function's port vi_shift value (always 0 on Huntington) */
 #define       MC_CMD_ALLOC_VIS_EXT_OUT_VI_SHIFT_OFST 8
+#define       MC_CMD_ALLOC_VIS_EXT_OUT_VI_SHIFT_LEN 4
 
 
 /***********************************/
@@ -7201,15 +8467,20 @@
 #define    MC_CMD_GET_SRIOV_CFG_OUT_LEN 20
 /* Number of VFs currently enabled. */
 #define       MC_CMD_GET_SRIOV_CFG_OUT_VF_CURRENT_OFST 0
+#define       MC_CMD_GET_SRIOV_CFG_OUT_VF_CURRENT_LEN 4
 /* Max number of VFs before sriov stride and offset may need to be changed. */
 #define       MC_CMD_GET_SRIOV_CFG_OUT_VF_MAX_OFST 4
+#define       MC_CMD_GET_SRIOV_CFG_OUT_VF_MAX_LEN 4
 #define       MC_CMD_GET_SRIOV_CFG_OUT_FLAGS_OFST 8
+#define       MC_CMD_GET_SRIOV_CFG_OUT_FLAGS_LEN 4
 #define        MC_CMD_GET_SRIOV_CFG_OUT_VF_ENABLED_LBN 0
 #define        MC_CMD_GET_SRIOV_CFG_OUT_VF_ENABLED_WIDTH 1
 /* RID offset of first VF from PF. */
 #define       MC_CMD_GET_SRIOV_CFG_OUT_VF_OFFSET_OFST 12
+#define       MC_CMD_GET_SRIOV_CFG_OUT_VF_OFFSET_LEN 4
 /* RID offset of each subsequent VF from the previous. */
 #define       MC_CMD_GET_SRIOV_CFG_OUT_VF_STRIDE_OFST 16
+#define       MC_CMD_GET_SRIOV_CFG_OUT_VF_STRIDE_LEN 4
 
 
 /***********************************/
@@ -7224,19 +8495,24 @@
 #define    MC_CMD_SET_SRIOV_CFG_IN_LEN 20
 /* Number of VFs currently enabled. */
 #define       MC_CMD_SET_SRIOV_CFG_IN_VF_CURRENT_OFST 0
+#define       MC_CMD_SET_SRIOV_CFG_IN_VF_CURRENT_LEN 4
 /* Max number of VFs before sriov stride and offset may need to be changed. */
 #define       MC_CMD_SET_SRIOV_CFG_IN_VF_MAX_OFST 4
+#define       MC_CMD_SET_SRIOV_CFG_IN_VF_MAX_LEN 4
 #define       MC_CMD_SET_SRIOV_CFG_IN_FLAGS_OFST 8
+#define       MC_CMD_SET_SRIOV_CFG_IN_FLAGS_LEN 4
 #define        MC_CMD_SET_SRIOV_CFG_IN_VF_ENABLED_LBN 0
 #define        MC_CMD_SET_SRIOV_CFG_IN_VF_ENABLED_WIDTH 1
 /* RID offset of first VF from PF, or 0 for no change, or
  * MC_CMD_RESOURCE_INSTANCE_ANY to allow the system to allocate an offset.
  */
 #define       MC_CMD_SET_SRIOV_CFG_IN_VF_OFFSET_OFST 12
+#define       MC_CMD_SET_SRIOV_CFG_IN_VF_OFFSET_LEN 4
 /* RID offset of each subsequent VF from the previous, 0 for no change, or
  * MC_CMD_RESOURCE_INSTANCE_ANY to allow the system to allocate a stride.
  */
 #define       MC_CMD_SET_SRIOV_CFG_IN_VF_STRIDE_OFST 16
+#define       MC_CMD_SET_SRIOV_CFG_IN_VF_STRIDE_LEN 4
 
 /* MC_CMD_SET_SRIOV_CFG_OUT msgresponse */
 #define    MC_CMD_SET_SRIOV_CFG_OUT_LEN 0
@@ -7258,12 +8534,15 @@
 #define    MC_CMD_GET_VI_ALLOC_INFO_OUT_LEN 12
 /* The number of VIs allocated on this function */
 #define       MC_CMD_GET_VI_ALLOC_INFO_OUT_VI_COUNT_OFST 0
+#define       MC_CMD_GET_VI_ALLOC_INFO_OUT_VI_COUNT_LEN 4
 /* The base absolute VI number allocated to this function. Required to
  * correctly interpret wakeup events.
  */
 #define       MC_CMD_GET_VI_ALLOC_INFO_OUT_VI_BASE_OFST 4
+#define       MC_CMD_GET_VI_ALLOC_INFO_OUT_VI_BASE_LEN 4
 /* Function's port vi_shift value (always 0 on Huntington) */
 #define       MC_CMD_GET_VI_ALLOC_INFO_OUT_VI_SHIFT_OFST 8
+#define       MC_CMD_GET_VI_ALLOC_INFO_OUT_VI_SHIFT_LEN 4
 
 
 /***********************************/
@@ -7278,6 +8557,7 @@
 #define    MC_CMD_DUMP_VI_STATE_IN_LEN 4
 /* The VI number to query. */
 #define       MC_CMD_DUMP_VI_STATE_IN_VI_NUMBER_OFST 0
+#define       MC_CMD_DUMP_VI_STATE_IN_VI_NUMBER_LEN 4
 
 /* MC_CMD_DUMP_VI_STATE_OUT msgresponse */
 #define    MC_CMD_DUMP_VI_STATE_OUT_LEN 96
@@ -7311,6 +8591,7 @@
 #define       MC_CMD_DUMP_VI_STATE_OUT_VI_EV_TIMER_RAW_HI_OFST 24
 /* Combined metadata field. */
 #define       MC_CMD_DUMP_VI_STATE_OUT_VI_EV_META_OFST 28
+#define       MC_CMD_DUMP_VI_STATE_OUT_VI_EV_META_LEN 4
 #define        MC_CMD_DUMP_VI_STATE_OUT_VI_EV_META_BUFS_BASE_LBN 0
 #define        MC_CMD_DUMP_VI_STATE_OUT_VI_EV_META_BUFS_BASE_WIDTH 16
 #define        MC_CMD_DUMP_VI_STATE_OUT_VI_EV_META_BUFS_NPAGES_LBN 16
@@ -7392,6 +8673,7 @@
 #define    MC_CMD_ALLOC_PIOBUF_OUT_LEN 4
 /* Handle for allocated push I/O buffer. */
 #define       MC_CMD_ALLOC_PIOBUF_OUT_PIOBUF_HANDLE_OFST 0
+#define       MC_CMD_ALLOC_PIOBUF_OUT_PIOBUF_HANDLE_LEN 4
 
 
 /***********************************/
@@ -7406,6 +8688,7 @@
 #define    MC_CMD_FREE_PIOBUF_IN_LEN 4
 /* Handle for allocated push I/O buffer. */
 #define       MC_CMD_FREE_PIOBUF_IN_PIOBUF_HANDLE_OFST 0
+#define       MC_CMD_FREE_PIOBUF_IN_PIOBUF_HANDLE_LEN 4
 
 /* MC_CMD_FREE_PIOBUF_OUT msgresponse */
 #define    MC_CMD_FREE_PIOBUF_OUT_LEN 0
@@ -7423,6 +8706,7 @@
 #define    MC_CMD_GET_VI_TLP_PROCESSING_IN_LEN 4
 /* VI number to get information for. */
 #define       MC_CMD_GET_VI_TLP_PROCESSING_IN_INSTANCE_OFST 0
+#define       MC_CMD_GET_VI_TLP_PROCESSING_IN_INSTANCE_LEN 4
 
 /* MC_CMD_GET_VI_TLP_PROCESSING_OUT msgresponse */
 #define    MC_CMD_GET_VI_TLP_PROCESSING_OUT_LEN 4
@@ -7445,6 +8729,7 @@
 #define       MC_CMD_GET_VI_TLP_PROCESSING_OUT_TPH_ON_LBN 19
 #define       MC_CMD_GET_VI_TLP_PROCESSING_OUT_TPH_ON_WIDTH 1
 #define       MC_CMD_GET_VI_TLP_PROCESSING_OUT_DATA_OFST 0
+#define       MC_CMD_GET_VI_TLP_PROCESSING_OUT_DATA_LEN 4
 
 
 /***********************************/
@@ -7459,6 +8744,7 @@
 #define    MC_CMD_SET_VI_TLP_PROCESSING_IN_LEN 8
 /* VI number to set information for. */
 #define       MC_CMD_SET_VI_TLP_PROCESSING_IN_INSTANCE_OFST 0
+#define       MC_CMD_SET_VI_TLP_PROCESSING_IN_INSTANCE_LEN 4
 /* Transaction processing steering hint 1 for use with the Rx Queue. */
 #define       MC_CMD_SET_VI_TLP_PROCESSING_IN_TPH_TAG1_RX_OFST 4
 #define       MC_CMD_SET_VI_TLP_PROCESSING_IN_TPH_TAG1_RX_LEN 1
@@ -7478,6 +8764,7 @@
 #define       MC_CMD_SET_VI_TLP_PROCESSING_IN_TPH_ON_LBN 51
 #define       MC_CMD_SET_VI_TLP_PROCESSING_IN_TPH_ON_WIDTH 1
 #define       MC_CMD_SET_VI_TLP_PROCESSING_IN_DATA_OFST 4
+#define       MC_CMD_SET_VI_TLP_PROCESSING_IN_DATA_LEN 4
 
 /* MC_CMD_SET_VI_TLP_PROCESSING_OUT msgresponse */
 #define    MC_CMD_SET_VI_TLP_PROCESSING_OUT_LEN 0
@@ -7494,6 +8781,7 @@
 /* MC_CMD_GET_TLP_PROCESSING_GLOBALS_IN msgrequest */
 #define    MC_CMD_GET_TLP_PROCESSING_GLOBALS_IN_LEN 4
 #define       MC_CMD_GET_TLP_PROCESSING_GLOBALS_IN_TLP_GLOBAL_CATEGORY_OFST 0
+#define       MC_CMD_GET_TLP_PROCESSING_GLOBALS_IN_TLP_GLOBAL_CATEGORY_LEN 4
 /* enum: MISC. */
 #define          MC_CMD_GET_TLP_PROCESSING_GLOBALS_IN_TLP_GLOBAL_CATEGORY_MISC  0x0
 /* enum: IDO. */
@@ -7506,10 +8794,12 @@
 /* MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT msgresponse */
 #define    MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_LEN 8
 #define       MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_GLOBAL_CATEGORY_OFST 0
+#define       MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_GLOBAL_CATEGORY_LEN 4
 /*            Enum values, see field(s): */
 /*               MC_CMD_GET_TLP_PROCESSING_GLOBALS_IN/TLP_GLOBAL_CATEGORY */
 /* Amalgamated TLP info word. */
 #define       MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_WORD_OFST 4
+#define       MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_WORD_LEN 4
 #define        MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_MISC_WTAG_EN_LBN 0
 #define        MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_MISC_WTAG_EN_WIDTH 1
 #define        MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_MISC_SPARE_LBN 1
@@ -7557,10 +8847,12 @@
 /* MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN msgrequest */
 #define    MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_LEN 8
 #define       MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_GLOBAL_CATEGORY_OFST 0
+#define       MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_GLOBAL_CATEGORY_LEN 4
 /*            Enum values, see field(s): */
 /*               MC_CMD_GET_TLP_PROCESSING_GLOBALS/MC_CMD_GET_TLP_PROCESSING_GLOBALS_IN/TLP_GLOBAL_CATEGORY */
 /* Amalgamated TLP info word. */
 #define       MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_WORD_OFST 4
+#define       MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_WORD_LEN 4
 #define        MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_MISC_WTAG_EN_LBN 0
 #define        MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_MISC_WTAG_EN_WIDTH 1
 #define        MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_IDO_DL_EN_LBN 0
@@ -7627,6 +8919,7 @@
  * in a command from the host.)
  */
 #define       MC_CMD_SATELLITE_DOWNLOAD_IN_PHASE_OFST 0
+#define       MC_CMD_SATELLITE_DOWNLOAD_IN_PHASE_LEN 4
 #define          MC_CMD_SATELLITE_DOWNLOAD_IN_PHASE_IDLE     0x0 /* enum */
 #define          MC_CMD_SATELLITE_DOWNLOAD_IN_PHASE_RESET    0x1 /* enum */
 #define          MC_CMD_SATELLITE_DOWNLOAD_IN_PHASE_IMEMS    0x2 /* enum */
@@ -7636,6 +8929,7 @@
  * mc_flash_layout.h.)
  */
 #define       MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_OFST 4
+#define       MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_LEN 4
 /* enum: Valid in phase 2 (PHASE_IMEMS) only */
 #define          MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_TXDI_TEXT  0x0
 /* enum: Valid in phase 2 (PHASE_IMEMS) only */
@@ -7672,12 +8966,14 @@
 #define          MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_ALL  0xffffffff
 /* Chunk ID, or CHUNK_ID_LAST or CHUNK_ID_ABORT */
 #define       MC_CMD_SATELLITE_DOWNLOAD_IN_CHUNK_ID_OFST 8
+#define       MC_CMD_SATELLITE_DOWNLOAD_IN_CHUNK_ID_LEN 4
 /* enum: Last chunk, containing checksum rather than data */
 #define          MC_CMD_SATELLITE_DOWNLOAD_IN_CHUNK_ID_LAST  0xffffffff
 /* enum: Abort download of this item */
 #define          MC_CMD_SATELLITE_DOWNLOAD_IN_CHUNK_ID_ABORT  0xfffffffe
 /* Length of this chunk in bytes */
 #define       MC_CMD_SATELLITE_DOWNLOAD_IN_CHUNK_LEN_OFST 12
+#define       MC_CMD_SATELLITE_DOWNLOAD_IN_CHUNK_LEN_LEN 4
 /* Data for this chunk */
 #define       MC_CMD_SATELLITE_DOWNLOAD_IN_CHUNK_DATA_OFST 16
 #define       MC_CMD_SATELLITE_DOWNLOAD_IN_CHUNK_DATA_LEN 4
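
The download protocol above is chunked: data chunks are numbered from zero, CHUNK_ID_LAST carries a checksum instead of data, and CHUNK_ID_ABORT cancels the item. A hypothetical send loop, where send_chunk() and CHUNK_MAX stand in for the real MCDI plumbing and per-message payload limit:

#include <stddef.h>
#include <stdint.h>

/* Hypothetical: send_chunk() wraps one MC_CMD_SATELLITE_DOWNLOAD request;
 * CHUNK_MAX is an assumed payload limit.
 */
int send_chunk(uint32_t target, uint32_t chunk_id, const void *data,
	       size_t len);
#define CHUNK_MAX 128

static int download_item(uint32_t target, const uint8_t *data, size_t len,
			 uint32_t csum)
{
	uint32_t chunk_id = 0;
	size_t pos = 0;

	while (pos < len) {
		size_t n = len - pos < CHUNK_MAX ? len - pos : CHUNK_MAX;

		if (send_chunk(target, chunk_id++, data + pos, n))
			/* on error, abort download of this item */
			return send_chunk(target,
					  MC_CMD_SATELLITE_DOWNLOAD_IN_CHUNK_ID_ABORT,
					  NULL, 0);
		pos += n;
	}
	/* the last chunk carries a checksum rather than data */
	return send_chunk(target, MC_CMD_SATELLITE_DOWNLOAD_IN_CHUNK_ID_LAST,
			  &csum, sizeof(csum));
}
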
@@ -7688,8 +8984,10 @@
 #define    MC_CMD_SATELLITE_DOWNLOAD_OUT_LEN 8
 /* Same as MC_CMD_ERR field, but included as 0 in success cases */
 #define       MC_CMD_SATELLITE_DOWNLOAD_OUT_RESULT_OFST 0
+#define       MC_CMD_SATELLITE_DOWNLOAD_OUT_RESULT_LEN 4
 /* Extra status information */
 #define       MC_CMD_SATELLITE_DOWNLOAD_OUT_INFO_OFST 4
+#define       MC_CMD_SATELLITE_DOWNLOAD_OUT_INFO_LEN 4
 /* enum: Code download OK, completed. */
 #define          MC_CMD_SATELLITE_DOWNLOAD_OUT_OK_COMPLETE  0x0
 /* enum: Code download aborted as requested. */
@@ -7726,6 +9024,7 @@
 #define    MC_CMD_GET_CAPABILITIES_OUT_LEN 20
 /* First word of flags. */
 #define       MC_CMD_GET_CAPABILITIES_OUT_FLAGS1_OFST 0
+#define       MC_CMD_GET_CAPABILITIES_OUT_FLAGS1_LEN 4
 #define        MC_CMD_GET_CAPABILITIES_OUT_VPORT_RECONFIGURE_LBN 3
 #define        MC_CMD_GET_CAPABILITIES_OUT_VPORT_RECONFIGURE_WIDTH 1
 #define        MC_CMD_GET_CAPABILITIES_OUT_TX_STRIPING_LBN 4
@@ -7793,6 +9092,8 @@
 #define          MC_CMD_GET_CAPABILITIES_OUT_RXDP_LOW_LATENCY  0x1
 /* enum: Packed stream RXDP firmware */
 #define          MC_CMD_GET_CAPABILITIES_OUT_RXDP_PACKED_STREAM  0x2
+/* enum: Rules engine RXDP firmware */
+#define          MC_CMD_GET_CAPABILITIES_OUT_RXDP_RULES_ENGINE  0x5
 /* enum: BIST RXDP firmware */
 #define          MC_CMD_GET_CAPABILITIES_OUT_RXDP_BIST  0x10a
 /* enum: RXDP Test firmware image 1 */
@@ -7813,6 +9114,8 @@
 #define          MC_CMD_GET_CAPABILITIES_OUT_RXDP_TEST_FW_DISABLE_DL  0x108
 /* enum: RXDP Test firmware image 9 */
 #define          MC_CMD_GET_CAPABILITIES_OUT_RXDP_TEST_FW_DOORBELL_DELAY  0x10b
+/* enum: RXDP Test firmware image 10 */
+#define          MC_CMD_GET_CAPABILITIES_OUT_RXDP_TEST_FW_SLOW  0x10c
 /* TxDPCPU firmware id. */
 #define       MC_CMD_GET_CAPABILITIES_OUT_TX_DPCPU_FW_ID_OFST 6
 #define       MC_CMD_GET_CAPABILITIES_OUT_TX_DPCPU_FW_ID_LEN 2
@@ -7822,6 +9125,8 @@
 #define          MC_CMD_GET_CAPABILITIES_OUT_TXDP_LOW_LATENCY  0x1
 /* enum: High packet rate TXDP firmware */
 #define          MC_CMD_GET_CAPABILITIES_OUT_TXDP_HIGH_PACKET_RATE  0x3
+/* enum: Rules engine TXDP firmware */
+#define          MC_CMD_GET_CAPABILITIES_OUT_TXDP_RULES_ENGINE  0x5
 /* enum: BIST TXDP firmware */
 #define          MC_CMD_GET_CAPABILITIES_OUT_TXDP_BIST  0x12d
 /* enum: TXDP Test firmware image 1 */
@@ -7848,7 +9153,9 @@
  * (Huntington development only)
  */
 #define          MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_SIENA_COMPAT  0x2
-/* enum: Virtual switching (full feature) RX PD production firmware */
+/* enum: Full featured RX PD production firmware */
+#define          MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_FULL_FEATURED  0x3
+/* enum: (deprecated original name for the FULL_FEATURED variant) */
 #define          MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_VSWITCH  0x3
 /* enum: siena_compat variant RX PD firmware using PM rather than MAC
  * (Huntington development only)
@@ -7864,6 +9171,8 @@
 #define          MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_LAYER2_PERF  0x7
 /* enum: Rules engine RX PD production firmware */
 #define          MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_RULES_ENGINE  0x8
+/* enum: reserved value - do not use (bug69716) */
+#define          MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_RESERVED_9  0x9
 /* enum: RX PD firmware for GUE parsing prototype (Medford development only) */
 #define          MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_TESTFW_GUE_PROTOTYPE  0xe
 /* enum: RX PD firmware parsing but not filtering network overlay tunnel
@@ -7888,7 +9197,9 @@
  * (Huntington development only)
  */
 #define          MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_SIENA_COMPAT  0x2
-/* enum: Virtual switching (full feature) TX PD production firmware */
+/* enum: Full featured TX PD production firmware */
+#define          MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_FULL_FEATURED  0x3
+/* enum: (deprecated original name for the FULL_FEATURED variant) */
 #define          MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_VSWITCH  0x3
 /* enum: siena_compat variant TX PD firmware using PM rather than MAC
  * (Huntington development only)
@@ -7901,12 +9212,16 @@
 #define          MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_LAYER2_PERF  0x7
 /* enum: Rules engine TX PD production firmware */
 #define          MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_RULES_ENGINE  0x8
+/* enum: reserved value - do not use (bug69716) */
+#define          MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_RESERVED_9  0x9
 /* enum: RX PD firmware for GUE parsing prototype (Medford development only) */
 #define          MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_TESTFW_GUE_PROTOTYPE  0xe
 /* Hardware capabilities of NIC */
 #define       MC_CMD_GET_CAPABILITIES_OUT_HW_CAPABILITIES_OFST 12
+#define       MC_CMD_GET_CAPABILITIES_OUT_HW_CAPABILITIES_LEN 4
 /* Licensed capabilities */
 #define       MC_CMD_GET_CAPABILITIES_OUT_LICENSE_CAPABILITIES_OFST 16
+#define       MC_CMD_GET_CAPABILITIES_OUT_LICENSE_CAPABILITIES_LEN 4
 
 /* MC_CMD_GET_CAPABILITIES_V2_IN msgrequest */
 #define    MC_CMD_GET_CAPABILITIES_V2_IN_LEN 0
@@ -7915,6 +9230,7 @@
 #define    MC_CMD_GET_CAPABILITIES_V2_OUT_LEN 72
 /* First word of flags. */
 #define       MC_CMD_GET_CAPABILITIES_V2_OUT_FLAGS1_OFST 0
+#define       MC_CMD_GET_CAPABILITIES_V2_OUT_FLAGS1_LEN 4
 #define        MC_CMD_GET_CAPABILITIES_V2_OUT_VPORT_RECONFIGURE_LBN 3
 #define        MC_CMD_GET_CAPABILITIES_V2_OUT_VPORT_RECONFIGURE_WIDTH 1
 #define        MC_CMD_GET_CAPABILITIES_V2_OUT_TX_STRIPING_LBN 4
@@ -7982,6 +9298,8 @@
 #define          MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_LOW_LATENCY  0x1
 /* enum: Packed stream RXDP firmware */
 #define          MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_PACKED_STREAM  0x2
+/* enum: Rules engine RXDP firmware */
+#define          MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_RULES_ENGINE  0x5
 /* enum: BIST RXDP firmware */
 #define          MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_BIST  0x10a
 /* enum: RXDP Test firmware image 1 */
@@ -8002,6 +9320,8 @@
 #define          MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_TEST_FW_DISABLE_DL  0x108
 /* enum: RXDP Test firmware image 9 */
 #define          MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_TEST_FW_DOORBELL_DELAY  0x10b
+/* enum: RXDP Test firmware image 10 */
+#define          MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_TEST_FW_SLOW  0x10c
 /* TxDPCPU firmware id. */
 #define       MC_CMD_GET_CAPABILITIES_V2_OUT_TX_DPCPU_FW_ID_OFST 6
 #define       MC_CMD_GET_CAPABILITIES_V2_OUT_TX_DPCPU_FW_ID_LEN 2
@@ -8011,6 +9331,8 @@
 #define          MC_CMD_GET_CAPABILITIES_V2_OUT_TXDP_LOW_LATENCY  0x1
 /* enum: High packet rate TXDP firmware */
 #define          MC_CMD_GET_CAPABILITIES_V2_OUT_TXDP_HIGH_PACKET_RATE  0x3
+/* enum: Rules engine TXDP firmware */
+#define          MC_CMD_GET_CAPABILITIES_V2_OUT_TXDP_RULES_ENGINE  0x5
 /* enum: BIST TXDP firmware */
 #define          MC_CMD_GET_CAPABILITIES_V2_OUT_TXDP_BIST  0x12d
 /* enum: TXDP Test firmware image 1 */
@@ -8037,7 +9359,9 @@
  * (Huntington development only)
  */
 #define          MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_TYPE_SIENA_COMPAT  0x2
-/* enum: Virtual switching (full feature) RX PD production firmware */
+/* enum: Full featured RX PD production firmware */
+#define          MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_TYPE_FULL_FEATURED  0x3
+/* enum: (deprecated original name for the FULL_FEATURED variant) */
 #define          MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_TYPE_VSWITCH  0x3
 /* enum: siena_compat variant RX PD firmware using PM rather than MAC
  * (Huntington development only)
@@ -8053,6 +9377,8 @@
 #define          MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_TYPE_LAYER2_PERF  0x7
 /* enum: Rules engine RX PD production firmware */
 #define          MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_TYPE_RULES_ENGINE  0x8
+/* enum: reserved value - do not use (bug69716) */
+#define          MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_TYPE_RESERVED_9  0x9
 /* enum: RX PD firmware for GUE parsing prototype (Medford development only) */
 #define          MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_TYPE_TESTFW_GUE_PROTOTYPE  0xe
 /* enum: RX PD firmware parsing but not filtering network overlay tunnel
@@ -8077,7 +9403,9 @@
  * (Huntington development only)
  */
 #define          MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_TYPE_SIENA_COMPAT  0x2
-/* enum: Virtual switching (full feature) TX PD production firmware */
+/* enum: Full featured TX PD production firmware */
+#define          MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_TYPE_FULL_FEATURED  0x3
+/* enum: (deprecated original name for the FULL_FEATURED variant) */
 #define          MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_TYPE_VSWITCH  0x3
 /* enum: siena_compat variant TX PD firmware using PM rather than MAC
  * (Huntington development only)
@@ -8090,14 +9418,19 @@
 #define          MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_TYPE_LAYER2_PERF  0x7
 /* enum: Rules engine TX PD production firmware */
 #define          MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_TYPE_RULES_ENGINE  0x8
+/* enum: reserved value - do not use (bug69716) */
+#define          MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_TYPE_RESERVED_9  0x9
 /* enum: RX PD firmware for GUE parsing prototype (Medford development only) */
 #define          MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_TYPE_TESTFW_GUE_PROTOTYPE  0xe
 /* Hardware capabilities of NIC */
 #define       MC_CMD_GET_CAPABILITIES_V2_OUT_HW_CAPABILITIES_OFST 12
+#define       MC_CMD_GET_CAPABILITIES_V2_OUT_HW_CAPABILITIES_LEN 4
 /* Licensed capabilities */
 #define       MC_CMD_GET_CAPABILITIES_V2_OUT_LICENSE_CAPABILITIES_OFST 16
+#define       MC_CMD_GET_CAPABILITIES_V2_OUT_LICENSE_CAPABILITIES_LEN 4
 /* Second word of flags. Not present on older firmware (check the length). */
 #define       MC_CMD_GET_CAPABILITIES_V2_OUT_FLAGS2_OFST 20
+#define       MC_CMD_GET_CAPABILITIES_V2_OUT_FLAGS2_LEN 4
 #define        MC_CMD_GET_CAPABILITIES_V2_OUT_TX_TSO_V2_LBN 0
 #define        MC_CMD_GET_CAPABILITIES_V2_OUT_TX_TSO_V2_WIDTH 1
 #define        MC_CMD_GET_CAPABILITIES_V2_OUT_TX_TSO_V2_ENCAP_LBN 1
@@ -8124,6 +9457,18 @@
 #define        MC_CMD_GET_CAPABILITIES_V2_OUT_TX_SNIFF_WIDTH 1
 #define        MC_CMD_GET_CAPABILITIES_V2_OUT_NVRAM_UPDATE_REPORT_VERIFY_RESULT_LBN 12
 #define        MC_CMD_GET_CAPABILITIES_V2_OUT_NVRAM_UPDATE_REPORT_VERIFY_RESULT_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V2_OUT_MCDI_BACKGROUND_LBN 13
+#define        MC_CMD_GET_CAPABILITIES_V2_OUT_MCDI_BACKGROUND_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V2_OUT_MCDI_DB_RETURN_LBN 14
+#define        MC_CMD_GET_CAPABILITIES_V2_OUT_MCDI_DB_RETURN_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V2_OUT_CTPIO_LBN 15
+#define        MC_CMD_GET_CAPABILITIES_V2_OUT_CTPIO_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V2_OUT_TSA_SUPPORT_LBN 16
+#define        MC_CMD_GET_CAPABILITIES_V2_OUT_TSA_SUPPORT_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V2_OUT_TSA_BOUND_LBN 17
+#define        MC_CMD_GET_CAPABILITIES_V2_OUT_TSA_BOUND_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V2_OUT_SF_ADAPTER_AUTHENTICATION_LBN 18
+#define        MC_CMD_GET_CAPABILITIES_V2_OUT_SF_ADAPTER_AUTHENTICATION_WIDTH 1
 /* Number of FATSOv2 contexts per datapath supported by this NIC. Not present
  * on older firmware (check the length).
  */
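
Several fields above are annotated "Not present on older firmware (check the length)": older firmware returns a shorter response, so a driver must compare the response length against the field's end before reading it. A minimal sketch:

#include <stdbool.h>
#include <stddef.h>

/* True if a field at byte offset 'ofst' with size 'len' fits in a response
 * of 'outlen' bytes, e.g. for FLAGS2 at offset 20 with length 4.
 */
static inline bool mcdi_has_field(size_t outlen, size_t ofst, size_t len)
{
	return outlen >= ofst + len;
}
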
@@ -8181,9 +9526,10 @@
 #define       MC_CMD_GET_CAPABILITIES_V2_OUT_SIZE_PIO_BUFF_LEN 2
 
 /* MC_CMD_GET_CAPABILITIES_V3_OUT msgresponse */
-#define    MC_CMD_GET_CAPABILITIES_V3_OUT_LEN 73
+#define    MC_CMD_GET_CAPABILITIES_V3_OUT_LEN 76
 /* First word of flags. */
 #define       MC_CMD_GET_CAPABILITIES_V3_OUT_FLAGS1_OFST 0
+#define       MC_CMD_GET_CAPABILITIES_V3_OUT_FLAGS1_LEN 4
 #define        MC_CMD_GET_CAPABILITIES_V3_OUT_VPORT_RECONFIGURE_LBN 3
 #define        MC_CMD_GET_CAPABILITIES_V3_OUT_VPORT_RECONFIGURE_WIDTH 1
 #define        MC_CMD_GET_CAPABILITIES_V3_OUT_TX_STRIPING_LBN 4
@@ -8251,6 +9597,8 @@
 #define          MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP_LOW_LATENCY  0x1
 /* enum: Packed stream RXDP firmware */
 #define          MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP_PACKED_STREAM  0x2
+/* enum: Rules engine RXDP firmware */
+#define          MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP_RULES_ENGINE  0x5
 /* enum: BIST RXDP firmware */
 #define          MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP_BIST  0x10a
 /* enum: RXDP Test firmware image 1 */
@@ -8271,6 +9619,8 @@
 #define          MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP_TEST_FW_DISABLE_DL  0x108
 /* enum: RXDP Test firmware image 9 */
 #define          MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP_TEST_FW_DOORBELL_DELAY  0x10b
+/* enum: RXDP Test firmware image 10 */
+#define          MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP_TEST_FW_SLOW  0x10c
 /* TxDPCPU firmware id. */
 #define       MC_CMD_GET_CAPABILITIES_V3_OUT_TX_DPCPU_FW_ID_OFST 6
 #define       MC_CMD_GET_CAPABILITIES_V3_OUT_TX_DPCPU_FW_ID_LEN 2
@@ -8280,6 +9630,8 @@
 #define          MC_CMD_GET_CAPABILITIES_V3_OUT_TXDP_LOW_LATENCY  0x1
 /* enum: High packet rate TXDP firmware */
 #define          MC_CMD_GET_CAPABILITIES_V3_OUT_TXDP_HIGH_PACKET_RATE  0x3
+/* enum: Rules engine TXDP firmware */
+#define          MC_CMD_GET_CAPABILITIES_V3_OUT_TXDP_RULES_ENGINE  0x5
 /* enum: BIST TXDP firmware */
 #define          MC_CMD_GET_CAPABILITIES_V3_OUT_TXDP_BIST  0x12d
 /* enum: TXDP Test firmware image 1 */
@@ -8306,7 +9658,9 @@
  * (Huntington development only)
  */
 #define          MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_TYPE_SIENA_COMPAT  0x2
-/* enum: Virtual switching (full feature) RX PD production firmware */
+/* enum: Full featured RX PD production firmware */
+#define          MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_TYPE_FULL_FEATURED  0x3
+/* enum: (deprecated original name for the FULL_FEATURED variant) */
 #define          MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_TYPE_VSWITCH  0x3
 /* enum: siena_compat variant RX PD firmware using PM rather than MAC
  * (Huntington development only)
@@ -8322,6 +9676,8 @@
 #define          MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_TYPE_LAYER2_PERF  0x7
 /* enum: Rules engine RX PD production firmware */
 #define          MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_TYPE_RULES_ENGINE  0x8
+/* enum: reserved value - do not use (bug69716) */
+#define          MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_TYPE_RESERVED_9  0x9
 /* enum: RX PD firmware for GUE parsing prototype (Medford development only) */
 #define          MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_TYPE_TESTFW_GUE_PROTOTYPE  0xe
 /* enum: RX PD firmware parsing but not filtering network overlay tunnel
@@ -8346,7 +9702,9 @@
  * (Huntington development only)
  */
 #define          MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_TYPE_SIENA_COMPAT  0x2
-/* enum: Virtual switching (full feature) TX PD production firmware */
+/* enum: Full featured TX PD production firmware */
+#define          MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_TYPE_FULL_FEATURED  0x3
+/* enum: (deprecated original name for the FULL_FEATURED variant) */
 #define          MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_TYPE_VSWITCH  0x3
 /* enum: siena_compat variant TX PD firmware using PM rather than MAC
  * (Huntington development only)
@@ -8359,14 +9717,19 @@
 #define          MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_TYPE_LAYER2_PERF  0x7
 /* enum: Rules engine TX PD production firmware */
 #define          MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_TYPE_RULES_ENGINE  0x8
+/* enum: reserved value - do not use (bug69716) */
+#define          MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_TYPE_RESERVED_9  0x9
 /* enum: RX PD firmware for GUE parsing prototype (Medford development only) */
 #define          MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_TYPE_TESTFW_GUE_PROTOTYPE  0xe
 /* Hardware capabilities of NIC */
 #define       MC_CMD_GET_CAPABILITIES_V3_OUT_HW_CAPABILITIES_OFST 12
+#define       MC_CMD_GET_CAPABILITIES_V3_OUT_HW_CAPABILITIES_LEN 4
 /* Licensed capabilities */
 #define       MC_CMD_GET_CAPABILITIES_V3_OUT_LICENSE_CAPABILITIES_OFST 16
+#define       MC_CMD_GET_CAPABILITIES_V3_OUT_LICENSE_CAPABILITIES_LEN 4
 /* Second word of flags. Not present on older firmware (check the length). */
 #define       MC_CMD_GET_CAPABILITIES_V3_OUT_FLAGS2_OFST 20
+#define       MC_CMD_GET_CAPABILITIES_V3_OUT_FLAGS2_LEN 4
 #define        MC_CMD_GET_CAPABILITIES_V3_OUT_TX_TSO_V2_LBN 0
 #define        MC_CMD_GET_CAPABILITIES_V3_OUT_TX_TSO_V2_WIDTH 1
 #define        MC_CMD_GET_CAPABILITIES_V3_OUT_TX_TSO_V2_ENCAP_LBN 1
@@ -8393,6 +9756,18 @@
 #define        MC_CMD_GET_CAPABILITIES_V3_OUT_TX_SNIFF_WIDTH 1
 #define        MC_CMD_GET_CAPABILITIES_V3_OUT_NVRAM_UPDATE_REPORT_VERIFY_RESULT_LBN 12
 #define        MC_CMD_GET_CAPABILITIES_V3_OUT_NVRAM_UPDATE_REPORT_VERIFY_RESULT_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V3_OUT_MCDI_BACKGROUND_LBN 13
+#define        MC_CMD_GET_CAPABILITIES_V3_OUT_MCDI_BACKGROUND_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V3_OUT_MCDI_DB_RETURN_LBN 14
+#define        MC_CMD_GET_CAPABILITIES_V3_OUT_MCDI_DB_RETURN_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V3_OUT_CTPIO_LBN 15
+#define        MC_CMD_GET_CAPABILITIES_V3_OUT_CTPIO_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V3_OUT_TSA_SUPPORT_LBN 16
+#define        MC_CMD_GET_CAPABILITIES_V3_OUT_TSA_SUPPORT_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V3_OUT_TSA_BOUND_LBN 17
+#define        MC_CMD_GET_CAPABILITIES_V3_OUT_TSA_BOUND_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V3_OUT_SF_ADAPTER_AUTHENTICATION_LBN 18
+#define        MC_CMD_GET_CAPABILITIES_V3_OUT_SF_ADAPTER_AUTHENTICATION_WIDTH 1
 /* Number of FATSOv2 contexts per datapath supported by this NIC. Not present
  * on older firmware (check the length).
  */
@@ -8463,6 +9838,348 @@
 #define          MC_CMD_GET_CAPABILITIES_V3_OUT_VI_WINDOW_MODE_16K  0x1
 /* enum: Each VI occupies 64k. PIO is at offset 4k. CTPIO is at offset 12k. */
 #define          MC_CMD_GET_CAPABILITIES_V3_OUT_VI_WINDOW_MODE_64K  0x2
+/* Number of vFIFOs per adapter that can be used for VFIFO Stuffing
+ * (SF-115995-SW) in the present configuration of firmware and port mode.
+ */
+#define       MC_CMD_GET_CAPABILITIES_V3_OUT_VFIFO_STUFFING_NUM_VFIFOS_OFST 73
+#define       MC_CMD_GET_CAPABILITIES_V3_OUT_VFIFO_STUFFING_NUM_VFIFOS_LEN 1
+/* Number of buffers per adapter that can be used for VFIFO Stuffing
+ * (SF-115995-SW) in the present configuration of firmware and port mode.
+ */
+#define       MC_CMD_GET_CAPABILITIES_V3_OUT_VFIFO_STUFFING_NUM_CP_BUFFERS_OFST 74
+#define       MC_CMD_GET_CAPABILITIES_V3_OUT_VFIFO_STUFFING_NUM_CP_BUFFERS_LEN 2
+
+/* MC_CMD_GET_CAPABILITIES_V4_OUT msgresponse */
+#define    MC_CMD_GET_CAPABILITIES_V4_OUT_LEN 78
+/* First word of flags. */
+#define       MC_CMD_GET_CAPABILITIES_V4_OUT_FLAGS1_OFST 0
+#define       MC_CMD_GET_CAPABILITIES_V4_OUT_FLAGS1_LEN 4
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_VPORT_RECONFIGURE_LBN 3
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_VPORT_RECONFIGURE_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_TX_STRIPING_LBN 4
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_TX_STRIPING_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_VADAPTOR_QUERY_LBN 5
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_VADAPTOR_QUERY_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_EVB_PORT_VLAN_RESTRICT_LBN 6
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_EVB_PORT_VLAN_RESTRICT_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_DRV_ATTACH_PREBOOT_LBN 7
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_DRV_ATTACH_PREBOOT_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_RX_FORCE_EVENT_MERGING_LBN 8
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_RX_FORCE_EVENT_MERGING_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_SET_MAC_ENHANCED_LBN 9
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_SET_MAC_ENHANCED_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_UNKNOWN_UCAST_DST_FILTER_ALWAYS_MULTI_RECIPIENT_LBN 10
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_UNKNOWN_UCAST_DST_FILTER_ALWAYS_MULTI_RECIPIENT_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_VADAPTOR_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_LBN 11
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_VADAPTOR_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_TX_MAC_SECURITY_FILTERING_LBN 12
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_TX_MAC_SECURITY_FILTERING_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_ADDITIONAL_RSS_MODES_LBN 13
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_ADDITIONAL_RSS_MODES_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_QBB_LBN 14
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_QBB_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_RX_PACKED_STREAM_VAR_BUFFERS_LBN 15
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_RX_PACKED_STREAM_VAR_BUFFERS_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_RX_RSS_LIMITED_LBN 16
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_RX_RSS_LIMITED_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_RX_PACKED_STREAM_LBN 17
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_RX_PACKED_STREAM_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_RX_INCLUDE_FCS_LBN 18
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_RX_INCLUDE_FCS_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_TX_VLAN_INSERTION_LBN 19
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_TX_VLAN_INSERTION_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_RX_VLAN_STRIPPING_LBN 20
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_RX_VLAN_STRIPPING_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_TX_TSO_LBN 21
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_TX_TSO_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_RX_PREFIX_LEN_0_LBN 22
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_RX_PREFIX_LEN_0_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_RX_PREFIX_LEN_14_LBN 23
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_RX_PREFIX_LEN_14_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_RX_TIMESTAMP_LBN 24
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_RX_TIMESTAMP_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_RX_BATCHING_LBN 25
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_RX_BATCHING_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_MCAST_FILTER_CHAINING_LBN 26
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_MCAST_FILTER_CHAINING_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_PM_AND_RXDP_COUNTERS_LBN 27
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_PM_AND_RXDP_COUNTERS_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_RX_DISABLE_SCATTER_LBN 28
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_RX_DISABLE_SCATTER_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_TX_MCAST_UDP_LOOPBACK_LBN 29
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_TX_MCAST_UDP_LOOPBACK_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_EVB_LBN 30
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_EVB_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_VXLAN_NVGRE_LBN 31
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_VXLAN_NVGRE_WIDTH 1
+/* RxDPCPU firmware id. */
+#define       MC_CMD_GET_CAPABILITIES_V4_OUT_RX_DPCPU_FW_ID_OFST 4
+#define       MC_CMD_GET_CAPABILITIES_V4_OUT_RX_DPCPU_FW_ID_LEN 2
+/* enum: Standard RXDP firmware */
+#define          MC_CMD_GET_CAPABILITIES_V4_OUT_RXDP  0x0
+/* enum: Low latency RXDP firmware */
+#define          MC_CMD_GET_CAPABILITIES_V4_OUT_RXDP_LOW_LATENCY  0x1
+/* enum: Packed stream RXDP firmware */
+#define          MC_CMD_GET_CAPABILITIES_V4_OUT_RXDP_PACKED_STREAM  0x2
+/* enum: Rules engine RXDP firmware */
+#define          MC_CMD_GET_CAPABILITIES_V4_OUT_RXDP_RULES_ENGINE  0x5
+/* enum: BIST RXDP firmware */
+#define          MC_CMD_GET_CAPABILITIES_V4_OUT_RXDP_BIST  0x10a
+/* enum: RXDP Test firmware image 1 */
+#define          MC_CMD_GET_CAPABILITIES_V4_OUT_RXDP_TEST_FW_TO_MC_CUT_THROUGH  0x101
+/* enum: RXDP Test firmware image 2 */
+#define          MC_CMD_GET_CAPABILITIES_V4_OUT_RXDP_TEST_FW_TO_MC_STORE_FORWARD  0x102
+/* enum: RXDP Test firmware image 3 */
+#define          MC_CMD_GET_CAPABILITIES_V4_OUT_RXDP_TEST_FW_TO_MC_STORE_FORWARD_FIRST  0x103
+/* enum: RXDP Test firmware image 4 */
+#define          MC_CMD_GET_CAPABILITIES_V4_OUT_RXDP_TEST_EVERY_EVENT_BATCHABLE  0x104
+/* enum: RXDP Test firmware image 5 */
+#define          MC_CMD_GET_CAPABILITIES_V4_OUT_RXDP_TEST_BACKPRESSURE  0x105
+/* enum: RXDP Test firmware image 6 */
+#define          MC_CMD_GET_CAPABILITIES_V4_OUT_RXDP_TEST_FW_PACKET_EDITS  0x106
+/* enum: RXDP Test firmware image 7 */
+#define          MC_CMD_GET_CAPABILITIES_V4_OUT_RXDP_TEST_FW_RX_HDR_SPLIT  0x107
+/* enum: RXDP Test firmware image 8 */
+#define          MC_CMD_GET_CAPABILITIES_V4_OUT_RXDP_TEST_FW_DISABLE_DL  0x108
+/* enum: RXDP Test firmware image 9 */
+#define          MC_CMD_GET_CAPABILITIES_V4_OUT_RXDP_TEST_FW_DOORBELL_DELAY  0x10b
+/* enum: RXDP Test firmware image 10 */
+#define          MC_CMD_GET_CAPABILITIES_V4_OUT_RXDP_TEST_FW_SLOW  0x10c
+/* TxDPCPU firmware id. */
+#define       MC_CMD_GET_CAPABILITIES_V4_OUT_TX_DPCPU_FW_ID_OFST 6
+#define       MC_CMD_GET_CAPABILITIES_V4_OUT_TX_DPCPU_FW_ID_LEN 2
+/* enum: Standard TXDP firmware */
+#define          MC_CMD_GET_CAPABILITIES_V4_OUT_TXDP  0x0
+/* enum: Low latency TXDP firmware */
+#define          MC_CMD_GET_CAPABILITIES_V4_OUT_TXDP_LOW_LATENCY  0x1
+/* enum: High packet rate TXDP firmware */
+#define          MC_CMD_GET_CAPABILITIES_V4_OUT_TXDP_HIGH_PACKET_RATE  0x3
+/* enum: Rules engine TXDP firmware */
+#define          MC_CMD_GET_CAPABILITIES_V4_OUT_TXDP_RULES_ENGINE  0x5
+/* enum: BIST TXDP firmware */
+#define          MC_CMD_GET_CAPABILITIES_V4_OUT_TXDP_BIST  0x12d
+/* enum: TXDP Test firmware image 1 */
+#define          MC_CMD_GET_CAPABILITIES_V4_OUT_TXDP_TEST_FW_TSO_EDIT  0x101
+/* enum: TXDP Test firmware image 2 */
+#define          MC_CMD_GET_CAPABILITIES_V4_OUT_TXDP_TEST_FW_PACKET_EDITS  0x102
+/* enum: TXDP CSR bus test firmware */
+#define          MC_CMD_GET_CAPABILITIES_V4_OUT_TXDP_TEST_FW_CSR  0x103
+#define       MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_VERSION_OFST 8
+#define       MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_VERSION_LEN 2
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_VERSION_REV_LBN 0
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_VERSION_REV_WIDTH 12
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_VERSION_TYPE_LBN 12
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_VERSION_TYPE_WIDTH 4
+/* enum: reserved value - do not use (may indicate alternative interpretation
+ * of REV field in future)
+ */
+#define          MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_TYPE_RESERVED  0x0
+/* enum: Trivial RX PD firmware for early Huntington development (Huntington
+ * development only)
+ */
+#define          MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_TYPE_FIRST_PKT  0x1
+/* enum: RX PD firmware with approximately Siena-compatible behaviour
+ * (Huntington development only)
+ */
+#define          MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_TYPE_SIENA_COMPAT  0x2
+/* enum: Full featured RX PD production firmware */
+#define          MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_TYPE_FULL_FEATURED  0x3
+/* enum: (deprecated original name for the FULL_FEATURED variant) */
+#define          MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_TYPE_VSWITCH  0x3
+/* enum: siena_compat variant RX PD firmware using PM rather than MAC
+ * (Huntington development only)
+ */
+#define          MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_TYPE_SIENA_COMPAT_PM  0x4
+/* enum: Low latency RX PD production firmware */
+#define          MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_TYPE_LOW_LATENCY  0x5
+/* enum: Packed stream RX PD production firmware */
+#define          MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_TYPE_PACKED_STREAM  0x6
+/* enum: RX PD firmware handling layer 2 only for high packet rate performance
+ * tests (Medford development only)
+ */
+#define          MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_TYPE_LAYER2_PERF  0x7
+/* enum: Rules engine RX PD production firmware */
+#define          MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_TYPE_RULES_ENGINE  0x8
+/* enum: reserved value - do not use (bug69716) */
+#define          MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_TYPE_RESERVED_9  0x9
+/* enum: RX PD firmware for GUE parsing prototype (Medford development only) */
+#define          MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_TYPE_TESTFW_GUE_PROTOTYPE  0xe
+/* enum: RX PD firmware parsing but not filtering network overlay tunnel
+ * encapsulations (Medford development only)
+ */
+#define          MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_TYPE_TESTFW_ENCAP_PARSING_ONLY  0xf
+#define       MC_CMD_GET_CAPABILITIES_V4_OUT_TXPD_FW_VERSION_OFST 10
+#define       MC_CMD_GET_CAPABILITIES_V4_OUT_TXPD_FW_VERSION_LEN 2
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_TXPD_FW_VERSION_REV_LBN 0
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_TXPD_FW_VERSION_REV_WIDTH 12
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_TXPD_FW_VERSION_TYPE_LBN 12
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_TXPD_FW_VERSION_TYPE_WIDTH 4
+/* enum: reserved value - do not use (may indicate alternative interpretation
+ * of REV field in future)
+ */
+#define          MC_CMD_GET_CAPABILITIES_V4_OUT_TXPD_FW_TYPE_RESERVED  0x0
+/* enum: Trivial TX PD firmware for early Huntington development (Huntington
+ * development only)
+ */
+#define          MC_CMD_GET_CAPABILITIES_V4_OUT_TXPD_FW_TYPE_FIRST_PKT  0x1
+/* enum: TX PD firmware with approximately Siena-compatible behaviour
+ * (Huntington development only)
+ */
+#define          MC_CMD_GET_CAPABILITIES_V4_OUT_TXPD_FW_TYPE_SIENA_COMPAT  0x2
+/* enum: Full featured TX PD production firmware */
+#define          MC_CMD_GET_CAPABILITIES_V4_OUT_TXPD_FW_TYPE_FULL_FEATURED  0x3
+/* enum: (deprecated original name for the FULL_FEATURED variant) */
+#define          MC_CMD_GET_CAPABILITIES_V4_OUT_TXPD_FW_TYPE_VSWITCH  0x3
+/* enum: siena_compat variant TX PD firmware using PM rather than MAC
+ * (Huntington development only)
+ */
+#define          MC_CMD_GET_CAPABILITIES_V4_OUT_TXPD_FW_TYPE_SIENA_COMPAT_PM  0x4
+#define          MC_CMD_GET_CAPABILITIES_V4_OUT_TXPD_FW_TYPE_LOW_LATENCY  0x5 /* enum */
+/* enum: TX PD firmware handling layer 2 only for high packet rate performance
+ * tests (Medford development only)
+ */
+#define          MC_CMD_GET_CAPABILITIES_V4_OUT_TXPD_FW_TYPE_LAYER2_PERF  0x7
+/* enum: Rules engine TX PD production firmware */
+#define          MC_CMD_GET_CAPABILITIES_V4_OUT_TXPD_FW_TYPE_RULES_ENGINE  0x8
+/* enum: reserved value - do not use (bug69716) */
+#define          MC_CMD_GET_CAPABILITIES_V4_OUT_TXPD_FW_TYPE_RESERVED_9  0x9
+/* enum: RX PD firmware for GUE parsing prototype (Medford development only) */
+#define          MC_CMD_GET_CAPABILITIES_V4_OUT_TXPD_FW_TYPE_TESTFW_GUE_PROTOTYPE  0xe
+/* Hardware capabilities of NIC */
+#define       MC_CMD_GET_CAPABILITIES_V4_OUT_HW_CAPABILITIES_OFST 12
+#define       MC_CMD_GET_CAPABILITIES_V4_OUT_HW_CAPABILITIES_LEN 4
+/* Licensed capabilities */
+#define       MC_CMD_GET_CAPABILITIES_V4_OUT_LICENSE_CAPABILITIES_OFST 16
+#define       MC_CMD_GET_CAPABILITIES_V4_OUT_LICENSE_CAPABILITIES_LEN 4
+/* Second word of flags. Not present on older firmware (check the length). */
+#define       MC_CMD_GET_CAPABILITIES_V4_OUT_FLAGS2_OFST 20
+#define       MC_CMD_GET_CAPABILITIES_V4_OUT_FLAGS2_LEN 4
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_TX_TSO_V2_LBN 0
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_TX_TSO_V2_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_TX_TSO_V2_ENCAP_LBN 1
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_TX_TSO_V2_ENCAP_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_EVQ_TIMER_CTRL_LBN 2
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_EVQ_TIMER_CTRL_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_EVENT_CUT_THROUGH_LBN 3
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_EVENT_CUT_THROUGH_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_RX_CUT_THROUGH_LBN 4
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_RX_CUT_THROUGH_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_TX_VFIFO_ULL_MODE_LBN 5
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_TX_VFIFO_ULL_MODE_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_MAC_STATS_40G_TX_SIZE_BINS_LBN 6
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_MAC_STATS_40G_TX_SIZE_BINS_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_INIT_EVQ_V2_LBN 7
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_INIT_EVQ_V2_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_TX_MAC_TIMESTAMPING_LBN 8
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_TX_MAC_TIMESTAMPING_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_TX_TIMESTAMP_LBN 9
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_TX_TIMESTAMP_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_RX_SNIFF_LBN 10
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_RX_SNIFF_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_TX_SNIFF_LBN 11
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_TX_SNIFF_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_NVRAM_UPDATE_REPORT_VERIFY_RESULT_LBN 12
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_NVRAM_UPDATE_REPORT_VERIFY_RESULT_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_MCDI_BACKGROUND_LBN 13
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_MCDI_BACKGROUND_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_MCDI_DB_RETURN_LBN 14
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_MCDI_DB_RETURN_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_CTPIO_LBN 15
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_CTPIO_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_TSA_SUPPORT_LBN 16
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_TSA_SUPPORT_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_TSA_BOUND_LBN 17
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_TSA_BOUND_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_SF_ADAPTER_AUTHENTICATION_LBN 18
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_SF_ADAPTER_AUTHENTICATION_WIDTH 1
+/* Number of FATSOv2 contexts per datapath supported by this NIC. Not present
+ * on older firmware (check the length).
+ */
+#define       MC_CMD_GET_CAPABILITIES_V4_OUT_TX_TSO_V2_N_CONTEXTS_OFST 24
+#define       MC_CMD_GET_CAPABILITIES_V4_OUT_TX_TSO_V2_N_CONTEXTS_LEN 2
+/* One byte per PF containing the number of the external port assigned to this
+ * PF, indexed by PF number. Special values indicate that a PF is either not
+ * present or not assigned.
+ */
+#define       MC_CMD_GET_CAPABILITIES_V4_OUT_PFS_TO_PORTS_ASSIGNMENT_OFST 26
+#define       MC_CMD_GET_CAPABILITIES_V4_OUT_PFS_TO_PORTS_ASSIGNMENT_LEN 1
+#define       MC_CMD_GET_CAPABILITIES_V4_OUT_PFS_TO_PORTS_ASSIGNMENT_NUM 16
+/* enum: The caller is not permitted to access information on this PF. */
+#define          MC_CMD_GET_CAPABILITIES_V4_OUT_ACCESS_NOT_PERMITTED  0xff
+/* enum: PF does not exist. */
+#define          MC_CMD_GET_CAPABILITIES_V4_OUT_PF_NOT_PRESENT  0xfe
+/* enum: PF does exist but is not assigned to any external port. */
+#define          MC_CMD_GET_CAPABILITIES_V4_OUT_PF_NOT_ASSIGNED  0xfd
+/* enum: This value indicates that the PF is assigned, but that the
+ * assignment cannot be expressed in this field. It is intended for a
+ * possible future situation where a more complex PF-to-port mapping scheme
+ * is in use. A future driver should look for a new field supporting the new
+ * scheme; a current/old driver should treat this value as PF_NOT_ASSIGNED.
+ */
+#define          MC_CMD_GET_CAPABILITIES_V4_OUT_INCOMPATIBLE_ASSIGNMENT  0xfc
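
All four special markers occupy the top of the byte's range (0xfc-0xff), so a decode sketch can treat anything at or above INCOMPATIBLE_ASSIGNMENT as "no usable port" (the helper name is hypothetical):

#include <stdbool.h>
#include <stdint.h>

/* Returns true and stores the external port number if the entry names a
 * real port; the special values 0xfc-0xff all fall through to false.
 */
static bool pf_external_port(uint8_t entry, unsigned int *port)
{
	if (entry >= MC_CMD_GET_CAPABILITIES_V4_OUT_INCOMPATIBLE_ASSIGNMENT)
		return false;
	*port = entry;
	return true;
}
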
+/* One byte per PF containing the number of its VFs, indexed by PF number. A
+ * special value indicates that a PF is not present.
+ */
+#define       MC_CMD_GET_CAPABILITIES_V4_OUT_NUM_VFS_PER_PF_OFST 42
+#define       MC_CMD_GET_CAPABILITIES_V4_OUT_NUM_VFS_PER_PF_LEN 1
+#define       MC_CMD_GET_CAPABILITIES_V4_OUT_NUM_VFS_PER_PF_NUM 16
+/* enum: The caller is not permitted to access information on this PF. */
+/*               MC_CMD_GET_CAPABILITIES_V4_OUT_ACCESS_NOT_PERMITTED  0xff */
+/* enum: PF does not exist. */
+/*               MC_CMD_GET_CAPABILITIES_V4_OUT_PF_NOT_PRESENT  0xfe */
+/* Number of VIs available for each external port */
+#define       MC_CMD_GET_CAPABILITIES_V4_OUT_NUM_VIS_PER_PORT_OFST 58
+#define       MC_CMD_GET_CAPABILITIES_V4_OUT_NUM_VIS_PER_PORT_LEN 2
+#define       MC_CMD_GET_CAPABILITIES_V4_OUT_NUM_VIS_PER_PORT_NUM 4
+/* Size of RX descriptor cache expressed as a binary logarithm. The actual
+ * size equals (2 ^ RX_DESC_CACHE_SIZE).
+ */
+#define       MC_CMD_GET_CAPABILITIES_V4_OUT_RX_DESC_CACHE_SIZE_OFST 66
+#define       MC_CMD_GET_CAPABILITIES_V4_OUT_RX_DESC_CACHE_SIZE_LEN 1
+/* Size of TX descriptor cache expressed as a binary logarithm. The actual
+ * size equals (2 ^ TX_DESC_CACHE_SIZE).
+ */
+#define       MC_CMD_GET_CAPABILITIES_V4_OUT_TX_DESC_CACHE_SIZE_OFST 67
+#define       MC_CMD_GET_CAPABILITIES_V4_OUT_TX_DESC_CACHE_SIZE_LEN 1
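
The stored values are exponents, so recovering the actual cache sizes is a shift; a minimal sketch (function name illustrative):

#include <stdint.h>

/* Recover the actual cache size from the logarithmic encoding. */
static unsigned int desc_cache_size(uint8_t log2_size)
{
	return 1u << log2_size;	/* 2 ^ *_DESC_CACHE_SIZE */
}
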
+/* Total number of available PIO buffers */
+#define       MC_CMD_GET_CAPABILITIES_V4_OUT_NUM_PIO_BUFFS_OFST 68
+#define       MC_CMD_GET_CAPABILITIES_V4_OUT_NUM_PIO_BUFFS_LEN 2
+/* Size of a single PIO buffer */
+#define       MC_CMD_GET_CAPABILITIES_V4_OUT_SIZE_PIO_BUFF_OFST 70
+#define       MC_CMD_GET_CAPABILITIES_V4_OUT_SIZE_PIO_BUFF_LEN 2
+/* On chips later than Medford the amount of address space assigned to each VI
+ * is configurable. This is a global setting that the driver must query to
+ * discover the VI to address mapping. Cut-through PIO (CTPIO) is not available
+ * with 8k VI windows.
+ */
+#define       MC_CMD_GET_CAPABILITIES_V4_OUT_VI_WINDOW_MODE_OFST 72
+#define       MC_CMD_GET_CAPABILITIES_V4_OUT_VI_WINDOW_MODE_LEN 1
+/* enum: Each VI occupies 8k as on Huntington and Medford. PIO is at offset 4k.
+ * CTPIO is not mapped.
+ */
+#define          MC_CMD_GET_CAPABILITIES_V4_OUT_VI_WINDOW_MODE_8K   0x0
+/* enum: Each VI occupies 16k. PIO is at offset 4k. CTPIO is at offset 12k. */
+#define          MC_CMD_GET_CAPABILITIES_V4_OUT_VI_WINDOW_MODE_16K  0x1
+/* enum: Each VI occupies 64k. PIO is at offset 4k. CTPIO is at offset 12k. */
+#define          MC_CMD_GET_CAPABILITIES_V4_OUT_VI_WINDOW_MODE_64K  0x2
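
A sketch of turning the reported mode into a per-VI window stride, using the sizes documented in the enum comments above (the function name is illustrative):

#include <stddef.h>

static size_t vi_window_stride(unsigned int vi_window_mode)
{
	switch (vi_window_mode) {
	case MC_CMD_GET_CAPABILITIES_V4_OUT_VI_WINDOW_MODE_8K:
		return 8192;	/* PIO at 4k, CTPIO not mapped */
	case MC_CMD_GET_CAPABILITIES_V4_OUT_VI_WINDOW_MODE_16K:
		return 16384;	/* PIO at 4k, CTPIO at 12k */
	case MC_CMD_GET_CAPABILITIES_V4_OUT_VI_WINDOW_MODE_64K:
		return 65536;	/* PIO at 4k, CTPIO at 12k */
	default:
		return 0;	/* unknown mode: caller should fail */
	}
}
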
+/* Number of vFIFOs per adapter that can be used for VFIFO Stuffing
+ * (SF-115995-SW) in the present configuration of firmware and port mode.
+ */
+#define       MC_CMD_GET_CAPABILITIES_V4_OUT_VFIFO_STUFFING_NUM_VFIFOS_OFST 73
+#define       MC_CMD_GET_CAPABILITIES_V4_OUT_VFIFO_STUFFING_NUM_VFIFOS_LEN 1
+/* Number of buffers per adapter that can be used for VFIFO Stuffing
+ * (SF-115995-SW) in the present configuration of firmware and port mode.
+ */
+#define       MC_CMD_GET_CAPABILITIES_V4_OUT_VFIFO_STUFFING_NUM_CP_BUFFERS_OFST 74
+#define       MC_CMD_GET_CAPABILITIES_V4_OUT_VFIFO_STUFFING_NUM_CP_BUFFERS_LEN 2
+/* Entry count in the MAC stats array, including the final GENERATION_END
+ * entry. For MAC stats DMA, drivers should allocate a buffer large enough to
+ * hold at least this many 64-bit stats values, if they wish to receive all
+ * available stats. If the buffer is shorter than MAC_STATS_NUM_STATS * 8, the
+ * stats array returned will be truncated.
+ */
+#define       MC_CMD_GET_CAPABILITIES_V4_OUT_MAC_STATS_NUM_STATS_OFST 76
+#define       MC_CMD_GET_CAPABILITIES_V4_OUT_MAC_STATS_NUM_STATS_LEN 2
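
A buffer-sizing sketch following the rule above (names hypothetical):

#include <stddef.h>
#include <stdint.h>

static size_t mac_stats_buf_len(unsigned int mac_stats_num_stats)
{
	/* Room for every 64-bit counter, including GENERATION_END; a
	 * smaller buffer yields a truncated stats array.
	 */
	return (size_t)mac_stats_num_stats * sizeof(uint64_t);
}
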
 
 
 /***********************************/
@@ -8502,6 +10219,7 @@
 #define    MC_CMD_TCM_BUCKET_ALLOC_OUT_LEN 4
 /* the bucket id */
 #define       MC_CMD_TCM_BUCKET_ALLOC_OUT_BUCKET_OFST 0
+#define       MC_CMD_TCM_BUCKET_ALLOC_OUT_BUCKET_LEN 4
 
 
 /***********************************/
@@ -8516,6 +10234,7 @@
 #define    MC_CMD_TCM_BUCKET_FREE_IN_LEN 4
 /* the bucket id */
 #define       MC_CMD_TCM_BUCKET_FREE_IN_BUCKET_OFST 0
+#define       MC_CMD_TCM_BUCKET_FREE_IN_BUCKET_LEN 4
 
 /* MC_CMD_TCM_BUCKET_FREE_OUT msgresponse */
 #define    MC_CMD_TCM_BUCKET_FREE_OUT_LEN 0
@@ -8533,17 +10252,22 @@
 #define    MC_CMD_TCM_BUCKET_INIT_IN_LEN 8
 /* the bucket id */
 #define       MC_CMD_TCM_BUCKET_INIT_IN_BUCKET_OFST 0
+#define       MC_CMD_TCM_BUCKET_INIT_IN_BUCKET_LEN 4
 /* the rate in Mbps */
 #define       MC_CMD_TCM_BUCKET_INIT_IN_RATE_OFST 4
+#define       MC_CMD_TCM_BUCKET_INIT_IN_RATE_LEN 4
 
 /* MC_CMD_TCM_BUCKET_INIT_EXT_IN msgrequest */
 #define    MC_CMD_TCM_BUCKET_INIT_EXT_IN_LEN 12
 /* the bucket id */
 #define       MC_CMD_TCM_BUCKET_INIT_EXT_IN_BUCKET_OFST 0
+#define       MC_CMD_TCM_BUCKET_INIT_EXT_IN_BUCKET_LEN 4
 /* the rate in Mbps */
 #define       MC_CMD_TCM_BUCKET_INIT_EXT_IN_RATE_OFST 4
+#define       MC_CMD_TCM_BUCKET_INIT_EXT_IN_RATE_LEN 4
 /* the desired maximum fill level */
 #define       MC_CMD_TCM_BUCKET_INIT_EXT_IN_MAX_FILL_OFST 8
+#define       MC_CMD_TCM_BUCKET_INIT_EXT_IN_MAX_FILL_LEN 4
 
 /* MC_CMD_TCM_BUCKET_INIT_OUT msgresponse */
 #define    MC_CMD_TCM_BUCKET_INIT_OUT_LEN 0
@@ -8561,10 +10285,13 @@
 #define    MC_CMD_TCM_TXQ_INIT_IN_LEN 28
 /* the txq id */
 #define       MC_CMD_TCM_TXQ_INIT_IN_QID_OFST 0
+#define       MC_CMD_TCM_TXQ_INIT_IN_QID_LEN 4
 /* the static priority associated with the txq */
 #define       MC_CMD_TCM_TXQ_INIT_IN_LABEL_OFST 4
+#define       MC_CMD_TCM_TXQ_INIT_IN_LABEL_LEN 4
 /* bitmask of the priority queues this txq is inserted into when inserted. */
 #define       MC_CMD_TCM_TXQ_INIT_IN_PQ_FLAGS_OFST 8
+#define       MC_CMD_TCM_TXQ_INIT_IN_PQ_FLAGS_LEN 4
 #define        MC_CMD_TCM_TXQ_INIT_IN_PQ_FLAG_GUARANTEED_LBN 0
 #define        MC_CMD_TCM_TXQ_INIT_IN_PQ_FLAG_GUARANTEED_WIDTH 1
 #define        MC_CMD_TCM_TXQ_INIT_IN_PQ_FLAG_NORMAL_LBN 1
@@ -8573,25 +10300,32 @@
 #define        MC_CMD_TCM_TXQ_INIT_IN_PQ_FLAG_LOW_WIDTH 1
 /* the reaction point (RP) bucket */
 #define       MC_CMD_TCM_TXQ_INIT_IN_RP_BKT_OFST 12
+#define       MC_CMD_TCM_TXQ_INIT_IN_RP_BKT_LEN 4
 /* an already reserved bucket (typically set to bucket associated with outer
  * vswitch)
  */
 #define       MC_CMD_TCM_TXQ_INIT_IN_MAX_BKT1_OFST 16
+#define       MC_CMD_TCM_TXQ_INIT_IN_MAX_BKT1_LEN 4
 /* an already reserved bucket (typically set to bucket associated with inner
  * vswitch)
  */
 #define       MC_CMD_TCM_TXQ_INIT_IN_MAX_BKT2_OFST 20
+#define       MC_CMD_TCM_TXQ_INIT_IN_MAX_BKT2_LEN 4
 /* the min bucket (typically for ETS/minimum bandwidth) */
 #define       MC_CMD_TCM_TXQ_INIT_IN_MIN_BKT_OFST 24
+#define       MC_CMD_TCM_TXQ_INIT_IN_MIN_BKT_LEN 4
 
 /* MC_CMD_TCM_TXQ_INIT_EXT_IN msgrequest */
 #define    MC_CMD_TCM_TXQ_INIT_EXT_IN_LEN 32
 /* the txq id */
 #define       MC_CMD_TCM_TXQ_INIT_EXT_IN_QID_OFST 0
+#define       MC_CMD_TCM_TXQ_INIT_EXT_IN_QID_LEN 4
 /* the static priority associated with the txq */
 #define       MC_CMD_TCM_TXQ_INIT_EXT_IN_LABEL_NORMAL_OFST 4
+#define       MC_CMD_TCM_TXQ_INIT_EXT_IN_LABEL_NORMAL_LEN 4
 /* bitmask of the priority queues this txq is inserted into when inserted. */
 #define       MC_CMD_TCM_TXQ_INIT_EXT_IN_PQ_FLAGS_OFST 8
+#define       MC_CMD_TCM_TXQ_INIT_EXT_IN_PQ_FLAGS_LEN 4
 #define        MC_CMD_TCM_TXQ_INIT_EXT_IN_PQ_FLAG_GUARANTEED_LBN 0
 #define        MC_CMD_TCM_TXQ_INIT_EXT_IN_PQ_FLAG_GUARANTEED_WIDTH 1
 #define        MC_CMD_TCM_TXQ_INIT_EXT_IN_PQ_FLAG_NORMAL_LBN 1
@@ -8600,18 +10334,23 @@
 #define        MC_CMD_TCM_TXQ_INIT_EXT_IN_PQ_FLAG_LOW_WIDTH 1
 /* the reaction point (RP) bucket */
 #define       MC_CMD_TCM_TXQ_INIT_EXT_IN_RP_BKT_OFST 12
+#define       MC_CMD_TCM_TXQ_INIT_EXT_IN_RP_BKT_LEN 4
 /* an already reserved bucket (typically set to bucket associated with outer
  * vswitch)
  */
 #define       MC_CMD_TCM_TXQ_INIT_EXT_IN_MAX_BKT1_OFST 16
+#define       MC_CMD_TCM_TXQ_INIT_EXT_IN_MAX_BKT1_LEN 4
 /* an already reserved bucket (typically set to bucket associated with inner
  * vswitch)
  */
 #define       MC_CMD_TCM_TXQ_INIT_EXT_IN_MAX_BKT2_OFST 20
+#define       MC_CMD_TCM_TXQ_INIT_EXT_IN_MAX_BKT2_LEN 4
 /* the min bucket (typically for ETS/minimum bandwidth) */
 #define       MC_CMD_TCM_TXQ_INIT_EXT_IN_MIN_BKT_OFST 24
+#define       MC_CMD_TCM_TXQ_INIT_EXT_IN_MIN_BKT_LEN 4
 /* the static priority associated with the txq */
 #define       MC_CMD_TCM_TXQ_INIT_EXT_IN_LABEL_GUARANTEED_OFST 28
+#define       MC_CMD_TCM_TXQ_INIT_EXT_IN_LABEL_GUARANTEED_LEN 4
 
 /* MC_CMD_TCM_TXQ_INIT_OUT msgresponse */
 #define    MC_CMD_TCM_TXQ_INIT_OUT_LEN 0
@@ -8629,8 +10368,10 @@
 #define    MC_CMD_LINK_PIOBUF_IN_LEN 8
 /* Handle for allocated push I/O buffer. */
 #define       MC_CMD_LINK_PIOBUF_IN_PIOBUF_HANDLE_OFST 0
+#define       MC_CMD_LINK_PIOBUF_IN_PIOBUF_HANDLE_LEN 4
 /* Function Local Instance (VI) number. */
 #define       MC_CMD_LINK_PIOBUF_IN_TXQ_INSTANCE_OFST 4
+#define       MC_CMD_LINK_PIOBUF_IN_TXQ_INSTANCE_LEN 4
 
 /* MC_CMD_LINK_PIOBUF_OUT msgresponse */
 #define    MC_CMD_LINK_PIOBUF_OUT_LEN 0
@@ -8648,6 +10389,7 @@
 #define    MC_CMD_UNLINK_PIOBUF_IN_LEN 4
 /* Function Local Instance (VI) number. */
 #define       MC_CMD_UNLINK_PIOBUF_IN_TXQ_INSTANCE_OFST 0
+#define       MC_CMD_UNLINK_PIOBUF_IN_TXQ_INSTANCE_LEN 4
 
 /* MC_CMD_UNLINK_PIOBUF_OUT msgresponse */
 #define    MC_CMD_UNLINK_PIOBUF_OUT_LEN 0
@@ -8665,8 +10407,10 @@
 #define    MC_CMD_VSWITCH_ALLOC_IN_LEN 16
 /* The port to connect to the v-switch's upstream port. */
 #define       MC_CMD_VSWITCH_ALLOC_IN_UPSTREAM_PORT_ID_OFST 0
+#define       MC_CMD_VSWITCH_ALLOC_IN_UPSTREAM_PORT_ID_LEN 4
 /* The type of v-switch to create. */
 #define       MC_CMD_VSWITCH_ALLOC_IN_TYPE_OFST 4
+#define       MC_CMD_VSWITCH_ALLOC_IN_TYPE_LEN 4
 /* enum: VLAN */
 #define          MC_CMD_VSWITCH_ALLOC_IN_VSWITCH_TYPE_VLAN  0x1
 /* enum: VEB */
@@ -8679,6 +10423,7 @@
 #define          MC_CMD_VSWITCH_ALLOC_IN_VSWITCH_TYPE_TEST  0x5
 /* Flags controlling v-port creation */
 #define       MC_CMD_VSWITCH_ALLOC_IN_FLAGS_OFST 8
+#define       MC_CMD_VSWITCH_ALLOC_IN_FLAGS_LEN 4
 #define        MC_CMD_VSWITCH_ALLOC_IN_FLAG_AUTO_PORT_LBN 0
 #define        MC_CMD_VSWITCH_ALLOC_IN_FLAG_AUTO_PORT_WIDTH 1
 /* The number of VLAN tags to allow for attached v-ports. For VLAN aggregators,
@@ -8689,6 +10434,7 @@
  * v-ports with this number of tags.
  */
 #define       MC_CMD_VSWITCH_ALLOC_IN_NUM_VLAN_TAGS_OFST 12
+#define       MC_CMD_VSWITCH_ALLOC_IN_NUM_VLAN_TAGS_LEN 4
 
 /* MC_CMD_VSWITCH_ALLOC_OUT msgresponse */
 #define    MC_CMD_VSWITCH_ALLOC_OUT_LEN 0
@@ -8706,6 +10452,7 @@
 #define    MC_CMD_VSWITCH_FREE_IN_LEN 4
 /* The port to which the v-switch is connected. */
 #define       MC_CMD_VSWITCH_FREE_IN_UPSTREAM_PORT_ID_OFST 0
+#define       MC_CMD_VSWITCH_FREE_IN_UPSTREAM_PORT_ID_LEN 4
 
 /* MC_CMD_VSWITCH_FREE_OUT msgresponse */
 #define    MC_CMD_VSWITCH_FREE_OUT_LEN 0
@@ -8725,6 +10472,7 @@
 #define    MC_CMD_VSWITCH_QUERY_IN_LEN 4
 /* The port to which the v-switch is connected. */
 #define       MC_CMD_VSWITCH_QUERY_IN_UPSTREAM_PORT_ID_OFST 0
+#define       MC_CMD_VSWITCH_QUERY_IN_UPSTREAM_PORT_ID_LEN 4
 
 /* MC_CMD_VSWITCH_QUERY_OUT msgresponse */
 #define    MC_CMD_VSWITCH_QUERY_OUT_LEN 0
@@ -8742,8 +10490,10 @@
 #define    MC_CMD_VPORT_ALLOC_IN_LEN 20
 /* The port to which the v-switch is connected. */
 #define       MC_CMD_VPORT_ALLOC_IN_UPSTREAM_PORT_ID_OFST 0
+#define       MC_CMD_VPORT_ALLOC_IN_UPSTREAM_PORT_ID_LEN 4
 /* The type of the new v-port. */
 #define       MC_CMD_VPORT_ALLOC_IN_TYPE_OFST 4
+#define       MC_CMD_VPORT_ALLOC_IN_TYPE_LEN 4
 /* enum: VLAN (obsolete) */
 #define          MC_CMD_VPORT_ALLOC_IN_VPORT_TYPE_VLAN  0x1
 /* enum: VEB (obsolete) */
@@ -8764,6 +10514,7 @@
 #define          MC_CMD_VPORT_ALLOC_IN_VPORT_TYPE_TEST  0x6
 /* Flags controlling v-port creation */
 #define       MC_CMD_VPORT_ALLOC_IN_FLAGS_OFST 8
+#define       MC_CMD_VPORT_ALLOC_IN_FLAGS_LEN 4
 #define        MC_CMD_VPORT_ALLOC_IN_FLAG_AUTO_PORT_LBN 0
 #define        MC_CMD_VPORT_ALLOC_IN_FLAG_AUTO_PORT_WIDTH 1
 #define        MC_CMD_VPORT_ALLOC_IN_FLAG_VLAN_RESTRICT_LBN 1
@@ -8773,8 +10524,10 @@
  * v-switch.
  */
 #define       MC_CMD_VPORT_ALLOC_IN_NUM_VLAN_TAGS_OFST 12
+#define       MC_CMD_VPORT_ALLOC_IN_NUM_VLAN_TAGS_LEN 4
 /* The actual VLAN tags to insert/remove */
 #define       MC_CMD_VPORT_ALLOC_IN_VLAN_TAGS_OFST 16
+#define       MC_CMD_VPORT_ALLOC_IN_VLAN_TAGS_LEN 4
 #define        MC_CMD_VPORT_ALLOC_IN_VLAN_TAG_0_LBN 0
 #define        MC_CMD_VPORT_ALLOC_IN_VLAN_TAG_0_WIDTH 16
 #define        MC_CMD_VPORT_ALLOC_IN_VLAN_TAG_1_LBN 16
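
The two 16-bit tags share one dword; a packing sketch using the LBN values above (function name illustrative):

#include <stdint.h>

static uint32_t vport_vlan_tags(uint16_t tag0, uint16_t tag1)
{
	return ((uint32_t)tag0 << MC_CMD_VPORT_ALLOC_IN_VLAN_TAG_0_LBN) |
	       ((uint32_t)tag1 << MC_CMD_VPORT_ALLOC_IN_VLAN_TAG_1_LBN);
}
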
@@ -8784,6 +10537,7 @@
 #define    MC_CMD_VPORT_ALLOC_OUT_LEN 4
 /* The handle of the new v-port */
 #define       MC_CMD_VPORT_ALLOC_OUT_VPORT_ID_OFST 0
+#define       MC_CMD_VPORT_ALLOC_OUT_VPORT_ID_LEN 4
 
 
 /***********************************/
@@ -8798,6 +10552,7 @@
 #define    MC_CMD_VPORT_FREE_IN_LEN 4
 /* The handle of the v-port */
 #define       MC_CMD_VPORT_FREE_IN_VPORT_ID_OFST 0
+#define       MC_CMD_VPORT_FREE_IN_VPORT_ID_LEN 4
 
 /* MC_CMD_VPORT_FREE_OUT msgresponse */
 #define    MC_CMD_VPORT_FREE_OUT_LEN 0
@@ -8815,18 +10570,23 @@
 #define    MC_CMD_VADAPTOR_ALLOC_IN_LEN 30
 /* The port to connect to the v-adaptor's port. */
 #define       MC_CMD_VADAPTOR_ALLOC_IN_UPSTREAM_PORT_ID_OFST 0
+#define       MC_CMD_VADAPTOR_ALLOC_IN_UPSTREAM_PORT_ID_LEN 4
 /* Flags controlling v-adaptor creation */
 #define       MC_CMD_VADAPTOR_ALLOC_IN_FLAGS_OFST 8
+#define       MC_CMD_VADAPTOR_ALLOC_IN_FLAGS_LEN 4
 #define        MC_CMD_VADAPTOR_ALLOC_IN_FLAG_AUTO_VADAPTOR_LBN 0
 #define        MC_CMD_VADAPTOR_ALLOC_IN_FLAG_AUTO_VADAPTOR_WIDTH 1
 #define        MC_CMD_VADAPTOR_ALLOC_IN_FLAG_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_LBN 1
 #define        MC_CMD_VADAPTOR_ALLOC_IN_FLAG_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_WIDTH 1
 /* The number of VLAN tags to strip on receive */
 #define       MC_CMD_VADAPTOR_ALLOC_IN_NUM_VLANS_OFST 12
+#define       MC_CMD_VADAPTOR_ALLOC_IN_NUM_VLANS_LEN 4
 /* The number of VLAN tags to transparently insert/remove. */
 #define       MC_CMD_VADAPTOR_ALLOC_IN_NUM_VLAN_TAGS_OFST 16
+#define       MC_CMD_VADAPTOR_ALLOC_IN_NUM_VLAN_TAGS_LEN 4
 /* The actual VLAN tags to insert/remove */
 #define       MC_CMD_VADAPTOR_ALLOC_IN_VLAN_TAGS_OFST 20
+#define       MC_CMD_VADAPTOR_ALLOC_IN_VLAN_TAGS_LEN 4
 #define        MC_CMD_VADAPTOR_ALLOC_IN_VLAN_TAG_0_LBN 0
 #define        MC_CMD_VADAPTOR_ALLOC_IN_VLAN_TAG_0_WIDTH 16
 #define        MC_CMD_VADAPTOR_ALLOC_IN_VLAN_TAG_1_LBN 16
@@ -8853,6 +10613,7 @@
 #define    MC_CMD_VADAPTOR_FREE_IN_LEN 4
 /* The port to which the v-adaptor is connected. */
 #define       MC_CMD_VADAPTOR_FREE_IN_UPSTREAM_PORT_ID_OFST 0
+#define       MC_CMD_VADAPTOR_FREE_IN_UPSTREAM_PORT_ID_LEN 4
 
 /* MC_CMD_VADAPTOR_FREE_OUT msgresponse */
 #define    MC_CMD_VADAPTOR_FREE_OUT_LEN 0
@@ -8870,6 +10631,7 @@
 #define    MC_CMD_VADAPTOR_SET_MAC_IN_LEN 10
 /* The port to which the v-adaptor is connected. */
 #define       MC_CMD_VADAPTOR_SET_MAC_IN_UPSTREAM_PORT_ID_OFST 0
+#define       MC_CMD_VADAPTOR_SET_MAC_IN_UPSTREAM_PORT_ID_LEN 4
 /* The new MAC address to assign to this v-adaptor */
 #define       MC_CMD_VADAPTOR_SET_MAC_IN_MACADDR_OFST 4
 #define       MC_CMD_VADAPTOR_SET_MAC_IN_MACADDR_LEN 6
@@ -8890,6 +10652,7 @@
 #define    MC_CMD_VADAPTOR_GET_MAC_IN_LEN 4
 /* The port to which the v-adaptor is connected. */
 #define       MC_CMD_VADAPTOR_GET_MAC_IN_UPSTREAM_PORT_ID_OFST 0
+#define       MC_CMD_VADAPTOR_GET_MAC_IN_UPSTREAM_PORT_ID_LEN 4
 
 /* MC_CMD_VADAPTOR_GET_MAC_OUT msgresponse */
 #define    MC_CMD_VADAPTOR_GET_MAC_OUT_LEN 6
@@ -8910,15 +10673,19 @@
 #define    MC_CMD_VADAPTOR_QUERY_IN_LEN 4
 /* The port to which the v-adaptor is connected. */
 #define       MC_CMD_VADAPTOR_QUERY_IN_UPSTREAM_PORT_ID_OFST 0
+#define       MC_CMD_VADAPTOR_QUERY_IN_UPSTREAM_PORT_ID_LEN 4
 
 /* MC_CMD_VADAPTOR_QUERY_OUT msgresponse */
 #define    MC_CMD_VADAPTOR_QUERY_OUT_LEN 12
 /* The EVB port flags as defined at MC_CMD_VPORT_ALLOC. */
 #define       MC_CMD_VADAPTOR_QUERY_OUT_PORT_FLAGS_OFST 0
+#define       MC_CMD_VADAPTOR_QUERY_OUT_PORT_FLAGS_LEN 4
 /* The v-adaptor flags as defined at MC_CMD_VADAPTOR_ALLOC. */
 #define       MC_CMD_VADAPTOR_QUERY_OUT_VADAPTOR_FLAGS_OFST 4
+#define       MC_CMD_VADAPTOR_QUERY_OUT_VADAPTOR_FLAGS_LEN 4
 /* The number of VLAN tags that may still be added */
 #define       MC_CMD_VADAPTOR_QUERY_OUT_NUM_AVAILABLE_VLAN_TAGS_OFST 8
+#define       MC_CMD_VADAPTOR_QUERY_OUT_NUM_AVAILABLE_VLAN_TAGS_LEN 4
 
 
 /***********************************/
@@ -8933,8 +10700,10 @@
 #define    MC_CMD_EVB_PORT_ASSIGN_IN_LEN 8
 /* The port to assign. */
 #define       MC_CMD_EVB_PORT_ASSIGN_IN_PORT_ID_OFST 0
+#define       MC_CMD_EVB_PORT_ASSIGN_IN_PORT_ID_LEN 4
 /* The target function to modify. */
 #define       MC_CMD_EVB_PORT_ASSIGN_IN_FUNCTION_OFST 4
+#define       MC_CMD_EVB_PORT_ASSIGN_IN_FUNCTION_LEN 4
 #define        MC_CMD_EVB_PORT_ASSIGN_IN_PF_LBN 0
 #define        MC_CMD_EVB_PORT_ASSIGN_IN_PF_WIDTH 16
 #define        MC_CMD_EVB_PORT_ASSIGN_IN_VF_LBN 16
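
The FUNCTION dword packs the PF in its low 16 bits and the VF in its high 16 bits; a packing sketch (function name illustrative):

#include <stdint.h>

static uint32_t evb_port_assign_function(uint16_t pf, uint16_t vf)
{
	return ((uint32_t)pf << MC_CMD_EVB_PORT_ASSIGN_IN_PF_LBN) |
	       ((uint32_t)vf << MC_CMD_EVB_PORT_ASSIGN_IN_VF_LBN);
}
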
@@ -8955,9 +10724,13 @@
 /* MC_CMD_RDWR_A64_REGIONS_IN msgrequest */
 #define    MC_CMD_RDWR_A64_REGIONS_IN_LEN 17
 #define       MC_CMD_RDWR_A64_REGIONS_IN_REGION0_OFST 0
+#define       MC_CMD_RDWR_A64_REGIONS_IN_REGION0_LEN 4
 #define       MC_CMD_RDWR_A64_REGIONS_IN_REGION1_OFST 4
+#define       MC_CMD_RDWR_A64_REGIONS_IN_REGION1_LEN 4
 #define       MC_CMD_RDWR_A64_REGIONS_IN_REGION2_OFST 8
+#define       MC_CMD_RDWR_A64_REGIONS_IN_REGION2_LEN 4
 #define       MC_CMD_RDWR_A64_REGIONS_IN_REGION3_OFST 12
+#define       MC_CMD_RDWR_A64_REGIONS_IN_REGION3_LEN 4
 /* Write enable bits 0-3, set to write, clear to read. */
 #define       MC_CMD_RDWR_A64_REGIONS_IN_WRITE_MASK_LBN 128
 #define       MC_CMD_RDWR_A64_REGIONS_IN_WRITE_MASK_WIDTH 4
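
WRITE_MASK sits at LBN 128, i.e. the low four bits of the final byte of the 17-byte request (byte offset 128 / 8 = 16). A sketch of setting it (function name illustrative; `req` is the request buffer):

#include <stdint.h>

static void set_a64_write_mask(uint8_t *req)
{
	/* Set bit n to write REGIONn, clear to read; here REGION0 and
	 * REGION2 are written, REGION1 and REGION3 are read.
	 */
	req[MC_CMD_RDWR_A64_REGIONS_IN_WRITE_MASK_LBN / 8] = (1u << 0) | (1u << 2);
}
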
@@ -8969,9 +10742,13 @@
  */
 #define    MC_CMD_RDWR_A64_REGIONS_OUT_LEN 16
 #define       MC_CMD_RDWR_A64_REGIONS_OUT_REGION0_OFST 0
+#define       MC_CMD_RDWR_A64_REGIONS_OUT_REGION0_LEN 4
 #define       MC_CMD_RDWR_A64_REGIONS_OUT_REGION1_OFST 4
+#define       MC_CMD_RDWR_A64_REGIONS_OUT_REGION1_LEN 4
 #define       MC_CMD_RDWR_A64_REGIONS_OUT_REGION2_OFST 8
+#define       MC_CMD_RDWR_A64_REGIONS_OUT_REGION2_LEN 4
 #define       MC_CMD_RDWR_A64_REGIONS_OUT_REGION3_OFST 12
+#define       MC_CMD_RDWR_A64_REGIONS_OUT_REGION3_LEN 4
 
 
 /***********************************/
@@ -8986,11 +10763,13 @@
 #define    MC_CMD_ONLOAD_STACK_ALLOC_IN_LEN 4
 /* The handle of the owning upstream port */
 #define       MC_CMD_ONLOAD_STACK_ALLOC_IN_UPSTREAM_PORT_ID_OFST 0
+#define       MC_CMD_ONLOAD_STACK_ALLOC_IN_UPSTREAM_PORT_ID_LEN 4
 
 /* MC_CMD_ONLOAD_STACK_ALLOC_OUT msgresponse */
 #define    MC_CMD_ONLOAD_STACK_ALLOC_OUT_LEN 4
 /* The handle of the new Onload stack */
 #define       MC_CMD_ONLOAD_STACK_ALLOC_OUT_ONLOAD_STACK_ID_OFST 0
+#define       MC_CMD_ONLOAD_STACK_ALLOC_OUT_ONLOAD_STACK_ID_LEN 4
 
 
 /***********************************/
@@ -9005,6 +10784,7 @@
 #define    MC_CMD_ONLOAD_STACK_FREE_IN_LEN 4
 /* The handle of the Onload stack */
 #define       MC_CMD_ONLOAD_STACK_FREE_IN_ONLOAD_STACK_ID_OFST 0
+#define       MC_CMD_ONLOAD_STACK_FREE_IN_ONLOAD_STACK_ID_LEN 4
 
 /* MC_CMD_ONLOAD_STACK_FREE_OUT msgresponse */
 #define    MC_CMD_ONLOAD_STACK_FREE_OUT_LEN 0
@@ -9022,8 +10802,10 @@
 #define    MC_CMD_RSS_CONTEXT_ALLOC_IN_LEN 12
 /* The handle of the owning upstream port */
 #define       MC_CMD_RSS_CONTEXT_ALLOC_IN_UPSTREAM_PORT_ID_OFST 0
+#define       MC_CMD_RSS_CONTEXT_ALLOC_IN_UPSTREAM_PORT_ID_LEN 4
 /* The type of context to allocate */
 #define       MC_CMD_RSS_CONTEXT_ALLOC_IN_TYPE_OFST 4
+#define       MC_CMD_RSS_CONTEXT_ALLOC_IN_TYPE_LEN 4
 /* enum: Allocate a context for exclusive use. The key and indirection table
  * must be explicitly configured.
  */
@@ -9037,6 +10819,7 @@
  * in the indirection table will be in the range 0 to NUM_QUEUES-1.
  */
 #define       MC_CMD_RSS_CONTEXT_ALLOC_IN_NUM_QUEUES_OFST 8
+#define       MC_CMD_RSS_CONTEXT_ALLOC_IN_NUM_QUEUES_LEN 4
 
 /* MC_CMD_RSS_CONTEXT_ALLOC_OUT msgresponse */
 #define    MC_CMD_RSS_CONTEXT_ALLOC_OUT_LEN 4
@@ -9045,6 +10828,7 @@
  * handle.
  */
 #define       MC_CMD_RSS_CONTEXT_ALLOC_OUT_RSS_CONTEXT_ID_OFST 0
+#define       MC_CMD_RSS_CONTEXT_ALLOC_OUT_RSS_CONTEXT_ID_LEN 4
 /* enum: guaranteed invalid RSS context handle value */
 #define          MC_CMD_RSS_CONTEXT_ALLOC_OUT_RSS_CONTEXT_ID_INVALID  0xffffffff
 
@@ -9061,6 +10845,7 @@
 #define    MC_CMD_RSS_CONTEXT_FREE_IN_LEN 4
 /* The handle of the RSS context */
 #define       MC_CMD_RSS_CONTEXT_FREE_IN_RSS_CONTEXT_ID_OFST 0
+#define       MC_CMD_RSS_CONTEXT_FREE_IN_RSS_CONTEXT_ID_LEN 4
 
 /* MC_CMD_RSS_CONTEXT_FREE_OUT msgresponse */
 #define    MC_CMD_RSS_CONTEXT_FREE_OUT_LEN 0
@@ -9078,6 +10863,7 @@
 #define    MC_CMD_RSS_CONTEXT_SET_KEY_IN_LEN 44
 /* The handle of the RSS context */
 #define       MC_CMD_RSS_CONTEXT_SET_KEY_IN_RSS_CONTEXT_ID_OFST 0
+#define       MC_CMD_RSS_CONTEXT_SET_KEY_IN_RSS_CONTEXT_ID_LEN 4
 /* The 40-byte Toeplitz hash key (TBD endianness issues?) */
 #define       MC_CMD_RSS_CONTEXT_SET_KEY_IN_TOEPLITZ_KEY_OFST 4
 #define       MC_CMD_RSS_CONTEXT_SET_KEY_IN_TOEPLITZ_KEY_LEN 40
@@ -9098,6 +10884,7 @@
 #define    MC_CMD_RSS_CONTEXT_GET_KEY_IN_LEN 4
 /* The handle of the RSS context */
 #define       MC_CMD_RSS_CONTEXT_GET_KEY_IN_RSS_CONTEXT_ID_OFST 0
+#define       MC_CMD_RSS_CONTEXT_GET_KEY_IN_RSS_CONTEXT_ID_LEN 4
 
 /* MC_CMD_RSS_CONTEXT_GET_KEY_OUT msgresponse */
 #define    MC_CMD_RSS_CONTEXT_GET_KEY_OUT_LEN 44
@@ -9118,6 +10905,7 @@
 #define    MC_CMD_RSS_CONTEXT_SET_TABLE_IN_LEN 132
 /* The handle of the RSS context */
 #define       MC_CMD_RSS_CONTEXT_SET_TABLE_IN_RSS_CONTEXT_ID_OFST 0
+#define       MC_CMD_RSS_CONTEXT_SET_TABLE_IN_RSS_CONTEXT_ID_LEN 4
 /* The 128-byte indirection table (1 byte per entry) */
 #define       MC_CMD_RSS_CONTEXT_SET_TABLE_IN_INDIRECTION_TABLE_OFST 4
 #define       MC_CMD_RSS_CONTEXT_SET_TABLE_IN_INDIRECTION_TABLE_LEN 128
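
A common way to populate the table is to tile the active queues across all 128 one-byte entries; a sketch, with `n_queues` assumed to match NUM_QUEUES from the context allocation:

#include <stdint.h>

static void fill_indirection_table(uint8_t *table, unsigned int n_queues)
{
	unsigned int i;

	for (i = 0; i < MC_CMD_RSS_CONTEXT_SET_TABLE_IN_INDIRECTION_TABLE_LEN; i++)
		table[i] = i % n_queues;	/* entries stay in 0..NUM_QUEUES-1 */
}
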
@@ -9138,6 +10926,7 @@
 #define    MC_CMD_RSS_CONTEXT_GET_TABLE_IN_LEN 4
 /* The handle of the RSS context */
 #define       MC_CMD_RSS_CONTEXT_GET_TABLE_IN_RSS_CONTEXT_ID_OFST 0
+#define       MC_CMD_RSS_CONTEXT_GET_TABLE_IN_RSS_CONTEXT_ID_LEN 4
 
 /* MC_CMD_RSS_CONTEXT_GET_TABLE_OUT msgresponse */
 #define    MC_CMD_RSS_CONTEXT_GET_TABLE_OUT_LEN 132
@@ -9158,6 +10947,7 @@
 #define    MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_LEN 8
 /* The handle of the RSS context */
 #define       MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_RSS_CONTEXT_ID_OFST 0
+#define       MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_RSS_CONTEXT_ID_LEN 4
 /* Hash control flags. The _EN bits are always supported, but new modes are
  * available when ADDITIONAL_RSS_MODES is reported by MC_CMD_GET_CAPABILITIES:
  * in this case, the MODE fields may be set to non-zero values, and will take
@@ -9171,6 +10961,7 @@
  * particular packet type.)
  */
 #define       MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_FLAGS_OFST 4
+#define       MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_FLAGS_LEN 4
 #define        MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TOEPLITZ_IPV4_EN_LBN 0
 #define        MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TOEPLITZ_IPV4_EN_WIDTH 1
 #define        MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TOEPLITZ_TCPV4_EN_LBN 1
@@ -9210,6 +11001,7 @@
 #define    MC_CMD_RSS_CONTEXT_GET_FLAGS_IN_LEN 4
 /* The handle of the RSS context */
 #define       MC_CMD_RSS_CONTEXT_GET_FLAGS_IN_RSS_CONTEXT_ID_OFST 0
+#define       MC_CMD_RSS_CONTEXT_GET_FLAGS_IN_RSS_CONTEXT_ID_LEN 4
 
 /* MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT msgresponse */
 #define    MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_LEN 8
@@ -9227,6 +11019,7 @@
  * always be used for a SET regardless of old/new driver vs. old/new firmware.
  */
 #define       MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_FLAGS_OFST 4
+#define       MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_FLAGS_LEN 4
 #define        MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TOEPLITZ_IPV4_EN_LBN 0
 #define        MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TOEPLITZ_IPV4_EN_WIDTH 1
 #define        MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TOEPLITZ_TCPV4_EN_LBN 1
@@ -9263,11 +11056,13 @@
 #define    MC_CMD_DOT1P_MAPPING_ALLOC_IN_LEN 8
 /* The handle of the owning upstream port */
 #define       MC_CMD_DOT1P_MAPPING_ALLOC_IN_UPSTREAM_PORT_ID_OFST 0
+#define       MC_CMD_DOT1P_MAPPING_ALLOC_IN_UPSTREAM_PORT_ID_LEN 4
 /* Number of queues spanned by this mapping, in the range 1-64; valid fixed
  * offsets in the mapping table will be in the range 0 to NUM_QUEUES-1, and
  * referenced RSS contexts must span no more than this number.
  */
 #define       MC_CMD_DOT1P_MAPPING_ALLOC_IN_NUM_QUEUES_OFST 4
+#define       MC_CMD_DOT1P_MAPPING_ALLOC_IN_NUM_QUEUES_LEN 4
 
 /* MC_CMD_DOT1P_MAPPING_ALLOC_OUT msgresponse */
 #define    MC_CMD_DOT1P_MAPPING_ALLOC_OUT_LEN 4
@@ -9276,6 +11071,7 @@
  * handle.
  */
 #define       MC_CMD_DOT1P_MAPPING_ALLOC_OUT_DOT1P_MAPPING_ID_OFST 0
+#define       MC_CMD_DOT1P_MAPPING_ALLOC_OUT_DOT1P_MAPPING_ID_LEN 4
 /* enum: guaranteed invalid .1p mapping handle value */
 #define          MC_CMD_DOT1P_MAPPING_ALLOC_OUT_DOT1P_MAPPING_ID_INVALID  0xffffffff
 
@@ -9292,6 +11088,7 @@
 #define    MC_CMD_DOT1P_MAPPING_FREE_IN_LEN 4
 /* The handle of the .1p mapping */
 #define       MC_CMD_DOT1P_MAPPING_FREE_IN_DOT1P_MAPPING_ID_OFST 0
+#define       MC_CMD_DOT1P_MAPPING_FREE_IN_DOT1P_MAPPING_ID_LEN 4
 
 /* MC_CMD_DOT1P_MAPPING_FREE_OUT msgresponse */
 #define    MC_CMD_DOT1P_MAPPING_FREE_OUT_LEN 0
@@ -9309,6 +11106,7 @@
 #define    MC_CMD_DOT1P_MAPPING_SET_TABLE_IN_LEN 36
 /* The handle of the .1p mapping */
 #define       MC_CMD_DOT1P_MAPPING_SET_TABLE_IN_DOT1P_MAPPING_ID_OFST 0
+#define       MC_CMD_DOT1P_MAPPING_SET_TABLE_IN_DOT1P_MAPPING_ID_LEN 4
 /* Per-priority mappings (1 32-bit word per entry - an offset or RSS context
  * handle)
  */
@@ -9331,6 +11129,7 @@
 #define    MC_CMD_DOT1P_MAPPING_GET_TABLE_IN_LEN 4
 /* The handle of the .1p mapping */
 #define       MC_CMD_DOT1P_MAPPING_GET_TABLE_IN_DOT1P_MAPPING_ID_OFST 0
+#define       MC_CMD_DOT1P_MAPPING_GET_TABLE_IN_DOT1P_MAPPING_ID_LEN 4
 
 /* MC_CMD_DOT1P_MAPPING_GET_TABLE_OUT msgresponse */
 #define    MC_CMD_DOT1P_MAPPING_GET_TABLE_OUT_LEN 36
@@ -9356,10 +11155,13 @@
 #define    MC_CMD_GET_VECTOR_CFG_OUT_LEN 12
 /* Base absolute interrupt vector number. */
 #define       MC_CMD_GET_VECTOR_CFG_OUT_VEC_BASE_OFST 0
+#define       MC_CMD_GET_VECTOR_CFG_OUT_VEC_BASE_LEN 4
 /* Number of interrupt vectors allocated to this PF. */
 #define       MC_CMD_GET_VECTOR_CFG_OUT_VECS_PER_PF_OFST 4
+#define       MC_CMD_GET_VECTOR_CFG_OUT_VECS_PER_PF_LEN 4
 /* Number of interrupt vectors to allocate per VF. */
 #define       MC_CMD_GET_VECTOR_CFG_OUT_VECS_PER_VF_OFST 8
+#define       MC_CMD_GET_VECTOR_CFG_OUT_VECS_PER_VF_LEN 4
 
 
 /***********************************/
@@ -9376,10 +11178,13 @@
  * let the system find a suitable base.
  */
 #define       MC_CMD_SET_VECTOR_CFG_IN_VEC_BASE_OFST 0
+#define       MC_CMD_SET_VECTOR_CFG_IN_VEC_BASE_LEN 4
 /* Number of interrupt vectors to allocate to this PF. */
 #define       MC_CMD_SET_VECTOR_CFG_IN_VECS_PER_PF_OFST 4
+#define       MC_CMD_SET_VECTOR_CFG_IN_VECS_PER_PF_LEN 4
 /* Number of interrupt vectors to allocate per VF. */
 #define       MC_CMD_SET_VECTOR_CFG_IN_VECS_PER_VF_OFST 8
+#define       MC_CMD_SET_VECTOR_CFG_IN_VECS_PER_VF_LEN 4
 
 /* MC_CMD_SET_VECTOR_CFG_OUT msgresponse */
 #define    MC_CMD_SET_VECTOR_CFG_OUT_LEN 0
@@ -9397,6 +11202,7 @@
 #define    MC_CMD_VPORT_ADD_MAC_ADDRESS_IN_LEN 10
 /* The handle of the v-port */
 #define       MC_CMD_VPORT_ADD_MAC_ADDRESS_IN_VPORT_ID_OFST 0
+#define       MC_CMD_VPORT_ADD_MAC_ADDRESS_IN_VPORT_ID_LEN 4
 /* MAC address to add */
 #define       MC_CMD_VPORT_ADD_MAC_ADDRESS_IN_MACADDR_OFST 4
 #define       MC_CMD_VPORT_ADD_MAC_ADDRESS_IN_MACADDR_LEN 6
@@ -9417,6 +11223,7 @@
 #define    MC_CMD_VPORT_DEL_MAC_ADDRESS_IN_LEN 10
 /* The handle of the v-port */
 #define       MC_CMD_VPORT_DEL_MAC_ADDRESS_IN_VPORT_ID_OFST 0
+#define       MC_CMD_VPORT_DEL_MAC_ADDRESS_IN_VPORT_ID_LEN 4
 /* MAC address to remove */
 #define       MC_CMD_VPORT_DEL_MAC_ADDRESS_IN_MACADDR_OFST 4
 #define       MC_CMD_VPORT_DEL_MAC_ADDRESS_IN_MACADDR_LEN 6
@@ -9437,6 +11244,7 @@
 #define    MC_CMD_VPORT_GET_MAC_ADDRESSES_IN_LEN 4
 /* The handle of the v-port */
 #define       MC_CMD_VPORT_GET_MAC_ADDRESSES_IN_VPORT_ID_OFST 0
+#define       MC_CMD_VPORT_GET_MAC_ADDRESSES_IN_VPORT_ID_LEN 4
 
 /* MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT msgresponse */
 #define    MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT_LENMIN 4
@@ -9444,6 +11252,7 @@
 #define    MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT_LEN(num) (4+6*(num))
 /* The number of MAC addresses returned */
 #define       MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT_MACADDR_COUNT_OFST 0
+#define       MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT_MACADDR_COUNT_LEN 4
 /* Array of MAC addresses */
 #define       MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT_MACADDR_OFST 4
 #define       MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT_MACADDR_LEN 6
@@ -9465,8 +11274,10 @@
 #define    MC_CMD_VPORT_RECONFIGURE_IN_LEN 44
 /* The handle of the v-port */
 #define       MC_CMD_VPORT_RECONFIGURE_IN_VPORT_ID_OFST 0
+#define       MC_CMD_VPORT_RECONFIGURE_IN_VPORT_ID_LEN 4
 /* Flags requesting what should be changed. */
 #define       MC_CMD_VPORT_RECONFIGURE_IN_FLAGS_OFST 4
+#define       MC_CMD_VPORT_RECONFIGURE_IN_FLAGS_LEN 4
 #define        MC_CMD_VPORT_RECONFIGURE_IN_REPLACE_VLAN_TAGS_LBN 0
 #define        MC_CMD_VPORT_RECONFIGURE_IN_REPLACE_VLAN_TAGS_WIDTH 1
 #define        MC_CMD_VPORT_RECONFIGURE_IN_REPLACE_MACADDRS_LBN 1
@@ -9476,14 +11287,17 @@
  * v-switch.
  */
 #define       MC_CMD_VPORT_RECONFIGURE_IN_NUM_VLAN_TAGS_OFST 8
+#define       MC_CMD_VPORT_RECONFIGURE_IN_NUM_VLAN_TAGS_LEN 4
 /* The actual VLAN tags to insert/remove */
 #define       MC_CMD_VPORT_RECONFIGURE_IN_VLAN_TAGS_OFST 12
+#define       MC_CMD_VPORT_RECONFIGURE_IN_VLAN_TAGS_LEN 4
 #define        MC_CMD_VPORT_RECONFIGURE_IN_VLAN_TAG_0_LBN 0
 #define        MC_CMD_VPORT_RECONFIGURE_IN_VLAN_TAG_0_WIDTH 16
 #define        MC_CMD_VPORT_RECONFIGURE_IN_VLAN_TAG_1_LBN 16
 #define        MC_CMD_VPORT_RECONFIGURE_IN_VLAN_TAG_1_WIDTH 16
 /* The number of MAC addresses to add */
 #define       MC_CMD_VPORT_RECONFIGURE_IN_NUM_MACADDRS_OFST 16
+#define       MC_CMD_VPORT_RECONFIGURE_IN_NUM_MACADDRS_LEN 4
 /* MAC addresses to add */
 #define       MC_CMD_VPORT_RECONFIGURE_IN_MACADDRS_OFST 20
 #define       MC_CMD_VPORT_RECONFIGURE_IN_MACADDRS_LEN 6
@@ -9492,6 +11306,7 @@
 /* MC_CMD_VPORT_RECONFIGURE_OUT msgresponse */
 #define    MC_CMD_VPORT_RECONFIGURE_OUT_LEN 4
 #define       MC_CMD_VPORT_RECONFIGURE_OUT_FLAGS_OFST 0
+#define       MC_CMD_VPORT_RECONFIGURE_OUT_FLAGS_LEN 4
 #define        MC_CMD_VPORT_RECONFIGURE_OUT_RESET_DONE_LBN 0
 #define        MC_CMD_VPORT_RECONFIGURE_OUT_RESET_DONE_WIDTH 1
 
@@ -9508,15 +11323,18 @@
 #define    MC_CMD_EVB_PORT_QUERY_IN_LEN 4
 /* The handle of the v-port */
 #define       MC_CMD_EVB_PORT_QUERY_IN_PORT_ID_OFST 0
+#define       MC_CMD_EVB_PORT_QUERY_IN_PORT_ID_LEN 4
 
 /* MC_CMD_EVB_PORT_QUERY_OUT msgresponse */
 #define    MC_CMD_EVB_PORT_QUERY_OUT_LEN 8
 /* The EVB port flags as defined at MC_CMD_VPORT_ALLOC. */
 #define       MC_CMD_EVB_PORT_QUERY_OUT_PORT_FLAGS_OFST 0
+#define       MC_CMD_EVB_PORT_QUERY_OUT_PORT_FLAGS_LEN 4
 /* The number of VLAN tags that may be used on a v-adaptor connected to this
  * EVB port.
  */
 #define       MC_CMD_EVB_PORT_QUERY_OUT_NUM_AVAILABLE_VLAN_TAGS_OFST 4
+#define       MC_CMD_EVB_PORT_QUERY_OUT_NUM_AVAILABLE_VLAN_TAGS_LEN 4
 
 
 /***********************************/
@@ -9528,14 +11346,16 @@
  */
 #define MC_CMD_DUMP_BUFTBL_ENTRIES 0xab
 
-#define MC_CMD_0xab_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+#define MC_CMD_0xab_PRIVILEGE_CTG SRIOV_CTG_INSECURE
 
 /* MC_CMD_DUMP_BUFTBL_ENTRIES_IN msgrequest */
 #define    MC_CMD_DUMP_BUFTBL_ENTRIES_IN_LEN 8
 /* Index of the first buffer table entry. */
 #define       MC_CMD_DUMP_BUFTBL_ENTRIES_IN_FIRSTID_OFST 0
+#define       MC_CMD_DUMP_BUFTBL_ENTRIES_IN_FIRSTID_LEN 4
 /* Number of buffer table entries to dump. */
 #define       MC_CMD_DUMP_BUFTBL_ENTRIES_IN_NUMENTRIES_OFST 4
+#define       MC_CMD_DUMP_BUFTBL_ENTRIES_IN_NUMENTRIES_LEN 4
 
 /* MC_CMD_DUMP_BUFTBL_ENTRIES_OUT msgresponse */
 #define    MC_CMD_DUMP_BUFTBL_ENTRIES_OUT_LENMIN 12
@@ -9559,6 +11379,7 @@
 /* MC_CMD_SET_RXDP_CONFIG_IN msgrequest */
 #define    MC_CMD_SET_RXDP_CONFIG_IN_LEN 4
 #define       MC_CMD_SET_RXDP_CONFIG_IN_DATA_OFST 0
+#define       MC_CMD_SET_RXDP_CONFIG_IN_DATA_LEN 4
 #define        MC_CMD_SET_RXDP_CONFIG_IN_PAD_HOST_DMA_LBN 0
 #define        MC_CMD_SET_RXDP_CONFIG_IN_PAD_HOST_DMA_WIDTH 1
 #define        MC_CMD_SET_RXDP_CONFIG_IN_PAD_HOST_LEN_LBN 1
@@ -9588,6 +11409,7 @@
 /* MC_CMD_GET_RXDP_CONFIG_OUT msgresponse */
 #define    MC_CMD_GET_RXDP_CONFIG_OUT_LEN 4
 #define       MC_CMD_GET_RXDP_CONFIG_OUT_DATA_OFST 0
+#define       MC_CMD_GET_RXDP_CONFIG_OUT_DATA_LEN 4
 #define        MC_CMD_GET_RXDP_CONFIG_OUT_PAD_HOST_DMA_LBN 0
 #define        MC_CMD_GET_RXDP_CONFIG_OUT_PAD_HOST_DMA_WIDTH 1
 #define        MC_CMD_GET_RXDP_CONFIG_OUT_PAD_HOST_LEN_LBN 1
@@ -9611,8 +11433,10 @@
 #define    MC_CMD_GET_CLOCK_OUT_LEN 8
 /* System frequency, MHz */
 #define       MC_CMD_GET_CLOCK_OUT_SYS_FREQ_OFST 0
+#define       MC_CMD_GET_CLOCK_OUT_SYS_FREQ_LEN 4
 /* DPCPU frequency, MHz */
 #define       MC_CMD_GET_CLOCK_OUT_DPCPU_FREQ_OFST 4
+#define       MC_CMD_GET_CLOCK_OUT_DPCPU_FREQ_LEN 4
 
 
 /***********************************/
@@ -9621,36 +11445,43 @@
  */
 #define MC_CMD_SET_CLOCK 0xad
 
-#define MC_CMD_0xad_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+#define MC_CMD_0xad_PRIVILEGE_CTG SRIOV_CTG_INSECURE
 
 /* MC_CMD_SET_CLOCK_IN msgrequest */
 #define    MC_CMD_SET_CLOCK_IN_LEN 28
 /* Requested frequency in MHz for system clock domain */
 #define       MC_CMD_SET_CLOCK_IN_SYS_FREQ_OFST 0
+#define       MC_CMD_SET_CLOCK_IN_SYS_FREQ_LEN 4
 /* enum: Leave the system clock domain frequency unchanged */
 #define          MC_CMD_SET_CLOCK_IN_SYS_DOMAIN_DONT_CHANGE  0x0
 /* Requested frequency in MHz for inter-core clock domain */
 #define       MC_CMD_SET_CLOCK_IN_ICORE_FREQ_OFST 4
+#define       MC_CMD_SET_CLOCK_IN_ICORE_FREQ_LEN 4
 /* enum: Leave the inter-core clock domain frequency unchanged */
 #define          MC_CMD_SET_CLOCK_IN_ICORE_DOMAIN_DONT_CHANGE  0x0
 /* Requested frequency in MHz for DPCPU clock domain */
 #define       MC_CMD_SET_CLOCK_IN_DPCPU_FREQ_OFST 8
+#define       MC_CMD_SET_CLOCK_IN_DPCPU_FREQ_LEN 4
 /* enum: Leave the DPCPU clock domain frequency unchanged */
 #define          MC_CMD_SET_CLOCK_IN_DPCPU_DOMAIN_DONT_CHANGE  0x0
 /* Requested frequency in MHz for PCS clock domain */
 #define       MC_CMD_SET_CLOCK_IN_PCS_FREQ_OFST 12
+#define       MC_CMD_SET_CLOCK_IN_PCS_FREQ_LEN 4
 /* enum: Leave the PCS clock domain frequency unchanged */
 #define          MC_CMD_SET_CLOCK_IN_PCS_DOMAIN_DONT_CHANGE  0x0
 /* Requested frequency in MHz for MC clock domain */
 #define       MC_CMD_SET_CLOCK_IN_MC_FREQ_OFST 16
+#define       MC_CMD_SET_CLOCK_IN_MC_FREQ_LEN 4
 /* enum: Leave the MC clock domain frequency unchanged */
 #define          MC_CMD_SET_CLOCK_IN_MC_DOMAIN_DONT_CHANGE  0x0
 /* Requested frequency in MHz for rmon clock domain */
 #define       MC_CMD_SET_CLOCK_IN_RMON_FREQ_OFST 20
+#define       MC_CMD_SET_CLOCK_IN_RMON_FREQ_LEN 4
 /* enum: Leave the rmon clock domain frequency unchanged */
 #define          MC_CMD_SET_CLOCK_IN_RMON_DOMAIN_DONT_CHANGE  0x0
 /* Requested frequency in MHz for vswitch clock domain */
 #define       MC_CMD_SET_CLOCK_IN_VSWITCH_FREQ_OFST 24
+#define       MC_CMD_SET_CLOCK_IN_VSWITCH_FREQ_LEN 4
 /* enum: Leave the vswitch clock domain frequency unchanged */
 #define          MC_CMD_SET_CLOCK_IN_VSWITCH_DOMAIN_DONT_CHANGE  0x0
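
Since the DONT_CHANGE value is 0x0 for every domain, a zeroed request leaves all clocks alone; a sketch that changes only the DPCPU domain (function name illustrative, frequency an arbitrary example, little-endian host assumed for the memcpy):

#include <stdint.h>
#include <string.h>

static void build_set_clock_dpcpu(uint8_t *req, uint32_t dpcpu_mhz)
{
	memset(req, 0, MC_CMD_SET_CLOCK_IN_LEN);	/* all domains: DONT_CHANGE */
	memcpy(req + MC_CMD_SET_CLOCK_IN_DPCPU_FREQ_OFST, &dpcpu_mhz,
	       MC_CMD_SET_CLOCK_IN_DPCPU_FREQ_LEN);
}
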
 
@@ -9658,30 +11489,37 @@
 #define    MC_CMD_SET_CLOCK_OUT_LEN 28
 /* Resulting system frequency in MHz */
 #define       MC_CMD_SET_CLOCK_OUT_SYS_FREQ_OFST 0
+#define       MC_CMD_SET_CLOCK_OUT_SYS_FREQ_LEN 4
 /* enum: The system clock domain doesn't exist */
 #define          MC_CMD_SET_CLOCK_OUT_SYS_DOMAIN_UNSUPPORTED  0x0
 /* Resulting inter-core frequency in MHz */
 #define       MC_CMD_SET_CLOCK_OUT_ICORE_FREQ_OFST 4
+#define       MC_CMD_SET_CLOCK_OUT_ICORE_FREQ_LEN 4
 /* enum: The inter-core clock domain doesn't exist / isn't used */
 #define          MC_CMD_SET_CLOCK_OUT_ICORE_DOMAIN_UNSUPPORTED  0x0
 /* Resulting DPCPU frequency in MHz */
 #define       MC_CMD_SET_CLOCK_OUT_DPCPU_FREQ_OFST 8
+#define       MC_CMD_SET_CLOCK_OUT_DPCPU_FREQ_LEN 4
 /* enum: The dpcpu clock domain doesn't exist */
 #define          MC_CMD_SET_CLOCK_OUT_DPCPU_DOMAIN_UNSUPPORTED  0x0
 /* Resulting PCS frequency in MHz */
 #define       MC_CMD_SET_CLOCK_OUT_PCS_FREQ_OFST 12
+#define       MC_CMD_SET_CLOCK_OUT_PCS_FREQ_LEN 4
 /* enum: The PCS clock domain doesn't exist / isn't controlled */
 #define          MC_CMD_SET_CLOCK_OUT_PCS_DOMAIN_UNSUPPORTED  0x0
 /* Resulting MC frequency in MHz */
 #define       MC_CMD_SET_CLOCK_OUT_MC_FREQ_OFST 16
+#define       MC_CMD_SET_CLOCK_OUT_MC_FREQ_LEN 4
 /* enum: The MC clock domain doesn't exist / isn't controlled */
 #define          MC_CMD_SET_CLOCK_OUT_MC_DOMAIN_UNSUPPORTED  0x0
 /* Resulting rmon frequency in MHz */
 #define       MC_CMD_SET_CLOCK_OUT_RMON_FREQ_OFST 20
+#define       MC_CMD_SET_CLOCK_OUT_RMON_FREQ_LEN 4
 /* enum: The rmon clock domain doesn't exist / isn't controlled */
 #define          MC_CMD_SET_CLOCK_OUT_RMON_DOMAIN_UNSUPPORTED  0x0
 /* Resulting vswitch frequency in MHz */
 #define       MC_CMD_SET_CLOCK_OUT_VSWITCH_FREQ_OFST 24
+#define       MC_CMD_SET_CLOCK_OUT_VSWITCH_FREQ_LEN 4
 /* enum: The vswitch clock domain doesn't exist / isn't controlled */
 #define          MC_CMD_SET_CLOCK_OUT_VSWITCH_DOMAIN_UNSUPPORTED  0x0
 
@@ -9692,11 +11530,12 @@
  */
 #define MC_CMD_DPCPU_RPC 0xae
 
-#define MC_CMD_0xae_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+#define MC_CMD_0xae_PRIVILEGE_CTG SRIOV_CTG_INSECURE
 
 /* MC_CMD_DPCPU_RPC_IN msgrequest */
 #define    MC_CMD_DPCPU_RPC_IN_LEN 36
 #define       MC_CMD_DPCPU_RPC_IN_CPU_OFST 0
+#define       MC_CMD_DPCPU_RPC_IN_CPU_LEN 4
 /* enum: RxDPCPU0 */
 #define          MC_CMD_DPCPU_RPC_IN_DPCPU_RX0  0x0
 /* enum: TxDPCPU0 */
@@ -9761,12 +11600,15 @@
 #define       MC_CMD_DPCPU_RPC_IN_WDATA_LEN 24
 /* Register data to write. Only valid in write/write-read. */
 #define       MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_DATA_OFST 16
+#define       MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_DATA_LEN 4
 /* Register address. */
 #define       MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_ADDRESS_OFST 20
+#define       MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_ADDRESS_LEN 4
 
 /* MC_CMD_DPCPU_RPC_OUT msgresponse */
 #define    MC_CMD_DPCPU_RPC_OUT_LEN 36
 #define       MC_CMD_DPCPU_RPC_OUT_RC_OFST 0
+#define       MC_CMD_DPCPU_RPC_OUT_RC_LEN 4
 /* DATA */
 #define       MC_CMD_DPCPU_RPC_OUT_DATA_OFST 4
 #define       MC_CMD_DPCPU_RPC_OUT_DATA_LEN 32
@@ -9777,9 +11619,13 @@
 #define       MC_CMD_DPCPU_RPC_OUT_RDATA_OFST 12
 #define       MC_CMD_DPCPU_RPC_OUT_RDATA_LEN 24
 #define       MC_CMD_DPCPU_RPC_OUT_CSR_ACCESS_READ_VAL_1_OFST 12
+#define       MC_CMD_DPCPU_RPC_OUT_CSR_ACCESS_READ_VAL_1_LEN 4
 #define       MC_CMD_DPCPU_RPC_OUT_CSR_ACCESS_READ_VAL_2_OFST 16
+#define       MC_CMD_DPCPU_RPC_OUT_CSR_ACCESS_READ_VAL_2_LEN 4
 #define       MC_CMD_DPCPU_RPC_OUT_CSR_ACCESS_READ_VAL_3_OFST 20
+#define       MC_CMD_DPCPU_RPC_OUT_CSR_ACCESS_READ_VAL_3_LEN 4
 #define       MC_CMD_DPCPU_RPC_OUT_CSR_ACCESS_READ_VAL_4_OFST 24
+#define       MC_CMD_DPCPU_RPC_OUT_CSR_ACCESS_READ_VAL_4_LEN 4
 
 
 /***********************************/
@@ -9794,6 +11640,7 @@
 #define    MC_CMD_TRIGGER_INTERRUPT_IN_LEN 4
 /* Interrupt level relative to base for function. */
 #define       MC_CMD_TRIGGER_INTERRUPT_IN_INTR_LEVEL_OFST 0
+#define       MC_CMD_TRIGGER_INTERRUPT_IN_INTR_LEVEL_LEN 4
 
 /* MC_CMD_TRIGGER_INTERRUPT_OUT msgresponse */
 #define    MC_CMD_TRIGGER_INTERRUPT_OUT_LEN 0
@@ -9811,6 +11658,7 @@
 #define    MC_CMD_SHMBOOT_OP_IN_LEN 4
 /* Identifies the operation to perform */
 #define       MC_CMD_SHMBOOT_OP_IN_SHMBOOT_OP_OFST 0
+#define       MC_CMD_SHMBOOT_OP_IN_SHMBOOT_OP_LEN 4
 /* enum: Copy slave_data section to the slave core. (Greenport only) */
 #define          MC_CMD_SHMBOOT_OP_IN_PUSH_SLAVE_DATA  0x0
 
@@ -9824,13 +11672,16 @@
  */
 #define MC_CMD_CAP_BLK_READ 0xe7
 
-#define MC_CMD_0xe7_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+#define MC_CMD_0xe7_PRIVILEGE_CTG SRIOV_CTG_INSECURE
 
 /* MC_CMD_CAP_BLK_READ_IN msgrequest */
 #define    MC_CMD_CAP_BLK_READ_IN_LEN 12
 #define       MC_CMD_CAP_BLK_READ_IN_CAP_REG_OFST 0
+#define       MC_CMD_CAP_BLK_READ_IN_CAP_REG_LEN 4
 #define       MC_CMD_CAP_BLK_READ_IN_ADDR_OFST 4
+#define       MC_CMD_CAP_BLK_READ_IN_ADDR_LEN 4
 #define       MC_CMD_CAP_BLK_READ_IN_COUNT_OFST 8
+#define       MC_CMD_CAP_BLK_READ_IN_COUNT_LEN 4
 
 /* MC_CMD_CAP_BLK_READ_OUT msgresponse */
 #define    MC_CMD_CAP_BLK_READ_OUT_LENMIN 8
@@ -9850,53 +11701,77 @@
  */
 #define MC_CMD_DUMP_DO 0xe8
 
-#define MC_CMD_0xe8_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+#define MC_CMD_0xe8_PRIVILEGE_CTG SRIOV_CTG_INSECURE
 
 /* MC_CMD_DUMP_DO_IN msgrequest */
 #define    MC_CMD_DUMP_DO_IN_LEN 52
 #define       MC_CMD_DUMP_DO_IN_PADDING_OFST 0
+#define       MC_CMD_DUMP_DO_IN_PADDING_LEN 4
 #define       MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_OFST 4
+#define       MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_LEN 4
 #define          MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM  0x0 /* enum */
 #define          MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_DEFAULT  0x1 /* enum */
 #define       MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_TYPE_OFST 8
+#define       MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_TYPE_LEN 4
 #define          MC_CMD_DUMP_DO_IN_DUMP_LOCATION_NVRAM  0x1 /* enum */
 #define          MC_CMD_DUMP_DO_IN_DUMP_LOCATION_HOST_MEMORY  0x2 /* enum */
 #define          MC_CMD_DUMP_DO_IN_DUMP_LOCATION_HOST_MEMORY_MLI  0x3 /* enum */
 #define          MC_CMD_DUMP_DO_IN_DUMP_LOCATION_UART  0x4 /* enum */
 #define       MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_NVRAM_PARTITION_TYPE_ID_OFST 12
+#define       MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_NVRAM_PARTITION_TYPE_ID_LEN 4
 #define       MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_NVRAM_OFFSET_OFST 16
+#define       MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_NVRAM_OFFSET_LEN 4
 #define       MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_HOST_MEMORY_ADDR_LO_OFST 12
+#define       MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_HOST_MEMORY_ADDR_LO_LEN 4
 #define       MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_HOST_MEMORY_ADDR_HI_OFST 16
+#define       MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_HOST_MEMORY_ADDR_HI_LEN 4
 #define       MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_HOST_MEMORY_MLI_ROOT_ADDR_LO_OFST 12
+#define       MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_HOST_MEMORY_MLI_ROOT_ADDR_LO_LEN 4
 #define          MC_CMD_DUMP_DO_IN_HOST_MEMORY_MLI_PAGE_SIZE  0x1000 /* enum */
 #define       MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_HOST_MEMORY_MLI_ROOT_ADDR_HI_OFST 16
+#define       MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_HOST_MEMORY_MLI_ROOT_ADDR_HI_LEN 4
 #define       MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_HOST_MEMORY_MLI_DEPTH_OFST 20
+#define       MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_HOST_MEMORY_MLI_DEPTH_LEN 4
 #define          MC_CMD_DUMP_DO_IN_HOST_MEMORY_MLI_MAX_DEPTH  0x2 /* enum */
 #define       MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_UART_PORT_OFST 12
+#define       MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_UART_PORT_LEN 4
 /* enum: The uart port this command was received over (if using a uart
  * transport)
  */
 #define          MC_CMD_DUMP_DO_IN_UART_PORT_SRC  0xff
 #define       MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_SIZE_OFST 24
+#define       MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_SIZE_LEN 4
 #define       MC_CMD_DUMP_DO_IN_DUMPFILE_DST_OFST 28
+#define       MC_CMD_DUMP_DO_IN_DUMPFILE_DST_LEN 4
 #define          MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM  0x0 /* enum */
 #define          MC_CMD_DUMP_DO_IN_DUMPFILE_DST_NVRAM_DUMP_PARTITION  0x1 /* enum */
 #define       MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM_TYPE_OFST 32
+#define       MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM_TYPE_LEN 4
 /*            Enum values, see field(s): */
 /*               MC_CMD_DUMP_DO_IN/DUMPSPEC_SRC_CUSTOM_TYPE */
 #define       MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM_NVRAM_PARTITION_TYPE_ID_OFST 36
+#define       MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM_NVRAM_PARTITION_TYPE_ID_LEN 4
 #define       MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM_NVRAM_OFFSET_OFST 40
+#define       MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM_NVRAM_OFFSET_LEN 4
 #define       MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM_HOST_MEMORY_ADDR_LO_OFST 36
+#define       MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM_HOST_MEMORY_ADDR_LO_LEN 4
 #define       MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM_HOST_MEMORY_ADDR_HI_OFST 40
+#define       MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM_HOST_MEMORY_ADDR_HI_LEN 4
 #define       MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM_HOST_MEMORY_MLI_ROOT_ADDR_LO_OFST 36
+#define       MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM_HOST_MEMORY_MLI_ROOT_ADDR_LO_LEN 4
 #define       MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM_HOST_MEMORY_MLI_ROOT_ADDR_HI_OFST 40
+#define       MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM_HOST_MEMORY_MLI_ROOT_ADDR_HI_LEN 4
 #define       MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM_HOST_MEMORY_MLI_DEPTH_OFST 44
+#define       MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM_HOST_MEMORY_MLI_DEPTH_LEN 4
 #define       MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM_UART_PORT_OFST 36
+#define       MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM_UART_PORT_LEN 4
 #define       MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM_SIZE_OFST 48
+#define       MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM_SIZE_LEN 4
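
The DUMPFILE_DST_CUSTOM_* fields deliberately overlap (NVRAM_PARTITION_TYPE_ID, HOST_MEMORY_ADDR_LO and UART_PORT all live at offset 36): the layout behaves like a tagged union keyed on DUMPFILE_DST_CUSTOM_TYPE. An annotated skeleton of the dispatch (function name illustrative):

#include <stdint.h>

static void place_dumpfile_dst_fields(uint32_t dst_custom_type)
{
	switch (dst_custom_type) {
	case MC_CMD_DUMP_DO_IN_DUMP_LOCATION_NVRAM:
		/* offsets 36/40 carry NVRAM_PARTITION_TYPE_ID / NVRAM_OFFSET */
		break;
	case MC_CMD_DUMP_DO_IN_DUMP_LOCATION_HOST_MEMORY:
		/* offsets 36/40 carry HOST_MEMORY_ADDR_LO / _HI */
		break;
	case MC_CMD_DUMP_DO_IN_DUMP_LOCATION_HOST_MEMORY_MLI:
		/* offsets 36/40/44 carry MLI root addr lo/hi and MLI_DEPTH */
		break;
	case MC_CMD_DUMP_DO_IN_DUMP_LOCATION_UART:
		/* offset 36 carries UART_PORT */
		break;
	}
}
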
 
 /* MC_CMD_DUMP_DO_OUT msgresponse */
 #define    MC_CMD_DUMP_DO_OUT_LEN 4
 #define       MC_CMD_DUMP_DO_OUT_DUMPFILE_SIZE_OFST 0
+#define       MC_CMD_DUMP_DO_OUT_DUMPFILE_SIZE_LEN 4
 
 
 /***********************************/
@@ -9905,41 +11780,64 @@
  */
 #define MC_CMD_DUMP_CONFIGURE_UNSOLICITED 0xe9
 
-#define MC_CMD_0xe9_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+#define MC_CMD_0xe9_PRIVILEGE_CTG SRIOV_CTG_INSECURE
 
 /* MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN msgrequest */
 #define    MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_LEN 52
 #define       MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_ENABLE_OFST 0
+#define       MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_ENABLE_LEN 4
 #define       MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_OFST 4
+#define       MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_LEN 4
 /*            Enum values, see field(s): */
 /*               MC_CMD_DUMP_DO/MC_CMD_DUMP_DO_IN/DUMPSPEC_SRC */
 #define       MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_CUSTOM_TYPE_OFST 8
+#define       MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_CUSTOM_TYPE_LEN 4
 /*            Enum values, see field(s): */
 /*               MC_CMD_DUMP_DO/MC_CMD_DUMP_DO_IN/DUMPSPEC_SRC_CUSTOM_TYPE */
 #define       MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_CUSTOM_NVRAM_PARTITION_TYPE_ID_OFST 12
+#define       MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_CUSTOM_NVRAM_PARTITION_TYPE_ID_LEN 4
 #define       MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_CUSTOM_NVRAM_OFFSET_OFST 16
+#define       MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_CUSTOM_NVRAM_OFFSET_LEN 4
 #define       MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_CUSTOM_HOST_MEMORY_ADDR_LO_OFST 12
+#define       MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_CUSTOM_HOST_MEMORY_ADDR_LO_LEN 4
 #define       MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_CUSTOM_HOST_MEMORY_ADDR_HI_OFST 16
+#define       MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_CUSTOM_HOST_MEMORY_ADDR_HI_LEN 4
 #define       MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_CUSTOM_HOST_MEMORY_MLI_ROOT_ADDR_LO_OFST 12
+#define       MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_CUSTOM_HOST_MEMORY_MLI_ROOT_ADDR_LO_LEN 4
 #define       MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_CUSTOM_HOST_MEMORY_MLI_ROOT_ADDR_HI_OFST 16
+#define       MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_CUSTOM_HOST_MEMORY_MLI_ROOT_ADDR_HI_LEN 4
 #define       MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_CUSTOM_HOST_MEMORY_MLI_DEPTH_OFST 20
+#define       MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_CUSTOM_HOST_MEMORY_MLI_DEPTH_LEN 4
 #define       MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_CUSTOM_UART_PORT_OFST 12
+#define       MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_CUSTOM_UART_PORT_LEN 4
 #define       MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_CUSTOM_SIZE_OFST 24
+#define       MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_CUSTOM_SIZE_LEN 4
 #define       MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_OFST 28
+#define       MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_LEN 4
 /*            Enum values, see field(s): */
 /*               MC_CMD_DUMP_DO/MC_CMD_DUMP_DO_IN/DUMPFILE_DST */
 #define       MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_CUSTOM_TYPE_OFST 32
+#define       MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_CUSTOM_TYPE_LEN 4
 /*            Enum values, see field(s): */
 /*               MC_CMD_DUMP_DO/MC_CMD_DUMP_DO_IN/DUMPSPEC_SRC_CUSTOM_TYPE */
 #define       MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_CUSTOM_NVRAM_PARTITION_TYPE_ID_OFST 36
+#define       MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_CUSTOM_NVRAM_PARTITION_TYPE_ID_LEN 4
 #define       MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_CUSTOM_NVRAM_OFFSET_OFST 40
+#define       MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_CUSTOM_NVRAM_OFFSET_LEN 4
 #define       MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_CUSTOM_HOST_MEMORY_ADDR_LO_OFST 36
+#define       MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_CUSTOM_HOST_MEMORY_ADDR_LO_LEN 4
 #define       MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_CUSTOM_HOST_MEMORY_ADDR_HI_OFST 40
+#define       MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_CUSTOM_HOST_MEMORY_ADDR_HI_LEN 4
 #define       MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_CUSTOM_HOST_MEMORY_MLI_ROOT_ADDR_LO_OFST 36
+#define       MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_CUSTOM_HOST_MEMORY_MLI_ROOT_ADDR_LO_LEN 4
 #define       MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_CUSTOM_HOST_MEMORY_MLI_ROOT_ADDR_HI_OFST 40
+#define       MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_CUSTOM_HOST_MEMORY_MLI_ROOT_ADDR_HI_LEN 4
 #define       MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_CUSTOM_HOST_MEMORY_MLI_DEPTH_OFST 44
+#define       MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_CUSTOM_HOST_MEMORY_MLI_DEPTH_LEN 4
 #define       MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_CUSTOM_UART_PORT_OFST 36
+#define       MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_CUSTOM_UART_PORT_LEN 4
 #define       MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_CUSTOM_SIZE_OFST 48
+#define       MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_CUSTOM_SIZE_LEN 4
 
 
 /***********************************/
@@ -9950,17 +11848,20 @@
  */
 #define MC_CMD_SET_PSU 0xea
 
-#define MC_CMD_0xea_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+#define MC_CMD_0xea_PRIVILEGE_CTG SRIOV_CTG_INSECURE
 
 /* MC_CMD_SET_PSU_IN msgrequest */
 #define    MC_CMD_SET_PSU_IN_LEN 12
 #define       MC_CMD_SET_PSU_IN_PARAM_OFST 0
+#define       MC_CMD_SET_PSU_IN_PARAM_LEN 4
 #define          MC_CMD_SET_PSU_IN_PARAM_SUPPLY_VOLTAGE  0x0 /* enum */
 #define       MC_CMD_SET_PSU_IN_RAIL_OFST 4
+#define       MC_CMD_SET_PSU_IN_RAIL_LEN 4
 #define          MC_CMD_SET_PSU_IN_RAIL_0V9  0x0 /* enum */
 #define          MC_CMD_SET_PSU_IN_RAIL_1V2  0x1 /* enum */
 /* desired value, e.g. voltage in mV */
 #define       MC_CMD_SET_PSU_IN_VALUE_OFST 8
+#define       MC_CMD_SET_PSU_IN_VALUE_LEN 4
 
 /* MC_CMD_SET_PSU_OUT msgresponse */
 #define    MC_CMD_SET_PSU_OUT_LEN 0
@@ -9980,7 +11881,9 @@
 /* MC_CMD_GET_FUNCTION_INFO_OUT msgresponse */
 #define    MC_CMD_GET_FUNCTION_INFO_OUT_LEN 8
 #define       MC_CMD_GET_FUNCTION_INFO_OUT_PF_OFST 0
+#define       MC_CMD_GET_FUNCTION_INFO_OUT_PF_LEN 4
 #define       MC_CMD_GET_FUNCTION_INFO_OUT_VF_OFST 4
+#define       MC_CMD_GET_FUNCTION_INFO_OUT_VF_LEN 4
 
 
 /***********************************/
@@ -10016,12 +11919,16 @@
 #define    MC_CMD_UART_SEND_DATA_OUT_LEN(num) (16+1*(num))
 /* CRC32 over OFFSET, LENGTH, RESERVED, DATA */
 #define       MC_CMD_UART_SEND_DATA_OUT_CHECKSUM_OFST 0
+#define       MC_CMD_UART_SEND_DATA_OUT_CHECKSUM_LEN 4
 /* Offset at which to write the data */
 #define       MC_CMD_UART_SEND_DATA_OUT_OFFSET_OFST 4
+#define       MC_CMD_UART_SEND_DATA_OUT_OFFSET_LEN 4
 /* Length of data */
 #define       MC_CMD_UART_SEND_DATA_OUT_LENGTH_OFST 8
+#define       MC_CMD_UART_SEND_DATA_OUT_LENGTH_LEN 4
 /* Reserved for future use */
 #define       MC_CMD_UART_SEND_DATA_OUT_RESERVED_OFST 12
+#define       MC_CMD_UART_SEND_DATA_OUT_RESERVED_LEN 4
 #define       MC_CMD_UART_SEND_DATA_OUT_DATA_OFST 16
 #define       MC_CMD_UART_SEND_DATA_OUT_DATA_LEN 1
 #define       MC_CMD_UART_SEND_DATA_OUT_DATA_MINNUM 0
@@ -10044,12 +11951,16 @@
 #define    MC_CMD_UART_RECV_DATA_OUT_LEN 16
 /* CRC32 over OFFSET, LENGTH, RESERVED */
 #define       MC_CMD_UART_RECV_DATA_OUT_CHECKSUM_OFST 0
+#define       MC_CMD_UART_RECV_DATA_OUT_CHECKSUM_LEN 4
 /* Offset from which to read the data */
 #define       MC_CMD_UART_RECV_DATA_OUT_OFFSET_OFST 4
+#define       MC_CMD_UART_RECV_DATA_OUT_OFFSET_LEN 4
 /* Length of data */
 #define       MC_CMD_UART_RECV_DATA_OUT_LENGTH_OFST 8
+#define       MC_CMD_UART_RECV_DATA_OUT_LENGTH_LEN 4
 /* Reserved for future use */
 #define       MC_CMD_UART_RECV_DATA_OUT_RESERVED_OFST 12
+#define       MC_CMD_UART_RECV_DATA_OUT_RESERVED_LEN 4
 
 /* MC_CMD_UART_RECV_DATA_IN msgresponse */
 #define    MC_CMD_UART_RECV_DATA_IN_LENMIN 16
@@ -10057,12 +11968,16 @@
 #define    MC_CMD_UART_RECV_DATA_IN_LEN(num) (16+1*(num))
 /* CRC32 over RESERVED1, RESERVED2, RESERVED3, DATA */
 #define       MC_CMD_UART_RECV_DATA_IN_CHECKSUM_OFST 0
+#define       MC_CMD_UART_RECV_DATA_IN_CHECKSUM_LEN 4
 /* Reserved for future use */
 #define       MC_CMD_UART_RECV_DATA_IN_RESERVED1_OFST 4
+#define       MC_CMD_UART_RECV_DATA_IN_RESERVED1_LEN 4
 /* Reserved for future use */
 #define       MC_CMD_UART_RECV_DATA_IN_RESERVED2_OFST 8
+#define       MC_CMD_UART_RECV_DATA_IN_RESERVED2_LEN 4
 /* Reserved for future use */
 #define       MC_CMD_UART_RECV_DATA_IN_RESERVED3_OFST 12
+#define       MC_CMD_UART_RECV_DATA_IN_RESERVED3_LEN 4
 #define       MC_CMD_UART_RECV_DATA_IN_DATA_OFST 16
 #define       MC_CMD_UART_RECV_DATA_IN_DATA_LEN 1
 #define       MC_CMD_UART_RECV_DATA_IN_DATA_MINNUM 0
@@ -10075,14 +11990,16 @@
  */
 #define MC_CMD_READ_FUSES 0xf0
 
-#define MC_CMD_0xf0_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+#define MC_CMD_0xf0_PRIVILEGE_CTG SRIOV_CTG_INSECURE
 
 /* MC_CMD_READ_FUSES_IN msgrequest */
 #define    MC_CMD_READ_FUSES_IN_LEN 8
 /* Offset in OTP to read */
 #define       MC_CMD_READ_FUSES_IN_OFFSET_OFST 0
+#define       MC_CMD_READ_FUSES_IN_OFFSET_LEN 4
 /* Length of data to read in bytes */
 #define       MC_CMD_READ_FUSES_IN_LENGTH_OFST 4
+#define       MC_CMD_READ_FUSES_IN_LENGTH_LEN 4
 
 /* MC_CMD_READ_FUSES_OUT msgresponse */
 #define    MC_CMD_READ_FUSES_OUT_LENMIN 4
@@ -10090,6 +12007,7 @@
 #define    MC_CMD_READ_FUSES_OUT_LEN(num) (4+1*(num))
 /* Length of returned OTP data in bytes */
 #define       MC_CMD_READ_FUSES_OUT_LENGTH_OFST 0
+#define       MC_CMD_READ_FUSES_OUT_LENGTH_LEN 4
 /* Returned data */
 #define       MC_CMD_READ_FUSES_OUT_DATA_OFST 4
 #define       MC_CMD_READ_FUSES_OUT_DATA_LEN 1
@@ -10197,6 +12115,60 @@
 #define          MC_CMD_KR_TUNE_RXEQ_GET_OUT_CTLE_EQC  0x9
 /* enum: CTLE EQ Resistor (0-7, Medford) */
 #define          MC_CMD_KR_TUNE_RXEQ_GET_OUT_CTLE_EQRES  0xa
+/* enum: CTLE gain (0-31, Medford2) */
+#define          MC_CMD_KR_TUNE_RXEQ_GET_OUT_CTLE_GAIN  0xb
+/* enum: CTLE pole (0-31, Medford2) */
+#define          MC_CMD_KR_TUNE_RXEQ_GET_OUT_CTLE_POLE  0xc
+/* enum: CTLE peaking (0-31, Medford2) */
+#define          MC_CMD_KR_TUNE_RXEQ_GET_OUT_CTLE_PEAK  0xd
+/* enum: DFE Tap1 - even path (Medford2 - 6 bit signed (-29 - +29)) */
+#define          MC_CMD_KR_TUNE_RXEQ_GET_OUT_DFE_TAP1_EVEN  0xe
+/* enum: DFE Tap1 - odd path (Medford2 - 6 bit signed (-29 - +29)) */
+#define          MC_CMD_KR_TUNE_RXEQ_GET_OUT_DFE_TAP1_ODD  0xf
+/* enum: DFE Tap2 (Medford2 - 6 bit signed (-20 - +20)) */
+#define          MC_CMD_KR_TUNE_RXEQ_GET_OUT_DFE_TAP2  0x10
+/* enum: DFE Tap3 (Medford2 - 6 bit signed (-20 - +20)) */
+#define          MC_CMD_KR_TUNE_RXEQ_GET_OUT_DFE_TAP3  0x11
+/* enum: DFE Tap4 (Medford2 - 6 bit signed (-20 - +20)) */
+#define          MC_CMD_KR_TUNE_RXEQ_GET_OUT_DFE_TAP4  0x12
+/* enum: DFE Tap5 (Medford2 - 6 bit signed (-24 - +24)) */
+#define          MC_CMD_KR_TUNE_RXEQ_GET_OUT_DFE_TAP5  0x13
+/* enum: DFE Tap6 (Medford2 - 6 bit signed (-24 - +24)) */
+#define          MC_CMD_KR_TUNE_RXEQ_GET_OUT_DFE_TAP6  0x14
+/* enum: DFE Tap7 (Medford2 - 6 bit signed (-24 - +24)) */
+#define          MC_CMD_KR_TUNE_RXEQ_GET_OUT_DFE_TAP7  0x15
+/* enum: DFE Tap8 (Medford2 - 6 bit signed (-24 - +24)) */
+#define          MC_CMD_KR_TUNE_RXEQ_GET_OUT_DFE_TAP8  0x16
+/* enum: DFE Tap9 (Medford2 - 6 bit signed (-24 - +24)) */
+#define          MC_CMD_KR_TUNE_RXEQ_GET_OUT_DFE_TAP9  0x17
+/* enum: DFE Tap10 (Medford2 - 6 bit signed (-24 - +24)) */
+#define          MC_CMD_KR_TUNE_RXEQ_GET_OUT_DFE_TAP10  0x18
+/* enum: DFE Tap11 (Medford2 - 6 bit signed (-24 - +24)) */
+#define          MC_CMD_KR_TUNE_RXEQ_GET_OUT_DFE_TAP11  0x19
+/* enum: DFE Tap12 (Medford2 - 6 bit signed (-24 - +24)) */
+#define          MC_CMD_KR_TUNE_RXEQ_GET_OUT_DFE_TAP12  0x1a
+/* enum: I/Q clk offset (Medford2 - 4 bit signed (-5 - +5)) */
+#define          MC_CMD_KR_TUNE_RXEQ_GET_OUT_IQ_OFF  0x1b
+/* enum: Negative h1 polarity data sampler offset calibration code, even path
+ * (Medford2 - 6 bit signed (-29 - +29))
+ */
+#define          MC_CMD_KR_TUNE_RXEQ_GET_OUT_H1N_OFF_EVEN  0x1c
+/* enum: Negative h1 polarity data sampler offset calibration code, odd path
+ * (Medford2 - 6 bit signed (-29 - +29))
+ */
+#define          MC_CMD_KR_TUNE_RXEQ_GET_OUT_H1N_OFF_ODD  0x1d
+/* enum: Positive h1 polarity data sampler offset calibration code, even path
+ * (Medford2 - 6 bit signed (-29 - +29))
+ */
+#define          MC_CMD_KR_TUNE_RXEQ_GET_OUT_H1P_OFF_EVEN  0x1e
+/* enum: Positive h1 polarity data sampler offset calibration code, odd path
+ * (Medford2 - 6 bit signed (-29 - +29))
+ */
+#define          MC_CMD_KR_TUNE_RXEQ_GET_OUT_H1P_OFF_ODD  0x1f
+/* enum: CDR calibration loop code (Medford2) */
+#define          MC_CMD_KR_TUNE_RXEQ_GET_OUT_CDR_PVT  0x20
+/* enum: CDR integral loop code (Medford2) */
+#define          MC_CMD_KR_TUNE_RXEQ_GET_OUT_CDR_INTEG  0x21
 #define        MC_CMD_KR_TUNE_RXEQ_GET_OUT_PARAM_LANE_LBN 8
 #define        MC_CMD_KR_TUNE_RXEQ_GET_OUT_PARAM_LANE_WIDTH 3
 #define          MC_CMD_KR_TUNE_RXEQ_GET_OUT_LANE_0  0x0 /* enum */
@@ -10268,7 +12240,7 @@
 #define       MC_CMD_KR_TUNE_TXEQ_GET_OUT_PARAM_MAXNUM 63
 #define        MC_CMD_KR_TUNE_TXEQ_GET_OUT_PARAM_ID_LBN 0
 #define        MC_CMD_KR_TUNE_TXEQ_GET_OUT_PARAM_ID_WIDTH 8
-/* enum: TX Amplitude (Huntington, Medford) */
+/* enum: TX Amplitude (Huntington, Medford, Medford2) */
 #define          MC_CMD_KR_TUNE_TXEQ_GET_OUT_TX_LEV  0x0
 /* enum: De-Emphasis Tap1 Magnitude (0-7) (Huntington) */
 #define          MC_CMD_KR_TUNE_TXEQ_GET_OUT_TX_MODE  0x1
@@ -10290,9 +12262,9 @@
 #define          MC_CMD_KR_TUNE_TXEQ_GET_OUT_TX_RT_SET  0x9
 /* enum: TX Amplitude Fine control (Medford) */
 #define          MC_CMD_KR_TUNE_TXEQ_GET_OUT_TX_LEV_FINE  0xa
-/* enum: Pre-shoot Tap (Medford) */
+/* enum: Pre-shoot Tap (Medford, Medford2) */
 #define          MC_CMD_KR_TUNE_TXEQ_GET_OUT_TAP_ADV  0xb
-/* enum: De-emphasis Tap (Medford) */
+/* enum: De-emphasis Tap (Medford, Medford2) */
 #define          MC_CMD_KR_TUNE_TXEQ_GET_OUT_TAP_DLY  0xc
 #define        MC_CMD_KR_TUNE_TXEQ_GET_OUT_PARAM_LANE_LBN 8
 #define        MC_CMD_KR_TUNE_TXEQ_GET_OUT_PARAM_LANE_WIDTH 3
@@ -10361,7 +12333,24 @@
 /* Align the arguments to 32 bits */
 #define       MC_CMD_KR_TUNE_START_EYE_PLOT_IN_KR_TUNE_RSVD_OFST 1
 #define       MC_CMD_KR_TUNE_START_EYE_PLOT_IN_KR_TUNE_RSVD_LEN 3
+/* Port-relative lane to scan eye on */
 #define       MC_CMD_KR_TUNE_START_EYE_PLOT_IN_LANE_OFST 4
+#define       MC_CMD_KR_TUNE_START_EYE_PLOT_IN_LANE_LEN 4
+
+/* MC_CMD_KR_TUNE_START_EYE_PLOT_V2_IN msgrequest */
+#define    MC_CMD_KR_TUNE_START_EYE_PLOT_V2_IN_LEN 12
+/* Requested operation */
+#define       MC_CMD_KR_TUNE_START_EYE_PLOT_V2_IN_KR_TUNE_OP_OFST 0
+#define       MC_CMD_KR_TUNE_START_EYE_PLOT_V2_IN_KR_TUNE_OP_LEN 1
+/* Align the arguments to 32 bits */
+#define       MC_CMD_KR_TUNE_START_EYE_PLOT_V2_IN_KR_TUNE_RSVD_OFST 1
+#define       MC_CMD_KR_TUNE_START_EYE_PLOT_V2_IN_KR_TUNE_RSVD_LEN 3
+/* Port-relative lane to scan eye on */
+#define       MC_CMD_KR_TUNE_START_EYE_PLOT_V2_IN_LANE_OFST 4
+#define       MC_CMD_KR_TUNE_START_EYE_PLOT_V2_IN_LANE_LEN 4
+/* Scan duration / cycle count */
+#define       MC_CMD_KR_TUNE_START_EYE_PLOT_V2_IN_BER_OFST 8
+#define       MC_CMD_KR_TUNE_START_EYE_PLOT_V2_IN_BER_LEN 4
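
The V2 request above extends the original eye-plot request with a BER dword at offset 8 carrying the scan duration. As an illustration only (not part of this patch), a host-side sketch of packing the 12-byte request, assuming the defines above are in scope; put_le32 and pack_eye_plot_v2 are hypothetical names, and the op value comes from the KR_TUNE op enum that this hunk does not show:

#include <stdint.h>
#include <string.h>

/* Illustrative helper: store a 32-bit value little-endian at a byte offset. */
static void put_le32(uint8_t *buf, unsigned int ofst, uint32_t val)
{
	buf[ofst + 0] = val & 0xff;
	buf[ofst + 1] = (val >> 8) & 0xff;
	buf[ofst + 2] = (val >> 16) & 0xff;
	buf[ofst + 3] = (val >> 24) & 0xff;
}

/* Pack op byte, three reserved alignment bytes, lane, then scan duration. */
static void pack_eye_plot_v2(uint8_t *buf, uint8_t op, uint32_t lane,
			     uint32_t ber)
{
	memset(buf, 0, MC_CMD_KR_TUNE_START_EYE_PLOT_V2_IN_LEN);
	buf[MC_CMD_KR_TUNE_START_EYE_PLOT_V2_IN_KR_TUNE_OP_OFST] = op;
	put_le32(buf, MC_CMD_KR_TUNE_START_EYE_PLOT_V2_IN_LANE_OFST, lane);
	put_le32(buf, MC_CMD_KR_TUNE_START_EYE_PLOT_V2_IN_BER_OFST, ber);
}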
 
 /* MC_CMD_KR_TUNE_START_EYE_PLOT_OUT msgresponse */
 #define    MC_CMD_KR_TUNE_START_EYE_PLOT_OUT_LEN 0
@@ -10393,10 +12382,12 @@
 #define       MC_CMD_KR_TUNE_READ_FOM_IN_KR_TUNE_RSVD_OFST 1
 #define       MC_CMD_KR_TUNE_READ_FOM_IN_KR_TUNE_RSVD_LEN 3
 #define       MC_CMD_KR_TUNE_READ_FOM_IN_LANE_OFST 4
+#define       MC_CMD_KR_TUNE_READ_FOM_IN_LANE_LEN 4
 
 /* MC_CMD_KR_TUNE_READ_FOM_OUT msgresponse */
 #define    MC_CMD_KR_TUNE_READ_FOM_OUT_LEN 4
 #define       MC_CMD_KR_TUNE_READ_FOM_OUT_FOM_OFST 0
+#define       MC_CMD_KR_TUNE_READ_FOM_OUT_FOM_LEN 4
 
 
 /***********************************/
@@ -10594,6 +12585,7 @@
 #define       MC_CMD_PCIE_TUNE_START_EYE_PLOT_IN_PCIE_TUNE_RSVD_OFST 1
 #define       MC_CMD_PCIE_TUNE_START_EYE_PLOT_IN_PCIE_TUNE_RSVD_LEN 3
 #define       MC_CMD_PCIE_TUNE_START_EYE_PLOT_IN_LANE_OFST 4
+#define       MC_CMD_PCIE_TUNE_START_EYE_PLOT_IN_LANE_LEN 4
 
 /* MC_CMD_PCIE_TUNE_START_EYE_PLOT_OUT msgresponse */
 #define    MC_CMD_PCIE_TUNE_START_EYE_PLOT_OUT_LEN 0
@@ -10636,6 +12628,7 @@
 #define    MC_CMD_LICENSING_IN_LEN 4
 /* identifies the type of operation requested */
 #define       MC_CMD_LICENSING_IN_OP_OFST 0
+#define       MC_CMD_LICENSING_IN_OP_LEN 4
 /* enum: re-read and apply licenses after a license key partition update; note
  * that this operation returns a zero-length response
  */
@@ -10647,23 +12640,30 @@
 #define    MC_CMD_LICENSING_OUT_LEN 28
 /* count of application keys which are valid */
 #define       MC_CMD_LICENSING_OUT_VALID_APP_KEYS_OFST 0
+#define       MC_CMD_LICENSING_OUT_VALID_APP_KEYS_LEN 4
 /* sum of UNVERIFIABLE_APP_KEYS + WRONG_NODE_APP_KEYS (for compatibility with
  * MC_CMD_FC_OP_LICENSE)
  */
 #define       MC_CMD_LICENSING_OUT_INVALID_APP_KEYS_OFST 4
+#define       MC_CMD_LICENSING_OUT_INVALID_APP_KEYS_LEN 4
 /* count of application keys which are invalid due to being blacklisted */
 #define       MC_CMD_LICENSING_OUT_BLACKLISTED_APP_KEYS_OFST 8
+#define       MC_CMD_LICENSING_OUT_BLACKLISTED_APP_KEYS_LEN 4
 /* count of application keys which are invalid due to being unverifiable */
 #define       MC_CMD_LICENSING_OUT_UNVERIFIABLE_APP_KEYS_OFST 12
+#define       MC_CMD_LICENSING_OUT_UNVERIFIABLE_APP_KEYS_LEN 4
 /* count of application keys which are invalid due to being for the wrong node
  */
 #define       MC_CMD_LICENSING_OUT_WRONG_NODE_APP_KEYS_OFST 16
+#define       MC_CMD_LICENSING_OUT_WRONG_NODE_APP_KEYS_LEN 4
 /* licensing state (for diagnostics; the exact meaning of the bits in this
 * field is private to the firmware)
  */
 #define       MC_CMD_LICENSING_OUT_LICENSING_STATE_OFST 20
+#define       MC_CMD_LICENSING_OUT_LICENSING_STATE_LEN 4
 /* licensing subsystem self-test report (for manftest) */
 #define       MC_CMD_LICENSING_OUT_LICENSING_SELF_TEST_OFST 24
+#define       MC_CMD_LICENSING_OUT_LICENSING_SELF_TEST_LEN 4
 /* enum: licensing subsystem self-test failed */
 #define          MC_CMD_LICENSING_OUT_SELF_TEST_FAIL  0x0
 /* enum: licensing subsystem self-test passed */
@@ -10683,6 +12683,7 @@
 #define    MC_CMD_LICENSING_V3_IN_LEN 4
 /* identifies the type of operation requested */
 #define       MC_CMD_LICENSING_V3_IN_OP_OFST 0
+#define       MC_CMD_LICENSING_V3_IN_OP_LEN 4
 /* enum: re-read and apply licenses after a license key partition update; note
  * that this operation returns a zero-length response
  */
@@ -10696,20 +12697,26 @@
 #define    MC_CMD_LICENSING_V3_OUT_LEN 88
 /* count of keys which are valid */
 #define       MC_CMD_LICENSING_V3_OUT_VALID_KEYS_OFST 0
+#define       MC_CMD_LICENSING_V3_OUT_VALID_KEYS_LEN 4
 /* sum of UNVERIFIABLE_KEYS + WRONG_NODE_KEYS (for compatibility with
  * MC_CMD_FC_OP_LICENSE)
  */
 #define       MC_CMD_LICENSING_V3_OUT_INVALID_KEYS_OFST 4
+#define       MC_CMD_LICENSING_V3_OUT_INVALID_KEYS_LEN 4
 /* count of keys which are invalid due to being unverifiable */
 #define       MC_CMD_LICENSING_V3_OUT_UNVERIFIABLE_KEYS_OFST 8
+#define       MC_CMD_LICENSING_V3_OUT_UNVERIFIABLE_KEYS_LEN 4
 /* count of keys which are invalid due to being for the wrong node */
 #define       MC_CMD_LICENSING_V3_OUT_WRONG_NODE_KEYS_OFST 12
+#define       MC_CMD_LICENSING_V3_OUT_WRONG_NODE_KEYS_LEN 4
 /* licensing state (for diagnostics; the exact meaning of the bits in this
 * field is private to the firmware)
  */
 #define       MC_CMD_LICENSING_V3_OUT_LICENSING_STATE_OFST 16
+#define       MC_CMD_LICENSING_V3_OUT_LICENSING_STATE_LEN 4
 /* licensing subsystem self-test report (for manftest) */
 #define       MC_CMD_LICENSING_V3_OUT_LICENSING_SELF_TEST_OFST 20
+#define       MC_CMD_LICENSING_V3_OUT_LICENSING_SELF_TEST_LEN 4
 /* enum: licensing subsystem self-test failed */
 #define          MC_CMD_LICENSING_V3_OUT_SELF_TEST_FAIL  0x0
 /* enum: licensing subsystem self-test passed */
@@ -10750,8 +12757,10 @@
 #define    MC_CMD_LICENSING_GET_ID_V3_OUT_LEN(num) (8+1*(num))
 /* type of license (eg 3) */
 #define       MC_CMD_LICENSING_GET_ID_V3_OUT_LICENSE_TYPE_OFST 0
+#define       MC_CMD_LICENSING_GET_ID_V3_OUT_LICENSE_TYPE_LEN 4
 /* length of the license ID (in bytes) */
 #define       MC_CMD_LICENSING_GET_ID_V3_OUT_LICENSE_ID_LENGTH_OFST 4
+#define       MC_CMD_LICENSING_GET_ID_V3_OUT_LICENSE_ID_LENGTH_LEN 4
 /* the unique license ID of the adapter */
 #define       MC_CMD_LICENSING_GET_ID_V3_OUT_LICENSE_ID_OFST 8
 #define       MC_CMD_LICENSING_GET_ID_V3_OUT_LICENSE_ID_LEN 1
@@ -10789,11 +12798,13 @@
 #define    MC_CMD_GET_LICENSED_APP_STATE_IN_LEN 4
 /* application ID to query (LICENSED_APP_ID_xxx) */
 #define       MC_CMD_GET_LICENSED_APP_STATE_IN_APP_ID_OFST 0
+#define       MC_CMD_GET_LICENSED_APP_STATE_IN_APP_ID_LEN 4
 
 /* MC_CMD_GET_LICENSED_APP_STATE_OUT msgresponse */
 #define    MC_CMD_GET_LICENSED_APP_STATE_OUT_LEN 4
 /* state of this application */
 #define       MC_CMD_GET_LICENSED_APP_STATE_OUT_STATE_OFST 0
+#define       MC_CMD_GET_LICENSED_APP_STATE_OUT_STATE_LEN 4
 /* enum: no (or invalid) license is present for the application */
 #define          MC_CMD_GET_LICENSED_APP_STATE_OUT_NOT_LICENSED  0x0
 /* enum: a valid license is present for the application */
@@ -10824,6 +12835,7 @@
 #define    MC_CMD_GET_LICENSED_V3_APP_STATE_OUT_LEN 4
 /* state of this application */
 #define       MC_CMD_GET_LICENSED_V3_APP_STATE_OUT_STATE_OFST 0
+#define       MC_CMD_GET_LICENSED_V3_APP_STATE_OUT_STATE_LEN 4
 /* enum: no (or invalid) license is present for the application */
 #define          MC_CMD_GET_LICENSED_V3_APP_STATE_OUT_NOT_LICENSED  0x0
 /* enum: a valid license is present for the application */
@@ -10874,8 +12886,10 @@
 #define    MC_CMD_LICENSED_APP_OP_IN_LEN(num) (8+4*(num))
 /* application ID */
 #define       MC_CMD_LICENSED_APP_OP_IN_APP_ID_OFST 0
+#define       MC_CMD_LICENSED_APP_OP_IN_APP_ID_LEN 4
 /* the type of operation requested */
 #define       MC_CMD_LICENSED_APP_OP_IN_OP_OFST 4
+#define       MC_CMD_LICENSED_APP_OP_IN_OP_LEN 4
 /* enum: validate application */
 #define          MC_CMD_LICENSED_APP_OP_IN_OP_VALIDATE  0x0
 /* enum: mask application */
@@ -10900,8 +12914,10 @@
 #define    MC_CMD_LICENSED_APP_OP_VALIDATE_IN_LEN 72
 /* application ID */
 #define       MC_CMD_LICENSED_APP_OP_VALIDATE_IN_APP_ID_OFST 0
+#define       MC_CMD_LICENSED_APP_OP_VALIDATE_IN_APP_ID_LEN 4
 /* the type of operation requested */
 #define       MC_CMD_LICENSED_APP_OP_VALIDATE_IN_OP_OFST 4
+#define       MC_CMD_LICENSED_APP_OP_VALIDATE_IN_OP_LEN 4
 /* validation challenge */
 #define       MC_CMD_LICENSED_APP_OP_VALIDATE_IN_CHALLENGE_OFST 8
 #define       MC_CMD_LICENSED_APP_OP_VALIDATE_IN_CHALLENGE_LEN 64
@@ -10910,6 +12926,7 @@
 #define    MC_CMD_LICENSED_APP_OP_VALIDATE_OUT_LEN 68
 /* feature expiry (time_t) */
 #define       MC_CMD_LICENSED_APP_OP_VALIDATE_OUT_EXPIRY_OFST 0
+#define       MC_CMD_LICENSED_APP_OP_VALIDATE_OUT_EXPIRY_LEN 4
 /* validation response */
 #define       MC_CMD_LICENSED_APP_OP_VALIDATE_OUT_RESPONSE_OFST 4
 #define       MC_CMD_LICENSED_APP_OP_VALIDATE_OUT_RESPONSE_LEN 64
@@ -10918,10 +12935,13 @@
 #define    MC_CMD_LICENSED_APP_OP_MASK_IN_LEN 12
 /* application ID */
 #define       MC_CMD_LICENSED_APP_OP_MASK_IN_APP_ID_OFST 0
+#define       MC_CMD_LICENSED_APP_OP_MASK_IN_APP_ID_LEN 4
 /* the type of operation requested */
 #define       MC_CMD_LICENSED_APP_OP_MASK_IN_OP_OFST 4
+#define       MC_CMD_LICENSED_APP_OP_MASK_IN_OP_LEN 4
 /* flag */
 #define       MC_CMD_LICENSED_APP_OP_MASK_IN_FLAG_OFST 8
+#define       MC_CMD_LICENSED_APP_OP_MASK_IN_FLAG_LEN 4
 
 /* MC_CMD_LICENSED_APP_OP_MASK_OUT msgresponse */
 #define    MC_CMD_LICENSED_APP_OP_MASK_OUT_LEN 0
@@ -10959,8 +12979,10 @@
 #define       MC_CMD_LICENSED_V3_VALIDATE_APP_OUT_RESPONSE_LEN 96
 /* application expiry time */
 #define       MC_CMD_LICENSED_V3_VALIDATE_APP_OUT_EXPIRY_TIME_OFST 96
+#define       MC_CMD_LICENSED_V3_VALIDATE_APP_OUT_EXPIRY_TIME_LEN 4
 /* application expiry units */
 #define       MC_CMD_LICENSED_V3_VALIDATE_APP_OUT_EXPIRY_UNITS_OFST 100
+#define       MC_CMD_LICENSED_V3_VALIDATE_APP_OUT_EXPIRY_UNITS_LEN 4
 /* enum: expiry units are accounting units */
 #define          MC_CMD_LICENSED_V3_VALIDATE_APP_OUT_EXPIRY_UNIT_ACC  0x0
 /* enum: expiry units are calendar days */
@@ -10984,7 +13006,7 @@
  */
 #define MC_CMD_LICENSED_V3_MASK_FEATURES 0xd5
 
-#define MC_CMD_0xd5_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+#define MC_CMD_0xd5_PRIVILEGE_CTG SRIOV_CTG_ADMIN
 
 /* MC_CMD_LICENSED_V3_MASK_FEATURES_IN msgrequest */
 #define    MC_CMD_LICENSED_V3_MASK_FEATURES_IN_LEN 12
@@ -10995,6 +13017,7 @@
 #define       MC_CMD_LICENSED_V3_MASK_FEATURES_IN_MASK_HI_OFST 4
 /* whether to turn on or turn off the masked features */
 #define       MC_CMD_LICENSED_V3_MASK_FEATURES_IN_FLAG_OFST 8
+#define       MC_CMD_LICENSED_V3_MASK_FEATURES_IN_FLAG_LEN 4
 /* enum: turn the features off */
 #define          MC_CMD_LICENSED_V3_MASK_FEATURES_IN_OFF  0x0
 /* enum: turn the features back on */
@@ -11014,12 +13037,13 @@
  */
 #define MC_CMD_LICENSING_V3_TEMPORARY 0xd6
 
-#define MC_CMD_0xd6_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+#define MC_CMD_0xd6_PRIVILEGE_CTG SRIOV_CTG_ADMIN
 
 /* MC_CMD_LICENSING_V3_TEMPORARY_IN msgrequest */
 #define    MC_CMD_LICENSING_V3_TEMPORARY_IN_LEN 4
 /* operation code */
 #define       MC_CMD_LICENSING_V3_TEMPORARY_IN_OP_OFST 0
+#define       MC_CMD_LICENSING_V3_TEMPORARY_IN_OP_LEN 4
 /* enum: install a new license, overwriting any existing temporary license.
  * This is an asynchronous operation owing to the time taken to validate an
  * ECDSA license
@@ -11037,6 +13061,7 @@
 /* MC_CMD_LICENSING_V3_TEMPORARY_IN_SET msgrequest */
 #define    MC_CMD_LICENSING_V3_TEMPORARY_IN_SET_LEN 164
 #define       MC_CMD_LICENSING_V3_TEMPORARY_IN_SET_OP_OFST 0
+#define       MC_CMD_LICENSING_V3_TEMPORARY_IN_SET_OP_LEN 4
 /* ECDSA license and signature */
 #define       MC_CMD_LICENSING_V3_TEMPORARY_IN_SET_LICENSE_OFST 4
 #define       MC_CMD_LICENSING_V3_TEMPORARY_IN_SET_LICENSE_LEN 160
@@ -11044,15 +13069,18 @@
 /* MC_CMD_LICENSING_V3_TEMPORARY_IN_CLEAR msgrequest */
 #define    MC_CMD_LICENSING_V3_TEMPORARY_IN_CLEAR_LEN 4
 #define       MC_CMD_LICENSING_V3_TEMPORARY_IN_CLEAR_OP_OFST 0
+#define       MC_CMD_LICENSING_V3_TEMPORARY_IN_CLEAR_OP_LEN 4
 
 /* MC_CMD_LICENSING_V3_TEMPORARY_IN_STATUS msgrequest */
 #define    MC_CMD_LICENSING_V3_TEMPORARY_IN_STATUS_LEN 4
 #define       MC_CMD_LICENSING_V3_TEMPORARY_IN_STATUS_OP_OFST 0
+#define       MC_CMD_LICENSING_V3_TEMPORARY_IN_STATUS_OP_LEN 4
 
 /* MC_CMD_LICENSING_V3_TEMPORARY_OUT_STATUS msgresponse */
 #define    MC_CMD_LICENSING_V3_TEMPORARY_OUT_STATUS_LEN 12
 /* status code */
 #define       MC_CMD_LICENSING_V3_TEMPORARY_OUT_STATUS_STATUS_OFST 0
+#define       MC_CMD_LICENSING_V3_TEMPORARY_OUT_STATUS_STATUS_LEN 4
 /* enum: finished validating and installing license */
 #define          MC_CMD_LICENSING_V3_TEMPORARY_STATUS_OK  0x0
 /* enum: license validation and installation in progress */
@@ -11084,14 +13112,17 @@
 #define    MC_CMD_SET_PORT_SNIFF_CONFIG_IN_LEN 16
 /* configuration flags */
 #define       MC_CMD_SET_PORT_SNIFF_CONFIG_IN_FLAGS_OFST 0
+#define       MC_CMD_SET_PORT_SNIFF_CONFIG_IN_FLAGS_LEN 4
 #define        MC_CMD_SET_PORT_SNIFF_CONFIG_IN_ENABLE_LBN 0
 #define        MC_CMD_SET_PORT_SNIFF_CONFIG_IN_ENABLE_WIDTH 1
 #define        MC_CMD_SET_PORT_SNIFF_CONFIG_IN_PROMISCUOUS_LBN 1
 #define        MC_CMD_SET_PORT_SNIFF_CONFIG_IN_PROMISCUOUS_WIDTH 1
 /* receive queue handle (for RSS mode, this is the base queue) */
 #define       MC_CMD_SET_PORT_SNIFF_CONFIG_IN_RX_QUEUE_OFST 4
+#define       MC_CMD_SET_PORT_SNIFF_CONFIG_IN_RX_QUEUE_LEN 4
 /* receive mode */
 #define       MC_CMD_SET_PORT_SNIFF_CONFIG_IN_RX_MODE_OFST 8
+#define       MC_CMD_SET_PORT_SNIFF_CONFIG_IN_RX_MODE_LEN 4
 /* enum: receive to just the specified queue */
 #define          MC_CMD_SET_PORT_SNIFF_CONFIG_IN_RX_MODE_SIMPLE  0x0
 /* enum: receive to multiple queues using RSS context */
@@ -11101,6 +13132,7 @@
  * of 0xFFFFFFFF is guaranteed never to be a valid handle.
  */
 #define       MC_CMD_SET_PORT_SNIFF_CONFIG_IN_RX_CONTEXT_OFST 12
+#define       MC_CMD_SET_PORT_SNIFF_CONFIG_IN_RX_CONTEXT_LEN 4
 
 /* MC_CMD_SET_PORT_SNIFF_CONFIG_OUT msgresponse */
 #define    MC_CMD_SET_PORT_SNIFF_CONFIG_OUT_LEN 0
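
A sketch (again not from this patch) of assembling the 16-byte request for RSS-mode sniffing, reusing the hypothetical put_le32 helper from the eye-plot sketch; every field name is one of the defines above:

/* Mirror received traffic to an RSS context; all four dwords are written,
 * so no separate zeroing of the buffer is needed.
 */
static void pack_port_sniff(uint8_t *buf, uint32_t base_rxq, uint32_t rss_ctx,
			    int promiscuous)
{
	uint32_t flags = 1 << MC_CMD_SET_PORT_SNIFF_CONFIG_IN_ENABLE_LBN;

	if (promiscuous)
		flags |= 1 << MC_CMD_SET_PORT_SNIFF_CONFIG_IN_PROMISCUOUS_LBN;
	put_le32(buf, MC_CMD_SET_PORT_SNIFF_CONFIG_IN_FLAGS_OFST, flags);
	put_le32(buf, MC_CMD_SET_PORT_SNIFF_CONFIG_IN_RX_QUEUE_OFST, base_rxq);
	put_le32(buf, MC_CMD_SET_PORT_SNIFF_CONFIG_IN_RX_MODE_OFST,
		 MC_CMD_SET_PORT_SNIFF_CONFIG_IN_RX_MODE_RSS);
	put_le32(buf, MC_CMD_SET_PORT_SNIFF_CONFIG_IN_RX_CONTEXT_OFST, rss_ctx);
}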
@@ -11123,20 +13155,24 @@
 #define    MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_LEN 16
 /* configuration flags */
 #define       MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_FLAGS_OFST 0
+#define       MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_FLAGS_LEN 4
 #define        MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_ENABLE_LBN 0
 #define        MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_ENABLE_WIDTH 1
 #define        MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_PROMISCUOUS_LBN 1
 #define        MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_PROMISCUOUS_WIDTH 1
 /* receiving queue handle (for RSS mode, this is the base queue) */
 #define       MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_RX_QUEUE_OFST 4
+#define       MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_RX_QUEUE_LEN 4
 /* receive mode */
 #define       MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_RX_MODE_OFST 8
+#define       MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_RX_MODE_LEN 4
 /* enum: receiving to just the specified queue */
 #define          MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_RX_MODE_SIMPLE  0x0
 /* enum: receiving to multiple queues using RSS context */
 #define          MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_RX_MODE_RSS  0x1
 /* RSS context (for RX_MODE_RSS) */
 #define       MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_RX_CONTEXT_OFST 12
+#define       MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_RX_CONTEXT_LEN 4
 
 
 /***********************************/
@@ -11153,6 +13189,7 @@
 #define    MC_CMD_SET_PARSER_DISP_CONFIG_IN_LEN(num) (8+4*(num))
 /* the type of configuration setting to change */
 #define       MC_CMD_SET_PARSER_DISP_CONFIG_IN_TYPE_OFST 0
+#define       MC_CMD_SET_PARSER_DISP_CONFIG_IN_TYPE_LEN 4
 /* enum: Per-TXQ enable for multicast UDP destination lookup for possible
  * internal loopback. (ENTITY is a queue handle, VALUE is a single boolean.)
  */
@@ -11166,6 +13203,7 @@
  * on the type of configuration setting being changed
  */
 #define       MC_CMD_SET_PARSER_DISP_CONFIG_IN_ENTITY_OFST 4
+#define       MC_CMD_SET_PARSER_DISP_CONFIG_IN_ENTITY_LEN 4
 /* new value: the details depend on the type of configuration setting being
  * changed
  */
@@ -11190,12 +13228,14 @@
 #define    MC_CMD_GET_PARSER_DISP_CONFIG_IN_LEN 8
 /* the type of configuration setting to read */
 #define       MC_CMD_GET_PARSER_DISP_CONFIG_IN_TYPE_OFST 0
+#define       MC_CMD_GET_PARSER_DISP_CONFIG_IN_TYPE_LEN 4
 /*            Enum values, see field(s): */
 /*               MC_CMD_SET_PARSER_DISP_CONFIG/MC_CMD_SET_PARSER_DISP_CONFIG_IN/TYPE */
 /* handle for the entity to query: queue handle, EVB port ID, etc. depending on
  * the type of configuration setting being read
  */
 #define       MC_CMD_GET_PARSER_DISP_CONFIG_IN_ENTITY_OFST 4
+#define       MC_CMD_GET_PARSER_DISP_CONFIG_IN_ENTITY_LEN 4
 
 /* MC_CMD_GET_PARSER_DISP_CONFIG_OUT msgresponse */
 #define    MC_CMD_GET_PARSER_DISP_CONFIG_OUT_LENMIN 4
@@ -11228,12 +13268,15 @@
 #define    MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN_LEN 16
 /* configuration flags */
 #define       MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN_FLAGS_OFST 0
+#define       MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN_FLAGS_LEN 4
 #define        MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN_ENABLE_LBN 0
 #define        MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN_ENABLE_WIDTH 1
 /* receive queue handle (for RSS mode, this is the base queue) */
 #define       MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN_RX_QUEUE_OFST 4
+#define       MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN_RX_QUEUE_LEN 4
 /* receive mode */
 #define       MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN_RX_MODE_OFST 8
+#define       MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN_RX_MODE_LEN 4
 /* enum: receive to just the specified queue */
 #define          MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN_RX_MODE_SIMPLE  0x0
 /* enum: receive to multiple queues using RSS context */
@@ -11243,6 +13286,7 @@
  * of 0xFFFFFFFF is guaranteed never to be a valid handle.
  */
 #define       MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN_RX_CONTEXT_OFST 12
+#define       MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN_RX_CONTEXT_LEN 4
 
 /* MC_CMD_SET_TX_PORT_SNIFF_CONFIG_OUT msgresponse */
 #define    MC_CMD_SET_TX_PORT_SNIFF_CONFIG_OUT_LEN 0
@@ -11265,18 +13309,22 @@
 #define    MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT_LEN 16
 /* configuration flags */
 #define       MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT_FLAGS_OFST 0
+#define       MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT_FLAGS_LEN 4
 #define        MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT_ENABLE_LBN 0
 #define        MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT_ENABLE_WIDTH 1
 /* receiving queue handle (for RSS mode, this is the base queue) */
 #define       MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT_RX_QUEUE_OFST 4
+#define       MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT_RX_QUEUE_LEN 4
 /* receive mode */
 #define       MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT_RX_MODE_OFST 8
+#define       MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT_RX_MODE_LEN 4
 /* enum: receiving to just the specified queue */
 #define          MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT_RX_MODE_SIMPLE  0x0
 /* enum: receiving to multiple queues using RSS context */
 #define          MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT_RX_MODE_RSS  0x1
 /* RSS context (for RX_MODE_RSS) */
 #define       MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT_RX_CONTEXT_OFST 12
+#define       MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT_RX_CONTEXT_LEN 4
 
 
 /***********************************/
@@ -11291,16 +13339,22 @@
 #define    MC_CMD_RMON_STATS_RX_ERRORS_IN_LEN 8
 /* The rx queue to get stats for. */
 #define       MC_CMD_RMON_STATS_RX_ERRORS_IN_RX_QUEUE_OFST 0
+#define       MC_CMD_RMON_STATS_RX_ERRORS_IN_RX_QUEUE_LEN 4
 #define       MC_CMD_RMON_STATS_RX_ERRORS_IN_FLAGS_OFST 4
+#define       MC_CMD_RMON_STATS_RX_ERRORS_IN_FLAGS_LEN 4
 #define        MC_CMD_RMON_STATS_RX_ERRORS_IN_RST_LBN 0
 #define        MC_CMD_RMON_STATS_RX_ERRORS_IN_RST_WIDTH 1
 
 /* MC_CMD_RMON_STATS_RX_ERRORS_OUT msgresponse */
 #define    MC_CMD_RMON_STATS_RX_ERRORS_OUT_LEN 16
 #define       MC_CMD_RMON_STATS_RX_ERRORS_OUT_CRC_ERRORS_OFST 0
+#define       MC_CMD_RMON_STATS_RX_ERRORS_OUT_CRC_ERRORS_LEN 4
 #define       MC_CMD_RMON_STATS_RX_ERRORS_OUT_TRUNC_ERRORS_OFST 4
+#define       MC_CMD_RMON_STATS_RX_ERRORS_OUT_TRUNC_ERRORS_LEN 4
 #define       MC_CMD_RMON_STATS_RX_ERRORS_OUT_RX_NO_DESC_DROPS_OFST 8
+#define       MC_CMD_RMON_STATS_RX_ERRORS_OUT_RX_NO_DESC_DROPS_LEN 4
 #define       MC_CMD_RMON_STATS_RX_ERRORS_OUT_RX_ABORT_OFST 12
+#define       MC_CMD_RMON_STATS_RX_ERRORS_OUT_RX_ABORT_LEN 4
 
 
 /***********************************/
@@ -11309,6 +13363,8 @@
  */
 #define MC_CMD_GET_PCIE_RESOURCE_INFO 0xfd
 
+#define MC_CMD_0xfd_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
 /* MC_CMD_GET_PCIE_RESOURCE_INFO_IN msgrequest */
 #define    MC_CMD_GET_PCIE_RESOURCE_INFO_IN_LEN 0
 
@@ -11316,20 +13372,27 @@
 #define    MC_CMD_GET_PCIE_RESOURCE_INFO_OUT_LEN 28
 /* The maximum number of PFs the device can expose */
 #define       MC_CMD_GET_PCIE_RESOURCE_INFO_OUT_MAX_PFS_OFST 0
+#define       MC_CMD_GET_PCIE_RESOURCE_INFO_OUT_MAX_PFS_LEN 4
 /* The maximum number of VFs the device can expose in total */
 #define       MC_CMD_GET_PCIE_RESOURCE_INFO_OUT_MAX_VFS_OFST 4
+#define       MC_CMD_GET_PCIE_RESOURCE_INFO_OUT_MAX_VFS_LEN 4
 /* The maximum number of MSI-X vectors the device can provide in total */
 #define       MC_CMD_GET_PCIE_RESOURCE_INFO_OUT_MAX_VECTORS_OFST 8
+#define       MC_CMD_GET_PCIE_RESOURCE_INFO_OUT_MAX_VECTORS_LEN 4
 /* the number of MSI-X vectors the device will allocate by default to each PF
  */
 #define       MC_CMD_GET_PCIE_RESOURCE_INFO_OUT_DEFAULT_PF_VECTORS_OFST 12
+#define       MC_CMD_GET_PCIE_RESOURCE_INFO_OUT_DEFAULT_PF_VECTORS_LEN 4
 /* the number of MSI-X vectors the device will allocate by default to each VF
  */
 #define       MC_CMD_GET_PCIE_RESOURCE_INFO_OUT_DEFAULT_VF_VECTORS_OFST 16
+#define       MC_CMD_GET_PCIE_RESOURCE_INFO_OUT_DEFAULT_VF_VECTORS_LEN 4
 /* the maximum number of MSI-X vectors the device can allocate to any one PF */
 #define       MC_CMD_GET_PCIE_RESOURCE_INFO_OUT_MAX_PF_VECTORS_OFST 20
+#define       MC_CMD_GET_PCIE_RESOURCE_INFO_OUT_MAX_PF_VECTORS_LEN 4
 /* the maximum number of MSI-X vectors the device can allocate to any one VF */
 #define       MC_CMD_GET_PCIE_RESOURCE_INFO_OUT_MAX_VF_VECTORS_OFST 24
+#define       MC_CMD_GET_PCIE_RESOURCE_INFO_OUT_MAX_VF_VECTORS_LEN 4
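
On the response side, the 28-byte payload is seven little-endian dwords. A decoding sketch with a hypothetical get_le32 mirror of the earlier put_le32 helper:

/* Illustrative helper: load a little-endian 32-bit value from a byte offset. */
static uint32_t get_le32(const uint8_t *buf, unsigned int ofst)
{
	return (uint32_t)buf[ofst] |
	       ((uint32_t)buf[ofst + 1] << 8) |
	       ((uint32_t)buf[ofst + 2] << 16) |
	       ((uint32_t)buf[ofst + 3] << 24);
}

/* Pull out the limits a driver would typically size its SR-IOV setup against. */
static void parse_pcie_resource_info(const uint8_t *out, uint32_t *max_vfs,
				     uint32_t *max_vf_vectors)
{
	*max_vfs = get_le32(out, MC_CMD_GET_PCIE_RESOURCE_INFO_OUT_MAX_VFS_OFST);
	*max_vf_vectors =
		get_le32(out, MC_CMD_GET_PCIE_RESOURCE_INFO_OUT_MAX_VF_VECTORS_OFST);
}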
 
 
 /***********************************/
@@ -11347,10 +13410,13 @@
 #define    MC_CMD_GET_PORT_MODES_OUT_LEN 12
 /* Bitmask of port modes available on the board (indexed by TLV_PORT_MODE_*) */
 #define       MC_CMD_GET_PORT_MODES_OUT_MODES_OFST 0
+#define       MC_CMD_GET_PORT_MODES_OUT_MODES_LEN 4
 /* Default (canonical) board mode */
 #define       MC_CMD_GET_PORT_MODES_OUT_DEFAULT_MODE_OFST 4
+#define       MC_CMD_GET_PORT_MODES_OUT_DEFAULT_MODE_LEN 4
 /* Current board mode */
 #define       MC_CMD_GET_PORT_MODES_OUT_CURRENT_MODE_OFST 8
+#define       MC_CMD_GET_PORT_MODES_OUT_CURRENT_MODE_LEN 4
 
 
 /***********************************/
@@ -11359,21 +13425,26 @@
  */
 #define MC_CMD_READ_ATB 0x100
 
-#define MC_CMD_0x100_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+#define MC_CMD_0x100_PRIVILEGE_CTG SRIOV_CTG_INSECURE
 
 /* MC_CMD_READ_ATB_IN msgrequest */
 #define    MC_CMD_READ_ATB_IN_LEN 16
 #define       MC_CMD_READ_ATB_IN_SIGNAL_BUS_OFST 0
+#define       MC_CMD_READ_ATB_IN_SIGNAL_BUS_LEN 4
 #define          MC_CMD_READ_ATB_IN_BUS_CCOM  0x0 /* enum */
 #define          MC_CMD_READ_ATB_IN_BUS_CKR  0x1 /* enum */
 #define          MC_CMD_READ_ATB_IN_BUS_CPCIE  0x8 /* enum */
 #define       MC_CMD_READ_ATB_IN_SIGNAL_EN_BITNO_OFST 4
+#define       MC_CMD_READ_ATB_IN_SIGNAL_EN_BITNO_LEN 4
 #define       MC_CMD_READ_ATB_IN_SIGNAL_SEL_OFST 8
+#define       MC_CMD_READ_ATB_IN_SIGNAL_SEL_LEN 4
 #define       MC_CMD_READ_ATB_IN_SETTLING_TIME_US_OFST 12
+#define       MC_CMD_READ_ATB_IN_SETTLING_TIME_US_LEN 4
 
 /* MC_CMD_READ_ATB_OUT msgresponse */
 #define    MC_CMD_READ_ATB_OUT_LEN 4
 #define       MC_CMD_READ_ATB_OUT_SAMPLE_MV_OFST 0
+#define       MC_CMD_READ_ATB_OUT_SAMPLE_MV_LEN 4
 
 
 /***********************************/
@@ -11390,7 +13461,9 @@
 /* Each workaround is represented by a single bit according to the enums below.
  */
 #define       MC_CMD_GET_WORKAROUNDS_OUT_IMPLEMENTED_OFST 0
+#define       MC_CMD_GET_WORKAROUNDS_OUT_IMPLEMENTED_LEN 4
 #define       MC_CMD_GET_WORKAROUNDS_OUT_ENABLED_OFST 4
+#define       MC_CMD_GET_WORKAROUNDS_OUT_ENABLED_LEN 4
 /* enum: Bug 17230 work around. */
 #define          MC_CMD_GET_WORKAROUNDS_OUT_BUG17230 0x2
 /* enum: Bug 35388 work around (unsafe EVQ writes). */
@@ -11425,6 +13498,7 @@
  * 1,3 = 0x00030001
  */
 #define       MC_CMD_PRIVILEGE_MASK_IN_FUNCTION_OFST 0
+#define       MC_CMD_PRIVILEGE_MASK_IN_FUNCTION_LEN 4
 #define        MC_CMD_PRIVILEGE_MASK_IN_FUNCTION_PF_LBN 0
 #define        MC_CMD_PRIVILEGE_MASK_IN_FUNCTION_PF_WIDTH 16
 #define        MC_CMD_PRIVILEGE_MASK_IN_FUNCTION_VF_LBN 16
@@ -11434,6 +13508,7 @@
  * set to 1.
  */
 #define       MC_CMD_PRIVILEGE_MASK_IN_NEW_MASK_OFST 4
+#define       MC_CMD_PRIVILEGE_MASK_IN_NEW_MASK_LEN 4
 #define          MC_CMD_PRIVILEGE_MASK_IN_GRP_ADMIN             0x1 /* enum */
 #define          MC_CMD_PRIVILEGE_MASK_IN_GRP_LINK              0x2 /* enum */
 #define          MC_CMD_PRIVILEGE_MASK_IN_GRP_ONLOAD            0x4 /* enum */
@@ -11460,6 +13535,10 @@
  * only a set of permitted VLANs. See the vPort flag FLAG_VLAN_RESTRICT.
  */
 #define          MC_CMD_PRIVILEGE_MASK_IN_GRP_UNRESTRICTED_VLAN  0x2000
+/* enum: Privilege for insecure commands. Commands that belong to this group
+ * are not permitted on secure adapters regardless of the privilege mask.
+ */
+#define          MC_CMD_PRIVILEGE_MASK_IN_GRP_INSECURE          0x4000
 /* enum: Set this bit to indicate that a new privilege mask is to be set,
  * otherwise the command will only read the existing mask.
  */
@@ -11469,6 +13548,7 @@
 #define    MC_CMD_PRIVILEGE_MASK_OUT_LEN 4
 /* For an admin function, always all the privileges are reported. */
 #define       MC_CMD_PRIVILEGE_MASK_OUT_OLD_MASK_OFST 0
+#define       MC_CMD_PRIVILEGE_MASK_OUT_OLD_MASK_LEN 4
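
The FUNCTION dword packs the PF index into bits [0:15] and the VF index into bits [16:31], which is why PF 1/VF 3 encodes as 0x00030001 in the comment above. A sketch of that encoding; treating VF index 0xffff as "no VF" for PF-only functions is an assumption, not something this hunk defines:

/* Encode PRIVILEGE_MASK's FUNCTION dword; vf = 0xffff is assumed to mean
 * "PF only" here.
 */
static uint32_t privilege_mask_function(uint16_t pf, uint16_t vf)
{
	return ((uint32_t)vf << MC_CMD_PRIVILEGE_MASK_IN_FUNCTION_VF_LBN) |
	       ((uint32_t)pf << MC_CMD_PRIVILEGE_MASK_IN_FUNCTION_PF_LBN);
}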
 
 
 /***********************************/
@@ -11485,12 +13565,14 @@
  * e.g. VF 1,3 = 0x00030001
  */
 #define       MC_CMD_LINK_STATE_MODE_IN_FUNCTION_OFST 0
+#define       MC_CMD_LINK_STATE_MODE_IN_FUNCTION_LEN 4
 #define        MC_CMD_LINK_STATE_MODE_IN_FUNCTION_PF_LBN 0
 #define        MC_CMD_LINK_STATE_MODE_IN_FUNCTION_PF_WIDTH 16
 #define        MC_CMD_LINK_STATE_MODE_IN_FUNCTION_VF_LBN 16
 #define        MC_CMD_LINK_STATE_MODE_IN_FUNCTION_VF_WIDTH 16
 /* New link state mode to be set */
 #define       MC_CMD_LINK_STATE_MODE_IN_NEW_MODE_OFST 4
+#define       MC_CMD_LINK_STATE_MODE_IN_NEW_MODE_LEN 4
 #define          MC_CMD_LINK_STATE_MODE_IN_LINK_STATE_AUTO       0x0 /* enum */
 #define          MC_CMD_LINK_STATE_MODE_IN_LINK_STATE_UP         0x1 /* enum */
 #define          MC_CMD_LINK_STATE_MODE_IN_LINK_STATE_DOWN       0x2 /* enum */
@@ -11501,11 +13583,12 @@
 /* MC_CMD_LINK_STATE_MODE_OUT msgresponse */
 #define    MC_CMD_LINK_STATE_MODE_OUT_LEN 4
 #define       MC_CMD_LINK_STATE_MODE_OUT_OLD_MODE_OFST 0
+#define       MC_CMD_LINK_STATE_MODE_OUT_OLD_MODE_LEN 4
 
 
 /***********************************/
 /* MC_CMD_GET_SNAPSHOT_LENGTH
- * Obtain the curent range of allowable values for the SNAPSHOT_LENGTH
+ * Obtain the current range of allowable values for the SNAPSHOT_LENGTH
  * parameter to MC_CMD_INIT_RXQ.
  */
 #define MC_CMD_GET_SNAPSHOT_LENGTH 0x101
@@ -11519,8 +13602,10 @@
 #define    MC_CMD_GET_SNAPSHOT_LENGTH_OUT_LEN 8
 /* Minimum acceptable snapshot length. */
 #define       MC_CMD_GET_SNAPSHOT_LENGTH_OUT_RX_SNAPLEN_MIN_OFST 0
+#define       MC_CMD_GET_SNAPSHOT_LENGTH_OUT_RX_SNAPLEN_MIN_LEN 4
 /* Maximum acceptable snapshot length. */
 #define       MC_CMD_GET_SNAPSHOT_LENGTH_OUT_RX_SNAPLEN_MAX_OFST 4
+#define       MC_CMD_GET_SNAPSHOT_LENGTH_OUT_RX_SNAPLEN_MAX_LEN 4
 
 
 /***********************************/
@@ -11529,7 +13614,7 @@
  */
 #define MC_CMD_FUSE_DIAGS 0x102
 
-#define MC_CMD_0x102_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+#define MC_CMD_0x102_PRIVILEGE_CTG SRIOV_CTG_INSECURE
 
 /* MC_CMD_FUSE_DIAGS_IN msgrequest */
 #define    MC_CMD_FUSE_DIAGS_IN_LEN 0
@@ -11538,28 +13623,40 @@
 #define    MC_CMD_FUSE_DIAGS_OUT_LEN 48
 /* Total number of mismatched bits between pairs in area 0 */
 #define       MC_CMD_FUSE_DIAGS_OUT_AREA0_MISMATCH_BITS_OFST 0
+#define       MC_CMD_FUSE_DIAGS_OUT_AREA0_MISMATCH_BITS_LEN 4
 /* Total number of unexpectedly clear (set in B but not A) bits in area 0 */
 #define       MC_CMD_FUSE_DIAGS_OUT_AREA0_PAIR_A_BAD_BITS_OFST 4
+#define       MC_CMD_FUSE_DIAGS_OUT_AREA0_PAIR_A_BAD_BITS_LEN 4
 /* Total number of unexpectedly clear (set in A but not B) bits in area 0 */
 #define       MC_CMD_FUSE_DIAGS_OUT_AREA0_PAIR_B_BAD_BITS_OFST 8
+#define       MC_CMD_FUSE_DIAGS_OUT_AREA0_PAIR_B_BAD_BITS_LEN 4
 /* Checksum of data after logical OR of pairs in area 0 */
 #define       MC_CMD_FUSE_DIAGS_OUT_AREA0_CHECKSUM_OFST 12
+#define       MC_CMD_FUSE_DIAGS_OUT_AREA0_CHECKSUM_LEN 4
 /* Total number of mismatched bits between pairs in area 1 */
 #define       MC_CMD_FUSE_DIAGS_OUT_AREA1_MISMATCH_BITS_OFST 16
+#define       MC_CMD_FUSE_DIAGS_OUT_AREA1_MISMATCH_BITS_LEN 4
 /* Total number of unexpectedly clear (set in B but not A) bits in area 1 */
 #define       MC_CMD_FUSE_DIAGS_OUT_AREA1_PAIR_A_BAD_BITS_OFST 20
+#define       MC_CMD_FUSE_DIAGS_OUT_AREA1_PAIR_A_BAD_BITS_LEN 4
 /* Total number of unexpectedly clear (set in A but not B) bits in area 1 */
 #define       MC_CMD_FUSE_DIAGS_OUT_AREA1_PAIR_B_BAD_BITS_OFST 24
+#define       MC_CMD_FUSE_DIAGS_OUT_AREA1_PAIR_B_BAD_BITS_LEN 4
 /* Checksum of data after logical OR of pairs in area 1 */
 #define       MC_CMD_FUSE_DIAGS_OUT_AREA1_CHECKSUM_OFST 28
+#define       MC_CMD_FUSE_DIAGS_OUT_AREA1_CHECKSUM_LEN 4
 /* Total number of mismatched bits between pairs in area 2 */
 #define       MC_CMD_FUSE_DIAGS_OUT_AREA2_MISMATCH_BITS_OFST 32
+#define       MC_CMD_FUSE_DIAGS_OUT_AREA2_MISMATCH_BITS_LEN 4
 /* Total number of unexpectedly clear (set in B but not A) bits in area 2 */
 #define       MC_CMD_FUSE_DIAGS_OUT_AREA2_PAIR_A_BAD_BITS_OFST 36
+#define       MC_CMD_FUSE_DIAGS_OUT_AREA2_PAIR_A_BAD_BITS_LEN 4
 /* Total number of unexpectedly clear (set in A but not B) bits in area 2 */
 #define       MC_CMD_FUSE_DIAGS_OUT_AREA2_PAIR_B_BAD_BITS_OFST 40
+#define       MC_CMD_FUSE_DIAGS_OUT_AREA2_PAIR_B_BAD_BITS_LEN 4
 /* Checksum of data after logical OR of pairs in area 2 */
 #define       MC_CMD_FUSE_DIAGS_OUT_AREA2_CHECKSUM_OFST 44
+#define       MC_CMD_FUSE_DIAGS_OUT_AREA2_CHECKSUM_LEN 4
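
The three OTP areas repeat the same four dwords at a 16-byte stride, so the 48-byte response can be walked generically. An illustrative decode using the hypothetical get_le32 helper from earlier:

struct fuse_area_diags {
	uint32_t mismatch_bits;
	uint32_t pair_a_bad_bits;
	uint32_t pair_b_bad_bits;
	uint32_t checksum;
};

/* Area 0 starts at byte 0, area 1 at 16, area 2 at 32, per the defines above. */
static void parse_fuse_diags(const uint8_t *out, struct fuse_area_diags d[3])
{
	unsigned int area;

	for (area = 0; area < 3; area++) {
		unsigned int base = area * 16;

		d[area].mismatch_bits   = get_le32(out, base + 0);
		d[area].pair_a_bad_bits = get_le32(out, base + 4);
		d[area].pair_b_bad_bits = get_le32(out, base + 8);
		d[area].checksum        = get_le32(out, base + 12);
	}
}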
 
 
 /***********************************/
@@ -11576,6 +13673,7 @@
 #define    MC_CMD_PRIVILEGE_MODIFY_IN_LEN 16
 /* The groups of functions to have their privilege masks modified. */
 #define       MC_CMD_PRIVILEGE_MODIFY_IN_FN_GROUP_OFST 0
+#define       MC_CMD_PRIVILEGE_MODIFY_IN_FN_GROUP_LEN 4
 #define          MC_CMD_PRIVILEGE_MODIFY_IN_NONE       0x0 /* enum */
 #define          MC_CMD_PRIVILEGE_MODIFY_IN_ALL        0x1 /* enum */
 #define          MC_CMD_PRIVILEGE_MODIFY_IN_PFS_ONLY   0x2 /* enum */
@@ -11584,6 +13682,7 @@
 #define          MC_CMD_PRIVILEGE_MODIFY_IN_ONE        0x5 /* enum */
 /* For VFS_OF_PF specify the PF, for ONE specify the target function */
 #define       MC_CMD_PRIVILEGE_MODIFY_IN_FUNCTION_OFST 4
+#define       MC_CMD_PRIVILEGE_MODIFY_IN_FUNCTION_LEN 4
 #define        MC_CMD_PRIVILEGE_MODIFY_IN_FUNCTION_PF_LBN 0
 #define        MC_CMD_PRIVILEGE_MODIFY_IN_FUNCTION_PF_WIDTH 16
 #define        MC_CMD_PRIVILEGE_MODIFY_IN_FUNCTION_VF_LBN 16
@@ -11592,10 +13691,12 @@
  * refer to the command MC_CMD_PRIVILEGE_MASK
  */
 #define       MC_CMD_PRIVILEGE_MODIFY_IN_ADD_MASK_OFST 8
+#define       MC_CMD_PRIVILEGE_MODIFY_IN_ADD_MASK_LEN 4
 /* Privileges to be removed from the target functions. For privilege
  * definitions refer to the command MC_CMD_PRIVILEGE_MASK
  */
 #define       MC_CMD_PRIVILEGE_MODIFY_IN_REMOVE_MASK_OFST 12
+#define       MC_CMD_PRIVILEGE_MODIFY_IN_REMOVE_MASK_LEN 4
 
 /* MC_CMD_PRIVILEGE_MODIFY_OUT msgresponse */
 #define    MC_CMD_PRIVILEGE_MODIFY_OUT_LEN 0
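
A one-line sketch of the resulting mask arithmetic; the ordering when ADD_MASK and REMOVE_MASK name the same bit is firmware policy and is assumed here:

static uint32_t apply_privilege_modify(uint32_t old_mask, uint32_t add_mask,
				       uint32_t remove_mask)
{
	/* Assumed semantics: grant ADD_MASK bits, then strip REMOVE_MASK bits */
	return (old_mask | add_mask) & ~remove_mask;
}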
@@ -11613,8 +13714,10 @@
 #define    MC_CMD_XPM_READ_BYTES_IN_LEN 8
 /* Start address (byte) */
 #define       MC_CMD_XPM_READ_BYTES_IN_ADDR_OFST 0
+#define       MC_CMD_XPM_READ_BYTES_IN_ADDR_LEN 4
 /* Count (bytes) */
 #define       MC_CMD_XPM_READ_BYTES_IN_COUNT_OFST 4
+#define       MC_CMD_XPM_READ_BYTES_IN_COUNT_LEN 4
 
 /* MC_CMD_XPM_READ_BYTES_OUT msgresponse */
 #define    MC_CMD_XPM_READ_BYTES_OUT_LENMIN 0
@@ -11633,7 +13736,7 @@
  */
 #define MC_CMD_XPM_WRITE_BYTES 0x104
 
-#define MC_CMD_0x104_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+#define MC_CMD_0x104_PRIVILEGE_CTG SRIOV_CTG_INSECURE
 
 /* MC_CMD_XPM_WRITE_BYTES_IN msgrequest */
 #define    MC_CMD_XPM_WRITE_BYTES_IN_LENMIN 8
@@ -11641,8 +13744,10 @@
 #define    MC_CMD_XPM_WRITE_BYTES_IN_LEN(num) (8+1*(num))
 /* Start address (byte) */
 #define       MC_CMD_XPM_WRITE_BYTES_IN_ADDR_OFST 0
+#define       MC_CMD_XPM_WRITE_BYTES_IN_ADDR_LEN 4
 /* Count (bytes) */
 #define       MC_CMD_XPM_WRITE_BYTES_IN_COUNT_OFST 4
+#define       MC_CMD_XPM_WRITE_BYTES_IN_COUNT_LEN 4
 /* Data */
 #define       MC_CMD_XPM_WRITE_BYTES_IN_DATA_OFST 8
 #define       MC_CMD_XPM_WRITE_BYTES_IN_DATA_LEN 1
@@ -11659,14 +13764,16 @@
  */
 #define MC_CMD_XPM_READ_SECTOR 0x105
 
-#define MC_CMD_0x105_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+#define MC_CMD_0x105_PRIVILEGE_CTG SRIOV_CTG_INSECURE
 
 /* MC_CMD_XPM_READ_SECTOR_IN msgrequest */
 #define    MC_CMD_XPM_READ_SECTOR_IN_LEN 8
 /* Sector index */
 #define       MC_CMD_XPM_READ_SECTOR_IN_INDEX_OFST 0
+#define       MC_CMD_XPM_READ_SECTOR_IN_INDEX_LEN 4
 /* Sector size */
 #define       MC_CMD_XPM_READ_SECTOR_IN_SIZE_OFST 4
+#define       MC_CMD_XPM_READ_SECTOR_IN_SIZE_LEN 4
 
 /* MC_CMD_XPM_READ_SECTOR_OUT msgresponse */
 #define    MC_CMD_XPM_READ_SECTOR_OUT_LENMIN 4
@@ -11674,9 +13781,11 @@
 #define    MC_CMD_XPM_READ_SECTOR_OUT_LEN(num) (4+1*(num))
 /* Sector type */
 #define       MC_CMD_XPM_READ_SECTOR_OUT_TYPE_OFST 0
+#define       MC_CMD_XPM_READ_SECTOR_OUT_TYPE_LEN 4
 #define          MC_CMD_XPM_READ_SECTOR_OUT_BLANK            0x0 /* enum */
 #define          MC_CMD_XPM_READ_SECTOR_OUT_CRYPTO_KEY_128   0x1 /* enum */
 #define          MC_CMD_XPM_READ_SECTOR_OUT_CRYPTO_KEY_256   0x2 /* enum */
+#define          MC_CMD_XPM_READ_SECTOR_OUT_CRYPTO_DATA      0x3 /* enum */
 #define          MC_CMD_XPM_READ_SECTOR_OUT_INVALID          0xff /* enum */
 /* Sector data */
 #define       MC_CMD_XPM_READ_SECTOR_OUT_DATA_OFST 4
@@ -11691,7 +13800,7 @@
  */
 #define MC_CMD_XPM_WRITE_SECTOR 0x106
 
-#define MC_CMD_0x106_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+#define MC_CMD_0x106_PRIVILEGE_CTG SRIOV_CTG_INSECURE
 
 /* MC_CMD_XPM_WRITE_SECTOR_IN msgrequest */
 #define    MC_CMD_XPM_WRITE_SECTOR_IN_LENMIN 12
@@ -11708,10 +13817,12 @@
 #define       MC_CMD_XPM_WRITE_SECTOR_IN_RESERVED_LEN 3
 /* Sector type */
 #define       MC_CMD_XPM_WRITE_SECTOR_IN_TYPE_OFST 4
+#define       MC_CMD_XPM_WRITE_SECTOR_IN_TYPE_LEN 4
 /*            Enum values, see field(s): */
 /*               MC_CMD_XPM_READ_SECTOR/MC_CMD_XPM_READ_SECTOR_OUT/TYPE */
 /* Sector size */
 #define       MC_CMD_XPM_WRITE_SECTOR_IN_SIZE_OFST 8
+#define       MC_CMD_XPM_WRITE_SECTOR_IN_SIZE_LEN 4
 /* Sector data */
 #define       MC_CMD_XPM_WRITE_SECTOR_IN_DATA_OFST 12
 #define       MC_CMD_XPM_WRITE_SECTOR_IN_DATA_LEN 1
@@ -11722,6 +13833,7 @@
 #define    MC_CMD_XPM_WRITE_SECTOR_OUT_LEN 4
 /* New sector index */
 #define       MC_CMD_XPM_WRITE_SECTOR_OUT_INDEX_OFST 0
+#define       MC_CMD_XPM_WRITE_SECTOR_OUT_INDEX_LEN 4
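
The sector TYPE dword (shared with MC_CMD_XPM_WRITE_SECTOR_IN via the enum cross-reference above) gains a CRYPTO_DATA value in this revision. A classification sketch reusing the hypothetical get_le32 helper:

/* Returns nonzero if a READ_SECTOR response holds key or crypto-data material. */
static int xpm_sector_is_crypto(const uint8_t *out)
{
	uint32_t type = get_le32(out, MC_CMD_XPM_READ_SECTOR_OUT_TYPE_OFST);

	return type == MC_CMD_XPM_READ_SECTOR_OUT_CRYPTO_KEY_128 ||
	       type == MC_CMD_XPM_READ_SECTOR_OUT_CRYPTO_KEY_256 ||
	       type == MC_CMD_XPM_READ_SECTOR_OUT_CRYPTO_DATA;
}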
 
 
 /***********************************/
@@ -11730,12 +13842,13 @@
  */
 #define MC_CMD_XPM_INVALIDATE_SECTOR 0x107
 
-#define MC_CMD_0x107_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+#define MC_CMD_0x107_PRIVILEGE_CTG SRIOV_CTG_INSECURE
 
 /* MC_CMD_XPM_INVALIDATE_SECTOR_IN msgrequest */
 #define    MC_CMD_XPM_INVALIDATE_SECTOR_IN_LEN 4
 /* Sector index */
 #define       MC_CMD_XPM_INVALIDATE_SECTOR_IN_INDEX_OFST 0
+#define       MC_CMD_XPM_INVALIDATE_SECTOR_IN_INDEX_LEN 4
 
 /* MC_CMD_XPM_INVALIDATE_SECTOR_OUT msgresponse */
 #define    MC_CMD_XPM_INVALIDATE_SECTOR_OUT_LEN 0
@@ -11747,14 +13860,16 @@
  */
 #define MC_CMD_XPM_BLANK_CHECK 0x108
 
-#define MC_CMD_0x108_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+#define MC_CMD_0x108_PRIVILEGE_CTG SRIOV_CTG_INSECURE
 
 /* MC_CMD_XPM_BLANK_CHECK_IN msgrequest */
 #define    MC_CMD_XPM_BLANK_CHECK_IN_LEN 8
 /* Start address (byte) */
 #define       MC_CMD_XPM_BLANK_CHECK_IN_ADDR_OFST 0
+#define       MC_CMD_XPM_BLANK_CHECK_IN_ADDR_LEN 4
 /* Count (bytes) */
 #define       MC_CMD_XPM_BLANK_CHECK_IN_COUNT_OFST 4
+#define       MC_CMD_XPM_BLANK_CHECK_IN_COUNT_LEN 4
 
 /* MC_CMD_XPM_BLANK_CHECK_OUT msgresponse */
 #define    MC_CMD_XPM_BLANK_CHECK_OUT_LENMIN 4
@@ -11762,6 +13877,7 @@
 #define    MC_CMD_XPM_BLANK_CHECK_OUT_LEN(num) (4+2*(num))
 /* Total number of bad (non-blank) locations */
 #define       MC_CMD_XPM_BLANK_CHECK_OUT_BAD_COUNT_OFST 0
+#define       MC_CMD_XPM_BLANK_CHECK_OUT_BAD_COUNT_LEN 4
 /* Addresses of bad locations (may be fewer than BAD_COUNT, if all cannot fit
  * into MCDI response)
  */
@@ -11777,14 +13893,16 @@
  */
 #define MC_CMD_XPM_REPAIR 0x109
 
-#define MC_CMD_0x109_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+#define MC_CMD_0x109_PRIVILEGE_CTG SRIOV_CTG_INSECURE
 
 /* MC_CMD_XPM_REPAIR_IN msgrequest */
 #define    MC_CMD_XPM_REPAIR_IN_LEN 8
 /* Start address (byte) */
 #define       MC_CMD_XPM_REPAIR_IN_ADDR_OFST 0
+#define       MC_CMD_XPM_REPAIR_IN_ADDR_LEN 4
 /* Count (bytes) */
 #define       MC_CMD_XPM_REPAIR_IN_COUNT_OFST 4
+#define       MC_CMD_XPM_REPAIR_IN_COUNT_LEN 4
 
 /* MC_CMD_XPM_REPAIR_OUT msgresponse */
 #define    MC_CMD_XPM_REPAIR_OUT_LEN 0
@@ -11797,7 +13915,7 @@
  */
 #define MC_CMD_XPM_DECODER_TEST 0x10a
 
-#define MC_CMD_0x10a_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+#define MC_CMD_0x10a_PRIVILEGE_CTG SRIOV_CTG_INSECURE
 
 /* MC_CMD_XPM_DECODER_TEST_IN msgrequest */
 #define    MC_CMD_XPM_DECODER_TEST_IN_LEN 0
@@ -11816,7 +13934,7 @@
  */
 #define MC_CMD_XPM_WRITE_TEST 0x10b
 
-#define MC_CMD_0x10b_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+#define MC_CMD_0x10b_PRIVILEGE_CTG SRIOV_CTG_INSECURE
 
 /* MC_CMD_XPM_WRITE_TEST_IN msgrequest */
 #define    MC_CMD_XPM_WRITE_TEST_IN_LEN 0
@@ -11842,10 +13960,13 @@
 #define    MC_CMD_EXEC_SIGNED_IN_LEN 28
 /* the length of code to include in the CMAC */
 #define       MC_CMD_EXEC_SIGNED_IN_CODELEN_OFST 0
+#define       MC_CMD_EXEC_SIGNED_IN_CODELEN_LEN 4
+/* the length of data to include in the CMAC */
 #define       MC_CMD_EXEC_SIGNED_IN_DATALEN_OFST 4
+#define       MC_CMD_EXEC_SIGNED_IN_DATALEN_LEN 4
 /* the XPM sector containing the key to use */
 #define       MC_CMD_EXEC_SIGNED_IN_KEYSECTOR_OFST 8
+#define       MC_CMD_EXEC_SIGNED_IN_KEYSECTOR_LEN 4
 /* the expected CMAC value */
 #define       MC_CMD_EXEC_SIGNED_IN_CMAC_OFST 12
 #define       MC_CMD_EXEC_SIGNED_IN_CMAC_LEN 16
@@ -11868,11 +13989,34 @@
 #define    MC_CMD_PREPARE_SIGNED_IN_LEN 4
 /* the length of data area to clear */
 #define       MC_CMD_PREPARE_SIGNED_IN_DATALEN_OFST 0
+#define       MC_CMD_PREPARE_SIGNED_IN_DATALEN_LEN 4
 
 /* MC_CMD_PREPARE_SIGNED_OUT msgresponse */
 #define    MC_CMD_PREPARE_SIGNED_OUT_LEN 0
 
 
+/* TUNNEL_ENCAP_UDP_PORT_ENTRY structuredef */
+#define    TUNNEL_ENCAP_UDP_PORT_ENTRY_LEN 4
+/* UDP port (the standard ports are named below but any port may be used) */
+#define       TUNNEL_ENCAP_UDP_PORT_ENTRY_UDP_PORT_OFST 0
+#define       TUNNEL_ENCAP_UDP_PORT_ENTRY_UDP_PORT_LEN 2
+/* enum: the IANA allocated UDP port for VXLAN */
+#define          TUNNEL_ENCAP_UDP_PORT_ENTRY_IANA_VXLAN_UDP_PORT  0x12b5
+/* enum: the IANA allocated UDP port for Geneve */
+#define          TUNNEL_ENCAP_UDP_PORT_ENTRY_IANA_GENEVE_UDP_PORT  0x17c1
+#define       TUNNEL_ENCAP_UDP_PORT_ENTRY_UDP_PORT_LBN 0
+#define       TUNNEL_ENCAP_UDP_PORT_ENTRY_UDP_PORT_WIDTH 16
+/* tunnel encapsulation protocol (only those named below are supported) */
+#define       TUNNEL_ENCAP_UDP_PORT_ENTRY_PROTOCOL_OFST 2
+#define       TUNNEL_ENCAP_UDP_PORT_ENTRY_PROTOCOL_LEN 2
+/* enum: This port will be used for VXLAN on both IPv4 and IPv6 */
+#define          TUNNEL_ENCAP_UDP_PORT_ENTRY_VXLAN  0x0
+/* enum: This port will be used for Geneve on both IPv4 and IPv6 */
+#define          TUNNEL_ENCAP_UDP_PORT_ENTRY_GENEVE  0x1
+#define       TUNNEL_ENCAP_UDP_PORT_ENTRY_PROTOCOL_LBN 16
+#define       TUNNEL_ENCAP_UDP_PORT_ENTRY_PROTOCOL_WIDTH 16
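
Each table entry is a single dword with the UDP port in bits [0:15] and the protocol in bits [16:31]. A packing sketch (tunnel_encap_entry is a hypothetical name); e.g. tunnel_encap_entry(0x12b5, TUNNEL_ENCAP_UDP_PORT_ENTRY_VXLAN) builds an entry for the IANA VXLAN port:

/* Pack one TUNNEL_ENCAP_UDP_PORT_ENTRY into its little-endian dword form. */
static uint32_t tunnel_encap_entry(uint16_t udp_port, uint16_t protocol)
{
	return ((uint32_t)protocol << TUNNEL_ENCAP_UDP_PORT_ENTRY_PROTOCOL_LBN) |
	       ((uint32_t)udp_port << TUNNEL_ENCAP_UDP_PORT_ENTRY_UDP_PORT_LBN);
}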
+
+
 /***********************************/
 /* MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS
  * Configure UDP ports for tunnel encapsulation hardware acceleration. The
@@ -11913,27 +14057,6 @@
 #define        MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_OUT_RESETTING_LBN 0
 #define        MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_OUT_RESETTING_WIDTH 1
 
-/* TUNNEL_ENCAP_UDP_PORT_ENTRY structuredef */
-#define    TUNNEL_ENCAP_UDP_PORT_ENTRY_LEN 4
-/* UDP port (the standard ports are named below but any port may be used) */
-#define       TUNNEL_ENCAP_UDP_PORT_ENTRY_UDP_PORT_OFST 0
-#define       TUNNEL_ENCAP_UDP_PORT_ENTRY_UDP_PORT_LEN 2
-/* enum: the IANA allocated UDP port for VXLAN */
-#define          TUNNEL_ENCAP_UDP_PORT_ENTRY_IANA_VXLAN_UDP_PORT  0x12b5
-/* enum: the IANA allocated UDP port for Geneve */
-#define          TUNNEL_ENCAP_UDP_PORT_ENTRY_IANA_GENEVE_UDP_PORT  0x17c1
-#define       TUNNEL_ENCAP_UDP_PORT_ENTRY_UDP_PORT_LBN 0
-#define       TUNNEL_ENCAP_UDP_PORT_ENTRY_UDP_PORT_WIDTH 16
-/* tunnel encapsulation protocol (only those named below are supported) */
-#define       TUNNEL_ENCAP_UDP_PORT_ENTRY_PROTOCOL_OFST 2
-#define       TUNNEL_ENCAP_UDP_PORT_ENTRY_PROTOCOL_LEN 2
-/* enum: VXLAN */
-#define          TUNNEL_ENCAP_UDP_PORT_ENTRY_VXLAN  0x0
-/* enum: Geneve */
-#define          TUNNEL_ENCAP_UDP_PORT_ENTRY_GENEVE  0x1
-#define       TUNNEL_ENCAP_UDP_PORT_ENTRY_PROTOCOL_LBN 16
-#define       TUNNEL_ENCAP_UDP_PORT_ENTRY_PROTOCOL_WIDTH 16
-
 
 /***********************************/
 /* MC_CMD_RX_BALANCING
@@ -11950,12 +14073,16 @@
 #define    MC_CMD_RX_BALANCING_IN_LEN 16
 /* The RX port whose upconverter table will be modified */
 #define       MC_CMD_RX_BALANCING_IN_PORT_OFST 0
+#define       MC_CMD_RX_BALANCING_IN_PORT_LEN 4
 /* The VLAN priority associated to the table index and vFIFO */
 #define       MC_CMD_RX_BALANCING_IN_PRIORITY_OFST 4
+#define       MC_CMD_RX_BALANCING_IN_PRIORITY_LEN 4
 /* The resulting bit of SRC^DST for indexing the table */
 #define       MC_CMD_RX_BALANCING_IN_SRC_DST_OFST 8
+#define       MC_CMD_RX_BALANCING_IN_SRC_DST_LEN 4
+/* The RX engine to which the vFIFO in the table entry will point */
 #define       MC_CMD_RX_BALANCING_IN_ENG_OFST 12
+#define       MC_CMD_RX_BALANCING_IN_ENG_LEN 4
 
 /* MC_CMD_RX_BALANCING_OUT msgresponse */
 #define    MC_CMD_RX_BALANCING_OUT_LEN 0
@@ -11976,8 +14103,10 @@
 #define    MC_CMD_NVRAM_PRIVATE_APPEND_IN_LEN(num) (8+1*(num))
 /* The tag to be appended */
 #define       MC_CMD_NVRAM_PRIVATE_APPEND_IN_TAG_OFST 0
+#define       MC_CMD_NVRAM_PRIVATE_APPEND_IN_TAG_LEN 4
 /* The length of the data */
 #define       MC_CMD_NVRAM_PRIVATE_APPEND_IN_LENGTH_OFST 4
+#define       MC_CMD_NVRAM_PRIVATE_APPEND_IN_LENGTH_LEN 4
 /* The data to be contained in the TLV structure */
 #define       MC_CMD_NVRAM_PRIVATE_APPEND_IN_DATA_BUFFER_OFST 8
 #define       MC_CMD_NVRAM_PRIVATE_APPEND_IN_DATA_BUFFER_LEN 1
@@ -12002,6 +14131,7 @@
 #define    MC_CMD_XPM_VERIFY_CONTENTS_IN_LEN 4
 /* Data type to be checked */
 #define       MC_CMD_XPM_VERIFY_CONTENTS_IN_DATA_TYPE_OFST 0
+#define       MC_CMD_XPM_VERIFY_CONTENTS_IN_DATA_TYPE_LEN 4
 
 /* MC_CMD_XPM_VERIFY_CONTENTS_OUT msgresponse */
 #define    MC_CMD_XPM_VERIFY_CONTENTS_OUT_LENMIN 12
@@ -12009,10 +14139,13 @@
 #define    MC_CMD_XPM_VERIFY_CONTENTS_OUT_LEN(num) (12+1*(num))
 /* Number of sectors found (test builds only) */
 #define       MC_CMD_XPM_VERIFY_CONTENTS_OUT_NUM_SECTORS_OFST 0
+#define       MC_CMD_XPM_VERIFY_CONTENTS_OUT_NUM_SECTORS_LEN 4
 /* Number of bytes found (test builds only) */
 #define       MC_CMD_XPM_VERIFY_CONTENTS_OUT_NUM_BYTES_OFST 4
+#define       MC_CMD_XPM_VERIFY_CONTENTS_OUT_NUM_BYTES_LEN 4
 /* Length of signature */
 #define       MC_CMD_XPM_VERIFY_CONTENTS_OUT_SIG_LENGTH_OFST 8
+#define       MC_CMD_XPM_VERIFY_CONTENTS_OUT_SIG_LENGTH_LEN 4
 /* Signature */
 #define       MC_CMD_XPM_VERIFY_CONTENTS_OUT_SIGNATURE_OFST 12
 #define       MC_CMD_XPM_VERIFY_CONTENTS_OUT_SIGNATURE_LEN 1
@@ -12037,12 +14170,16 @@
 #define    MC_CMD_SET_EVQ_TMR_IN_LEN 16
 /* Function-relative queue instance */
 #define       MC_CMD_SET_EVQ_TMR_IN_INSTANCE_OFST 0
+#define       MC_CMD_SET_EVQ_TMR_IN_INSTANCE_LEN 4
 /* Requested value for timer load (in nanoseconds) */
 #define       MC_CMD_SET_EVQ_TMR_IN_TMR_LOAD_REQ_NS_OFST 4
+#define       MC_CMD_SET_EVQ_TMR_IN_TMR_LOAD_REQ_NS_LEN 4
 /* Requested value for timer reload (in nanoseconds) */
 #define       MC_CMD_SET_EVQ_TMR_IN_TMR_RELOAD_REQ_NS_OFST 8
+#define       MC_CMD_SET_EVQ_TMR_IN_TMR_RELOAD_REQ_NS_LEN 4
 /* Timer mode. Meanings as per EVQ_TMR_REG.TC_TIMER_VAL */
 #define       MC_CMD_SET_EVQ_TMR_IN_TMR_MODE_OFST 12
+#define       MC_CMD_SET_EVQ_TMR_IN_TMR_MODE_LEN 4
 #define          MC_CMD_SET_EVQ_TMR_IN_TIMER_MODE_DIS  0x0 /* enum */
 #define          MC_CMD_SET_EVQ_TMR_IN_TIMER_MODE_IMMED_START  0x1 /* enum */
 #define          MC_CMD_SET_EVQ_TMR_IN_TIMER_MODE_TRIG_START  0x2 /* enum */
@@ -12052,8 +14189,10 @@
 #define    MC_CMD_SET_EVQ_TMR_OUT_LEN 8
 /* Actual value for timer load (in nanoseconds) */
 #define       MC_CMD_SET_EVQ_TMR_OUT_TMR_LOAD_ACT_NS_OFST 0
+#define       MC_CMD_SET_EVQ_TMR_OUT_TMR_LOAD_ACT_NS_LEN 4
 /* Actual value for timer reload (in nanoseconds) */
 #define       MC_CMD_SET_EVQ_TMR_OUT_TMR_RELOAD_ACT_NS_OFST 4
+#define       MC_CMD_SET_EVQ_TMR_OUT_TMR_RELOAD_ACT_NS_LEN 4
 
 
 /***********************************/
@@ -12071,29 +14210,35 @@
 #define    MC_CMD_GET_EVQ_TMR_PROPERTIES_OUT_LEN 36
 /* Reserved for future use. */
 #define       MC_CMD_GET_EVQ_TMR_PROPERTIES_OUT_FLAGS_OFST 0
+#define       MC_CMD_GET_EVQ_TMR_PROPERTIES_OUT_FLAGS_LEN 4
 /* For timers updated via writes to EVQ_TMR_REG, this is the time interval (in
  * nanoseconds) for each increment of the timer load/reload count. The
  * requested duration of a timer is this value multiplied by the timer
  * load/reload count.
  */
 #define       MC_CMD_GET_EVQ_TMR_PROPERTIES_OUT_TMR_REG_NS_PER_COUNT_OFST 4
+#define       MC_CMD_GET_EVQ_TMR_PROPERTIES_OUT_TMR_REG_NS_PER_COUNT_LEN 4
 /* For timers updated via writes to EVQ_TMR_REG, this is the maximum value
  * allowed for timer load/reload counts.
  */
 #define       MC_CMD_GET_EVQ_TMR_PROPERTIES_OUT_TMR_REG_MAX_COUNT_OFST 8
+#define       MC_CMD_GET_EVQ_TMR_PROPERTIES_OUT_TMR_REG_MAX_COUNT_LEN 4
 /* For timers updated via writes to EVQ_TMR_REG, timer load/reload counts not a
  * multiple of this step size will be rounded in an implementation defined
  * manner.
  */
 #define       MC_CMD_GET_EVQ_TMR_PROPERTIES_OUT_TMR_REG_STEP_OFST 12
+#define       MC_CMD_GET_EVQ_TMR_PROPERTIES_OUT_TMR_REG_STEP_LEN 4
 /* Maximum timer duration (in nanoseconds) for timers updated via MCDI. Only
  * meaningful if MC_CMD_SET_EVQ_TMR is implemented.
  */
 #define       MC_CMD_GET_EVQ_TMR_PROPERTIES_OUT_MCDI_TMR_MAX_NS_OFST 16
+#define       MC_CMD_GET_EVQ_TMR_PROPERTIES_OUT_MCDI_TMR_MAX_NS_LEN 4
 /* Timer durations requested via MCDI that are not a multiple of this step size
  * will be rounded up. Only meaningful if MC_CMD_SET_EVQ_TMR is implemented.
  */
 #define       MC_CMD_GET_EVQ_TMR_PROPERTIES_OUT_MCDI_TMR_STEP_NS_OFST 20
+#define       MC_CMD_GET_EVQ_TMR_PROPERTIES_OUT_MCDI_TMR_STEP_NS_LEN 4
 /* For timers updated using the bug35388 workaround, this is the time interval
  * (in nanoseconds) for each increment of the timer load/reload count. The
  * requested duration of a timer is this value multiplied by the timer
@@ -12101,17 +14246,20 @@
  * is enabled.
  */
 #define       MC_CMD_GET_EVQ_TMR_PROPERTIES_OUT_BUG35388_TMR_NS_PER_COUNT_OFST 24
+#define       MC_CMD_GET_EVQ_TMR_PROPERTIES_OUT_BUG35388_TMR_NS_PER_COUNT_LEN 4
 /* For timers updated using the bug35388 workaround, this is the maximum value
  * allowed for timer load/reload counts. This field is only meaningful if the
  * bug35388 workaround is enabled.
  */
 #define       MC_CMD_GET_EVQ_TMR_PROPERTIES_OUT_BUG35388_TMR_MAX_COUNT_OFST 28
+#define       MC_CMD_GET_EVQ_TMR_PROPERTIES_OUT_BUG35388_TMR_MAX_COUNT_LEN 4
 /* For timers updated using the bug35388 workaround, timer load/reload counts
  * not a multiple of this step size will be rounded in an implementation
  * defined manner. This field is only meaningful if the bug35388 workaround is
  * enabled.
  */
 #define       MC_CMD_GET_EVQ_TMR_PROPERTIES_OUT_BUG35388_TMR_STEP_OFST 32
+#define       MC_CMD_GET_EVQ_TMR_PROPERTIES_OUT_BUG35388_TMR_STEP_LEN 4
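
These properties let a driver turn a requested moderation interval into a register count: the duration is the count multiplied by TMR_REG_NS_PER_COUNT, with counts rounded to TMR_REG_STEP and capped at TMR_REG_MAX_COUNT. A conversion sketch that rounds up (the hardware's own rounding is implementation defined, as noted above):

static uint32_t evq_timer_count(uint32_t ns, uint32_t ns_per_count,
				uint32_t step, uint32_t max_count)
{
	uint32_t count = (ns + ns_per_count - 1) / ns_per_count;

	count = (count + step - 1) / step * step;	/* round up to step */
	return count > max_count ? max_count : count;
}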
 
 
 /***********************************/
@@ -12129,19 +14277,24 @@
  * local queue index.
  */
 #define       MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_INSTANCE_OFST 0
+#define       MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_INSTANCE_LEN 4
 /* Will the common pool be used as TX_vFIFO_ULL (1) */
 #define       MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_MODE_OFST 4
+#define       MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_MODE_LEN 4
 #define          MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_ENABLED       0x1 /* enum */
 /* enum: Using this interface without TX_vFIFO_ULL is not supported for now */
 #define          MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_DISABLED      0x0
 /* Number of buffers to reserve for the common pool */
 #define       MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_SIZE_OFST 8
+#define       MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_SIZE_LEN 4
 /* TX datapath to which the Common Pool is connected. */
 #define       MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_INGRESS_OFST 12
+#define       MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_INGRESS_LEN 4
 /* enum: Extracts information from function */
 #define          MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_USE_FUNCTION_VALUE          -0x1
 /* Network port or RX Engine to which the common pool connects. */
 #define       MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_EGRESS_OFST 16
+#define       MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_EGRESS_LEN 4
 /* enum: Extracts information from function */
 /*               MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_USE_FUNCTION_VALUE          -0x1 */
 #define          MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_PORT0          0x0 /* enum */
@@ -12157,6 +14310,7 @@
 #define    MC_CMD_ALLOCATE_TX_VFIFO_CP_OUT_LEN 4
 /* ID of the common pool allocated */
 #define       MC_CMD_ALLOCATE_TX_VFIFO_CP_OUT_CP_ID_OFST 0
+#define       MC_CMD_ALLOCATE_TX_VFIFO_CP_OUT_CP_ID_LEN 4
 
 
 /***********************************/
@@ -12173,8 +14327,10 @@
 /* Previously allocated common pool with which the new vFIFO will be
 * associated
 */
 #define       MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_CP_OFST 0
+#define       MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_CP_LEN 4
 /* Port or RX engine to associate the vFIFO egress */
 #define       MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_EGRESS_OFST 4
+#define       MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_EGRESS_LEN 4
 /* enum: Extracts information from common pool */
 #define          MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_USE_CP_VALUE   -0x1
 #define          MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_PORT0          0x0 /* enum */
@@ -12187,12 +14343,15 @@
 #define          MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_RX_ENGINE1     0x5
 /* Minimum number of buffers that the pool must have */
 #define       MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_SIZE_OFST 8
+#define       MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_SIZE_LEN 4
 /* enum: Do not check the space available */
 #define          MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_NO_MINIMUM     0x0
 /* Will the vFIFO be used as TX_vFIFO_ULL */
 #define       MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_MODE_OFST 12
+#define       MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_MODE_LEN 4
+/* Network priority of the vFIFO, if applicable */
 #define       MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_PRIORITY_OFST 16
+#define       MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_PRIORITY_LEN 4
 /* enum: Search for the lowest unused priority */
 #define          MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_LOWEST_AVAILABLE  -0x1
 
@@ -12200,8 +14359,10 @@
 #define    MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_OUT_LEN 8
 /* Short vFIFO ID */
 #define       MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_OUT_VID_OFST 0
+#define       MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_OUT_VID_LEN 4
 /* Network priority of the vFIFO */
 #define       MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_OUT_PRIORITY_OFST 4
+#define       MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_OUT_PRIORITY_LEN 4
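
The two allocation commands pair with the teardown commands that follow: allocate a common pool, allocate a vFIFO against it, and undo in reverse order (TEARDOWN_TX_VFIFO_VF, then DEALLOCATE_TX_VFIFO_CP). A sketch of the vFIFO step, assuming a hypothetical mcdi_rpc() transport, the earlier put_le32/get_le32 helpers, a cp_id already returned by ALLOCATE_TX_VFIFO_CP, and the command opcode and IN_LEN defines (cut from this hunk) in scope:

#include <stddef.h>

/* Hypothetical transport: issue an MCDI command, return 0 on success. */
int mcdi_rpc(unsigned int cmd, const uint8_t *in, size_t inlen,
	     uint8_t *out, size_t outlen);

static int alloc_vfifo(uint32_t cp_id, uint32_t *vid)
{
	uint8_t in[MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_LEN] = { 0 };
	uint8_t out[MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_OUT_LEN];
	int rc;

	put_le32(in, MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_CP_OFST, cp_id);
	/* Take egress from the pool, impose no size floor, let the firmware
	 * pick the lowest free priority; MODE is left at zero in this sketch.
	 */
	put_le32(in, MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_EGRESS_OFST,
		 MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_USE_CP_VALUE);
	put_le32(in, MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_SIZE_OFST,
		 MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_NO_MINIMUM);
	put_le32(in, MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_PRIORITY_OFST,
		 MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_LOWEST_AVAILABLE);
	rc = mcdi_rpc(MC_CMD_ALLOCATE_TX_VFIFO_VFIFO, in, sizeof(in),
		      out, sizeof(out));
	if (rc)
		return rc;
	*vid = get_le32(out, MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_OUT_VID_OFST);
	return 0;
}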
 
 
 /***********************************/
@@ -12217,6 +14378,7 @@
 #define    MC_CMD_TEARDOWN_TX_VFIFO_VF_IN_LEN 4
 /* Short vFIFO ID */
 #define       MC_CMD_TEARDOWN_TX_VFIFO_VF_IN_VFIFO_OFST 0
+#define       MC_CMD_TEARDOWN_TX_VFIFO_VF_IN_VFIFO_LEN 4
 
 /* MC_CMD_TEARDOWN_TX_VFIFO_VF_OUT msgresponse */
 #define    MC_CMD_TEARDOWN_TX_VFIFO_VF_OUT_LEN 0
@@ -12235,6 +14397,7 @@
 #define    MC_CMD_DEALLOCATE_TX_VFIFO_CP_IN_LEN 4
 /* Common pool ID given when pool allocated */
 #define       MC_CMD_DEALLOCATE_TX_VFIFO_CP_IN_POOL_ID_OFST 0
+#define       MC_CMD_DEALLOCATE_TX_VFIFO_CP_IN_POOL_ID_LEN 4
 
 /* MC_CMD_DEALLOCATE_TX_VFIFO_CP_OUT msgresponse */
 #define    MC_CMD_DEALLOCATE_TX_VFIFO_CP_OUT_LEN 0
@@ -12256,8 +14419,10 @@
 #define    MC_CMD_SWITCH_GET_UNASSIGNED_BUFFERS_OUT_LEN 8
 /* Available buffers for the ENG to NET vFIFOs. */
 #define       MC_CMD_SWITCH_GET_UNASSIGNED_BUFFERS_OUT_NET_OFST 0
+#define       MC_CMD_SWITCH_GET_UNASSIGNED_BUFFERS_OUT_NET_LEN 4
 /* Available buffers for the ENG to ENG and NET to ENG vFIFOs. */
 #define       MC_CMD_SWITCH_GET_UNASSIGNED_BUFFERS_OUT_ENG_OFST 4
+#define       MC_CMD_SWITCH_GET_UNASSIGNED_BUFFERS_OUT_ENG_LEN 4
 
 
 #endif /* MCDI_PCOL_H */
diff --git a/drivers/net/ethernet/sfc/mcdi_port.c b/drivers/net/ethernet/sfc/mcdi_port.c
index 6e1f282..ce8aabf 100644
--- a/drivers/net/ethernet/sfc/mcdi_port.c
+++ b/drivers/net/ethernet/sfc/mcdi_port.c
@@ -171,89 +171,108 @@ static int efx_mcdi_mdio_write(struct net_device *net_dev,
 	return 0;
 }
 
-static u32 mcdi_to_ethtool_cap(u32 media, u32 cap)
+static void mcdi_to_ethtool_linkset(u32 media, u32 cap, unsigned long *linkset)
 {
-	u32 result = 0;
+	#define SET_BIT(name)	__set_bit(ETHTOOL_LINK_MODE_ ## name ## _BIT, \
+					  linkset)
 
+	bitmap_zero(linkset, __ETHTOOL_LINK_MODE_MASK_NBITS);
 	switch (media) {
 	case MC_CMD_MEDIA_KX4:
-		result |= SUPPORTED_Backplane;
+		SET_BIT(Backplane);
 		if (cap & (1 << MC_CMD_PHY_CAP_1000FDX_LBN))
-			result |= SUPPORTED_1000baseKX_Full;
+			SET_BIT(1000baseKX_Full);
 		if (cap & (1 << MC_CMD_PHY_CAP_10000FDX_LBN))
-			result |= SUPPORTED_10000baseKX4_Full;
+			SET_BIT(10000baseKX4_Full);
 		if (cap & (1 << MC_CMD_PHY_CAP_40000FDX_LBN))
-			result |= SUPPORTED_40000baseKR4_Full;
+			SET_BIT(40000baseKR4_Full);
 		break;
 
 	case MC_CMD_MEDIA_XFP:
 	case MC_CMD_MEDIA_SFP_PLUS:
 	case MC_CMD_MEDIA_QSFP_PLUS:
-		result |= SUPPORTED_FIBRE;
+		SET_BIT(FIBRE);
 		if (cap & (1 << MC_CMD_PHY_CAP_1000FDX_LBN))
-			result |= SUPPORTED_1000baseT_Full;
+			SET_BIT(1000baseT_Full);
 		if (cap & (1 << MC_CMD_PHY_CAP_10000FDX_LBN))
-			result |= SUPPORTED_10000baseT_Full;
+			SET_BIT(10000baseT_Full);
 		if (cap & (1 << MC_CMD_PHY_CAP_40000FDX_LBN))
-			result |= SUPPORTED_40000baseCR4_Full;
+			SET_BIT(40000baseCR4_Full);
+		if (cap & (1 << MC_CMD_PHY_CAP_100000FDX_LBN))
+			SET_BIT(100000baseCR4_Full);
+		if (cap & (1 << MC_CMD_PHY_CAP_25000FDX_LBN))
+			SET_BIT(25000baseCR_Full);
+		if (cap & (1 << MC_CMD_PHY_CAP_50000FDX_LBN))
+			SET_BIT(50000baseCR2_Full);
 		break;
 
 	case MC_CMD_MEDIA_BASE_T:
-		result |= SUPPORTED_TP;
+		SET_BIT(TP);
 		if (cap & (1 << MC_CMD_PHY_CAP_10HDX_LBN))
-			result |= SUPPORTED_10baseT_Half;
+			SET_BIT(10baseT_Half);
 		if (cap & (1 << MC_CMD_PHY_CAP_10FDX_LBN))
-			result |= SUPPORTED_10baseT_Full;
+			SET_BIT(10baseT_Full);
 		if (cap & (1 << MC_CMD_PHY_CAP_100HDX_LBN))
-			result |= SUPPORTED_100baseT_Half;
+			SET_BIT(100baseT_Half);
 		if (cap & (1 << MC_CMD_PHY_CAP_100FDX_LBN))
-			result |= SUPPORTED_100baseT_Full;
+			SET_BIT(100baseT_Full);
 		if (cap & (1 << MC_CMD_PHY_CAP_1000HDX_LBN))
-			result |= SUPPORTED_1000baseT_Half;
+			SET_BIT(1000baseT_Half);
 		if (cap & (1 << MC_CMD_PHY_CAP_1000FDX_LBN))
-			result |= SUPPORTED_1000baseT_Full;
+			SET_BIT(1000baseT_Full);
 		if (cap & (1 << MC_CMD_PHY_CAP_10000FDX_LBN))
-			result |= SUPPORTED_10000baseT_Full;
+			SET_BIT(10000baseT_Full);
 		break;
 	}
 
 	if (cap & (1 << MC_CMD_PHY_CAP_PAUSE_LBN))
-		result |= SUPPORTED_Pause;
+		SET_BIT(Pause);
 	if (cap & (1 << MC_CMD_PHY_CAP_ASYM_LBN))
-		result |= SUPPORTED_Asym_Pause;
+		SET_BIT(Asym_Pause);
 	if (cap & (1 << MC_CMD_PHY_CAP_AN_LBN))
-		result |= SUPPORTED_Autoneg;
+		SET_BIT(Autoneg);
 
-	return result;
+	#undef SET_BIT
 }
 
-static u32 ethtool_to_mcdi_cap(u32 cap)
+static u32 ethtool_linkset_to_mcdi_cap(const unsigned long *linkset)
 {
 	u32 result = 0;
 
-	if (cap & SUPPORTED_10baseT_Half)
+	#define TEST_BIT(name)	test_bit(ETHTOOL_LINK_MODE_ ## name ## _BIT, \
+					 linkset)
+
+	if (TEST_BIT(10baseT_Half))
 		result |= (1 << MC_CMD_PHY_CAP_10HDX_LBN);
-	if (cap & SUPPORTED_10baseT_Full)
+	if (TEST_BIT(10baseT_Full))
 		result |= (1 << MC_CMD_PHY_CAP_10FDX_LBN);
-	if (cap & SUPPORTED_100baseT_Half)
+	if (TEST_BIT(100baseT_Half))
 		result |= (1 << MC_CMD_PHY_CAP_100HDX_LBN);
-	if (cap & SUPPORTED_100baseT_Full)
+	if (TEST_BIT(100baseT_Full))
 		result |= (1 << MC_CMD_PHY_CAP_100FDX_LBN);
-	if (cap & SUPPORTED_1000baseT_Half)
+	if (TEST_BIT(1000baseT_Half))
 		result |= (1 << MC_CMD_PHY_CAP_1000HDX_LBN);
-	if (cap & (SUPPORTED_1000baseT_Full | SUPPORTED_1000baseKX_Full))
+	if (TEST_BIT(1000baseT_Full) || TEST_BIT(1000baseKX_Full))
 		result |= (1 << MC_CMD_PHY_CAP_1000FDX_LBN);
-	if (cap & (SUPPORTED_10000baseT_Full | SUPPORTED_10000baseKX4_Full))
+	if (TEST_BIT(10000baseT_Full) || TEST_BIT(10000baseKX4_Full))
 		result |= (1 << MC_CMD_PHY_CAP_10000FDX_LBN);
-	if (cap & (SUPPORTED_40000baseCR4_Full | SUPPORTED_40000baseKR4_Full))
+	if (TEST_BIT(40000baseCR4_Full) || TEST_BIT(40000baseKR4_Full))
 		result |= (1 << MC_CMD_PHY_CAP_40000FDX_LBN);
-	if (cap & SUPPORTED_Pause)
+	if (TEST_BIT(100000baseCR4_Full))
+		result |= (1 << MC_CMD_PHY_CAP_100000FDX_LBN);
+	if (TEST_BIT(25000baseCR_Full))
+		result |= (1 << MC_CMD_PHY_CAP_25000FDX_LBN);
+	if (TEST_BIT(50000baseCR2_Full))
+		result |= (1 << MC_CMD_PHY_CAP_50000FDX_LBN);
+	if (TEST_BIT(Pause))
 		result |= (1 << MC_CMD_PHY_CAP_PAUSE_LBN);
-	if (cap & SUPPORTED_Asym_Pause)
+	if (TEST_BIT(Asym_Pause))
 		result |= (1 << MC_CMD_PHY_CAP_ASYM_LBN);
-	if (cap & SUPPORTED_Autoneg)
+	if (TEST_BIT(Autoneg))
 		result |= (1 << MC_CMD_PHY_CAP_AN_LBN);
 
+	#undef TEST_BIT
+
 	return result;
 }
 
@@ -285,7 +304,7 @@ static u32 efx_get_mcdi_phy_flags(struct efx_nic *efx)
 	return flags;
 }
 
-static u32 mcdi_to_ethtool_media(u32 media)
+static u8 mcdi_to_ethtool_media(u32 media)
 {
 	switch (media) {
 	case MC_CMD_MEDIA_XAUI:
@@ -371,8 +390,8 @@ static int efx_mcdi_phy_probe(struct efx_nic *efx)
 
 	caps = MCDI_DWORD(outbuf, GET_LINK_OUT_CAP);
 	if (caps & (1 << MC_CMD_PHY_CAP_AN_LBN))
-		efx->link_advertising =
-			mcdi_to_ethtool_cap(phy_data->media, caps);
+		mcdi_to_ethtool_linkset(phy_data->media, caps,
+					efx->link_advertising);
 	else
 		phy_data->forced_cap = caps;
 
@@ -435,8 +454,8 @@ static int efx_mcdi_phy_probe(struct efx_nic *efx)
 int efx_mcdi_port_reconfigure(struct efx_nic *efx)
 {
 	struct efx_mcdi_phy_data *phy_cfg = efx->phy_data;
-	u32 caps = (efx->link_advertising ?
-		    ethtool_to_mcdi_cap(efx->link_advertising) :
+	u32 caps = (efx->link_advertising[0] ?
+		    ethtool_linkset_to_mcdi_cap(efx->link_advertising) :
 		    phy_cfg->forced_cap);
 
 	return efx_mcdi_set_link(efx, caps, efx_get_mcdi_phy_flags(efx),
@@ -509,34 +528,28 @@ static void efx_mcdi_phy_get_link_ksettings(struct efx_nic *efx,
 	struct efx_mcdi_phy_data *phy_cfg = efx->phy_data;
 	MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_LINK_OUT_LEN);
 	int rc;
-	u32 supported, advertising, lp_advertising;
 
-	supported = mcdi_to_ethtool_cap(phy_cfg->media, phy_cfg->supported_cap);
-	advertising = efx->link_advertising;
 	cmd->base.speed = efx->link_state.speed;
 	cmd->base.duplex = efx->link_state.fd;
 	cmd->base.port = mcdi_to_ethtool_media(phy_cfg->media);
 	cmd->base.phy_address = phy_cfg->port;
-	cmd->base.autoneg = !!(efx->link_advertising & ADVERTISED_Autoneg);
+	cmd->base.autoneg = !!(efx->link_advertising[0] & ADVERTISED_Autoneg);
 	cmd->base.mdio_support = (efx->mdio.mode_support &
 			      (MDIO_SUPPORTS_C45 | MDIO_SUPPORTS_C22));
 
-	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
-						supported);
-	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
-						advertising);
+	mcdi_to_ethtool_linkset(phy_cfg->media, phy_cfg->supported_cap,
+				cmd->link_modes.supported);
+	memcpy(cmd->link_modes.advertising, efx->link_advertising,
+	       sizeof(__ETHTOOL_DECLARE_LINK_MODE_MASK()));
 
 	BUILD_BUG_ON(MC_CMD_GET_LINK_IN_LEN != 0);
 	rc = efx_mcdi_rpc(efx, MC_CMD_GET_LINK, NULL, 0,
 			  outbuf, sizeof(outbuf), NULL);
 	if (rc)
 		return;
-	lp_advertising =
-		mcdi_to_ethtool_cap(phy_cfg->media,
-				    MCDI_DWORD(outbuf, GET_LINK_OUT_LP_CAP));
-
-	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.lp_advertising,
-						lp_advertising);
+	mcdi_to_ethtool_linkset(phy_cfg->media,
+				MCDI_DWORD(outbuf, GET_LINK_OUT_LP_CAP),
+				cmd->link_modes.lp_advertising);
 }
 
 static int
@@ -546,29 +559,28 @@ efx_mcdi_phy_set_link_ksettings(struct efx_nic *efx,
 	struct efx_mcdi_phy_data *phy_cfg = efx->phy_data;
 	u32 caps;
 	int rc;
-	u32 advertising;
-
-	ethtool_convert_link_mode_to_legacy_u32(&advertising,
-						cmd->link_modes.advertising);
 
 	if (cmd->base.autoneg) {
-		caps = (ethtool_to_mcdi_cap(advertising) |
-			 1 << MC_CMD_PHY_CAP_AN_LBN);
+		caps = (ethtool_linkset_to_mcdi_cap(cmd->link_modes.advertising) |
+			1 << MC_CMD_PHY_CAP_AN_LBN);
 	} else if (cmd->base.duplex) {
 		switch (cmd->base.speed) {
-		case 10:    caps = 1 << MC_CMD_PHY_CAP_10FDX_LBN;    break;
-		case 100:   caps = 1 << MC_CMD_PHY_CAP_100FDX_LBN;   break;
-		case 1000:  caps = 1 << MC_CMD_PHY_CAP_1000FDX_LBN;  break;
-		case 10000: caps = 1 << MC_CMD_PHY_CAP_10000FDX_LBN; break;
-		case 40000: caps = 1 << MC_CMD_PHY_CAP_40000FDX_LBN; break;
-		default:    return -EINVAL;
+		case 10:     caps = 1 << MC_CMD_PHY_CAP_10FDX_LBN;     break;
+		case 100:    caps = 1 << MC_CMD_PHY_CAP_100FDX_LBN;    break;
+		case 1000:   caps = 1 << MC_CMD_PHY_CAP_1000FDX_LBN;   break;
+		case 10000:  caps = 1 << MC_CMD_PHY_CAP_10000FDX_LBN;  break;
+		case 40000:  caps = 1 << MC_CMD_PHY_CAP_40000FDX_LBN;  break;
+		case 100000: caps = 1 << MC_CMD_PHY_CAP_100000FDX_LBN; break;
+		case 25000:  caps = 1 << MC_CMD_PHY_CAP_25000FDX_LBN;  break;
+		case 50000:  caps = 1 << MC_CMD_PHY_CAP_50000FDX_LBN;  break;
+		default:     return -EINVAL;
 		}
 	} else {
 		switch (cmd->base.speed) {
-		case 10:    caps = 1 << MC_CMD_PHY_CAP_10HDX_LBN;    break;
-		case 100:   caps = 1 << MC_CMD_PHY_CAP_100HDX_LBN;   break;
-		case 1000:  caps = 1 << MC_CMD_PHY_CAP_1000HDX_LBN;  break;
-		default:    return -EINVAL;
+		case 10:     caps = 1 << MC_CMD_PHY_CAP_10HDX_LBN;     break;
+		case 100:    caps = 1 << MC_CMD_PHY_CAP_100HDX_LBN;    break;
+		case 1000:   caps = 1 << MC_CMD_PHY_CAP_1000HDX_LBN;   break;
+		default:     return -EINVAL;
 		}
 	}
 
@@ -578,11 +590,10 @@ efx_mcdi_phy_set_link_ksettings(struct efx_nic *efx,
 		return rc;
 
 	if (cmd->base.autoneg) {
-		efx_link_set_advertising(
-			efx, advertising | ADVERTISED_Autoneg);
+		efx_link_set_advertising(efx, cmd->link_modes.advertising);
 		phy_cfg->forced_cap = 0;
 	} else {
-		efx_link_set_advertising(efx, 0);
+		efx_link_clear_advertising(efx);
 		phy_cfg->forced_cap = caps;
 	}
 	return 0;
@@ -985,6 +996,9 @@ static unsigned int efx_mcdi_event_link_speed[] = {
 	[MCDI_EVENT_LINKCHANGE_SPEED_1G] = 1000,
 	[MCDI_EVENT_LINKCHANGE_SPEED_10G] = 10000,
 	[MCDI_EVENT_LINKCHANGE_SPEED_40G] = 40000,
+	[MCDI_EVENT_LINKCHANGE_SPEED_25G] = 25000,
+	[MCDI_EVENT_LINKCHANGE_SPEED_50G] = 50000,
+	[MCDI_EVENT_LINKCHANGE_SPEED_100G] = 100000,
 };
 
 void efx_mcdi_process_link_change(struct efx_nic *efx, efx_qword_t *ev)
@@ -1087,7 +1101,7 @@ static int efx_mcdi_mac_stats(struct efx_nic *efx,
 	int period = action == EFX_STATS_ENABLE ? 1000 : 0;
 	dma_addr_t dma_addr = efx->stats_buffer.dma_addr;
 	u32 dma_len = action != EFX_STATS_DISABLE ?
-		MC_CMD_MAC_NSTATS * sizeof(u64) : 0;
+		efx->num_mac_stats * sizeof(u64) : 0;
 
 	BUILD_BUG_ON(MC_CMD_MAC_STATS_OUT_DMA_LEN != 0);
 
@@ -1121,7 +1135,7 @@ void efx_mcdi_mac_start_stats(struct efx_nic *efx)
 {
 	__le64 *dma_stats = efx->stats_buffer.addr;
 
-	dma_stats[MC_CMD_MAC_GENERATION_END] = EFX_MC_STATS_GENERATION_INVALID;
+	dma_stats[efx->num_mac_stats - 1] = EFX_MC_STATS_GENERATION_INVALID;
 
 	efx_mcdi_mac_stats(efx, EFX_STATS_ENABLE, 0);
 }
@@ -1139,10 +1153,10 @@ void efx_mcdi_mac_pull_stats(struct efx_nic *efx)
 	__le64 *dma_stats = efx->stats_buffer.addr;
 	int attempts = EFX_MAC_STATS_WAIT_ATTEMPTS;
 
-	dma_stats[MC_CMD_MAC_GENERATION_END] = EFX_MC_STATS_GENERATION_INVALID;
+	dma_stats[efx->num_mac_stats - 1] = EFX_MC_STATS_GENERATION_INVALID;
 	efx_mcdi_mac_stats(efx, EFX_STATS_PULL, 0);
 
-	while (dma_stats[MC_CMD_MAC_GENERATION_END] ==
+	while (dma_stats[efx->num_mac_stats - 1] ==
 				EFX_MC_STATS_GENERATION_INVALID &&
 			attempts-- != 0)
 		udelay(EFX_MAC_STATS_WAIT_US);
@@ -1167,7 +1181,7 @@ int efx_mcdi_port_probe(struct efx_nic *efx)
 
 	/* Allocate buffer for stats */
 	rc = efx_nic_alloc_buffer(efx, &efx->stats_buffer,
-				  MC_CMD_MAC_NSTATS * sizeof(u64), GFP_KERNEL);
+				  efx->num_mac_stats * sizeof(u64), GFP_KERNEL);
 	if (rc)
 		return rc;
 	netif_dbg(efx, probe, efx->net_dev,
diff --git a/drivers/net/ethernet/sfc/net_driver.h b/drivers/net/ethernet/sfc/net_driver.h
index c0537ea..3dd42f3 100644
--- a/drivers/net/ethernet/sfc/net_driver.h
+++ b/drivers/net/ethernet/sfc/net_driver.h
@@ -708,6 +708,7 @@ struct vfdi_status;
  * @reset_work: Scheduled reset workitem
  * @membase_phys: Memory BAR value as physical address
  * @membase: Memory BAR value
+ * @vi_stride: step between per-VI registers / memory regions
  * @interrupt_mode: Interrupt mode
  * @timer_quantum_ns: Interrupt timer quantum, in nanoseconds
  * @timer_max_ns: Interrupt timer maximum value, in nanoseconds
@@ -773,6 +774,8 @@ struct vfdi_status;
  * @port_initialized: Port initialized?
  * @net_dev: Operating system network device. Consider holding the rtnl lock
  * @fixed_features: Features which cannot be turned off
+ * @num_mac_stats: Number of MAC stats reported by firmware (MAC_STATS_NUM_STATS
+ *	field of %MC_CMD_GET_CAPABILITIES_V4 response, or %MC_CMD_MAC_NSTATS)
  * @stats_buffer: DMA buffer for statistics
  * @phy_type: PHY type
  * @phy_op: PHY interface
@@ -812,6 +815,7 @@ struct vfdi_status;
  * @vf_init_count: Number of VFs that have been fully initialised.
  * @vi_scale: log2 number of vnics per VF.
  * @ptp_data: PTP state data
+ * @ptp_warned: has this NIC seen and warned about unexpected PTP events?
  * @vpd_sn: Serial number read from VPD
  * @monitor_work: Hardware monitor workitem
  * @biu_lock: BIU (bus interface unit) lock
@@ -842,6 +846,8 @@ struct efx_nic {
 	resource_size_t membase_phys;
 	void __iomem *membase;
 
+	unsigned int vi_stride;
+
 	enum efx_int_mode interrupt_mode;
 	unsigned int timer_quantum_ns;
 	unsigned int timer_max_ns;
@@ -918,6 +924,7 @@ struct efx_nic {
 
 	netdev_features_t fixed_features;
 
+	u16 num_mac_stats;
 	struct efx_buffer stats_buffer;
 	u64 rx_nodesc_drops_total;
 	u64 rx_nodesc_drops_while_down;
@@ -930,7 +937,7 @@ struct efx_nic {
 	unsigned int mdio_bus;
 	enum efx_phy_mode phy_mode;
 
-	u32 link_advertising;
+	__ETHTOOL_DECLARE_LINK_MODE_MASK(link_advertising);
 	struct efx_link_state link_state;
 	unsigned int n_link_state_changes;
 
@@ -965,6 +972,7 @@ struct efx_nic {
 #endif
 
 	struct efx_ptp_data *ptp_data;
+	bool ptp_warned;
 
 	char *vpd_sn;
 
@@ -1154,7 +1162,7 @@ struct efx_udp_tunnel {
  */
 struct efx_nic_type {
 	bool is_vf;
-	unsigned int mem_bar;
+	unsigned int (*mem_bar)(struct efx_nic *efx);
 	unsigned int (*mem_map_size)(struct efx_nic *efx);
 	int (*probe)(struct efx_nic *efx);
 	void (*remove)(struct efx_nic *efx);
diff --git a/drivers/net/ethernet/sfc/nic.h b/drivers/net/ethernet/sfc/nic.h
index 7b51b63..7630522 100644
--- a/drivers/net/ethernet/sfc/nic.h
+++ b/drivers/net/ethernet/sfc/nic.h
@@ -325,6 +325,30 @@ enum {
 	EF10_STAT_tx_bad,
 	EF10_STAT_tx_bad_bytes,
 	EF10_STAT_tx_overflow,
+	EF10_STAT_V1_COUNT,
+	EF10_STAT_fec_uncorrected_errors = EF10_STAT_V1_COUNT,
+	EF10_STAT_fec_corrected_errors,
+	EF10_STAT_fec_corrected_symbols_lane0,
+	EF10_STAT_fec_corrected_symbols_lane1,
+	EF10_STAT_fec_corrected_symbols_lane2,
+	EF10_STAT_fec_corrected_symbols_lane3,
+	EF10_STAT_ctpio_dmabuf_start,
+	EF10_STAT_ctpio_vi_busy_fallback,
+	EF10_STAT_ctpio_long_write_success,
+	EF10_STAT_ctpio_missing_dbell_fail,
+	EF10_STAT_ctpio_overflow_fail,
+	EF10_STAT_ctpio_underflow_fail,
+	EF10_STAT_ctpio_timeout_fail,
+	EF10_STAT_ctpio_noncontig_wr_fail,
+	EF10_STAT_ctpio_frm_clobber_fail,
+	EF10_STAT_ctpio_invalid_wr_fail,
+	EF10_STAT_ctpio_vi_clobber_fallback,
+	EF10_STAT_ctpio_unqualified_fallback,
+	EF10_STAT_ctpio_runt_fallback,
+	EF10_STAT_ctpio_success,
+	EF10_STAT_ctpio_fallback,
+	EF10_STAT_ctpio_poison,
+	EF10_STAT_ctpio_erase,
 	EF10_STAT_COUNT
 };
 
diff --git a/drivers/net/ethernet/sfc/ptp.c b/drivers/net/ethernet/sfc/ptp.c
index caa89bf..3b37d7d 100644
--- a/drivers/net/ethernet/sfc/ptp.c
+++ b/drivers/net/ethernet/sfc/ptp.c
@@ -1662,9 +1662,11 @@ void efx_ptp_event(struct efx_nic *efx, efx_qword_t *ev)
 	int code = EFX_QWORD_FIELD(*ev, MCDI_EVENT_CODE);
 
 	if (!ptp) {
-		if (net_ratelimit())
+		if (!efx->ptp_warned) {
 			netif_warn(efx, drv, efx->net_dev,
 				   "Received PTP event but PTP not set up\n");
+			efx->ptp_warned = true;
+		}
 		return;
 	}
 
diff --git a/drivers/net/ethernet/sfc/siena.c b/drivers/net/ethernet/sfc/siena.c
index a617f65..ae8645a 100644
--- a/drivers/net/ethernet/sfc/siena.c
+++ b/drivers/net/ethernet/sfc/siena.c
@@ -242,6 +242,14 @@ static int siena_dimension_resources(struct efx_nic *efx)
 	return 0;
 }
 
+/* On all Falcon-architecture NICs, PFs use BAR 0 for I/O space and BAR 2(&3)
+ * for memory.
+ */
+static unsigned int siena_mem_bar(struct efx_nic *efx)
+{
+	return 2;
+}
+
 static unsigned int siena_mem_map_size(struct efx_nic *efx)
 {
 	return FR_CZ_MC_TREG_SMEM +
@@ -547,7 +555,7 @@ static int siena_try_update_nic_stats(struct efx_nic *efx)
 
 	dma_stats = efx->stats_buffer.addr;
 
-	generation_end = dma_stats[MC_CMD_MAC_GENERATION_END];
+	generation_end = dma_stats[efx->num_mac_stats - 1];
 	if (generation_end == EFX_MC_STATS_GENERATION_INVALID)
 		return 0;
 	rmb();
@@ -950,7 +958,7 @@ static int siena_mtd_probe(struct efx_nic *efx)
 
 const struct efx_nic_type siena_a0_nic_type = {
 	.is_vf = false,
-	.mem_bar = EFX_MEM_BAR,
+	.mem_bar = siena_mem_bar,
 	.mem_map_size = siena_mem_map_size,
 	.probe = siena_probe_nic,
 	.remove = siena_remove_nic,
diff --git a/drivers/net/ethernet/socionext/Kconfig b/drivers/net/ethernet/socionext/Kconfig
new file mode 100644
index 0000000..6bcfe27
--- /dev/null
+++ b/drivers/net/ethernet/socionext/Kconfig
@@ -0,0 +1,34 @@
+config NET_VENDOR_SOCIONEXT
+	bool "Socionext ethernet drivers"
+	default y
+	---help---
+	  Option to select ethernet drivers for Socionext platforms.
+
+	  Note that the answer to this question doesn't directly affect the
+	  kernel: saying N will just cause the configurator to skip all
+	  the questions about Socionext devices. If you say Y, you will be asked
+	  for your specific card in the following questions.
+
+if NET_VENDOR_SOCIONEXT
+
+config SNI_AVE
+	tristate "Socionext AVE ethernet support"
+	depends on (ARCH_UNIPHIER || COMPILE_TEST) && OF
+	select PHYLIB
+	---help---
+	  Driver for the AVE Gigabit Ethernet MACs found in the
+	  Socionext UniPhier family.
+
+config SNI_NETSEC
+	tristate "Socionext NETSEC ethernet support"
+	depends on (ARCH_SYNQUACER || COMPILE_TEST) && OF
+	select PHYLIB
+	select MII
+	---help---
+	  Enable this to add support for the SocioNext NetSec Gigabit
+	  Ethernet controller + PHY, as found on the Synquacer SC2A11 SoC.
+
+	  To compile this driver as a module, choose M here: the module will be
+	  called netsec.  If unsure, say N.
+
+endif #NET_VENDOR_SOCIONEXT
diff --git a/drivers/net/ethernet/socionext/Makefile b/drivers/net/ethernet/socionext/Makefile
new file mode 100644
index 0000000..7fd837a
--- /dev/null
+++ b/drivers/net/ethernet/socionext/Makefile
@@ -0,0 +1,6 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Makefile for all ethernet ip drivers on Socionext platforms
+#
+obj-$(CONFIG_SNI_AVE) += sni_ave.o
+obj-$(CONFIG_SNI_NETSEC) += netsec.o
diff --git a/drivers/net/ethernet/socionext/netsec.c b/drivers/net/ethernet/socionext/netsec.c
new file mode 100644
index 0000000..6c263af
--- /dev/null
+++ b/drivers/net/ethernet/socionext/netsec.c
@@ -0,0 +1,1777 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+#include <linux/types.h>
+#include <linux/clk.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/acpi.h>
+#include <linux/of_mdio.h>
+#include <linux/etherdevice.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+
+#include <net/tcp.h>
+#include <net/ip6_checksum.h>
+
+#define NETSEC_REG_SOFT_RST			0x104
+#define NETSEC_REG_COM_INIT			0x120
+
+#define NETSEC_REG_TOP_STATUS			0x200
+#define NETSEC_IRQ_RX				BIT(1)
+#define NETSEC_IRQ_TX				BIT(0)
+
+#define NETSEC_REG_TOP_INTEN			0x204
+#define NETSEC_REG_INTEN_SET			0x234
+#define NETSEC_REG_INTEN_CLR			0x238
+
+#define NETSEC_REG_NRM_TX_STATUS		0x400
+#define NETSEC_REG_NRM_TX_INTEN			0x404
+#define NETSEC_REG_NRM_TX_INTEN_SET		0x428
+#define NETSEC_REG_NRM_TX_INTEN_CLR		0x42c
+#define NRM_TX_ST_NTOWNR	BIT(17)
+#define NRM_TX_ST_TR_ERR	BIT(16)
+#define NRM_TX_ST_TXDONE	BIT(15)
+#define NRM_TX_ST_TMREXP	BIT(14)
+
+#define NETSEC_REG_NRM_RX_STATUS		0x440
+#define NETSEC_REG_NRM_RX_INTEN			0x444
+#define NETSEC_REG_NRM_RX_INTEN_SET		0x468
+#define NETSEC_REG_NRM_RX_INTEN_CLR		0x46c
+#define NRM_RX_ST_RC_ERR	BIT(16)
+#define NRM_RX_ST_PKTCNT	BIT(15)
+#define NRM_RX_ST_TMREXP	BIT(14)
+
+#define NETSEC_REG_PKT_CMD_BUF			0xd0
+
+#define NETSEC_REG_CLK_EN			0x100
+
+#define NETSEC_REG_PKT_CTRL			0x140
+
+#define NETSEC_REG_DMA_TMR_CTRL			0x20c
+#define NETSEC_REG_F_TAIKI_MC_VER		0x22c
+#define NETSEC_REG_F_TAIKI_VER			0x230
+#define NETSEC_REG_DMA_HM_CTRL			0x214
+#define NETSEC_REG_DMA_MH_CTRL			0x220
+#define NETSEC_REG_ADDR_DIS_CORE		0x218
+#define NETSEC_REG_DMAC_HM_CMD_BUF		0x210
+#define NETSEC_REG_DMAC_MH_CMD_BUF		0x21c
+
+#define NETSEC_REG_NRM_TX_PKTCNT		0x410
+
+#define NETSEC_REG_NRM_TX_DONE_PKTCNT		0x414
+#define NETSEC_REG_NRM_TX_DONE_TXINT_PKTCNT	0x418
+
+#define NETSEC_REG_NRM_TX_TMR			0x41c
+
+#define NETSEC_REG_NRM_RX_PKTCNT		0x454
+#define NETSEC_REG_NRM_RX_RXINT_PKTCNT		0x458
+#define NETSEC_REG_NRM_TX_TXINT_TMR		0x420
+#define NETSEC_REG_NRM_RX_RXINT_TMR		0x460
+
+#define NETSEC_REG_NRM_RX_TMR			0x45c
+
+#define NETSEC_REG_NRM_TX_DESC_START_UP		0x434
+#define NETSEC_REG_NRM_TX_DESC_START_LW		0x408
+#define NETSEC_REG_NRM_RX_DESC_START_UP		0x474
+#define NETSEC_REG_NRM_RX_DESC_START_LW		0x448
+
+#define NETSEC_REG_NRM_TX_CONFIG		0x430
+#define NETSEC_REG_NRM_RX_CONFIG		0x470
+
+#define MAC_REG_STATUS				0x1024
+#define MAC_REG_DATA				0x11c0
+#define MAC_REG_CMD				0x11c4
+#define MAC_REG_FLOW_TH				0x11cc
+#define MAC_REG_INTF_SEL			0x11d4
+#define MAC_REG_DESC_INIT			0x11fc
+#define MAC_REG_DESC_SOFT_RST			0x1204
+#define NETSEC_REG_MODE_TRANS_COMP_STATUS	0x500
+
+#define GMAC_REG_MCR				0x0000
+#define GMAC_REG_MFFR				0x0004
+#define GMAC_REG_GAR				0x0010
+#define GMAC_REG_GDR				0x0014
+#define GMAC_REG_FCR				0x0018
+#define GMAC_REG_BMR				0x1000
+#define GMAC_REG_RDLAR				0x100c
+#define GMAC_REG_TDLAR				0x1010
+#define GMAC_REG_OMR				0x1018
+
+#define MHZ(n)		((n) * 1000 * 1000)
+
+#define NETSEC_TX_SHIFT_OWN_FIELD		31
+#define NETSEC_TX_SHIFT_LD_FIELD		30
+#define NETSEC_TX_SHIFT_DRID_FIELD		24
+#define NETSEC_TX_SHIFT_PT_FIELD		21
+#define NETSEC_TX_SHIFT_TDRID_FIELD		16
+#define NETSEC_TX_SHIFT_CC_FIELD		15
+#define NETSEC_TX_SHIFT_FS_FIELD		9
+#define NETSEC_TX_LAST				8
+#define NETSEC_TX_SHIFT_CO			7
+#define NETSEC_TX_SHIFT_SO			6
+#define NETSEC_TX_SHIFT_TRS_FIELD		4
+
+#define NETSEC_RX_PKT_OWN_FIELD			31
+#define NETSEC_RX_PKT_LD_FIELD			30
+#define NETSEC_RX_PKT_SDRID_FIELD		24
+#define NETSEC_RX_PKT_FR_FIELD			23
+#define NETSEC_RX_PKT_ER_FIELD			21
+#define NETSEC_RX_PKT_ERR_FIELD			16
+#define NETSEC_RX_PKT_TDRID_FIELD		12
+#define NETSEC_RX_PKT_FS_FIELD			9
+#define NETSEC_RX_PKT_LS_FIELD			8
+#define NETSEC_RX_PKT_CO_FIELD			6
+
+#define NETSEC_RX_PKT_ERR_MASK			3
+
+#define NETSEC_MAX_TX_PKT_LEN			1518
+#define NETSEC_MAX_TX_JUMBO_PKT_LEN		9018
+
+#define NETSEC_RING_GMAC			15
+#define NETSEC_RING_MAX				2
+
+#define NETSEC_TCP_SEG_LEN_MAX			1460
+#define NETSEC_TCP_JUMBO_SEG_LEN_MAX		8960
+
+#define NETSEC_RX_CKSUM_NOTAVAIL		0
+#define NETSEC_RX_CKSUM_OK			1
+#define NETSEC_RX_CKSUM_NG			2
+
+#define NETSEC_TOP_IRQ_REG_CODE_LOAD_END	BIT(20)
+#define NETSEC_IRQ_TRANSITION_COMPLETE		BIT(4)
+
+#define NETSEC_MODE_TRANS_COMP_IRQ_N2T		BIT(20)
+#define NETSEC_MODE_TRANS_COMP_IRQ_T2N		BIT(19)
+
+#define NETSEC_INT_PKTCNT_MAX			2047
+
+#define NETSEC_FLOW_START_TH_MAX		95
+#define NETSEC_FLOW_STOP_TH_MAX			95
+#define NETSEC_FLOW_PAUSE_TIME_MIN		5
+
+#define NETSEC_CLK_EN_REG_DOM_ALL		0x3f
+
+#define NETSEC_PKT_CTRL_REG_MODE_NRM		BIT(28)
+#define NETSEC_PKT_CTRL_REG_EN_JUMBO		BIT(27)
+#define NETSEC_PKT_CTRL_REG_LOG_CHKSUM_ER	BIT(3)
+#define NETSEC_PKT_CTRL_REG_LOG_HD_INCOMPLETE	BIT(2)
+#define NETSEC_PKT_CTRL_REG_LOG_HD_ER		BIT(1)
+#define NETSEC_PKT_CTRL_REG_DRP_NO_MATCH	BIT(0)
+
+#define NETSEC_CLK_EN_REG_DOM_G			BIT(5)
+#define NETSEC_CLK_EN_REG_DOM_C			BIT(1)
+#define NETSEC_CLK_EN_REG_DOM_D			BIT(0)
+
+#define NETSEC_COM_INIT_REG_DB			BIT(2)
+#define NETSEC_COM_INIT_REG_CLS			BIT(1)
+#define NETSEC_COM_INIT_REG_ALL			(NETSEC_COM_INIT_REG_CLS | \
+						 NETSEC_COM_INIT_REG_DB)
+
+#define NETSEC_SOFT_RST_REG_RESET		0
+#define NETSEC_SOFT_RST_REG_RUN			BIT(31)
+
+#define NETSEC_DMA_CTRL_REG_STOP		1
+#define MH_CTRL__MODE_TRANS			BIT(20)
+
+#define NETSEC_GMAC_CMD_ST_READ			0
+#define NETSEC_GMAC_CMD_ST_WRITE		BIT(28)
+#define NETSEC_GMAC_CMD_ST_BUSY			BIT(31)
+
+#define NETSEC_GMAC_BMR_REG_COMMON		0x00412080
+#define NETSEC_GMAC_BMR_REG_RESET		0x00020181
+#define NETSEC_GMAC_BMR_REG_SWR			0x00000001
+
+#define NETSEC_GMAC_OMR_REG_ST			BIT(13)
+#define NETSEC_GMAC_OMR_REG_SR			BIT(1)
+
+#define NETSEC_GMAC_MCR_REG_IBN			BIT(30)
+#define NETSEC_GMAC_MCR_REG_CST			BIT(25)
+#define NETSEC_GMAC_MCR_REG_JE			BIT(20)
+#define NETSEC_MCR_PS				BIT(15)
+#define NETSEC_GMAC_MCR_REG_FES			BIT(14)
+#define NETSEC_GMAC_MCR_REG_FULL_DUPLEX_COMMON	0x0000280c
+#define NETSEC_GMAC_MCR_REG_HALF_DUPLEX_COMMON	0x0001a00c
+
+#define NETSEC_FCR_RFE				BIT(2)
+#define NETSEC_FCR_TFE				BIT(1)
+
+#define NETSEC_GMAC_GAR_REG_GW			BIT(1)
+#define NETSEC_GMAC_GAR_REG_GB			BIT(0)
+
+#define NETSEC_GMAC_GAR_REG_SHIFT_PA		11
+#define NETSEC_GMAC_GAR_REG_SHIFT_GR		6
+#define GMAC_REG_SHIFT_CR_GAR			2
+
+#define NETSEC_GMAC_GAR_REG_CR_25_35_MHZ	2
+#define NETSEC_GMAC_GAR_REG_CR_35_60_MHZ	3
+#define NETSEC_GMAC_GAR_REG_CR_60_100_MHZ	0
+#define NETSEC_GMAC_GAR_REG_CR_100_150_MHZ	1
+#define NETSEC_GMAC_GAR_REG_CR_150_250_MHZ	4
+#define NETSEC_GMAC_GAR_REG_CR_250_300_MHZ	5
+
+#define NETSEC_GMAC_RDLAR_REG_COMMON		0x18000
+#define NETSEC_GMAC_TDLAR_REG_COMMON		0x1c000
+
+#define NETSEC_REG_NETSEC_VER_F_TAIKI		0x50000
+
+#define NETSEC_REG_DESC_RING_CONFIG_CFG_UP	BIT(31)
+#define NETSEC_REG_DESC_RING_CONFIG_CH_RST	BIT(30)
+#define NETSEC_REG_DESC_TMR_MODE		4
+#define NETSEC_REG_DESC_ENDIAN			0
+
+#define NETSEC_MAC_DESC_SOFT_RST_SOFT_RST	1
+#define NETSEC_MAC_DESC_INIT_REG_INIT		1
+
+#define NETSEC_EEPROM_MAC_ADDRESS		0x00
+#define NETSEC_EEPROM_HM_ME_ADDRESS_H		0x08
+#define NETSEC_EEPROM_HM_ME_ADDRESS_L		0x0C
+#define NETSEC_EEPROM_HM_ME_SIZE		0x10
+#define NETSEC_EEPROM_MH_ME_ADDRESS_H		0x14
+#define NETSEC_EEPROM_MH_ME_ADDRESS_L		0x18
+#define NETSEC_EEPROM_MH_ME_SIZE		0x1C
+#define NETSEC_EEPROM_PKT_ME_ADDRESS		0x20
+#define NETSEC_EEPROM_PKT_ME_SIZE		0x24
+
+#define DESC_NUM	128
+#define NAPI_BUDGET	(DESC_NUM / 2)
+
+#define DESC_SZ	sizeof(struct netsec_de)
+
+#define NETSEC_F_NETSEC_VER_MAJOR_NUM(x)	((x) & 0xffff0000)
+
+enum ring_id {
+	NETSEC_RING_TX = 0,
+	NETSEC_RING_RX
+};
+
+struct netsec_desc {
+	struct sk_buff *skb;
+	dma_addr_t dma_addr;
+	void *addr;
+	u16 len;
+};
+
+struct netsec_desc_ring {
+	phys_addr_t desc_phys;
+	struct netsec_desc *desc;
+	void *vaddr;
+	u16 pkt_cnt;
+	u16 head, tail;
+};
+
+struct netsec_priv {
+	struct netsec_desc_ring desc_ring[NETSEC_RING_MAX];
+	struct ethtool_coalesce et_coalesce;
+	spinlock_t reglock; /* protect reg access */
+	struct napi_struct napi;
+	phy_interface_t phy_interface;
+	struct net_device *ndev;
+	struct device_node *phy_np;
+	struct phy_device *phydev;
+	struct mii_bus *mii_bus;
+	void __iomem *ioaddr;
+	void __iomem *eeprom_base;
+	struct device *dev;
+	struct clk *clk;
+	u32 msg_enable;
+	u32 freq;
+	bool rx_cksum_offload_flag;
+};
+
+struct netsec_de { /* Netsec Descriptor layout */
+	u32 attr;
+	u32 data_buf_addr_up;
+	u32 data_buf_addr_lw;
+	u32 buf_len_info;
+};
+
+struct netsec_tx_pkt_ctrl {
+	u16 tcp_seg_len;
+	bool tcp_seg_offload_flag;
+	bool cksum_offload_flag;
+};
+
+struct netsec_rx_pkt_info {
+	int rx_cksum_result;
+	int err_code;
+	bool err_flag;
+};
+
+static void netsec_write(struct netsec_priv *priv, u32 reg_addr, u32 val)
+{
+	writel(val, priv->ioaddr + reg_addr);
+}
+
+static u32 netsec_read(struct netsec_priv *priv, u32 reg_addr)
+{
+	return readl(priv->ioaddr + reg_addr);
+}
+
+/************* MDIO BUS OPS FOLLOW *************/
+
+#define TIMEOUT_SPINS_MAC		1000
+#define TIMEOUT_SECONDARY_MS_MAC	100
+
+static u32 netsec_clk_type(u32 freq)
+{
+	if (freq < MHZ(35))
+		return NETSEC_GMAC_GAR_REG_CR_25_35_MHZ;
+	if (freq < MHZ(60))
+		return NETSEC_GMAC_GAR_REG_CR_35_60_MHZ;
+	if (freq < MHZ(100))
+		return NETSEC_GMAC_GAR_REG_CR_60_100_MHZ;
+	if (freq < MHZ(150))
+		return NETSEC_GMAC_GAR_REG_CR_100_150_MHZ;
+	if (freq < MHZ(250))
+		return NETSEC_GMAC_GAR_REG_CR_150_250_MHZ;
+
+	return NETSEC_GMAC_GAR_REG_CR_250_300_MHZ;
+}
+
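+/* Poll until the bits in @mask clear at register @addr: a short
+ * busy-wait first, then a slower sleeping loop as a fallback before
+ * giving up with -ETIMEDOUT.
+ */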
+static int netsec_wait_while_busy(struct netsec_priv *priv, u32 addr, u32 mask)
+{
+	u32 timeout = TIMEOUT_SPINS_MAC;
+
+	while (--timeout && netsec_read(priv, addr) & mask)
+		cpu_relax();
+	if (timeout)
+		return 0;
+
+	timeout = TIMEOUT_SECONDARY_MS_MAC;
+	while (--timeout && netsec_read(priv, addr) & mask)
+		usleep_range(1000, 2000);
+
+	if (timeout)
+		return 0;
+
+	netdev_WARN(priv->ndev, "%s: timeout\n", __func__);
+
+	return -ETIMEDOUT;
+}
+
+static int netsec_mac_write(struct netsec_priv *priv, u32 addr, u32 value)
+{
+	netsec_write(priv, MAC_REG_DATA, value);
+	netsec_write(priv, MAC_REG_CMD, addr | NETSEC_GMAC_CMD_ST_WRITE);
+	return netsec_wait_while_busy(priv,
+				      MAC_REG_CMD, NETSEC_GMAC_CMD_ST_BUSY);
+}
+
+static int netsec_mac_read(struct netsec_priv *priv, u32 addr, u32 *read)
+{
+	int ret;
+
+	netsec_write(priv, MAC_REG_CMD, addr | NETSEC_GMAC_CMD_ST_READ);
+	ret = netsec_wait_while_busy(priv,
+				     MAC_REG_CMD, NETSEC_GMAC_CMD_ST_BUSY);
+	if (ret)
+		return ret;
+
+	*read = netsec_read(priv, MAC_REG_DATA);
+
+	return 0;
+}
+
+static int netsec_mac_wait_while_busy(struct netsec_priv *priv,
+				      u32 addr, u32 mask)
+{
+	u32 timeout = TIMEOUT_SPINS_MAC;
+	int ret;
+	u32 data;
+
+	do {
+		ret = netsec_mac_read(priv, addr, &data);
+		if (ret)
+			break;
+		cpu_relax();
+	} while (--timeout && (data & mask));
+
+	if (timeout)
+		return 0;
+
+	timeout = TIMEOUT_SECONDARY_MS_MAC;
+	do {
+		usleep_range(1000, 2000);
+
+		ret = netsec_mac_read(priv, addr, &data);
+		if (ret)
+			break;
+		cpu_relax();
+	} while (--timeout && (data & mask));
+
+	if (timeout && !ret)
+		return 0;
+
+	netdev_WARN(priv->ndev, "%s: timeout\n", __func__);
+
+	return -ETIMEDOUT;
+}
+
+static int netsec_mac_update_to_phy_state(struct netsec_priv *priv)
+{
+	struct phy_device *phydev = priv->ndev->phydev;
+	u32 value = 0;
+
+	value = phydev->duplex ? NETSEC_GMAC_MCR_REG_FULL_DUPLEX_COMMON :
+				 NETSEC_GMAC_MCR_REG_HALF_DUPLEX_COMMON;
+
+	if (phydev->speed != SPEED_1000)
+		value |= NETSEC_MCR_PS;
+
+	if (priv->phy_interface != PHY_INTERFACE_MODE_GMII &&
+	    phydev->speed == SPEED_100)
+		value |= NETSEC_GMAC_MCR_REG_FES;
+
+	value |= NETSEC_GMAC_MCR_REG_CST | NETSEC_GMAC_MCR_REG_JE;
+
+	if (phy_interface_mode_is_rgmii(priv->phy_interface))
+		value |= NETSEC_GMAC_MCR_REG_IBN;
+
+	if (netsec_mac_write(priv, GMAC_REG_MCR, value))
+		return -ETIMEDOUT;
+
+	return 0;
+}
+
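+/* MDIO accesses are indirect: the address/clock-divider word goes into
+ * GAR with the GB (busy) bit set, data travels through GDR, and the
+ * caller polls until GB clears.
+ */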
+static int netsec_phy_write(struct mii_bus *bus,
+			    int phy_addr, int reg, u16 val)
+{
+	struct netsec_priv *priv = bus->priv;
+
+	if (netsec_mac_write(priv, GMAC_REG_GDR, val))
+		return -ETIMEDOUT;
+	if (netsec_mac_write(priv, GMAC_REG_GAR,
+			     phy_addr << NETSEC_GMAC_GAR_REG_SHIFT_PA |
+			     reg << NETSEC_GMAC_GAR_REG_SHIFT_GR |
+			     NETSEC_GMAC_GAR_REG_GW | NETSEC_GMAC_GAR_REG_GB |
+			     (netsec_clk_type(priv->freq) <<
+			      GMAC_REG_SHIFT_CR_GAR)))
+		return -ETIMEDOUT;
+
+	return netsec_mac_wait_while_busy(priv, GMAC_REG_GAR,
+					  NETSEC_GMAC_GAR_REG_GB);
+}
+
+static int netsec_phy_read(struct mii_bus *bus, int phy_addr, int reg_addr)
+{
+	struct netsec_priv *priv = bus->priv;
+	u32 data;
+	int ret;
+
+	if (netsec_mac_write(priv, GMAC_REG_GAR, NETSEC_GMAC_GAR_REG_GB |
+			     phy_addr << NETSEC_GMAC_GAR_REG_SHIFT_PA |
+			     reg_addr << NETSEC_GMAC_GAR_REG_SHIFT_GR |
+			     (netsec_clk_type(priv->freq) <<
+			      GMAC_REG_SHIFT_CR_GAR)))
+		return -ETIMEDOUT;
+
+	ret = netsec_mac_wait_while_busy(priv, GMAC_REG_GAR,
+					 NETSEC_GMAC_GAR_REG_GB);
+	if (ret)
+		return ret;
+
+	ret = netsec_mac_read(priv, GMAC_REG_GDR, &data);
+	if (ret)
+		return ret;
+
+	return data;
+}
+
+/************* ETHTOOL_OPS FOLLOW *************/
+
+static void netsec_et_get_drvinfo(struct net_device *net_device,
+				  struct ethtool_drvinfo *info)
+{
+	strlcpy(info->driver, "netsec", sizeof(info->driver));
+	strlcpy(info->bus_info, dev_name(net_device->dev.parent),
+		sizeof(info->bus_info));
+}
+
+static int netsec_et_get_coalesce(struct net_device *net_device,
+				  struct ethtool_coalesce *et_coalesce)
+{
+	struct netsec_priv *priv = netdev_priv(net_device);
+
+	*et_coalesce = priv->et_coalesce;
+
+	return 0;
+}
+
+static int netsec_et_set_coalesce(struct net_device *net_device,
+				  struct ethtool_coalesce *et_coalesce)
+{
+	struct netsec_priv *priv = netdev_priv(net_device);
+
+	priv->et_coalesce = *et_coalesce;
+
+	if (priv->et_coalesce.tx_coalesce_usecs < 50)
+		priv->et_coalesce.tx_coalesce_usecs = 50;
+	if (priv->et_coalesce.tx_max_coalesced_frames < 1)
+		priv->et_coalesce.tx_max_coalesced_frames = 1;
+
+	netsec_write(priv, NETSEC_REG_NRM_TX_DONE_TXINT_PKTCNT,
+		     priv->et_coalesce.tx_max_coalesced_frames);
+	netsec_write(priv, NETSEC_REG_NRM_TX_TXINT_TMR,
+		     priv->et_coalesce.tx_coalesce_usecs);
+	netsec_write(priv, NETSEC_REG_NRM_TX_INTEN_SET, NRM_TX_ST_TXDONE);
+	netsec_write(priv, NETSEC_REG_NRM_TX_INTEN_SET, NRM_TX_ST_TMREXP);
+
+	if (priv->et_coalesce.rx_coalesce_usecs < 50)
+		priv->et_coalesce.rx_coalesce_usecs = 50;
+	if (priv->et_coalesce.rx_max_coalesced_frames < 1)
+		priv->et_coalesce.rx_max_coalesced_frames = 1;
+
+	netsec_write(priv, NETSEC_REG_NRM_RX_RXINT_PKTCNT,
+		     priv->et_coalesce.rx_max_coalesced_frames);
+	netsec_write(priv, NETSEC_REG_NRM_RX_RXINT_TMR,
+		     priv->et_coalesce.rx_coalesce_usecs);
+	netsec_write(priv, NETSEC_REG_NRM_RX_INTEN_SET, NRM_RX_ST_PKTCNT);
+	netsec_write(priv, NETSEC_REG_NRM_RX_INTEN_SET, NRM_RX_ST_TMREXP);
+
+	return 0;
+}
+
+static u32 netsec_et_get_msglevel(struct net_device *dev)
+{
+	struct netsec_priv *priv = netdev_priv(dev);
+
+	return priv->msg_enable;
+}
+
+static void netsec_et_set_msglevel(struct net_device *dev, u32 datum)
+{
+	struct netsec_priv *priv = netdev_priv(dev);
+
+	priv->msg_enable = datum;
+}
+
+static const struct ethtool_ops netsec_ethtool_ops = {
+	.get_drvinfo		= netsec_et_get_drvinfo,
+	.get_link_ksettings	= phy_ethtool_get_link_ksettings,
+	.set_link_ksettings	= phy_ethtool_set_link_ksettings,
+	.get_link		= ethtool_op_get_link,
+	.get_coalesce		= netsec_et_get_coalesce,
+	.set_coalesce		= netsec_et_set_coalesce,
+	.get_msglevel		= netsec_et_get_msglevel,
+	.set_msglevel		= netsec_et_set_msglevel,
+};
+
+/************* NETDEV_OPS FOLLOW *************/
+
+static struct sk_buff *netsec_alloc_skb(struct netsec_priv *priv,
+					struct netsec_desc *desc)
+{
+	struct sk_buff *skb;
+
+	if (device_get_dma_attr(priv->dev) == DEV_DMA_COHERENT) {
+		skb = netdev_alloc_skb_ip_align(priv->ndev, desc->len);
+	} else {
+		desc->len = L1_CACHE_ALIGN(desc->len);
+		skb = netdev_alloc_skb(priv->ndev, desc->len);
+	}
+	if (!skb)
+		return NULL;
+
+	desc->addr = skb->data;
+	desc->dma_addr = dma_map_single(priv->dev, desc->addr, desc->len,
+					DMA_FROM_DEVICE);
+	if (dma_mapping_error(priv->dev, desc->dma_addr)) {
+		dev_kfree_skb_any(skb);
+		return NULL;
+	}
+	return skb;
+}
+
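+/* Fill the hardware RX descriptor at @idx (setting the OWN and
+ * first/last-segment bits) and mirror the buffer details into the
+ * driver's shadow ring.
+ */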
+static void netsec_set_rx_de(struct netsec_priv *priv,
+			     struct netsec_desc_ring *dring, u16 idx,
+			     const struct netsec_desc *desc,
+			     struct sk_buff *skb)
+{
+	struct netsec_de *de = dring->vaddr + DESC_SZ * idx;
+	u32 attr = (1 << NETSEC_RX_PKT_OWN_FIELD) |
+		   (1 << NETSEC_RX_PKT_FS_FIELD) |
+		   (1 << NETSEC_RX_PKT_LS_FIELD);
+
+	if (idx == DESC_NUM - 1)
+		attr |= (1 << NETSEC_RX_PKT_LD_FIELD);
+
+	de->data_buf_addr_up = upper_32_bits(desc->dma_addr);
+	de->data_buf_addr_lw = lower_32_bits(desc->dma_addr);
+	de->buf_len_info = desc->len;
+	de->attr = attr;
+	dma_wmb();
+
+	dring->desc[idx].dma_addr = desc->dma_addr;
+	dring->desc[idx].addr = desc->addr;
+	dring->desc[idx].len = desc->len;
+	dring->desc[idx].skb = skb;
+}
+
+static struct sk_buff *netsec_get_rx_de(struct netsec_priv *priv,
+					struct netsec_desc_ring *dring,
+					u16 idx,
+					struct netsec_rx_pkt_info *rxpi,
+					struct netsec_desc *desc, u16 *len)
+{
+	struct netsec_de de = {};
+
+	memcpy(&de, dring->vaddr + DESC_SZ * idx, DESC_SZ);
+
+	*len = de.buf_len_info >> 16;
+
+	rxpi->err_flag = (de.attr >> NETSEC_RX_PKT_ER_FIELD) & 1;
+	rxpi->rx_cksum_result = (de.attr >> NETSEC_RX_PKT_CO_FIELD) & 3;
+	rxpi->err_code = (de.attr >> NETSEC_RX_PKT_ERR_FIELD) &
+							NETSEC_RX_PKT_ERR_MASK;
+	*desc = dring->desc[idx];
+	return desc->skb;
+}
+
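+/* Pull one received frame off the RX ring: a replacement buffer is
+ * allocated first, and only if that succeeds is the filled skb
+ * detached; otherwise the old buffer is re-armed and the frame is
+ * dropped.
+ */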
+static struct sk_buff *netsec_get_rx_pkt_data(struct netsec_priv *priv,
+					      struct netsec_rx_pkt_info *rxpi,
+					      struct netsec_desc *desc,
+					      u16 *len)
+{
+	struct netsec_desc_ring *dring = &priv->desc_ring[NETSEC_RING_RX];
+	struct sk_buff *tmp_skb, *skb = NULL;
+	struct netsec_desc td;
+	int tail;
+
+	*rxpi = (struct netsec_rx_pkt_info){};
+
+	td.len = priv->ndev->mtu + 22;
+
+	tmp_skb = netsec_alloc_skb(priv, &td);
+
+	dma_rmb();
+
+	tail = dring->tail;
+
+	if (!tmp_skb) {
+		netsec_set_rx_de(priv, dring, tail, &dring->desc[tail],
+				 dring->desc[tail].skb);
+	} else {
+		skb = netsec_get_rx_de(priv, dring, tail, rxpi, desc, len);
+		netsec_set_rx_de(priv, dring, tail, &td, tmp_skb);
+	}
+
+	/* move tail ahead */
+	dring->tail = (dring->tail + 1) % DESC_NUM;
+
+	dring->pkt_cnt--;
+
+	return skb;
+}
+
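+/* Reclaim up to @budget completed TX descriptors: the hardware's done
+ * counter tops up the pending count, then buffers are unmapped and
+ * skbs freed as the tail pointer advances.
+ */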
+static int netsec_clean_tx_dring(struct netsec_priv *priv, int budget)
+{
+	struct netsec_desc_ring *dring = &priv->desc_ring[NETSEC_RING_TX];
+	unsigned int pkts, bytes;
+
+	dring->pkt_cnt += netsec_read(priv, NETSEC_REG_NRM_TX_DONE_PKTCNT);
+
+	if (dring->pkt_cnt < budget)
+		budget = dring->pkt_cnt;
+
+	pkts = 0;
+	bytes = 0;
+
+	while (pkts < budget) {
+		struct netsec_desc *desc;
+		struct netsec_de *entry;
+		int tail, eop;
+
+		tail = dring->tail;
+
+		/* move tail ahead */
+		dring->tail = (tail + 1) % DESC_NUM;
+
+		desc = &dring->desc[tail];
+		entry = dring->vaddr + DESC_SZ * tail;
+
+		eop = (entry->attr >> NETSEC_TX_LAST) & 1;
+
+		dma_unmap_single(priv->dev, desc->dma_addr, desc->len,
+				 DMA_TO_DEVICE);
+		if (eop) {
+			pkts++;
+			bytes += desc->skb->len;
+			dev_kfree_skb(desc->skb);
+		}
+		*desc = (struct netsec_desc){};
+	}
+	dring->pkt_cnt -= budget;
+
+	priv->ndev->stats.tx_packets += budget;
+	priv->ndev->stats.tx_bytes += bytes;
+
+	netdev_completed_queue(priv->ndev, budget, bytes);
+
+	return budget;
+}
+
+static int netsec_process_tx(struct netsec_priv *priv, int budget)
+{
+	struct net_device *ndev = priv->ndev;
+	int new, done = 0;
+
+	do {
+		new = netsec_clean_tx_dring(priv, budget);
+		done += new;
+		budget -= new;
+	} while (new);
+
+	if (done && netif_queue_stopped(ndev))
+		netif_wake_queue(ndev);
+
+	return done;
+}
+
+static int netsec_process_rx(struct netsec_priv *priv, int budget)
+{
+	struct netsec_desc_ring *dring = &priv->desc_ring[NETSEC_RING_RX];
+	struct net_device *ndev = priv->ndev;
+	struct netsec_rx_pkt_info rx_info;
+	int done = 0, rx_num = 0;
+	struct netsec_desc desc;
+	struct sk_buff *skb;
+	u16 len;
+
+	while (done < budget) {
+		if (!rx_num) {
+			rx_num = netsec_read(priv, NETSEC_REG_NRM_RX_PKTCNT);
+			dring->pkt_cnt += rx_num;
+
+			/* advance head by 'rx_num' descriptors */
+			dring->head = (dring->head + rx_num) % DESC_NUM;
+
+			rx_num = dring->pkt_cnt;
+			if (!rx_num)
+				break;
+		}
+		done++;
+		rx_num--;
+		skb = netsec_get_rx_pkt_data(priv, &rx_info, &desc, &len);
+		if (unlikely(!skb) || rx_info.err_flag) {
+			netif_err(priv, drv, priv->ndev,
+				  "%s: rx fail err(%d)\n",
+				  __func__, rx_info.err_code);
+			ndev->stats.rx_dropped++;
+			continue;
+		}
+
+		dma_unmap_single(priv->dev, desc.dma_addr, desc.len,
+				 DMA_FROM_DEVICE);
+		skb_put(skb, len);
+		skb->protocol = eth_type_trans(skb, priv->ndev);
+
+		if (priv->rx_cksum_offload_flag &&
+		    rx_info.rx_cksum_result == NETSEC_RX_CKSUM_OK)
+			skb->ip_summed = CHECKSUM_UNNECESSARY;
+
+		if (napi_gro_receive(&priv->napi, skb) != GRO_DROP) {
+			ndev->stats.rx_packets++;
+			ndev->stats.rx_bytes += len;
+		}
+	}
+
+	return done;
+}
+
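+/* NAPI handler: alternate between TX completion and RX processing
+ * until the budget is spent or neither direction makes progress, then
+ * re-enable interrupts if the poll finished early.
+ */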
+static int netsec_napi_poll(struct napi_struct *napi, int budget)
+{
+	struct netsec_priv *priv;
+	struct net_device *ndev;
+	int tx, rx, done, todo;
+
+	priv = container_of(napi, struct netsec_priv, napi);
+	ndev = priv->ndev;
+
+	todo = budget;
+	do {
+		if (!todo)
+			break;
+
+		tx = netsec_process_tx(priv, todo);
+		todo -= tx;
+
+		if (!todo)
+			break;
+
+		rx = netsec_process_rx(priv, todo);
+		todo -= rx;
+	} while (rx || tx);
+
+	done = budget - todo;
+
+	if (done < budget && napi_complete_done(napi, done)) {
+		unsigned long flags;
+
+		spin_lock_irqsave(&priv->reglock, flags);
+		netsec_write(priv, NETSEC_REG_INTEN_SET,
+			     NETSEC_IRQ_RX | NETSEC_IRQ_TX);
+		spin_unlock_irqrestore(&priv->reglock, flags);
+	}
+
+	return done;
+}
+
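+/* Build the TX descriptor attribute word (ownership, target ring and
+ * checksum/TSO offload flags), publish the descriptor and advance the
+ * ring head.
+ */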
+static void netsec_set_tx_de(struct netsec_priv *priv,
+			     struct netsec_desc_ring *dring,
+			     const struct netsec_tx_pkt_ctrl *tx_ctrl,
+			     const struct netsec_desc *desc,
+			     struct sk_buff *skb)
+{
+	int idx = dring->head;
+	struct netsec_de *de;
+	u32 attr;
+
+	de = dring->vaddr + (DESC_SZ * idx);
+
+	attr = (1 << NETSEC_TX_SHIFT_OWN_FIELD) |
+	       (1 << NETSEC_TX_SHIFT_PT_FIELD) |
+	       (NETSEC_RING_GMAC << NETSEC_TX_SHIFT_TDRID_FIELD) |
+	       (1 << NETSEC_TX_SHIFT_FS_FIELD) |
+	       (1 << NETSEC_TX_LAST) |
+	       (tx_ctrl->cksum_offload_flag << NETSEC_TX_SHIFT_CO) |
+	       (tx_ctrl->tcp_seg_offload_flag << NETSEC_TX_SHIFT_SO) |
+	       (1 << NETSEC_TX_SHIFT_TRS_FIELD);
+	if (idx == DESC_NUM - 1)
+		attr |= (1 << NETSEC_TX_SHIFT_LD_FIELD);
+
+	de->data_buf_addr_up = upper_32_bits(desc->dma_addr);
+	de->data_buf_addr_lw = lower_32_bits(desc->dma_addr);
+	de->buf_len_info = (tx_ctrl->tcp_seg_len << 16) | desc->len;
+	de->attr = attr;
+	dma_wmb();
+
+	dring->desc[idx] = *desc;
+	dring->desc[idx].skb = skb;
+
+	/* move head ahead */
+	dring->head = (dring->head + 1) % DESC_NUM;
+}
+
+static netdev_tx_t netsec_netdev_start_xmit(struct sk_buff *skb,
+					    struct net_device *ndev)
+{
+	struct netsec_priv *priv = netdev_priv(ndev);
+	struct netsec_desc_ring *dring = &priv->desc_ring[NETSEC_RING_TX];
+	struct netsec_tx_pkt_ctrl tx_ctrl = {};
+	struct netsec_desc tx_desc;
+	u16 tso_seg_len = 0;
+	int filled;
+
+	/* differentiate between a full and an empty ring */
+	if (dring->head >= dring->tail)
+		filled = dring->head - dring->tail;
+	else
+		filled = dring->head + DESC_NUM - dring->tail;
+
+	if (DESC_NUM - filled < 2) { /* if less than 2 available */
+		netif_err(priv, drv, priv->ndev, "%s: TxQFull!\n", __func__);
+		netif_stop_queue(priv->ndev);
+		dma_wmb();
+		return NETDEV_TX_BUSY;
+	}
+
+	if (skb->ip_summed == CHECKSUM_PARTIAL)
+		tx_ctrl.cksum_offload_flag = true;
+
+	if (skb_is_gso(skb))
+		tso_seg_len = skb_shinfo(skb)->gso_size;
+
+	if (tso_seg_len > 0) {
+		if (skb->protocol == htons(ETH_P_IP)) {
+			ip_hdr(skb)->tot_len = 0;
+			tcp_hdr(skb)->check =
+				~tcp_v4_check(0, ip_hdr(skb)->saddr,
+					      ip_hdr(skb)->daddr, 0);
+		} else {
+			ipv6_hdr(skb)->payload_len = 0;
+			tcp_hdr(skb)->check =
+				~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
+						 &ipv6_hdr(skb)->daddr,
+						 0, IPPROTO_TCP, 0);
+		}
+
+		tx_ctrl.tcp_seg_offload_flag = true;
+		tx_ctrl.tcp_seg_len = tso_seg_len;
+	}
+
+	tx_desc.dma_addr = dma_map_single(priv->dev, skb->data,
+					  skb_headlen(skb), DMA_TO_DEVICE);
+	if (dma_mapping_error(priv->dev, tx_desc.dma_addr)) {
+		netif_err(priv, drv, priv->ndev,
+			  "%s: DMA mapping failed\n", __func__);
+		ndev->stats.tx_dropped++;
+		dev_kfree_skb_any(skb);
+		return NETDEV_TX_OK;
+	}
+	tx_desc.addr = skb->data;
+	tx_desc.len = skb_headlen(skb);
+
+	skb_tx_timestamp(skb);
+	netdev_sent_queue(priv->ndev, skb->len);
+
+	netsec_set_tx_de(priv, dring, &tx_ctrl, &tx_desc, skb);
+	netsec_write(priv, NETSEC_REG_NRM_TX_PKTCNT, 1); /* submit one packet */
+
+	return NETDEV_TX_OK;
+}
+
+static void netsec_uninit_pkt_dring(struct netsec_priv *priv, int id)
+{
+	struct netsec_desc_ring *dring = &priv->desc_ring[id];
+	struct netsec_desc *desc;
+	u16 idx;
+
+	if (!dring->vaddr || !dring->desc)
+		return;
+
+	for (idx = 0; idx < DESC_NUM; idx++) {
+		desc = &dring->desc[idx];
+		if (!desc->addr)
+			continue;
+
+		dma_unmap_single(priv->dev, desc->dma_addr, desc->len,
+				 id == NETSEC_RING_RX ? DMA_FROM_DEVICE :
+							      DMA_TO_DEVICE);
+		dev_kfree_skb(desc->skb);
+	}
+
+	memset(dring->desc, 0, sizeof(struct netsec_desc) * DESC_NUM);
+	memset(dring->vaddr, 0, DESC_SZ * DESC_NUM);
+
+	dring->head = 0;
+	dring->tail = 0;
+	dring->pkt_cnt = 0;
+}
+
+static void netsec_free_dring(struct netsec_priv *priv, int id)
+{
+	struct netsec_desc_ring *dring = &priv->desc_ring[id];
+
+	if (dring->vaddr) {
+		dma_free_coherent(priv->dev, DESC_SZ * DESC_NUM,
+				  dring->vaddr, dring->desc_phys);
+		dring->vaddr = NULL;
+	}
+
+	kfree(dring->desc);
+	dring->desc = NULL;
+}
+
+static int netsec_alloc_dring(struct netsec_priv *priv, enum ring_id id)
+{
+	struct netsec_desc_ring *dring = &priv->desc_ring[id];
+	int ret = 0;
+
+	dring->vaddr = dma_zalloc_coherent(priv->dev, DESC_SZ * DESC_NUM,
+					   &dring->desc_phys, GFP_KERNEL);
+	if (!dring->vaddr) {
+		ret = -ENOMEM;
+		goto err;
+	}
+
+	dring->desc = kzalloc(DESC_NUM * sizeof(*dring->desc), GFP_KERNEL);
+	if (!dring->desc) {
+		ret = -ENOMEM;
+		goto err;
+	}
+
+	return 0;
+err:
+	netsec_free_dring(priv, id);
+
+	return ret;
+}
+
+static int netsec_setup_rx_dring(struct netsec_priv *priv)
+{
+	struct netsec_desc_ring *dring = &priv->desc_ring[NETSEC_RING_RX];
+	struct netsec_desc desc;
+	struct sk_buff *skb;
+	int n;
+
+	desc.len = priv->ndev->mtu + 22;
+
+	for (n = 0; n < DESC_NUM; n++) {
+		skb = netsec_alloc_skb(priv, &desc);
+		if (!skb) {
+			netsec_uninit_pkt_dring(priv, NETSEC_RING_RX);
+			return -ENOMEM;
+		}
+		netsec_set_rx_de(priv, dring, n, &desc, skb);
+	}
+
+	return 0;
+}
+
+static int netsec_netdev_load_ucode_region(struct netsec_priv *priv, u32 reg,
+					   u32 addr_h, u32 addr_l, u32 size)
+{
+	u64 base = (u64)addr_h << 32 | addr_l;
+	void __iomem *ucode;
+	u32 i;
+
+	ucode = ioremap(base, size * sizeof(u32));
+	if (!ucode)
+		return -ENOMEM;
+
+	for (i = 0; i < size; i++)
+		netsec_write(priv, reg, readl(ucode + i * 4));
+
+	iounmap(ucode);
+	return 0;
+}
+
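+/* Load the three microcode regions (HM, MH and packet engine), whose
+ * addresses and sizes are read from the attached EEPROM.
+ */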
+static int netsec_netdev_load_microcode(struct netsec_priv *priv)
+{
+	u32 addr_h, addr_l, size;
+	int err;
+
+	addr_h = readl(priv->eeprom_base + NETSEC_EEPROM_HM_ME_ADDRESS_H);
+	addr_l = readl(priv->eeprom_base + NETSEC_EEPROM_HM_ME_ADDRESS_L);
+	size = readl(priv->eeprom_base + NETSEC_EEPROM_HM_ME_SIZE);
+	err = netsec_netdev_load_ucode_region(priv, NETSEC_REG_DMAC_HM_CMD_BUF,
+					      addr_h, addr_l, size);
+	if (err)
+		return err;
+
+	addr_h = readl(priv->eeprom_base + NETSEC_EEPROM_MH_ME_ADDRESS_H);
+	addr_l = readl(priv->eeprom_base + NETSEC_EEPROM_MH_ME_ADDRESS_L);
+	size = readl(priv->eeprom_base + NETSEC_EEPROM_MH_ME_SIZE);
+	err = netsec_netdev_load_ucode_region(priv, NETSEC_REG_DMAC_MH_CMD_BUF,
+					      addr_h, addr_l, size);
+	if (err)
+		return err;
+
+	addr_h = 0;
+	addr_l = readl(priv->eeprom_base + NETSEC_EEPROM_PKT_ME_ADDRESS);
+	size = readl(priv->eeprom_base + NETSEC_EEPROM_PKT_ME_SIZE);
+	err = netsec_netdev_load_ucode_region(priv, NETSEC_REG_PKT_CMD_BUF,
+					      addr_h, addr_l, size);
+	if (err)
+		return err;
+
+	return 0;
+}
+
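+/* Full datapath reset: stop the DMA engines, soft-reset the block,
+ * program the descriptor ring bases, reload the microcode and switch
+ * back to normal mode with all interrupts masked.
+ */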
+static int netsec_reset_hardware(struct netsec_priv *priv)
+{
+	u32 value;
+	int err;
+
+	/* stop DMA engines */
+	if (!netsec_read(priv, NETSEC_REG_ADDR_DIS_CORE)) {
+		netsec_write(priv, NETSEC_REG_DMA_HM_CTRL,
+			     NETSEC_DMA_CTRL_REG_STOP);
+		netsec_write(priv, NETSEC_REG_DMA_MH_CTRL,
+			     NETSEC_DMA_CTRL_REG_STOP);
+
+		while (netsec_read(priv, NETSEC_REG_DMA_HM_CTRL) &
+		       NETSEC_DMA_CTRL_REG_STOP)
+			cpu_relax();
+
+		while (netsec_read(priv, NETSEC_REG_DMA_MH_CTRL) &
+		       NETSEC_DMA_CTRL_REG_STOP)
+			cpu_relax();
+	}
+
+	netsec_write(priv, NETSEC_REG_SOFT_RST, NETSEC_SOFT_RST_REG_RESET);
+	netsec_write(priv, NETSEC_REG_SOFT_RST, NETSEC_SOFT_RST_REG_RUN);
+	netsec_write(priv, NETSEC_REG_COM_INIT, NETSEC_COM_INIT_REG_ALL);
+
+	while (netsec_read(priv, NETSEC_REG_COM_INIT) != 0)
+		cpu_relax();
+
+	/* set desc_start addr */
+	netsec_write(priv, NETSEC_REG_NRM_RX_DESC_START_UP,
+		     upper_32_bits(priv->desc_ring[NETSEC_RING_RX].desc_phys));
+	netsec_write(priv, NETSEC_REG_NRM_RX_DESC_START_LW,
+		     lower_32_bits(priv->desc_ring[NETSEC_RING_RX].desc_phys));
+
+	netsec_write(priv, NETSEC_REG_NRM_TX_DESC_START_UP,
+		     upper_32_bits(priv->desc_ring[NETSEC_RING_TX].desc_phys));
+	netsec_write(priv, NETSEC_REG_NRM_TX_DESC_START_LW,
+		     lower_32_bits(priv->desc_ring[NETSEC_RING_TX].desc_phys));
+
+	/* set normal tx dring ring config */
+	netsec_write(priv, NETSEC_REG_NRM_TX_CONFIG,
+		     1 << NETSEC_REG_DESC_ENDIAN);
+	netsec_write(priv, NETSEC_REG_NRM_RX_CONFIG,
+		     1 << NETSEC_REG_DESC_ENDIAN);
+
+	err = netsec_netdev_load_microcode(priv);
+	if (err) {
+		netif_err(priv, probe, priv->ndev,
+			  "%s: failed to load microcode (%d)\n", __func__, err);
+		return err;
+	}
+
+	/* start DMA engines */
+	netsec_write(priv, NETSEC_REG_DMA_TMR_CTRL, priv->freq / 1000000 - 1);
+	netsec_write(priv, NETSEC_REG_ADDR_DIS_CORE, 0);
+
+	usleep_range(1000, 2000);
+
+	if (!(netsec_read(priv, NETSEC_REG_TOP_STATUS) &
+	      NETSEC_TOP_IRQ_REG_CODE_LOAD_END)) {
+		netif_err(priv, probe, priv->ndev,
+			  "microengine start failed\n");
+		return -ENXIO;
+	}
+	netsec_write(priv, NETSEC_REG_TOP_STATUS,
+		     NETSEC_TOP_IRQ_REG_CODE_LOAD_END);
+
+	value = NETSEC_PKT_CTRL_REG_MODE_NRM;
+	if (priv->ndev->mtu > ETH_DATA_LEN)
+		value |= NETSEC_PKT_CTRL_REG_EN_JUMBO;
+
+	/* change to normal mode */
+	netsec_write(priv, NETSEC_REG_DMA_MH_CTRL, MH_CTRL__MODE_TRANS);
+	netsec_write(priv, NETSEC_REG_PKT_CTRL, value);
+
+	while ((netsec_read(priv, NETSEC_REG_MODE_TRANS_COMP_STATUS) &
+		NETSEC_MODE_TRANS_COMP_IRQ_T2N) == 0)
+		cpu_relax();
+
+	/* clear any pending EMPTY/ERR irq status */
+	netsec_write(priv, NETSEC_REG_NRM_TX_STATUS, ~0);
+
+	/* Disable TX & RX intr */
+	netsec_write(priv, NETSEC_REG_INTEN_CLR, ~0);
+
+	return 0;
+}
+
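+/* Reset the GMAC and program it to match the current PHY state before
+ * (re)starting its TX/RX state machines.
+ */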
+static int netsec_start_gmac(struct netsec_priv *priv)
+{
+	struct phy_device *phydev = priv->ndev->phydev;
+	u32 value = 0;
+	int ret;
+
+	if (phydev->speed != SPEED_1000)
+		value = (NETSEC_GMAC_MCR_REG_CST |
+			 NETSEC_GMAC_MCR_REG_HALF_DUPLEX_COMMON);
+
+	if (netsec_mac_write(priv, GMAC_REG_MCR, value))
+		return -ETIMEDOUT;
+	if (netsec_mac_write(priv, GMAC_REG_BMR,
+			     NETSEC_GMAC_BMR_REG_RESET))
+		return -ETIMEDOUT;
+
+	/* Wait soft reset */
+	usleep_range(1000, 5000);
+
+	ret = netsec_mac_read(priv, GMAC_REG_BMR, &value);
+	if (ret)
+		return ret;
+	if (value & NETSEC_GMAC_BMR_REG_SWR)
+		return -EAGAIN;
+
+	netsec_write(priv, MAC_REG_DESC_SOFT_RST, 1);
+	if (netsec_wait_while_busy(priv, MAC_REG_DESC_SOFT_RST, 1))
+		return -ETIMEDOUT;
+
+	netsec_write(priv, MAC_REG_DESC_INIT, 1);
+	if (netsec_wait_while_busy(priv, MAC_REG_DESC_INIT, 1))
+		return -ETIMEDOUT;
+
+	if (netsec_mac_write(priv, GMAC_REG_BMR,
+			     NETSEC_GMAC_BMR_REG_COMMON))
+		return -ETIMEDOUT;
+	if (netsec_mac_write(priv, GMAC_REG_RDLAR,
+			     NETSEC_GMAC_RDLAR_REG_COMMON))
+		return -ETIMEDOUT;
+	if (netsec_mac_write(priv, GMAC_REG_TDLAR,
+			     NETSEC_GMAC_TDLAR_REG_COMMON))
+		return -ETIMEDOUT;
+	if (netsec_mac_write(priv, GMAC_REG_MFFR, 0x80000001))
+		return -ETIMEDOUT;
+
+	ret = netsec_mac_update_to_phy_state(priv);
+	if (ret)
+		return ret;
+
+	ret = netsec_mac_read(priv, GMAC_REG_OMR, &value);
+	if (ret)
+		return ret;
+
+	value |= NETSEC_GMAC_OMR_REG_SR;
+	value |= NETSEC_GMAC_OMR_REG_ST;
+
+	netsec_write(priv, NETSEC_REG_NRM_RX_INTEN_CLR, ~0);
+	netsec_write(priv, NETSEC_REG_NRM_TX_INTEN_CLR, ~0);
+
+	netsec_et_set_coalesce(priv->ndev, &priv->et_coalesce);
+
+	if (netsec_mac_write(priv, GMAC_REG_OMR, value))
+		return -ETIMEDOUT;
+
+	return 0;
+}
+
+static int netsec_stop_gmac(struct netsec_priv *priv)
+{
+	u32 value;
+	int ret;
+
+	ret = netsec_mac_read(priv, GMAC_REG_OMR, &value);
+	if (ret)
+		return ret;
+	value &= ~NETSEC_GMAC_OMR_REG_SR;
+	value &= ~NETSEC_GMAC_OMR_REG_ST;
+
+	/* disable all interrupts */
+	netsec_write(priv, NETSEC_REG_NRM_RX_INTEN_CLR, ~0);
+	netsec_write(priv, NETSEC_REG_NRM_TX_INTEN_CLR, ~0);
+
+	return netsec_mac_write(priv, GMAC_REG_OMR, value);
+}
+
+static void netsec_phy_adjust_link(struct net_device *ndev)
+{
+	struct netsec_priv *priv = netdev_priv(ndev);
+
+	if (ndev->phydev->link)
+		netsec_start_gmac(priv);
+	else
+		netsec_stop_gmac(priv);
+
+	phy_print_status(ndev->phydev);
+}
+
+static irqreturn_t netsec_irq_handler(int irq, void *dev_id)
+{
+	struct netsec_priv *priv = dev_id;
+	u32 val, status = netsec_read(priv, NETSEC_REG_TOP_STATUS);
+	unsigned long flags;
+
+	/* Disable interrupts */
+	if (status & NETSEC_IRQ_TX) {
+		val = netsec_read(priv, NETSEC_REG_NRM_TX_STATUS);
+		netsec_write(priv, NETSEC_REG_NRM_TX_STATUS, val);
+	}
+	if (status & NETSEC_IRQ_RX) {
+		val = netsec_read(priv, NETSEC_REG_NRM_RX_STATUS);
+		netsec_write(priv, NETSEC_REG_NRM_RX_STATUS, val);
+	}
+
+	spin_lock_irqsave(&priv->reglock, flags);
+	netsec_write(priv, NETSEC_REG_INTEN_CLR, NETSEC_IRQ_RX | NETSEC_IRQ_TX);
+	spin_unlock_irqrestore(&priv->reglock, flags);
+
+	napi_schedule(&priv->napi);
+
+	return IRQ_HANDLED;
+}
+
+static int netsec_netdev_open(struct net_device *ndev)
+{
+	struct netsec_priv *priv = netdev_priv(ndev);
+	int ret;
+
+	pm_runtime_get_sync(priv->dev);
+
+	ret = netsec_setup_rx_dring(priv);
+	if (ret) {
+		netif_err(priv, probe, priv->ndev,
+			  "%s: fail setup ring\n", __func__);
+		goto err1;
+	}
+
+	ret = request_irq(priv->ndev->irq, netsec_irq_handler,
+			  IRQF_SHARED, "netsec", priv);
+	if (ret) {
+		netif_err(priv, drv, priv->ndev, "request_irq failed\n");
+		goto err2;
+	}
+
+	if (dev_of_node(priv->dev)) {
+		if (!of_phy_connect(priv->ndev, priv->phy_np,
+				    netsec_phy_adjust_link, 0,
+				    priv->phy_interface)) {
+			netif_err(priv, link, priv->ndev, "missing PHY\n");
+			ret = -ENODEV;
+			goto err3;
+		}
+	} else {
+		ret = phy_connect_direct(priv->ndev, priv->phydev,
+					 netsec_phy_adjust_link,
+					 priv->phy_interface);
+		if (ret) {
+			netif_err(priv, link, priv->ndev,
+				  "phy_connect_direct() failed (%d)\n", ret);
+			goto err3;
+		}
+	}
+
+	phy_start(ndev->phydev);
+
+	netsec_start_gmac(priv);
+	napi_enable(&priv->napi);
+	netif_start_queue(ndev);
+
+	/* Enable RX intr. */
+	netsec_write(priv, NETSEC_REG_INTEN_SET, NETSEC_IRQ_RX);
+
+	return 0;
+err3:
+	free_irq(priv->ndev->irq, priv);
+err2:
+	netsec_uninit_pkt_dring(priv, NETSEC_RING_RX);
+err1:
+	pm_runtime_put_sync(priv->dev);
+	return ret;
+}
+
+static int netsec_netdev_stop(struct net_device *ndev)
+{
+	struct netsec_priv *priv = netdev_priv(ndev);
+
+	netif_stop_queue(priv->ndev);
+	dma_wmb();
+
+	napi_disable(&priv->napi);
+
+	netsec_write(priv, NETSEC_REG_INTEN_CLR, ~0);
+	netsec_stop_gmac(priv);
+
+	free_irq(priv->ndev->irq, priv);
+
+	netsec_uninit_pkt_dring(priv, NETSEC_RING_TX);
+	netsec_uninit_pkt_dring(priv, NETSEC_RING_RX);
+
+	phy_stop(ndev->phydev);
+	phy_disconnect(ndev->phydev);
+
+	pm_runtime_put_sync(priv->dev);
+
+	return 0;
+}
+
+static int netsec_netdev_init(struct net_device *ndev)
+{
+	struct netsec_priv *priv = netdev_priv(ndev);
+	int ret;
+
+	ret = netsec_alloc_dring(priv, NETSEC_RING_TX);
+	if (ret)
+		return ret;
+
+	ret = netsec_alloc_dring(priv, NETSEC_RING_RX);
+	if (ret)
+		goto err1;
+
+	ret = netsec_reset_hardware(priv);
+	if (ret)
+		goto err2;
+
+	return 0;
+err2:
+	netsec_free_dring(priv, NETSEC_RING_RX);
+err1:
+	netsec_free_dring(priv, NETSEC_RING_TX);
+	return ret;
+}
+
+static void netsec_netdev_uninit(struct net_device *ndev)
+{
+	struct netsec_priv *priv = netdev_priv(ndev);
+
+	netsec_free_dring(priv, NETSEC_RING_RX);
+	netsec_free_dring(priv, NETSEC_RING_TX);
+}
+
+static int netsec_netdev_set_features(struct net_device *ndev,
+				      netdev_features_t features)
+{
+	struct netsec_priv *priv = netdev_priv(ndev);
+
+	priv->rx_cksum_offload_flag = !!(features & NETIF_F_RXCSUM);
+
+	return 0;
+}
+
+static int netsec_netdev_ioctl(struct net_device *ndev, struct ifreq *ifr,
+			       int cmd)
+{
+	return phy_mii_ioctl(ndev->phydev, ifr, cmd);
+}
+
+static const struct net_device_ops netsec_netdev_ops = {
+	.ndo_init		= netsec_netdev_init,
+	.ndo_uninit		= netsec_netdev_uninit,
+	.ndo_open		= netsec_netdev_open,
+	.ndo_stop		= netsec_netdev_stop,
+	.ndo_start_xmit		= netsec_netdev_start_xmit,
+	.ndo_set_features	= netsec_netdev_set_features,
+	.ndo_set_mac_address    = eth_mac_addr,
+	.ndo_validate_addr	= eth_validate_addr,
+	.ndo_do_ioctl		= netsec_netdev_ioctl,
+};
+
+static int netsec_of_probe(struct platform_device *pdev,
+			   struct netsec_priv *priv)
+{
+	priv->phy_np = of_parse_phandle(pdev->dev.of_node, "phy-handle", 0);
+	if (!priv->phy_np) {
+		dev_err(&pdev->dev, "missing required property 'phy-handle'\n");
+		return -EINVAL;
+	}
+
+	priv->clk = devm_clk_get(&pdev->dev, NULL); /* DT names this 'phy_ref_clk' */
+	if (IS_ERR(priv->clk)) {
+		dev_err(&pdev->dev, "phy_ref_clk not found\n");
+		return PTR_ERR(priv->clk);
+	}
+	priv->freq = clk_get_rate(priv->clk);
+
+	return 0;
+}
+
+static int netsec_acpi_probe(struct platform_device *pdev,
+			     struct netsec_priv *priv, u32 *phy_addr)
+{
+	int ret;
+
+	if (!IS_ENABLED(CONFIG_ACPI))
+		return -ENODEV;
+
+	ret = device_property_read_u32(&pdev->dev, "phy-channel", phy_addr);
+	if (ret) {
+		dev_err(&pdev->dev,
+			"missing required property 'phy-channel'\n");
+		return ret;
+	}
+
+	ret = device_property_read_u32(&pdev->dev,
+				       "socionext,phy-clock-frequency",
+				       &priv->freq);
+	if (ret)
+		dev_err(&pdev->dev,
+			"missing required property 'socionext,phy-clock-frequency'\n");
+	return ret;
+}
+
+static void netsec_unregister_mdio(struct netsec_priv *priv)
+{
+	struct phy_device *phydev = priv->phydev;
+
+	if (!dev_of_node(priv->dev) && phydev) {
+		phy_device_remove(phydev);
+		phy_device_free(phydev);
+	}
+
+	mdiobus_unregister(priv->mii_bus);
+}
+
+static int netsec_register_mdio(struct netsec_priv *priv, u32 phy_addr)
+{
+	struct mii_bus *bus;
+	int ret;
+
+	bus = devm_mdiobus_alloc(priv->dev);
+	if (!bus)
+		return -ENOMEM;
+
+	snprintf(bus->id, MII_BUS_ID_SIZE, "%s", dev_name(priv->dev));
+	bus->priv = priv;
+	bus->name = "SNI NETSEC MDIO";
+	bus->read = netsec_phy_read;
+	bus->write = netsec_phy_write;
+	bus->parent = priv->dev;
+	priv->mii_bus = bus;
+
+	if (dev_of_node(priv->dev)) {
+		struct device_node *mdio_node, *parent = dev_of_node(priv->dev);
+
+		mdio_node = of_get_child_by_name(parent, "mdio");
+		if (mdio_node) {
+			parent = mdio_node;
+		} else {
+			/* Older firmware doesn't populate the mdio subnode;
+			 * tolerate that so firmware can be upgraded in due
+			 * time.
+			 */
+			dev_info(priv->dev, "Upgrade f/w for mdio subnode!\n");
+		}
+
+		ret = of_mdiobus_register(bus, parent);
+		of_node_put(mdio_node);
+
+		if (ret) {
+			dev_err(priv->dev, "mdiobus register err(%d)\n", ret);
+			return ret;
+		}
+	} else {
+		/* Mask out all PHYs from auto probing. */
+		bus->phy_mask = ~0;
+		ret = mdiobus_register(bus);
+		if (ret) {
+			dev_err(priv->dev, "mdiobus register err(%d)\n", ret);
+			return ret;
+		}
+
+		priv->phydev = get_phy_device(bus, phy_addr, false);
+		if (IS_ERR(priv->phydev)) {
+			ret = PTR_ERR(priv->phydev);
+			dev_err(priv->dev, "get_phy_device err(%d)\n", ret);
+			priv->phydev = NULL;
+			return -ENODEV;
+		}
+
+		ret = phy_device_register(priv->phydev);
+		if (ret) {
+			mdiobus_unregister(bus);
+			dev_err(priv->dev,
+				"phy_device_register err(%d)\n", ret);
+		}
+	}
+
+	return ret;
+}
+
+static int netsec_probe(struct platform_device *pdev)
+{
+	struct resource *mmio_res, *eeprom_res, *irq_res;
+	u8 *mac, macbuf[ETH_ALEN];
+	struct netsec_priv *priv;
+	u32 hw_ver, phy_addr = 0;
+	struct net_device *ndev;
+	int ret;
+
+	mmio_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (!mmio_res) {
+		dev_err(&pdev->dev, "No MMIO resource found.\n");
+		return -ENODEV;
+	}
+
+	eeprom_res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+	if (!eeprom_res) {
+		dev_info(&pdev->dev, "No EEPROM resource found.\n");
+		return -ENODEV;
+	}
+
+	irq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+	if (!irq_res) {
+		dev_err(&pdev->dev, "No IRQ resource found.\n");
+		return -ENODEV;
+	}
+
+	ndev = alloc_etherdev(sizeof(*priv));
+	if (!ndev)
+		return -ENOMEM;
+
+	priv = netdev_priv(ndev);
+
+	spin_lock_init(&priv->reglock);
+	SET_NETDEV_DEV(ndev, &pdev->dev);
+	platform_set_drvdata(pdev, priv);
+	ndev->irq = irq_res->start;
+	priv->dev = &pdev->dev;
+	priv->ndev = ndev;
+
+	priv->msg_enable = NETIF_MSG_TX_ERR | NETIF_MSG_HW | NETIF_MSG_DRV |
+			   NETIF_MSG_LINK | NETIF_MSG_PROBE;
+
+	priv->phy_interface = device_get_phy_mode(&pdev->dev);
+	if (priv->phy_interface < 0) {
+		dev_err(&pdev->dev, "missing required property 'phy-mode'\n");
+		ret = -ENODEV;
+		goto free_ndev;
+	}
+
+	priv->ioaddr = devm_ioremap(&pdev->dev, mmio_res->start,
+				    resource_size(mmio_res));
+	if (!priv->ioaddr) {
+		dev_err(&pdev->dev, "devm_ioremap() failed\n");
+		ret = -ENXIO;
+		goto free_ndev;
+	}
+
+	priv->eeprom_base = devm_ioremap(&pdev->dev, eeprom_res->start,
+					 resource_size(eeprom_res));
+	if (!priv->eeprom_base) {
+		dev_err(&pdev->dev, "devm_ioremap() failed for EEPROM\n");
+		ret = -ENXIO;
+		goto free_ndev;
+	}
+
+	mac = device_get_mac_address(&pdev->dev, macbuf, sizeof(macbuf));
+	if (mac)
+		ether_addr_copy(ndev->dev_addr, mac);
+
+	if (priv->eeprom_base &&
+	    (!mac || !is_valid_ether_addr(ndev->dev_addr))) {
+		void __iomem *macp = priv->eeprom_base +
+					NETSEC_EEPROM_MAC_ADDRESS;
+
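+		/* the EEPROM stores the address as two 32-bit little-endian
+		 * words, so read the bytes back in swapped order
+		 */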
+		ndev->dev_addr[0] = readb(macp + 3);
+		ndev->dev_addr[1] = readb(macp + 2);
+		ndev->dev_addr[2] = readb(macp + 1);
+		ndev->dev_addr[3] = readb(macp + 0);
+		ndev->dev_addr[4] = readb(macp + 7);
+		ndev->dev_addr[5] = readb(macp + 6);
+	}
+
+	if (!is_valid_ether_addr(ndev->dev_addr)) {
+		dev_warn(&pdev->dev, "No MAC address found, using random\n");
+		eth_hw_addr_random(ndev);
+	}
+
+	if (dev_of_node(&pdev->dev))
+		ret = netsec_of_probe(pdev, priv);
+	else
+		ret = netsec_acpi_probe(pdev, priv, &phy_addr);
+	if (ret)
+		goto free_ndev;
+
+	if (!priv->freq) {
+		dev_err(&pdev->dev, "missing PHY reference clock frequency\n");
+		ret = -ENODEV;
+		goto free_ndev;
+	}
+
+	/* coalescing defaults tuned for throughput */
+	priv->et_coalesce.rx_coalesce_usecs = 500;
+	priv->et_coalesce.rx_max_coalesced_frames = 8;
+	priv->et_coalesce.tx_coalesce_usecs = 500;
+	priv->et_coalesce.tx_max_coalesced_frames = 8;
+
+	ret = device_property_read_u32(&pdev->dev, "max-frame-size",
+				       &ndev->max_mtu);
+	if (ret < 0)
+		ndev->max_mtu = ETH_DATA_LEN;
+
+	/* runtime PM is held across probe only; open/close take their own references */
+	pm_runtime_enable(&pdev->dev);
+	pm_runtime_get_sync(&pdev->dev);
+
+	hw_ver = netsec_read(priv, NETSEC_REG_F_TAIKI_VER);
+	/* this driver only supports F_TAIKI style NETSEC */
+	if (NETSEC_F_NETSEC_VER_MAJOR_NUM(hw_ver) !=
+	    NETSEC_F_NETSEC_VER_MAJOR_NUM(NETSEC_REG_NETSEC_VER_F_TAIKI)) {
+		ret = -ENODEV;
+		goto pm_disable;
+	}
+
+	dev_info(&pdev->dev, "hardware revision %d.%d\n",
+		 hw_ver >> 16, hw_ver & 0xffff);
+
+	netif_napi_add(ndev, &priv->napi, netsec_napi_poll, NAPI_BUDGET);
+
+	ndev->netdev_ops = &netsec_netdev_ops;
+	ndev->ethtool_ops = &netsec_ethtool_ops;
+
+	ndev->features |= NETIF_F_HIGHDMA | NETIF_F_RXCSUM | NETIF_F_GSO |
+				NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
+	ndev->hw_features = ndev->features;
+
+	priv->rx_cksum_offload_flag = true;
+
+	ret = netsec_register_mdio(priv, phy_addr);
+	if (ret)
+		goto unreg_napi;
+
+	if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)))
+		dev_warn(&pdev->dev, "Failed to enable 64-bit DMA\n");
+
+	ret = register_netdev(ndev);
+	if (ret) {
+		netif_err(priv, probe, ndev, "register_netdev() failed\n");
+		goto unreg_mii;
+	}
+
+	pm_runtime_put_sync(&pdev->dev);
+	return 0;
+
+unreg_mii:
+	netsec_unregister_mdio(priv);
+unreg_napi:
+	netif_napi_del(&priv->napi);
+pm_disable:
+	pm_runtime_put_sync(&pdev->dev);
+	pm_runtime_disable(&pdev->dev);
+free_ndev:
+	free_netdev(ndev);
+	dev_err(&pdev->dev, "init failed\n");
+
+	return ret;
+}
+
+static int netsec_remove(struct platform_device *pdev)
+{
+	struct netsec_priv *priv = platform_get_drvdata(pdev);
+
+	unregister_netdev(priv->ndev);
+
+	netsec_unregister_mdio(priv);
+
+	netif_napi_del(&priv->napi);
+
+	pm_runtime_disable(&pdev->dev);
+	free_netdev(priv->ndev);
+
+	return 0;
+}
+
+#ifdef CONFIG_PM
+static int netsec_runtime_suspend(struct device *dev)
+{
+	struct netsec_priv *priv = dev_get_drvdata(dev);
+
+	netsec_write(priv, NETSEC_REG_CLK_EN, 0);
+
+	clk_disable_unprepare(priv->clk);
+
+	return 0;
+}
+
+static int netsec_runtime_resume(struct device *dev)
+{
+	struct netsec_priv *priv = dev_get_drvdata(dev);
+
+	clk_prepare_enable(priv->clk);
+
+	netsec_write(priv, NETSEC_REG_CLK_EN, NETSEC_CLK_EN_REG_DOM_D |
+					       NETSEC_CLK_EN_REG_DOM_C |
+					       NETSEC_CLK_EN_REG_DOM_G);
+	return 0;
+}
+#endif
+
+static const struct dev_pm_ops netsec_pm_ops = {
+	SET_RUNTIME_PM_OPS(netsec_runtime_suspend, netsec_runtime_resume, NULL)
+};
+
+static const struct of_device_id netsec_dt_ids[] = {
+	{ .compatible = "socionext,synquacer-netsec" },
+	{ }
+};
+MODULE_DEVICE_TABLE(of, netsec_dt_ids);
+
+#ifdef CONFIG_ACPI
+static const struct acpi_device_id netsec_acpi_ids[] = {
+	{ "SCX0001" },
+	{ }
+};
+MODULE_DEVICE_TABLE(acpi, netsec_acpi_ids);
+#endif
+
+static struct platform_driver netsec_driver = {
+	.probe	= netsec_probe,
+	.remove	= netsec_remove,
+	.driver = {
+		.name = "netsec",
+		.pm = &netsec_pm_ops,
+		.of_match_table = netsec_dt_ids,
+		.acpi_match_table = ACPI_PTR(netsec_acpi_ids),
+	},
+};
+module_platform_driver(netsec_driver);
+
+MODULE_AUTHOR("Jassi Brar <jaswinder.singh@linaro.org>");
+MODULE_AUTHOR("Ard Biesheuvel <ard.biesheuvel@linaro.org>");
+MODULE_DESCRIPTION("NETSEC Ethernet driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/socionext/sni_ave.c b/drivers/net/ethernet/socionext/sni_ave.c
new file mode 100644
index 0000000..111e7ca
--- /dev/null
+++ b/drivers/net/ethernet/socionext/sni_ave.c
@@ -0,0 +1,1736 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * sni_ave.c - Socionext UniPhier AVE ethernet driver
+ * Copyright 2014 Panasonic Corporation
+ * Copyright 2015-2017 Socionext Inc.
+ */
+
+#include <linux/bitops.h>
+#include <linux/clk.h>
+#include <linux/etherdevice.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/mii.h>
+#include <linux/module.h>
+#include <linux/netdevice.h>
+#include <linux/of_net.h>
+#include <linux/of_mdio.h>
+#include <linux/of_platform.h>
+#include <linux/phy.h>
+#include <linux/reset.h>
+#include <linux/types.h>
+#include <linux/u64_stats_sync.h>
+
+/* General Register Group */
+#define AVE_IDR			0x000	/* ID */
+#define AVE_VR			0x004	/* Version */
+#define AVE_GRR			0x008	/* Global Reset */
+#define AVE_CFGR		0x00c	/* Configuration */
+
+/* Interrupt Register Group */
+#define AVE_GIMR		0x100	/* Global Interrupt Mask */
+#define AVE_GISR		0x104	/* Global Interrupt Status */
+
+/* MAC Register Group */
+#define AVE_TXCR		0x200	/* TX Setup */
+#define AVE_RXCR		0x204	/* RX Setup */
+#define AVE_RXMAC1R		0x208	/* MAC address (lower) */
+#define AVE_RXMAC2R		0x20c	/* MAC address (upper) */
+#define AVE_MDIOCTR		0x214	/* MDIO Control */
+#define AVE_MDIOAR		0x218	/* MDIO Address */
+#define AVE_MDIOWDR		0x21c	/* MDIO Data */
+#define AVE_MDIOSR		0x220	/* MDIO Status */
+#define AVE_MDIORDR		0x224	/* MDIO Rd Data */
+
+/* Descriptor Control Register Group */
+#define AVE_DESCC		0x300	/* Descriptor Control */
+#define AVE_TXDC		0x304	/* TX Descriptor Configuration */
+#define AVE_RXDC0		0x308	/* RX Descriptor Ring0 Configuration */
+#define AVE_IIRQC		0x34c	/* Interval IRQ Control */
+
+/* Packet Filter Register Group */
+#define AVE_PKTF_BASE		0x800	/* PF Base Address */
+#define AVE_PFMBYTE_BASE	0xd00	/* PF Mask Byte Base Address */
+#define AVE_PFMBIT_BASE		0xe00	/* PF Mask Bit Base Address */
+#define AVE_PFSEL_BASE		0xf00	/* PF Selector Base Address */
+#define AVE_PFEN		0xffc	/* Packet Filter Enable */
+#define AVE_PKTF(ent)		(AVE_PKTF_BASE + (ent) * 0x40)
+#define AVE_PFMBYTE(ent)	(AVE_PFMBYTE_BASE + (ent) * 8)
+#define AVE_PFMBIT(ent)		(AVE_PFMBIT_BASE + (ent) * 4)
+#define AVE_PFSEL(ent)		(AVE_PFSEL_BASE + (ent) * 4)
+
+/* 64bit descriptor memory */
+#define AVE_DESC_SIZE_64	12	/* Descriptor Size */
+
+#define AVE_TXDM_64		0x1000	/* Tx Descriptor Memory */
+#define AVE_RXDM_64		0x1c00	/* Rx Descriptor Memory */
+
+#define AVE_TXDM_SIZE_64	0x0ba0	/* Tx Descriptor Memory Size 3KB */
+#define AVE_RXDM_SIZE_64	0x6000	/* Rx Descriptor Memory Size 24KB */
+
+/* 32bit descriptor memory */
+#define AVE_DESC_SIZE_32	8	/* Descriptor Size */
+
+#define AVE_TXDM_32		0x1000	/* Tx Descriptor Memory */
+#define AVE_RXDM_32		0x1800	/* Rx Descriptor Memory */
+
+#define AVE_TXDM_SIZE_32	0x07c0	/* Tx Descriptor Memory Size 2KB */
+#define AVE_RXDM_SIZE_32	0x4000	/* Rx Descriptor Memory Size 16KB */
+
+/* RMII Bridge Register Group */
+#define AVE_RSTCTRL		0x8028	/* Reset control */
+#define AVE_RSTCTRL_RMIIRST	BIT(16)
+#define AVE_LINKSEL		0x8034	/* Link speed setting */
+#define AVE_LINKSEL_100M	BIT(0)
+
+/* AVE_GRR */
+#define AVE_GRR_RXFFR		BIT(5)	/* Reset RxFIFO */
+#define AVE_GRR_PHYRST		BIT(4)	/* Reset external PHY */
+#define AVE_GRR_GRST		BIT(0)	/* Reset all MAC */
+
+/* AVE_CFGR */
+#define AVE_CFGR_FLE		BIT(31)	/* Filter Function */
+#define AVE_CFGR_CHE		BIT(30)	/* Checksum Function */
+#define AVE_CFGR_MII		BIT(27)	/* Func mode (1:MII/RMII, 0:RGMII) */
+#define AVE_CFGR_IPFCEN		BIT(24)	/* IP fragment sum Enable */
+
+/* AVE_GISR (common with GIMR) */
+#define AVE_GI_PHY		BIT(24)	/* PHY interrupt */
+#define AVE_GI_TX		BIT(16)	/* Tx complete */
+#define AVE_GI_RXERR		BIT(8)	/* Receive frame more than max size */
+#define AVE_GI_RXOVF		BIT(7)	/* Overflow at the RxFIFO */
+#define AVE_GI_RXDROP		BIT(6)	/* Drop packet */
+#define AVE_GI_RXIINT		BIT(5)	/* Interval interrupt */
+
+/* AVE_TXCR */
+#define AVE_TXCR_FLOCTR		BIT(18)	/* Flow control */
+#define AVE_TXCR_TXSPD_1G	BIT(17)
+#define AVE_TXCR_TXSPD_100	BIT(16)
+
+/* AVE_RXCR */
+#define AVE_RXCR_RXEN		BIT(30)	/* Rx enable */
+#define AVE_RXCR_FDUPEN		BIT(22)	/* Interface mode */
+#define AVE_RXCR_FLOCTR		BIT(21)	/* Flow control */
+#define AVE_RXCR_AFEN		BIT(19)	/* MAC address filter */
+#define AVE_RXCR_DRPEN		BIT(18)	/* Drop pause frame */
+#define AVE_RXCR_MPSIZ_MASK	GENMASK(10, 0)
+
+/* AVE_MDIOCTR */
+#define AVE_MDIOCTR_RREQ	BIT(3)	/* Read request */
+#define AVE_MDIOCTR_WREQ	BIT(2)	/* Write request */
+
+/* AVE_MDIOSR */
+#define AVE_MDIOSR_STS		BIT(0)	/* access status */
+
+/* AVE_DESCC */
+#define AVE_DESCC_STATUS_MASK	GENMASK(31, 16)
+#define AVE_DESCC_RD0		BIT(8)	/* Enable Rx descriptor Ring0 */
+#define AVE_DESCC_RDSTP		BIT(4)	/* Pause Rx descriptor */
+#define AVE_DESCC_TD		BIT(0)	/* Enable Tx descriptor */
+
+/* AVE_TXDC */
+#define AVE_TXDC_SIZE		GENMASK(27, 16)	/* Size of Tx descriptor */
+#define AVE_TXDC_ADDR		GENMASK(11, 0)	/* Start address */
+#define AVE_TXDC_ADDR_START	0
+
+/* AVE_RXDC0 */
+#define AVE_RXDC0_SIZE		GENMASK(30, 16)	/* Size of Rx descriptor */
+#define AVE_RXDC0_ADDR		GENMASK(14, 0)	/* Start address */
+#define AVE_RXDC0_ADDR_START	0
+
+/* AVE_IIRQC */
+#define AVE_IIRQC_EN0		BIT(27)	/* Enable interval interrupt Ring0 */
+#define AVE_IIRQC_BSCK		GENMASK(15, 0)	/* Interval count unit */
+
+/* Command status for descriptor */
+#define AVE_STS_OWN		BIT(31)	/* Descriptor ownership */
+#define AVE_STS_INTR		BIT(29)	/* Request for interrupt */
+#define AVE_STS_OK		BIT(27)	/* Normal transmit */
+/* TX */
+#define AVE_STS_NOCSUM		BIT(28)	/* No use HW checksum */
+#define AVE_STS_1ST		BIT(26)	/* Head of buffer chain */
+#define AVE_STS_LAST		BIT(25)	/* Tail of buffer chain */
+#define AVE_STS_OWC		BIT(21)	/* Out of window, late collision */
+#define AVE_STS_EC		BIT(20)	/* Excess collision occurred */
+#define AVE_STS_PKTLEN_TX_MASK	GENMASK(15, 0)
+/* RX */
+#define AVE_STS_CSSV		BIT(21)	/* Checksum check performed */
+#define AVE_STS_CSER		BIT(20)	/* Checksum error detected */
+#define AVE_STS_PKTLEN_RX_MASK	GENMASK(10, 0)
+
+/* Packet filter */
+#define AVE_PFMBYTE_MASK0	(GENMASK(31, 8) | GENMASK(5, 0))
+#define AVE_PFMBYTE_MASK1	GENMASK(25, 0)
+#define AVE_PFMBIT_MASK		GENMASK(15, 0)
+
+#define AVE_PF_SIZE		17	/* Total number of packet filters */
+#define AVE_PF_MULTICAST_SIZE	7	/* Number of multicast filters */
+
+#define AVE_PFNUM_FILTER	0	/* No.0 */
+#define AVE_PFNUM_UNICAST	1	/* No.1 */
+#define AVE_PFNUM_BROADCAST	2	/* No.2 */
+#define AVE_PFNUM_MULTICAST	11	/* No.11-17 */
+
+/* NETIF Message control */
+#define AVE_DEFAULT_MSG_ENABLE	(NETIF_MSG_DRV    |	\
+				 NETIF_MSG_PROBE  |	\
+				 NETIF_MSG_LINK   |	\
+				 NETIF_MSG_TIMER  |	\
+				 NETIF_MSG_IFDOWN |	\
+				 NETIF_MSG_IFUP   |	\
+				 NETIF_MSG_RX_ERR |	\
+				 NETIF_MSG_TX_ERR)
+
+/* Parameter for descriptor */
+#define AVE_NR_TXDESC		32	/* Tx descriptor */
+#define AVE_NR_RXDESC		64	/* Rx descriptor */
+
+#define AVE_DESC_OFS_CMDSTS	0
+#define AVE_DESC_OFS_ADDRL	4
+#define AVE_DESC_OFS_ADDRU	8
+
+/* Parameter for ethernet frame */
+#define AVE_MAX_ETHFRAME	1518
+
+/* Parameter for interrupt */
+#define AVE_INTM_COUNT		20
+#define AVE_FORCE_TXINTCNT	1
+
+#define IS_DESC_64BIT(p)	((p)->data->is_desc_64bit)
+
+enum desc_id {
+	AVE_DESCID_RX,
+	AVE_DESCID_TX,
+};
+
+enum desc_state {
+	AVE_DESC_RX_PERMIT,
+	AVE_DESC_RX_SUSPEND,
+	AVE_DESC_START,
+	AVE_DESC_STOP,
+};
+
+struct ave_desc {
+	struct sk_buff	*skbs;
+	dma_addr_t	skbs_dma;
+	size_t		skbs_dmalen;
+};
+
+struct ave_desc_info {
+	u32	ndesc;		/* number of descriptors */
+	u32	daddr;		/* start address of descriptor memory */
+	u32	proc_idx;	/* index of the packet being processed */
+	u32	done_idx;	/* index up to which packets have been processed */
+	struct ave_desc *desc;	/* per-descriptor skb bookkeeping */
+};
+
+struct ave_soc_data {
+	bool	is_desc_64bit;
+};
+
+struct ave_stats {
+	struct	u64_stats_sync	syncp;
+	u64	packets;
+	u64	bytes;
+	u64	errors;
+	u64	dropped;
+	u64	collisions;
+	u64	fifo_errors;
+};
+
+struct ave_private {
+	void __iomem            *base;
+	int                     irq;
+	int			phy_id;
+	unsigned int		desc_size;
+	u32			msg_enable;
+	struct clk		*clk;
+	struct reset_control	*rst;
+	phy_interface_t		phy_mode;
+	struct phy_device	*phydev;
+	struct mii_bus		*mdio;
+
+	/* stats */
+	struct ave_stats	stats_rx;
+	struct ave_stats	stats_tx;
+
+	/* NAPI support */
+	struct net_device	*ndev;
+	struct napi_struct	napi_rx;
+	struct napi_struct	napi_tx;
+
+	/* descriptor */
+	struct ave_desc_info	rx;
+	struct ave_desc_info	tx;
+
+	/* flow control */
+	int pause_auto;
+	int pause_rx;
+	int pause_tx;
+
+	const struct ave_soc_data *data;
+};
+
+static u32 ave_desc_read(struct net_device *ndev, enum desc_id id, int entry,
+			 int offset)
+{
+	struct ave_private *priv = netdev_priv(ndev);
+	u32 addr;
+
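+	/* descriptor memory is mapped into the device's register space,
+	 * so compute the field's offset within the selected ring
+	 */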
+	addr = ((id == AVE_DESCID_TX) ? priv->tx.daddr : priv->rx.daddr)
+		+ entry * priv->desc_size + offset;
+
+	return readl(priv->base + addr);
+}
+
+static u32 ave_desc_read_cmdsts(struct net_device *ndev, enum desc_id id,
+				int entry)
+{
+	return ave_desc_read(ndev, id, entry, AVE_DESC_OFS_CMDSTS);
+}
+
+static void ave_desc_write(struct net_device *ndev, enum desc_id id,
+			   int entry, int offset, u32 val)
+{
+	struct ave_private *priv = netdev_priv(ndev);
+	u32 addr;
+
+	addr = ((id == AVE_DESCID_TX) ? priv->tx.daddr : priv->rx.daddr)
+		+ entry * priv->desc_size + offset;
+
+	writel(val, priv->base + addr);
+}
+
+static void ave_desc_write_cmdsts(struct net_device *ndev, enum desc_id id,
+				  int entry, u32 val)
+{
+	ave_desc_write(ndev, id, entry, AVE_DESC_OFS_CMDSTS, val);
+}
+
+static void ave_desc_write_addr(struct net_device *ndev, enum desc_id id,
+				int entry, dma_addr_t paddr)
+{
+	struct ave_private *priv = netdev_priv(ndev);
+
+	ave_desc_write(ndev, id, entry, AVE_DESC_OFS_ADDRL,
+		       lower_32_bits(paddr));
+	if (IS_DESC_64BIT(priv))
+		ave_desc_write(ndev, id,
+			       entry, AVE_DESC_OFS_ADDRU,
+			       upper_32_bits(paddr));
+}
+
+static u32 ave_irq_disable_all(struct net_device *ndev)
+{
+	struct ave_private *priv = netdev_priv(ndev);
+	u32 ret;
+
+	ret = readl(priv->base + AVE_GIMR);
+	writel(0, priv->base + AVE_GIMR);
+
+	return ret;
+}
+
+static void ave_irq_restore(struct net_device *ndev, u32 val)
+{
+	struct ave_private *priv = netdev_priv(ndev);
+
+	writel(val, priv->base + AVE_GIMR);
+}
+
+static void ave_irq_enable(struct net_device *ndev, u32 bitflag)
+{
+	struct ave_private *priv = netdev_priv(ndev);
+
+	writel(readl(priv->base + AVE_GIMR) | bitflag, priv->base + AVE_GIMR);
+	writel(bitflag, priv->base + AVE_GISR);
+}
+
+static void ave_hw_write_macaddr(struct net_device *ndev,
+				 const unsigned char *mac_addr,
+				 int reg1, int reg2)
+{
+	struct ave_private *priv = netdev_priv(ndev);
+
+	writel(mac_addr[0] | mac_addr[1] << 8 |
+	       mac_addr[2] << 16 | mac_addr[3] << 24, priv->base + reg1);
+	writel(mac_addr[4] | mac_addr[5] << 8, priv->base + reg2);
+}
+
+static void ave_hw_read_version(struct net_device *ndev, char *buf, int len)
+{
+	struct ave_private *priv = netdev_priv(ndev);
+	u32 major, minor, vr;
+
+	vr = readl(priv->base + AVE_VR);
+	major = (vr & GENMASK(15, 8)) >> 8;
+	minor = (vr & GENMASK(7, 0));
+	snprintf(buf, len, "v%u.%u", major, minor);
+}
+
+static void ave_ethtool_get_drvinfo(struct net_device *ndev,
+				    struct ethtool_drvinfo *info)
+{
+	struct device *dev = ndev->dev.parent;
+
+	strlcpy(info->driver, dev->driver->name, sizeof(info->driver));
+	strlcpy(info->bus_info, dev_name(dev), sizeof(info->bus_info));
+	ave_hw_read_version(ndev, info->fw_version, sizeof(info->fw_version));
+}
+
+static u32 ave_ethtool_get_msglevel(struct net_device *ndev)
+{
+	struct ave_private *priv = netdev_priv(ndev);
+
+	return priv->msg_enable;
+}
+
+static void ave_ethtool_set_msglevel(struct net_device *ndev, u32 val)
+{
+	struct ave_private *priv = netdev_priv(ndev);
+
+	priv->msg_enable = val;
+}
+
+static void ave_ethtool_get_wol(struct net_device *ndev,
+				struct ethtool_wolinfo *wol)
+{
+	wol->supported = 0;
+	wol->wolopts   = 0;
+
+	if (ndev->phydev)
+		phy_ethtool_get_wol(ndev->phydev, wol);
+}
+
+static int ave_ethtool_set_wol(struct net_device *ndev,
+			       struct ethtool_wolinfo *wol)
+{
+	int ret;
+
+	if (!ndev->phydev ||
+	    (wol->wolopts & (WAKE_ARP | WAKE_MAGICSECURE)))
+		return -EOPNOTSUPP;
+
+	ret = phy_ethtool_set_wol(ndev->phydev, wol);
+	if (!ret)
+		device_set_wakeup_enable(&ndev->dev, !!wol->wolopts);
+
+	return ret;
+}
+
+static void ave_ethtool_get_pauseparam(struct net_device *ndev,
+				       struct ethtool_pauseparam *pause)
+{
+	struct ave_private *priv = netdev_priv(ndev);
+
+	pause->autoneg  = priv->pause_auto;
+	pause->rx_pause = priv->pause_rx;
+	pause->tx_pause = priv->pause_tx;
+}
+
+static int ave_ethtool_set_pauseparam(struct net_device *ndev,
+				      struct ethtool_pauseparam *pause)
+{
+	struct ave_private *priv = netdev_priv(ndev);
+	struct phy_device *phydev = ndev->phydev;
+
+	if (!phydev)
+		return -EINVAL;
+
+	priv->pause_auto = pause->autoneg;
+	priv->pause_rx   = pause->rx_pause;
+	priv->pause_tx   = pause->tx_pause;
+
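+	/* Standard flow-control advertisement encoding: rx only sets
+	 * Pause|Asym_Pause, tx only sets Asym_Pause, and rx+tx leaves just
+	 * Pause, which is why tx_pause toggles Asym_Pause below.
+	 */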
+	phydev->advertising &= ~(ADVERTISED_Pause | ADVERTISED_Asym_Pause);
+	if (pause->rx_pause)
+		phydev->advertising |= ADVERTISED_Pause | ADVERTISED_Asym_Pause;
+	if (pause->tx_pause)
+		phydev->advertising ^= ADVERTISED_Asym_Pause;
+
+	if (pause->autoneg) {
+		if (netif_running(ndev))
+			phy_start_aneg(phydev);
+	}
+
+	return 0;
+}
+
+static const struct ethtool_ops ave_ethtool_ops = {
+	.get_link_ksettings	= phy_ethtool_get_link_ksettings,
+	.set_link_ksettings	= phy_ethtool_set_link_ksettings,
+	.get_drvinfo		= ave_ethtool_get_drvinfo,
+	.nway_reset		= phy_ethtool_nway_reset,
+	.get_link		= ethtool_op_get_link,
+	.get_msglevel		= ave_ethtool_get_msglevel,
+	.set_msglevel		= ave_ethtool_set_msglevel,
+	.get_wol		= ave_ethtool_get_wol,
+	.set_wol		= ave_ethtool_set_wol,
+	.get_pauseparam         = ave_ethtool_get_pauseparam,
+	.set_pauseparam         = ave_ethtool_set_pauseparam,
+};
+
+static int ave_mdiobus_read(struct mii_bus *bus, int phyid, int regnum)
+{
+	struct net_device *ndev = bus->priv;
+	struct ave_private *priv;
+	u32 mdioctl, mdiosr;
+	int ret;
+
+	priv = netdev_priv(ndev);
+
+	/* write address */
+	writel((phyid << 8) | regnum, priv->base + AVE_MDIOAR);
+
+	/* read request */
+	mdioctl = readl(priv->base + AVE_MDIOCTR);
+	writel((mdioctl | AVE_MDIOCTR_RREQ) & ~AVE_MDIOCTR_WREQ,
+	       priv->base + AVE_MDIOCTR);
+
+	ret = readl_poll_timeout(priv->base + AVE_MDIOSR, mdiosr,
+				 !(mdiosr & AVE_MDIOSR_STS), 20, 2000);
+	if (ret) {
+		netdev_err(ndev, "failed to read (phy:%d reg:%x)\n",
+			   phyid, regnum);
+		return ret;
+	}
+
+	return readl(priv->base + AVE_MDIORDR) & GENMASK(15, 0);
+}
+
+static int ave_mdiobus_write(struct mii_bus *bus, int phyid, int regnum,
+			     u16 val)
+{
+	struct net_device *ndev = bus->priv;
+	struct ave_private *priv;
+	u32 mdioctl, mdiosr;
+	int ret;
+
+	priv = netdev_priv(ndev);
+
+	/* write address */
+	writel((phyid << 8) | regnum, priv->base + AVE_MDIOAR);
+
+	/* write data */
+	writel(val, priv->base + AVE_MDIOWDR);
+
+	/* write request */
+	mdioctl = readl(priv->base + AVE_MDIOCTR);
+	writel((mdioctl | AVE_MDIOCTR_WREQ) & ~AVE_MDIOCTR_RREQ,
+	       priv->base + AVE_MDIOCTR);
+
+	ret = readl_poll_timeout(priv->base + AVE_MDIOSR, mdiosr,
+				 !(mdiosr & AVE_MDIOSR_STS), 20, 2000);
+	if (ret)
+		netdev_err(ndev, "failed to write (phy:%d reg:%x)\n",
+			   phyid, regnum);
+
+	return ret;
+}
+
+static int ave_dma_map(struct net_device *ndev, struct ave_desc *desc,
+		       void *ptr, size_t len, enum dma_data_direction dir,
+		       dma_addr_t *paddr)
+{
+	dma_addr_t map_addr;
+
+	map_addr = dma_map_single(ndev->dev.parent, ptr, len, dir);
+	if (unlikely(dma_mapping_error(ndev->dev.parent, map_addr)))
+		return -ENOMEM;
+
+	desc->skbs_dma = map_addr;
+	desc->skbs_dmalen = len;
+	*paddr = map_addr;
+
+	return 0;
+}
+
+static void ave_dma_unmap(struct net_device *ndev, struct ave_desc *desc,
+			  enum dma_data_direction dir)
+{
+	if (!desc->skbs_dma)
+		return;
+
+	dma_unmap_single(ndev->dev.parent,
+			 desc->skbs_dma, desc->skbs_dmalen, dir);
+	desc->skbs_dma = 0;
+}
+
+/* Prepare Rx descriptor and memory */
+static int ave_rxdesc_prepare(struct net_device *ndev, int entry)
+{
+	struct ave_private *priv = netdev_priv(ndev);
+	struct sk_buff *skb;
+	dma_addr_t paddr;
+	int ret;
+
+	skb = priv->rx.desc[entry].skbs;
+	if (!skb) {
+		skb = netdev_alloc_skb_ip_align(ndev,
+						AVE_MAX_ETHFRAME);
+		if (!skb) {
+			netdev_err(ndev, "can't allocate skb for Rx\n");
+			return -ENOMEM;
+		}
+	}
+
+	/* invalidate the entry via cmdsts while the buffer is replaced */
+	ave_desc_write_cmdsts(ndev, AVE_DESCID_RX, entry,
+			      AVE_STS_INTR | AVE_STS_OWN);
+
+	/* map Rx buffer
+	 * A buffer attached to an Rx descriptor has two restrictions:
+	 * - the buffer address must be 4-byte aligned
+	 * - the buffer begins with a 2-byte headroom, and data is written
+	 *   starting at (buffer + 2)
+	 * To satisfy both, map the address obtained by backing off the
+	 * NET_IP_ALIGN that netdev_alloc_skb_ip_align() advanced the
+	 * pointer by, and expand the mapping size by NET_IP_ALIGN.
+	 */
+	ret = ave_dma_map(ndev, &priv->rx.desc[entry],
+			  skb->data - NET_IP_ALIGN,
+			  AVE_MAX_ETHFRAME + NET_IP_ALIGN,
+			  DMA_FROM_DEVICE, &paddr);
+	if (ret) {
+		netdev_err(ndev, "can't map skb for Rx\n");
+		dev_kfree_skb_any(skb);
+		return ret;
+	}
+	priv->rx.desc[entry].skbs = skb;
+
+	/* set buffer pointer */
+	ave_desc_write_addr(ndev, AVE_DESCID_RX, entry, paddr);
+
+	/* hand the entry back to hardware via cmdsts with the buffer size */
+	ave_desc_write_cmdsts(ndev, AVE_DESCID_RX, entry,
+			      AVE_STS_INTR | AVE_MAX_ETHFRAME);
+
+	return ret;
+}
+
+/* Switch state of descriptor */
+static int ave_desc_switch(struct net_device *ndev, enum desc_state state)
+{
+	struct ave_private *priv = netdev_priv(ndev);
+	int ret = 0;
+	u32 val;
+
+	switch (state) {
+	case AVE_DESC_START:
+		writel(AVE_DESCC_TD | AVE_DESCC_RD0, priv->base + AVE_DESCC);
+		break;
+
+	case AVE_DESC_STOP:
+		writel(0, priv->base + AVE_DESCC);
+		if (readl_poll_timeout(priv->base + AVE_DESCC, val, !val,
+				       150, 15000)) {
+			netdev_err(ndev, "can't stop descriptor\n");
+			ret = -EBUSY;
+		}
+		break;
+
+	case AVE_DESC_RX_SUSPEND:
+		val = readl(priv->base + AVE_DESCC);
+		val |= AVE_DESCC_RDSTP;
+		val &= ~AVE_DESCC_STATUS_MASK;
+		writel(val, priv->base + AVE_DESCC);
+		if (readl_poll_timeout(priv->base + AVE_DESCC, val,
+				       val & (AVE_DESCC_RDSTP << 16),
+				       150, 150000)) {
+			netdev_err(ndev, "can't suspend descriptor\n");
+			ret = -EBUSY;
+		}
+		break;
+
+	case AVE_DESC_RX_PERMIT:
+		val = readl(priv->base + AVE_DESCC);
+		val &= ~AVE_DESCC_RDSTP;
+		val &= ~AVE_DESCC_STATUS_MASK;
+		writel(val, priv->base + AVE_DESCC);
+		break;
+
+	default:
+		ret = -EINVAL;
+		break;
+	}
+
+	return ret;
+}
+
+static int ave_tx_complete(struct net_device *ndev)
+{
+	struct ave_private *priv = netdev_priv(ndev);
+	u32 proc_idx, done_idx, ndesc, cmdsts;
+	unsigned int nr_freebuf = 0;
+	unsigned int tx_packets = 0;
+	unsigned int tx_bytes = 0;
+
+	proc_idx = priv->tx.proc_idx;
+	done_idx = priv->tx.done_idx;
+	ndesc    = priv->tx.ndesc;
+
+	/* free pre-stored skb from done_idx to proc_idx */
+	while (proc_idx != done_idx) {
+		cmdsts = ave_desc_read_cmdsts(ndev, AVE_DESCID_TX, done_idx);
+
+		/* do nothing if owner is HW (==1 for Tx) */
+		if (cmdsts & AVE_STS_OWN)
+			break;
+
+		/* check Tx status and update statistics */
+		if (cmdsts & AVE_STS_OK) {
+			tx_bytes += cmdsts & AVE_STS_PKTLEN_TX_MASK;
+			/* success */
+			if (cmdsts & AVE_STS_LAST)
+				tx_packets++;
+		} else {
+			/* error */
+			if (cmdsts & AVE_STS_LAST) {
+				priv->stats_tx.errors++;
+				if (cmdsts & (AVE_STS_OWC | AVE_STS_EC))
+					priv->stats_tx.collisions++;
+			}
+		}
+
+		/* release skb */
+		if (priv->tx.desc[done_idx].skbs) {
+			ave_dma_unmap(ndev, &priv->tx.desc[done_idx],
+				      DMA_TO_DEVICE);
+			dev_consume_skb_any(priv->tx.desc[done_idx].skbs);
+			priv->tx.desc[done_idx].skbs = NULL;
+			nr_freebuf++;
+		}
+		done_idx = (done_idx + 1) % ndesc;
+	}
+
+	priv->tx.done_idx = done_idx;
+
+	/* update stats */
+	u64_stats_update_begin(&priv->stats_tx.syncp);
+	priv->stats_tx.packets += tx_packets;
+	priv->stats_tx.bytes   += tx_bytes;
+	u64_stats_update_end(&priv->stats_tx.syncp);
+
+	/* wake queue for freeing buffer */
+	if (unlikely(netif_queue_stopped(ndev)) && nr_freebuf)
+		netif_wake_queue(ndev);
+
+	return nr_freebuf;
+}
+
+static int ave_rx_receive(struct net_device *ndev, int num)
+{
+	struct ave_private *priv = netdev_priv(ndev);
+	unsigned int rx_packets = 0;
+	unsigned int rx_bytes = 0;
+	u32 proc_idx, done_idx;
+	struct sk_buff *skb;
+	unsigned int pktlen;
+	int restpkt, npkts;
+	u32 ndesc, cmdsts;
+
+	proc_idx = priv->rx.proc_idx;
+	done_idx = priv->rx.done_idx;
+	ndesc    = priv->rx.ndesc;
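+	/* number of descriptors that can be consumed before catching up
+	 * with done_idx; one slot is always kept in reserve
+	 */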
+	restpkt  = ((proc_idx + ndesc - 1) - done_idx) % ndesc;
+
+	for (npkts = 0; npkts < num; npkts++) {
+		/* no more packets can be received; stop so the descriptors
+		 * are refilled quickly
+		 */
+		if (--restpkt < 0)
+			break;
+
+		cmdsts = ave_desc_read_cmdsts(ndev, AVE_DESCID_RX, proc_idx);
+
+		/* do nothing if owner is HW (==0 for Rx) */
+		if (!(cmdsts & AVE_STS_OWN))
+			break;
+
+		if (!(cmdsts & AVE_STS_OK)) {
+			priv->stats_rx.errors++;
+			proc_idx = (proc_idx + 1) % ndesc;
+			continue;
+		}
+
+		pktlen = cmdsts & AVE_STS_PKTLEN_RX_MASK;
+
+		/* get skbuff for rx */
+		skb = priv->rx.desc[proc_idx].skbs;
+		priv->rx.desc[proc_idx].skbs = NULL;
+
+		ave_dma_unmap(ndev, &priv->rx.desc[proc_idx], DMA_FROM_DEVICE);
+
+		skb->dev = ndev;
+		skb_put(skb, pktlen);
+		skb->protocol = eth_type_trans(skb, ndev);
+
+		if ((cmdsts & AVE_STS_CSSV) && (!(cmdsts & AVE_STS_CSER)))
+			skb->ip_summed = CHECKSUM_UNNECESSARY;
+
+		rx_packets++;
+		rx_bytes += pktlen;
+
+		netif_receive_skb(skb);
+
+		proc_idx = (proc_idx + 1) % ndesc;
+	}
+
+	priv->rx.proc_idx = proc_idx;
+
+	/* update stats */
+	u64_stats_update_begin(&priv->stats_rx.syncp);
+	priv->stats_rx.packets += rx_packets;
+	priv->stats_rx.bytes   += rx_bytes;
+	u64_stats_update_end(&priv->stats_rx.syncp);
+
+	/* refill the Rx buffers */
+	while (proc_idx != done_idx) {
+		if (ave_rxdesc_prepare(ndev, done_idx))
+			break;
+		done_idx = (done_idx + 1) % ndesc;
+	}
+
+	priv->rx.done_idx = done_idx;
+
+	return npkts;
+}
+
+static int ave_napi_poll_rx(struct napi_struct *napi, int budget)
+{
+	struct ave_private *priv;
+	struct net_device *ndev;
+	int num;
+
+	priv = container_of(napi, struct ave_private, napi_rx);
+	ndev = priv->ndev;
+
+	num = ave_rx_receive(ndev, budget);
+	if (num < budget) {
+		napi_complete_done(napi, num);
+
+		/* enable Rx interrupt when NAPI finishes */
+		ave_irq_enable(ndev, AVE_GI_RXIINT);
+	}
+
+	return num;
+}
+
+static int ave_napi_poll_tx(struct napi_struct *napi, int budget)
+{
+	struct ave_private *priv;
+	struct net_device *ndev;
+	int num;
+
+	priv = container_of(napi, struct ave_private, napi_tx);
+	ndev = priv->ndev;
+
+	num = ave_tx_complete(ndev);
+	napi_complete(napi);
+
+	/* enable Tx interrupt when NAPI finishes */
+	ave_irq_enable(ndev, AVE_GI_TX);
+
+	return num;
+}
+
+static void ave_global_reset(struct net_device *ndev)
+{
+	struct ave_private *priv = netdev_priv(ndev);
+	u32 val;
+
+	/* set config register */
+	val = AVE_CFGR_FLE | AVE_CFGR_IPFCEN | AVE_CFGR_CHE;
+	if (!phy_interface_mode_is_rgmii(priv->phy_mode))
+		val |= AVE_CFGR_MII;
+	writel(val, priv->base + AVE_CFGR);
+
+	/* reset RMII register */
+	val = readl(priv->base + AVE_RSTCTRL);
+	val &= ~AVE_RSTCTRL_RMIIRST;
+	writel(val, priv->base + AVE_RSTCTRL);
+
+	/* assert reset */
+	writel(AVE_GRR_GRST | AVE_GRR_PHYRST, priv->base + AVE_GRR);
+	msleep(20);
+
+	/* 1st, negate PHY reset only */
+	writel(AVE_GRR_GRST, priv->base + AVE_GRR);
+	msleep(40);
+
+	/* negate reset */
+	writel(0, priv->base + AVE_GRR);
+	msleep(40);
+
+	/* negate RMII register */
+	val = readl(priv->base + AVE_RSTCTRL);
+	val |= AVE_RSTCTRL_RMIIRST;
+	writel(val, priv->base + AVE_RSTCTRL);
+
+	ave_irq_disable_all(ndev);
+}
+
+static void ave_rxfifo_reset(struct net_device *ndev)
+{
+	struct ave_private *priv = netdev_priv(ndev);
+	u32 rxcr_org;
+
+	/* save and disable MAC receive op */
+	rxcr_org = readl(priv->base + AVE_RXCR);
+	writel(rxcr_org & (~AVE_RXCR_RXEN), priv->base + AVE_RXCR);
+
+	/* suspend Rx descriptor */
+	ave_desc_switch(ndev, AVE_DESC_RX_SUSPEND);
+
+	/* drain packets already received before the FIFO is reset */
+	ave_rx_receive(ndev, priv->rx.ndesc);
+
+	/* assert reset */
+	writel(AVE_GRR_RXFFR, priv->base + AVE_GRR);
+	usleep_range(40, 50);
+
+	/* negate reset */
+	writel(0, priv->base + AVE_GRR);
+	usleep_range(10, 20);
+
+	/* negate interrupt status */
+	writel(AVE_GI_RXOVF, priv->base + AVE_GISR);
+
+	/* permit descriptor */
+	ave_desc_switch(ndev, AVE_DESC_RX_PERMIT);
+
+	/* restore MAC receive operation */
+	writel(rxcr_org, priv->base + AVE_RXCR);
+}
+
+static irqreturn_t ave_irq_handler(int irq, void *netdev)
+{
+	struct net_device *ndev = (struct net_device *)netdev;
+	struct ave_private *priv = netdev_priv(ndev);
+	u32 gimr_val, gisr_val;
+
+	gimr_val = ave_irq_disable_all(ndev);
+
+	/* get interrupt status */
+	gisr_val = readl(priv->base + AVE_GISR);
+
+	/* PHY */
+	if (gisr_val & AVE_GI_PHY)
+		writel(AVE_GI_PHY, priv->base + AVE_GISR);
+
+	/* check for an oversized frame */
+	if (gisr_val & AVE_GI_RXERR) {
+		writel(AVE_GI_RXERR, priv->base + AVE_GISR);
+		netdev_err(ndev, "received a packet exceeding the frame buffer\n");
+	}
+
+	gisr_val &= gimr_val;
+	if (!gisr_val)
+		goto exit_isr;
+
+	/* RxFIFO overflow */
+	if (gisr_val & AVE_GI_RXOVF) {
+		priv->stats_rx.fifo_errors++;
+		ave_rxfifo_reset(ndev);
+		goto exit_isr;
+	}
+
+	/* Rx drop */
+	if (gisr_val & AVE_GI_RXDROP) {
+		priv->stats_rx.dropped++;
+		writel(AVE_GI_RXDROP, priv->base + AVE_GISR);
+	}
+
+	/* Rx interval */
+	if (gisr_val & AVE_GI_RXIINT) {
+		napi_schedule(&priv->napi_rx);
+		/* keep the Rx interrupt masked until NAPI completes */
+		gimr_val &= ~AVE_GI_RXIINT;
+	}
+
+	/* Tx completed */
+	if (gisr_val & AVE_GI_TX) {
+		napi_schedule(&priv->napi_tx);
+		/* keep the Tx interrupt masked until NAPI completes */
+		gimr_val &= ~AVE_GI_TX;
+	}
+
+exit_isr:
+	ave_irq_restore(ndev, gimr_val);
+
+	return IRQ_HANDLED;
+}
+
+static int ave_pfsel_start(struct net_device *ndev, unsigned int entry)
+{
+	struct ave_private *priv = netdev_priv(ndev);
+	u32 val;
+
+	if (WARN_ON(entry > AVE_PF_SIZE))
+		return -EINVAL;
+
+	val = readl(priv->base + AVE_PFEN);
+	writel(val | BIT(entry), priv->base + AVE_PFEN);
+
+	return 0;
+}
+
+static int ave_pfsel_stop(struct net_device *ndev, unsigned int entry)
+{
+	struct ave_private *priv = netdev_priv(ndev);
+	u32 val;
+
+	if (WARN_ON(entry > AVE_PF_SIZE))
+		return -EINVAL;
+
+	val = readl(priv->base + AVE_PFEN);
+	writel(val & ~BIT(entry), priv->base + AVE_PFEN);
+
+	return 0;
+}
+
+static int ave_pfsel_set_macaddr(struct net_device *ndev,
+				 unsigned int entry,
+				 const unsigned char *mac_addr,
+				 unsigned int set_size)
+{
+	struct ave_private *priv = netdev_priv(ndev);
+
+	if (WARN_ON(entry > AVE_PF_SIZE))
+		return -EINVAL;
+	if (WARN_ON(set_size > 6))
+		return -EINVAL;
+
+	ave_pfsel_stop(ndev, entry);
+
+	/* set MAC address for the filter */
+	ave_hw_write_macaddr(ndev, mac_addr,
+			     AVE_PKTF(entry), AVE_PKTF(entry) + 4);
+
+	/* set byte mask: a set mask bit means that byte is ignored, so
+	 * only the first set_size bytes of the address are compared
+	 */
+	writel(GENMASK(31, set_size) & AVE_PFMBYTE_MASK0,
+	       priv->base + AVE_PFMBYTE(entry));
+	writel(AVE_PFMBYTE_MASK1, priv->base + AVE_PFMBYTE(entry) + 4);
+
+	/* set bit mask filter */
+	writel(AVE_PFMBIT_MASK, priv->base + AVE_PFMBIT(entry));
+
+	/* set selector to ring 0 */
+	writel(0, priv->base + AVE_PFSEL(entry));
+
+	/* restart filter */
+	ave_pfsel_start(ndev, entry);
+
+	return 0;
+}
+
+static void ave_pfsel_set_promisc(struct net_device *ndev,
+				  unsigned int entry, u32 rxring)
+{
+	struct ave_private *priv = netdev_priv(ndev);
+
+	if (WARN_ON(entry > AVE_PF_SIZE))
+		return;
+
+	ave_pfsel_stop(ndev, entry);
+
+	/* set byte mask */
+	writel(AVE_PFMBYTE_MASK0, priv->base + AVE_PFMBYTE(entry));
+	writel(AVE_PFMBYTE_MASK1, priv->base + AVE_PFMBYTE(entry) + 4);
+
+	/* set bit mask filter */
+	writel(AVE_PFMBIT_MASK, priv->base + AVE_PFMBIT(entry));
+
+	/* set selector to rxring */
+	writel(rxring, priv->base + AVE_PFSEL(entry));
+
+	ave_pfsel_start(ndev, entry);
+}
+
+static void ave_pfsel_init(struct net_device *ndev)
+{
+	unsigned char bcast_mac[ETH_ALEN];
+	int i;
+
+	eth_broadcast_addr(bcast_mac);
+
+	for (i = 0; i < AVE_PF_SIZE; i++)
+		ave_pfsel_stop(ndev, i);
+
+	/* promiscuous entry, select ring 0 */
+	ave_pfsel_set_promisc(ndev, AVE_PFNUM_FILTER, 0);
+
+	/* unicast entry */
+	ave_pfsel_set_macaddr(ndev, AVE_PFNUM_UNICAST, ndev->dev_addr, 6);
+
+	/* broadcast entry */
+	ave_pfsel_set_macaddr(ndev, AVE_PFNUM_BROADCAST, bcast_mac, 6);
+}
+
+static void ave_phy_adjust_link(struct net_device *ndev)
+{
+	struct ave_private *priv = netdev_priv(ndev);
+	struct phy_device *phydev = ndev->phydev;
+	u32 val, txcr, rxcr, rxcr_org;
+	u16 rmt_adv = 0, lcl_adv = 0;
+	u8 cap;
+
+	/* set RGMII speed */
+	val = readl(priv->base + AVE_TXCR);
+	val &= ~(AVE_TXCR_TXSPD_100 | AVE_TXCR_TXSPD_1G);
+
+	if (phy_interface_is_rgmii(phydev) && phydev->speed == SPEED_1000)
+		val |= AVE_TXCR_TXSPD_1G;
+	else if (phydev->speed == SPEED_100)
+		val |= AVE_TXCR_TXSPD_100;
+
+	writel(val, priv->base + AVE_TXCR);
+
+	/* set RMII speed (100M/10M only) */
+	if (!phy_interface_is_rgmii(phydev)) {
+		val = readl(priv->base + AVE_LINKSEL);
+		if (phydev->speed == SPEED_10)
+			val &= ~AVE_LINKSEL_100M;
+		else
+			val |= AVE_LINKSEL_100M;
+		writel(val, priv->base + AVE_LINKSEL);
+	}
+
+	/* check current RXCR/TXCR */
+	rxcr = readl(priv->base + AVE_RXCR);
+	txcr = readl(priv->base + AVE_TXCR);
+	rxcr_org = rxcr;
+
+	if (phydev->duplex) {
+		rxcr |= AVE_RXCR_FDUPEN;
+
+		if (phydev->pause)
+			rmt_adv |= LPA_PAUSE_CAP;
+		if (phydev->asym_pause)
+			rmt_adv |= LPA_PAUSE_ASYM;
+		if (phydev->advertising & ADVERTISED_Pause)
+			lcl_adv |= ADVERTISE_PAUSE_CAP;
+		if (phydev->advertising & ADVERTISED_Asym_Pause)
+			lcl_adv |= ADVERTISE_PAUSE_ASYM;
+
+		cap = mii_resolve_flowctrl_fdx(lcl_adv, rmt_adv);
+		if (cap & FLOW_CTRL_TX)
+			txcr |= AVE_TXCR_FLOCTR;
+		else
+			txcr &= ~AVE_TXCR_FLOCTR;
+		if (cap & FLOW_CTRL_RX)
+			rxcr |= AVE_RXCR_FLOCTR;
+		else
+			rxcr &= ~AVE_RXCR_FLOCTR;
+	} else {
+		rxcr &= ~AVE_RXCR_FDUPEN;
+		rxcr &= ~AVE_RXCR_FLOCTR;
+		txcr &= ~AVE_TXCR_FLOCTR;
+	}
+
+	if (rxcr_org != rxcr) {
+		/* disable Rx mac */
+		writel(rxcr & ~AVE_RXCR_RXEN, priv->base + AVE_RXCR);
+		/* change and enable TX/Rx mac */
+		writel(txcr, priv->base + AVE_TXCR);
+		writel(rxcr, priv->base + AVE_RXCR);
+	}
+
+	phy_print_status(phydev);
+}
+
+static void ave_macaddr_init(struct net_device *ndev)
+{
+	ave_hw_write_macaddr(ndev, ndev->dev_addr, AVE_RXMAC1R, AVE_RXMAC2R);
+
+	/* pfsel unicast entry */
+	ave_pfsel_set_macaddr(ndev, AVE_PFNUM_UNICAST, ndev->dev_addr, 6);
+}
+
+static int ave_init(struct net_device *ndev)
+{
+	struct ethtool_wolinfo wol = { .cmd = ETHTOOL_GWOL };
+	struct ave_private *priv = netdev_priv(ndev);
+	struct device *dev = ndev->dev.parent;
+	struct device_node *np = dev->of_node;
+	struct device_node *mdio_np;
+	struct phy_device *phydev;
+	int ret;
+
+	/* enable the clock here; the hardware is accessed before ndo_open */
+	ret = clk_prepare_enable(priv->clk);
+	if (ret) {
+		dev_err(dev, "can't enable clock\n");
+		return ret;
+	}
+	ret = reset_control_deassert(priv->rst);
+	if (ret) {
+		dev_err(dev, "can't deassert reset\n");
+		goto out_clk_disable;
+	}
+
+	ave_global_reset(ndev);
+
+	mdio_np = of_get_child_by_name(np, "mdio");
+	if (!mdio_np) {
+		dev_err(dev, "mdio node not found\n");
+		ret = -EINVAL;
+		goto out_reset_assert;
+	}
+	ret = of_mdiobus_register(priv->mdio, mdio_np);
+	of_node_put(mdio_np);
+	if (ret) {
+		dev_err(dev, "failed to register mdiobus\n");
+		goto out_reset_assert;
+	}
+
+	phydev = of_phy_get_and_connect(ndev, np, ave_phy_adjust_link);
+	if (!phydev) {
+		dev_err(dev, "could not attach to PHY\n");
+		ret = -ENODEV;
+		goto out_mdio_unregister;
+	}
+
+	priv->phydev = phydev;
+
+	phy_ethtool_get_wol(phydev, &wol);
+	device_set_wakeup_capable(&ndev->dev, !!wol.supported);
+
+	if (!phy_interface_is_rgmii(phydev)) {
+		phydev->supported &= ~PHY_GBIT_FEATURES;
+		phydev->supported |= PHY_BASIC_FEATURES;
+	}
+	phydev->supported |= SUPPORTED_Pause | SUPPORTED_Asym_Pause;
+
+	phy_attached_info(phydev);
+
+	return 0;
+
+out_mdio_unregister:
+	mdiobus_unregister(priv->mdio);
+out_reset_assert:
+	reset_control_assert(priv->rst);
+out_clk_disable:
+	clk_disable_unprepare(priv->clk);
+
+	return ret;
+}
+
+static void ave_uninit(struct net_device *ndev)
+{
+	struct ave_private *priv = netdev_priv(ndev);
+
+	phy_disconnect(priv->phydev);
+	mdiobus_unregister(priv->mdio);
+
+	/* the hardware is accessed after ndo_stop, so assert reset and
+	 * disable the clock only here
+	 */
+	reset_control_assert(priv->rst);
+	clk_disable_unprepare(priv->clk);
+}
+
+static int ave_open(struct net_device *ndev)
+{
+	struct ave_private *priv = netdev_priv(ndev);
+	int entry;
+	int ret;
+	u32 val;
+
+	ret = request_irq(priv->irq, ave_irq_handler, IRQF_SHARED, ndev->name,
+			  ndev);
+	if (ret)
+		return ret;
+
+	priv->tx.desc = kcalloc(priv->tx.ndesc, sizeof(*priv->tx.desc),
+				GFP_KERNEL);
+	if (!priv->tx.desc) {
+		ret = -ENOMEM;
+		goto out_free_irq;
+	}
+
+	priv->rx.desc = kcalloc(priv->rx.ndesc, sizeof(*priv->rx.desc),
+				GFP_KERNEL);
+	if (!priv->rx.desc) {
+		kfree(priv->tx.desc);
+		ret = -ENOMEM;
+		goto out_free_irq;
+	}
+
+	/* initialize Tx work and descriptor */
+	priv->tx.proc_idx = 0;
+	priv->tx.done_idx = 0;
+	for (entry = 0; entry < priv->tx.ndesc; entry++) {
+		ave_desc_write_cmdsts(ndev, AVE_DESCID_TX, entry, 0);
+		ave_desc_write_addr(ndev, AVE_DESCID_TX, entry, 0);
+	}
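+	/* program the Tx descriptor region: start address and total size */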
+	writel(AVE_TXDC_ADDR_START |
+	       (((priv->tx.ndesc * priv->desc_size) << 16) & AVE_TXDC_SIZE),
+	       priv->base + AVE_TXDC);
+
+	/* initialize Rx work and descriptor */
+	priv->rx.proc_idx = 0;
+	priv->rx.done_idx = 0;
+	for (entry = 0; entry < priv->rx.ndesc; entry++) {
+		if (ave_rxdesc_prepare(ndev, entry))
+			break;
+	}
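+	/* program the Rx ring0 descriptor region: start address and total size */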
+	writel(AVE_RXDC0_ADDR_START |
+	       (((priv->rx.ndesc * priv->desc_size) << 16) & AVE_RXDC0_SIZE),
+	       priv->base + AVE_RXDC0);
+
+	ave_desc_switch(ndev, AVE_DESC_START);
+
+	ave_pfsel_init(ndev);
+	ave_macaddr_init(ndev);
+
+	/* set Rx configuration */
+	/* full duplex, drop pause frames, enable flow control */
+	val = AVE_RXCR_RXEN | AVE_RXCR_FDUPEN | AVE_RXCR_DRPEN |
+		AVE_RXCR_FLOCTR | (AVE_MAX_ETHFRAME & AVE_RXCR_MPSIZ_MASK);
+	writel(val, priv->base + AVE_RXCR);
+
+	/* set Tx configuration */
+	/* enable flow control, disable loopback */
+	writel(AVE_TXCR_FLOCTR, priv->base + AVE_TXCR);
+
+	/* keep the interval unit (BSCK); enable the interval timer and set
+	 * the interrupt count
+	 */
+	val = readl(priv->base + AVE_IIRQC) & AVE_IIRQC_BSCK;
+	val |= AVE_IIRQC_EN0 | (AVE_INTM_COUNT << 16);
+	writel(val, priv->base + AVE_IIRQC);
+
+	val = AVE_GI_RXIINT | AVE_GI_RXOVF | AVE_GI_TX;
+	ave_irq_restore(ndev, val);
+
+	napi_enable(&priv->napi_rx);
+	napi_enable(&priv->napi_tx);
+
+	phy_start(ndev->phydev);
+	phy_start_aneg(ndev->phydev);
+	netif_start_queue(ndev);
+
+	return 0;
+
+out_free_irq:
+	disable_irq(priv->irq);
+	free_irq(priv->irq, ndev);
+
+	return ret;
+}
+
+static int ave_stop(struct net_device *ndev)
+{
+	struct ave_private *priv = netdev_priv(ndev);
+	int entry;
+
+	ave_irq_disable_all(ndev);
+	disable_irq(priv->irq);
+	free_irq(priv->irq, ndev);
+
+	netif_tx_disable(ndev);
+	phy_stop(ndev->phydev);
+	napi_disable(&priv->napi_tx);
+	napi_disable(&priv->napi_rx);
+
+	ave_desc_switch(ndev, AVE_DESC_STOP);
+
+	/* free Tx buffer */
+	for (entry = 0; entry < priv->tx.ndesc; entry++) {
+		if (!priv->tx.desc[entry].skbs)
+			continue;
+
+		ave_dma_unmap(ndev, &priv->tx.desc[entry], DMA_TO_DEVICE);
+		dev_kfree_skb_any(priv->tx.desc[entry].skbs);
+		priv->tx.desc[entry].skbs = NULL;
+	}
+	priv->tx.proc_idx = 0;
+	priv->tx.done_idx = 0;
+
+	/* free Rx buffer */
+	for (entry = 0; entry < priv->rx.ndesc; entry++) {
+		if (!priv->rx.desc[entry].skbs)
+			continue;
+
+		ave_dma_unmap(ndev, &priv->rx.desc[entry], DMA_FROM_DEVICE);
+		dev_kfree_skb_any(priv->rx.desc[entry].skbs);
+		priv->rx.desc[entry].skbs = NULL;
+	}
+	priv->rx.proc_idx = 0;
+	priv->rx.done_idx = 0;
+
+	kfree(priv->tx.desc);
+	kfree(priv->rx.desc);
+
+	return 0;
+}
+
+static int ave_start_xmit(struct sk_buff *skb, struct net_device *ndev)
+{
+	struct ave_private *priv = netdev_priv(ndev);
+	u32 proc_idx, done_idx, ndesc, cmdsts;
+	int ret, freepkt;
+	dma_addr_t paddr;
+
+	proc_idx = priv->tx.proc_idx;
+	done_idx = priv->tx.done_idx;
+	ndesc = priv->tx.ndesc;
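+	/* free Tx entries between proc_idx and done_idx; one slot is kept
+	 * in reserve so a full ring can be told from an empty one
+	 */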
+	freepkt = ((done_idx + ndesc - 1) - proc_idx) % ndesc;
+
+	/* stop the queue when there are not enough free entries */
+	if (unlikely(freepkt < 1)) {
+		netif_stop_queue(ndev);
+		return NETDEV_TX_BUSY;
+	}
+
+	/* add padding for short packet */
+	if (skb_put_padto(skb, ETH_ZLEN)) {
+		priv->stats_tx.dropped++;
+		return NETDEV_TX_OK;
+	}
+
+	/* map Tx buffer
+	 * Unlike Rx, a Tx buffer has no alignment restrictions.
+	 */
+	ret = ave_dma_map(ndev, &priv->tx.desc[proc_idx],
+			  skb->data, skb->len, DMA_TO_DEVICE, &paddr);
+	if (ret) {
+		dev_kfree_skb_any(skb);
+		priv->stats_tx.dropped++;
+		return NETDEV_TX_OK;
+	}
+
+	priv->tx.desc[proc_idx].skbs = skb;
+
+	ave_desc_write_addr(ndev, AVE_DESCID_TX, proc_idx, paddr);
+
+	cmdsts = AVE_STS_OWN | AVE_STS_1ST | AVE_STS_LAST |
+		(skb->len & AVE_STS_PKTLEN_TX_MASK);
+
+	/* set interrupt per AVE_FORCE_TXINTCNT or when queue is stopped */
+	if (!(proc_idx % AVE_FORCE_TXINTCNT) || netif_queue_stopped(ndev))
+		cmdsts |= AVE_STS_INTR;
+
+	/* disable hardware checksumming when the skb doesn't request it */
+	if (skb->ip_summed == CHECKSUM_NONE ||
+	    skb->ip_summed == CHECKSUM_UNNECESSARY)
+		cmdsts |= AVE_STS_NOCSUM;
+
+	ave_desc_write_cmdsts(ndev, AVE_DESCID_TX, proc_idx, cmdsts);
+
+	priv->tx.proc_idx = (proc_idx + 1) % ndesc;
+
+	return NETDEV_TX_OK;
+}
+
+static int ave_ioctl(struct net_device *ndev, struct ifreq *ifr, int cmd)
+{
+	return phy_mii_ioctl(ndev->phydev, ifr, cmd);
+}
+
+static const u8 v4multi_macadr[] = { 0x01, 0x00, 0x00, 0x00, 0x00, 0x00 };
+static const u8 v6multi_macadr[] = { 0x33, 0x00, 0x00, 0x00, 0x00, 0x00 };
+
+static void ave_set_rx_mode(struct net_device *ndev)
+{
+	struct ave_private *priv = netdev_priv(ndev);
+	struct netdev_hw_addr *hw_adr;
+	int count, mc_cnt;
+	u32 val;
+
+	/* disable the MAC address filter in promiscuous mode (or with no
+	 * multicast entries), enable it otherwise
+	 */
+	mc_cnt = netdev_mc_count(ndev);
+	val = readl(priv->base + AVE_RXCR);
+	if (ndev->flags & IFF_PROMISC || !mc_cnt)
+		val &= ~AVE_RXCR_AFEN;
+	else
+		val |= AVE_RXCR_AFEN;
+	writel(val, priv->base + AVE_RXCR);
+
+	/* match all multicast addresses via the IPv4/IPv6 prefix filters */
+	if ((ndev->flags & IFF_ALLMULTI) || mc_cnt > AVE_PF_MULTICAST_SIZE) {
+		ave_pfsel_set_macaddr(ndev, AVE_PFNUM_MULTICAST,
+				      v4multi_macadr, 1);
+		ave_pfsel_set_macaddr(ndev, AVE_PFNUM_MULTICAST + 1,
+				      v6multi_macadr, 1);
+	} else {
+		/* stop all multicast filter */
+		for (count = 0; count < AVE_PF_MULTICAST_SIZE; count++)
+			ave_pfsel_stop(ndev, AVE_PFNUM_MULTICAST + count);
+
+		/* set multicast addresses */
+		count = 0;
+		netdev_for_each_mc_addr(hw_adr, ndev) {
+			if (count == mc_cnt)
+				break;
+			ave_pfsel_set_macaddr(ndev, AVE_PFNUM_MULTICAST + count,
+					      hw_adr->addr, 6);
+			count++;
+		}
+	}
+}
+
+static void ave_get_stats64(struct net_device *ndev,
+			    struct rtnl_link_stats64 *stats)
+{
+	struct ave_private *priv = netdev_priv(ndev);
+	unsigned int start;
+
+	do {
+		start = u64_stats_fetch_begin_irq(&priv->stats_rx.syncp);
+		stats->rx_packets = priv->stats_rx.packets;
+		stats->rx_bytes	  = priv->stats_rx.bytes;
+	} while (u64_stats_fetch_retry_irq(&priv->stats_rx.syncp, start));
+
+	do {
+		start = u64_stats_fetch_begin_irq(&priv->stats_tx.syncp);
+		stats->tx_packets = priv->stats_tx.packets;
+		stats->tx_bytes	  = priv->stats_tx.bytes;
+	} while (u64_stats_fetch_retry_irq(&priv->stats_tx.syncp, start));
+
+	stats->rx_errors      = priv->stats_rx.errors;
+	stats->tx_errors      = priv->stats_tx.errors;
+	stats->rx_dropped     = priv->stats_rx.dropped;
+	stats->tx_dropped     = priv->stats_tx.dropped;
+	stats->rx_fifo_errors = priv->stats_rx.fifo_errors;
+	stats->collisions     = priv->stats_tx.collisions;
+}
+
+static int ave_set_mac_address(struct net_device *ndev, void *p)
+{
+	int ret = eth_mac_addr(ndev, p);
+
+	if (ret)
+		return ret;
+
+	ave_macaddr_init(ndev);
+
+	return 0;
+}
+
+static const struct net_device_ops ave_netdev_ops = {
+	.ndo_init		= ave_init,
+	.ndo_uninit		= ave_uninit,
+	.ndo_open		= ave_open,
+	.ndo_stop		= ave_stop,
+	.ndo_start_xmit		= ave_start_xmit,
+	.ndo_do_ioctl		= ave_ioctl,
+	.ndo_set_rx_mode	= ave_set_rx_mode,
+	.ndo_get_stats64	= ave_get_stats64,
+	.ndo_set_mac_address	= ave_set_mac_address,
+};
+
+static int ave_probe(struct platform_device *pdev)
+{
+	const struct ave_soc_data *data;
+	struct device *dev = &pdev->dev;
+	char buf[ETHTOOL_FWVERS_LEN];
+	phy_interface_t phy_mode;
+	struct ave_private *priv;
+	struct net_device *ndev;
+	struct device_node *np;
+	struct resource	*res;
+	const void *mac_addr;
+	void __iomem *base;
+	u64 dma_mask;
+	int irq, ret;
+	u32 ave_id;
+
+	data = of_device_get_match_data(dev);
+	if (WARN_ON(!data))
+		return -EINVAL;
+
+	np = dev->of_node;
+	phy_mode = of_get_phy_mode(np);
+	if (phy_mode < 0) {
+		dev_err(dev, "phy-mode not found\n");
+		return -EINVAL;
+	}
+	if ((!phy_interface_mode_is_rgmii(phy_mode)) &&
+	    phy_mode != PHY_INTERFACE_MODE_RMII &&
+	    phy_mode != PHY_INTERFACE_MODE_MII) {
+		dev_err(dev, "phy-mode is invalid\n");
+		return -EINVAL;
+	}
+
+	irq = platform_get_irq(pdev, 0);
+	if (irq < 0) {
+		dev_err(dev, "IRQ not found\n");
+		return irq;
+	}
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	base = devm_ioremap_resource(dev, res);
+	if (IS_ERR(base))
+		return PTR_ERR(base);
+
+	ndev = alloc_etherdev(sizeof(struct ave_private));
+	if (!ndev) {
+		dev_err(dev, "can't allocate ethernet device\n");
+		return -ENOMEM;
+	}
+
+	ndev->netdev_ops = &ave_netdev_ops;
+	ndev->ethtool_ops = &ave_ethtool_ops;
+	SET_NETDEV_DEV(ndev, dev);
+
+	ndev->features    |= (NETIF_F_IP_CSUM | NETIF_F_RXCSUM);
+	ndev->hw_features |= (NETIF_F_IP_CSUM | NETIF_F_RXCSUM);
+
+	ndev->max_mtu = AVE_MAX_ETHFRAME - (ETH_HLEN + ETH_FCS_LEN);
+
+	mac_addr = of_get_mac_address(np);
+	if (mac_addr)
+		ether_addr_copy(ndev->dev_addr, mac_addr);
+
+	/* if the MAC address is invalid, fall back to a random one */
+	if (!is_valid_ether_addr(ndev->dev_addr)) {
+		eth_hw_addr_random(ndev);
+		dev_warn(dev, "Using random MAC address: %pM\n",
+			 ndev->dev_addr);
+	}
+
+	priv = netdev_priv(ndev);
+	priv->base = base;
+	priv->irq = irq;
+	priv->ndev = ndev;
+	priv->msg_enable = netif_msg_init(-1, AVE_DEFAULT_MSG_ENABLE);
+	priv->phy_mode = phy_mode;
+	priv->data = data;
+
+	if (IS_DESC_64BIT(priv)) {
+		priv->desc_size = AVE_DESC_SIZE_64;
+		priv->tx.daddr  = AVE_TXDM_64;
+		priv->rx.daddr  = AVE_RXDM_64;
+		dma_mask = DMA_BIT_MASK(64);
+	} else {
+		priv->desc_size = AVE_DESC_SIZE_32;
+		priv->tx.daddr  = AVE_TXDM_32;
+		priv->rx.daddr  = AVE_RXDM_32;
+		dma_mask = DMA_BIT_MASK(32);
+	}
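+	/* the DMA mask matches the descriptor address width the SoC supports */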
+	ret = dma_set_mask(dev, dma_mask);
+	if (ret)
+		goto out_free_netdev;
+
+	priv->tx.ndesc = AVE_NR_TXDESC;
+	priv->rx.ndesc = AVE_NR_RXDESC;
+
+	u64_stats_init(&priv->stats_tx.syncp);
+	u64_stats_init(&priv->stats_rx.syncp);
+
+	priv->clk = devm_clk_get(dev, NULL);
+	if (IS_ERR(priv->clk)) {
+		ret = PTR_ERR(priv->clk);
+		goto out_free_netdev;
+	}
+
+	priv->rst = devm_reset_control_get_optional_shared(dev, NULL);
+	if (IS_ERR(priv->rst)) {
+		ret = PTR_ERR(priv->rst);
+		goto out_free_netdev;
+	}
+
+	priv->mdio = devm_mdiobus_alloc(dev);
+	if (!priv->mdio) {
+		ret = -ENOMEM;
+		goto out_free_netdev;
+	}
+	priv->mdio->priv = ndev;
+	priv->mdio->parent = dev;
+	priv->mdio->read = ave_mdiobus_read;
+	priv->mdio->write = ave_mdiobus_write;
+	priv->mdio->name = "uniphier-mdio";
+	snprintf(priv->mdio->id, MII_BUS_ID_SIZE, "%s-%x",
+		 pdev->name, pdev->id);
+
+	/* Register as a NAPI supported driver */
+	netif_napi_add(ndev, &priv->napi_rx, ave_napi_poll_rx, priv->rx.ndesc);
+	netif_tx_napi_add(ndev, &priv->napi_tx, ave_napi_poll_tx,
+			  priv->tx.ndesc);
+
+	platform_set_drvdata(pdev, ndev);
+
+	ret = register_netdev(ndev);
+	if (ret) {
+		dev_err(dev, "failed to register netdevice\n");
+		goto out_del_napi;
+	}
+
+	/* get ID and version */
+	ave_id = readl(priv->base + AVE_IDR);
+	ave_hw_read_version(ndev, buf, sizeof(buf));
+
+	dev_info(dev, "Socionext %c%c%c%c Ethernet IP %s (irq=%d, phy=%s)\n",
+		 (ave_id >> 24) & 0xff, (ave_id >> 16) & 0xff,
+		 (ave_id >> 8) & 0xff, (ave_id >> 0) & 0xff,
+		 buf, priv->irq, phy_modes(phy_mode));
+
+	return 0;
+
+out_del_napi:
+	netif_napi_del(&priv->napi_rx);
+	netif_napi_del(&priv->napi_tx);
+out_free_netdev:
+	free_netdev(ndev);
+
+	return ret;
+}
+
+static int ave_remove(struct platform_device *pdev)
+{
+	struct net_device *ndev = platform_get_drvdata(pdev);
+	struct ave_private *priv = netdev_priv(ndev);
+
+	unregister_netdev(ndev);
+	netif_napi_del(&priv->napi_rx);
+	netif_napi_del(&priv->napi_tx);
+	free_netdev(ndev);
+
+	return 0;
+}
+
+static const struct ave_soc_data ave_pro4_data = {
+	.is_desc_64bit = false,
+};
+
+static const struct ave_soc_data ave_pxs2_data = {
+	.is_desc_64bit = false,
+};
+
+static const struct ave_soc_data ave_ld11_data = {
+	.is_desc_64bit = false,
+};
+
+static const struct ave_soc_data ave_ld20_data = {
+	.is_desc_64bit = true,
+};
+
+static const struct of_device_id of_ave_match[] = {
+	{
+		.compatible = "socionext,uniphier-pro4-ave4",
+		.data = &ave_pro4_data,
+	},
+	{
+		.compatible = "socionext,uniphier-pxs2-ave4",
+		.data = &ave_pxs2_data,
+	},
+	{
+		.compatible = "socionext,uniphier-ld11-ave4",
+		.data = &ave_ld11_data,
+	},
+	{
+		.compatible = "socionext,uniphier-ld20-ave4",
+		.data = &ave_ld20_data,
+	},
+	{ /* Sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, of_ave_match);
+
+static struct platform_driver ave_driver = {
+	.probe  = ave_probe,
+	.remove = ave_remove,
+	.driver	= {
+		.name = "ave",
+		.of_match_table	= of_ave_match,
+	},
+};
+module_platform_driver(ave_driver);
+
+MODULE_DESCRIPTION("Socionext UniPhier AVE ethernet driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c
index 7e089bf..2fd84569 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c
@@ -406,7 +406,7 @@ static void dwmac4_display_ring(void *head, unsigned int size, bool rx)
 	pr_info("%s descriptor ring:\n", rx ? "RX" : "TX");
 
 	for (i = 0; i < size; i++) {
-		pr_info("%d [0x%x]: 0x%x 0x%x 0x%x 0x%x\n",
+		pr_info("%03d [0x%x]: 0x%x 0x%x 0x%x 0x%x\n",
 			i, (unsigned int)virt_to_phys(p),
 			le32_to_cpu(p->des0), le32_to_cpu(p->des1),
 			le32_to_cpu(p->des2), le32_to_cpu(p->des3));
diff --git a/drivers/net/ethernet/stmicro/stmmac/enh_desc.c b/drivers/net/ethernet/stmicro/stmmac/enh_desc.c
index 2a828a3..b47cb5c 100644
--- a/drivers/net/ethernet/stmicro/stmmac/enh_desc.c
+++ b/drivers/net/ethernet/stmicro/stmmac/enh_desc.c
@@ -428,7 +428,7 @@ static void enh_desc_display_ring(void *head, unsigned int size, bool rx)
 		u64 x;
 
 		x = *(u64 *)ep;
-		pr_info("%d [0x%x]: 0x%x 0x%x 0x%x 0x%x\n",
+		pr_info("%03d [0x%x]: 0x%x 0x%x 0x%x 0x%x\n",
 			i, (unsigned int)virt_to_phys(ep),
 			(unsigned int)x, (unsigned int)(x >> 32),
 			ep->basic.des2, ep->basic.des3);
diff --git a/drivers/net/ethernet/stmicro/stmmac/norm_desc.c b/drivers/net/ethernet/stmicro/stmmac/norm_desc.c
index db4cee5..ebd9e5e 100644
--- a/drivers/net/ethernet/stmicro/stmmac/norm_desc.c
+++ b/drivers/net/ethernet/stmicro/stmmac/norm_desc.c
@@ -288,7 +288,7 @@ static void ndesc_display_ring(void *head, unsigned int size, bool rx)
 		u64 x;
 
 		x = *(u64 *)p;
-		pr_info("%d [0x%x]: 0x%x 0x%x 0x%x 0x%x",
+		pr_info("%03d [0x%x]: 0x%x 0x%x 0x%x 0x%x",
 			i, (unsigned int)virt_to_phys(p),
 			(unsigned int)x, (unsigned int)(x >> 32),
 			p->des2, p->des3);
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index c0af0bc..f99f14c 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -2003,22 +2003,60 @@ static void stmmac_set_dma_operation_mode(struct stmmac_priv *priv, u32 txmode,
 static void stmmac_dma_interrupt(struct stmmac_priv *priv)
 {
 	u32 tx_channel_count = priv->plat->tx_queues_to_use;
-	int status;
+	u32 rx_channel_count = priv->plat->rx_queues_to_use;
+	u32 channels_to_check = tx_channel_count > rx_channel_count ?
+				tx_channel_count : rx_channel_count;
 	u32 chan;
+	bool poll_scheduled = false;
+	int status[channels_to_check];
 
-	for (chan = 0; chan < tx_channel_count; chan++) {
-		struct stmmac_rx_queue *rx_q = &priv->rx_queue[chan];
+	/* Each DMA channel can be used for rx and tx simultaneously, yet
+	 * napi_struct is embedded in struct stmmac_rx_queue rather than in a
+	 * stmmac_channel struct.
+	 * Because of this, stmmac_poll currently checks (and possibly wakes)
+	 * all tx queues rather than just a single tx queue.
+	 */
+	for (chan = 0; chan < channels_to_check; chan++)
+		status[chan] = priv->hw->dma->dma_interrupt(priv->ioaddr,
+							    &priv->xstats,
+							    chan);
 
-		status = priv->hw->dma->dma_interrupt(priv->ioaddr,
-						      &priv->xstats, chan);
-		if (likely((status & handle_rx)) || (status & handle_tx)) {
+	for (chan = 0; chan < rx_channel_count; chan++) {
+		if (likely(status[chan] & handle_rx)) {
+			struct stmmac_rx_queue *rx_q = &priv->rx_queue[chan];
+
 			if (likely(napi_schedule_prep(&rx_q->napi))) {
 				stmmac_disable_dma_irq(priv, chan);
 				__napi_schedule(&rx_q->napi);
+				poll_scheduled = true;
 			}
 		}
+	}
 
-		if (unlikely(status & tx_hard_error_bump_tc)) {
+	/* If we scheduled poll, we already know that tx queues will be checked.
+	 * If we didn't schedule poll, see if any DMA channel (used by tx) has a
+	 * completed transmission and, if so, schedule stmmac_poll (once).
+	 */
+	if (!poll_scheduled) {
+		for (chan = 0; chan < tx_channel_count; chan++) {
+			if (status[chan] & handle_tx) {
+				/* It doesn't matter what rx queue we choose
+				 * here. We use 0 since it always exists.
+				 */
+				struct stmmac_rx_queue *rx_q =
+					&priv->rx_queue[0];
+
+				if (likely(napi_schedule_prep(&rx_q->napi))) {
+					stmmac_disable_dma_irq(priv, chan);
+					__napi_schedule(&rx_q->napi);
+				}
+				break;
+			}
+		}
+	}
+
+	for (chan = 0; chan < tx_channel_count; chan++) {
+		if (unlikely(status[chan] & tx_hard_error_bump_tc)) {
 			/* Try to bump up the dma threshold on this failure */
 			if (unlikely(priv->xstats.threshold != SF_DMA_MODE) &&
 			    (tc <= 256)) {
@@ -2035,7 +2073,7 @@ static void stmmac_dma_interrupt(struct stmmac_priv *priv)
 								    chan);
 				priv->xstats.threshold = tc;
 			}
-		} else if (unlikely(status == tx_hard_error)) {
+		} else if (unlikely(status[chan] == tx_hard_error)) {
 			stmmac_tx_err(priv, chan);
 		}
 	}
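
The reworked stmmac_dma_interrupt() reads every channel's interrupt
status exactly once, schedules NAPI for each rx channel with pending
work, and only when nothing was scheduled falls back to a single
schedule for a completed tx channel. The shape of that two-pass
dispatch, with read_status() and schedule_poll() as placeholder helpers
rather than the driver's real API:

    bool scheduled = false;
    u32 ch;

    for (ch = 0; ch < nr_channels; ch++)
            status[ch] = read_status(ch);           /* one read per channel */

    for (ch = 0; ch < nr_rx_channels; ch++)
            if (status[ch] & handle_rx)
                    scheduled |= schedule_poll(ch); /* rx work: poll now */

    if (!scheduled)
            for (ch = 0; ch < nr_tx_channels; ch++)
                    if (status[ch] & handle_tx) {
                            schedule_poll(ch);      /* one poll covers tx */
                            break;
                    }
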
@@ -3404,9 +3442,8 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
 			if (netif_msg_rx_status(priv)) {
 				netdev_dbg(priv->dev, "\tdesc: %p [entry %d] buff=0x%x\n",
 					   p, entry, des);
-				if (frame_len > ETH_FRAME_LEN)
-					netdev_dbg(priv->dev, "frame size %d, COE: %d\n",
-						   frame_len, status);
+				netdev_dbg(priv->dev, "frame size %d, COE: %d\n",
+					   frame_len, status);
 			}
 
 			/* The zero-copy is always used for all the sizes
diff --git a/drivers/net/ethernet/synopsys/dwc-xlgmac-common.c b/drivers/net/ethernet/synopsys/dwc-xlgmac-common.c
index d655a42..eb1c6b0 100644
--- a/drivers/net/ethernet/synopsys/dwc-xlgmac-common.c
+++ b/drivers/net/ethernet/synopsys/dwc-xlgmac-common.c
@@ -333,9 +333,8 @@ void xlgmac_print_pkt(struct net_device *netdev,
 		      struct sk_buff *skb, bool tx_rx)
 {
 	struct ethhdr *eth = (struct ethhdr *)skb->data;
-	unsigned char *buf = skb->data;
 	unsigned char buffer[128];
-	unsigned int i, j;
+	unsigned int i;
 
 	netdev_dbg(netdev, "\n************** SKB dump ****************\n");
 
@@ -346,22 +345,13 @@ void xlgmac_print_pkt(struct net_device *netdev,
 	netdev_dbg(netdev, "Src MAC addr: %pM\n", eth->h_source);
 	netdev_dbg(netdev, "Protocol: %#06hx\n", ntohs(eth->h_proto));
 
-	for (i = 0, j = 0; i < skb->len;) {
-		j += snprintf(buffer + j, sizeof(buffer) - j, "%02hhx",
-			      buf[i++]);
+	for (i = 0; i < skb->len; i += 32) {
+		unsigned int len = min(skb->len - i, 32U);
 
-		if ((i % 32) == 0) {
-			netdev_dbg(netdev, "  %#06x: %s\n", i - 32, buffer);
-			j = 0;
-		} else if ((i % 16) == 0) {
-			buffer[j++] = ' ';
-			buffer[j++] = ' ';
-		} else if ((i % 4) == 0) {
-			buffer[j++] = ' ';
-		}
+		hex_dump_to_buffer(&skb->data[i], len, 32, 1,
+				   buffer, sizeof(buffer), false);
+		netdev_dbg(netdev, "  %#06x: %s\n", i, buffer);
 	}
-	if (i % 32)
-		netdev_dbg(netdev, "  %#06x: %s\n", i - (i % 32), buffer);
 
 	netdev_dbg(netdev, "\n************** SKB dump ****************\n");
 }
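
hex_dump_to_buffer() from lib/hexdump.c replaces the hand-rolled
formatting loop removed above; a minimal sketch of the same
32-bytes-per-line, 1-byte-group dump over a generic buffer:

    #include <linux/kernel.h>
    #include <linux/printk.h>

    static void example_hex_dump(const u8 *data, unsigned int total)
    {
            char line[128];
            unsigned int i;

            for (i = 0; i < total; i += 32) {
                    /* rowsize 32, groupsize 1, no trailing ASCII column */
                    hex_dump_to_buffer(&data[i], min(total - i, 32U), 32, 1,
                                       line, sizeof(line), false);
                    pr_debug("  %#06x: %s\n", i, line);
            }
    }
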
diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c
index a73600d..3c85a08 100644
--- a/drivers/net/ethernet/ti/cpsw.c
+++ b/drivers/net/ethernet/ti/cpsw.c
@@ -88,6 +88,7 @@ do {								\
 #define CPSW_VERSION_4		0x190112
 
 #define HOST_PORT_NUM		0
+#define CPSW_ALE_PORTS_NUM	3
 #define SLIVER_SIZE		0x40
 
 #define CPSW1_HOST_PORT_OFFSET	0x028
@@ -352,6 +353,27 @@ struct cpsw_hw_stats {
 	u32	rxdmaoverruns;
 };
 
+struct cpsw_slave_data {
+	struct device_node *phy_node;
+	char		phy_id[MII_BUS_ID_SIZE];
+	int		phy_if;
+	u8		mac_addr[ETH_ALEN];
+	u16		dual_emac_res_vlan;	/* Reserved VLAN for DualEMAC */
+};
+
+struct cpsw_platform_data {
+	struct cpsw_slave_data	*slave_data;
+	u32	ss_reg_ofs;	/* Subsystem control register offset */
+	u32	channels;	/* number of cpdma channels (symmetric) */
+	u32	slaves;		/* number of slave cpgmac ports */
+	u32	active_slave; /* time stamping, ethtool and SIOCGMIIPHY slave */
+	u32	ale_entries;	/* ale table size */
+	u32	bd_ram_size;  /* buffer descriptor ram size */
+	u32	mac_control;	/* Mac control register */
+	u16	default_vlan;	/* Def VLAN for ALE lookup in VLAN aware mode */
+	bool	dual_emac;	/* Enable Dual EMAC mode */
+};
+
 struct cpsw_slave {
 	void __iomem			*regs;
 	struct cpsw_sliver_regs __iomem	*sliver;
@@ -365,12 +387,12 @@ struct cpsw_slave {
 
 static inline u32 slave_read(struct cpsw_slave *slave, u32 offset)
 {
-	return __raw_readl(slave->regs + offset);
+	return readl_relaxed(slave->regs + offset);
 }
 
 static inline void slave_write(struct cpsw_slave *slave, u32 val, u32 offset)
 {
-	__raw_writel(val, slave->regs + offset);
+	writel_relaxed(val, slave->regs + offset);
 }
 
 struct cpsw_vector {
@@ -660,8 +682,8 @@ static void cpsw_ndo_set_rx_mode(struct net_device *ndev)
 
 static void cpsw_intr_enable(struct cpsw_common *cpsw)
 {
-	__raw_writel(0xFF, &cpsw->wr_regs->tx_en);
-	__raw_writel(0xFF, &cpsw->wr_regs->rx_en);
+	writel_relaxed(0xFF, &cpsw->wr_regs->tx_en);
+	writel_relaxed(0xFF, &cpsw->wr_regs->rx_en);
 
 	cpdma_ctlr_int_ctrl(cpsw->dma, true);
 	return;
@@ -669,8 +691,8 @@ static void cpsw_intr_enable(struct cpsw_common *cpsw)
 
 static void cpsw_intr_disable(struct cpsw_common *cpsw)
 {
-	__raw_writel(0, &cpsw->wr_regs->tx_en);
-	__raw_writel(0, &cpsw->wr_regs->rx_en);
+	writel_relaxed(0, &cpsw->wr_regs->tx_en);
+	writel_relaxed(0, &cpsw->wr_regs->rx_en);
 
 	cpdma_ctlr_int_ctrl(cpsw->dma, false);
 	return;
@@ -949,18 +971,14 @@ static inline void soft_reset(const char *module, void __iomem *reg)
 {
 	unsigned long timeout = jiffies + HZ;
 
-	__raw_writel(1, reg);
+	writel_relaxed(1, reg);
 	do {
 		cpu_relax();
-	} while ((__raw_readl(reg) & 1) && time_after(timeout, jiffies));
+	} while ((readl_relaxed(reg) & 1) && time_after(timeout, jiffies));
 
-	WARN(__raw_readl(reg) & 1, "failed to soft-reset %s\n", module);
+	WARN(readl_relaxed(reg) & 1, "failed to soft-reset %s\n", module);
 }
 
-#define mac_hi(mac)	(((mac)[0] << 0) | ((mac)[1] << 8) |	\
-			 ((mac)[2] << 16) | ((mac)[3] << 24))
-#define mac_lo(mac)	(((mac)[4] << 0) | ((mac)[5] << 8))
-
 static void cpsw_set_slave_mac(struct cpsw_slave *slave,
 			       struct cpsw_priv *priv)
 {
@@ -1015,7 +1033,7 @@ static void _cpsw_adjust_link(struct cpsw_slave *slave,
 
 	if (mac_control != slave->mac_control) {
 		phy_print_status(phy);
-		__raw_writel(mac_control, &slave->sliver->mac_control);
+		writel_relaxed(mac_control, &slave->sliver->mac_control);
 	}
 
 	slave->mac_control = mac_control;
@@ -1278,7 +1296,7 @@ static void cpsw_slave_open(struct cpsw_slave *slave, struct cpsw_priv *priv)
 	soft_reset_slave(slave);
 
 	/* setup priority mapping */
-	__raw_writel(RX_PRIORITY_MAPPING, &slave->sliver->rx_pri_map);
+	writel_relaxed(RX_PRIORITY_MAPPING, &slave->sliver->rx_pri_map);
 
 	switch (cpsw->version) {
 	case CPSW_VERSION_1:
@@ -1304,7 +1322,7 @@ static void cpsw_slave_open(struct cpsw_slave *slave, struct cpsw_priv *priv)
 	}
 
 	/* setup max packet size, and mac address */
-	__raw_writel(cpsw->rx_packet_max, &slave->sliver->rx_maxlen);
+	writel_relaxed(cpsw->rx_packet_max, &slave->sliver->rx_maxlen);
 	cpsw_set_slave_mac(slave, priv);
 
 	slave->mac_control = 0;	/* no link yet */
@@ -1395,9 +1413,9 @@ static void cpsw_init_host_port(struct cpsw_priv *priv)
 	writel(fifo_mode, &cpsw->host_port_regs->tx_in_ctl);
 
 	/* setup host port priority mapping */
-	__raw_writel(CPDMA_TX_PRIORITY_MAP,
-		     &cpsw->host_port_regs->cpdma_tx_pri_map);
-	__raw_writel(0, &cpsw->host_port_regs->cpdma_rx_chan_map);
+	writel_relaxed(CPDMA_TX_PRIORITY_MAP,
+		       &cpsw->host_port_regs->cpdma_tx_pri_map);
+	writel_relaxed(0, &cpsw->host_port_regs->cpdma_rx_chan_map);
 
 	cpsw_ale_control_set(cpsw->ale, HOST_PORT_NUM,
 			     ALE_PORT_STATE, ALE_PORT_STATE_FORWARD);
@@ -1514,10 +1532,10 @@ static int cpsw_ndo_open(struct net_device *ndev)
 	/* initialize shared resources for every ndev */
 	if (!cpsw->usage_count) {
 		/* disable priority elevation */
-		__raw_writel(0, &cpsw->regs->ptype);
+		writel_relaxed(0, &cpsw->regs->ptype);
 
 		/* enable statistics collection only on all ports */
-		__raw_writel(0x7, &cpsw->regs->stat_port_en);
+		writel_relaxed(0x7, &cpsw->regs->stat_port_en);
 
 		/* Enable internal fifo flow control */
 		writel(0x7, &cpsw->regs->flow_control);
@@ -1701,7 +1719,7 @@ static void cpsw_hwtstamp_v2(struct cpsw_priv *priv)
 
 	slave_write(slave, mtype, CPSW2_TS_SEQ_MTYPE);
 	slave_write(slave, ctrl, CPSW2_CONTROL);
-	__raw_writel(ETH_P_1588, &cpsw->regs->ts_ltype);
+	writel_relaxed(ETH_P_1588, &cpsw->regs->ts_ltype);
 }
 
 static int cpsw_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
@@ -2298,7 +2316,6 @@ static int cpsw_check_ch_settings(struct cpsw_common *cpsw,
 
 static int cpsw_update_channels_res(struct cpsw_priv *priv, int ch_num, int rx)
 {
-	int (*poll)(struct napi_struct *, int);
 	struct cpsw_common *cpsw = priv->cpsw;
 	void (*handler)(void *, int, int);
 	struct netdev_queue *queue;
@@ -2309,12 +2326,10 @@ static int cpsw_update_channels_res(struct cpsw_priv *priv, int ch_num, int rx)
 		ch = &cpsw->rx_ch_num;
 		vec = cpsw->rxv;
 		handler = cpsw_rx_handler;
-		poll = cpsw_rx_poll;
 	} else {
 		ch = &cpsw->tx_ch_num;
 		vec = cpsw->txv;
 		handler = cpsw_tx_handler;
-		poll = cpsw_tx_poll;
 	}
 
 	while (*ch < ch_num) {
@@ -3050,17 +3065,23 @@ static int cpsw_probe(struct platform_device *pdev)
 	}
 
 	cpsw->txv[0].ch = cpdma_chan_create(cpsw->dma, 0, cpsw_tx_handler, 0);
+	if (IS_ERR(cpsw->txv[0].ch)) {
+		dev_err(priv->dev, "error initializing tx dma channel\n");
+		ret = PTR_ERR(cpsw->txv[0].ch);
+		goto clean_dma_ret;
+	}
+
 	cpsw->rxv[0].ch = cpdma_chan_create(cpsw->dma, 0, cpsw_rx_handler, 1);
-	if (WARN_ON(!cpsw->rxv[0].ch || !cpsw->txv[0].ch)) {
-		dev_err(priv->dev, "error initializing dma channels\n");
-		ret = -ENOMEM;
+	if (IS_ERR(cpsw->rxv[0].ch)) {
+		dev_err(priv->dev, "error initializing rx dma channel\n");
+		ret = PTR_ERR(cpsw->rxv[0].ch);
 		goto clean_dma_ret;
 	}
 
 	ale_params.dev			= &pdev->dev;
 	ale_params.ale_ageout		= ale_ageout;
 	ale_params.ale_entries		= data->ale_entries;
-	ale_params.ale_ports		= data->slaves;
+	ale_params.ale_ports		= CPSW_ALE_PORTS_NUM;
 
 	cpsw->ale = cpsw_ale_create(&ale_params);
 	if (!cpsw->ale) {
@@ -3072,14 +3093,14 @@ static int cpsw_probe(struct platform_device *pdev)
 	cpsw->cpts = cpts_create(cpsw->dev, cpts_regs, cpsw->dev->of_node);
 	if (IS_ERR(cpsw->cpts)) {
 		ret = PTR_ERR(cpsw->cpts);
-		goto clean_ale_ret;
+		goto clean_dma_ret;
 	}
 
 	ndev->irq = platform_get_irq(pdev, 1);
 	if (ndev->irq < 0) {
 		dev_err(priv->dev, "error getting irq resource\n");
 		ret = ndev->irq;
-		goto clean_ale_ret;
+		goto clean_dma_ret;
 	}
 
 	of_id = of_match_device(cpsw_of_mtable, &pdev->dev);
@@ -3103,7 +3124,7 @@ static int cpsw_probe(struct platform_device *pdev)
 	if (ret) {
 		dev_err(priv->dev, "error registering net device\n");
 		ret = -ENODEV;
-		goto clean_ale_ret;
+		goto clean_dma_ret;
 	}
 
 	if (cpsw->data.dual_emac) {
@@ -3126,7 +3147,7 @@ static int cpsw_probe(struct platform_device *pdev)
 	irq = platform_get_irq(pdev, 1);
 	if (irq < 0) {
 		ret = irq;
-		goto clean_ale_ret;
+		goto clean_dma_ret;
 	}
 
 	cpsw->irqs_table[0] = irq;
@@ -3134,14 +3155,14 @@ static int cpsw_probe(struct platform_device *pdev)
 			       0, dev_name(&pdev->dev), cpsw);
 	if (ret < 0) {
 		dev_err(priv->dev, "error attaching irq (%d)\n", ret);
-		goto clean_ale_ret;
+		goto clean_dma_ret;
 	}
 
 	/* TX IRQ */
 	irq = platform_get_irq(pdev, 2);
 	if (irq < 0) {
 		ret = irq;
-		goto clean_ale_ret;
+		goto clean_dma_ret;
 	}
 
 	cpsw->irqs_table[1] = irq;
@@ -3149,7 +3170,7 @@ static int cpsw_probe(struct platform_device *pdev)
 			       0, dev_name(&pdev->dev), cpsw);
 	if (ret < 0) {
 		dev_err(priv->dev, "error attaching irq (%d)\n", ret);
-		goto clean_ale_ret;
+		goto clean_dma_ret;
 	}
 
 	cpsw_notice(priv, probe,
@@ -3162,8 +3183,6 @@ static int cpsw_probe(struct platform_device *pdev)
 
 clean_unregister_netdev_ret:
 	unregister_netdev(ndev);
-clean_ale_ret:
-	cpsw_ale_destroy(cpsw->ale);
 clean_dma_ret:
 	cpdma_ctlr_destroy(cpsw->dma);
 clean_dt_ret:
@@ -3193,7 +3212,6 @@ static int cpsw_remove(struct platform_device *pdev)
 	unregister_netdev(ndev);
 
 	cpts_release(cpsw->cpts);
-	cpsw_ale_destroy(cpsw->ale);
 	cpdma_ctlr_destroy(cpsw->dma);
 	cpsw_remove_dt(pdev);
 	pm_runtime_put_sync(&pdev->dev);
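
On the accessor conversion running through this file: readl_relaxed()
and writel_relaxed() keep the little-endian handling of readl() and
writel() but drop the heavyweight ordering barriers those imply, which
is fine for the pure register pokes converted here. A sketch of when
each form applies (base and the REG_* offsets are placeholders):

    u32 v;

    v = readl_relaxed(base + REG_STATUS);          /* no barrier needed */
    writel_relaxed(v | BIT(0), base + REG_CTRL);   /* plain register poke */
    writel(1, base + REG_KICK);   /* ordered: seen after prior DMA writes */
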
diff --git a/drivers/net/ethernet/ti/cpsw.h b/drivers/net/ethernet/ti/cpsw.h
index 6c3037a..cf111db3 100644
--- a/drivers/net/ethernet/ti/cpsw.h
+++ b/drivers/net/ethernet/ti/cpsw.h
@@ -17,26 +17,9 @@
 #include <linux/if_ether.h>
 #include <linux/phy.h>
 
-struct cpsw_slave_data {
-	struct device_node *phy_node;
-	char		phy_id[MII_BUS_ID_SIZE];
-	int		phy_if;
-	u8		mac_addr[ETH_ALEN];
-	u16		dual_emac_res_vlan;	/* Reserved VLAN for DualEMAC */
-};
-
-struct cpsw_platform_data {
-	struct cpsw_slave_data	*slave_data;
-	u32	ss_reg_ofs;	/* Subsystem control register offset */
-	u32	channels;	/* number of cpdma channels (symmetric) */
-	u32	slaves;		/* number of slave cpgmac ports */
-	u32	active_slave; /* time stamping, ethtool and SIOCGMIIPHY slave */
-	u32	ale_entries;	/* ale table size */
-	u32	bd_ram_size;  /*buffer descriptor ram size */
-	u32	mac_control;	/* Mac control register */
-	u16	default_vlan;	/* Def VLAN for ALE lookup in VLAN aware mode*/
-	bool	dual_emac;	/* Enable Dual EMAC mode */
-};
+#define mac_hi(mac)	(((mac)[0] << 0) | ((mac)[1] << 8) |	\
+			 ((mac)[2] << 16) | ((mac)[3] << 24))
+#define mac_lo(mac)	(((mac)[4] << 0) | ((mac)[5] << 8))
 
 void cpsw_phy_sel(struct device *dev, phy_interface_t phy_mode, int slave);
 int ti_cm_get_macid(struct device *dev, int slave, u8 *mac_addr);
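
With mac_hi()/mac_lo() now shared via cpsw.h, cpsw and netcp_ethss pack
a MAC address into a hi/lo register pair the same way. A small worked
fragment (the SA_HI/SA_LO offsets are placeholders):

    u8 mac[ETH_ALEN] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 };

    writel_relaxed(mac_hi(mac), regs + SA_HI);     /* 0x33221100 */
    writel_relaxed(mac_lo(mac), regs + SA_LO);     /* 0x00005544 */
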
diff --git a/drivers/net/ethernet/ti/cpsw_ale.c b/drivers/net/ethernet/ti/cpsw_ale.c
index b432a75..93dc05c 100644
--- a/drivers/net/ethernet/ti/cpsw_ale.c
+++ b/drivers/net/ethernet/ti/cpsw_ale.c
@@ -150,11 +150,11 @@ static int cpsw_ale_read(struct cpsw_ale *ale, int idx, u32 *ale_entry)
 
 	WARN_ON(idx > ale->params.ale_entries);
 
-	__raw_writel(idx, ale->params.ale_regs + ALE_TABLE_CONTROL);
+	writel_relaxed(idx, ale->params.ale_regs + ALE_TABLE_CONTROL);
 
 	for (i = 0; i < ALE_ENTRY_WORDS; i++)
-		ale_entry[i] = __raw_readl(ale->params.ale_regs +
-					   ALE_TABLE + 4 * i);
+		ale_entry[i] = readl_relaxed(ale->params.ale_regs +
+					     ALE_TABLE + 4 * i);
 
 	return idx;
 }
@@ -166,11 +166,11 @@ static int cpsw_ale_write(struct cpsw_ale *ale, int idx, u32 *ale_entry)
 	WARN_ON(idx > ale->params.ale_entries);
 
 	for (i = 0; i < ALE_ENTRY_WORDS; i++)
-		__raw_writel(ale_entry[i], ale->params.ale_regs +
-			     ALE_TABLE + 4 * i);
+		writel_relaxed(ale_entry[i], ale->params.ale_regs +
+			       ALE_TABLE + 4 * i);
 
-	__raw_writel(idx | ALE_TABLE_WRITE, ale->params.ale_regs +
-		     ALE_TABLE_CONTROL);
+	writel_relaxed(idx | ALE_TABLE_WRITE, ale->params.ale_regs +
+		       ALE_TABLE_CONTROL);
 
 	return idx;
 }
@@ -723,7 +723,7 @@ int cpsw_ale_control_set(struct cpsw_ale *ale, int port, int control,
 	if (info->port_offset == 0 && info->port_shift == 0)
 		port = 0; /* global, port is a don't care */
 
-	if (port < 0 || port > ale->params.ale_ports)
+	if (port < 0 || port >= ale->params.ale_ports)
 		return -EINVAL;
 
 	mask = BITMASK(info->bits);
@@ -733,9 +733,9 @@ int cpsw_ale_control_set(struct cpsw_ale *ale, int port, int control,
 	offset = info->offset + (port * info->port_offset);
 	shift  = info->shift  + (port * info->port_shift);
 
-	tmp = __raw_readl(ale->params.ale_regs + offset);
+	tmp = readl_relaxed(ale->params.ale_regs + offset);
 	tmp = (tmp & ~(mask << shift)) | (value << shift);
-	__raw_writel(tmp, ale->params.ale_regs + offset);
+	writel_relaxed(tmp, ale->params.ale_regs + offset);
 
 	return 0;
 }
@@ -754,13 +754,13 @@ int cpsw_ale_control_get(struct cpsw_ale *ale, int port, int control)
 	if (info->port_offset == 0 && info->port_shift == 0)
 		port = 0; /* global, port is a don't care */
 
-	if (port < 0 || port > ale->params.ale_ports)
+	if (port < 0 || port >= ale->params.ale_ports)
 		return -EINVAL;
 
 	offset = info->offset + (port * info->port_offset);
 	shift  = info->shift  + (port * info->port_shift);
 
-	tmp = __raw_readl(ale->params.ale_regs + offset) >> shift;
+	tmp = readl_relaxed(ale->params.ale_regs + offset) >> shift;
 	return tmp & BITMASK(info->bits);
 }
 EXPORT_SYMBOL_GPL(cpsw_ale_control_get);
@@ -779,9 +779,37 @@ static void cpsw_ale_timer(struct timer_list *t)
 
 void cpsw_ale_start(struct cpsw_ale *ale)
 {
+	cpsw_ale_control_set(ale, 0, ALE_ENABLE, 1);
+	cpsw_ale_control_set(ale, 0, ALE_CLEAR, 1);
+
+	timer_setup(&ale->timer, cpsw_ale_timer, 0);
+	if (ale->ageout) {
+		ale->timer.expires = jiffies + ale->ageout;
+		add_timer(&ale->timer);
+	}
+}
+EXPORT_SYMBOL_GPL(cpsw_ale_start);
+
+void cpsw_ale_stop(struct cpsw_ale *ale)
+{
+	del_timer_sync(&ale->timer);
+	cpsw_ale_control_set(ale, 0, ALE_ENABLE, 0);
+}
+EXPORT_SYMBOL_GPL(cpsw_ale_stop);
+
+struct cpsw_ale *cpsw_ale_create(struct cpsw_ale_params *params)
+{
+	struct cpsw_ale *ale;
 	u32 rev, ale_entries;
 
-	rev = __raw_readl(ale->params.ale_regs + ALE_IDVER);
+	ale = devm_kzalloc(params->dev, sizeof(*ale), GFP_KERNEL);
+	if (!ale)
+		return NULL;
+
+	ale->params = *params;
+	ale->ageout = ale->params.ale_ageout * HZ;
+
+	rev = readl_relaxed(ale->params.ale_regs + ALE_IDVER);
 	if (!ale->params.major_ver_mask)
 		ale->params.major_ver_mask = 0xff;
 	ale->version =
@@ -793,8 +821,8 @@ void cpsw_ale_start(struct cpsw_ale *ale)
 
 	if (!ale->params.ale_entries) {
 		ale_entries =
-			__raw_readl(ale->params.ale_regs + ALE_STATUS) &
-				    ALE_STATUS_SIZE_MASK;
+			readl_relaxed(ale->params.ale_regs + ALE_STATUS) &
+			ALE_STATUS_SIZE_MASK;
 		/* ALE available on newer NetCP switches has introduced
 		 * a register, ALE_STATUS, to indicate the size of ALE
 		 * table which shows the size as a multiple of 1024 entries.
@@ -816,9 +844,9 @@ void cpsw_ale_start(struct cpsw_ale *ale)
 		 "ALE Table size %ld\n", ale->params.ale_entries);
 
 	/* set default bits for existing h/w */
-	ale->port_mask_bits = 3;
-	ale->port_num_bits = 2;
-	ale->vlan_field_bits = 3;
+	ale->port_mask_bits = ale->params.ale_ports;
+	ale->port_num_bits = order_base_2(ale->params.ale_ports);
+	ale->vlan_field_bits = ale->params.ale_ports;
 
 	/* Set defaults override for ALE on NetCP NU switch and for version
 	 * 1R3
@@ -847,57 +875,12 @@ void cpsw_ale_start(struct cpsw_ale *ale)
 		ale_controls[ALE_PORT_UNTAGGED_EGRESS].shift = 0;
 		ale_controls[ALE_PORT_UNTAGGED_EGRESS].offset =
 					ALE_UNKNOWNVLAN_FORCE_UNTAG_EGRESS;
-		ale->port_mask_bits = ale->params.ale_ports;
-		ale->port_num_bits = ale->params.ale_ports - 1;
-		ale->vlan_field_bits = ale->params.ale_ports;
-	} else if (ale->version == ALE_VERSION_1R3) {
-		ale->port_mask_bits = ale->params.ale_ports;
-		ale->port_num_bits = 3;
-		ale->vlan_field_bits = ale->params.ale_ports;
 	}
 
-	cpsw_ale_control_set(ale, 0, ALE_ENABLE, 1);
-	cpsw_ale_control_set(ale, 0, ALE_CLEAR, 1);
-
-	timer_setup(&ale->timer, cpsw_ale_timer, 0);
-	if (ale->ageout) {
-		ale->timer.expires = jiffies + ale->ageout;
-		add_timer(&ale->timer);
-	}
-}
-EXPORT_SYMBOL_GPL(cpsw_ale_start);
-
-void cpsw_ale_stop(struct cpsw_ale *ale)
-{
-	del_timer_sync(&ale->timer);
-}
-EXPORT_SYMBOL_GPL(cpsw_ale_stop);
-
-struct cpsw_ale *cpsw_ale_create(struct cpsw_ale_params *params)
-{
-	struct cpsw_ale *ale;
-
-	ale = kzalloc(sizeof(*ale), GFP_KERNEL);
-	if (!ale)
-		return NULL;
-
-	ale->params = *params;
-	ale->ageout = ale->params.ale_ageout * HZ;
-
 	return ale;
 }
 EXPORT_SYMBOL_GPL(cpsw_ale_create);
 
-int cpsw_ale_destroy(struct cpsw_ale *ale)
-{
-	if (!ale)
-		return -EINVAL;
-	cpsw_ale_control_set(ale, 0, ALE_ENABLE, 0);
-	kfree(ale);
-	return 0;
-}
-EXPORT_SYMBOL_GPL(cpsw_ale_destroy);
-
 void cpsw_ale_dump(struct cpsw_ale *ale, u32 *data)
 {
 	int i;
diff --git a/drivers/net/ethernet/ti/cpsw_ale.h b/drivers/net/ethernet/ti/cpsw_ale.h
index 25d24e8..d4fe901 100644
--- a/drivers/net/ethernet/ti/cpsw_ale.h
+++ b/drivers/net/ethernet/ti/cpsw_ale.h
@@ -100,7 +100,6 @@ enum cpsw_ale_port_state {
 #define ALE_ENTRY_WORDS	DIV_ROUND_UP(ALE_ENTRY_BITS, 32)
 
 struct cpsw_ale *cpsw_ale_create(struct cpsw_ale_params *params);
-int cpsw_ale_destroy(struct cpsw_ale *ale);
 
 void cpsw_ale_start(struct cpsw_ale *ale);
 void cpsw_ale_stop(struct cpsw_ale *ale);
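
With the ALE object now devm-allocated and cpsw_ale_destroy() removed,
the caller-side lifecycle reduces to create/start/stop; in sketch form:

    ale = cpsw_ale_create(&ale_params);     /* devm_kzalloc-backed */
    if (!ale)
            return -ENOMEM;

    cpsw_ale_start(ale);    /* ALE_ENABLE + ALE_CLEAR, arms ageout timer */
    /* ... */
    cpsw_ale_stop(ale);     /* del_timer_sync(), then ALE_ENABLE off */
    /* no destroy call: the allocation goes away with the device */
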
diff --git a/drivers/net/ethernet/ti/davinci_cpdma.c b/drivers/net/ethernet/ti/davinci_cpdma.c
index e4d6edf..6f9173f 100644
--- a/drivers/net/ethernet/ti/davinci_cpdma.c
+++ b/drivers/net/ethernet/ti/davinci_cpdma.c
@@ -893,7 +893,7 @@ struct cpdma_chan *cpdma_chan_create(struct cpdma_ctlr *ctlr, int chan_num,
 	chan_num = rx_type ? rx_chan_num(chan_num) : tx_chan_num(chan_num);
 
 	if (__chan_linear(chan_num) >= ctlr->num_chan)
-		return NULL;
+		return ERR_PTR(-EINVAL);
 
 	chan = devm_kzalloc(ctlr->dev, sizeof(*chan), GFP_KERNEL);
 	if (!chan)
diff --git a/drivers/net/ethernet/ti/davinci_emac.c b/drivers/net/ethernet/ti/davinci_emac.c
index 4bb5618..abceea8 100644
--- a/drivers/net/ethernet/ti/davinci_emac.c
+++ b/drivers/net/ethernet/ti/davinci_emac.c
@@ -1385,11 +1385,6 @@ static int emac_devioctl(struct net_device *ndev, struct ifreq *ifrq, int cmd)
 		return -EOPNOTSUPP;
 }
 
-static int match_first_device(struct device *dev, void *data)
-{
-	return !strncmp(dev_name(dev), "davinci_mdio", 12);
-}
-
 /**
  * emac_dev_open - EMAC device open
  * @ndev: The DaVinci EMAC network adapter
@@ -1489,8 +1484,8 @@ static int emac_dev_open(struct net_device *ndev)
 
 	/* use the first phy on the bus if pdata did not give us a phy id */
 	if (!phydev && !priv->phy_id) {
-		phy = bus_find_device(&mdio_bus_type, NULL, NULL,
-				      match_first_device);
+		phy = bus_find_device_by_name(&mdio_bus_type, NULL,
+					      "davinci_mdio");
 		if (phy) {
 			priv->phy_id = dev_name(phy);
 			if (!priv->phy_id || !*priv->phy_id)
@@ -1875,10 +1870,17 @@ static int davinci_emac_probe(struct platform_device *pdev)
 
 	priv->txchan = cpdma_chan_create(priv->dma, EMAC_DEF_TX_CH,
 					 emac_tx_handler, 0);
+	if (IS_ERR(priv->txchan)) {
+		dev_err(&pdev->dev, "error initializing tx dma channel\n");
+		rc = PTR_ERR(priv->txchan);
+		goto no_cpdma_chan;
+	}
+
 	priv->rxchan = cpdma_chan_create(priv->dma, EMAC_DEF_RX_CH,
 					 emac_rx_handler, 1);
-	if (WARN_ON(!priv->txchan || !priv->rxchan)) {
-		rc = -ENOMEM;
+	if (IS_ERR(priv->rxchan)) {
+		dev_err(&pdev->dev, "error initializing rx dma channel\n");
+		rc = PTR_ERR(priv->rxchan);
 		goto no_cpdma_chan;
 	}
 
diff --git a/drivers/net/ethernet/ti/netcp_ethss.c b/drivers/net/ethernet/ti/netcp_ethss.c
index e831c49..56dbc0b 100644
--- a/drivers/net/ethernet/ti/netcp_ethss.c
+++ b/drivers/net/ethernet/ti/netcp_ethss.c
@@ -27,6 +27,7 @@
 #include <linux/net_tstamp.h>
 #include <linux/ethtool.h>
 
+#include "cpsw.h"
 #include "cpsw_ale.h"
 #include "netcp.h"
 #include "cpts.h"
@@ -2047,10 +2048,6 @@ static const struct ethtool_ops keystone_ethtool_ops = {
 	.get_ts_info		= keystone_get_ts_info,
 };
 
-#define mac_hi(mac)	(((mac)[0] << 0) | ((mac)[1] << 8) |	\
-			 ((mac)[2] << 16) | ((mac)[3] << 24))
-#define mac_lo(mac)	(((mac)[4] << 0) | ((mac)[5] << 8))
-
 static void gbe_set_slave_mac(struct gbe_slave *slave,
 			      struct gbe_intf *gbe_intf)
 {
@@ -3692,7 +3689,6 @@ static int gbe_remove(struct netcp_device *netcp_device, void *inst_priv)
 	del_timer_sync(&gbe_dev->timer);
 	cpts_release(gbe_dev->cpts);
 	cpsw_ale_stop(gbe_dev->ale);
-	cpsw_ale_destroy(gbe_dev->ale);
 	netcp_txpipe_close(&gbe_dev->tx_pipe);
 	free_secondary_ports(gbe_dev);
 
diff --git a/drivers/net/geneve.c b/drivers/net/geneve.c
index 0a48b30..195e0d0 100644
--- a/drivers/net/geneve.c
+++ b/drivers/net/geneve.c
@@ -1652,19 +1652,16 @@ static __net_init int geneve_init_net(struct net *net)
 	return 0;
 }
 
-static void __net_exit geneve_exit_net(struct net *net)
+static void geneve_destroy_tunnels(struct net *net, struct list_head *head)
 {
 	struct geneve_net *gn = net_generic(net, geneve_net_id);
 	struct geneve_dev *geneve, *next;
 	struct net_device *dev, *aux;
-	LIST_HEAD(list);
-
-	rtnl_lock();
 
 	/* gather any geneve devices that were moved into this ns */
 	for_each_netdev_safe(net, dev, aux)
 		if (dev->rtnl_link_ops == &geneve_link_ops)
-			unregister_netdevice_queue(dev, &list);
+			unregister_netdevice_queue(dev, head);
 
 	/* now gather any other geneve devices that were created in this ns */
 	list_for_each_entry_safe(geneve, next, &gn->geneve_list, next) {
@@ -1672,18 +1669,29 @@ static void __net_exit geneve_exit_net(struct net *net)
 		 * to the list by the previous loop.
 		 */
 		if (!net_eq(dev_net(geneve->dev), net))
-			unregister_netdevice_queue(geneve->dev, &list);
+			unregister_netdevice_queue(geneve->dev, head);
 	}
 
+	WARN_ON_ONCE(!list_empty(&gn->sock_list));
+}
+
+static void __net_exit geneve_exit_batch_net(struct list_head *net_list)
+{
+	struct net *net;
+	LIST_HEAD(list);
+
+	rtnl_lock();
+	list_for_each_entry(net, net_list, exit_list)
+		geneve_destroy_tunnels(net, &list);
+
 	/* unregister the devices gathered above */
 	unregister_netdevice_many(&list);
 	rtnl_unlock();
-	WARN_ON_ONCE(!list_empty(&gn->sock_list));
 }
 
 static struct pernet_operations geneve_net_ops = {
 	.init = geneve_init_net,
-	.exit = geneve_exit_net,
+	.exit_batch = geneve_exit_batch_net,
 	.id   = &geneve_net_id,
 	.size = sizeof(struct geneve_net),
 };
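
The switch from .exit to .exit_batch above takes the rtnl lock once per
batch of dying namespaces rather than once per namespace, and
unregisters all gathered devices in one pass; the general pattern, with
example_destroy_tunnels() standing in for the per-netns teardown:

    static void __net_exit example_exit_batch(struct list_head *net_list)
    {
            struct net *net;
            LIST_HEAD(list);

            rtnl_lock();
            list_for_each_entry(net, net_list, exit_list)
                    example_destroy_tunnels(net, &list);  /* queue devices */

            unregister_netdevice_many(&list);  /* one pass for the batch */
            rtnl_unlock();
    }
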
diff --git a/drivers/net/hyperv/hyperv_net.h b/drivers/net/hyperv/hyperv_net.h
index 88ddfb9..0db3bd1 100644
--- a/drivers/net/hyperv/hyperv_net.h
+++ b/drivers/net/hyperv/hyperv_net.h
@@ -146,7 +146,6 @@ struct hv_netvsc_packet {
 
 struct netvsc_device_info {
 	unsigned char mac_adr[ETH_ALEN];
-	int  ring_size;
 	u32  num_chn;
 	u32  send_sections;
 	u32  recv_sections;
@@ -188,18 +187,22 @@ struct rndis_message;
 struct netvsc_device;
 struct net_device_context;
 
+extern u32 netvsc_ring_bytes;
+extern struct reciprocal_value netvsc_ring_reciprocal;
+
 struct netvsc_device *netvsc_device_add(struct hv_device *device,
 					const struct netvsc_device_info *info);
 int netvsc_alloc_recv_comp_ring(struct netvsc_device *net_device, u32 q_idx);
 void netvsc_device_remove(struct hv_device *device);
-int netvsc_send(struct net_device_context *ndc,
+int netvsc_send(struct net_device *net,
 		struct hv_netvsc_packet *packet,
 		struct rndis_message *rndis_msg,
 		struct hv_page_buffer *page_buffer,
 		struct sk_buff *skb);
-void netvsc_linkstatus_callback(struct hv_device *device_obj,
+void netvsc_linkstatus_callback(struct net_device *net,
 				struct rndis_message *resp);
 int netvsc_recv_callback(struct net_device *net,
+			 struct netvsc_device *nvdev,
 			 struct vmbus_channel *channel,
 			 void  *data, u32 len,
 			 const struct ndis_tcp_ip_checksum_info *csum_info,
@@ -220,7 +223,6 @@ int rndis_filter_set_rss_param(struct rndis_device *rdev,
 			       const u8 *key);
 int rndis_filter_receive(struct net_device *ndev,
 			 struct netvsc_device *net_dev,
-			 struct hv_device *dev,
 			 struct vmbus_channel *channel,
 			 void *data, u32 buflen);
 
@@ -635,14 +637,27 @@ struct nvsp_message {
 #define NETVSC_MTU 65535
 #define NETVSC_MTU_MIN ETH_MIN_MTU
 
-#define NETVSC_RECEIVE_BUFFER_SIZE		(1024*1024*16)	/* 16MB */
-#define NETVSC_RECEIVE_BUFFER_SIZE_LEGACY	(1024*1024*15)  /* 15MB */
-#define NETVSC_SEND_BUFFER_SIZE			(1024 * 1024 * 15)   /* 15MB */
+/* Max buffer sizes allowed by a host */
+#define NETVSC_RECEIVE_BUFFER_SIZE		(1024 * 1024 * 31) /* 31MB */
+#define NETVSC_RECEIVE_BUFFER_SIZE_LEGACY	(1024 * 1024 * 15) /* 15MB */
+#define NETVSC_RECEIVE_BUFFER_DEFAULT		(1024 * 1024 * 16)
+
+#define NETVSC_SEND_BUFFER_SIZE			(1024 * 1024 * 15)  /* 15MB */
+#define NETVSC_SEND_BUFFER_DEFAULT		(1024 * 1024)
+
 #define NETVSC_INVALID_INDEX			-1
 
 #define NETVSC_SEND_SECTION_SIZE		6144
 #define NETVSC_RECV_SECTION_SIZE		1728
 
+/* Default size of TX buf: 1MB, RX buf: 16MB */
+#define NETVSC_MIN_TX_SECTIONS	10
+#define NETVSC_DEFAULT_TX	(NETVSC_SEND_BUFFER_DEFAULT \
+				 / NETVSC_SEND_SECTION_SIZE)
+#define NETVSC_MIN_RX_SECTIONS	10
+#define NETVSC_DEFAULT_RX	(NETVSC_RECEIVE_BUFFER_DEFAULT \
+				 / NETVSC_RECV_SECTION_SIZE)
+
 #define NETVSC_RECEIVE_BUFFER_ID		0xcafe
 #define NETVSC_SEND_BUFFER_ID			0
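
For reference, with integer division these derived defaults work out to
NETVSC_DEFAULT_TX = 1048576 / 6144 = 170 send sections and
NETVSC_DEFAULT_RX = 16777216 / 1728 = 9709 receive sections, replacing
the hand-computed 192 and 10485 constants removed from netvsc_drv.c
later in this patch.
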
 
@@ -690,6 +705,7 @@ struct netvsc_ethtool_stats {
 	unsigned long tx_busy;
 	unsigned long tx_send_full;
 	unsigned long rx_comp_busy;
+	unsigned long rx_no_memory;
 	unsigned long stop_queue;
 	unsigned long wake_queue;
 };
@@ -804,13 +820,9 @@ struct netvsc_device {
 
 	struct rndis_device *extension;
 
-	int ring_size;
-
 	u32 max_pkt; /* max number of pkt in one send, e.g. 8 */
 	u32 pkt_align; /* alignment bytes, e.g. 8 */
 
-	atomic_t open_cnt;
-
 	struct netvsc_channel chan_table[VRSS_CHANNEL_MAX];
 
 	struct rcu_head rcu;
@@ -1425,32 +1437,6 @@ struct rndis_message {
 	(sizeof(msg) + (sizeof(struct rndis_message) -	\
 	 sizeof(union rndis_message_container)))
 
-/* get pointer to info buffer with message pointer */
-#define MESSAGE_TO_INFO_BUFFER(msg)				\
-	(((unsigned char *)(msg)) + msg->info_buf_offset)
-
-/* get pointer to status buffer with message pointer */
-#define MESSAGE_TO_STATUS_BUFFER(msg)			\
-	(((unsigned char *)(msg)) + msg->status_buf_offset)
-
-/* get pointer to OOBD buffer with message pointer */
-#define MESSAGE_TO_OOBD_BUFFER(msg)				\
-	(((unsigned char *)(msg)) + msg->oob_data_offset)
-
-/* get pointer to data buffer with message pointer */
-#define MESSAGE_TO_DATA_BUFFER(msg)				\
-	(((unsigned char *)(msg)) + msg->per_pkt_info_offset)
-
-/* get pointer to contained message from NDIS_MESSAGE pointer */
-#define RNDIS_MESSAGE_PTR_TO_MESSAGE_PTR(rndis_msg)		\
-	((void *) &rndis_msg->msg)
-
-/* get pointer to contained message from NDIS_MESSAGE pointer */
-#define RNDIS_MESSAGE_RAW_PTR_TO_MESSAGE_PTR(rndis_msg)	\
-	((void *) rndis_msg)
-
-
-
 #define RNDIS_HEADER_SIZE	(sizeof(struct rndis_message) - \
 				 sizeof(union rndis_message_container))
 
diff --git a/drivers/net/hyperv/netvsc.c b/drivers/net/hyperv/netvsc.c
index bfc7969..17e529a 100644
--- a/drivers/net/hyperv/netvsc.c
+++ b/drivers/net/hyperv/netvsc.c
@@ -31,6 +31,7 @@
 #include <linux/vmalloc.h>
 #include <linux/rtnetlink.h>
 #include <linux/prefetch.h>
+#include <linux/reciprocal_div.h>
 
 #include <asm/sync_bitops.h>
 
@@ -72,7 +73,7 @@ static struct netvsc_device *alloc_net_device(void)
 
 	init_waitqueue_head(&net_device->wait_drain);
 	net_device->destroy = false;
-	atomic_set(&net_device->open_cnt, 0);
+
 	net_device->max_pkt = RNDIS_MAX_PKT_DEFAULT;
 	net_device->pkt_align = RNDIS_PKT_ALIGN_DEFAULT;
 
@@ -267,6 +268,11 @@ static int netvsc_init_buf(struct hv_device *device,
 	buf_size = device_info->recv_sections * device_info->recv_section_size;
 	buf_size = roundup(buf_size, PAGE_SIZE);
 
+	/* Legacy hosts only allow smaller receive buffer */
+	if (net_device->nvsp_version <= NVSP_PROTOCOL_VERSION_2)
+		buf_size = min_t(unsigned int, buf_size,
+				 NETVSC_RECEIVE_BUFFER_SIZE_LEGACY);
+
 	net_device->recv_buf = vzalloc(buf_size);
 	if (!net_device->recv_buf) {
 		netdev_err(ndev,
@@ -588,14 +594,11 @@ void netvsc_device_remove(struct hv_device *device)
  * Get the percentage of available bytes to write in the ring.
  * The return value is in range from 0 to 100.
  */
-static inline u32 hv_ringbuf_avail_percent(
-		struct hv_ring_buffer_info *ring_info)
+static u32 hv_ringbuf_avail_percent(const struct hv_ring_buffer_info *ring_info)
 {
-	u32 avail_read, avail_write;
+	u32 avail_write = hv_get_bytes_to_write(ring_info);
 
-	hv_get_ringbuffer_availbytes(ring_info, &avail_read, &avail_write);
-
-	return avail_write * 100 / ring_info->ring_datasize;
+	return reciprocal_divide(avail_write * 100, netvsc_ring_reciprocal);
 }
 
 static inline void netvsc_free_send_slot(struct netvsc_device *net_device,
@@ -698,26 +701,26 @@ static u32 netvsc_get_next_send_section(struct netvsc_device *net_device)
 	return NETVSC_INVALID_INDEX;
 }
 
-static u32 netvsc_copy_to_send_buf(struct netvsc_device *net_device,
-				   unsigned int section_index,
-				   u32 pend_size,
-				   struct hv_netvsc_packet *packet,
-				   struct rndis_message *rndis_msg,
-				   struct hv_page_buffer *pb,
-				   struct sk_buff *skb)
+static void netvsc_copy_to_send_buf(struct netvsc_device *net_device,
+				    unsigned int section_index,
+				    u32 pend_size,
+				    struct hv_netvsc_packet *packet,
+				    struct rndis_message *rndis_msg,
+				    struct hv_page_buffer *pb,
+				    bool xmit_more)
 {
 	char *start = net_device->send_buf;
 	char *dest = start + (section_index * net_device->send_section_size)
 		     + pend_size;
 	int i;
-	u32 msg_size = 0;
 	u32 padding = 0;
-	u32 remain = packet->total_data_buflen % net_device->pkt_align;
 	u32 page_count = packet->cp_partial ? packet->rmsg_pgcnt :
 		packet->page_buf_cnt;
+	u32 remain;
 
 	/* Add padding */
-	if (skb->xmit_more && remain && !packet->cp_partial) {
+	remain = packet->total_data_buflen & (net_device->pkt_align - 1);
+	if (xmit_more && remain) {
 		padding = net_device->pkt_align - remain;
 		rndis_msg->msg_len += padding;
 		packet->total_data_buflen += padding;
@@ -729,16 +732,11 @@ static u32 netvsc_copy_to_send_buf(struct netvsc_device *net_device,
 		u32 len = pb[i].len;
 
 		memcpy(dest, (src + offset), len);
-		msg_size += len;
 		dest += len;
 	}
 
-	if (padding) {
+	if (padding)
 		memset(dest, 0, padding);
-		msg_size += padding;
-	}
-
-	return msg_size;
 }
 
 static inline int netvsc_send_pkt(
@@ -831,12 +829,13 @@ static inline void move_pkt_msd(struct hv_netvsc_packet **msd_send,
 }
 
 /* RCU already held by caller */
-int netvsc_send(struct net_device_context *ndev_ctx,
+int netvsc_send(struct net_device *ndev,
 		struct hv_netvsc_packet *packet,
 		struct rndis_message *rndis_msg,
 		struct hv_page_buffer *pb,
 		struct sk_buff *skb)
 {
+	struct net_device_context *ndev_ctx = netdev_priv(ndev);
 	struct netvsc_device *net_device
 		= rcu_dereference_bh(ndev_ctx->nvdev);
 	struct hv_device *device = ndev_ctx->device_ctx;
@@ -847,8 +846,7 @@ int netvsc_send(struct net_device_context *ndev_ctx,
 	struct multi_send_data *msdp;
 	struct hv_netvsc_packet *msd_send = NULL, *cur_send = NULL;
 	struct sk_buff *msd_skb = NULL;
-	bool try_batch;
-	bool xmit_more = (skb != NULL) ? skb->xmit_more : false;
+	bool try_batch, xmit_more;
 
 	/* If device is rescinded, return error and packet will get dropped. */
 	if (unlikely(!net_device || net_device->destroy))
@@ -899,10 +897,17 @@ int netvsc_send(struct net_device_context *ndev_ctx,
 		}
 	}
 
+	/* Keep aggregating only if the stack says more data is coming,
+	 * we are not doing a mixed-mode (partial copy) send, and the
+	 * tx queue is not flow-blocked.
+	 */
+	xmit_more = skb->xmit_more &&
+		!packet->cp_partial &&
+		!netif_xmit_stopped(netdev_get_tx_queue(ndev, packet->q_idx));
+
 	if (section_index != NETVSC_INVALID_INDEX) {
 		netvsc_copy_to_send_buf(net_device,
 					section_index, msd_len,
-					packet, rndis_msg, pb, skb);
+					packet, rndis_msg, pb, xmit_more);
 
 		packet->send_buf_index = section_index;
 
@@ -922,7 +927,7 @@ int netvsc_send(struct net_device_context *ndev_ctx,
 		if (msdp->skb)
 			dev_consume_skb_any(msdp->skb);
 
-		if (xmit_more && !packet->cp_partial) {
+		if (xmit_more) {
 			msdp->skb = skb;
 			msdp->pkt = packet;
 			msdp->count++;
@@ -1085,7 +1090,7 @@ static int netvsc_receive(struct net_device *ndev,
 		u32 buflen = vmxferpage_packet->ranges[i].byte_count;
 
 		/* Pass it to the upper layer */
-		status = rndis_filter_receive(ndev, net_device, device,
+		status = rndis_filter_receive(ndev, net_device,
 					      channel, data, buflen);
 	}
 
@@ -1249,7 +1254,6 @@ struct netvsc_device *netvsc_device_add(struct hv_device *device,
 				const struct netvsc_device_info *device_info)
 {
 	int i, ret = 0;
-	int ring_size = device_info->ring_size;
 	struct netvsc_device *net_device;
 	struct net_device *ndev = hv_get_drvdata(device);
 	struct net_device_context *net_device_ctx = netdev_priv(ndev);
@@ -1261,8 +1265,6 @@ struct netvsc_device *netvsc_device_add(struct hv_device *device,
 	for (i = 0; i < VRSS_SEND_TAB_SIZE; i++)
 		net_device_ctx->tx_table[i] = 0;
 
-	net_device->ring_size = ring_size;
-
 	/* Because the device uses NAPI, all the interrupt batching and
 	 * control is done via Net softirq, not the channel handling
 	 */
@@ -1289,10 +1291,9 @@ struct netvsc_device *netvsc_device_add(struct hv_device *device,
 		       netvsc_poll, NAPI_POLL_WEIGHT);
 
 	/* Open the channel */
-	ret = vmbus_open(device->channel, ring_size * PAGE_SIZE,
-			 ring_size * PAGE_SIZE, NULL, 0,
-			 netvsc_channel_cb,
-			 net_device->chan_table);
+	ret = vmbus_open(device->channel, netvsc_ring_bytes,
+			 netvsc_ring_bytes, NULL, 0,
+			 netvsc_channel_cb, net_device->chan_table);
 
 	if (ret != 0) {
 		netif_napi_del(&net_device->chan_table[0].napi);
diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c
index 5129647..c5584c2 100644
--- a/drivers/net/hyperv/netvsc_drv.c
+++ b/drivers/net/hyperv/netvsc_drv.c
@@ -35,6 +35,7 @@
 #include <linux/slab.h>
 #include <linux/rtnetlink.h>
 #include <linux/netpoll.h>
+#include <linux/reciprocal_div.h>
 
 #include <net/arp.h>
 #include <net/route.h>
@@ -46,17 +47,15 @@
 #include "hyperv_net.h"
 
 #define RING_SIZE_MIN		64
-#define NETVSC_MIN_TX_SECTIONS	10
-#define NETVSC_DEFAULT_TX	192	/* ~1M */
-#define NETVSC_MIN_RX_SECTIONS	10	/* ~64K */
-#define NETVSC_DEFAULT_RX	10485   /* Max ~16M */
 
 #define LINKCHANGE_INT (2 * HZ)
 #define VF_TAKEOVER_INT (HZ / 10)
 
-static int ring_size = 128;
-module_param(ring_size, int, S_IRUGO);
+static unsigned int ring_size __ro_after_init = 128;
+module_param(ring_size, uint, S_IRUGO);
 MODULE_PARM_DESC(ring_size, "Ring buffer size (# of pages)");
+unsigned int netvsc_ring_bytes __ro_after_init;
+struct reciprocal_value netvsc_ring_reciprocal __ro_after_init;
 
 static const u32 default_msg = NETIF_MSG_DRV | NETIF_MSG_PROBE |
 				NETIF_MSG_LINK | NETIF_MSG_IFUP |
@@ -174,17 +173,15 @@ static int netvsc_close(struct net_device *net)
 	return ret;
 }
 
-static void *init_ppi_data(struct rndis_message *msg, u32 ppi_size,
-			   int pkt_type)
+static inline void *init_ppi_data(struct rndis_message *msg,
+				  u32 ppi_size, u32 pkt_type)
 {
-	struct rndis_packet *rndis_pkt;
+	struct rndis_packet *rndis_pkt = &msg->msg.pkt;
 	struct rndis_per_packet_info *ppi;
 
-	rndis_pkt = &msg->msg.pkt;
 	rndis_pkt->data_offset += ppi_size;
-
-	ppi = (struct rndis_per_packet_info *)((void *)rndis_pkt +
-		rndis_pkt->per_pkt_info_offset + rndis_pkt->per_pkt_info_len);
+	ppi = (void *)rndis_pkt + rndis_pkt->per_pkt_info_offset
+		+ rndis_pkt->per_pkt_info_len;
 
 	ppi->size = ppi_size;
 	ppi->type = pkt_type;
@@ -192,7 +189,7 @@ static void *init_ppi_data(struct rndis_message *msg, u32 ppi_size,
 
 	rndis_pkt->per_pkt_info_len += ppi_size;
 
-	return ppi;
+	return ppi + 1;
 }
 
 /* Azure hosts don't support non-TCP port numbers in hashing for fragmented
@@ -469,10 +466,8 @@ static int netvsc_start_xmit(struct sk_buff *skb, struct net_device *net)
 	int ret;
 	unsigned int num_data_pgs;
 	struct rndis_message *rndis_msg;
-	struct rndis_packet *rndis_pkt;
 	struct net_device *vf_netdev;
 	u32 rndis_msg_size;
-	struct rndis_per_packet_info *ppi;
 	u32 hash;
 	struct hv_page_buffer pb[MAX_PAGE_BUFFER_COUNT];
 
@@ -527,34 +522,36 @@ static int netvsc_start_xmit(struct sk_buff *skb, struct net_device *net)
 
 	rndis_msg = (struct rndis_message *)skb->head;
 
-	memset(rndis_msg, 0, RNDIS_AND_PPI_SIZE);
-
 	/* Add the rndis header */
 	rndis_msg->ndis_msg_type = RNDIS_MSG_PACKET;
 	rndis_msg->msg_len = packet->total_data_buflen;
-	rndis_pkt = &rndis_msg->msg.pkt;
-	rndis_pkt->data_offset = sizeof(struct rndis_packet);
-	rndis_pkt->data_len = packet->total_data_buflen;
-	rndis_pkt->per_pkt_info_offset = sizeof(struct rndis_packet);
+
+	rndis_msg->msg.pkt = (struct rndis_packet) {
+		.data_offset = sizeof(struct rndis_packet),
+		.data_len = packet->total_data_buflen,
+		.per_pkt_info_offset = sizeof(struct rndis_packet),
+	};
 
 	rndis_msg_size = RNDIS_MESSAGE_SIZE(struct rndis_packet);
 
 	hash = skb_get_hash_raw(skb);
 	if (hash != 0 && net->real_num_tx_queues > 1) {
+		u32 *hash_info;
+
 		rndis_msg_size += NDIS_HASH_PPI_SIZE;
-		ppi = init_ppi_data(rndis_msg, NDIS_HASH_PPI_SIZE,
-				    NBL_HASH_VALUE);
-		*(u32 *)((void *)ppi + ppi->ppi_offset) = hash;
+		hash_info = init_ppi_data(rndis_msg, NDIS_HASH_PPI_SIZE,
+					  NBL_HASH_VALUE);
+		*hash_info = hash;
 	}
 
 	if (skb_vlan_tag_present(skb)) {
 		struct ndis_pkt_8021q_info *vlan;
 
 		rndis_msg_size += NDIS_VLAN_PPI_SIZE;
-		ppi = init_ppi_data(rndis_msg, NDIS_VLAN_PPI_SIZE,
-				    IEEE_8021Q_INFO);
+		vlan = init_ppi_data(rndis_msg, NDIS_VLAN_PPI_SIZE,
+				     IEEE_8021Q_INFO);
 
-		vlan = (void *)ppi + ppi->ppi_offset;
+		vlan->value = 0;
 		vlan->vlanid = skb->vlan_tci & VLAN_VID_MASK;
 		vlan->pri = (skb->vlan_tci & VLAN_PRIO_MASK) >>
 				VLAN_PRIO_SHIFT;
@@ -564,11 +561,10 @@ static int netvsc_start_xmit(struct sk_buff *skb, struct net_device *net)
 		struct ndis_tcp_lso_info *lso_info;
 
 		rndis_msg_size += NDIS_LSO_PPI_SIZE;
-		ppi = init_ppi_data(rndis_msg, NDIS_LSO_PPI_SIZE,
-				    TCP_LARGESEND_PKTINFO);
+		lso_info = init_ppi_data(rndis_msg, NDIS_LSO_PPI_SIZE,
+					 TCP_LARGESEND_PKTINFO);
 
-		lso_info = (void *)ppi + ppi->ppi_offset;
-
+		lso_info->value = 0;
 		lso_info->lso_v2_transmit.type = NDIS_TCP_LARGE_SEND_OFFLOAD_V2_TYPE;
 		if (skb->protocol == htons(ETH_P_IP)) {
 			lso_info->lso_v2_transmit.ip_version =
@@ -593,12 +589,10 @@ static int netvsc_start_xmit(struct sk_buff *skb, struct net_device *net)
 			struct ndis_tcp_ip_checksum_info *csum_info;
 
 			rndis_msg_size += NDIS_CSUM_PPI_SIZE;
-			ppi = init_ppi_data(rndis_msg, NDIS_CSUM_PPI_SIZE,
-					    TCPIP_CHKSUM_PKTINFO);
+			csum_info = init_ppi_data(rndis_msg, NDIS_CSUM_PPI_SIZE,
+						  TCPIP_CHKSUM_PKTINFO);
 
-			csum_info = (struct ndis_tcp_ip_checksum_info *)((void *)ppi +
-									 ppi->ppi_offset);
-
+			csum_info->value = 0;
 			csum_info->transmit.tcp_header_offset = skb_transport_offset(skb);
 
 			if (skb->protocol == htons(ETH_P_IP)) {
@@ -632,7 +626,7 @@ static int netvsc_start_xmit(struct sk_buff *skb, struct net_device *net)
 	/* timestamp packet in software */
 	skb_tx_timestamp(skb);
 
-	ret = netvsc_send(net_device_ctx, packet, rndis_msg, pb, skb);
+	ret = netvsc_send(net, packet, rndis_msg, pb, skb);
 	if (likely(ret == 0))
 		return NETDEV_TX_OK;
 
@@ -658,22 +652,14 @@ static int netvsc_start_xmit(struct sk_buff *skb, struct net_device *net)
 /*
  * netvsc_linkstatus_callback - Link up/down notification
  */
-void netvsc_linkstatus_callback(struct hv_device *device_obj,
+void netvsc_linkstatus_callback(struct net_device *net,
 				struct rndis_message *resp)
 {
 	struct rndis_indicate_status *indicate = &resp->msg.indicate_status;
-	struct net_device *net;
-	struct net_device_context *ndev_ctx;
+	struct net_device_context *ndev_ctx = netdev_priv(net);
 	struct netvsc_reconfig *event;
 	unsigned long flags;
 
-	net = hv_get_drvdata(device_obj);
-
-	if (!net)
-		return;
-
-	ndev_ctx = netdev_priv(net);
-
 	/* Update the physical link speed when changing to another vSwitch */
 	if (indicate->status == RNDIS_STATUS_LINK_SPEED_CHANGE) {
 		u32 speed;
@@ -753,34 +739,26 @@ static struct sk_buff *netvsc_alloc_recv_skb(struct net_device *net,
  * "wire" on the specified device.
  */
 int netvsc_recv_callback(struct net_device *net,
+			 struct netvsc_device *net_device,
 			 struct vmbus_channel *channel,
 			 void  *data, u32 len,
 			 const struct ndis_tcp_ip_checksum_info *csum_info,
 			 const struct ndis_pkt_8021q_info *vlan)
 {
 	struct net_device_context *net_device_ctx = netdev_priv(net);
-	struct netvsc_device *net_device;
 	u16 q_idx = channel->offermsg.offer.sub_channel_index;
-	struct netvsc_channel *nvchan;
+	struct netvsc_channel *nvchan = &net_device->chan_table[q_idx];
 	struct sk_buff *skb;
 	struct netvsc_stats *rx_stats;
 
 	if (net->reg_state != NETREG_REGISTERED)
 		return NVSP_STAT_FAIL;
 
-	rcu_read_lock();
-	net_device = rcu_dereference(net_device_ctx->nvdev);
-	if (unlikely(!net_device))
-		goto drop;
-
-	nvchan = &net_device->chan_table[q_idx];
-
 	/* Allocate a skb - TODO direct I/O to pages? */
 	skb = netvsc_alloc_recv_skb(net, &nvchan->napi,
 				    csum_info, vlan, data, len);
 	if (unlikely(!skb)) {
-drop:
-		++net->stats.rx_dropped;
+		++net_device_ctx->eth_stats.rx_no_memory;
 		rcu_read_unlock();
 		return NVSP_STAT_FAIL;
 	}
@@ -804,8 +782,6 @@ int netvsc_recv_callback(struct net_device *net,
 	u64_stats_update_end(&rx_stats->syncp);
 
 	napi_gro_receive(&nvchan->napi, skb);
-	rcu_read_unlock();
-
 	return 0;
 }
 
@@ -860,7 +836,6 @@ static int netvsc_set_channels(struct net_device *net,
 
 	memset(&device_info, 0, sizeof(device_info));
 	device_info.num_chn = count;
-	device_info.ring_size = ring_size;
 	device_info.send_sections = nvdev->send_section_cnt;
 	device_info.send_section_size = nvdev->send_section_size;
 	device_info.recv_sections = nvdev->recv_section_cnt;
@@ -975,7 +950,6 @@ static int netvsc_change_mtu(struct net_device *ndev, int mtu)
 		rndis_filter_close(nvdev);
 
 	memset(&device_info, 0, sizeof(device_info));
-	device_info.ring_size = ring_size;
 	device_info.num_chn = nvdev->num_chn;
 	device_info.send_sections = nvdev->send_section_cnt;
 	device_info.send_section_size = nvdev->send_section_size;
@@ -1133,12 +1107,13 @@ static const struct {
 	u16 offset;
 } netvsc_stats[] = {
 	{ "tx_scattered", offsetof(struct netvsc_ethtool_stats, tx_scattered) },
-	{ "tx_no_memory",  offsetof(struct netvsc_ethtool_stats, tx_no_memory) },
+	{ "tx_no_memory", offsetof(struct netvsc_ethtool_stats, tx_no_memory) },
 	{ "tx_no_space",  offsetof(struct netvsc_ethtool_stats, tx_no_space) },
 	{ "tx_too_big",	  offsetof(struct netvsc_ethtool_stats, tx_too_big) },
 	{ "tx_busy",	  offsetof(struct netvsc_ethtool_stats, tx_busy) },
 	{ "tx_send_full", offsetof(struct netvsc_ethtool_stats, tx_send_full) },
 	{ "rx_comp_busy", offsetof(struct netvsc_ethtool_stats, rx_comp_busy) },
+	{ "rx_no_memory", offsetof(struct netvsc_ethtool_stats, rx_no_memory) },
 	{ "stop_queue", offsetof(struct netvsc_ethtool_stats, stop_queue) },
 	{ "wake_queue", offsetof(struct netvsc_ethtool_stats, wake_queue) },
 }, vf_stats[] = {
@@ -1539,7 +1514,6 @@ static int netvsc_set_ringparam(struct net_device *ndev,
 
 	memset(&device_info, 0, sizeof(device_info));
 	device_info.num_chn = nvdev->num_chn;
-	device_info.ring_size = ring_size;
 	device_info.send_sections = new_tx;
 	device_info.send_section_size = nvdev->send_section_size;
 	device_info.recv_sections = new_rx;
@@ -1995,7 +1969,6 @@ static int netvsc_probe(struct hv_device *dev,
 
 	/* Notify the netvsc driver of the new device */
 	memset(&device_info, 0, sizeof(device_info));
-	device_info.ring_size = ring_size;
 	device_info.num_chn = VRSS_CHANNEL_DEFAULT;
 	device_info.send_sections = NETVSC_DEFAULT_TX;
 	device_info.send_section_size = NETVSC_SEND_SECTION_SIZE;
@@ -2158,11 +2131,13 @@ static int __init netvsc_drv_init(void)
 
 	if (ring_size < RING_SIZE_MIN) {
 		ring_size = RING_SIZE_MIN;
-		pr_info("Increased ring_size to %d (min allowed)\n",
+		pr_info("Increased ring_size to %u (min allowed)\n",
 			ring_size);
 	}
-	ret = vmbus_driver_register(&netvsc_drv);
+	netvsc_ring_bytes = ring_size * PAGE_SIZE;
+	netvsc_ring_reciprocal = reciprocal_value(netvsc_ring_bytes);
 
+	ret = vmbus_driver_register(&netvsc_drv);
 	if (ret)
 		return ret;
 
diff --git a/drivers/net/hyperv/rndis_filter.c b/drivers/net/hyperv/rndis_filter.c
index 7b637c7..91a67c5 100644
--- a/drivers/net/hyperv/rndis_filter.c
+++ b/drivers/net/hyperv/rndis_filter.c
@@ -134,11 +134,9 @@ static void put_rndis_request(struct rndis_device *dev,
 	kfree(req);
 }
 
-static void dump_rndis_message(struct hv_device *hv_dev,
+static void dump_rndis_message(struct net_device *netdev,
 			       const struct rndis_message *rndis_msg)
 {
-	struct net_device *netdev = hv_get_drvdata(hv_dev);
-
 	switch (rndis_msg->ndis_msg_type) {
 	case RNDIS_MSG_PACKET:
 		netdev_dbg(netdev, "RNDIS_MSG_PACKET (len %u, "
@@ -217,7 +215,6 @@ static int rndis_filter_send_request(struct rndis_device *dev,
 	struct hv_netvsc_packet *packet;
 	struct hv_page_buffer page_buf[2];
 	struct hv_page_buffer *pb = page_buf;
-	struct net_device_context *net_device_ctx = netdev_priv(dev->ndev);
 	int ret;
 
 	/* Setup the packet to send it */
@@ -245,7 +242,7 @@ static int rndis_filter_send_request(struct rndis_device *dev,
 	}
 
 	rcu_read_lock_bh();
-	ret = netvsc_send(net_device_ctx, packet, NULL, pb, NULL);
+	ret = netvsc_send(dev->ndev, packet, NULL, pb, NULL);
 	rcu_read_unlock_bh();
 
 	return ret;
@@ -354,6 +351,7 @@ static inline void *rndis_get_ppi(struct rndis_packet *rpkt, u32 type)
 }
 
 static int rndis_filter_receive_data(struct net_device *ndev,
+				     struct netvsc_device *nvdev,
 				     struct rndis_device *dev,
 				     struct rndis_message *msg,
 				     struct vmbus_channel *channel,
@@ -390,14 +388,14 @@ static int rndis_filter_receive_data(struct net_device *ndev,
 	 */
 	data = (void *)((unsigned long)data + data_offset);
 	csum_info = rndis_get_ppi(rndis_pkt, TCPIP_CHKSUM_PKTINFO);
-	return netvsc_recv_callback(ndev, channel,
+
+	return netvsc_recv_callback(ndev, nvdev, channel,
 				    data, rndis_pkt->data_len,
 				    csum_info, vlan);
 }
 
 int rndis_filter_receive(struct net_device *ndev,
 			 struct netvsc_device *net_dev,
-			 struct hv_device *dev,
 			 struct vmbus_channel *channel,
 			 void *data, u32 buflen)
 {
@@ -419,11 +417,12 @@ int rndis_filter_receive(struct net_device *ndev,
 	}
 
 	if (netif_msg_rx_status(net_device_ctx))
-		dump_rndis_message(dev, rndis_msg);
+		dump_rndis_message(ndev, rndis_msg);
 
 	switch (rndis_msg->ndis_msg_type) {
 	case RNDIS_MSG_PACKET:
-		return rndis_filter_receive_data(ndev, rndis_dev, rndis_msg,
+		return rndis_filter_receive_data(ndev, net_dev,
+						 rndis_dev, rndis_msg,
 						 channel, data, buflen);
 	case RNDIS_MSG_INIT_C:
 	case RNDIS_MSG_QUERY_C:
@@ -434,7 +433,7 @@ int rndis_filter_receive(struct net_device *ndev,
 
 	case RNDIS_MSG_INDICATE:
 		/* notification msgs */
-		netvsc_linkstatus_callback(dev, rndis_msg);
+		netvsc_linkstatus_callback(ndev, rndis_msg);
 		break;
 	default:
 		netdev_err(ndev,
@@ -1040,8 +1039,8 @@ static void netvsc_sc_open(struct vmbus_channel *new_sc)
 	/* Set the channel before opening.*/
 	nvchan->channel = new_sc;
 
-	ret = vmbus_open(new_sc, nvscdev->ring_size * PAGE_SIZE,
-			 nvscdev->ring_size * PAGE_SIZE, NULL, 0,
+	ret = vmbus_open(new_sc, netvsc_ring_bytes,
+			 netvsc_ring_bytes, NULL, 0,
 			 netvsc_channel_cb, nvchan);
 	if (ret == 0)
 		napi_enable(&nvchan->napi);
@@ -1362,9 +1361,6 @@ int rndis_filter_open(struct netvsc_device *nvdev)
 	if (!nvdev)
 		return -EINVAL;
 
-	if (atomic_inc_return(&nvdev->open_cnt) != 1)
-		return 0;
-
 	return rndis_filter_open_device(nvdev->extension);
 }
 
@@ -1373,13 +1369,12 @@ int rndis_filter_close(struct netvsc_device *nvdev)
 	if (!nvdev)
 		return -EINVAL;
 
-	if (atomic_dec_return(&nvdev->open_cnt) != 0)
-		return 0;
-
 	return rndis_filter_close_device(nvdev->extension);
 }
 
 bool rndis_filter_opened(const struct netvsc_device *nvdev)
 {
-	return atomic_read(&nvdev->open_cnt) > 0;
+	const struct rndis_device *dev = nvdev->extension;
+
+	return dev->state == RNDIS_DEV_DATAINITIALIZED;
 }
diff --git a/drivers/net/ieee802154/adf7242.c b/drivers/net/ieee802154/adf7242.c
index 400fdbd..64f1b1e 100644
--- a/drivers/net/ieee802154/adf7242.c
+++ b/drivers/net/ieee802154/adf7242.c
@@ -1,7 +1,7 @@
 /*
  * Analog Devices ADF7242 Low-Power IEEE 802.15.4 Transceiver
  *
- * Copyright 2009-2015 Analog Devices Inc.
+ * Copyright 2009-2017 Analog Devices Inc.
  *
  * Licensed under the GPL-2 or later.
  *
@@ -344,12 +344,18 @@ static int adf7242_wait_status(struct adf7242_local *lp, unsigned int status,
 	return ret;
 }
 
-static int adf7242_wait_ready(struct adf7242_local *lp, int line)
+static int adf7242_wait_rc_ready(struct adf7242_local *lp, int line)
 {
 	return adf7242_wait_status(lp, STAT_RC_READY | STAT_SPI_READY,
 				   STAT_RC_READY | STAT_SPI_READY, line);
 }
 
+static int adf7242_wait_spi_ready(struct adf7242_local *lp, int line)
+{
+	return adf7242_wait_status(lp, STAT_SPI_READY,
+				   STAT_SPI_READY, line);
+}
+
 static int adf7242_write_fbuf(struct adf7242_local *lp, u8 *data, u8 len)
 {
 	u8 *buf = lp->buf;
@@ -369,7 +375,7 @@ static int adf7242_write_fbuf(struct adf7242_local *lp, u8 *data, u8 len)
 	spi_message_add_tail(&xfer_head, &msg);
 	spi_message_add_tail(&xfer_buf, &msg);
 
-	adf7242_wait_ready(lp, __LINE__);
+	adf7242_wait_spi_ready(lp, __LINE__);
 
 	mutex_lock(&lp->bmux);
 	buf[0] = CMD_SPI_PKT_WR;
@@ -401,7 +407,7 @@ static int adf7242_read_fbuf(struct adf7242_local *lp,
 	spi_message_add_tail(&xfer_head, &msg);
 	spi_message_add_tail(&xfer_buf, &msg);
 
-	adf7242_wait_ready(lp, __LINE__);
+	adf7242_wait_spi_ready(lp, __LINE__);
 
 	mutex_lock(&lp->bmux);
 	if (packet_read) {
@@ -432,7 +438,7 @@ static int adf7242_read_reg(struct adf7242_local *lp, u16 addr, u8 *data)
 		.rx_buf = lp->buf_read_rx,
 	};
 
-	adf7242_wait_ready(lp, __LINE__);
+	adf7242_wait_spi_ready(lp, __LINE__);
 
 	mutex_lock(&lp->bmux);
 	lp->buf_read_tx[0] = CMD_SPI_MEM_RD(addr);
@@ -462,7 +468,7 @@ static int adf7242_write_reg(struct adf7242_local *lp, u16 addr, u8 data)
 {
 	int status;
 
-	adf7242_wait_ready(lp, __LINE__);
+	adf7242_wait_spi_ready(lp, __LINE__);
 
 	mutex_lock(&lp->bmux);
 	lp->buf_reg_tx[0] = CMD_SPI_MEM_WR(addr);
@@ -484,7 +490,7 @@ static int adf7242_cmd(struct adf7242_local *lp, unsigned int cmd)
 	dev_vdbg(&lp->spi->dev, "%s : CMD=0x%X\n", __func__, cmd);
 
 	if (cmd != CMD_RC_PC_RESET_NO_WAIT)
-		adf7242_wait_ready(lp, __LINE__);
+		adf7242_wait_rc_ready(lp, __LINE__);
 
 	mutex_lock(&lp->bmux);
 	lp->buf_cmd = cmd;
@@ -557,6 +563,22 @@ static int adf7242_verify_firmware(struct adf7242_local *lp,
 	return 0;
 }
 
+static void adf7242_clear_irqstat(struct adf7242_local *lp)
+{
+	adf7242_write_reg(lp, REG_IRQ1_SRC1, IRQ_CCA_COMPLETE | IRQ_SFD_RX |
+			  IRQ_SFD_TX | IRQ_RX_PKT_RCVD | IRQ_TX_PKT_SENT |
+			  IRQ_FRAME_VALID | IRQ_ADDRESS_VALID | IRQ_CSMA_CA);
+}
+
+static int adf7242_cmd_rx(struct adf7242_local *lp)
+{
+	/* Wait until the ACK is sent */
+	adf7242_wait_status(lp, RC_STATUS_PHY_RDY, RC_STATUS_MASK, __LINE__);
+	adf7242_clear_irqstat(lp);
+
+	return adf7242_cmd(lp, CMD_RC_RX);
+}
+
 static int adf7242_set_txpower(struct ieee802154_hw *hw, int mbm)
 {
 	struct adf7242_local *lp = hw->priv;
@@ -660,7 +682,7 @@ static int adf7242_start(struct ieee802154_hw *hw)
 	struct adf7242_local *lp = hw->priv;
 
 	adf7242_cmd(lp, CMD_RC_PHY_RDY);
-	adf7242_write_reg(lp, REG_IRQ1_SRC1, 0xFF);
+	adf7242_clear_irqstat(lp);
 	enable_irq(lp->spi->irq);
 	set_bit(FLAG_START, &lp->flags);
 
@@ -671,10 +693,10 @@ static void adf7242_stop(struct ieee802154_hw *hw)
 {
 	struct adf7242_local *lp = hw->priv;
 
+	disable_irq(lp->spi->irq);
 	adf7242_cmd(lp, CMD_RC_IDLE);
 	clear_bit(FLAG_START, &lp->flags);
-	disable_irq(lp->spi->irq);
-	adf7242_write_reg(lp, REG_IRQ1_SRC1, 0xFF);
+	adf7242_clear_irqstat(lp);
 }
 
 static int adf7242_channel(struct ieee802154_hw *hw, u8 page, u8 channel)
@@ -789,9 +811,12 @@ static int adf7242_xmit(struct ieee802154_hw *hw, struct sk_buff *skb)
 	struct adf7242_local *lp = hw->priv;
 	int ret;
 
+	/* ensure existing instances of the IRQ handler have completed */
+	disable_irq(lp->spi->irq);
 	set_bit(FLAG_XMIT, &lp->flags);
 	reinit_completion(&lp->tx_complete);
 	adf7242_cmd(lp, CMD_RC_PHY_RDY);
+	adf7242_clear_irqstat(lp);
 
 	ret = adf7242_write_fbuf(lp, skb->data, skb->len);
 	if (ret)
@@ -800,6 +825,7 @@ static int adf7242_xmit(struct ieee802154_hw *hw, struct sk_buff *skb)
 	ret = adf7242_cmd(lp, CMD_RC_CSMACA);
 	if (ret)
 		goto err;
+	enable_irq(lp->spi->irq);
 
 	ret = wait_for_completion_interruptible_timeout(&lp->tx_complete,
 							HZ / 10);
@@ -822,7 +848,7 @@ static int adf7242_xmit(struct ieee802154_hw *hw, struct sk_buff *skb)
 
 err:
 	clear_bit(FLAG_XMIT, &lp->flags);
-	adf7242_cmd(lp, CMD_RC_RX);
+	adf7242_cmd_rx(lp);
 
 	return ret;
 }
@@ -846,7 +872,7 @@ static int adf7242_rx(struct adf7242_local *lp)
 
 	skb = dev_alloc_skb(len);
 	if (!skb) {
-		adf7242_cmd(lp, CMD_RC_RX);
+		adf7242_cmd_rx(lp);
 		return -ENOMEM;
 	}
 
@@ -854,14 +880,14 @@ static int adf7242_rx(struct adf7242_local *lp)
 	ret = adf7242_read_fbuf(lp, data, len, true);
 	if (ret < 0) {
 		kfree_skb(skb);
-		adf7242_cmd(lp, CMD_RC_RX);
+		adf7242_cmd_rx(lp);
 		return ret;
 	}
 
 	lqi = data[len - 2];
 	lp->rssi = data[len - 1];
 
-	adf7242_cmd(lp, CMD_RC_RX);
+	ret = adf7242_cmd_rx(lp);
 
 	skb_trim(skb, len - 2);	/* Don't put RSSI/LQI or CRC into the frame */
 
@@ -870,7 +896,7 @@ static int adf7242_rx(struct adf7242_local *lp)
 	dev_dbg(&lp->spi->dev, "%s: ret=%d len=%d lqi=%d rssi=%d\n",
 		__func__, ret, (int)len, (int)lqi, lp->rssi);
 
-	return 0;
+	return ret;
 }
 
 static const struct ieee802154_ops adf7242_ops = {
@@ -888,7 +914,7 @@ static const struct ieee802154_ops adf7242_ops = {
 	.set_cca_ed_level = adf7242_set_cca_ed_level,
 };
 
-static void adf7242_debug(u8 irq1)
+static void adf7242_debug(struct adf7242_local *lp, u8 irq1)
 {
 #ifdef DEBUG
 	u8 stat;
@@ -906,9 +932,12 @@ static void adf7242_debug(u8 irq1)
 		irq1 & IRQ_FRAME_VALID ? "IRQ_FRAME_VALID\n" : "",
 		irq1 & IRQ_ADDRESS_VALID ? "IRQ_ADDRESS_VALID\n" : "");
 
-	dev_dbg(&lp->spi->dev, "%s STATUS = %X:\n%s\n%s%s%s%s%s\n",
+	dev_dbg(&lp->spi->dev, "%s STATUS = %X:\n%s\n%s\n%s\n%s\n%s%s%s%s%s\n",
 		__func__, stat,
+		stat & STAT_SPI_READY ? "SPI_READY" : "SPI_BUSY",
+		stat & STAT_IRQ_STATUS ? "IRQ_PENDING" : "IRQ_CLEAR",
 		stat & STAT_RC_READY ? "RC_READY" : "RC_BUSY",
+		stat & STAT_CCA_RESULT ? "CHAN_IDLE" : "CHAN_BUSY",
 		(stat & 0xf) == RC_STATUS_IDLE ? "RC_STATUS_IDLE" : "",
 		(stat & 0xf) == RC_STATUS_MEAS ? "RC_STATUS_MEAS" : "",
 		(stat & 0xf) == RC_STATUS_PHY_RDY ? "RC_STATUS_PHY_RDY" : "",
@@ -923,20 +952,20 @@ static irqreturn_t adf7242_isr(int irq, void *data)
 	unsigned int xmit;
 	u8 irq1;
 
-	adf7242_wait_status(lp, RC_STATUS_PHY_RDY, RC_STATUS_MASK, __LINE__);
-
 	adf7242_read_reg(lp, REG_IRQ1_SRC1, &irq1);
-	adf7242_write_reg(lp, REG_IRQ1_SRC1, irq1);
 
 	if (!(irq1 & (IRQ_RX_PKT_RCVD | IRQ_CSMA_CA)))
 		dev_err(&lp->spi->dev, "%s :ERROR IRQ1 = 0x%X\n",
 			__func__, irq1);
 
-	adf7242_debug(irq1);
+	adf7242_debug(lp, irq1);
 
 	xmit = test_bit(FLAG_XMIT, &lp->flags);
 
 	if (xmit && (irq1 & IRQ_CSMA_CA)) {
+		adf7242_wait_status(lp, RC_STATUS_PHY_RDY,
+				    RC_STATUS_MASK, __LINE__);
+
 		if (ADF7242_REPORT_CSMA_CA_STAT) {
 			u8 astat;
 
@@ -957,6 +986,7 @@ static irqreturn_t adf7242_isr(int irq, void *data)
 			lp->tx_stat = SUCCESS;
 		}
 		complete(&lp->tx_complete);
+		adf7242_clear_irqstat(lp);
 	} else if (!xmit && (irq1 & IRQ_RX_PKT_RCVD) &&
 		   (irq1 & IRQ_FRAME_VALID)) {
 		adf7242_rx(lp);
@@ -965,16 +995,19 @@ static irqreturn_t adf7242_isr(int irq, void *data)
 		dev_dbg(&lp->spi->dev, "%s:%d : ERROR IRQ1 = 0x%X\n",
 			__func__, __LINE__, irq1);
 		adf7242_cmd(lp, CMD_RC_PHY_RDY);
-		adf7242_write_reg(lp, REG_IRQ1_SRC1, 0xFF);
-		adf7242_cmd(lp, CMD_RC_RX);
+		adf7242_cmd_rx(lp);
 	} else {
 		/* This can only be xmit without IRQ, likely an RX packet.
 		 * We get a TX IRQ shortly - do nothing or let the xmit
 		 * timeout handle this.
 		 */
+
 		dev_dbg(&lp->spi->dev, "%s:%d : ERROR IRQ1 = 0x%X, xmit %d\n",
 			__func__, __LINE__, irq1, xmit);
+		adf7242_wait_status(lp, RC_STATUS_PHY_RDY,
+				    RC_STATUS_MASK, __LINE__);
 		complete(&lp->tx_complete);
+		adf7242_clear_irqstat(lp);
 	}
 
 	return IRQ_HANDLED;
@@ -994,7 +1027,7 @@ static int adf7242_soft_reset(struct adf7242_local *lp, int line)
 	adf7242_set_promiscuous_mode(lp->hw, lp->promiscuous);
 	adf7242_set_csma_params(lp->hw, lp->min_be, lp->max_be,
 				lp->max_cca_retries);
-	adf7242_write_reg(lp, REG_IRQ1_SRC1, 0xFF);
+	adf7242_clear_irqstat(lp);
 
 	if (test_bit(FLAG_START, &lp->flags)) {
 		enable_irq(lp->spi->irq);
@@ -1060,7 +1093,7 @@ static int adf7242_hw_init(struct adf7242_local *lp)
 	adf7242_write_reg(lp, REG_IRQ1_EN0, 0);
 	adf7242_write_reg(lp, REG_IRQ1_EN1, IRQ_RX_PKT_RCVD | IRQ_CSMA_CA);
 
-	adf7242_write_reg(lp, REG_IRQ1_SRC1, 0xFF);
+	adf7242_clear_irqstat(lp);
 	adf7242_write_reg(lp, REG_IRQ1_SRC0, 0xFF);
 
 	adf7242_cmd(lp, CMD_RC_IDLE);
@@ -1086,8 +1119,11 @@ static int adf7242_stats_show(struct seq_file *file, void *offset)
 		   irq1 & IRQ_FRAME_VALID ? "IRQ_FRAME_VALID\n" : "",
 		   irq1 & IRQ_ADDRESS_VALID ? "IRQ_ADDRESS_VALID\n" : "");
 
-	seq_printf(file, "STATUS = %X:\n%s\n%s%s%s%s%s\n", stat,
+	seq_printf(file, "STATUS = %X:\n%s\n%s\n%s\n%s\n%s%s%s%s%s\n", stat,
+		   stat & STAT_SPI_READY ? "SPI_READY" : "SPI_BUSY",
+		   stat & STAT_IRQ_STATUS ? "IRQ_PENDING" : "IRQ_CLEAR",
 		   stat & STAT_RC_READY ? "RC_READY" : "RC_BUSY",
+		   stat & STAT_CCA_RESULT ? "CHAN_IDLE" : "CHAN_BUSY",
 		   (stat & 0xf) == RC_STATUS_IDLE ? "RC_STATUS_IDLE" : "",
 		   (stat & 0xf) == RC_STATUS_MEAS ? "RC_STATUS_MEAS" : "",
 		   (stat & 0xf) == RC_STATUS_PHY_RDY ? "RC_STATUS_PHY_RDY" : "",
@@ -1257,12 +1293,14 @@ static int adf7242_remove(struct spi_device *spi)
 
 static const struct of_device_id adf7242_of_match[] = {
 	{ .compatible = "adi,adf7242", },
+	{ .compatible = "adi,adf7241", },
 	{ },
 };
 MODULE_DEVICE_TABLE(of, adf7242_of_match);
 
 static const struct spi_device_id adf7242_device_id[] = {
 	{ .name = "adf7242", },
+	{ .name = "adf7241", },
 	{ },
 };
 MODULE_DEVICE_TABLE(spi, adf7242_device_id);
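
adf7242_wait_ready() is split so each caller waits for exactly the readiness it needs: plain SPI transfers (register and frame-buffer access) only require STAT_SPI_READY, while radio-controller commands must also see STAT_RC_READY. Both wrappers funnel into adf7242_wait_status(lp, status, mask, line), which polls the STATUS byte until (stat & mask) == status. A hedged sketch of that polling core; adf7242_status() and MAX_POLL_LOOPS are assumed from the rest of the driver and are not visible in this hunk:

	static int wait_status_sketch(struct adf7242_local *lp,
				      unsigned int status, unsigned int mask)
	{
		unsigned int cnt = 0;
		u8 stat;

		do {
			adf7242_status(lp, &stat);	/* read STATUS over SPI */
			if ((stat & mask) == status)
				return 0;
		} while (cnt++ < MAX_POLL_LOOPS);

		return -ETIMEDOUT;
	}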
diff --git a/drivers/net/ipvlan/ipvlan_core.c b/drivers/net/ipvlan/ipvlan_core.c
index 77cc4fb..c1f008f 100644
--- a/drivers/net/ipvlan/ipvlan_core.c
+++ b/drivers/net/ipvlan/ipvlan_core.c
@@ -315,13 +315,13 @@ static int ipvlan_rcv_frame(struct ipvl_addr *addr, struct sk_buff **pskb,
 
 		*pskb = skb;
 	}
-	ipvlan_skb_crossing_ns(skb, dev);
 
 	if (local) {
 		skb->pkt_type = PACKET_HOST;
 		if (dev_forward_skb(ipvlan->dev, skb) == NET_RX_SUCCESS)
 			success = true;
 	} else {
+		skb->dev = dev;
 		ret = RX_HANDLER_ANOTHER;
 		success = true;
 	}
@@ -586,7 +586,7 @@ static int ipvlan_xmit_mode_l2(struct sk_buff *skb, struct net_device *dev)
 		return NET_XMIT_SUCCESS;
 	}
 
-	ipvlan_skb_crossing_ns(skb, ipvlan->phy_dev);
+	skb->dev = ipvlan->phy_dev;
 	return dev_queue_xmit(skb);
 }
 
@@ -664,8 +664,6 @@ static rx_handler_result_t ipvlan_handle_mode_l2(struct sk_buff **pskb,
 	struct sk_buff *skb = *pskb;
 	struct ethhdr *eth = eth_hdr(skb);
 	rx_handler_result_t ret = RX_HANDLER_PASS;
-	void *lyr3h;
-	int addr_type;
 
 	if (is_multicast_ether_addr(eth->h_dest)) {
 		if (ipvlan_external_frame(skb, port)) {
@@ -683,15 +681,8 @@ static rx_handler_result_t ipvlan_handle_mode_l2(struct sk_buff **pskb,
 			}
 		}
 	} else {
-		struct ipvl_addr *addr;
-
-		lyr3h = ipvlan_get_L3_hdr(port, skb, &addr_type);
-		if (!lyr3h)
-			return ret;
-
-		addr = ipvlan_addr_lookup(port, lyr3h, addr_type, true);
-		if (addr)
-			ret = ipvlan_rcv_frame(addr, pskb, false);
+		/* Perform like l3 mode for non-multicast packet */
+		ret = ipvlan_handle_mode_l3(pskb, port);
 	}
 
 	return ret;
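
Two simplifications above: ipvlan_skb_crossing_ns() is replaced by a plain skb->dev assignment, and non-multicast l2-mode traffic is handed to ipvlan_handle_mode_l3() instead of duplicating the L3-header lookup. For reference, the replaced helper roughly did the following (sketch of the pre-patch code), so the visible effect is that skbs are no longer scrubbed on these paths and metadata such as skb->mark survives:

	static void skb_crossing_ns_sketch(struct sk_buff *skb,
					   struct net_device *dev)
	{
		bool xnet = true;

		if (dev)
			xnet = !net_eq(dev_net(skb->dev), dev_net(dev));
		skb_scrub_packet(skb, xnet);	/* clears mark, dst, etc. */
		skb->dev = dev;
	}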
diff --git a/drivers/net/ipvlan/ipvlan_main.c b/drivers/net/ipvlan/ipvlan_main.c
index 30cb803e..2469df1 100644
--- a/drivers/net/ipvlan/ipvlan_main.c
+++ b/drivers/net/ipvlan/ipvlan_main.c
@@ -850,6 +850,19 @@ static void ipvlan_del_addr6(struct ipvl_dev *ipvlan, struct in6_addr *ip6_addr)
 	return ipvlan_del_addr(ipvlan, ip6_addr, true);
 }
 
+static bool ipvlan_is_valid_dev(const struct net_device *dev)
+{
+	struct ipvl_dev *ipvlan = netdev_priv(dev);
+
+	if (!netif_is_ipvlan(dev))
+		return false;
+
+	if (!ipvlan || !ipvlan->port)
+		return false;
+
+	return true;
+}
+
 static int ipvlan_addr6_event(struct notifier_block *unused,
 			      unsigned long event, void *ptr)
 {
@@ -857,10 +870,7 @@ static int ipvlan_addr6_event(struct notifier_block *unused,
 	struct net_device *dev = (struct net_device *)if6->idev->dev;
 	struct ipvl_dev *ipvlan = netdev_priv(dev);
 
-	if (!netif_is_ipvlan(dev))
-		return NOTIFY_DONE;
-
-	if (!ipvlan || !ipvlan->port)
+	if (!ipvlan_is_valid_dev(dev))
 		return NOTIFY_DONE;
 
 	switch (event) {
@@ -888,10 +898,7 @@ static int ipvlan_addr6_validator_event(struct notifier_block *unused,
 	if (in_softirq())
 		return NOTIFY_DONE;
 
-	if (!netif_is_ipvlan(dev))
-		return NOTIFY_DONE;
-
-	if (!ipvlan || !ipvlan->port)
+	if (!ipvlan_is_valid_dev(dev))
 		return NOTIFY_DONE;
 
 	switch (event) {
@@ -932,10 +939,7 @@ static int ipvlan_addr4_event(struct notifier_block *unused,
 	struct ipvl_dev *ipvlan = netdev_priv(dev);
 	struct in_addr ip4_addr;
 
-	if (!netif_is_ipvlan(dev))
-		return NOTIFY_DONE;
-
-	if (!ipvlan || !ipvlan->port)
+	if (!ipvlan_is_valid_dev(dev))
 		return NOTIFY_DONE;
 
 	switch (event) {
@@ -961,10 +965,7 @@ static int ipvlan_addr4_validator_event(struct notifier_block *unused,
 	struct net_device *dev = (struct net_device *)ivi->ivi_dev->dev;
 	struct ipvl_dev *ipvlan = netdev_priv(dev);
 
-	if (!netif_is_ipvlan(dev))
-		return NOTIFY_DONE;
-
-	if (!ipvlan || !ipvlan->port)
+	if (!ipvlan_is_valid_dev(dev))
 		return NOTIFY_DONE;
 
 	switch (event) {
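
The same two-step guard appeared in all four address notifiers and is now ipvlan_is_valid_dev(). The ordering inside the helper is deliberate: netdev_priv() is computed before netif_is_ipvlan() is checked, which is safe because netdev_priv() is plain pointer arithmetic into the netdev allocation and the result is only dereferenced after the type check passes. A hypothetical new notifier would begin the same way (sketch, not part of the patch):

	static int example_addr4_event(struct notifier_block *unused,
				       unsigned long event, void *ptr)
	{
		struct in_ifaddr *ifa = ptr;
		struct net_device *dev = ifa->ifa_dev->dev;

		if (!ipvlan_is_valid_dev(dev))
			return NOTIFY_DONE;

		/* netdev_priv(dev) is a valid struct ipvl_dev from here on */
		return NOTIFY_OK;
	}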
diff --git a/drivers/net/macsec.c b/drivers/net/macsec.c
index 1d025ab..f522715 100644
--- a/drivers/net/macsec.c
+++ b/drivers/net/macsec.c
@@ -393,7 +393,12 @@ static struct macsec_cb *macsec_skb_cb(struct sk_buff *skb)
 #define MACSEC_PORT_SCB (0x0000)
 #define MACSEC_UNDEF_SCI ((__force sci_t)0xffffffffffffffffULL)
 
-#define DEFAULT_SAK_LEN 16
+#define MACSEC_GCM_AES_128_SAK_LEN 16
+#define MACSEC_GCM_AES_256_SAK_LEN 32
+
+#define MAX_SAK_LEN MACSEC_GCM_AES_256_SAK_LEN
+
+#define DEFAULT_SAK_LEN MACSEC_GCM_AES_128_SAK_LEN
 #define DEFAULT_SEND_SCI true
 #define DEFAULT_ENCRYPT false
 #define DEFAULT_ENCODING_SA 0
@@ -1600,7 +1605,7 @@ static const struct nla_policy macsec_genl_sa_policy[NUM_MACSEC_SA_ATTR] = {
 	[MACSEC_SA_ATTR_KEYID] = { .type = NLA_BINARY,
 				   .len = MACSEC_KEYID_LEN, },
 	[MACSEC_SA_ATTR_KEY] = { .type = NLA_BINARY,
-				 .len = MACSEC_MAX_KEY_LEN, },
+				 .len = MAX_SAK_LEN, },
 };
 
 static int parse_sa_config(struct nlattr **attrs, struct nlattr **tb_sa)
@@ -2362,15 +2367,26 @@ static int nla_put_secy(struct macsec_secy *secy, struct sk_buff *skb)
 {
 	struct macsec_tx_sc *tx_sc = &secy->tx_sc;
 	struct nlattr *secy_nest = nla_nest_start(skb, MACSEC_ATTR_SECY);
+	u64 csid;
 
 	if (!secy_nest)
 		return 1;
 
+	switch (secy->key_len) {
+	case MACSEC_GCM_AES_128_SAK_LEN:
+		csid = MACSEC_CIPHER_ID_GCM_AES_128;
+		break;
+	case MACSEC_GCM_AES_256_SAK_LEN:
+		csid = MACSEC_CIPHER_ID_GCM_AES_256;
+		break;
+	default:
+		goto cancel;
+	}
+
 	if (nla_put_sci(skb, MACSEC_SECY_ATTR_SCI, secy->sci,
 			MACSEC_SECY_ATTR_PAD) ||
 	    nla_put_u64_64bit(skb, MACSEC_SECY_ATTR_CIPHER_SUITE,
-			      MACSEC_DEFAULT_CIPHER_ID,
-			      MACSEC_SECY_ATTR_PAD) ||
+			      csid, MACSEC_SECY_ATTR_PAD) ||
 	    nla_put_u8(skb, MACSEC_SECY_ATTR_ICV_LEN, secy->icv_len) ||
 	    nla_put_u8(skb, MACSEC_SECY_ATTR_OPER, secy->operational) ||
 	    nla_put_u8(skb, MACSEC_SECY_ATTR_PROTECT, secy->protect_frames) ||
@@ -3015,8 +3031,8 @@ static void macsec_setup(struct net_device *dev)
 	eth_zero_addr(dev->broadcast);
 }
 
-static void macsec_changelink_common(struct net_device *dev,
-				     struct nlattr *data[])
+static int macsec_changelink_common(struct net_device *dev,
+				    struct nlattr *data[])
 {
 	struct macsec_secy *secy;
 	struct macsec_tx_sc *tx_sc;
@@ -3056,6 +3072,22 @@ static void macsec_changelink_common(struct net_device *dev,
 
 	if (data[IFLA_MACSEC_VALIDATION])
 		secy->validate_frames = nla_get_u8(data[IFLA_MACSEC_VALIDATION]);
+
+	if (data[IFLA_MACSEC_CIPHER_SUITE]) {
+		switch (nla_get_u64(data[IFLA_MACSEC_CIPHER_SUITE])) {
+		case MACSEC_CIPHER_ID_GCM_AES_128:
+		case MACSEC_DEFAULT_CIPHER_ALT:
+			secy->key_len = MACSEC_GCM_AES_128_SAK_LEN;
+			break;
+		case MACSEC_CIPHER_ID_GCM_AES_256:
+			secy->key_len = MACSEC_GCM_AES_256_SAK_LEN;
+			break;
+		default:
+			return -EINVAL;
+		}
+	}
+
+	return 0;
 }
 
 static int macsec_changelink(struct net_device *dev, struct nlattr *tb[],
@@ -3071,9 +3103,7 @@ static int macsec_changelink(struct net_device *dev, struct nlattr *tb[],
 	    data[IFLA_MACSEC_PORT])
 		return -EINVAL;
 
-	macsec_changelink_common(dev, data);
-
-	return 0;
+	return macsec_changelink_common(dev, data);
 }
 
 static void macsec_del_dev(struct macsec_dev *macsec)
@@ -3270,8 +3300,11 @@ static int macsec_newlink(struct net *net, struct net_device *dev,
 	if (err)
 		goto unlink;
 
-	if (data)
-		macsec_changelink_common(dev, data);
+	if (data) {
+		err = macsec_changelink_common(dev, data);
+		if (err)
+			goto del_dev;
+	}
 
 	err = register_macsec_dev(real_dev, dev);
 	if (err < 0)
@@ -3320,7 +3353,8 @@ static int macsec_validate_attr(struct nlattr *tb[], struct nlattr *data[],
 	}
 
 	switch (csid) {
-	case MACSEC_DEFAULT_CIPHER_ID:
+	case MACSEC_CIPHER_ID_GCM_AES_128:
+	case MACSEC_CIPHER_ID_GCM_AES_256:
 	case MACSEC_DEFAULT_CIPHER_ALT:
 		if (icv_len < MACSEC_MIN_ICV_LEN ||
 		    icv_len > MACSEC_STD_ICV_LEN)
@@ -3390,12 +3424,24 @@ static int macsec_fill_info(struct sk_buff *skb,
 {
 	struct macsec_secy *secy = &macsec_priv(dev)->secy;
 	struct macsec_tx_sc *tx_sc = &secy->tx_sc;
+	u64 csid;
+
+	switch (secy->key_len) {
+	case MACSEC_GCM_AES_128_SAK_LEN:
+		csid = MACSEC_CIPHER_ID_GCM_AES_128;
+		break;
+	case MACSEC_GCM_AES_256_SAK_LEN:
+		csid = MACSEC_CIPHER_ID_GCM_AES_256;
+		break;
+	default:
+		goto nla_put_failure;
+	}
 
 	if (nla_put_sci(skb, IFLA_MACSEC_SCI, secy->sci,
 			IFLA_MACSEC_PAD) ||
 	    nla_put_u8(skb, IFLA_MACSEC_ICV_LEN, secy->icv_len) ||
 	    nla_put_u64_64bit(skb, IFLA_MACSEC_CIPHER_SUITE,
-			      MACSEC_DEFAULT_CIPHER_ID, IFLA_MACSEC_PAD) ||
+			      csid, IFLA_MACSEC_PAD) ||
 	    nla_put_u8(skb, IFLA_MACSEC_ENCODING_SA, tx_sc->encoding_sa) ||
 	    nla_put_u8(skb, IFLA_MACSEC_ENCRYPT, tx_sc->encrypt) ||
 	    nla_put_u8(skb, IFLA_MACSEC_PROTECT, secy->protect_frames) ||
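
Supporting GCM-AES-256 means the cipher suite ID reported over netlink can no longer be a constant: nla_put_secy() and macsec_fill_info() both derive it from secy->key_len, and macsec_changelink_common() validates the inverse mapping (also accepting the legacy MACSEC_DEFAULT_CIPHER_ALT ID as 128-bit), which is why it now returns int and its callers propagate the error. The two dump-side switches could equally share one helper; a sketch of that consolidation (not how the patch is structured):

	static int key_len_to_csid_sketch(unsigned int key_len, u64 *csid)
	{
		switch (key_len) {
		case MACSEC_GCM_AES_128_SAK_LEN:	/* 16 bytes */
			*csid = MACSEC_CIPHER_ID_GCM_AES_128;
			return 0;
		case MACSEC_GCM_AES_256_SAK_LEN:	/* 32 bytes */
			*csid = MACSEC_CIPHER_ID_GCM_AES_256;
			return 0;
		default:
			return -EINVAL;
		}
	}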
diff --git a/drivers/net/netdevsim/Makefile b/drivers/net/netdevsim/Makefile
new file mode 100644
index 0000000..074ddeb
--- /dev/null
+++ b/drivers/net/netdevsim/Makefile
@@ -0,0 +1,7 @@
+# SPDX-License-Identifier: GPL-2.0
+
+obj-$(CONFIG_NETDEVSIM) += netdevsim.o
+
+netdevsim-objs := \
+	netdev.o \
+	bpf.o \
diff --git a/drivers/net/netdevsim/bpf.c b/drivers/net/netdevsim/bpf.c
new file mode 100644
index 0000000..5134d5c
--- /dev/null
+++ b/drivers/net/netdevsim/bpf.c
@@ -0,0 +1,373 @@
+/*
+ * Copyright (C) 2017 Netronome Systems, Inc.
+ *
+ * This software is licensed under the GNU General Public License Version 2,
+ * June 1991 as shown in the file COPYING in the top-level directory of this
+ * source tree.
+ *
+ * THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS"
+ * WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING,
+ * BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+ * FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE
+ * OF THE PROGRAM IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME
+ * THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
+ */
+
+#include <linux/bpf.h>
+#include <linux/bpf_verifier.h>
+#include <linux/debugfs.h>
+#include <linux/kernel.h>
+#include <linux/rtnetlink.h>
+#include <net/pkt_cls.h>
+
+#include "netdevsim.h"
+
+struct nsim_bpf_bound_prog {
+	struct netdevsim *ns;
+	struct bpf_prog *prog;
+	struct dentry *ddir;
+	const char *state;
+	bool is_loaded;
+	struct list_head l;
+};
+
+static int nsim_debugfs_bpf_string_read(struct seq_file *file, void *data)
+{
+	const char **str = file->private;
+
+	if (*str)
+		seq_printf(file, "%s\n", *str);
+
+	return 0;
+}
+
+static int nsim_debugfs_bpf_string_open(struct inode *inode, struct file *f)
+{
+	return single_open(f, nsim_debugfs_bpf_string_read, inode->i_private);
+}
+
+static const struct file_operations nsim_bpf_string_fops = {
+	.owner = THIS_MODULE,
+	.open = nsim_debugfs_bpf_string_open,
+	.release = single_release,
+	.read = seq_read,
+	.llseek = seq_lseek
+};
+
+static int
+nsim_bpf_verify_insn(struct bpf_verifier_env *env, int insn_idx, int prev_insn)
+{
+	struct nsim_bpf_bound_prog *state;
+
+	state = env->prog->aux->offload->dev_priv;
+	if (state->ns->bpf_bind_verifier_delay && !insn_idx)
+		msleep(state->ns->bpf_bind_verifier_delay);
+
+	return 0;
+}
+
+static const struct bpf_prog_offload_ops nsim_bpf_analyzer_ops = {
+	.insn_hook = nsim_bpf_verify_insn,
+};
+
+static bool nsim_xdp_offload_active(struct netdevsim *ns)
+{
+	return ns->xdp_prog_mode == XDP_ATTACHED_HW;
+}
+
+static void nsim_prog_set_loaded(struct bpf_prog *prog, bool loaded)
+{
+	struct nsim_bpf_bound_prog *state;
+
+	if (!prog || !prog->aux->offload)
+		return;
+
+	state = prog->aux->offload->dev_priv;
+	state->is_loaded = loaded;
+}
+
+static int
+nsim_bpf_offload(struct netdevsim *ns, struct bpf_prog *prog, bool oldprog)
+{
+	nsim_prog_set_loaded(ns->bpf_offloaded, false);
+
+	WARN(!!ns->bpf_offloaded != oldprog,
+	     "bad offload state, expected offload %sto be active",
+	     oldprog ? "" : "not ");
+	ns->bpf_offloaded = prog;
+	ns->bpf_offloaded_id = prog ? prog->aux->id : 0;
+	nsim_prog_set_loaded(prog, true);
+
+	return 0;
+}
+
+int nsim_bpf_setup_tc_block_cb(enum tc_setup_type type,
+			       void *type_data, void *cb_priv)
+{
+	struct tc_cls_bpf_offload *cls_bpf = type_data;
+	struct bpf_prog *prog = cls_bpf->prog;
+	struct netdevsim *ns = cb_priv;
+	struct bpf_prog *oldprog;
+
+	if (type != TC_SETUP_CLSBPF ||
+	    !tc_can_offload(ns->netdev) ||
+	    cls_bpf->common.protocol != htons(ETH_P_ALL) ||
+	    cls_bpf->common.chain_index)
+		return -EOPNOTSUPP;
+
+	if (!ns->bpf_tc_accept)
+		return -EOPNOTSUPP;
+	/* Note: progs without skip_sw will probably not be dev bound */
+	if (prog && !prog->aux->offload && !ns->bpf_tc_non_bound_accept)
+		return -EOPNOTSUPP;
+
+	if (cls_bpf->command != TC_CLSBPF_OFFLOAD)
+		return -EOPNOTSUPP;
+
+	oldprog = cls_bpf->oldprog;
+
+	/* Don't remove if oldprog doesn't match driver's state */
+	if (ns->bpf_offloaded != oldprog) {
+		oldprog = NULL;
+		if (!cls_bpf->prog)
+			return 0;
+		if (ns->bpf_offloaded)
+			return -EBUSY;
+	}
+
+	return nsim_bpf_offload(ns, cls_bpf->prog, oldprog);
+}
+
+int nsim_bpf_disable_tc(struct netdevsim *ns)
+{
+	if (ns->bpf_offloaded && !nsim_xdp_offload_active(ns))
+		return -EBUSY;
+	return 0;
+}
+
+static int nsim_xdp_offload_prog(struct netdevsim *ns, struct netdev_bpf *bpf)
+{
+	if (!nsim_xdp_offload_active(ns) && !bpf->prog)
+		return 0;
+	if (!nsim_xdp_offload_active(ns) && bpf->prog && ns->bpf_offloaded) {
+		NSIM_EA(bpf->extack, "TC program is already loaded");
+		return -EBUSY;
+	}
+
+	return nsim_bpf_offload(ns, bpf->prog, nsim_xdp_offload_active(ns));
+}
+
+static int nsim_xdp_set_prog(struct netdevsim *ns, struct netdev_bpf *bpf)
+{
+	int err;
+
+	if (ns->xdp_prog && (bpf->flags ^ ns->xdp_flags) & XDP_FLAGS_MODES) {
+		NSIM_EA(bpf->extack, "program loaded with different flags");
+		return -EBUSY;
+	}
+
+	if (bpf->command == XDP_SETUP_PROG && !ns->bpf_xdpdrv_accept) {
+		NSIM_EA(bpf->extack, "driver XDP disabled in DebugFS");
+		return -EOPNOTSUPP;
+	}
+	if (bpf->command == XDP_SETUP_PROG_HW && !ns->bpf_xdpoffload_accept) {
+		NSIM_EA(bpf->extack, "XDP offload disabled in DebugFS");
+		return -EOPNOTSUPP;
+	}
+
+	if (bpf->command == XDP_SETUP_PROG_HW) {
+		err = nsim_xdp_offload_prog(ns, bpf);
+		if (err)
+			return err;
+	}
+
+	if (ns->xdp_prog)
+		bpf_prog_put(ns->xdp_prog);
+
+	ns->xdp_prog = bpf->prog;
+	ns->xdp_flags = bpf->flags;
+
+	if (!bpf->prog)
+		ns->xdp_prog_mode = XDP_ATTACHED_NONE;
+	else if (bpf->command == XDP_SETUP_PROG)
+		ns->xdp_prog_mode = XDP_ATTACHED_DRV;
+	else
+		ns->xdp_prog_mode = XDP_ATTACHED_HW;
+
+	return 0;
+}
+
+static int nsim_bpf_create_prog(struct netdevsim *ns, struct bpf_prog *prog)
+{
+	struct nsim_bpf_bound_prog *state;
+	char name[16];
+
+	state = kzalloc(sizeof(*state), GFP_KERNEL);
+	if (!state)
+		return -ENOMEM;
+
+	state->ns = ns;
+	state->prog = prog;
+	state->state = "verify";
+
+	/* Program id is not populated yet when we create the state. */
+	sprintf(name, "%u", ns->prog_id_gen++);
+	state->ddir = debugfs_create_dir(name, ns->ddir_bpf_bound_progs);
+	if (IS_ERR_OR_NULL(state->ddir)) {
+		kfree(state);
+		return -ENOMEM;
+	}
+
+	debugfs_create_u32("id", 0400, state->ddir, &prog->aux->id);
+	debugfs_create_file("state", 0400, state->ddir,
+			    &state->state, &nsim_bpf_string_fops);
+	debugfs_create_bool("loaded", 0400, state->ddir, &state->is_loaded);
+
+	list_add_tail(&state->l, &ns->bpf_bound_progs);
+
+	prog->aux->offload->dev_priv = state;
+
+	return 0;
+}
+
+static void nsim_bpf_destroy_prog(struct bpf_prog *prog)
+{
+	struct nsim_bpf_bound_prog *state;
+
+	state = prog->aux->offload->dev_priv;
+	WARN(state->is_loaded,
+	     "offload state destroyed while program still bound");
+	debugfs_remove_recursive(state->ddir);
+	list_del(&state->l);
+	kfree(state);
+}
+
+static int nsim_setup_prog_checks(struct netdevsim *ns, struct netdev_bpf *bpf)
+{
+	if (bpf->prog && bpf->prog->aux->offload) {
+		NSIM_EA(bpf->extack, "attempt to load offloaded prog to drv");
+		return -EINVAL;
+	}
+	if (ns->netdev->mtu > NSIM_XDP_MAX_MTU) {
+		NSIM_EA(bpf->extack, "MTU too large w/ XDP enabled");
+		return -EINVAL;
+	}
+	if (nsim_xdp_offload_active(ns)) {
+		NSIM_EA(bpf->extack, "xdp offload active, can't load drv prog");
+		return -EBUSY;
+	}
+	return 0;
+}
+
+static int
+nsim_setup_prog_hw_checks(struct netdevsim *ns, struct netdev_bpf *bpf)
+{
+	struct nsim_bpf_bound_prog *state;
+
+	if (!bpf->prog)
+		return 0;
+
+	if (!bpf->prog->aux->offload) {
+		NSIM_EA(bpf->extack, "xdpoffload of non-bound program");
+		return -EINVAL;
+	}
+	if (bpf->prog->aux->offload->netdev != ns->netdev) {
+		NSIM_EA(bpf->extack, "program bound to different dev");
+		return -EINVAL;
+	}
+
+	state = bpf->prog->aux->offload->dev_priv;
+	if (WARN_ON(strcmp(state->state, "xlated"))) {
+		NSIM_EA(bpf->extack, "offloading program in bad state");
+		return -EINVAL;
+	}
+	return 0;
+}
+
+int nsim_bpf(struct net_device *dev, struct netdev_bpf *bpf)
+{
+	struct netdevsim *ns = netdev_priv(dev);
+	struct nsim_bpf_bound_prog *state;
+	int err;
+
+	ASSERT_RTNL();
+
+	switch (bpf->command) {
+	case BPF_OFFLOAD_VERIFIER_PREP:
+		if (!ns->bpf_bind_accept)
+			return -EOPNOTSUPP;
+
+		err = nsim_bpf_create_prog(ns, bpf->verifier.prog);
+		if (err)
+			return err;
+
+		bpf->verifier.ops = &nsim_bpf_analyzer_ops;
+		return 0;
+	case BPF_OFFLOAD_TRANSLATE:
+		state = bpf->offload.prog->aux->offload->dev_priv;
+
+		state->state = "xlated";
+		return 0;
+	case BPF_OFFLOAD_DESTROY:
+		nsim_bpf_destroy_prog(bpf->offload.prog);
+		return 0;
+	case XDP_QUERY_PROG:
+		bpf->prog_attached = ns->xdp_prog_mode;
+		bpf->prog_id = ns->xdp_prog ? ns->xdp_prog->aux->id : 0;
+		bpf->prog_flags = ns->xdp_prog ? ns->xdp_flags : 0;
+		return 0;
+	case XDP_SETUP_PROG:
+		err = nsim_setup_prog_checks(ns, bpf);
+		if (err)
+			return err;
+
+		return nsim_xdp_set_prog(ns, bpf);
+	case XDP_SETUP_PROG_HW:
+		err = nsim_setup_prog_hw_checks(ns, bpf);
+		if (err)
+			return err;
+
+		return nsim_xdp_set_prog(ns, bpf);
+	default:
+		return -EINVAL;
+	}
+}
+
+int nsim_bpf_init(struct netdevsim *ns)
+{
+	INIT_LIST_HEAD(&ns->bpf_bound_progs);
+
+	debugfs_create_u32("bpf_offloaded_id", 0400, ns->ddir,
+			   &ns->bpf_offloaded_id);
+
+	ns->bpf_bind_accept = true;
+	debugfs_create_bool("bpf_bind_accept", 0600, ns->ddir,
+			    &ns->bpf_bind_accept);
+	debugfs_create_u32("bpf_bind_verifier_delay", 0600, ns->ddir,
+			   &ns->bpf_bind_verifier_delay);
+	ns->ddir_bpf_bound_progs =
+		debugfs_create_dir("bpf_bound_progs", ns->ddir);
+	if (IS_ERR_OR_NULL(ns->ddir_bpf_bound_progs))
+		return -ENOMEM;
+
+	ns->bpf_tc_accept = true;
+	debugfs_create_bool("bpf_tc_accept", 0600, ns->ddir,
+			    &ns->bpf_tc_accept);
+	debugfs_create_bool("bpf_tc_non_bound_accept", 0600, ns->ddir,
+			    &ns->bpf_tc_non_bound_accept);
+	ns->bpf_xdpdrv_accept = true;
+	debugfs_create_bool("bpf_xdpdrv_accept", 0600, ns->ddir,
+			    &ns->bpf_xdpdrv_accept);
+	ns->bpf_xdpoffload_accept = true;
+	debugfs_create_bool("bpf_xdpoffload_accept", 0600, ns->ddir,
+			    &ns->bpf_xdpoffload_accept);
+
+	return 0;
+}
+
+void nsim_bpf_uninit(struct netdevsim *ns)
+{
+	WARN_ON(!list_empty(&ns->bpf_bound_progs));
+	WARN_ON(ns->xdp_prog);
+	WARN_ON(ns->bpf_offloaded);
+}
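
nsim_bpf_init() publishes the accept/deny knobs (bpf_bind_accept, bpf_tc_accept, bpf_tc_non_bound_accept, bpf_xdpdrv_accept, bpf_xdpoffload_accept, plus bpf_bind_verifier_delay) under the per-device DebugFS directory, so tests can force each attach path to fail. Assuming debugfs is mounted at /sys/kernel/debug and given the directory layout set up by nsim_module_init()/nsim_init() below (netdevsim/<netdev name>/), a userspace helper to flip a knob might look like this (sketch; the path layout is inferred, not documented in the patch):

	#include <stdio.h>

	static int nsim_write_knob(const char *ifname, const char *knob,
				   const char *val)
	{
		char path[256];
		FILE *f;

		snprintf(path, sizeof(path),
			 "/sys/kernel/debug/netdevsim/%s/%s", ifname, knob);
		f = fopen(path, "w");
		if (!f)
			return -1;
		fputs(val, f);	/* debugfs bools accept "Y"/"N" or "1"/"0" */
		return fclose(f);
	}

	/* e.g. nsim_write_knob("eth0", "bpf_tc_accept", "N"); */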
diff --git a/drivers/net/netdevsim/netdev.c b/drivers/net/netdevsim/netdev.c
new file mode 100644
index 0000000..3fd5679
--- /dev/null
+++ b/drivers/net/netdevsim/netdev.c
@@ -0,0 +1,504 @@
+/*
+ * Copyright (C) 2017 Netronome Systems, Inc.
+ *
+ * This software is licensed under the GNU General Public License Version 2,
+ * June 1991 as shown in the file COPYING in the top-level directory of this
+ * source tree.
+ *
+ * THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS"
+ * WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING,
+ * BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+ * FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE
+ * OF THE PROGRAM IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME
+ * THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
+ */
+
+#include <linux/debugfs.h>
+#include <linux/etherdevice.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/netdevice.h>
+#include <linux/slab.h>
+#include <net/netlink.h>
+#include <net/pkt_cls.h>
+#include <net/rtnetlink.h>
+
+#include "netdevsim.h"
+
+struct nsim_vf_config {
+	int link_state;
+	u16 min_tx_rate;
+	u16 max_tx_rate;
+	u16 vlan;
+	__be16 vlan_proto;
+	u16 qos;
+	u8 vf_mac[ETH_ALEN];
+	bool spoofchk_enabled;
+	bool trusted;
+	bool rss_query_enabled;
+};
+
+static u32 nsim_dev_id;
+
+static int nsim_num_vf(struct device *dev)
+{
+	struct netdevsim *ns = to_nsim(dev);
+
+	return ns->num_vfs;
+}
+
+static struct bus_type nsim_bus = {
+	.name		= DRV_NAME,
+	.dev_name	= DRV_NAME,
+	.num_vf		= nsim_num_vf,
+};
+
+static int nsim_vfs_enable(struct netdevsim *ns, unsigned int num_vfs)
+{
+	ns->vfconfigs = kcalloc(num_vfs, sizeof(struct nsim_vf_config),
+				GFP_KERNEL);
+	if (!ns->vfconfigs)
+		return -ENOMEM;
+	ns->num_vfs = num_vfs;
+
+	return 0;
+}
+
+static void nsim_vfs_disable(struct netdevsim *ns)
+{
+	kfree(ns->vfconfigs);
+	ns->vfconfigs = NULL;
+	ns->num_vfs = 0;
+}
+
+static ssize_t
+nsim_numvfs_store(struct device *dev, struct device_attribute *attr,
+		  const char *buf, size_t count)
+{
+	struct netdevsim *ns = to_nsim(dev);
+	unsigned int num_vfs;
+	int ret;
+
+	ret = kstrtouint(buf, 0, &num_vfs);
+	if (ret)
+		return ret;
+
+	rtnl_lock();
+	if (ns->num_vfs == num_vfs)
+		goto exit_good;
+	if (ns->num_vfs && num_vfs) {
+		ret = -EBUSY;
+		goto exit_unlock;
+	}
+
+	if (num_vfs) {
+		ret = nsim_vfs_enable(ns, num_vfs);
+		if (ret)
+			goto exit_unlock;
+	} else {
+		nsim_vfs_disable(ns);
+	}
+exit_good:
+	ret = count;
+exit_unlock:
+	rtnl_unlock();
+
+	return ret;
+}
+
+static ssize_t
+nsim_numvfs_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+	struct netdevsim *ns = to_nsim(dev);
+
+	return sprintf(buf, "%u\n", ns->num_vfs);
+}
+
+static struct device_attribute nsim_numvfs_attr =
+	__ATTR(sriov_numvfs, 0664, nsim_numvfs_show, nsim_numvfs_store);
+
+static struct attribute *nsim_dev_attrs[] = {
+	&nsim_numvfs_attr.attr,
+	NULL,
+};
+
+static const struct attribute_group nsim_dev_attr_group = {
+	.attrs = nsim_dev_attrs,
+};
+
+static const struct attribute_group *nsim_dev_attr_groups[] = {
+	&nsim_dev_attr_group,
+	NULL,
+};
+
+static void nsim_dev_release(struct device *dev)
+{
+	struct netdevsim *ns = to_nsim(dev);
+
+	nsim_vfs_disable(ns);
+	free_netdev(ns->netdev);
+}
+
+static struct device_type nsim_dev_type = {
+	.groups = nsim_dev_attr_groups,
+	.release = nsim_dev_release,
+};
+
+static int nsim_init(struct net_device *dev)
+{
+	struct netdevsim *ns = netdev_priv(dev);
+	int err;
+
+	ns->netdev = dev;
+	ns->ddir = debugfs_create_dir(netdev_name(dev), nsim_ddir);
+	if (IS_ERR_OR_NULL(ns->ddir))
+		return -ENOMEM;
+
+	err = nsim_bpf_init(ns);
+	if (err)
+		goto err_debugfs_destroy;
+
+	ns->dev.id = nsim_dev_id++;
+	ns->dev.bus = &nsim_bus;
+	ns->dev.type = &nsim_dev_type;
+	err = device_register(&ns->dev);
+	if (err)
+		goto err_bpf_uninit;
+
+	SET_NETDEV_DEV(dev, &ns->dev);
+
+	return 0;
+
+err_bpf_uninit:
+	nsim_bpf_uninit(ns);
+err_debugfs_destroy:
+	debugfs_remove_recursive(ns->ddir);
+	return err;
+}
+
+static void nsim_uninit(struct net_device *dev)
+{
+	struct netdevsim *ns = netdev_priv(dev);
+
+	debugfs_remove_recursive(ns->ddir);
+	nsim_bpf_uninit(ns);
+}
+
+static void nsim_free(struct net_device *dev)
+{
+	struct netdevsim *ns = netdev_priv(dev);
+
+	device_unregister(&ns->dev);
+	/* netdev and vf state will be freed from device_release() */
+}
+
+static netdev_tx_t nsim_start_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+	struct netdevsim *ns = netdev_priv(dev);
+
+	u64_stats_update_begin(&ns->syncp);
+	ns->tx_packets++;
+	ns->tx_bytes += skb->len;
+	u64_stats_update_end(&ns->syncp);
+
+	dev_kfree_skb(skb);
+
+	return NETDEV_TX_OK;
+}
+
+static void nsim_set_rx_mode(struct net_device *dev)
+{
+}
+
+static int nsim_change_mtu(struct net_device *dev, int new_mtu)
+{
+	struct netdevsim *ns = netdev_priv(dev);
+
+	if (ns->xdp_prog_mode == XDP_ATTACHED_DRV &&
+	    new_mtu > NSIM_XDP_MAX_MTU)
+		return -EBUSY;
+
+	dev->mtu = new_mtu;
+
+	return 0;
+}
+
+static void
+nsim_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
+{
+	struct netdevsim *ns = netdev_priv(dev);
+	unsigned int start;
+
+	do {
+		start = u64_stats_fetch_begin(&ns->syncp);
+		stats->tx_bytes = ns->tx_bytes;
+		stats->tx_packets = ns->tx_packets;
+	} while (u64_stats_fetch_retry(&ns->syncp, start));
+}
+
+static int
+nsim_setup_tc_block_cb(enum tc_setup_type type, void *type_data, void *cb_priv)
+{
+	return nsim_bpf_setup_tc_block_cb(type, type_data, cb_priv);
+}
+
+static int
+nsim_setup_tc_block(struct net_device *dev, struct tc_block_offload *f)
+{
+	struct netdevsim *ns = netdev_priv(dev);
+
+	if (f->binder_type != TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
+		return -EOPNOTSUPP;
+
+	switch (f->command) {
+	case TC_BLOCK_BIND:
+		return tcf_block_cb_register(f->block, nsim_setup_tc_block_cb,
+					     ns, ns);
+	case TC_BLOCK_UNBIND:
+		tcf_block_cb_unregister(f->block, nsim_setup_tc_block_cb, ns);
+		return 0;
+	default:
+		return -EOPNOTSUPP;
+	}
+}
+
+static int nsim_set_vf_mac(struct net_device *dev, int vf, u8 *mac)
+{
+	struct netdevsim *ns = netdev_priv(dev);
+
+	/* Only refuse multicast addresses; a zero address can mean unset/any. */
+	if (vf >= ns->num_vfs || is_multicast_ether_addr(mac))
+		return -EINVAL;
+	memcpy(ns->vfconfigs[vf].vf_mac, mac, ETH_ALEN);
+
+	return 0;
+}
+
+static int nsim_set_vf_vlan(struct net_device *dev, int vf,
+			    u16 vlan, u8 qos, __be16 vlan_proto)
+{
+	struct netdevsim *ns = netdev_priv(dev);
+
+	if (vf >= ns->num_vfs || vlan > 4095 || qos > 7)
+		return -EINVAL;
+
+	ns->vfconfigs[vf].vlan = vlan;
+	ns->vfconfigs[vf].qos = qos;
+	ns->vfconfigs[vf].vlan_proto = vlan_proto;
+
+	return 0;
+}
+
+static int nsim_set_vf_rate(struct net_device *dev, int vf, int min, int max)
+{
+	struct netdevsim *ns = netdev_priv(dev);
+
+	if (vf >= ns->num_vfs)
+		return -EINVAL;
+
+	ns->vfconfigs[vf].min_tx_rate = min;
+	ns->vfconfigs[vf].max_tx_rate = max;
+
+	return 0;
+}
+
+static int nsim_set_vf_spoofchk(struct net_device *dev, int vf, bool val)
+{
+	struct netdevsim *ns = netdev_priv(dev);
+
+	if (vf >= ns->num_vfs)
+		return -EINVAL;
+	ns->vfconfigs[vf].spoofchk_enabled = val;
+
+	return 0;
+}
+
+static int nsim_set_vf_rss_query_en(struct net_device *dev, int vf, bool val)
+{
+	struct netdevsim *ns = netdev_priv(dev);
+
+	if (vf >= ns->num_vfs)
+		return -EINVAL;
+	ns->vfconfigs[vf].rss_query_enabled = val;
+
+	return 0;
+}
+
+static int nsim_set_vf_trust(struct net_device *dev, int vf, bool val)
+{
+	struct netdevsim *ns = netdev_priv(dev);
+
+	if (vf >= ns->num_vfs)
+		return -EINVAL;
+	ns->vfconfigs[vf].trusted = val;
+
+	return 0;
+}
+
+static int
+nsim_get_vf_config(struct net_device *dev, int vf, struct ifla_vf_info *ivi)
+{
+	struct netdevsim *ns = netdev_priv(dev);
+
+	if (vf >= ns->num_vfs)
+		return -EINVAL;
+
+	ivi->vf = vf;
+	ivi->linkstate = ns->vfconfigs[vf].link_state;
+	ivi->min_tx_rate = ns->vfconfigs[vf].min_tx_rate;
+	ivi->max_tx_rate = ns->vfconfigs[vf].max_tx_rate;
+	ivi->vlan = ns->vfconfigs[vf].vlan;
+	ivi->vlan_proto = ns->vfconfigs[vf].vlan_proto;
+	ivi->qos = ns->vfconfigs[vf].qos;
+	memcpy(&ivi->mac, ns->vfconfigs[vf].vf_mac, ETH_ALEN);
+	ivi->spoofchk = ns->vfconfigs[vf].spoofchk_enabled;
+	ivi->trusted = ns->vfconfigs[vf].trusted;
+	ivi->rss_query_en = ns->vfconfigs[vf].rss_query_enabled;
+
+	return 0;
+}
+
+static int nsim_set_vf_link_state(struct net_device *dev, int vf, int state)
+{
+	struct netdevsim *ns = netdev_priv(dev);
+
+	if (vf >= ns->num_vfs)
+		return -EINVAL;
+
+	switch (state) {
+	case IFLA_VF_LINK_STATE_AUTO:
+	case IFLA_VF_LINK_STATE_ENABLE:
+	case IFLA_VF_LINK_STATE_DISABLE:
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	ns->vfconfigs[vf].link_state = state;
+
+	return 0;
+}
+
+static int
+nsim_setup_tc(struct net_device *dev, enum tc_setup_type type, void *type_data)
+{
+	switch (type) {
+	case TC_SETUP_BLOCK:
+		return nsim_setup_tc_block(dev, type_data);
+	default:
+		return -EOPNOTSUPP;
+	}
+}
+
+static int
+nsim_set_features(struct net_device *dev, netdev_features_t features)
+{
+	struct netdevsim *ns = netdev_priv(dev);
+
+	if ((dev->features & NETIF_F_HW_TC) > (features & NETIF_F_HW_TC))
+		return nsim_bpf_disable_tc(ns);
+
+	return 0;
+}
+
+static const struct net_device_ops nsim_netdev_ops = {
+	.ndo_init		= nsim_init,
+	.ndo_uninit		= nsim_uninit,
+	.ndo_start_xmit		= nsim_start_xmit,
+	.ndo_set_rx_mode	= nsim_set_rx_mode,
+	.ndo_set_mac_address	= eth_mac_addr,
+	.ndo_validate_addr	= eth_validate_addr,
+	.ndo_change_mtu		= nsim_change_mtu,
+	.ndo_get_stats64	= nsim_get_stats64,
+	.ndo_set_vf_mac		= nsim_set_vf_mac,
+	.ndo_set_vf_vlan	= nsim_set_vf_vlan,
+	.ndo_set_vf_rate	= nsim_set_vf_rate,
+	.ndo_set_vf_spoofchk	= nsim_set_vf_spoofchk,
+	.ndo_set_vf_trust	= nsim_set_vf_trust,
+	.ndo_get_vf_config	= nsim_get_vf_config,
+	.ndo_set_vf_link_state	= nsim_set_vf_link_state,
+	.ndo_set_vf_rss_query_en = nsim_set_vf_rss_query_en,
+	.ndo_setup_tc		= nsim_setup_tc,
+	.ndo_set_features	= nsim_set_features,
+	.ndo_bpf		= nsim_bpf,
+};
+
+static void nsim_setup(struct net_device *dev)
+{
+	ether_setup(dev);
+	eth_hw_addr_random(dev);
+
+	dev->netdev_ops = &nsim_netdev_ops;
+	dev->priv_destructor = nsim_free;
+
+	dev->tx_queue_len = 0;
+	dev->flags |= IFF_NOARP;
+	dev->flags &= ~IFF_MULTICAST;
+	dev->priv_flags |= IFF_LIVE_ADDR_CHANGE |
+			   IFF_NO_QUEUE;
+	dev->features |= NETIF_F_HIGHDMA |
+			 NETIF_F_SG |
+			 NETIF_F_FRAGLIST |
+			 NETIF_F_HW_CSUM |
+			 NETIF_F_TSO;
+	dev->hw_features |= NETIF_F_HW_TC;
+	dev->max_mtu = ETH_MAX_MTU;
+}
+
+static int nsim_validate(struct nlattr *tb[], struct nlattr *data[],
+			 struct netlink_ext_ack *extack)
+{
+	if (tb[IFLA_ADDRESS]) {
+		if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN)
+			return -EINVAL;
+		if (!is_valid_ether_addr(nla_data(tb[IFLA_ADDRESS])))
+			return -EADDRNOTAVAIL;
+	}
+	return 0;
+}
+
+static struct rtnl_link_ops nsim_link_ops __read_mostly = {
+	.kind		= DRV_NAME,
+	.priv_size	= sizeof(struct netdevsim),
+	.setup		= nsim_setup,
+	.validate	= nsim_validate,
+};
+
+struct dentry *nsim_ddir;
+
+static int __init nsim_module_init(void)
+{
+	int err;
+
+	nsim_ddir = debugfs_create_dir(DRV_NAME, NULL);
+	if (IS_ERR_OR_NULL(nsim_ddir))
+		return -ENOMEM;
+
+	err = bus_register(&nsim_bus);
+	if (err)
+		goto err_debugfs_destroy;
+
+	err = rtnl_link_register(&nsim_link_ops);
+	if (err)
+		goto err_unreg_bus;
+
+	return 0;
+
+err_unreg_bus:
+	bus_unregister(&nsim_bus);
+err_debugfs_destroy:
+	debugfs_remove_recursive(nsim_ddir);
+	return err;
+}
+
+static void __exit nsim_module_exit(void)
+{
+	rtnl_link_unregister(&nsim_link_ops);
+	bus_unregister(&nsim_bus);
+	debugfs_remove_recursive(nsim_ddir);
+}
+
+module_init(nsim_module_init);
+module_exit(nsim_module_exit);
+MODULE_LICENSE("GPL");
+MODULE_ALIAS_RTNL_LINK(DRV_NAME);
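
Registering on a private "netdevsim" bus with a sriov_numvfs attribute lets the VF-related ndo handlers above be exercised without hardware. nsim_numvfs_store() only allows 0 -> N and N -> 0 transitions; changing N to M directly fails with -EBUSY, mirroring the PCI SR-IOV sysfs convention. The rule it enforces, in isolation (sketch):

	static bool numvfs_transition_ok(unsigned int cur, unsigned int want)
	{
		/* same value, enable from zero, or disable to zero */
		return cur == want || cur == 0 || want == 0;
	}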
diff --git a/drivers/net/netdevsim/netdevsim.h b/drivers/net/netdevsim/netdevsim.h
new file mode 100644
index 0000000..32270de
--- /dev/null
+++ b/drivers/net/netdevsim/netdevsim.h
@@ -0,0 +1,78 @@
+/*
+ * Copyright (C) 2017 Netronome Systems, Inc.
+ *
+ * This software is licensed under the GNU General Public License Version 2,
+ * June 1991 as shown in the file COPYING in the top-level directory of this
+ * source tree.
+ *
+ * THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS"
+ * WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING,
+ * BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+ * FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE
+ * OF THE PROGRAM IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME
+ * THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
+ */
+
+#include <linux/device.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/netdevice.h>
+#include <linux/u64_stats_sync.h>
+
+#define DRV_NAME	"netdevsim"
+
+#define NSIM_XDP_MAX_MTU	4000
+
+#define NSIM_EA(extack, msg)	NL_SET_ERR_MSG_MOD((extack), msg)
+
+struct bpf_prog;
+struct dentry;
+struct nsim_vf_config;
+
+struct netdevsim {
+	struct net_device *netdev;
+
+	u64 tx_packets;
+	u64 tx_bytes;
+	struct u64_stats_sync syncp;
+
+	struct device dev;
+
+	struct dentry *ddir;
+
+	unsigned int num_vfs;
+	struct nsim_vf_config *vfconfigs;
+
+	struct bpf_prog	*bpf_offloaded;
+	u32 bpf_offloaded_id;
+
+	u32 xdp_flags;
+	int xdp_prog_mode;
+	struct bpf_prog	*xdp_prog;
+
+	u32 prog_id_gen;
+
+	bool bpf_bind_accept;
+	u32 bpf_bind_verifier_delay;
+	struct dentry *ddir_bpf_bound_progs;
+	struct list_head bpf_bound_progs;
+
+	bool bpf_tc_accept;
+	bool bpf_tc_non_bound_accept;
+	bool bpf_xdpdrv_accept;
+	bool bpf_xdpoffload_accept;
+};
+
+extern struct dentry *nsim_ddir;
+
+int nsim_bpf_init(struct netdevsim *ns);
+void nsim_bpf_uninit(struct netdevsim *ns);
+int nsim_bpf(struct net_device *dev, struct netdev_bpf *bpf);
+int nsim_bpf_disable_tc(struct netdevsim *ns);
+int nsim_bpf_setup_tc_block_cb(enum tc_setup_type type,
+			       void *type_data, void *cb_priv);
+
+static inline struct netdevsim *to_nsim(struct device *ptr)
+{
+	return container_of(ptr, struct netdevsim, dev);
+}
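
to_nsim() is the standard container_of() idiom: struct netdevsim embeds its struct device, so bus callbacks such as nsim_num_vf(), which only receive a struct device *, can recover the driver state with pointer arithmetic. Expanded, it is roughly:

	/* illustrative expansion only */
	#define to_nsim_sketch(ptr) \
		((struct netdevsim *)((char *)(ptr) - offsetof(struct netdevsim, dev)))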
diff --git a/drivers/net/phy/amd.c b/drivers/net/phy/amd.c
index 18141c0..6fe5dc9 100644
--- a/drivers/net/phy/amd.c
+++ b/drivers/net/phy/amd.c
@@ -68,8 +68,6 @@ static struct phy_driver am79c_driver[] = { {
 	.features	= PHY_BASIC_FEATURES,
 	.flags		= PHY_HAS_INTERRUPT,
 	.config_init	= am79c_config_init,
-	.config_aneg	= genphy_config_aneg,
-	.read_status	= genphy_read_status,
 	.ack_interrupt	= am79c_ack_interrupt,
 	.config_intr	= am79c_config_intr,
 } };
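
This hunk and the many like it below delete .config_aneg = genphy_config_aneg and .read_status = genphy_read_status assignments. The deletions assume the phylib core now falls back to the generic implementations when a driver leaves these callbacks NULL, roughly as in the sketch below (simplified; the core fallback itself is not part of this diff). Drivers that genuinely override the generic behavior, such as bcm5481_config_aneg or bcm5482_read_status further down, keep their callbacks.

	static int phy_config_aneg_sketch(struct phy_device *phydev)
	{
		if (phydev->drv->config_aneg)
			return phydev->drv->config_aneg(phydev);

		return genphy_config_aneg(phydev);
	}

	static int phy_read_status_sketch(struct phy_device *phydev)
	{
		if (phydev->drv->read_status)
			return phydev->drv->read_status(phydev);

		return genphy_read_status(phydev);
	}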
diff --git a/drivers/net/phy/at803x.c b/drivers/net/phy/at803x.c
index e911e49..411cf10 100644
--- a/drivers/net/phy/at803x.c
+++ b/drivers/net/phy/at803x.c
@@ -71,7 +71,6 @@ MODULE_LICENSE("GPL");
 
 struct at803x_priv {
 	bool phy_reset:1;
-	struct gpio_desc *gpiod_reset;
 };
 
 struct at803x_context {
@@ -216,56 +215,33 @@ static int at803x_suspend(struct phy_device *phydev)
 	int value;
 	int wol_enabled;
 
-	mutex_lock(&phydev->lock);
-
 	value = phy_read(phydev, AT803X_INTR_ENABLE);
 	wol_enabled = value & AT803X_INTR_ENABLE_WOL;
 
-	value = phy_read(phydev, MII_BMCR);
-
 	if (wol_enabled)
-		value |= BMCR_ISOLATE;
+		value = BMCR_ISOLATE;
 	else
-		value |= BMCR_PDOWN;
+		value = BMCR_PDOWN;
 
-	phy_write(phydev, MII_BMCR, value);
-
-	mutex_unlock(&phydev->lock);
+	phy_modify(phydev, MII_BMCR, 0, value);
 
 	return 0;
 }
 
 static int at803x_resume(struct phy_device *phydev)
 {
-	int value;
-
-	value = phy_read(phydev, MII_BMCR);
-	value &= ~(BMCR_PDOWN | BMCR_ISOLATE);
-	phy_write(phydev, MII_BMCR, value);
-
-	return 0;
+	return phy_modify(phydev, MII_BMCR, BMCR_PDOWN | BMCR_ISOLATE, 0);
 }
 
 static int at803x_probe(struct phy_device *phydev)
 {
 	struct device *dev = &phydev->mdio.dev;
 	struct at803x_priv *priv;
-	struct gpio_desc *gpiod_reset;
 
 	priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
 	if (!priv)
 		return -ENOMEM;
 
-	if (phydev->drv->phy_id != ATH8030_PHY_ID)
-		goto does_not_require_reset_workaround;
-
-	gpiod_reset = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_LOW);
-	if (IS_ERR(gpiod_reset))
-		return PTR_ERR(gpiod_reset);
-
-	priv->gpiod_reset = gpiod_reset;
-
-does_not_require_reset_workaround:
 	phydev->priv = priv;
 
 	return 0;
@@ -339,14 +315,14 @@ static void at803x_link_change_notify(struct phy_device *phydev)
 	 * cannot recover from by software.
 	 */
 	if (phydev->state == PHY_NOLINK) {
-		if (priv->gpiod_reset && !priv->phy_reset) {
+		if (phydev->mdio.reset && !priv->phy_reset) {
 			struct at803x_context context;
 
 			at803x_context_save(phydev, &context);
 
-			gpiod_set_value(priv->gpiod_reset, 1);
+			phy_device_reset(phydev, 1);
 			msleep(1);
-			gpiod_set_value(priv->gpiod_reset, 0);
+			phy_device_reset(phydev, 0);
 			msleep(1);
 
 			at803x_context_restore(phydev, &context);
@@ -404,8 +380,6 @@ static struct phy_driver at803x_driver[] = {
 	.resume			= at803x_resume,
 	.features		= PHY_GBIT_FEATURES,
 	.flags			= PHY_HAS_INTERRUPT,
-	.config_aneg		= genphy_config_aneg,
-	.read_status		= genphy_read_status,
 	.ack_interrupt		= at803x_ack_interrupt,
 	.config_intr		= at803x_config_intr,
 }, {
@@ -422,8 +396,6 @@ static struct phy_driver at803x_driver[] = {
 	.resume			= at803x_resume,
 	.features		= PHY_BASIC_FEATURES,
 	.flags			= PHY_HAS_INTERRUPT,
-	.config_aneg		= genphy_config_aneg,
-	.read_status		= genphy_read_status,
 	.ack_interrupt		= at803x_ack_interrupt,
 	.config_intr		= at803x_config_intr,
 }, {
@@ -439,8 +411,6 @@ static struct phy_driver at803x_driver[] = {
 	.resume			= at803x_resume,
 	.features		= PHY_GBIT_FEATURES,
 	.flags			= PHY_HAS_INTERRUPT,
-	.config_aneg		= genphy_config_aneg,
-	.read_status		= genphy_read_status,
 	.aneg_done		= at803x_aneg_done,
 	.ack_interrupt		= &at803x_ack_interrupt,
 	.config_intr		= &at803x_config_intr,
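
at803x_suspend()/at803x_resume() drop the open-coded read-modify-write (and the phydev->lock juggling) in favor of phy_modify(phydev, regnum, mask, set), whose contract is clear-then-set. Functionally it behaves like the unlocked sketch below; the real helper may additionally serialize against other MDIO accesses:

	static int phy_modify_sketch(struct phy_device *phydev, u32 regnum,
				     u16 mask, u16 set)
	{
		int old = phy_read(phydev, regnum);

		if (old < 0)
			return old;

		return phy_write(phydev, regnum, (old & ~mask) | set);
	}

This is why at803x_resume() collapses to a single call clearing BMCR_PDOWN | BMCR_ISOLATE.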
diff --git a/drivers/net/phy/bcm-cygnus.c b/drivers/net/phy/bcm-cygnus.c
index 3fe8cc5..6838129 100644
--- a/drivers/net/phy/bcm-cygnus.c
+++ b/drivers/net/phy/bcm-cygnus.c
@@ -136,8 +136,6 @@ static struct phy_driver bcm_cygnus_phy_driver[] = {
 	.name          = "Broadcom Cygnus PHY",
 	.features      = PHY_GBIT_FEATURES,
 	.config_init   = bcm_cygnus_config_init,
-	.config_aneg   = genphy_config_aneg,
-	.read_status   = genphy_read_status,
 	.ack_interrupt = bcm_phy_ack_intr,
 	.config_intr   = bcm_phy_config_intr,
 	.suspend       = genphy_suspend,
diff --git a/drivers/net/phy/bcm63xx.c b/drivers/net/phy/bcm63xx.c
index b0492ef..cf14613 100644
--- a/drivers/net/phy/bcm63xx.c
+++ b/drivers/net/phy/bcm63xx.c
@@ -69,8 +69,6 @@ static struct phy_driver bcm63xx_driver[] = {
 	.features	= (PHY_BASIC_FEATURES | SUPPORTED_Pause),
 	.flags		= PHY_HAS_INTERRUPT | PHY_IS_INTERNAL,
 	.config_init	= bcm63xx_config_init,
-	.config_aneg	= genphy_config_aneg,
-	.read_status	= genphy_read_status,
 	.ack_interrupt	= bcm_phy_ack_intr,
 	.config_intr	= bcm63xx_config_intr,
 }, {
@@ -81,8 +79,6 @@ static struct phy_driver bcm63xx_driver[] = {
 	.features	= (PHY_BASIC_FEATURES | SUPPORTED_Pause),
 	.flags		= PHY_HAS_INTERRUPT | PHY_IS_INTERNAL,
 	.config_init	= bcm63xx_config_init,
-	.config_aneg	= genphy_config_aneg,
-	.read_status	= genphy_read_status,
 	.ack_interrupt	= bcm_phy_ack_intr,
 	.config_intr	= bcm63xx_config_intr,
 } };
diff --git a/drivers/net/phy/bcm7xxx.c b/drivers/net/phy/bcm7xxx.c
index 8b33f68..421feb8 100644
--- a/drivers/net/phy/bcm7xxx.c
+++ b/drivers/net/phy/bcm7xxx.c
@@ -611,8 +611,6 @@ static int bcm7xxx_28nm_probe(struct phy_device *phydev)
 	.features	= PHY_GBIT_FEATURES,				\
 	.flags		= PHY_IS_INTERNAL,				\
 	.config_init	= bcm7xxx_28nm_config_init,			\
-	.config_aneg	= genphy_config_aneg,				\
-	.read_status	= genphy_read_status,				\
 	.resume		= bcm7xxx_28nm_resume,				\
 	.get_tunable	= bcm7xxx_28nm_get_tunable,			\
 	.set_tunable	= bcm7xxx_28nm_set_tunable,			\
@@ -630,8 +628,6 @@ static int bcm7xxx_28nm_probe(struct phy_device *phydev)
 	.features	= PHY_BASIC_FEATURES,				\
 	.flags		= PHY_IS_INTERNAL,				\
 	.config_init	= bcm7xxx_28nm_ephy_config_init,		\
-	.config_aneg	= genphy_config_aneg,				\
-	.read_status	= genphy_read_status,				\
 	.resume		= bcm7xxx_28nm_ephy_resume,			\
 	.get_sset_count	= bcm_phy_get_sset_count,			\
 	.get_strings	= bcm_phy_get_strings,				\
@@ -647,8 +643,6 @@ static int bcm7xxx_28nm_probe(struct phy_device *phydev)
 	.features       = PHY_BASIC_FEATURES,				\
 	.flags          = PHY_IS_INTERNAL,				\
 	.config_init    = bcm7xxx_config_init,				\
-	.config_aneg    = genphy_config_aneg,				\
-	.read_status    = genphy_read_status,				\
 	.suspend        = bcm7xxx_suspend,				\
 	.resume         = bcm7xxx_config_init,				\
 }
diff --git a/drivers/net/phy/broadcom.c b/drivers/net/phy/broadcom.c
index d7ed69d..3bb6b66 100644
--- a/drivers/net/phy/broadcom.c
+++ b/drivers/net/phy/broadcom.c
@@ -540,6 +540,37 @@ static int brcm_fet_config_intr(struct phy_device *phydev)
 	return err;
 }
 
+struct bcm53xx_phy_priv {
+	u64	*stats;
+};
+
+static int bcm53xx_phy_probe(struct phy_device *phydev)
+{
+	struct bcm53xx_phy_priv *priv;
+
+	priv = devm_kzalloc(&phydev->mdio.dev, sizeof(*priv), GFP_KERNEL);
+	if (!priv)
+		return -ENOMEM;
+
+	phydev->priv = priv;
+
+	priv->stats = devm_kcalloc(&phydev->mdio.dev,
+				   bcm_phy_get_sset_count(phydev), sizeof(u64),
+				   GFP_KERNEL);
+	if (!priv->stats)
+		return -ENOMEM;
+
+	return 0;
+}
+
+static void bcm53xx_phy_get_stats(struct phy_device *phydev,
+				  struct ethtool_stats *stats, u64 *data)
+{
+	struct bcm53xx_phy_priv *priv = phydev->priv;
+
+	bcm_phy_get_stats(phydev, priv->stats, stats, data);
+}
+
 static struct phy_driver broadcom_drivers[] = {
 {
 	.phy_id		= PHY_ID_BCM5411,
@@ -548,8 +579,6 @@ static struct phy_driver broadcom_drivers[] = {
 	.features	= PHY_GBIT_FEATURES,
 	.flags		= PHY_HAS_INTERRUPT,
 	.config_init	= bcm54xx_config_init,
-	.config_aneg	= genphy_config_aneg,
-	.read_status	= genphy_read_status,
 	.ack_interrupt	= bcm_phy_ack_intr,
 	.config_intr	= bcm_phy_config_intr,
 }, {
@@ -559,8 +588,6 @@ static struct phy_driver broadcom_drivers[] = {
 	.features	= PHY_GBIT_FEATURES,
 	.flags		= PHY_HAS_INTERRUPT,
 	.config_init	= bcm54xx_config_init,
-	.config_aneg	= genphy_config_aneg,
-	.read_status	= genphy_read_status,
 	.ack_interrupt	= bcm_phy_ack_intr,
 	.config_intr	= bcm_phy_config_intr,
 }, {
@@ -570,8 +597,6 @@ static struct phy_driver broadcom_drivers[] = {
 	.features	= PHY_GBIT_FEATURES,
 	.flags		= PHY_HAS_INTERRUPT,
 	.config_init	= bcm54xx_config_init,
-	.config_aneg	= genphy_config_aneg,
-	.read_status	= genphy_read_status,
 	.ack_interrupt	= bcm_phy_ack_intr,
 	.config_intr	= bcm_phy_config_intr,
 }, {
@@ -581,8 +606,6 @@ static struct phy_driver broadcom_drivers[] = {
 	.features	= PHY_GBIT_FEATURES,
 	.flags		= PHY_HAS_INTERRUPT,
 	.config_init	= bcm54xx_config_init,
-	.config_aneg	= genphy_config_aneg,
-	.read_status	= genphy_read_status,
 	.ack_interrupt	= bcm_phy_ack_intr,
 	.config_intr	= bcm_phy_config_intr,
 }, {
@@ -592,8 +615,6 @@ static struct phy_driver broadcom_drivers[] = {
 	.features	= PHY_GBIT_FEATURES,
 	.flags		= PHY_HAS_INTERRUPT,
 	.config_init	= bcm54xx_config_init,
-	.config_aneg	= genphy_config_aneg,
-	.read_status	= genphy_read_status,
 	.ack_interrupt	= bcm_phy_ack_intr,
 	.config_intr	= bcm_phy_config_intr,
 }, {
@@ -603,8 +624,6 @@ static struct phy_driver broadcom_drivers[] = {
 	.features	= PHY_GBIT_FEATURES,
 	.flags		= PHY_HAS_INTERRUPT,
 	.config_init	= bcm54xx_config_init,
-	.config_aneg	= genphy_config_aneg,
-	.read_status	= genphy_read_status,
 	.ack_interrupt	= bcm_phy_ack_intr,
 	.config_intr	= bcm_phy_config_intr,
 }, {
@@ -614,8 +633,6 @@ static struct phy_driver broadcom_drivers[] = {
 	.features	= PHY_GBIT_FEATURES,
 	.flags		= PHY_HAS_INTERRUPT,
 	.config_init	= bcm54xx_config_init,
-	.config_aneg	= genphy_config_aneg,
-	.read_status	= genphy_read_status,
 	.ack_interrupt	= bcm_phy_ack_intr,
 	.config_intr	= bcm_phy_config_intr,
 }, {
@@ -626,7 +643,6 @@ static struct phy_driver broadcom_drivers[] = {
 	.flags		= PHY_HAS_INTERRUPT,
 	.config_init	= bcm54xx_config_init,
 	.config_aneg	= bcm5481_config_aneg,
-	.read_status	= genphy_read_status,
 	.ack_interrupt	= bcm_phy_ack_intr,
 	.config_intr	= bcm_phy_config_intr,
 }, {
@@ -637,7 +653,6 @@ static struct phy_driver broadcom_drivers[] = {
 	.flags          = PHY_HAS_INTERRUPT,
 	.config_init    = bcm54xx_config_init,
 	.config_aneg    = bcm5481_config_aneg,
-	.read_status    = genphy_read_status,
 	.ack_interrupt  = bcm_phy_ack_intr,
 	.config_intr    = bcm_phy_config_intr,
 }, {
@@ -647,7 +662,6 @@ static struct phy_driver broadcom_drivers[] = {
 	.features	= PHY_GBIT_FEATURES,
 	.flags		= PHY_HAS_INTERRUPT,
 	.config_init	= bcm5482_config_init,
-	.config_aneg	= genphy_config_aneg,
 	.read_status	= bcm5482_read_status,
 	.ack_interrupt	= bcm_phy_ack_intr,
 	.config_intr	= bcm_phy_config_intr,
@@ -658,8 +672,6 @@ static struct phy_driver broadcom_drivers[] = {
 	.features	= PHY_GBIT_FEATURES,
 	.flags		= PHY_HAS_INTERRUPT,
 	.config_init	= bcm54xx_config_init,
-	.config_aneg	= genphy_config_aneg,
-	.read_status	= genphy_read_status,
 	.ack_interrupt	= bcm_phy_ack_intr,
 	.config_intr	= bcm_phy_config_intr,
 }, {
@@ -669,8 +681,6 @@ static struct phy_driver broadcom_drivers[] = {
 	.features	= PHY_GBIT_FEATURES,
 	.flags		= PHY_HAS_INTERRUPT,
 	.config_init	= bcm54xx_config_init,
-	.config_aneg	= genphy_config_aneg,
-	.read_status	= genphy_read_status,
 	.ack_interrupt	= bcm_phy_ack_intr,
 	.config_intr	= bcm_phy_config_intr,
 }, {
@@ -680,8 +690,6 @@ static struct phy_driver broadcom_drivers[] = {
 	.features	= PHY_GBIT_FEATURES,
 	.flags		= PHY_HAS_INTERRUPT,
 	.config_init	= bcm54xx_config_init,
-	.config_aneg	= genphy_config_aneg,
-	.read_status	= genphy_read_status,
 	.ack_interrupt	= bcm_phy_ack_intr,
 	.config_intr	= bcm_phy_config_intr,
 }, {
@@ -691,8 +699,6 @@ static struct phy_driver broadcom_drivers[] = {
 	.features	= PHY_BASIC_FEATURES,
 	.flags		= PHY_HAS_INTERRUPT,
 	.config_init	= brcm_fet_config_init,
-	.config_aneg	= genphy_config_aneg,
-	.read_status	= genphy_read_status,
 	.ack_interrupt	= brcm_fet_ack_interrupt,
 	.config_intr	= brcm_fet_config_intr,
 }, {
@@ -702,10 +708,18 @@ static struct phy_driver broadcom_drivers[] = {
 	.features	= PHY_BASIC_FEATURES,
 	.flags		= PHY_HAS_INTERRUPT,
 	.config_init	= brcm_fet_config_init,
-	.config_aneg	= genphy_config_aneg,
-	.read_status	= genphy_read_status,
 	.ack_interrupt	= brcm_fet_ack_interrupt,
 	.config_intr	= brcm_fet_config_intr,
+}, {
+	.phy_id		= PHY_ID_BCM5395,
+	.phy_id_mask	= 0xfffffff0,
+	.name		= "Broadcom BCM5395",
+	.flags		= PHY_IS_INTERNAL,
+	.features	= PHY_GBIT_FEATURES,
+	.get_sset_count	= bcm_phy_get_sset_count,
+	.get_strings	= bcm_phy_get_strings,
+	.get_stats	= bcm53xx_phy_get_stats,
+	.probe		= bcm53xx_phy_probe,
 } };
 
 module_phy_driver(broadcom_drivers);
@@ -726,6 +740,7 @@ static struct mdio_device_id __maybe_unused broadcom_tbl[] = {
 	{ PHY_ID_BCM57780, 0xfffffff0 },
 	{ PHY_ID_BCMAC131, 0xfffffff0 },
 	{ PHY_ID_BCM5241, 0xfffffff0 },
+	{ PHY_ID_BCM5395, 0xfffffff0 },
 	{ }
 };
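
The new BCM5395 entry is mostly statistics wiring: get_sset_count()/get_strings()/get_stats() form the usual ethtool trio (count sizes the arrays, strings names them, stats fills them), with bcm53xx_phy_probe() allocating a persistent per-PHY u64 buffer up front. The buffer is presumably needed because hardware counters are narrow and wrap or clear on read, so each reading gets folded into a running 64-bit total, schematically:

	/* schematic only, not bcm_phy_get_stats() itself */
	static u64 fold_hw_counter(u64 *total, unsigned int hw_val,
				   unsigned int hw_bits)
	{
		*total += hw_val & ((1U << hw_bits) - 1);	/* assumes hw_bits < 32 */
		return *total;
	}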
 
diff --git a/drivers/net/phy/cicada.c b/drivers/net/phy/cicada.c
index d339c1a..c05af00 100644
--- a/drivers/net/phy/cicada.c
+++ b/drivers/net/phy/cicada.c
@@ -110,8 +110,6 @@ static struct phy_driver cis820x_driver[] = {
 	.features	= PHY_GBIT_FEATURES,
 	.flags		= PHY_HAS_INTERRUPT,
 	.config_init	= &cis820x_config_init,
-	.config_aneg	= &genphy_config_aneg,
-	.read_status	= &genphy_read_status,
 	.ack_interrupt	= &cis820x_ack_interrupt,
 	.config_intr	= &cis820x_config_intr,
 }, {
@@ -121,8 +119,6 @@ static struct phy_driver cis820x_driver[] = {
 	.features	= PHY_GBIT_FEATURES,
 	.flags		= PHY_HAS_INTERRUPT,
 	.config_init	= &cis820x_config_init,
-	.config_aneg	= &genphy_config_aneg,
-	.read_status	= &genphy_read_status,
 	.ack_interrupt	= &cis820x_ack_interrupt,
 	.config_intr	= &cis820x_config_intr,
 } };
diff --git a/drivers/net/phy/davicom.c b/drivers/net/phy/davicom.c
index e28913d..5ee99b3 100644
--- a/drivers/net/phy/davicom.c
+++ b/drivers/net/phy/davicom.c
@@ -153,7 +153,6 @@ static struct phy_driver dm91xx_driver[] = {
 	.flags		= PHY_HAS_INTERRUPT,
 	.config_init	= dm9161_config_init,
 	.config_aneg	= dm9161_config_aneg,
-	.read_status	= genphy_read_status,
 	.ack_interrupt	= dm9161_ack_interrupt,
 	.config_intr	= dm9161_config_intr,
 }, {
@@ -164,7 +163,6 @@ static struct phy_driver dm91xx_driver[] = {
 	.flags		= PHY_HAS_INTERRUPT,
 	.config_init	= dm9161_config_init,
 	.config_aneg	= dm9161_config_aneg,
-	.read_status	= genphy_read_status,
 	.ack_interrupt	= dm9161_ack_interrupt,
 	.config_intr	= dm9161_config_intr,
 }, {
@@ -175,7 +173,6 @@ static struct phy_driver dm91xx_driver[] = {
 	.flags		= PHY_HAS_INTERRUPT,
 	.config_init	= dm9161_config_init,
 	.config_aneg	= dm9161_config_aneg,
-	.read_status	= genphy_read_status,
 	.ack_interrupt	= dm9161_ack_interrupt,
 	.config_intr	= dm9161_config_intr,
 }, {
@@ -184,8 +181,6 @@ static struct phy_driver dm91xx_driver[] = {
 	.phy_id_mask	= 0x0ffffff0,
 	.features	= PHY_BASIC_FEATURES,
 	.flags		= PHY_HAS_INTERRUPT,
-	.config_aneg	= genphy_config_aneg,
-	.read_status	= genphy_read_status,
 	.ack_interrupt	= dm9161_ack_interrupt,
 	.config_intr	= dm9161_config_intr,
 } };
diff --git a/drivers/net/phy/dp83640.c b/drivers/net/phy/dp83640.c
index cbd6298..654f42d 100644
--- a/drivers/net/phy/dp83640.c
+++ b/drivers/net/phy/dp83640.c
@@ -1502,8 +1502,6 @@ static struct phy_driver dp83640_driver = {
 	.probe		= dp83640_probe,
 	.remove		= dp83640_remove,
 	.config_init	= dp83640_config_init,
-	.config_aneg	= genphy_config_aneg,
-	.read_status	= genphy_read_status,
 	.ack_interrupt  = dp83640_ack_interrupt,
 	.config_intr    = dp83640_config_intr,
 	.ts_info	= dp83640_ts_info,
diff --git a/drivers/net/phy/dp83822.c b/drivers/net/phy/dp83822.c
index 14335d1..6e8a2a4 100644
--- a/drivers/net/phy/dp83822.c
+++ b/drivers/net/phy/dp83822.c
@@ -325,8 +325,6 @@ static struct phy_driver dp83822_driver[] = {
 		.set_wol = dp83822_set_wol,
 		.ack_interrupt = dp83822_ack_interrupt,
 		.config_intr = dp83822_config_intr,
-		.config_aneg = genphy_config_aneg,
-		.read_status = genphy_read_status,
 		.suspend = dp83822_suspend,
 		.resume = dp83822_resume,
 	 },
diff --git a/drivers/net/phy/dp83848.c b/drivers/net/phy/dp83848.c
index 3966d43..cd09c3a 100644
--- a/drivers/net/phy/dp83848.c
+++ b/drivers/net/phy/dp83848.c
@@ -95,8 +95,6 @@ MODULE_DEVICE_TABLE(mdio, dp83848_tbl);
 		.config_init	= genphy_config_init,		\
 		.suspend	= genphy_suspend,		\
 		.resume		= genphy_resume,		\
-		.config_aneg	= genphy_config_aneg,		\
-		.read_status	= genphy_read_status,		\
 								\
 		/* IRQ related */				\
 		.ack_interrupt	= dp83848_ack_interrupt,	\
diff --git a/drivers/net/phy/dp83867.c b/drivers/net/phy/dp83867.c
index c1ab976..ab58224 100644
--- a/drivers/net/phy/dp83867.c
+++ b/drivers/net/phy/dp83867.c
@@ -324,8 +324,6 @@ static struct phy_driver dp83867_driver[] = {
 		.ack_interrupt	= dp83867_ack_interrupt,
 		.config_intr	= dp83867_config_intr,
 
-		.config_aneg	= genphy_config_aneg,
-		.read_status	= genphy_read_status,
 		.suspend	= genphy_suspend,
 		.resume		= genphy_resume,
 	},
diff --git a/drivers/net/phy/fixed_phy.c b/drivers/net/phy/fixed_phy.c
index eb51672..001fe1d 100644
--- a/drivers/net/phy/fixed_phy.c
+++ b/drivers/net/phy/fixed_phy.c
@@ -115,37 +115,6 @@ int fixed_phy_set_link_update(struct phy_device *phydev,
 }
 EXPORT_SYMBOL_GPL(fixed_phy_set_link_update);
 
-int fixed_phy_update_state(struct phy_device *phydev,
-			   const struct fixed_phy_status *status,
-			   const struct fixed_phy_status *changed)
-{
-	struct fixed_mdio_bus *fmb = &platform_fmb;
-	struct fixed_phy *fp;
-
-	if (!phydev || phydev->mdio.bus != fmb->mii_bus)
-		return -EINVAL;
-
-	list_for_each_entry(fp, &fmb->phys, node) {
-		if (fp->addr == phydev->mdio.addr) {
-			write_seqcount_begin(&fp->seqcount);
-#define _UPD(x) if (changed->x) \
-	fp->status.x = status->x
-			_UPD(link);
-			_UPD(speed);
-			_UPD(duplex);
-			_UPD(pause);
-			_UPD(asym_pause);
-#undef _UPD
-			fixed_phy_update(fp);
-			write_seqcount_end(&fp->seqcount);
-			return 0;
-		}
-	}
-
-	return -ENOENT;
-}
-EXPORT_SYMBOL(fixed_phy_update_state);
-
 int fixed_phy_add(unsigned int irq, int phy_addr,
 		  struct fixed_phy_status *status,
 		  int link_gpio)
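
fixed_phy_update_state() had no remaining in-tree callers; code that needs
to refresh a fixed PHY's state is expected to provide a callback via
fixed_phy_set_link_update() instead. A hedged usage sketch, where
my_link_update() and my_carrier_ok() are hypothetical:

	static int my_link_update(struct net_device *dev,
				  struct fixed_phy_status *status)
	{
		status->link = my_carrier_ok();	/* assumed HW query */
		return 0;
	}

	/* after obtaining phydev from fixed_phy_register(): */
	err = fixed_phy_set_link_update(phydev, my_link_update);
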
diff --git a/drivers/net/phy/icplus.c b/drivers/net/phy/icplus.c
index 567280a..791587a 100644
--- a/drivers/net/phy/icplus.c
+++ b/drivers/net/phy/icplus.c
@@ -227,8 +227,6 @@ static struct phy_driver icplus_driver[] = {
 	.phy_id_mask	= 0x0ffffff0,
 	.features	= PHY_GBIT_FEATURES,
 	.config_init	= &ip1001_config_init,
-	.config_aneg	= &genphy_config_aneg,
-	.read_status	= &genphy_read_status,
 	.suspend	= genphy_suspend,
 	.resume		= genphy_resume,
 }, {
@@ -239,8 +237,6 @@ static struct phy_driver icplus_driver[] = {
 	.flags		= PHY_HAS_INTERRUPT,
 	.ack_interrupt	= ip101a_g_ack_interrupt,
 	.config_init	= &ip101a_g_config_init,
-	.config_aneg	= &genphy_config_aneg,
-	.read_status	= &genphy_read_status,
 	.suspend	= genphy_suspend,
 	.resume		= genphy_resume,
 } };
diff --git a/drivers/net/phy/intel-xway.c b/drivers/net/phy/intel-xway.c
index 55f8c52..a11f80c 100644
--- a/drivers/net/phy/intel-xway.c
+++ b/drivers/net/phy/intel-xway.c
@@ -243,7 +243,6 @@ static struct phy_driver xway_gphy[] = {
 		.flags		= PHY_HAS_INTERRUPT,
 		.config_init	= xway_gphy_config_init,
 		.config_aneg	= xway_gphy14_config_aneg,
-		.read_status	= genphy_read_status,
 		.ack_interrupt	= xway_gphy_ack_interrupt,
 		.did_interrupt	= xway_gphy_did_interrupt,
 		.config_intr	= xway_gphy_config_intr,
@@ -257,7 +256,6 @@ static struct phy_driver xway_gphy[] = {
 		.flags		= PHY_HAS_INTERRUPT,
 		.config_init	= xway_gphy_config_init,
 		.config_aneg	= xway_gphy14_config_aneg,
-		.read_status	= genphy_read_status,
 		.ack_interrupt	= xway_gphy_ack_interrupt,
 		.did_interrupt	= xway_gphy_did_interrupt,
 		.config_intr	= xway_gphy_config_intr,
@@ -271,7 +269,6 @@ static struct phy_driver xway_gphy[] = {
 		.flags		= PHY_HAS_INTERRUPT,
 		.config_init	= xway_gphy_config_init,
 		.config_aneg	= xway_gphy14_config_aneg,
-		.read_status	= genphy_read_status,
 		.ack_interrupt	= xway_gphy_ack_interrupt,
 		.did_interrupt	= xway_gphy_did_interrupt,
 		.config_intr	= xway_gphy_config_intr,
@@ -285,7 +282,6 @@ static struct phy_driver xway_gphy[] = {
 		.flags		= PHY_HAS_INTERRUPT,
 		.config_init	= xway_gphy_config_init,
 		.config_aneg	= xway_gphy14_config_aneg,
-		.read_status	= genphy_read_status,
 		.ack_interrupt	= xway_gphy_ack_interrupt,
 		.did_interrupt	= xway_gphy_did_interrupt,
 		.config_intr	= xway_gphy_config_intr,
@@ -298,8 +294,6 @@ static struct phy_driver xway_gphy[] = {
 		.features	= PHY_GBIT_FEATURES,
 		.flags		= PHY_HAS_INTERRUPT,
 		.config_init	= xway_gphy_config_init,
-		.config_aneg	= genphy_config_aneg,
-		.read_status	= genphy_read_status,
 		.ack_interrupt	= xway_gphy_ack_interrupt,
 		.did_interrupt	= xway_gphy_did_interrupt,
 		.config_intr	= xway_gphy_config_intr,
@@ -312,8 +306,6 @@ static struct phy_driver xway_gphy[] = {
 		.features	= PHY_BASIC_FEATURES,
 		.flags		= PHY_HAS_INTERRUPT,
 		.config_init	= xway_gphy_config_init,
-		.config_aneg	= genphy_config_aneg,
-		.read_status	= genphy_read_status,
 		.ack_interrupt	= xway_gphy_ack_interrupt,
 		.did_interrupt	= xway_gphy_did_interrupt,
 		.config_intr	= xway_gphy_config_intr,
@@ -326,8 +318,6 @@ static struct phy_driver xway_gphy[] = {
 		.features	= PHY_GBIT_FEATURES,
 		.flags		= PHY_HAS_INTERRUPT,
 		.config_init	= xway_gphy_config_init,
-		.config_aneg	= genphy_config_aneg,
-		.read_status	= genphy_read_status,
 		.ack_interrupt	= xway_gphy_ack_interrupt,
 		.did_interrupt	= xway_gphy_did_interrupt,
 		.config_intr	= xway_gphy_config_intr,
@@ -340,8 +330,6 @@ static struct phy_driver xway_gphy[] = {
 		.features	= PHY_BASIC_FEATURES,
 		.flags		= PHY_HAS_INTERRUPT,
 		.config_init	= xway_gphy_config_init,
-		.config_aneg	= genphy_config_aneg,
-		.read_status	= genphy_read_status,
 		.ack_interrupt	= xway_gphy_ack_interrupt,
 		.did_interrupt	= xway_gphy_did_interrupt,
 		.config_intr	= xway_gphy_config_intr,
diff --git a/drivers/net/phy/lxt.c b/drivers/net/phy/lxt.c
index 09d2151..c14b254 100644
--- a/drivers/net/phy/lxt.c
+++ b/drivers/net/phy/lxt.c
@@ -259,8 +259,6 @@ static struct phy_driver lxt97x_driver[] = {
 	.features	= PHY_BASIC_FEATURES,
 	.flags		= PHY_HAS_INTERRUPT,
 	.config_init	= lxt970_config_init,
-	.config_aneg	= genphy_config_aneg,
-	.read_status	= genphy_read_status,
 	.ack_interrupt	= lxt970_ack_interrupt,
 	.config_intr	= lxt970_config_intr,
 }, {
@@ -269,8 +267,6 @@ static struct phy_driver lxt97x_driver[] = {
 	.phy_id_mask	= 0xfffffff0,
 	.features	= PHY_BASIC_FEATURES,
 	.flags		= PHY_HAS_INTERRUPT,
-	.config_aneg	= genphy_config_aneg,
-	.read_status	= genphy_read_status,
 	.ack_interrupt	= lxt971_ack_interrupt,
 	.config_intr	= lxt971_config_intr,
 }, {
@@ -290,7 +286,6 @@ static struct phy_driver lxt97x_driver[] = {
 	.flags		= 0,
 	.probe		= lxt973_probe,
 	.config_aneg	= lxt973_config_aneg,
-	.read_status	= genphy_read_status,
 } };
 
 module_phy_driver(lxt97x_driver);
diff --git a/drivers/net/phy/marvell.c b/drivers/net/phy/marvell.c
index 82104edca..22d9bc9 100644
--- a/drivers/net/phy/marvell.c
+++ b/drivers/net/phy/marvell.c
@@ -83,7 +83,7 @@
 #define MII_88E1121_PHY_MSCR_REG	21
 #define MII_88E1121_PHY_MSCR_RX_DELAY	BIT(5)
 #define MII_88E1121_PHY_MSCR_TX_DELAY	BIT(4)
-#define MII_88E1121_PHY_MSCR_DELAY_MASK	(~(BIT(5) | BIT(4)))
+#define MII_88E1121_PHY_MSCR_DELAY_MASK	(BIT(5) | BIT(4))
 
 #define MII_88E1121_MISC_TEST				0x1a
 #define MII_88E1510_MISC_TEST_TEMP_THRESHOLD_MASK	0x1f00
@@ -96,6 +96,17 @@
 #define MII_88E1510_TEMP_SENSOR		0x1b
 #define MII_88E1510_TEMP_SENSOR_MASK	0xff
 
+#define MII_88E6390_MISC_TEST		0x1b
+#define MII_88E6390_MISC_TEST_SAMPLE_1S		0
+#define MII_88E6390_MISC_TEST_SAMPLE_10MS	BIT(14)
+#define MII_88E6390_MISC_TEST_SAMPLE_DISABLE	BIT(15)
+#define MII_88E6390_MISC_TEST_SAMPLE_ENABLE	0
+#define MII_88E6390_MISC_TEST_SAMPLE_MASK	(0x3 << 14)
+
+#define MII_88E6390_TEMP_SENSOR		0x1c
+#define MII_88E6390_TEMP_SENSOR_MASK	0xff
+#define MII_88E6390_TEMP_SENSOR_SAMPLES 10
+
 #define MII_88E1318S_PHY_MSCR1_REG	16
 #define MII_88E1318S_PHY_MSCR1_PAD_ODD	BIT(6)
 
@@ -177,9 +188,14 @@ struct marvell_priv {
 	struct device *hwmon_dev;
 };
 
-static int marvell_get_page(struct phy_device *phydev)
+static int marvell_read_page(struct phy_device *phydev)
 {
-	return phy_read(phydev, MII_MARVELL_PHY_PAGE);
+	return __phy_read(phydev, MII_MARVELL_PHY_PAGE);
+}
+
+static int marvell_write_page(struct phy_device *phydev, int page)
+{
+	return __phy_write(phydev, MII_MARVELL_PHY_PAGE, page);
 }
 
 static int marvell_set_page(struct phy_device *phydev, int page)
@@ -187,19 +203,6 @@ static int marvell_set_page(struct phy_device *phydev, int page)
 	return phy_write(phydev, MII_MARVELL_PHY_PAGE, page);
 }
 
-static int marvell_get_set_page(struct phy_device *phydev, int page)
-{
-	int oldpage = marvell_get_page(phydev);
-
-	if (oldpage < 0)
-		return oldpage;
-
-	if (page != oldpage)
-		return marvell_set_page(phydev, page);
-
-	return 0;
-}
-
 static int marvell_ack_interrupt(struct phy_device *phydev)
 {
 	int err;
@@ -399,7 +402,7 @@ static int m88e1111_config_aneg(struct phy_device *phydev)
 static int marvell_of_reg_init(struct phy_device *phydev)
 {
 	const __be32 *paddr;
-	int len, i, saved_page, current_page, ret;
+	int len, i, saved_page, current_page, ret = 0;
 
 	if (!phydev->mdio.dev.of_node)
 		return 0;
@@ -409,12 +412,11 @@ static int marvell_of_reg_init(struct phy_device *phydev)
 	if (!paddr || len < (4 * sizeof(*paddr)))
 		return 0;
 
-	saved_page = marvell_get_page(phydev);
+	saved_page = phy_save_page(phydev);
 	if (saved_page < 0)
-		return saved_page;
+		goto err;
 	current_page = saved_page;
 
-	ret = 0;
 	len /= sizeof(*paddr);
 	for (i = 0; i < len - 3; i += 4) {
 		u16 page = be32_to_cpup(paddr + i);
@@ -425,14 +427,14 @@ static int marvell_of_reg_init(struct phy_device *phydev)
 
 		if (page != current_page) {
 			current_page = page;
-			ret = marvell_set_page(phydev, page);
+			ret = marvell_write_page(phydev, page);
 			if (ret < 0)
 				goto err;
 		}
 
 		val = 0;
 		if (mask) {
-			val = phy_read(phydev, reg);
+			val = __phy_read(phydev, reg);
 			if (val < 0) {
 				ret = val;
 				goto err;
@@ -441,17 +443,12 @@ static int marvell_of_reg_init(struct phy_device *phydev)
 		}
 		val |= val_bits;
 
-		ret = phy_write(phydev, reg, val);
+		ret = __phy_write(phydev, reg, val);
 		if (ret < 0)
 			goto err;
 	}
 err:
-	if (current_page != saved_page) {
-		i = marvell_set_page(phydev, saved_page);
-		if (ret == 0)
-			ret = i;
-	}
-	return ret;
+	return phy_restore_page(phydev, saved_page, ret);
 }
 #else
 static int marvell_of_reg_init(struct phy_device *phydev)
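
phy_save_page() takes the MDIO bus lock and returns the current page;
phy_restore_page() rewrites the saved page, drops the lock, and folds any
page-restore error into the return code it is handed. The resulting
pattern, sketched with the unlocked accessors used above:

	int oldpage, ret = 0;

	oldpage = phy_save_page(phydev);	/* lock bus, page unchanged */
	if (oldpage >= 0)
		ret = __phy_write(phydev, reg, val);

	return phy_restore_page(phydev, oldpage, ret);
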
@@ -462,34 +459,21 @@ static int marvell_of_reg_init(struct phy_device *phydev)
 
 static int m88e1121_config_aneg_rgmii_delays(struct phy_device *phydev)
 {
-	int err, oldpage, mscr;
-
-	oldpage = marvell_get_set_page(phydev, MII_MARVELL_MSCR_PAGE);
-	if (oldpage < 0)
-		return oldpage;
-
-	mscr = phy_read(phydev, MII_88E1121_PHY_MSCR_REG);
-	if (mscr < 0) {
-		err = mscr;
-		goto out;
-	}
-
-	mscr &= MII_88E1121_PHY_MSCR_DELAY_MASK;
+	int mscr;
 
 	if (phydev->interface == PHY_INTERFACE_MODE_RGMII_ID)
-		mscr |= (MII_88E1121_PHY_MSCR_RX_DELAY |
-			 MII_88E1121_PHY_MSCR_TX_DELAY);
+		mscr = MII_88E1121_PHY_MSCR_RX_DELAY |
+		       MII_88E1121_PHY_MSCR_TX_DELAY;
 	else if (phydev->interface == PHY_INTERFACE_MODE_RGMII_RXID)
-		mscr |= MII_88E1121_PHY_MSCR_RX_DELAY;
+		mscr = MII_88E1121_PHY_MSCR_RX_DELAY;
 	else if (phydev->interface == PHY_INTERFACE_MODE_RGMII_TXID)
-		mscr |= MII_88E1121_PHY_MSCR_TX_DELAY;
+		mscr = MII_88E1121_PHY_MSCR_TX_DELAY;
+	else
+		mscr = 0;
 
-	err = phy_write(phydev, MII_88E1121_PHY_MSCR_REG, mscr);
-
-out:
-	marvell_set_page(phydev, oldpage);
-
-	return err;
+	return phy_modify_paged(phydev, MII_MARVELL_MSCR_PAGE,
+				MII_88E1121_PHY_MSCR_REG,
+				MII_88E1121_PHY_MSCR_DELAY_MASK, mscr);
 }
 
 static int m88e1121_config_aneg(struct phy_device *phydev)
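
phy_modify_paged() collapses the old select-page/read/mask/write/restore
dance into one call: under the bus lock it clears the bits in the mask
argument, sets the bits in the set argument, and restores the page. The
unpaged core is conceptually (a sketch of __phy_modify(), error handling
trimmed):

	ret = __phy_read(phydev, regnum);
	if (ret >= 0)
		ret = __phy_write(phydev, regnum, (ret & ~mask) | set);

This is also why MII_88E1121_PHY_MSCR_DELAY_MASK flips from an inverted
mask to a plain (BIT(5) | BIT(4)) earlier in this patch: the mask argument
now names the bits to clear.
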
@@ -498,7 +482,7 @@ static int m88e1121_config_aneg(struct phy_device *phydev)
 
 	if (phy_interface_is_rgmii(phydev)) {
 		err = m88e1121_config_aneg_rgmii_delays(phydev);
-		if (err)
+		if (err < 0)
 			return err;
 	}
 
@@ -515,20 +499,11 @@ static int m88e1121_config_aneg(struct phy_device *phydev)
 
 static int m88e1318_config_aneg(struct phy_device *phydev)
 {
-	int err, oldpage, mscr;
+	int err;
 
-	oldpage = marvell_get_set_page(phydev, MII_MARVELL_MSCR_PAGE);
-	if (oldpage < 0)
-		return oldpage;
-
-	mscr = phy_read(phydev, MII_88E1318S_PHY_MSCR1_REG);
-	mscr |= MII_88E1318S_PHY_MSCR1_PAD_ODD;
-
-	err = phy_write(phydev, MII_88E1318S_PHY_MSCR1_REG, mscr);
-	if (err < 0)
-		return err;
-
-	err = marvell_set_page(phydev, oldpage);
+	err = phy_modify_paged(phydev, MII_MARVELL_MSCR_PAGE,
+			       MII_88E1318S_PHY_MSCR1_REG,
+			       0, MII_88E1318S_PHY_MSCR1_PAD_ODD);
 	if (err < 0)
 		return err;
 
@@ -700,19 +675,14 @@ static int m88e1116r_config_init(struct phy_device *phydev)
 
 static int m88e3016_config_init(struct phy_device *phydev)
 {
-	int reg;
+	int ret;
 
 	/* Enable Scrambler and Auto-Crossover */
-	reg = phy_read(phydev, MII_88E3016_PHY_SPEC_CTRL);
-	if (reg < 0)
-		return reg;
-
-	reg &= ~MII_88E3016_DISABLE_SCRAMBLER;
-	reg |= MII_88E3016_AUTO_MDIX_CROSSOVER;
-
-	reg = phy_write(phydev, MII_88E3016_PHY_SPEC_CTRL, reg);
-	if (reg < 0)
-		return reg;
+	ret = phy_modify(phydev, MII_88E3016_PHY_SPEC_CTRL,
+			 MII_88E3016_DISABLE_SCRAMBLER,
+			 MII_88E3016_AUTO_MDIX_CROSSOVER);
+	if (ret < 0)
+		return ret;
 
 	return marvell_config_init(phydev);
 }
@@ -721,42 +691,33 @@ static int m88e1111_config_init_hwcfg_mode(struct phy_device *phydev,
 					   u16 mode,
 					   int fibre_copper_auto)
 {
-	int temp;
-
-	temp = phy_read(phydev, MII_M1111_PHY_EXT_SR);
-	if (temp < 0)
-		return temp;
-
-	temp &= ~(MII_M1111_HWCFG_MODE_MASK |
-		  MII_M1111_HWCFG_FIBER_COPPER_AUTO |
-		  MII_M1111_HWCFG_FIBER_COPPER_RES);
-	temp |= mode;
-
 	if (fibre_copper_auto)
-		temp |= MII_M1111_HWCFG_FIBER_COPPER_AUTO;
+		mode |= MII_M1111_HWCFG_FIBER_COPPER_AUTO;
 
-	return phy_write(phydev, MII_M1111_PHY_EXT_SR, temp);
+	return phy_modify(phydev, MII_M1111_PHY_EXT_SR,
+			  MII_M1111_HWCFG_MODE_MASK |
+			  MII_M1111_HWCFG_FIBER_COPPER_AUTO |
+			  MII_M1111_HWCFG_FIBER_COPPER_RES,
+			  mode);
 }
 
 static int m88e1111_config_init_rgmii_delays(struct phy_device *phydev)
 {
-	int temp;
-
-	temp = phy_read(phydev, MII_M1111_PHY_EXT_CR);
-	if (temp < 0)
-		return temp;
+	int delay;
 
 	if (phydev->interface == PHY_INTERFACE_MODE_RGMII_ID) {
-		temp |= (MII_M1111_RGMII_RX_DELAY | MII_M1111_RGMII_TX_DELAY);
+		delay = MII_M1111_RGMII_RX_DELAY | MII_M1111_RGMII_TX_DELAY;
 	} else if (phydev->interface == PHY_INTERFACE_MODE_RGMII_RXID) {
-		temp &= ~MII_M1111_RGMII_TX_DELAY;
-		temp |= MII_M1111_RGMII_RX_DELAY;
+		delay = MII_M1111_RGMII_RX_DELAY;
 	} else if (phydev->interface == PHY_INTERFACE_MODE_RGMII_TXID) {
-		temp &= ~MII_M1111_RGMII_RX_DELAY;
-		temp |= MII_M1111_RGMII_TX_DELAY;
+		delay = MII_M1111_RGMII_TX_DELAY;
+	} else {
+		delay = 0;
 	}
 
-	return phy_write(phydev, MII_M1111_PHY_EXT_CR, temp);
+	return phy_modify(phydev, MII_M1111_PHY_EXT_CR,
+			  MII_M1111_RGMII_RX_DELAY | MII_M1111_RGMII_TX_DELAY,
+			  delay);
 }
 
 static int m88e1111_config_init_rgmii(struct phy_device *phydev)
@@ -802,7 +763,7 @@ static int m88e1111_config_init_rtbi(struct phy_device *phydev)
 	int err;
 
 	err = m88e1111_config_init_rgmii_delays(phydev);
-	if (err)
+	if (err < 0)
 		return err;
 
 	err = m88e1111_config_init_hwcfg_mode(
@@ -829,7 +790,7 @@ static int m88e1111_config_init(struct phy_device *phydev)
 
 	if (phy_interface_is_rgmii(phydev)) {
 		err = m88e1111_config_init_rgmii(phydev);
-		if (err)
+		if (err < 0)
 			return err;
 	}
 
@@ -854,20 +815,15 @@ static int m88e1111_config_init(struct phy_device *phydev)
 
 static int m88e1121_config_init(struct phy_device *phydev)
 {
-	int err, oldpage;
-
-	oldpage = marvell_get_set_page(phydev, MII_MARVELL_LED_PAGE);
-	if (oldpage < 0)
-		return oldpage;
+	int err;
 
 	/* Default PHY LED config: LED[0] .. Link, LED[1] .. Activity */
-	err = phy_write(phydev, MII_88E1121_PHY_LED_CTRL,
-			MII_88E1121_PHY_LED_DEF);
+	err = phy_write_paged(phydev, MII_MARVELL_LED_PAGE,
+			      MII_88E1121_PHY_LED_CTRL,
+			      MII_88E1121_PHY_LED_DEF);
 	if (err < 0)
 		return err;
 
-	marvell_set_page(phydev, oldpage);
-
 	/* Set marvell,reg-init configuration from device tree */
 	return marvell_config_init(phydev);
 }
@@ -875,7 +831,6 @@ static int m88e1121_config_init(struct phy_device *phydev)
 static int m88e1510_config_init(struct phy_device *phydev)
 {
 	int err;
-	int temp;
 
 	/* SGMII-to-Copper mode initialization */
 	if (phydev->interface == PHY_INTERFACE_MODE_SGMII) {
@@ -887,16 +842,15 @@ static int m88e1510_config_init(struct phy_device *phydev)
 			return err;
 
 		/* In reg 20, write MODE[2:0] = 0x1 (SGMII to Copper) */
-		temp = phy_read(phydev, MII_88E1510_GEN_CTRL_REG_1);
-		temp &= ~MII_88E1510_GEN_CTRL_REG_1_MODE_MASK;
-		temp |= MII_88E1510_GEN_CTRL_REG_1_MODE_SGMII;
-		err = phy_write(phydev, MII_88E1510_GEN_CTRL_REG_1, temp);
+		err = phy_modify(phydev, MII_88E1510_GEN_CTRL_REG_1,
+				 MII_88E1510_GEN_CTRL_REG_1_MODE_MASK,
+				 MII_88E1510_GEN_CTRL_REG_1_MODE_SGMII);
 		if (err < 0)
 			return err;
 
 		/* PHY reset is necessary after changing MODE[2:0] */
-		temp |= MII_88E1510_GEN_CTRL_REG_1_RESET;
-		err = phy_write(phydev, MII_88E1510_GEN_CTRL_REG_1, temp);
+		err = phy_modify(phydev, MII_88E1510_GEN_CTRL_REG_1, 0,
+				 MII_88E1510_GEN_CTRL_REG_1_RESET);
 		if (err < 0)
 			return err;
 
@@ -1002,7 +956,6 @@ static int m88e1149_config_init(struct phy_device *phydev)
 
 static int m88e1145_config_init_rgmii(struct phy_device *phydev)
 {
-	int temp;
 	int err;
 
 	err = m88e1111_config_init_rgmii_delays(phydev);
@@ -1014,15 +967,9 @@ static int m88e1145_config_init_rgmii(struct phy_device *phydev)
 		if (err < 0)
 			return err;
 
-		temp = phy_read(phydev, 0x1e);
-		if (temp < 0)
-			return temp;
-
-		temp &= 0xf03f;
-		temp |= 2 << 9;	/* 36 ohm */
-		temp |= 2 << 6;	/* 39 ohm */
-
-		err = phy_write(phydev, 0x1e, temp);
+		err = phy_modify(phydev, 0x1e, 0x0fc0,
+				 2 << 9 | /* 36 ohm */
+				 2 << 6); /* 39 ohm */
 		if (err < 0)
 			return err;
 
@@ -1398,100 +1345,98 @@ static int m88e1121_did_interrupt(struct phy_device *phydev)
 static void m88e1318_get_wol(struct phy_device *phydev,
 			     struct ethtool_wolinfo *wol)
 {
+	int oldpage, ret = 0;
+
 	wol->supported = WAKE_MAGIC;
 	wol->wolopts = 0;
 
-	if (marvell_set_page(phydev, MII_MARVELL_WOL_PAGE) < 0)
-		return;
+	oldpage = phy_select_page(phydev, MII_MARVELL_WOL_PAGE);
+	if (oldpage < 0)
+		goto error;
 
-	if (phy_read(phydev, MII_88E1318S_PHY_WOL_CTRL) &
-	    MII_88E1318S_PHY_WOL_CTRL_MAGIC_PACKET_MATCH_ENABLE)
+	ret = __phy_read(phydev, MII_88E1318S_PHY_WOL_CTRL);
+	if (ret & MII_88E1318S_PHY_WOL_CTRL_MAGIC_PACKET_MATCH_ENABLE)
 		wol->wolopts |= WAKE_MAGIC;
 
-	if (marvell_set_page(phydev, MII_MARVELL_COPPER_PAGE) < 0)
-		return;
+error:
+	phy_restore_page(phydev, oldpage, ret);
 }
 
 static int m88e1318_set_wol(struct phy_device *phydev,
 			    struct ethtool_wolinfo *wol)
 {
-	int err, oldpage, temp;
+	int err = 0, oldpage;
 
-	oldpage = marvell_get_page(phydev);
+	oldpage = phy_save_page(phydev);
+	if (oldpage < 0)
+		goto error;
 
 	if (wol->wolopts & WAKE_MAGIC) {
 		/* Explicitly switch to page 0x00, just to be sure */
-		err = marvell_set_page(phydev, MII_MARVELL_COPPER_PAGE);
+		err = marvell_write_page(phydev, MII_MARVELL_COPPER_PAGE);
 		if (err < 0)
-			return err;
+			goto error;
 
 		/* Enable the WOL interrupt */
-		temp = phy_read(phydev, MII_88E1318S_PHY_CSIER);
-		temp |= MII_88E1318S_PHY_CSIER_WOL_EIE;
-		err = phy_write(phydev, MII_88E1318S_PHY_CSIER, temp);
+		err = __phy_modify(phydev, MII_88E1318S_PHY_CSIER, 0,
+				   MII_88E1318S_PHY_CSIER_WOL_EIE);
 		if (err < 0)
-			return err;
+			goto error;
 
-		err = marvell_set_page(phydev, MII_MARVELL_LED_PAGE);
+		err = marvell_write_page(phydev, MII_MARVELL_LED_PAGE);
 		if (err < 0)
-			return err;
+			goto error;
 
 		/* Setup LED[2] as interrupt pin (active low) */
-		temp = phy_read(phydev, MII_88E1318S_PHY_LED_TCR);
-		temp &= ~MII_88E1318S_PHY_LED_TCR_FORCE_INT;
-		temp |= MII_88E1318S_PHY_LED_TCR_INTn_ENABLE;
-		temp |= MII_88E1318S_PHY_LED_TCR_INT_ACTIVE_LOW;
-		err = phy_write(phydev, MII_88E1318S_PHY_LED_TCR, temp);
+		err = __phy_modify(phydev, MII_88E1318S_PHY_LED_TCR,
+				   MII_88E1318S_PHY_LED_TCR_FORCE_INT,
+				   MII_88E1318S_PHY_LED_TCR_INTn_ENABLE |
+				   MII_88E1318S_PHY_LED_TCR_INT_ACTIVE_LOW);
 		if (err < 0)
-			return err;
+			goto error;
 
-		err = marvell_set_page(phydev, MII_MARVELL_WOL_PAGE);
+		err = marvell_write_page(phydev, MII_MARVELL_WOL_PAGE);
 		if (err < 0)
-			return err;
+			goto error;
 
 		/* Store the device address for the magic packet */
-		err = phy_write(phydev, MII_88E1318S_PHY_MAGIC_PACKET_WORD2,
+		err = __phy_write(phydev, MII_88E1318S_PHY_MAGIC_PACKET_WORD2,
 				((phydev->attached_dev->dev_addr[5] << 8) |
 				 phydev->attached_dev->dev_addr[4]));
 		if (err < 0)
-			return err;
-		err = phy_write(phydev, MII_88E1318S_PHY_MAGIC_PACKET_WORD1,
+			goto error;
+		err = __phy_write(phydev, MII_88E1318S_PHY_MAGIC_PACKET_WORD1,
 				((phydev->attached_dev->dev_addr[3] << 8) |
 				 phydev->attached_dev->dev_addr[2]));
 		if (err < 0)
-			return err;
-		err = phy_write(phydev, MII_88E1318S_PHY_MAGIC_PACKET_WORD0,
+			goto error;
+		err = __phy_write(phydev, MII_88E1318S_PHY_MAGIC_PACKET_WORD0,
 				((phydev->attached_dev->dev_addr[1] << 8) |
 				 phydev->attached_dev->dev_addr[0]));
 		if (err < 0)
-			return err;
+			goto error;
 
 		/* Clear WOL status and enable magic packet matching */
-		temp = phy_read(phydev, MII_88E1318S_PHY_WOL_CTRL);
-		temp |= MII_88E1318S_PHY_WOL_CTRL_CLEAR_WOL_STATUS;
-		temp |= MII_88E1318S_PHY_WOL_CTRL_MAGIC_PACKET_MATCH_ENABLE;
-		err = phy_write(phydev, MII_88E1318S_PHY_WOL_CTRL, temp);
+		err = __phy_modify(phydev, MII_88E1318S_PHY_WOL_CTRL, 0,
+				   MII_88E1318S_PHY_WOL_CTRL_CLEAR_WOL_STATUS |
+				   MII_88E1318S_PHY_WOL_CTRL_MAGIC_PACKET_MATCH_ENABLE);
 		if (err < 0)
-			return err;
+			goto error;
 	} else {
-		err = marvell_set_page(phydev, MII_MARVELL_WOL_PAGE);
+		err = marvell_write_page(phydev, MII_MARVELL_WOL_PAGE);
 		if (err < 0)
-			return err;
+			goto error;
 
 		/* Clear WOL status and disable magic packet matching */
-		temp = phy_read(phydev, MII_88E1318S_PHY_WOL_CTRL);
-		temp |= MII_88E1318S_PHY_WOL_CTRL_CLEAR_WOL_STATUS;
-		temp &= ~MII_88E1318S_PHY_WOL_CTRL_MAGIC_PACKET_MATCH_ENABLE;
-		err = phy_write(phydev, MII_88E1318S_PHY_WOL_CTRL, temp);
+		err = __phy_modify(phydev, MII_88E1318S_PHY_WOL_CTRL,
+				   MII_88E1318S_PHY_WOL_CTRL_MAGIC_PACKET_MATCH_ENABLE,
+				   MII_88E1318S_PHY_WOL_CTRL_CLEAR_WOL_STATUS);
 		if (err < 0)
-			return err;
+			goto error;
 	}
 
-	err = marvell_set_page(phydev, oldpage);
-	if (err < 0)
-		return err;
-
-	return 0;
+error:
+	return phy_restore_page(phydev, oldpage, err);
 }
 
 static int marvell_get_sset_count(struct phy_device *phydev)
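
Note the split above: m88e1318_get_wol() uses phy_select_page(), which
locks the bus and switches to the requested page in one step, while
m88e1318_set_wol() uses phy_save_page(), which only locks, because the
function hops between pages itself via marvell_write_page(). Roughly:

	oldpage = phy_select_page(phydev, page);	/* lock + switch */
	oldpage = phy_save_page(phydev);		/* lock only */

Both pair with phy_restore_page(), which restores the page, unlocks, and
merges errors.
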
@@ -1519,14 +1464,10 @@ static u64 marvell_get_stat(struct phy_device *phydev, int i)
 {
 	struct marvell_hw_stat stat = marvell_hw_stats[i];
 	struct marvell_priv *priv = phydev->priv;
-	int oldpage, val;
+	int val;
 	u64 ret;
 
-	oldpage = marvell_get_set_page(phydev, stat.page);
-	if (oldpage < 0)
-		return UINT64_MAX;
-
-	val = phy_read(phydev, stat.reg);
+	val = phy_read_paged(phydev, stat.page, stat.reg);
 	if (val < 0) {
 		ret = UINT64_MAX;
 	} else {
@@ -1535,8 +1476,6 @@ static u64 marvell_get_stat(struct phy_device *phydev, int i)
 		ret = priv->stats[i];
 	}
 
-	marvell_set_page(phydev, oldpage);
-
 	return ret;
 }
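
phy_read_paged() wraps a single paged read; it presumably amounts to (a
sketch consistent with the helpers above):

	int phy_read_paged(struct phy_device *phydev, int page, u32 regnum)
	{
		int ret = 0;
		int oldpage = phy_select_page(phydev, page);

		if (oldpage >= 0)
			ret = __phy_read(phydev, regnum);

		return phy_restore_page(phydev, oldpage, ret);
	}

so marvell_get_stat() no longer saves and restores the page by hand.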
 
@@ -1553,51 +1492,44 @@ static void marvell_get_stats(struct phy_device *phydev,
 static int m88e1121_get_temp(struct phy_device *phydev, long *temp)
 {
 	int oldpage;
-	int ret;
+	int ret = 0;
 	int val;
 
 	*temp = 0;
 
-	mutex_lock(&phydev->lock);
-
-	oldpage = marvell_get_set_page(phydev, MII_MARVELL_MISC_TEST_PAGE);
-	if (oldpage < 0) {
-		mutex_unlock(&phydev->lock);
-		return oldpage;
-	}
+	oldpage = phy_select_page(phydev, MII_MARVELL_MISC_TEST_PAGE);
+	if (oldpage < 0)
+		goto error;
 
 	/* Enable temperature sensor */
-	ret = phy_read(phydev, MII_88E1121_MISC_TEST);
+	ret = __phy_read(phydev, MII_88E1121_MISC_TEST);
 	if (ret < 0)
 		goto error;
 
-	ret = phy_write(phydev, MII_88E1121_MISC_TEST,
-			ret | MII_88E1121_MISC_TEST_TEMP_SENSOR_EN);
+	ret = __phy_write(phydev, MII_88E1121_MISC_TEST,
+			  ret | MII_88E1121_MISC_TEST_TEMP_SENSOR_EN);
 	if (ret < 0)
 		goto error;
 
 	/* Wait for temperature to stabilize */
 	usleep_range(10000, 12000);
 
-	val = phy_read(phydev, MII_88E1121_MISC_TEST);
+	val = __phy_read(phydev, MII_88E1121_MISC_TEST);
 	if (val < 0) {
 		ret = val;
 		goto error;
 	}
 
 	/* Disable temperature sensor */
-	ret = phy_write(phydev, MII_88E1121_MISC_TEST,
-			ret & ~MII_88E1121_MISC_TEST_TEMP_SENSOR_EN);
+	ret = __phy_write(phydev, MII_88E1121_MISC_TEST,
+			  ret & ~MII_88E1121_MISC_TEST_TEMP_SENSOR_EN);
 	if (ret < 0)
 		goto error;
 
 	*temp = ((val & MII_88E1121_MISC_TEST_TEMP_MASK) - 5) * 5000;
 
 error:
-	marvell_set_page(phydev, oldpage);
-	mutex_unlock(&phydev->lock);
-
-	return ret;
+	return phy_restore_page(phydev, oldpage, ret);
 }
 
 static int m88e1121_hwmon_read(struct device *dev,
@@ -1671,118 +1603,64 @@ static const struct hwmon_chip_info m88e1121_hwmon_chip_info = {
 
 static int m88e1510_get_temp(struct phy_device *phydev, long *temp)
 {
-	int oldpage;
 	int ret;
 
 	*temp = 0;
 
-	mutex_lock(&phydev->lock);
-
-	oldpage = marvell_get_set_page(phydev, MII_MARVELL_MISC_TEST_PAGE);
-	if (oldpage < 0) {
-		mutex_unlock(&phydev->lock);
-		return oldpage;
-	}
-
-	ret = phy_read(phydev, MII_88E1510_TEMP_SENSOR);
+	ret = phy_read_paged(phydev, MII_MARVELL_MISC_TEST_PAGE,
+			     MII_88E1510_TEMP_SENSOR);
 	if (ret < 0)
-		goto error;
+		return ret;
 
 	*temp = ((ret & MII_88E1510_TEMP_SENSOR_MASK) - 25) * 1000;
 
-error:
-	marvell_set_page(phydev, oldpage);
-	mutex_unlock(&phydev->lock);
-
-	return ret;
+	return 0;
 }
 
 static int m88e1510_get_temp_critical(struct phy_device *phydev, long *temp)
 {
-	int oldpage;
 	int ret;
 
 	*temp = 0;
 
-	mutex_lock(&phydev->lock);
-
-	oldpage = marvell_get_set_page(phydev, MII_MARVELL_MISC_TEST_PAGE);
-	if (oldpage < 0) {
-		mutex_unlock(&phydev->lock);
-		return oldpage;
-	}
-
-	ret = phy_read(phydev, MII_88E1121_MISC_TEST);
+	ret = phy_read_paged(phydev, MII_MARVELL_MISC_TEST_PAGE,
+			     MII_88E1121_MISC_TEST);
 	if (ret < 0)
-		goto error;
+		return ret;
 
 	*temp = (((ret & MII_88E1510_MISC_TEST_TEMP_THRESHOLD_MASK) >>
 		  MII_88E1510_MISC_TEST_TEMP_THRESHOLD_SHIFT) * 5) - 25;
 	/* convert to mC */
 	*temp *= 1000;
 
-error:
-	marvell_set_page(phydev, oldpage);
-	mutex_unlock(&phydev->lock);
-
-	return ret;
+	return 0;
 }
 
 static int m88e1510_set_temp_critical(struct phy_device *phydev, long temp)
 {
-	int oldpage;
-	int ret;
-
-	mutex_lock(&phydev->lock);
-
-	oldpage = marvell_get_set_page(phydev, MII_MARVELL_MISC_TEST_PAGE);
-	if (oldpage < 0) {
-		mutex_unlock(&phydev->lock);
-		return oldpage;
-	}
-
-	ret = phy_read(phydev, MII_88E1121_MISC_TEST);
-	if (ret < 0)
-		goto error;
-
 	temp = temp / 1000;
 	temp = clamp_val(DIV_ROUND_CLOSEST(temp, 5) + 5, 0, 0x1f);
-	ret = phy_write(phydev, MII_88E1121_MISC_TEST,
-			(ret & ~MII_88E1510_MISC_TEST_TEMP_THRESHOLD_MASK) |
-			(temp << MII_88E1510_MISC_TEST_TEMP_THRESHOLD_SHIFT));
 
-error:
-	marvell_set_page(phydev, oldpage);
-	mutex_unlock(&phydev->lock);
-
-	return ret;
+	return phy_modify_paged(phydev, MII_MARVELL_MISC_TEST_PAGE,
+				MII_88E1121_MISC_TEST,
+				MII_88E1510_MISC_TEST_TEMP_THRESHOLD_MASK,
+				temp << MII_88E1510_MISC_TEST_TEMP_THRESHOLD_SHIFT);
 }
 
 static int m88e1510_get_temp_alarm(struct phy_device *phydev, long *alarm)
 {
-	int oldpage;
 	int ret;
 
 	*alarm = false;
 
-	mutex_lock(&phydev->lock);
-
-	oldpage = marvell_get_set_page(phydev, MII_MARVELL_MISC_TEST_PAGE);
-	if (oldpage < 0) {
-		mutex_unlock(&phydev->lock);
-		return oldpage;
-	}
-
-	ret = phy_read(phydev, MII_88E1121_MISC_TEST);
+	ret = phy_read_paged(phydev, MII_MARVELL_MISC_TEST_PAGE,
+			     MII_88E1121_MISC_TEST);
 	if (ret < 0)
-		goto error;
+		return ret;
+
 	*alarm = !!(ret & MII_88E1510_MISC_TEST_TEMP_IRQ);
 
-error:
-	marvell_set_page(phydev, oldpage);
-	mutex_unlock(&phydev->lock);
-
-	return ret;
+	return 0;
 }
 
 static int m88e1510_hwmon_read(struct device *dev,
@@ -1871,6 +1749,123 @@ static const struct hwmon_chip_info m88e1510_hwmon_chip_info = {
 	.info = m88e1510_hwmon_info,
 };
 
+static int m88e6390_get_temp(struct phy_device *phydev, long *temp)
+{
+	int sum = 0;
+	int oldpage;
+	int ret = 0;
+	int i;
+
+	*temp = 0;
+
+	oldpage = phy_select_page(phydev, MII_MARVELL_MISC_TEST_PAGE);
+	if (oldpage < 0)
+		goto error;
+
+	/* Enable temperature sensor */
+	ret = __phy_read(phydev, MII_88E6390_MISC_TEST);
+	if (ret < 0)
+		goto error;
+
+	ret = ret & ~MII_88E6390_MISC_TEST_SAMPLE_MASK;
+	ret |= MII_88E6390_MISC_TEST_SAMPLE_ENABLE |
+		MII_88E6390_MISC_TEST_SAMPLE_1S;
+
+	ret = __phy_write(phydev, MII_88E6390_MISC_TEST, ret);
+	if (ret < 0)
+		goto error;
+
+	/* Wait for temperature to stabilize */
+	usleep_range(10000, 12000);
+
+	/* Reading the temperature sensor is subject to an erratum: the
+	 * register must be read several times and the results averaged.
+	 */
+	for (i = 0; i < MII_88E6390_TEMP_SENSOR_SAMPLES; i++) {
+		ret = __phy_read(phydev, MII_88E6390_TEMP_SENSOR);
+		if (ret < 0)
+			goto error;
+		sum += ret & MII_88E6390_TEMP_SENSOR_MASK;
+	}
+
+	sum /= MII_88E6390_TEMP_SENSOR_SAMPLES;
+	*temp = (sum - 75) * 1000;
+
+	/* Disable temperature sensor */
+	ret = __phy_read(phydev, MII_88E6390_MISC_TEST);
+	if (ret < 0)
+		goto error;
+
+	ret = ret & ~MII_88E6390_MISC_TEST_SAMPLE_MASK;
+	ret |= MII_88E6390_MISC_TEST_SAMPLE_DISABLE;
+
+	ret = __phy_write(phydev, MII_88E6390_MISC_TEST, ret);
+
+error:
+	phy_restore_page(phydev, oldpage, ret);
+
+	return ret;
+}
+
+static int m88e6390_hwmon_read(struct device *dev,
+			       enum hwmon_sensor_types type,
+			       u32 attr, int channel, long *temp)
+{
+	struct phy_device *phydev = dev_get_drvdata(dev);
+	int err;
+
+	switch (attr) {
+	case hwmon_temp_input:
+		err = m88e6390_get_temp(phydev, temp);
+		break;
+	default:
+		return -EOPNOTSUPP;
+	}
+
+	return err;
+}
+
+static umode_t m88e6390_hwmon_is_visible(const void *data,
+					 enum hwmon_sensor_types type,
+					 u32 attr, int channel)
+{
+	if (type != hwmon_temp)
+		return 0;
+
+	switch (attr) {
+	case hwmon_temp_input:
+		return 0444;
+	default:
+		return 0;
+	}
+}
+
+static u32 m88e6390_hwmon_temp_config[] = {
+	HWMON_T_INPUT,
+	0
+};
+
+static const struct hwmon_channel_info m88e6390_hwmon_temp = {
+	.type = hwmon_temp,
+	.config = m88e6390_hwmon_temp_config,
+};
+
+static const struct hwmon_channel_info *m88e6390_hwmon_info[] = {
+	&m88e1121_hwmon_chip,
+	&m88e6390_hwmon_temp,
+	NULL
+};
+
+static const struct hwmon_ops m88e6390_hwmon_hwmon_ops = {
+	.is_visible = m88e6390_hwmon_is_visible,
+	.read = m88e6390_hwmon_read,
+};
+
+static const struct hwmon_chip_info m88e6390_hwmon_chip_info = {
+	.ops = &m88e6390_hwmon_hwmon_ops,
+	.info = m88e6390_hwmon_info,
+};
+
 static int marvell_hwmon_name(struct phy_device *phydev)
 {
 	struct marvell_priv *priv = phydev->priv;
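
The new chip info is registered through the same path as the existing
m88e1121/m88e1510 variants: marvell_hwmon_probe(), defined earlier in this
file, presumably ends up doing something along the lines of

	priv->hwmon_dev = devm_hwmon_device_register_with_info(
			&phydev->mdio.dev, priv->hwmon_name, phydev,
			chip, NULL);

with chip being &m88e6390_hwmon_chip_info in the m88e6390 case.
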
@@ -1917,6 +1912,11 @@ static int m88e1510_hwmon_probe(struct phy_device *phydev)
 {
 	return marvell_hwmon_probe(phydev, &m88e1510_hwmon_chip_info);
 }
+
+static int m88e6390_hwmon_probe(struct phy_device *phydev)
+{
+	return marvell_hwmon_probe(phydev, &m88e6390_hwmon_chip_info);
+}
 #else
 static int m88e1121_hwmon_probe(struct phy_device *phydev)
 {
@@ -1927,6 +1927,11 @@ static int m88e1510_hwmon_probe(struct phy_device *phydev)
 {
 	return 0;
 }
+
+static int m88e6390_hwmon_probe(struct phy_device *phydev)
+{
+	return 0;
+}
 #endif
 
 static int marvell_probe(struct phy_device *phydev)
@@ -1964,6 +1969,17 @@ static int m88e1510_probe(struct phy_device *phydev)
 	return m88e1510_hwmon_probe(phydev);
 }
 
+static int m88e6390_probe(struct phy_device *phydev)
+{
+	int err;
+
+	err = marvell_probe(phydev);
+	if (err)
+		return err;
+
+	return m88e6390_hwmon_probe(phydev);
+}
+
 static struct phy_driver marvell_drivers[] = {
 	{
 		.phy_id = MARVELL_PHY_ID_88E1101,
@@ -1974,11 +1990,12 @@ static struct phy_driver marvell_drivers[] = {
 		.probe = marvell_probe,
 		.config_init = &marvell_config_init,
 		.config_aneg = &m88e1101_config_aneg,
-		.read_status = &genphy_read_status,
 		.ack_interrupt = &marvell_ack_interrupt,
 		.config_intr = &marvell_config_intr,
 		.resume = &genphy_resume,
 		.suspend = &genphy_suspend,
+		.read_page = marvell_read_page,
+		.write_page = marvell_write_page,
 		.get_sset_count = marvell_get_sset_count,
 		.get_strings = marvell_get_strings,
 		.get_stats = marvell_get_stats,
@@ -1992,11 +2009,12 @@ static struct phy_driver marvell_drivers[] = {
 		.probe = marvell_probe,
 		.config_init = &m88e1111_config_init,
 		.config_aneg = &marvell_config_aneg,
-		.read_status = &genphy_read_status,
 		.ack_interrupt = &marvell_ack_interrupt,
 		.config_intr = &marvell_config_intr,
 		.resume = &genphy_resume,
 		.suspend = &genphy_suspend,
+		.read_page = marvell_read_page,
+		.write_page = marvell_write_page,
 		.get_sset_count = marvell_get_sset_count,
 		.get_strings = marvell_get_strings,
 		.get_stats = marvell_get_stats,
@@ -2015,6 +2033,8 @@ static struct phy_driver marvell_drivers[] = {
 		.config_intr = &marvell_config_intr,
 		.resume = &genphy_resume,
 		.suspend = &genphy_suspend,
+		.read_page = marvell_read_page,
+		.write_page = marvell_write_page,
 		.get_sset_count = marvell_get_sset_count,
 		.get_strings = marvell_get_strings,
 		.get_stats = marvell_get_stats,
@@ -2028,11 +2048,12 @@ static struct phy_driver marvell_drivers[] = {
 		.probe = marvell_probe,
 		.config_init = &m88e1118_config_init,
 		.config_aneg = &m88e1118_config_aneg,
-		.read_status = &genphy_read_status,
 		.ack_interrupt = &marvell_ack_interrupt,
 		.config_intr = &marvell_config_intr,
 		.resume = &genphy_resume,
 		.suspend = &genphy_suspend,
+		.read_page = marvell_read_page,
+		.write_page = marvell_write_page,
 		.get_sset_count = marvell_get_sset_count,
 		.get_strings = marvell_get_strings,
 		.get_stats = marvell_get_stats,
@@ -2052,6 +2073,8 @@ static struct phy_driver marvell_drivers[] = {
 		.did_interrupt = &m88e1121_did_interrupt,
 		.resume = &genphy_resume,
 		.suspend = &genphy_suspend,
+		.read_page = marvell_read_page,
+		.write_page = marvell_write_page,
 		.get_sset_count = marvell_get_sset_count,
 		.get_strings = marvell_get_strings,
 		.get_stats = marvell_get_stats,
@@ -2073,6 +2096,8 @@ static struct phy_driver marvell_drivers[] = {
 		.set_wol = &m88e1318_set_wol,
 		.resume = &genphy_resume,
 		.suspend = &genphy_suspend,
+		.read_page = marvell_read_page,
+		.write_page = marvell_write_page,
 		.get_sset_count = marvell_get_sset_count,
 		.get_strings = marvell_get_strings,
 		.get_stats = marvell_get_stats,
@@ -2091,6 +2116,8 @@ static struct phy_driver marvell_drivers[] = {
 		.config_intr = &marvell_config_intr,
 		.resume = &genphy_resume,
 		.suspend = &genphy_suspend,
+		.read_page = marvell_read_page,
+		.write_page = marvell_write_page,
 		.get_sset_count = marvell_get_sset_count,
 		.get_strings = marvell_get_strings,
 		.get_stats = marvell_get_stats,
@@ -2104,11 +2131,12 @@ static struct phy_driver marvell_drivers[] = {
 		.probe = marvell_probe,
 		.config_init = &m88e1149_config_init,
 		.config_aneg = &m88e1118_config_aneg,
-		.read_status = &genphy_read_status,
 		.ack_interrupt = &marvell_ack_interrupt,
 		.config_intr = &marvell_config_intr,
 		.resume = &genphy_resume,
 		.suspend = &genphy_suspend,
+		.read_page = marvell_read_page,
+		.write_page = marvell_write_page,
 		.get_sset_count = marvell_get_sset_count,
 		.get_strings = marvell_get_strings,
 		.get_stats = marvell_get_stats,
@@ -2122,11 +2150,12 @@ static struct phy_driver marvell_drivers[] = {
 		.probe = marvell_probe,
 		.config_init = &m88e1111_config_init,
 		.config_aneg = &marvell_config_aneg,
-		.read_status = &genphy_read_status,
 		.ack_interrupt = &marvell_ack_interrupt,
 		.config_intr = &marvell_config_intr,
 		.resume = &genphy_resume,
 		.suspend = &genphy_suspend,
+		.read_page = marvell_read_page,
+		.write_page = marvell_write_page,
 		.get_sset_count = marvell_get_sset_count,
 		.get_strings = marvell_get_strings,
 		.get_stats = marvell_get_stats,
@@ -2139,12 +2168,12 @@ static struct phy_driver marvell_drivers[] = {
 		.flags = PHY_HAS_INTERRUPT,
 		.probe = marvell_probe,
 		.config_init = &m88e1116r_config_init,
-		.config_aneg = &genphy_config_aneg,
-		.read_status = &genphy_read_status,
 		.ack_interrupt = &marvell_ack_interrupt,
 		.config_intr = &marvell_config_intr,
 		.resume = &genphy_resume,
 		.suspend = &genphy_suspend,
+		.read_page = marvell_read_page,
+		.write_page = marvell_write_page,
 		.get_sset_count = marvell_get_sset_count,
 		.get_strings = marvell_get_strings,
 		.get_stats = marvell_get_stats,
@@ -2166,6 +2195,8 @@ static struct phy_driver marvell_drivers[] = {
 		.set_wol = &m88e1318_set_wol,
 		.resume = &marvell_resume,
 		.suspend = &marvell_suspend,
+		.read_page = marvell_read_page,
+		.write_page = marvell_write_page,
 		.get_sset_count = marvell_get_sset_count,
 		.get_strings = marvell_get_strings,
 		.get_stats = marvell_get_stats,
@@ -2186,6 +2217,8 @@ static struct phy_driver marvell_drivers[] = {
 		.did_interrupt = &m88e1121_did_interrupt,
 		.resume = &genphy_resume,
 		.suspend = &genphy_suspend,
+		.read_page = marvell_read_page,
+		.write_page = marvell_write_page,
 		.get_sset_count = marvell_get_sset_count,
 		.get_strings = marvell_get_strings,
 		.get_stats = marvell_get_stats,
@@ -2205,6 +2238,8 @@ static struct phy_driver marvell_drivers[] = {
 		.did_interrupt = &m88e1121_did_interrupt,
 		.resume = &genphy_resume,
 		.suspend = &genphy_suspend,
+		.read_page = marvell_read_page,
+		.write_page = marvell_write_page,
 		.get_sset_count = marvell_get_sset_count,
 		.get_strings = marvell_get_strings,
 		.get_stats = marvell_get_stats,
@@ -2216,7 +2251,6 @@ static struct phy_driver marvell_drivers[] = {
 		.features = PHY_BASIC_FEATURES,
 		.flags = PHY_HAS_INTERRUPT,
 		.probe = marvell_probe,
-		.config_aneg = &genphy_config_aneg,
 		.config_init = &m88e3016_config_init,
 		.aneg_done = &marvell_aneg_done,
 		.read_status = &marvell_read_status,
@@ -2225,6 +2259,8 @@ static struct phy_driver marvell_drivers[] = {
 		.did_interrupt = &m88e1121_did_interrupt,
 		.resume = &genphy_resume,
 		.suspend = &genphy_suspend,
+		.read_page = marvell_read_page,
+		.write_page = marvell_write_page,
 		.get_sset_count = marvell_get_sset_count,
 		.get_strings = marvell_get_strings,
 		.get_stats = marvell_get_stats,
@@ -2235,7 +2271,7 @@ static struct phy_driver marvell_drivers[] = {
 		.name = "Marvell 88E6390",
 		.features = PHY_GBIT_FEATURES,
 		.flags = PHY_HAS_INTERRUPT,
-		.probe = m88e1510_probe,
+		.probe = m88e6390_probe,
 		.config_init = &marvell_config_init,
 		.config_aneg = &m88e1510_config_aneg,
 		.read_status = &marvell_read_status,
@@ -2244,6 +2280,8 @@ static struct phy_driver marvell_drivers[] = {
 		.did_interrupt = &m88e1121_did_interrupt,
 		.resume = &genphy_resume,
 		.suspend = &genphy_suspend,
+		.read_page = marvell_read_page,
+		.write_page = marvell_write_page,
 		.get_sset_count = marvell_get_sset_count,
 		.get_strings = marvell_get_strings,
 		.get_stats = marvell_get_stats,
diff --git a/drivers/net/phy/marvell10g.c b/drivers/net/phy/marvell10g.c
index 21b3f36..8a0bd98 100644
--- a/drivers/net/phy/marvell10g.c
+++ b/drivers/net/phy/marvell10g.c
@@ -6,12 +6,18 @@
  *
 * There appear to be several different data paths through the PHY which
  * are automatically managed by the PHY.  The following has been determined
- * via observation and experimentation:
+ * via observation and experimentation for a setup using single-lane Serdes:
  *
  *       SGMII PHYXS -- BASE-T PCS -- 10G PMA -- AN -- Copper (for <= 1G)
  *  10GBASE-KR PHYXS -- BASE-T PCS -- 10G PMA -- AN -- Copper (for 10G)
  *  10GBASE-KR PHYXS -- BASE-R PCS -- Fiber
  *
+ * With XAUI, observation shows:
+ *
+ *        XAUI PHYXS -- <appropriate PCS as above>
+ *
+ * and no switching of the host interface mode occurs.
+ *
  * If both the fiber and copper ports are connected, the first to gain
  * link takes priority and the other port is completely locked out.
  */
@@ -23,19 +29,17 @@ enum {
 	MV_PCS_BASE_R		= 0x1000,
 	MV_PCS_1000BASEX	= 0x2000,
 
+	MV_PCS_PAIRSWAP		= 0x8182,
+	MV_PCS_PAIRSWAP_MASK	= 0x0003,
+	MV_PCS_PAIRSWAP_AB	= 0x0002,
+	MV_PCS_PAIRSWAP_NONE	= 0x0003,
+
 	/* These registers appear at 0x800X and 0xa00X - the 0xa00X control
 	 * registers appear to set themselves to the 0x800X when AN is
 	 * restarted, but status registers appear readable from either.
 	 */
 	MV_AN_CTRL1000		= 0x8000, /* 1000base-T control register */
 	MV_AN_STAT1000		= 0x8001, /* 1000base-T status register */
-
-	/* This register appears to reflect the copper status */
-	MV_AN_RESULT		= 0xa016,
-	MV_AN_RESULT_SPD_10	= BIT(12),
-	MV_AN_RESULT_SPD_100	= BIT(13),
-	MV_AN_RESULT_SPD_1000	= BIT(14),
-	MV_AN_RESULT_SPD_10000	= BIT(15),
 };
 
 static int mv3310_modify(struct phy_device *phydev, int devad, u16 reg,
@@ -84,7 +88,6 @@ static int mv3310_config_init(struct phy_device *phydev)
 
 	/* Check that the PHY interface type is compatible */
 	if (phydev->interface != PHY_INTERFACE_MODE_SGMII &&
-	    phydev->interface != PHY_INTERFACE_MODE_XGMII &&
 	    phydev->interface != PHY_INTERFACE_MODE_XAUI &&
 	    phydev->interface != PHY_INTERFACE_MODE_RXAUI &&
 	    phydev->interface != PHY_INTERFACE_MODE_10GKR)
@@ -150,12 +153,18 @@ static int mv3310_config_init(struct phy_device *phydev)
 		if (val & MDIO_PMA_EXTABLE_1000BKX)
 			__set_bit(ETHTOOL_LINK_MODE_1000baseKX_Full_BIT,
 				  supported);
-		if (val & MDIO_PMA_EXTABLE_100BTX)
+		if (val & MDIO_PMA_EXTABLE_100BTX) {
 			__set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT,
 				  supported);
-		if (val & MDIO_PMA_EXTABLE_10BT)
+			__set_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT,
+				  supported);
+		}
+		if (val & MDIO_PMA_EXTABLE_10BT) {
 			__set_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT,
 				  supported);
+			__set_bit(ETHTOOL_LINK_MODE_10baseT_Half_BIT,
+				  supported);
+		}
 	}
 
 	if (!ethtool_convert_link_mode_to_legacy_u32(&mask, supported))
@@ -175,6 +184,9 @@ static int mv3310_config_aneg(struct phy_device *phydev)
 	u32 advertising;
 	int ret;
 
+	/* We don't support manual MDI control */
+	phydev->mdix_ctrl = ETH_TP_MDI_AUTO;
+
 	if (phydev->autoneg == AUTONEG_DISABLE) {
 		ret = genphy_c45_pma_setup_forced(phydev);
 		if (ret < 0)
@@ -233,6 +245,24 @@ static int mv3310_aneg_done(struct phy_device *phydev)
 	return genphy_c45_aneg_done(phydev);
 }
 
+static void mv3310_update_interface(struct phy_device *phydev)
+{
+	if ((phydev->interface == PHY_INTERFACE_MODE_SGMII ||
+	     phydev->interface == PHY_INTERFACE_MODE_10GKR) && phydev->link) {
+		/* The PHY automatically switches its serdes interface (and
+		 * active PHYXS instance) between Cisco SGMII and 10GBase-KR
+		 * modes according to the speed.  Florian suggests setting
+		 * phydev->interface to communicate this to the MAC. Only do
+		 * this if we are already in either SGMII or 10GBase-KR mode.
+		 */
+		if (phydev->speed == SPEED_10000)
+			phydev->interface = PHY_INTERFACE_MODE_10GKR;
+		else if (phydev->speed >= SPEED_10 &&
+			 phydev->speed < SPEED_10000)
+			phydev->interface = PHY_INTERFACE_MODE_SGMII;
+	}
+}
+
 /* 10GBASE-ER,LR,LRM,SR do not support autonegotiation. */
 static int mv3310_read_10gbr_status(struct phy_device *phydev)
 {
@@ -240,8 +270,7 @@ static int mv3310_read_10gbr_status(struct phy_device *phydev)
 	phydev->speed = SPEED_10000;
 	phydev->duplex = DUPLEX_FULL;
 
-	if (phydev->interface == PHY_INTERFACE_MODE_SGMII)
-		phydev->interface = PHY_INTERFACE_MODE_10GKR;
+	mv3310_update_interface(phydev);
 
 	return 0;
 }
@@ -264,6 +293,7 @@ static int mv3310_read_status(struct phy_device *phydev)
 	phydev->link = 0;
 	phydev->pause = 0;
 	phydev->asym_pause = 0;
+	phydev->mdix = 0;
 
 	val = phy_read_mmd(phydev, MDIO_MMD_PCS, MV_PCS_BASE_R + MDIO_STAT1);
 	if (val < 0)
@@ -294,22 +324,8 @@ static int mv3310_read_status(struct phy_device *phydev)
 
 		phydev->lp_advertising |= mii_stat1000_to_ethtool_lpa_t(val);
 
-		if (phydev->autoneg == AUTONEG_ENABLE) {
-			val = phy_read_mmd(phydev, MDIO_MMD_AN, MV_AN_RESULT);
-			if (val < 0)
-				return val;
-
-			if (val & MV_AN_RESULT_SPD_10000)
-				phydev->speed = SPEED_10000;
-			else if (val & MV_AN_RESULT_SPD_1000)
-				phydev->speed = SPEED_1000;
-			else if (val & MV_AN_RESULT_SPD_100)
-				phydev->speed = SPEED_100;
-			else if (val & MV_AN_RESULT_SPD_10)
-				phydev->speed = SPEED_10;
-
-			phydev->duplex = DUPLEX_FULL;
-		}
+		if (phydev->autoneg == AUTONEG_ENABLE)
+			phy_resolve_aneg_linkmode(phydev);
 	}
 
 	if (phydev->autoneg != AUTONEG_ENABLE) {
@@ -318,21 +334,30 @@ static int mv3310_read_status(struct phy_device *phydev)
 			return val;
 	}
 
-	if ((phydev->interface == PHY_INTERFACE_MODE_SGMII ||
-	     phydev->interface == PHY_INTERFACE_MODE_10GKR) && phydev->link) {
-		/* The PHY automatically switches its serdes interface (and
-		 * active PHYXS instance) between Cisco SGMII and 10GBase-KR
-		 * modes according to the speed.  Florian suggests setting
-		 * phydev->interface to communicate this to the MAC. Only do
-		 * this if we are already in either SGMII or 10GBase-KR mode.
-		 */
-		if (phydev->speed == SPEED_10000)
-			phydev->interface = PHY_INTERFACE_MODE_10GKR;
-		else if (phydev->speed >= SPEED_10 &&
-			 phydev->speed < SPEED_10000)
-			phydev->interface = PHY_INTERFACE_MODE_SGMII;
+	if (phydev->speed == SPEED_10000) {
+		val = genphy_c45_read_mdix(phydev);
+		if (val < 0)
+			return val;
+	} else {
+		val = phy_read_mmd(phydev, MDIO_MMD_PCS, MV_PCS_PAIRSWAP);
+		if (val < 0)
+			return val;
+
+		switch (val & MV_PCS_PAIRSWAP_MASK) {
+		case MV_PCS_PAIRSWAP_AB:
+			phydev->mdix = ETH_TP_MDI_X;
+			break;
+		case MV_PCS_PAIRSWAP_NONE:
+			phydev->mdix = ETH_TP_MDI;
+			break;
+		default:
+			phydev->mdix = ETH_TP_MDI_INVALID;
+			break;
+		}
 	}
 
+	mv3310_update_interface(phydev);
+
 	return 0;
 }
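
phy_resolve_aneg_linkmode() replaces the vendor-specific MV_AN_RESULT
decoding removed above: it derives speed and duplex from the modes both
ends advertised. Conceptually (a truncated sketch):

	u32 common = phydev->advertising & phydev->lp_advertising;

	if (common & ADVERTISED_10000baseT_Full) {
		phydev->speed = SPEED_10000;
		phydev->duplex = DUPLEX_FULL;
	} else if (common & ADVERTISED_1000baseT_Full) {
		phydev->speed = SPEED_1000;
		phydev->duplex = DUPLEX_FULL;
	} /* ... and so on down through 100/10 and half duplex */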
 
@@ -342,7 +367,9 @@ static struct phy_driver mv3310_drivers[] = {
 		.phy_id_mask	= MARVELL_PHY_ID_MASK,
 		.name		= "mv88x3310",
 		.features	= SUPPORTED_10baseT_Full |
+				  SUPPORTED_10baseT_Half |
 				  SUPPORTED_100baseT_Full |
+				  SUPPORTED_100baseT_Half |
 				  SUPPORTED_1000baseT_Full |
 				  SUPPORTED_Autoneg |
 				  SUPPORTED_TP |
diff --git a/drivers/net/phy/mdio-bcm-unimac.c b/drivers/net/phy/mdio-bcm-unimac.c
index 08e0647..8d37066 100644
--- a/drivers/net/phy/mdio-bcm-unimac.c
+++ b/drivers/net/phy/mdio-bcm-unimac.c
@@ -205,6 +205,8 @@ static int unimac_mdio_probe(struct platform_device *pdev)
 		return -ENOMEM;
 
 	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (!r)
+		return -EINVAL;
 
 	/* Just ioremap, as this MDIO block is usually integrated into an
 	 * Ethernet MAC controller register range
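
Without the new check, a node missing its memory resource would hand a
NULL pointer to the subsequent ioremap and oops on r->start. The guarded
pattern is simply:

	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!r)
		return -EINVAL;

	/* only now is r->start / resource_size(r) safe to use */
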
diff --git a/drivers/net/phy/mdio_bus.c b/drivers/net/phy/mdio_bus.c
index 54d00a1..88272b3 100644
--- a/drivers/net/phy/mdio_bus.c
+++ b/drivers/net/phy/mdio_bus.c
@@ -38,6 +38,7 @@
 #include <linux/phy.h>
 #include <linux/io.h>
 #include <linux/uaccess.h>
+#include <linux/gpio/consumer.h>
 
 #include <asm/irq.h>
 
@@ -46,11 +47,41 @@
 
 #include "mdio-boardinfo.h"
 
+static int mdiobus_register_gpiod(struct mdio_device *mdiodev)
+{
+	struct gpio_desc *gpiod = NULL;
+
+	/* Deassert the optional reset signal */
+	if (mdiodev->dev.of_node)
+		gpiod = fwnode_get_named_gpiod(&mdiodev->dev.of_node->fwnode,
+					       "reset-gpios", 0, GPIOD_OUT_LOW,
+					       "PHY reset");
+	if (PTR_ERR(gpiod) == -ENOENT)
+		gpiod = NULL;
+	else if (IS_ERR(gpiod))
+		return PTR_ERR(gpiod);
+
+	mdiodev->reset = gpiod;
+
+	/* Assert the reset signal again */
+	mdio_device_reset(mdiodev, 1);
+
+	return 0;
+}
+
 int mdiobus_register_device(struct mdio_device *mdiodev)
 {
+	int err;
+
 	if (mdiodev->bus->mdio_map[mdiodev->addr])
 		return -EBUSY;
 
+	if (mdiodev->flags & MDIO_DEVICE_FLAG_PHY) {
+		err = mdiobus_register_gpiod(mdiodev);
+		if (err)
+			return err;
+	}
+
 	mdiodev->bus->mdio_map[mdiodev->addr] = mdiodev;
 
 	return 0;
@@ -421,6 +452,9 @@ void mdiobus_unregister(struct mii_bus *bus)
 		if (!mdiodev)
 			continue;
 
+		if (mdiodev->reset)
+			gpiod_put(mdiodev->reset);
+
 		mdiodev->device_remove(mdiodev);
 		mdiodev->device_free(mdiodev);
 	}
@@ -494,6 +528,55 @@ struct phy_device *mdiobus_scan(struct mii_bus *bus, int addr)
 EXPORT_SYMBOL(mdiobus_scan);
 
 /**
+ * __mdiobus_read - Unlocked version of the mdiobus_read function
+ * @bus: the mii_bus struct
+ * @addr: the phy address
+ * @regnum: register number to read
+ *
+ * Read an MDIO bus register. Caller must hold the mdio bus lock.
+ *
+ * NOTE: MUST NOT be called from interrupt context.
+ */
+int __mdiobus_read(struct mii_bus *bus, int addr, u32 regnum)
+{
+	int retval;
+
+	WARN_ON_ONCE(!mutex_is_locked(&bus->mdio_lock));
+
+	retval = bus->read(bus, addr, regnum);
+
+	trace_mdio_access(bus, 1, addr, regnum, retval, retval);
+
+	return retval;
+}
+EXPORT_SYMBOL(__mdiobus_read);
+
+/**
+ * __mdiobus_write - Unlocked version of the mdiobus_write function
+ * @bus: the mii_bus struct
+ * @addr: the phy address
+ * @regnum: register number to write
+ * @val: value to write to @regnum
+ *
+ * Write an MDIO bus register. Caller must hold the mdio bus lock.
+ *
+ * NOTE: MUST NOT be called from interrupt context.
+ */
+int __mdiobus_write(struct mii_bus *bus, int addr, u32 regnum, u16 val)
+{
+	int err;
+
+	WARN_ON_ONCE(!mutex_is_locked(&bus->mdio_lock));
+
+	err = bus->write(bus, addr, regnum, val);
+
+	trace_mdio_access(bus, 0, addr, regnum, val, err);
+
+	return err;
+}
+EXPORT_SYMBOL(__mdiobus_write);
+
+/**
  * mdiobus_read_nested - Nested version of the mdiobus_read function
  * @bus: the mii_bus struct
  * @addr: the phy address
@@ -513,11 +596,9 @@ int mdiobus_read_nested(struct mii_bus *bus, int addr, u32 regnum)
 	BUG_ON(in_interrupt());
 
 	mutex_lock_nested(&bus->mdio_lock, MDIO_MUTEX_NESTED);
-	retval = bus->read(bus, addr, regnum);
+	retval = __mdiobus_read(bus, addr, regnum);
 	mutex_unlock(&bus->mdio_lock);
 
-	trace_mdio_access(bus, 1, addr, regnum, retval, retval);
-
 	return retval;
 }
 EXPORT_SYMBOL(mdiobus_read_nested);
@@ -539,11 +620,9 @@ int mdiobus_read(struct mii_bus *bus, int addr, u32 regnum)
 	BUG_ON(in_interrupt());
 
 	mutex_lock(&bus->mdio_lock);
-	retval = bus->read(bus, addr, regnum);
+	retval = __mdiobus_read(bus, addr, regnum);
 	mutex_unlock(&bus->mdio_lock);
 
-	trace_mdio_access(bus, 1, addr, regnum, retval, retval);
-
 	return retval;
 }
 EXPORT_SYMBOL(mdiobus_read);
@@ -569,11 +648,9 @@ int mdiobus_write_nested(struct mii_bus *bus, int addr, u32 regnum, u16 val)
 	BUG_ON(in_interrupt());
 
 	mutex_lock_nested(&bus->mdio_lock, MDIO_MUTEX_NESTED);
-	err = bus->write(bus, addr, regnum, val);
+	err = __mdiobus_write(bus, addr, regnum, val);
 	mutex_unlock(&bus->mdio_lock);
 
-	trace_mdio_access(bus, 0, addr, regnum, val, err);
-
 	return err;
 }
 EXPORT_SYMBOL(mdiobus_write_nested);
@@ -596,11 +673,9 @@ int mdiobus_write(struct mii_bus *bus, int addr, u32 regnum, u16 val)
 	BUG_ON(in_interrupt());
 
 	mutex_lock(&bus->mdio_lock);
-	err = bus->write(bus, addr, regnum, val);
+	err = __mdiobus_write(bus, addr, regnum, val);
 	mutex_unlock(&bus->mdio_lock);
 
-	trace_mdio_access(bus, 0, addr, regnum, val, err);
-
 	return err;
 }
 EXPORT_SYMBOL(mdiobus_write);
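
The unlocked accessors exist so that paged helpers can batch several bus
operations under a single mdio_lock acquisition. The __phy_read() and
__phy_write() helpers used in the marvell changes above presumably reduce
to them, along the lines of:

	static inline int __phy_read(struct phy_device *phydev, u32 regnum)
	{
		return __mdiobus_read(phydev->mdio.bus, phydev->mdio.addr,
				      regnum);
	}
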
diff --git a/drivers/net/phy/mdio_device.c b/drivers/net/phy/mdio_device.c
index e24f289..c924700 100644
--- a/drivers/net/phy/mdio_device.c
+++ b/drivers/net/phy/mdio_device.c
@@ -12,6 +12,8 @@
 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 
 #include <linux/errno.h>
+#include <linux/gpio.h>
+#include <linux/gpio/consumer.h>
 #include <linux/init.h>
 #include <linux/interrupt.h>
 #include <linux/kernel.h>
@@ -22,6 +24,7 @@
 #include <linux/slab.h>
 #include <linux/string.h>
 #include <linux/unistd.h>
+#include <linux/delay.h>
 
 void mdio_device_free(struct mdio_device *mdiodev)
 {
@@ -114,6 +117,21 @@ void mdio_device_remove(struct mdio_device *mdiodev)
 }
 EXPORT_SYMBOL(mdio_device_remove);
 
+void mdio_device_reset(struct mdio_device *mdiodev, int value)
+{
+	unsigned int d;
+
+	if (!mdiodev->reset)
+		return;
+
+	gpiod_set_value(mdiodev->reset, value);
+
+	d = value ? mdiodev->reset_assert_delay : mdiodev->reset_deassert_delay;
+	if (d)
+		usleep_range(d, d + max_t(unsigned int, d / 10, 100));
+}
+EXPORT_SYMBOL(mdio_device_reset);
+
 /**
  * mdio_probe - probe an MDIO device
  * @dev: device to probe
@@ -128,8 +146,16 @@ static int mdio_probe(struct device *dev)
 	struct mdio_driver *mdiodrv = to_mdio_driver(drv);
 	int err = 0;
 
-	if (mdiodrv->probe)
+	if (mdiodrv->probe) {
+		/* Deassert the reset signal */
+		mdio_device_reset(mdiodev, 0);
+
 		err = mdiodrv->probe(mdiodev);
+		if (err) {
+			/* Assert the reset signal */
+			mdio_device_reset(mdiodev, 1);
+		}
+	}
 
 	return err;
 }
@@ -140,9 +166,13 @@ static int mdio_remove(struct device *dev)
 	struct device_driver *drv = mdiodev->dev.driver;
 	struct mdio_driver *mdiodrv = to_mdio_driver(drv);
 
-	if (mdiodrv->remove)
+	if (mdiodrv->remove) {
 		mdiodrv->remove(mdiodev);
 
+		/* Assert the reset signal */
+		mdio_device_reset(mdiodev, 1);
+	}
+
 	return 0;
 }
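
The post-toggle sleep scales with the configured delay. For example, with
a hypothetical 30 ms deassert delay:

	mdiodev->reset_deassert_delay = 30000;	/* us, assumed value */
	/* mdio_device_reset(mdiodev, 0) then sleeps in
	 * usleep_range(30000, 33000), i.e. d .. d + max(d / 10, 100) */

so short delays get at least 100 us of slack and longer ones roughly 10%.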
 
diff --git a/drivers/net/phy/meson-gxl.c b/drivers/net/phy/meson-gxl.c
index 842eb87..ddc2c5e 100644
--- a/drivers/net/phy/meson-gxl.c
+++ b/drivers/net/phy/meson-gxl.c
@@ -24,31 +24,129 @@
 #include <linux/netdevice.h>
 #include <linux/bitfield.h>
 
+#define TSTCNTL		20
+#define  TSTCNTL_READ		BIT(15)
+#define  TSTCNTL_WRITE		BIT(14)
+#define  TSTCNTL_REG_BANK_SEL	GENMASK(12, 11)
+#define  TSTCNTL_TEST_MODE	BIT(10)
+#define  TSTCNTL_READ_ADDRESS	GENMASK(9, 5)
+#define  TSTCNTL_WRITE_ADDRESS	GENMASK(4, 0)
+#define TSTREAD1	21
+#define TSTWRITE	23
+#define INTSRC_FLAG	29
+#define  INTSRC_ANEG_PR		BIT(1)
+#define  INTSRC_PARALLEL_FAULT	BIT(2)
+#define  INTSRC_ANEG_LP_ACK	BIT(3)
+#define  INTSRC_LINK_DOWN	BIT(4)
+#define  INTSRC_REMOTE_FAULT	BIT(5)
+#define  INTSRC_ANEG_COMPLETE	BIT(6)
+#define INTSRC_MASK	30
+
+#define BANK_ANALOG_DSP		0
+#define BANK_WOL		1
+#define BANK_BIST		3
+
+/* WOL Registers */
+#define LPI_STATUS	0xc
+#define  LPI_STATUS_RSV12	BIT(12)
+
+/* BIST Registers */
+#define FR_PLL_CONTROL	0x1b
+#define FR_PLL_DIV0	0x1c
+#define FR_PLL_DIV1	0x1d
+
+static int meson_gxl_open_banks(struct phy_device *phydev)
+{
+	int ret;
+
+	/* Enable Analog and DSP register bank access by toggling the
+	 * TSTCNTL_TEST_MODE bit in the TSTCNTL register
+	 */
+	ret = phy_write(phydev, TSTCNTL, 0);
+	if (ret)
+		return ret;
+	ret = phy_write(phydev, TSTCNTL, TSTCNTL_TEST_MODE);
+	if (ret)
+		return ret;
+	ret = phy_write(phydev, TSTCNTL, 0);
+	if (ret)
+		return ret;
+	return phy_write(phydev, TSTCNTL, TSTCNTL_TEST_MODE);
+}
+
+static void meson_gxl_close_banks(struct phy_device *phydev)
+{
+	phy_write(phydev, TSTCNTL, 0);
+}
+
+static int meson_gxl_read_reg(struct phy_device *phydev,
+			      unsigned int bank, unsigned int reg)
+{
+	int ret;
+
+	ret = meson_gxl_open_banks(phydev);
+	if (ret)
+		goto out;
+
+	ret = phy_write(phydev, TSTCNTL, TSTCNTL_READ |
+			FIELD_PREP(TSTCNTL_REG_BANK_SEL, bank) |
+			TSTCNTL_TEST_MODE |
+			FIELD_PREP(TSTCNTL_READ_ADDRESS, reg));
+	if (ret)
+		goto out;
+
+	ret = phy_read(phydev, TSTREAD1);
+out:
+	/* Close the bank access on our way out */
+	meson_gxl_close_banks(phydev);
+	return ret;
+}
+
+static int meson_gxl_write_reg(struct phy_device *phydev,
+			       unsigned int bank, unsigned int reg,
+			       uint16_t value)
+{
+	int ret;
+
+	ret = meson_gxl_open_banks(phydev);
+	if (ret)
+		goto out;
+
+	ret = phy_write(phydev, TSTWRITE, value);
+	if (ret)
+		goto out;
+
+	ret = phy_write(phydev, TSTCNTL, TSTCNTL_WRITE |
+			FIELD_PREP(TSTCNTL_REG_BANK_SEL, bank) |
+			TSTCNTL_TEST_MODE |
+			FIELD_PREP(TSTCNTL_WRITE_ADDRESS, reg));
+
+out:
+	/* Close the bank access on our way out */
+	meson_gxl_close_banks(phydev);
+	return ret;
+}
+
 static int meson_gxl_config_init(struct phy_device *phydev)
 {
-	/* Enable Analog and DSP register Bank access by */
-	phy_write(phydev, 0x14, 0x0000);
-	phy_write(phydev, 0x14, 0x0400);
-	phy_write(phydev, 0x14, 0x0000);
-	phy_write(phydev, 0x14, 0x0400);
-
-	/* Write Analog register 23 */
-	phy_write(phydev, 0x17, 0x8E0D);
-	phy_write(phydev, 0x14, 0x4417);
+	int ret;
 
 	/* Enable fractional PLL */
-	phy_write(phydev, 0x17, 0x0005);
-	phy_write(phydev, 0x14, 0x5C1B);
+	ret = meson_gxl_write_reg(phydev, BANK_BIST, FR_PLL_CONTROL, 0x5);
+	if (ret)
+		return ret;
 
 	/* Program fraction FR_PLL_DIV1 */
-	phy_write(phydev, 0x17, 0x029A);
-	phy_write(phydev, 0x14, 0x5C1D);
+	ret = meson_gxl_write_reg(phydev, BANK_BIST, FR_PLL_DIV1, 0x029a);
+	if (ret)
+		return ret;
 
 	/* Program fraction FR_PLL_DIV0 */
-	phy_write(phydev, 0x17, 0xAAAA);
-	phy_write(phydev, 0x14, 0x5C1C);
+	ret = meson_gxl_write_reg(phydev, BANK_BIST, FR_PLL_DIV0, 0xaaaa);
+	if (ret)
+		return ret;
 
-	return 0;
+	return genphy_config_init(phydev);
 }
 
 /* This function is provided to cope with the possible failures of this phy
@@ -78,27 +176,8 @@ static int meson_gxl_read_status(struct phy_device *phydev)
 		else if (!ret)
 			goto read_status_continue;
 
-		/* Need to access WOL bank, make sure the access is open */
-		ret = phy_write(phydev, 0x14, 0x0000);
-		if (ret)
-			return ret;
-		ret = phy_write(phydev, 0x14, 0x0400);
-		if (ret)
-			return ret;
-		ret = phy_write(phydev, 0x14, 0x0000);
-		if (ret)
-			return ret;
-		ret = phy_write(phydev, 0x14, 0x0400);
-		if (ret)
-			return ret;
-
-		/* Request LPI_STATUS WOL register */
-		ret = phy_write(phydev, 0x14, 0x8D80);
-		if (ret)
-			return ret;
-
-		/* Read LPI_STATUS value */
-		wol = phy_read(phydev, 0x15);
+		/* Aneg is done, let's check everything is fine */
+		wol = meson_gxl_read_reg(phydev, BANK_WOL, LPI_STATUS);
 		if (wol < 0)
 			return wol;
 
@@ -110,7 +189,7 @@ static int meson_gxl_read_status(struct phy_device *phydev)
 		if (exp < 0)
 			return exp;
 
-		if (!(wol & BIT(12)) ||
+		if (!(wol & LPI_STATUS_RSV12) ||
 		    ((exp & EXPANSION_NWAY) && !(lpa & LPA_LPACK))) {
 			/* Looks like aneg failed after all */
 			phydev_dbg(phydev, "LPA corruption - aneg restart\n");
@@ -122,17 +201,43 @@ static int meson_gxl_read_status(struct phy_device *phydev)
 	return genphy_read_status(phydev);
 }
 
+static int meson_gxl_ack_interrupt(struct phy_device *phydev)
+{
+	int ret = phy_read(phydev, INTSRC_FLAG);
+
+	return ret < 0 ? ret : 0;
+}
+
+static int meson_gxl_config_intr(struct phy_device *phydev)
+{
+	u16 val;
+
+	if (phydev->interrupts == PHY_INTERRUPT_ENABLED) {
+		val = INTSRC_ANEG_PR
+			| INTSRC_PARALLEL_FAULT
+			| INTSRC_ANEG_LP_ACK
+			| INTSRC_LINK_DOWN
+			| INTSRC_REMOTE_FAULT
+			| INTSRC_ANEG_COMPLETE;
+	} else {
+		val = 0;
+	}
+
+	return phy_write(phydev, INTSRC_MASK, val);
+}
+
 static struct phy_driver meson_gxl_phy[] = {
 	{
 		.phy_id		= 0x01814400,
 		.phy_id_mask	= 0xfffffff0,
 		.name		= "Meson GXL Internal PHY",
 		.features	= PHY_BASIC_FEATURES,
-		.flags		= PHY_IS_INTERNAL,
+		.flags		= PHY_IS_INTERNAL | PHY_HAS_INTERRUPT,
 		.config_init	= meson_gxl_config_init,
-		.config_aneg	= genphy_config_aneg,
 		.aneg_done      = genphy_aneg_done,
 		.read_status	= meson_gxl_read_status,
+		.ack_interrupt	= meson_gxl_ack_interrupt,
+		.config_intr	= meson_gxl_config_intr,
 		.suspend        = genphy_suspend,
 		.resume         = genphy_resume,
 	},
@@ -150,4 +255,5 @@ MODULE_DEVICE_TABLE(mdio, meson_gxl_tbl);
 MODULE_DESCRIPTION("Amlogic Meson GXL Internal PHY driver");
 MODULE_AUTHOR("Baoqi wang");
 MODULE_AUTHOR("Neil Armstrong <narmstrong@baylibre.com>");
+MODULE_AUTHOR("Jerome Brunet <jbrunet@baylibre.com>");
 MODULE_LICENSE("GPL");
diff --git a/drivers/net/phy/micrel.c b/drivers/net/phy/micrel.c
index 422ff63..0f45310 100644
--- a/drivers/net/phy/micrel.c
+++ b/drivers/net/phy/micrel.c
@@ -802,8 +802,6 @@ static struct phy_driver ksphy_driver[] = {
 	.flags		= PHY_HAS_INTERRUPT,
 	.driver_data	= &ks8737_type,
 	.config_init	= kszphy_config_init,
-	.config_aneg	= genphy_config_aneg,
-	.read_status	= genphy_read_status,
 	.ack_interrupt	= kszphy_ack_interrupt,
 	.config_intr	= kszphy_config_intr,
 	.suspend	= genphy_suspend,
@@ -817,8 +815,6 @@ static struct phy_driver ksphy_driver[] = {
 	.driver_data	= &ksz8021_type,
 	.probe		= kszphy_probe,
 	.config_init	= kszphy_config_init,
-	.config_aneg	= genphy_config_aneg,
-	.read_status	= genphy_read_status,
 	.ack_interrupt	= kszphy_ack_interrupt,
 	.config_intr	= kszphy_config_intr,
 	.get_sset_count = kszphy_get_sset_count,
@@ -835,8 +831,6 @@ static struct phy_driver ksphy_driver[] = {
 	.driver_data	= &ksz8021_type,
 	.probe		= kszphy_probe,
 	.config_init	= kszphy_config_init,
-	.config_aneg	= genphy_config_aneg,
-	.read_status	= genphy_read_status,
 	.ack_interrupt	= kszphy_ack_interrupt,
 	.config_intr	= kszphy_config_intr,
 	.get_sset_count = kszphy_get_sset_count,
@@ -854,7 +848,6 @@ static struct phy_driver ksphy_driver[] = {
 	.probe		= kszphy_probe,
 	.config_init	= ksz8041_config_init,
 	.config_aneg	= ksz8041_config_aneg,
-	.read_status	= genphy_read_status,
 	.ack_interrupt	= kszphy_ack_interrupt,
 	.config_intr	= kszphy_config_intr,
 	.get_sset_count = kszphy_get_sset_count,
@@ -871,8 +864,6 @@ static struct phy_driver ksphy_driver[] = {
 	.driver_data	= &ksz8041_type,
 	.probe		= kszphy_probe,
 	.config_init	= kszphy_config_init,
-	.config_aneg	= genphy_config_aneg,
-	.read_status	= genphy_read_status,
 	.ack_interrupt	= kszphy_ack_interrupt,
 	.config_intr	= kszphy_config_intr,
 	.get_sset_count = kszphy_get_sset_count,
@@ -889,8 +880,6 @@ static struct phy_driver ksphy_driver[] = {
 	.driver_data	= &ksz8051_type,
 	.probe		= kszphy_probe,
 	.config_init	= kszphy_config_init,
-	.config_aneg	= genphy_config_aneg,
-	.read_status	= genphy_read_status,
 	.ack_interrupt	= kszphy_ack_interrupt,
 	.config_intr	= kszphy_config_intr,
 	.get_sset_count = kszphy_get_sset_count,
@@ -907,8 +896,6 @@ static struct phy_driver ksphy_driver[] = {
 	.driver_data	= &ksz8041_type,
 	.probe		= kszphy_probe,
 	.config_init	= kszphy_config_init,
-	.config_aneg	= genphy_config_aneg,
-	.read_status	= genphy_read_status,
 	.ack_interrupt	= kszphy_ack_interrupt,
 	.config_intr	= kszphy_config_intr,
 	.get_sset_count = kszphy_get_sset_count,
@@ -925,8 +912,6 @@ static struct phy_driver ksphy_driver[] = {
 	.driver_data	= &ksz8081_type,
 	.probe		= kszphy_probe,
 	.config_init	= kszphy_config_init,
-	.config_aneg	= genphy_config_aneg,
-	.read_status	= genphy_read_status,
 	.ack_interrupt	= kszphy_ack_interrupt,
 	.config_intr	= kszphy_config_intr,
 	.get_sset_count = kszphy_get_sset_count,
@@ -941,8 +926,6 @@ static struct phy_driver ksphy_driver[] = {
 	.features	= PHY_BASIC_FEATURES,
 	.flags		= PHY_HAS_INTERRUPT,
 	.config_init	= kszphy_config_init,
-	.config_aneg	= genphy_config_aneg,
-	.read_status	= genphy_read_status,
 	.ack_interrupt	= kszphy_ack_interrupt,
 	.config_intr	= kszphy_config_intr,
 	.suspend	= genphy_suspend,
@@ -956,8 +939,6 @@ static struct phy_driver ksphy_driver[] = {
 	.driver_data	= &ksz9021_type,
 	.probe		= kszphy_probe,
 	.config_init	= ksz9021_config_init,
-	.config_aneg	= genphy_config_aneg,
-	.read_status	= genphy_read_status,
 	.ack_interrupt	= kszphy_ack_interrupt,
 	.config_intr	= kszphy_config_intr,
 	.get_sset_count = kszphy_get_sset_count,
@@ -976,7 +957,6 @@ static struct phy_driver ksphy_driver[] = {
 	.driver_data	= &ksz9021_type,
 	.probe		= kszphy_probe,
 	.config_init	= ksz9031_config_init,
-	.config_aneg	= genphy_config_aneg,
 	.read_status	= ksz9031_read_status,
 	.ack_interrupt	= kszphy_ack_interrupt,
 	.config_intr	= kszphy_config_intr,
@@ -1001,8 +981,6 @@ static struct phy_driver ksphy_driver[] = {
 	.features	= PHY_BASIC_FEATURES,
 	.flags		= PHY_HAS_INTERRUPT,
 	.config_init	= kszphy_config_init,
-	.config_aneg	= genphy_config_aneg,
-	.read_status	= genphy_read_status,
 	.suspend	= genphy_suspend,
 	.resume		= genphy_resume,
 }, {
@@ -1022,8 +1000,6 @@ static struct phy_driver ksphy_driver[] = {
 	.name		= "Microchip KSZ9477",
 	.features	= PHY_GBIT_FEATURES,
 	.config_init	= kszphy_config_init,
-	.config_aneg	= genphy_config_aneg,
-	.read_status	= genphy_read_status,
 	.suspend	= genphy_suspend,
 	.resume		= genphy_resume,
 } };
diff --git a/drivers/net/phy/microchip.c b/drivers/net/phy/microchip.c
index 37ee856..0f293ef 100644
--- a/drivers/net/phy/microchip.c
+++ b/drivers/net/phy/microchip.c
@@ -153,7 +153,6 @@ static struct phy_driver microchip_phy_driver[] = {
 
 	.config_init	= genphy_config_init,
 	.config_aneg	= lan88xx_config_aneg,
-	.read_status	= genphy_read_status,
 
 	.ack_interrupt	= lan88xx_phy_ack_interrupt,
 	.config_intr	= lan88xx_phy_config_intr,
diff --git a/drivers/net/phy/national.c b/drivers/net/phy/national.c
index 2addf1d..2b1e336 100644
--- a/drivers/net/phy/national.c
+++ b/drivers/net/phy/national.c
@@ -136,8 +136,6 @@ static struct phy_driver dp83865_driver[] = { {
 	.features = PHY_GBIT_FEATURES,
 	.flags = PHY_HAS_INTERRUPT,
 	.config_init = ns_config_init,
-	.config_aneg = genphy_config_aneg,
-	.read_status = genphy_read_status,
 	.ack_interrupt = ns_ack_interrupt,
 	.config_intr = ns_config_intr,
 } };
diff --git a/drivers/net/phy/phy-c45.c b/drivers/net/phy/phy-c45.c
index dada819..a457685 100644
--- a/drivers/net/phy/phy-c45.c
+++ b/drivers/net/phy/phy-c45.c
@@ -233,6 +233,39 @@ int genphy_c45_read_pma(struct phy_device *phydev)
 }
 EXPORT_SYMBOL_GPL(genphy_c45_read_pma);
 
+/**
+ * genphy_c45_read_mdix - read mdix status from PMA
+ * @phydev: target phy_device struct
+ */
+int genphy_c45_read_mdix(struct phy_device *phydev)
+{
+	int val;
+
+	if (phydev->speed == SPEED_10000) {
+		val = phy_read_mmd(phydev, MDIO_MMD_PMAPMD,
+				   MDIO_PMA_10GBT_SWAPPOL);
+		if (val < 0)
+			return val;
+
+		switch (val) {
+		case MDIO_PMA_10GBT_SWAPPOL_ABNX | MDIO_PMA_10GBT_SWAPPOL_CDNX:
+			phydev->mdix = ETH_TP_MDI;
+			break;
+
+		case 0:
+			phydev->mdix = ETH_TP_MDI_X;
+			break;
+
+		default:
+			phydev->mdix = ETH_TP_MDI_INVALID;
+			break;
+		}
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(genphy_c45_read_mdix);
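A Clause 45 driver's read_status() path would typically call this helper
after the PMA has been read, so that phydev->speed is valid when the 10G
pair-swap register is consulted; a minimal sketch (the surrounding driver
context is assumed, not part of this patch):

	/* Sketch: refresh PMA state, then resolve MDI-X status */
	ret = genphy_c45_read_pma(phydev);
	if (ret < 0)
		return ret;

	ret = genphy_c45_read_mdix(phydev);
	if (ret < 0)
		return ret;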
+
 /* The gen10g_* functions are the old Clause 45 stub */
 
 static int gen10g_config_aneg(struct phy_device *phydev)
diff --git a/drivers/net/phy/phy-core.c b/drivers/net/phy/phy-core.c
index 21f75ae..e75989c 100644
--- a/drivers/net/phy/phy-core.c
+++ b/drivers/net/phy/phy-core.c
@@ -189,17 +189,61 @@ size_t phy_speeds(unsigned int *speeds, size_t size,
 	return count;
 }
 
+/**
+ * phy_resolve_aneg_linkmode - resolve the advertisements into phy settings
+ * @phydev: The phy_device struct
+ *
+ * Resolve our own and the link partner's advertisements into their
+ * corresponding speed and duplex. If full duplex was negotiated, extract
+ * the pause mode from the link partner mask.
+ */
+void phy_resolve_aneg_linkmode(struct phy_device *phydev)
+{
+	u32 common = phydev->lp_advertising & phydev->advertising;
+
+	if (common & ADVERTISED_10000baseT_Full) {
+		phydev->speed = SPEED_10000;
+		phydev->duplex = DUPLEX_FULL;
+	} else if (common & ADVERTISED_1000baseT_Full) {
+		phydev->speed = SPEED_1000;
+		phydev->duplex = DUPLEX_FULL;
+	} else if (common & ADVERTISED_1000baseT_Half) {
+		phydev->speed = SPEED_1000;
+		phydev->duplex = DUPLEX_HALF;
+	} else if (common & ADVERTISED_100baseT_Full) {
+		phydev->speed = SPEED_100;
+		phydev->duplex = DUPLEX_FULL;
+	} else if (common & ADVERTISED_100baseT_Half) {
+		phydev->speed = SPEED_100;
+		phydev->duplex = DUPLEX_HALF;
+	} else if (common & ADVERTISED_10baseT_Full) {
+		phydev->speed = SPEED_10;
+		phydev->duplex = DUPLEX_FULL;
+	} else if (common & ADVERTISED_10baseT_Half) {
+		phydev->speed = SPEED_10;
+		phydev->duplex = DUPLEX_HALF;
+	}
+
+	if (phydev->duplex == DUPLEX_FULL) {
+		phydev->pause = !!(phydev->lp_advertising & ADVERTISED_Pause);
+		phydev->asym_pause = !!(phydev->lp_advertising &
+					ADVERTISED_Asym_Pause);
+	}
+}
+EXPORT_SYMBOL_GPL(phy_resolve_aneg_linkmode);
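A driver's read_status() can fill phydev->lp_advertising from the standard
MII_LPA register and let the new helper derive speed, duplex and pause; a
hedged sketch using the existing mii_lpa_to_ethtool_lpa_t() conversion
helper:

	/* Sketch: resolve negotiated modes once aneg has completed */
	int lpa = phy_read(phydev, MII_LPA);

	if (lpa < 0)
		return lpa;

	phydev->lp_advertising = mii_lpa_to_ethtool_lpa_t(lpa);
	phy_resolve_aneg_linkmode(phydev);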
+
 static void mmd_phy_indirect(struct mii_bus *bus, int phy_addr, int devad,
 			     u16 regnum)
 {
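+	/* Callers (phy_read_mmd/phy_write_mmd) already hold the
+	 * mdio_lock, hence the unlocked __mdiobus_write() accessors
+	 * used below.
+	 */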
 	/* Write the desired MMD Devad */
-	bus->write(bus, phy_addr, MII_MMD_CTRL, devad);
+	__mdiobus_write(bus, phy_addr, MII_MMD_CTRL, devad);
 
 	/* Write the desired MMD register address */
-	bus->write(bus, phy_addr, MII_MMD_DATA, regnum);
+	__mdiobus_write(bus, phy_addr, MII_MMD_DATA, regnum);
 
 	/* Select the Function : DATA with no post increment */
-	bus->write(bus, phy_addr, MII_MMD_CTRL, devad | MII_MMD_CTRL_NOINCR);
+	__mdiobus_write(bus, phy_addr, MII_MMD_CTRL,
+			devad | MII_MMD_CTRL_NOINCR);
 }
 
 /**
@@ -232,7 +276,7 @@ int phy_read_mmd(struct phy_device *phydev, int devad, u32 regnum)
 		mmd_phy_indirect(bus, phy_addr, devad, regnum);
 
 		/* Read the content of the MMD's selected register */
-		val = bus->read(bus, phy_addr, MII_MMD_DATA);
+		val = __mdiobus_read(bus, phy_addr, MII_MMD_DATA);
 		mutex_unlock(&bus->mdio_lock);
 	}
 	return val;
@@ -271,7 +315,7 @@ int phy_write_mmd(struct phy_device *phydev, int devad, u32 regnum, u16 val)
 		mmd_phy_indirect(bus, phy_addr, devad, regnum);
 
 		/* Write the data into MMD's selected register */
-		bus->write(bus, phy_addr, MII_MMD_DATA, val);
+		__mdiobus_write(bus, phy_addr, MII_MMD_DATA, val);
 		mutex_unlock(&bus->mdio_lock);
 
 		ret = 0;
@@ -279,3 +323,208 @@ int phy_write_mmd(struct phy_device *phydev, int devad, u32 regnum, u16 val)
 	return ret;
 }
 EXPORT_SYMBOL(phy_write_mmd);
+
+/**
+ * __phy_modify() - Convenience function for modifying a PHY register
+ * @phydev: a pointer to a &struct phy_device
+ * @regnum: register number
+ * @mask: bit mask of bits to clear
+ * @set: bit mask of bits to set
+ *
+ * Unlocked helper function which allows a PHY register to be modified as
+ * new register value = (old register value & ~mask) | set
+ */
+int __phy_modify(struct phy_device *phydev, u32 regnum, u16 mask, u16 set)
+{
+	int ret, res;
+
+	ret = __phy_read(phydev, regnum);
+	if (ret >= 0) {
+		res = __phy_write(phydev, regnum, (ret & ~mask) | set);
+		if (res < 0)
+			ret = res;
+	}
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(__phy_modify);
+
+/**
+ * phy_modify - Convenience function for modifying a given PHY register
+ * @phydev: the phy_device struct
+ * @regnum: register number to write
+ * @mask: bit mask of bits to clear
+ * @set: new value of bits set in mask to write to @regnum
+ *
+ * NOTE: MUST NOT be called from interrupt context,
+ * because the bus read/write functions may wait for an interrupt
+ * to conclude the operation.
+ */
+int phy_modify(struct phy_device *phydev, u32 regnum, u16 mask, u16 set)
+{
+	int ret;
+
+	mutex_lock(&phydev->mdio.bus->mdio_lock);
+	ret = __phy_modify(phydev, regnum, mask, set);
+	mutex_unlock(&phydev->mdio.bus->mdio_lock);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(phy_modify);
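The genphy_suspend()/genphy_resume() conversions later in this patch show
the intended use; for instance, clearing the power-down bit while
preserving the rest of BMCR becomes a single call:

	/* Clear BMCR_PDOWN, leaving all other BMCR bits untouched */
	err = phy_modify(phydev, MII_BMCR, BMCR_PDOWN, 0);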
+
+static int __phy_read_page(struct phy_device *phydev)
+{
+	return phydev->drv->read_page(phydev);
+}
+
+static int __phy_write_page(struct phy_device *phydev, int page)
+{
+	return phydev->drv->write_page(phydev, page);
+}
+
+/**
+ * phy_save_page() - take the bus lock and save the current page
+ * @phydev: a pointer to a &struct phy_device
+ *
+ * Take the MDIO bus lock, and return the current page number. On error,
+ * returns a negative errno. phy_restore_page() must always be called
+ * after this, irrespective of success or failure of this call.
+ */
+int phy_save_page(struct phy_device *phydev)
+{
+	mutex_lock(&phydev->mdio.bus->mdio_lock);
+	return __phy_read_page(phydev);
+}
+EXPORT_SYMBOL_GPL(phy_save_page);
+
+/**
+ * phy_select_page() - take the bus lock, save the current page, and set a page
+ * @phydev: a pointer to a &struct phy_device
+ * @page: desired page
+ *
+ * Take the MDIO bus lock to protect against concurrent access, save the
+ * current PHY page, and set the current page.  On error, returns a
+ * negative errno, otherwise returns the previous page number.
+ * phy_restore_page() must always be called after this, irrespective
+ * of success or failure of this call.
+ */
+int phy_select_page(struct phy_device *phydev, int page)
+{
+	int ret, oldpage;
+
+	oldpage = ret = phy_save_page(phydev);
+	if (ret < 0)
+		return ret;
+
+	if (oldpage != page) {
+		ret = __phy_write_page(phydev, page);
+		if (ret < 0)
+			return ret;
+	}
+
+	return oldpage;
+}
+EXPORT_SYMBOL_GPL(phy_select_page);
+
+/**
+ * phy_restore_page() - restore the page register and release the bus lock
+ * @phydev: a pointer to a &struct phy_device
+ * @oldpage: the old page, return value from phy_save_page() or phy_select_page()
+ * @ret: operation's return code
+ *
+ * Release the MDIO bus lock, restoring @oldpage if it is a valid page.
+ * This function propagates the earliest error code from the group of
+ * operations.
+ *
+ * Returns:
+ *   @oldpage if it was a negative value, otherwise
+ *   @ret if it was a negative errno value, otherwise
+ *   phy_write_page()'s negative value if it failed, otherwise
+ *   @ret.
+ */
+int phy_restore_page(struct phy_device *phydev, int oldpage, int ret)
+{
+	int r;
+
+	if (oldpage >= 0) {
+		r = __phy_write_page(phydev, oldpage);
+
+		/* Propagate the operation return code if the page write
+		 * was successful.
+		 */
+		if (ret >= 0 && r < 0)
+			ret = r;
+	} else {
+		/* Propagate the phy page selection error code */
+		ret = oldpage;
+	}
+
+	mutex_unlock(&phydev->mdio.bus->mdio_lock);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(phy_restore_page);
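Drivers that need several accesses under a single page selection can
follow the same pattern the convenience wrappers below use; a minimal
sketch (the page and register numbers are hypothetical):

	int ret = 0, oldpage;

	oldpage = phy_select_page(phydev, 2);
	if (oldpage >= 0) {
		ret = __phy_write(phydev, 0x10, 0x1234);
		if (ret == 0)
			ret = __phy_read(phydev, 0x11);
	}
	return phy_restore_page(phydev, oldpage, ret);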
+
+/**
+ * phy_read_paged() - Convenience function for reading a paged register
+ * @phydev: a pointer to a &struct phy_device
+ * @page: the page for the phy
+ * @regnum: register number
+ *
+ * Same rules as for phy_read().
+ */
+int phy_read_paged(struct phy_device *phydev, int page, u32 regnum)
+{
+	int ret = 0, oldpage;
+
+	oldpage = phy_select_page(phydev, page);
+	if (oldpage >= 0)
+		ret = __phy_read(phydev, regnum);
+
+	return phy_restore_page(phydev, oldpage, ret);
+}
+EXPORT_SYMBOL(phy_read_paged);
+
+/**
+ * phy_write_paged() - Convenience function for writing a paged register
+ * @phydev: a pointer to a &struct phy_device
+ * @page: the page for the phy
+ * @regnum: register number
+ * @val: value to write
+ *
+ * Same rules as for phy_write().
+ */
+int phy_write_paged(struct phy_device *phydev, int page, u32 regnum, u16 val)
+{
+	int ret = 0, oldpage;
+
+	oldpage = phy_select_page(phydev, page);
+	if (oldpage >= 0)
+		ret = __phy_write(phydev, regnum, val);
+
+	return phy_restore_page(phydev, oldpage, ret);
+}
+EXPORT_SYMBOL(phy_write_paged);
+
+/**
+ * phy_modify_paged() - Convenience function for modifying a paged register
+ * @phydev: a pointer to a &struct phy_device
+ * @page: the page for the phy
+ * @regnum: register number
+ * @mask: bit mask of bits to clear
+ * @set: bit mask of bits to set
+ *
+ * Same rules as for phy_read() and phy_write().
+ */
+int phy_modify_paged(struct phy_device *phydev, int page, u32 regnum,
+		     u16 mask, u16 set)
+{
+	int ret = 0, oldpage;
+
+	oldpage = phy_select_page(phydev, page);
+	if (oldpage >= 0)
+		ret = __phy_modify(phydev, regnum, mask, set);
+
+	return phy_restore_page(phydev, oldpage, ret);
+}
+EXPORT_SYMBOL(phy_modify_paged);
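As a usage sketch, setting one bit of a register that lives on another
page, with the original page restored on return (page and register
numbers are hypothetical):

	err = phy_modify_paged(phydev, 2, 0x10, 0, BIT(0));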
diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c
index ed10d1f..0c165ad 100644
--- a/drivers/net/phy/phy.c
+++ b/drivers/net/phy/phy.c
@@ -493,7 +493,10 @@ static int phy_start_aneg_priv(struct phy_device *phydev, bool sync)
 	/* Invalidate LP advertising flags */
 	phydev->lp_advertising = 0;
 
-	err = phydev->drv->config_aneg(phydev);
+	if (phydev->drv->config_aneg)
+		err = phydev->drv->config_aneg(phydev);
+	else
+		err = genphy_config_aneg(phydev);
 	if (err < 0)
 		goto out_unlock;
 
@@ -629,9 +632,6 @@ static irqreturn_t phy_interrupt(int irq, void *phy_dat)
 	if (PHY_HALTED == phydev->state)
 		return IRQ_NONE;		/* It can't be ours.  */
 
-	disable_irq_nosync(irq);
-	atomic_inc(&phydev->irq_disable);
-
 	phy_change(phydev);
 
 	return IRQ_HANDLED;
@@ -689,7 +689,6 @@ static int phy_disable_interrupts(struct phy_device *phydev)
  */
 int phy_start_interrupts(struct phy_device *phydev)
 {
-	atomic_set(&phydev->irq_disable, 0);
 	if (request_threaded_irq(phydev->irq, NULL, phy_interrupt,
 				 IRQF_ONESHOT | IRQF_SHARED,
 				 phydev_name(phydev), phydev) < 0) {
@@ -716,13 +715,6 @@ int phy_stop_interrupts(struct phy_device *phydev)
 
 	free_irq(phydev->irq, phydev);
 
-	/* If work indeed has been cancelled, disable_irq() will have
-	 * been left unbalanced from phy_interrupt() and enable_irq()
-	 * has to be called so that other devices on the line work.
-	 */
-	while (atomic_dec_return(&phydev->irq_disable) >= 0)
-		enable_irq(phydev->irq);
-
 	return err;
 }
 EXPORT_SYMBOL(phy_stop_interrupts);
@@ -736,10 +728,11 @@ void phy_change(struct phy_device *phydev)
 	if (phy_interrupt_is_valid(phydev)) {
 		if (phydev->drv->did_interrupt &&
 		    !phydev->drv->did_interrupt(phydev))
-			goto ignore;
+			return;
 
-		if (phy_disable_interrupts(phydev))
-			goto phy_err;
+		if (phydev->state == PHY_HALTED)
+			if (phy_disable_interrupts(phydev))
+				goto phy_err;
 	}
 
 	mutex_lock(&phydev->lock);
@@ -747,28 +740,13 @@ void phy_change(struct phy_device *phydev)
 		phydev->state = PHY_CHANGELINK;
 	mutex_unlock(&phydev->lock);
 
-	if (phy_interrupt_is_valid(phydev)) {
-		atomic_dec(&phydev->irq_disable);
-		enable_irq(phydev->irq);
-
-		/* Reenable interrupts */
-		if (PHY_HALTED != phydev->state &&
-		    phy_config_interrupt(phydev, PHY_INTERRUPT_ENABLED))
-			goto irq_enable_err;
-	}
-
 	/* reschedule state queue work to run as soon as possible */
 	phy_trigger_machine(phydev, true);
+
+	if (phy_interrupt_is_valid(phydev) && phy_clear_interrupt(phydev))
+		goto phy_err;
 	return;
 
-ignore:
-	atomic_dec(&phydev->irq_disable);
-	enable_irq(phydev->irq);
-	return;
-
-irq_enable_err:
-	disable_irq(phydev->irq);
-	atomic_inc(&phydev->irq_disable);
 phy_err:
 	phy_error(phydev);
 }
@@ -1003,10 +981,6 @@ void phy_state_machine(struct work_struct *work)
 			phydev->state = PHY_NOLINK;
 			phy_link_down(phydev, true);
 		}
-
-		if (phy_interrupt_is_valid(phydev))
-			err = phy_config_interrupt(phydev,
-						   PHY_INTERRUPT_ENABLED);
 		break;
 	case PHY_HALTED:
 		if (phydev->link) {
diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
index b15b31c..6bd11a0 100644
--- a/drivers/net/phy/phy_device.c
+++ b/drivers/net/phy/phy_device.c
@@ -634,6 +634,9 @@ int phy_device_register(struct phy_device *phydev)
 	if (err)
 		return err;
 
+	/* Deassert the reset signal */
+	phy_device_reset(phydev, 0);
+
 	/* Run all of the fixups for this PHY */
 	err = phy_scan_fixups(phydev);
 	if (err) {
@@ -652,6 +655,9 @@ int phy_device_register(struct phy_device *phydev)
 	return 0;
 
  out:
+	/* Assert the reset signal */
+	phy_device_reset(phydev, 1);
+
 	mdiobus_unregister_device(&phydev->mdio);
 	return err;
 }
@@ -668,6 +674,10 @@ EXPORT_SYMBOL(phy_device_register);
 void phy_device_remove(struct phy_device *phydev)
 {
 	device_del(&phydev->mdio.dev);
+
+	/* Assert the reset signal */
+	phy_device_reset(phydev, 1);
+
 	mdiobus_unregister_device(&phydev->mdio);
 }
 EXPORT_SYMBOL(phy_device_remove);
@@ -851,6 +861,9 @@ int phy_init_hw(struct phy_device *phydev)
 {
 	int ret = 0;
 
+	/* Deassert the reset signal */
+	phy_device_reset(phydev, 0);
+
 	if (!phydev->drv || !phydev->drv->config_init)
 		return 0;
 
@@ -1130,6 +1143,9 @@ void phy_detach(struct phy_device *phydev)
 	put_device(&phydev->mdio.dev);
 	if (ndev_owner != bus->owner)
 		module_put(bus->owner);
+
+	/* Assert the reset signal */
+	phy_device_reset(phydev, 1);
 }
 EXPORT_SYMBOL(phy_detach);
 
@@ -1208,6 +1224,30 @@ int phy_loopback(struct phy_device *phydev, bool enable)
 }
 EXPORT_SYMBOL(phy_loopback);
 
+/**
+ * phy_reset_after_clk_enable - perform a PHY reset if needed
+ * @phydev: target phy_device struct
+ *
+ * Description: Some PHYs are known to need a reset after their refclk was
+ *   enabled. This function evaluates the flags and performs the reset if
+ *   needed. Returns < 0 on error, 0 if the phy wasn't reset and 1 if the phy
+ *   was reset.
+ */
+int phy_reset_after_clk_enable(struct phy_device *phydev)
+{
+	if (!phydev || !phydev->drv)
+		return -ENODEV;
+
+	if (phydev->drv->flags & PHY_RST_AFTER_CLK_EN) {
+		phy_device_reset(phydev, 1);
+		phy_device_reset(phydev, 0);
+		return 1;
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL(phy_reset_after_clk_enable);
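A MAC driver that gates the PHY reference clock would call this from its
open/init path once the clock is running; a hedged sketch (the clock
handle and error handling are illustrative):

	/* Sketch: reset the PHY after enabling its refclk */
	ret = clk_prepare_enable(priv->refclk);
	if (ret)
		return ret;

	ret = phy_reset_after_clk_enable(ndev->phydev);
	if (ret < 0) {
		clk_disable_unprepare(priv->refclk);
		return ret;
	}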
+
 /* Generic PHY support and helper functions */
 
 /**
@@ -1328,9 +1368,8 @@ static int genphy_config_eee_advert(struct phy_device *phydev)
  */
 int genphy_setup_forced(struct phy_device *phydev)
 {
-	int ctl = phy_read(phydev, MII_BMCR);
+	u16 ctl = 0;
 
-	ctl &= BMCR_LOOPBACK | BMCR_ISOLATE | BMCR_PDOWN;
 	phydev->pause = 0;
 	phydev->asym_pause = 0;
 
@@ -1342,7 +1381,8 @@ int genphy_setup_forced(struct phy_device *phydev)
 	if (DUPLEX_FULL == phydev->duplex)
 		ctl |= BMCR_FULLDPLX;
 
-	return phy_write(phydev, MII_BMCR, ctl);
+	return phy_modify(phydev, MII_BMCR,
+			  BMCR_LOOPBACK | BMCR_ISOLATE | BMCR_PDOWN, ctl);
 }
 EXPORT_SYMBOL(genphy_setup_forced);
 
@@ -1352,17 +1392,9 @@ EXPORT_SYMBOL(genphy_setup_forced);
  */
 int genphy_restart_aneg(struct phy_device *phydev)
 {
-	int ctl = phy_read(phydev, MII_BMCR);
-
-	if (ctl < 0)
-		return ctl;
-
-	ctl |= BMCR_ANENABLE | BMCR_ANRESTART;
-
 	/* Don't isolate the PHY if we're negotiating */
-	ctl &= ~BMCR_ISOLATE;
-
-	return phy_write(phydev, MII_BMCR, ctl);
+	return phy_modify(phydev, MII_BMCR, BMCR_ISOLATE,
+			  BMCR_ANENABLE | BMCR_ANRESTART);
 }
 EXPORT_SYMBOL(genphy_restart_aneg);
 
@@ -1628,44 +1660,20 @@ EXPORT_SYMBOL(genphy_config_init);
 
 int genphy_suspend(struct phy_device *phydev)
 {
-	int value;
-
-	mutex_lock(&phydev->lock);
-
-	value = phy_read(phydev, MII_BMCR);
-	phy_write(phydev, MII_BMCR, value | BMCR_PDOWN);
-
-	mutex_unlock(&phydev->lock);
-
-	return 0;
+	return phy_modify(phydev, MII_BMCR, 0, BMCR_PDOWN);
 }
 EXPORT_SYMBOL(genphy_suspend);
 
 int genphy_resume(struct phy_device *phydev)
 {
-	int value;
-
-	value = phy_read(phydev, MII_BMCR);
-	phy_write(phydev, MII_BMCR, value & ~BMCR_PDOWN);
-
-	return 0;
+	return phy_modify(phydev, MII_BMCR, BMCR_PDOWN, 0);
 }
 EXPORT_SYMBOL(genphy_resume);
 
 int genphy_loopback(struct phy_device *phydev, bool enable)
 {
-	int value;
-
-	value = phy_read(phydev, MII_BMCR);
-	if (value < 0)
-		return value;
-
-	if (enable)
-		value |= BMCR_LOOPBACK;
-	else
-		value &= ~BMCR_LOOPBACK;
-
-	return phy_write(phydev, MII_BMCR, value);
+	return phy_modify(phydev, MII_BMCR, BMCR_LOOPBACK,
+			  enable ? BMCR_LOOPBACK : 0);
 }
 EXPORT_SYMBOL(genphy_loopback);
 
@@ -1813,8 +1821,16 @@ static int phy_probe(struct device *dev)
 	/* Set the state to READY by default */
 	phydev->state = PHY_READY;
 
-	if (phydev->drv->probe)
+	if (phydev->drv->probe) {
+		/* Deassert the reset signal */
+		phy_device_reset(phydev, 0);
+
 		err = phydev->drv->probe(phydev);
+		if (err) {
+			/* Assert the reset signal */
+			phy_device_reset(phydev, 1);
+		}
+	}
 
 	mutex_unlock(&phydev->lock);
 
@@ -1831,8 +1847,12 @@ static int phy_remove(struct device *dev)
 	phydev->state = PHY_DOWN;
 	mutex_unlock(&phydev->lock);
 
-	if (phydev->drv && phydev->drv->remove)
+	if (phydev->drv && phydev->drv->remove) {
 		phydev->drv->remove(phydev);
+
+		/* Assert the reset signal */
+		phy_device_reset(phydev, 1);
+	}
 	phydev->drv = NULL;
 
 	return 0;
@@ -1909,9 +1929,7 @@ static struct phy_driver genphy_driver = {
 	.features	= PHY_GBIT_FEATURES | SUPPORTED_MII |
 			  SUPPORTED_AUI | SUPPORTED_FIBRE |
 			  SUPPORTED_BNC,
-	.config_aneg	= genphy_config_aneg,
 	.aneg_done	= genphy_aneg_done,
-	.read_status	= genphy_read_status,
 	.suspend	= genphy_suspend,
 	.resume		= genphy_resume,
 	.set_loopback   = genphy_loopback,
diff --git a/drivers/net/phy/phylink.c b/drivers/net/phy/phylink.c
index 249ce5c..6ac8b29 100644
--- a/drivers/net/phy/phylink.c
+++ b/drivers/net/phy/phylink.c
@@ -36,7 +36,11 @@ enum {
 	PHYLINK_DISABLE_LINK,
 };
 
+/**
+ * struct phylink - internal data type for phylink
+ */
 struct phylink {
+	/* private: */
 	struct net_device *netdev;
 	const struct phylink_mac_ops *ops;
 
@@ -50,6 +54,8 @@ struct phylink {
 	/* The link configuration settings */
 	struct phylink_link_state link_config;
 	struct gpio_desc *link_gpio;
+	void (*get_fixed_state)(struct net_device *dev,
+				struct phylink_link_state *s);
 
 	struct mutex state_mutex;
 	struct phylink_link_state phy_state;
@@ -87,6 +93,13 @@ static inline bool linkmode_empty(const unsigned long *src)
 	return bitmap_empty(src, __ETHTOOL_LINK_MODE_MASK_NBITS);
 }
 
+/**
+ * phylink_set_port_modes() - set the port type modes in the ethtool mask
+ * @mask: ethtool link mode mask
+ *
+ * Sets all the port type modes in the ethtool mask.  MAC drivers should
+ * use this in their 'validate' callback.
+ */
 void phylink_set_port_modes(unsigned long *mask)
 {
 	phylink_set(mask, TP);
@@ -117,8 +130,7 @@ static const char *phylink_an_mode_str(unsigned int mode)
 	static const char *modestr[] = {
 		[MLO_AN_PHY] = "phy",
 		[MLO_AN_FIXED] = "fixed",
-		[MLO_AN_SGMII] = "SGMII",
-		[MLO_AN_8023Z] = "802.3z",
+		[MLO_AN_INBAND] = "inband",
 	};
 
 	return mode < ARRAY_SIZE(modestr) ? modestr[mode] : "unknown";
@@ -132,59 +144,64 @@ static int phylink_validate(struct phylink *pl, unsigned long *supported,
 	return phylink_is_empty_linkmode(supported) ? -EINVAL : 0;
 }
 
-static int phylink_parse_fixedlink(struct phylink *pl, struct device_node *np)
+static int phylink_parse_fixedlink(struct phylink *pl,
+				   struct fwnode_handle *fwnode)
 {
-	struct device_node *fixed_node;
+	struct fwnode_handle *fixed_node;
 	const struct phy_setting *s;
 	struct gpio_desc *desc;
-	const __be32 *fixed_prop;
 	u32 speed;
-	int ret, len;
+	int ret;
 
-	fixed_node = of_get_child_by_name(np, "fixed-link");
+	fixed_node = fwnode_get_named_child_node(fwnode, "fixed-link");
 	if (fixed_node) {
-		ret = of_property_read_u32(fixed_node, "speed", &speed);
+		ret = fwnode_property_read_u32(fixed_node, "speed", &speed);
 
 		pl->link_config.speed = speed;
 		pl->link_config.duplex = DUPLEX_HALF;
 
-		if (of_property_read_bool(fixed_node, "full-duplex"))
+		if (fwnode_property_read_bool(fixed_node, "full-duplex"))
 			pl->link_config.duplex = DUPLEX_FULL;
 
 		/* We treat the "pause" and "asym-pause" terminology as
 		 * defining the link partner's ability. */
-		if (of_property_read_bool(fixed_node, "pause"))
+		if (fwnode_property_read_bool(fixed_node, "pause"))
 			pl->link_config.pause |= MLO_PAUSE_SYM;
-		if (of_property_read_bool(fixed_node, "asym-pause"))
+		if (fwnode_property_read_bool(fixed_node, "asym-pause"))
 			pl->link_config.pause |= MLO_PAUSE_ASYM;
 
 		if (ret == 0) {
-			desc = fwnode_get_named_gpiod(&fixed_node->fwnode,
-						      "link-gpios", 0,
-						      GPIOD_IN, "?");
+			desc = fwnode_get_named_gpiod(fixed_node, "link-gpios",
+						      0, GPIOD_IN, "?");
 
 			if (!IS_ERR(desc))
 				pl->link_gpio = desc;
 			else if (desc == ERR_PTR(-EPROBE_DEFER))
 				ret = -EPROBE_DEFER;
 		}
-		of_node_put(fixed_node);
+		fwnode_handle_put(fixed_node);
 
 		if (ret)
 			return ret;
 	} else {
-		fixed_prop = of_get_property(np, "fixed-link", &len);
-		if (!fixed_prop) {
+		u32 prop[5];
+
+		ret = fwnode_property_read_u32_array(fwnode, "fixed-link",
+						     NULL, 0);
+		if (ret != ARRAY_SIZE(prop)) {
 			netdev_err(pl->netdev, "broken fixed-link?\n");
 			return -EINVAL;
 		}
-		if (len == 5 * sizeof(*fixed_prop)) {
-			pl->link_config.duplex = be32_to_cpu(fixed_prop[1]) ?
+
+		ret = fwnode_property_read_u32_array(fwnode, "fixed-link",
+						     prop, ARRAY_SIZE(prop));
+		if (!ret) {
+			pl->link_config.duplex = prop[1] ?
 						DUPLEX_FULL : DUPLEX_HALF;
-			pl->link_config.speed = be32_to_cpu(fixed_prop[2]);
-			if (be32_to_cpu(fixed_prop[3]))
+			pl->link_config.speed = prop[2];
+			if (prop[3])
 				pl->link_config.pause |= MLO_PAUSE_SYM;
-			if (be32_to_cpu(fixed_prop[4]))
+			if (prop[4])
 				pl->link_config.pause |= MLO_PAUSE_ASYM;
 		}
 	}
@@ -220,17 +237,17 @@ static int phylink_parse_fixedlink(struct phylink *pl, struct device_node *np)
 	return 0;
 }
 
-static int phylink_parse_mode(struct phylink *pl, struct device_node *np)
+static int phylink_parse_mode(struct phylink *pl, struct fwnode_handle *fwnode)
 {
-	struct device_node *dn;
+	struct fwnode_handle *dn;
 	const char *managed;
 
-	dn = of_get_child_by_name(np, "fixed-link");
-	if (dn || of_find_property(np, "fixed-link", NULL))
+	dn = fwnode_get_named_child_node(fwnode, "fixed-link");
+	if (dn || fwnode_property_present(fwnode, "fixed-link"))
 		pl->link_an_mode = MLO_AN_FIXED;
-	of_node_put(dn);
+	fwnode_handle_put(dn);
 
-	if (of_property_read_string(np, "managed", &managed) == 0 &&
+	if (fwnode_property_read_string(fwnode, "managed", &managed) == 0 &&
 	    strcmp(managed, "in-band-status") == 0) {
 		if (pl->link_an_mode == MLO_AN_FIXED) {
 			netdev_err(pl->netdev,
@@ -244,6 +261,7 @@ static int phylink_parse_mode(struct phylink *pl, struct device_node *np)
 		phylink_set(pl->supported, Asym_Pause);
 		phylink_set(pl->supported, Pause);
 		pl->link_config.an_enabled = true;
+		pl->link_an_mode = MLO_AN_INBAND;
 
 		switch (pl->link_config.interface) {
 		case PHY_INTERFACE_MODE_SGMII:
@@ -253,17 +271,14 @@ static int phylink_parse_mode(struct phylink *pl, struct device_node *np)
 			phylink_set(pl->supported, 100baseT_Full);
 			phylink_set(pl->supported, 1000baseT_Half);
 			phylink_set(pl->supported, 1000baseT_Full);
-			pl->link_an_mode = MLO_AN_SGMII;
 			break;
 
 		case PHY_INTERFACE_MODE_1000BASEX:
 			phylink_set(pl->supported, 1000baseX_Full);
-			pl->link_an_mode = MLO_AN_8023Z;
 			break;
 
 		case PHY_INTERFACE_MODE_2500BASEX:
 			phylink_set(pl->supported, 2500baseX_Full);
-			pl->link_an_mode = MLO_AN_8023Z;
 			break;
 
 		case PHY_INTERFACE_MODE_10GKR:
@@ -280,7 +295,6 @@ static int phylink_parse_mode(struct phylink *pl, struct device_node *np)
 			phylink_set(pl->supported, 10000baseLR_Full);
 			phylink_set(pl->supported, 10000baseLRM_Full);
 			phylink_set(pl->supported, 10000baseER_Full);
-			pl->link_an_mode = MLO_AN_SGMII;
 			break;
 
 		default:
@@ -320,8 +334,7 @@ static void phylink_mac_config(struct phylink *pl,
 static void phylink_mac_an_restart(struct phylink *pl)
 {
 	if (pl->link_config.an_enabled &&
-	    (pl->link_config.interface == PHY_INTERFACE_MODE_1000BASEX ||
-	     pl->link_config.interface == PHY_INTERFACE_MODE_2500BASEX))
+	    phy_interface_mode_is_8023z(pl->link_config.interface))
 		pl->ops->mac_an_restart(pl->netdev);
 }
 
@@ -339,12 +352,14 @@ static int phylink_get_mac_state(struct phylink *pl, struct phylink_link_state *
 }
 
 /* The fixed state is... fixed except for the link state,
- * which may be determined by a GPIO.
+ * which may be determined by a GPIO or a callback.
  */
 static void phylink_get_fixed_state(struct phylink *pl, struct phylink_link_state *state)
 {
 	*state = pl->link_config;
-	if (pl->link_gpio)
+	if (pl->get_fixed_state)
+		pl->get_fixed_state(pl->netdev, state);
+	else if (pl->link_gpio)
 		state->link = !!gpiod_get_value(pl->link_gpio);
 }
 
@@ -423,7 +438,7 @@ static void phylink_resolve(struct work_struct *w)
 			phylink_mac_config(pl, &link_state);
 			break;
 
-		case MLO_AN_SGMII:
+		case MLO_AN_INBAND:
 			phylink_get_mac_state(pl, &link_state);
 			if (pl->phydev) {
 				bool changed = false;
@@ -449,10 +464,6 @@ static void phylink_resolve(struct work_struct *w)
 				}
 			}
 			break;
-
-		case MLO_AN_8023Z:
-			phylink_get_mac_state(pl, &link_state);
-			break;
 		}
 	}
 
@@ -489,15 +500,27 @@ static void phylink_run_resolve(struct phylink *pl)
 
 static const struct sfp_upstream_ops sfp_phylink_ops;
 
-static int phylink_register_sfp(struct phylink *pl, struct device_node *np)
+static int phylink_register_sfp(struct phylink *pl,
+				struct fwnode_handle *fwnode)
 {
-	struct device_node *sfp_np;
+	struct fwnode_reference_args ref;
+	int ret;
 
-	sfp_np = of_parse_phandle(np, "sfp", 0);
-	if (!sfp_np)
+	if (!fwnode)
 		return 0;
 
-	pl->sfp_bus = sfp_register_upstream(sfp_np, pl->netdev, pl,
+	ret = fwnode_property_get_reference_args(fwnode, "sfp", NULL,
+						 0, 0, &ref);
+	if (ret < 0) {
+		if (ret == -ENOENT)
+			return 0;
+
+		netdev_err(pl->netdev, "unable to parse \"sfp\" node: %d\n",
+			   ret);
+		return ret;
+	}
+
+	pl->sfp_bus = sfp_register_upstream(ref.fwnode, pl->netdev, pl,
 					    &sfp_phylink_ops);
 	if (!pl->sfp_bus)
 		return -ENOMEM;
@@ -505,7 +528,22 @@ static int phylink_register_sfp(struct phylink *pl, struct device_node *np)
 	return 0;
 }
 
-struct phylink *phylink_create(struct net_device *ndev, struct device_node *np,
+/**
+ * phylink_create() - create a phylink instance
+ * @ndev: a pointer to the &struct net_device
+ * @fwnode: a pointer to a &struct fwnode_handle describing the network
+ *	interface
+ * @iface: the desired link mode defined by &typedef phy_interface_t
+ * @ops: a pointer to a &struct phylink_mac_ops for the MAC.
+ *
+ * Create a new phylink instance, and parse the link parameters found in
+ * @fwnode. This will parse in-band modes, fixed-link or SFP configuration.
+ *
+ * Returns a pointer to a &struct phylink, or an error-pointer value. Users
+ * must use IS_ERR() to check for errors from this function.
+ */
+struct phylink *phylink_create(struct net_device *ndev,
+			       struct fwnode_handle *fwnode,
 			       phy_interface_t iface,
 			       const struct phylink_mac_ops *ops)
 {
@@ -521,7 +559,10 @@ struct phylink *phylink_create(struct net_device *ndev, struct device_node *np,
 	pl->netdev = ndev;
 	pl->phy_state.interface = iface;
 	pl->link_interface = iface;
-	pl->link_port = PORT_MII;
+	if (iface == PHY_INTERFACE_MODE_MOCA)
+		pl->link_port = PORT_BNC;
+	else
+		pl->link_port = PORT_MII;
 	pl->link_config.interface = iface;
 	pl->link_config.pause = MLO_PAUSE_AN;
 	pl->link_config.speed = SPEED_UNKNOWN;
@@ -534,21 +575,21 @@ struct phylink *phylink_create(struct net_device *ndev, struct device_node *np,
 	linkmode_copy(pl->link_config.advertising, pl->supported);
 	phylink_validate(pl, pl->supported, &pl->link_config);
 
-	ret = phylink_parse_mode(pl, np);
+	ret = phylink_parse_mode(pl, fwnode);
 	if (ret < 0) {
 		kfree(pl);
 		return ERR_PTR(ret);
 	}
 
 	if (pl->link_an_mode == MLO_AN_FIXED) {
-		ret = phylink_parse_fixedlink(pl, np);
+		ret = phylink_parse_fixedlink(pl, fwnode);
 		if (ret < 0) {
 			kfree(pl);
 			return ERR_PTR(ret);
 		}
 	}
 
-	ret = phylink_register_sfp(pl, np);
+	ret = phylink_register_sfp(pl, fwnode);
 	if (ret < 0) {
 		kfree(pl);
 		return ERR_PTR(ret);
@@ -558,6 +599,13 @@ struct phylink *phylink_create(struct net_device *ndev, struct device_node *np,
 }
 EXPORT_SYMBOL_GPL(phylink_create);
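A MAC driver's probe path would then create its instance roughly as
follows (the ops structure, private data and use of dev_fwnode() are
illustrative assumptions):

	/* Sketch: create a phylink instance from the device's fwnode */
	pl = phylink_create(ndev, dev_fwnode(&pdev->dev),
			    PHY_INTERFACE_MODE_SGMII, &example_mac_ops);
	if (IS_ERR(pl))
		return PTR_ERR(pl);
	priv->phylink = pl;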
 
+/**
+ * phylink_destroy() - cleanup and destroy the phylink instance
+ * @pl: a pointer to a &struct phylink returned from phylink_create()
+ *
+ * Destroy a phylink instance. Any PHY that has been attached must have been
+ * cleaned up via phylink_disconnect_phy() prior to calling this function.
+ */
 void phylink_destroy(struct phylink *pl)
 {
 	if (pl->sfp_bus)
@@ -654,10 +702,39 @@ static int phylink_bringup_phy(struct phylink *pl, struct phy_device *phy)
 	return 0;
 }
 
+/**
+ * phylink_connect_phy() - connect a PHY to the phylink instance
+ * @pl: a pointer to a &struct phylink returned from phylink_create()
+ * @phy: a pointer to a &struct phy_device.
+ *
+ * Connect @phy to the phylink instance specified by @pl by calling
+ * phy_attach_direct(). Configure the @phy according to the MAC driver's
+ * capabilities, start the PHYLIB state machine and enable any interrupts
+ * that the PHY supports.
+ *
+ * This updates the phylink's ethtool supported and advertising link mode
+ * masks.
+ *
+ * Returns 0 on success or a negative errno.
+ */
 int phylink_connect_phy(struct phylink *pl, struct phy_device *phy)
 {
 	int ret;
 
+	if (WARN_ON(pl->link_an_mode == MLO_AN_FIXED ||
+		    (pl->link_an_mode == MLO_AN_INBAND &&
+		     phy_interface_mode_is_8023z(pl->link_interface))))
+		return -EINVAL;
+
+	if (pl->phydev)
+		return -EBUSY;
+
+	/* Use PHY device/driver interface */
+	if (pl->link_interface == PHY_INTERFACE_MODE_NA) {
+		pl->link_interface = phy->interface;
+		pl->link_config.interface = pl->link_interface;
+	}
+
 	ret = phy_attach_direct(pl->netdev, phy, 0, pl->link_interface);
 	if (ret)
 		return ret;
@@ -670,14 +747,29 @@ int phylink_connect_phy(struct phylink *pl, struct phy_device *phy)
 }
 EXPORT_SYMBOL_GPL(phylink_connect_phy);
 
-int phylink_of_phy_connect(struct phylink *pl, struct device_node *dn)
+/**
+ * phylink_of_phy_connect() - connect the PHY specified in the DT node
+ * @pl: a pointer to a &struct phylink returned from phylink_create()
+ * @dn: a pointer to a &struct device_node.
+ * @flags: PHY-specific flags to communicate to the PHY device driver
+ *
+ * Connect the phy specified in the device node @dn to the phylink instance
+ * specified by @pl. Actions specified in phylink_connect_phy() will be
+ * performed.
+ *
+ * Returns 0 on success or a negative errno.
+ */
+int phylink_of_phy_connect(struct phylink *pl, struct device_node *dn,
+			   u32 flags)
 {
 	struct device_node *phy_node;
 	struct phy_device *phy_dev;
 	int ret;
 
-	/* Fixed links are handled without needing a PHY */
-	if (pl->link_an_mode == MLO_AN_FIXED)
+	/* Fixed links and 802.3z are handled without needing a PHY */
+	if (pl->link_an_mode == MLO_AN_FIXED ||
+	    (pl->link_an_mode == MLO_AN_INBAND &&
+	     phy_interface_mode_is_8023z(pl->link_interface)))
 		return 0;
 
 	phy_node = of_parse_phandle(dn, "phy-handle", 0);
@@ -687,14 +779,13 @@ int phylink_of_phy_connect(struct phylink *pl, struct device_node *dn)
 		phy_node = of_parse_phandle(dn, "phy-device", 0);
 
 	if (!phy_node) {
-		if (pl->link_an_mode == MLO_AN_PHY) {
-			netdev_err(pl->netdev, "unable to find PHY node\n");
+		if (pl->link_an_mode == MLO_AN_PHY)
 			return -ENODEV;
-		}
 		return 0;
 	}
 
-	phy_dev = of_phy_attach(pl->netdev, phy_node, 0, pl->link_interface);
+	phy_dev = of_phy_attach(pl->netdev, phy_node, flags,
+				pl->link_interface);
 	/* We're done with the phy_node handle */
 	of_node_put(phy_node);
 
@@ -709,11 +800,18 @@ int phylink_of_phy_connect(struct phylink *pl, struct device_node *dn)
 }
 EXPORT_SYMBOL_GPL(phylink_of_phy_connect);
 
+/**
+ * phylink_disconnect_phy() - disconnect any PHY attached to the phylink
+ *   instance.
+ * @pl: a pointer to a &struct phylink returned from phylink_create()
+ *
+ * Disconnect any current PHY from the phylink instance described by @pl.
+ */
 void phylink_disconnect_phy(struct phylink *pl)
 {
 	struct phy_device *phy;
 
-	WARN_ON(!lockdep_rtnl_is_held());
+	ASSERT_RTNL();
 
 	phy = pl->phydev;
 	if (phy) {
@@ -730,6 +828,40 @@ void phylink_disconnect_phy(struct phylink *pl)
 }
 EXPORT_SYMBOL_GPL(phylink_disconnect_phy);
 
+/**
+ * phylink_fixed_state_cb() - allow setting a fixed link callback
+ * @pl: a pointer to a &struct phylink returned from phylink_create()
+ * @cb: callback to execute to determine the fixed link state.
+ *
+ * The MAC driver should call this function when the state of its link
+ * can be determined through, e.g., an out-of-band MMIO register.
+ */
+int phylink_fixed_state_cb(struct phylink *pl,
+			   void (*cb)(struct net_device *dev,
+				      struct phylink_link_state *state))
+{
+	/* It does not make sense to let the link be overridden unless we use
+	 * MLO_AN_FIXED
+	 */
+	if (pl->link_an_mode != MLO_AN_FIXED)
+		return -EINVAL;
+
+	mutex_lock(&pl->state_mutex);
+	pl->get_fixed_state = cb;
+	mutex_unlock(&pl->state_mutex);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(phylink_fixed_state_cb);
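A typical caller reads an out-of-band status register from the callback;
a hedged sketch (the register names and private structure are
hypothetical):

	/* Hypothetical fixed-link state callback reading a MAC register */
	static void example_get_fixed_state(struct net_device *dev,
					    struct phylink_link_state *state)
	{
		struct example_priv *priv = netdev_priv(dev);

		state->link = !!(readl(priv->regs + EXAMPLE_LINK_STATUS) &
				 EXAMPLE_LINK_UP);
	}

	err = phylink_fixed_state_cb(priv->phylink, example_get_fixed_state);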
+
+/**
+ * phylink_mac_change() - notify phylink of a change in MAC state
+ * @pl: a pointer to a &struct phylink returned from phylink_create()
+ * @up: indicates whether the link is currently up.
+ *
+ * The MAC driver should call this function when the state of its link
+ * changes (e.g. link failure, new negotiation results, etc.)
+ */
 void phylink_mac_change(struct phylink *pl, bool up)
 {
 	if (!up)
@@ -739,9 +871,17 @@ void phylink_mac_change(struct phylink *pl, bool up)
 }
 EXPORT_SYMBOL_GPL(phylink_mac_change);
 
+/**
+ * phylink_start() - start a phylink instance
+ * @pl: a pointer to a &struct phylink returned from phylink_create()
+ *
+ * Start the phylink instance specified by @pl, configuring the MAC for the
+ * desired link mode(s) and negotiation style. This should be called from the
+ * network device driver's &struct net_device_ops ndo_open() method.
+ */
 void phylink_start(struct phylink *pl)
 {
-	WARN_ON(!lockdep_rtnl_is_held());
+	ASSERT_RTNL();
 
 	netdev_info(pl->netdev, "configuring for %s/%s link mode\n",
 		    phylink_an_mode_str(pl->link_an_mode),
@@ -754,6 +894,12 @@ void phylink_start(struct phylink *pl)
 	phylink_resolve_flow(pl, &pl->link_config);
 	phylink_mac_config(pl, &pl->link_config);
 
+	/* Restart autonegotiation if using 802.3z to ensure that the link
+	 * parameters are properly negotiated.  This is necessary for DSA
+	 * switches using 802.3z negotiation to ensure they see our modes.
+	 */
+	phylink_mac_an_restart(pl);
+
 	clear_bit(PHYLINK_DISABLE_STOPPED, &pl->phylink_disable_state);
 	phylink_run_resolve(pl);
 
@@ -764,9 +910,18 @@ void phylink_start(struct phylink *pl)
 }
 EXPORT_SYMBOL_GPL(phylink_start);
 
+/**
+ * phylink_stop() - stop a phylink instance
+ * @pl: a pointer to a &struct phylink returned from phylink_create()
+ *
+ * Stop the phylink instance specified by @pl. This should be called from the
+ * network device driver's &struct net_device_ops ndo_stop() method.  The
+ * network device's carrier state should not be changed prior to calling this
+ * function.
+ */
 void phylink_stop(struct phylink *pl)
 {
-	WARN_ON(!lockdep_rtnl_is_held());
+	ASSERT_RTNL();
 
 	if (pl->phydev)
 		phy_stop(pl->phydev);
@@ -779,9 +934,18 @@ void phylink_stop(struct phylink *pl)
 }
 EXPORT_SYMBOL_GPL(phylink_stop);
 
+/**
+ * phylink_ethtool_get_wol() - get the wake on lan parameters for the PHY
+ * @pl: a pointer to a &struct phylink returned from phylink_create()
+ * @wol: a pointer to &struct ethtool_wolinfo to hold the read parameters
+ *
+ * Read the wake on lan parameters from the PHY attached to the phylink
+ * instance specified by @pl. If no PHY is currently attached, report no
+ * support for wake on lan.
+ */
 void phylink_ethtool_get_wol(struct phylink *pl, struct ethtool_wolinfo *wol)
 {
-	WARN_ON(!lockdep_rtnl_is_held());
+	ASSERT_RTNL();
 
 	wol->supported = 0;
 	wol->wolopts = 0;
@@ -791,11 +955,22 @@ void phylink_ethtool_get_wol(struct phylink *pl, struct ethtool_wolinfo *wol)
 }
 EXPORT_SYMBOL_GPL(phylink_ethtool_get_wol);
 
+/**
+ * phylink_ethtool_set_wol() - set wake on lan parameters
+ * @pl: a pointer to a &struct phylink returned from phylink_create()
+ * @wol: a pointer to &struct ethtool_wolinfo for the desired parameters
+ *
+ * Set the wake on lan parameters for the PHY attached to the phylink
+ * instance specified by @pl. If no PHY is attached, returns an
+ * %EOPNOTSUPP error.
+ *
+ * Returns zero on success or negative errno code.
+ */
 int phylink_ethtool_set_wol(struct phylink *pl, struct ethtool_wolinfo *wol)
 {
 	int ret = -EOPNOTSUPP;
 
-	WARN_ON(!lockdep_rtnl_is_held());
+	ASSERT_RTNL();
 
 	if (pl->phydev)
 		ret = phy_ethtool_set_wol(pl->phydev, wol);
@@ -826,12 +1001,21 @@ static void phylink_get_ksettings(const struct phylink_link_state *state,
 				AUTONEG_DISABLE;
 }
 
+/**
+ * phylink_ethtool_ksettings_get() - get the current link settings
+ * @pl: a pointer to a &struct phylink returned from phylink_create()
+ * @kset: a pointer to a &struct ethtool_link_ksettings to hold link settings
+ *
+ * Read the current link settings for the phylink instance specified by @pl.
+ * This will be the link settings read from the MAC, PHY or fixed link
+ * settings depending on the current negotiation mode.
+ */
 int phylink_ethtool_ksettings_get(struct phylink *pl,
 				  struct ethtool_link_ksettings *kset)
 {
 	struct phylink_link_state link_state;
 
-	WARN_ON(!lockdep_rtnl_is_held());
+	ASSERT_RTNL();
 
 	if (pl->phydev) {
 		phy_ethtool_ksettings_get(pl->phydev, kset);
@@ -851,14 +1035,13 @@ int phylink_ethtool_ksettings_get(struct phylink *pl,
 		phylink_get_ksettings(&link_state, kset);
 		break;
 
-	case MLO_AN_SGMII:
+	case MLO_AN_INBAND:
 		/* If there is a phy attached, then use the reported
 		 * settings from the phy with no modification.
 		 */
 		if (pl->phydev)
 			break;
 
-	case MLO_AN_8023Z:
 		phylink_get_mac_state(pl, &link_state);
 
 		/* The MAC is reporting the link results from its own PCS
@@ -873,6 +1056,11 @@ int phylink_ethtool_ksettings_get(struct phylink *pl,
 }
 EXPORT_SYMBOL_GPL(phylink_ethtool_ksettings_get);
 
+/**
+ * phylink_ethtool_ksettings_set() - set the link settings
+ * @pl: a pointer to a &struct phylink returned from phylink_create()
+ * @kset: a pointer to a &struct ethtool_link_ksettings for the desired modes
+ */
 int phylink_ethtool_ksettings_set(struct phylink *pl,
 				  const struct ethtool_link_ksettings *kset)
 {
@@ -880,7 +1068,7 @@ int phylink_ethtool_ksettings_set(struct phylink *pl,
 	struct phylink_link_state config;
 	int ret;
 
-	WARN_ON(!lockdep_rtnl_is_held());
+	ASSERT_RTNL();
 
 	if (kset->base.autoneg != AUTONEG_DISABLE &&
 	    kset->base.autoneg != AUTONEG_ENABLE)
@@ -967,11 +1155,22 @@ int phylink_ethtool_ksettings_set(struct phylink *pl,
 }
 EXPORT_SYMBOL_GPL(phylink_ethtool_ksettings_set);
 
+/**
+ * phylink_ethtool_nway_reset() - restart negotiation
+ * @pl: a pointer to a &struct phylink returned from phylink_create()
+ *
+ * Restart negotiation for the phylink instance specified by @pl. This will
+ * cause any attached phy to restart negotiation with the link partner, and
+ * if the MAC is in a BaseX mode, the MAC will also be requested to restart
+ * negotiation.
+ *
+ * Returns zero on success, or negative error code.
+ */
 int phylink_ethtool_nway_reset(struct phylink *pl)
 {
 	int ret = 0;
 
-	WARN_ON(!lockdep_rtnl_is_held());
+	ASSERT_RTNL();
 
 	if (pl->phydev)
 		ret = phy_restart_aneg(pl->phydev);
@@ -981,10 +1180,15 @@ int phylink_ethtool_nway_reset(struct phylink *pl)
 }
 EXPORT_SYMBOL_GPL(phylink_ethtool_nway_reset);
 
+/**
+ * phylink_ethtool_get_pauseparam() - get the current pause parameters
+ * @pl: a pointer to a &struct phylink returned from phylink_create()
+ * @pause: a pointer to a &struct ethtool_pauseparam
+ */
 void phylink_ethtool_get_pauseparam(struct phylink *pl,
 				    struct ethtool_pauseparam *pause)
 {
-	WARN_ON(!lockdep_rtnl_is_held());
+	ASSERT_RTNL();
 
 	pause->autoneg = !!(pl->link_config.pause & MLO_PAUSE_AN);
 	pause->rx_pause = !!(pl->link_config.pause & MLO_PAUSE_RX);
@@ -992,12 +1196,17 @@ void phylink_ethtool_get_pauseparam(struct phylink *pl,
 }
 EXPORT_SYMBOL_GPL(phylink_ethtool_get_pauseparam);
 
+/**
+ * phylink_ethtool_set_pauseparam() - set the current pause parameters
+ * @pl: a pointer to a &struct phylink returned from phylink_create()
+ * @pause: a pointer to a &struct ethtool_pauseparam
+ */
 int phylink_ethtool_set_pauseparam(struct phylink *pl,
 				   struct ethtool_pauseparam *pause)
 {
 	struct phylink_link_state *config = &pl->link_config;
 
-	WARN_ON(!lockdep_rtnl_is_held());
+	ASSERT_RTNL();
 
 	if (!phylink_test(pl->supported, Pause) &&
 	    !phylink_test(pl->supported, Asym_Pause))
@@ -1030,8 +1239,7 @@ int phylink_ethtool_set_pauseparam(struct phylink *pl,
 			phylink_mac_config(pl, config);
 			break;
 
-		case MLO_AN_SGMII:
-		case MLO_AN_8023Z:
+		case MLO_AN_INBAND:
 			phylink_mac_config(pl, config);
 			phylink_mac_an_restart(pl);
 			break;
@@ -1070,24 +1278,21 @@ int phylink_ethtool_get_module_eeprom(struct phylink *pl,
 }
 EXPORT_SYMBOL_GPL(phylink_ethtool_get_module_eeprom);
 
-int phylink_init_eee(struct phylink *pl, bool clk_stop_enable)
-{
-	int ret = -EPROTONOSUPPORT;
-
-	WARN_ON(!lockdep_rtnl_is_held());
-
-	if (pl->phydev)
-		ret = phy_init_eee(pl->phydev, clk_stop_enable);
-
-	return ret;
-}
-EXPORT_SYMBOL_GPL(phylink_init_eee);
-
+/**
+ * phylink_get_eee_err() - read the energy efficient ethernet error
+ *   counter
+ * @pl: a pointer to a &struct phylink returned from phylink_create().
+ *
+ * Read the Energy Efficient Ethernet error counter from the PHY associated
+ * with the phylink instance specified by @pl.
+ *
+ * Returns positive error counter value, or negative error code.
+ */
 int phylink_get_eee_err(struct phylink *pl)
 {
 	int ret = 0;
 
-	WARN_ON(!lockdep_rtnl_is_held());
+	ASSERT_RTNL();
 
 	if (pl->phydev)
 		ret = phy_get_eee_err(pl->phydev);
@@ -1096,11 +1301,16 @@ int phylink_get_eee_err(struct phylink *pl)
 }
 EXPORT_SYMBOL_GPL(phylink_get_eee_err);
 
+/**
+ * phylink_ethtool_get_eee() - read the energy efficient ethernet parameters
+ * @pl: a pointer to a &struct phylink returned from phylink_create()
+ * @eee: a pointer to a &struct ethtool_eee for the read parameters
+ */
 int phylink_ethtool_get_eee(struct phylink *pl, struct ethtool_eee *eee)
 {
 	int ret = -EOPNOTSUPP;
 
-	WARN_ON(!lockdep_rtnl_is_held());
+	ASSERT_RTNL();
 
 	if (pl->phydev)
 		ret = phy_ethtool_get_eee(pl->phydev, eee);
@@ -1109,11 +1319,16 @@ int phylink_ethtool_get_eee(struct phylink *pl, struct ethtool_eee *eee)
 }
 EXPORT_SYMBOL_GPL(phylink_ethtool_get_eee);
 
+/**
+ * phylink_ethtool_set_eee() - set the energy efficient ethernet parameters
+ * @pl: a pointer to a &struct phylink returned from phylink_create()
+ * @eee: a pointer to a &struct ethtool_eee for the desired parameters
+ */
 int phylink_ethtool_set_eee(struct phylink *pl, struct ethtool_eee *eee)
 {
 	int ret = -EOPNOTSUPP;
 
-	WARN_ON(!lockdep_rtnl_is_held());
+	ASSERT_RTNL();
 
 	if (pl->phydev)
 		ret = phy_ethtool_set_eee(pl->phydev, eee);
@@ -1248,9 +1463,7 @@ static int phylink_mii_read(struct phylink *pl, unsigned int phy_id,
 	case MLO_AN_PHY:
 		return -EOPNOTSUPP;
 
-	case MLO_AN_SGMII:
-		/* No phy, fall through to 8023z method */
-	case MLO_AN_8023Z:
+	case MLO_AN_INBAND:
 		if (phy_id == 0) {
 			val = phylink_get_mac_state(pl, &state);
 			if (val < 0)
@@ -1275,24 +1488,40 @@ static int phylink_mii_write(struct phylink *pl, unsigned int phy_id,
 	case MLO_AN_PHY:
 		return -EOPNOTSUPP;
 
-	case MLO_AN_SGMII:
-		/* No phy, fall through to 8023z method */
-	case MLO_AN_8023Z:
+	case MLO_AN_INBAND:
 		break;
 	}
 
 	return 0;
 }
 
+/**
+ * phylink_mii_ioctl() - generic mii ioctl interface
+ * @pl: a pointer to a &struct phylink returned from phylink_create()
+ * @ifr: a pointer to a &struct ifreq for socket ioctls
+ * @cmd: ioctl cmd to execute
+ *
+ * Perform the specified MII ioctl on the PHY attached to the phylink instance
+ * specified by @pl. If no PHY is attached, emulate the presence of the PHY.
+ *
+ * Returns: zero on success or negative error code.
+ *
+ * %SIOCGMIIPHY:
+ *  read register from the current PHY.
+ * %SIOCGMIIREG:
+ *  read register from the specified PHY.
+ * %SIOCSMIIREG:
+ *  set a register on the specified PHY.
+ */
 int phylink_mii_ioctl(struct phylink *pl, struct ifreq *ifr, int cmd)
 {
 	struct mii_ioctl_data *mii = if_mii(ifr);
 	int  ret;
 
-	WARN_ON(!lockdep_rtnl_is_held());
+	ASSERT_RTNL();
 
 	if (pl->phydev) {
-		/* PHYs only exist for MLO_AN_PHY and MLO_AN_SGMII */
+		/* PHYs only exist for MLO_AN_PHY and SGMII */
 		switch (cmd) {
 		case SIOCGMIIPHY:
 			mii->phy_id = pl->phydev->mdio.addr;
@@ -1351,7 +1580,7 @@ static int phylink_sfp_module_insert(void *upstream,
 	__ETHTOOL_DECLARE_LINK_MODE_MASK(support) = { 0, };
 	struct phylink_link_state config;
 	phy_interface_t iface;
-	int mode, ret = 0;
+	int ret = 0;
 	bool changed;
 	u8 port;
 
@@ -1359,14 +1588,13 @@ static int phylink_sfp_module_insert(void *upstream,
 	port = sfp_parse_port(pl->sfp_bus, id, support);
 	iface = sfp_parse_interface(pl->sfp_bus, id);
 
-	WARN_ON(!lockdep_rtnl_is_held());
+	ASSERT_RTNL();
 
 	switch (iface) {
 	case PHY_INTERFACE_MODE_SGMII:
-		mode = MLO_AN_SGMII;
-		break;
 	case PHY_INTERFACE_MODE_1000BASEX:
-		mode = MLO_AN_8023Z;
+	case PHY_INTERFACE_MODE_2500BASEX:
+	case PHY_INTERFACE_MODE_10GKR:
 		break;
 	default:
 		return -EINVAL;
@@ -1384,16 +1612,18 @@ static int phylink_sfp_module_insert(void *upstream,
 	ret = phylink_validate(pl, support, &config);
 	if (ret) {
 		netdev_err(pl->netdev, "validation of %s/%s with support %*pb failed: %d\n",
-			   phylink_an_mode_str(mode), phy_modes(config.interface),
+			   phylink_an_mode_str(MLO_AN_INBAND),
+			   phy_modes(config.interface),
 			   __ETHTOOL_LINK_MODE_MASK_NBITS, support, ret);
 		return ret;
 	}
 
 	netdev_dbg(pl->netdev, "requesting link mode %s/%s with support %*pb\n",
-		   phylink_an_mode_str(mode), phy_modes(config.interface),
+		   phylink_an_mode_str(MLO_AN_INBAND),
+		   phy_modes(config.interface),
 		   __ETHTOOL_LINK_MODE_MASK_NBITS, support);
 
-	if (mode == MLO_AN_8023Z && pl->phydev)
+	if (phy_interface_mode_is_8023z(iface) && pl->phydev)
 		return -EINVAL;
 
 	changed = !bitmap_equal(pl->supported, support,
@@ -1403,15 +1633,15 @@ static int phylink_sfp_module_insert(void *upstream,
 		linkmode_copy(pl->link_config.advertising, config.advertising);
 	}
 
-	if (pl->link_an_mode != mode ||
+	if (pl->link_an_mode != MLO_AN_INBAND ||
 	    pl->link_config.interface != config.interface) {
 		pl->link_config.interface = config.interface;
-		pl->link_an_mode = mode;
+		pl->link_an_mode = MLO_AN_INBAND;
 
 		changed = true;
 
 		netdev_info(pl->netdev, "switched to %s/%s link mode\n",
-			    phylink_an_mode_str(mode),
+			    phylink_an_mode_str(MLO_AN_INBAND),
 			    phy_modes(config.interface));
 	}
 
@@ -1428,7 +1658,7 @@ static void phylink_sfp_link_down(void *upstream)
 {
 	struct phylink *pl = upstream;
 
-	WARN_ON(!lockdep_rtnl_is_held());
+	ASSERT_RTNL();
 
 	set_bit(PHYLINK_DISABLE_LINK, &pl->phylink_disable_state);
 	queue_work(system_power_efficient_wq, &pl->resolve);
@@ -1439,7 +1669,7 @@ static void phylink_sfp_link_up(void *upstream)
 {
 	struct phylink *pl = upstream;
 
-	WARN_ON(!lockdep_rtnl_is_held());
+	ASSERT_RTNL();
 
 	clear_bit(PHYLINK_DISABLE_LINK, &pl->phylink_disable_state);
 	phylink_run_resolve(pl);
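
The conversion from open-coded lockdep checks to ASSERT_RTNL() above keeps the locking contract unchanged: each of these phylink entry points must be called with the RTNL held. A minimal caller sketch (hypothetical driver name; ethtool ops already run under RTNL, so this matters mainly for internal callers):

	/* Hypothetical: query EEE state from a context not already under RTNL */
	static int mydrv_query_eee(struct phylink *pl, struct ethtool_eee *eee)
	{
		int ret;

		rtnl_lock();
		ret = phylink_ethtool_get_eee(pl, eee);
		rtnl_unlock();

		return ret;
	}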
diff --git a/drivers/net/phy/qsemi.c b/drivers/net/phy/qsemi.c
index dbef800..889a4dc 100644
--- a/drivers/net/phy/qsemi.c
+++ b/drivers/net/phy/qsemi.c
@@ -118,8 +118,6 @@ static struct phy_driver qs6612_driver[] = { {
 	.features	= PHY_BASIC_FEATURES,
 	.flags		= PHY_HAS_INTERRUPT,
 	.config_init	= qs6612_config_init,
-	.config_aneg	= genphy_config_aneg,
-	.read_status	= genphy_read_status,
 	.ack_interrupt	= qs6612_ack_interrupt,
 	.config_intr	= qs6612_config_intr,
 } };
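
The .config_aneg/.read_status deletions here (and in the realtek, rockchip, smsc, ste10Xp, uPD60620 and vitesse drivers below) rely on the phylib core falling back to the genphy implementations when a driver leaves those hooks NULL. A simplified sketch of that fallback (illustrative, not the exact core code):

	static int phy_example_config_aneg(struct phy_device *phydev)
	{
		if (phydev->drv->config_aneg)
			return phydev->drv->config_aneg(phydev);

		return genphy_config_aneg(phydev);
	}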
diff --git a/drivers/net/phy/realtek.c b/drivers/net/phy/realtek.c
index eda0a6e..7c1bf68 100644
--- a/drivers/net/phy/realtek.c
+++ b/drivers/net/phy/realtek.c
@@ -13,29 +13,67 @@
  * option) any later version.
  *
  */
+#include <linux/bitops.h>
 #include <linux/phy.h>
 #include <linux/module.h>
 
-#define RTL821x_PHYSR		0x11
-#define RTL821x_PHYSR_DUPLEX	0x2000
-#define RTL821x_PHYSR_SPEED	0xc000
-#define RTL821x_INER		0x12
-#define RTL821x_INER_INIT	0x6400
-#define RTL821x_INSR		0x13
-#define RTL821x_PAGE_SELECT	0x1f
-#define RTL8211E_INER_LINK_STATUS 0x400
+#define RTL821x_PHYSR				0x11
+#define RTL821x_PHYSR_DUPLEX			BIT(13)
+#define RTL821x_PHYSR_SPEED			GENMASK(15, 14)
 
-#define RTL8211F_INER_LINK_STATUS 0x0010
-#define RTL8211F_INSR		0x1d
-#define RTL8211F_TX_DELAY	0x100
+#define RTL821x_INER				0x12
+#define RTL8211B_INER_INIT			0x6400
+#define RTL8211E_INER_LINK_STATUS		BIT(10)
+#define RTL8211F_INER_LINK_STATUS		BIT(4)
 
-#define RTL8201F_ISR		0x1e
-#define RTL8201F_IER		0x13
+#define RTL821x_INSR				0x13
+
+#define RTL821x_PAGE_SELECT			0x1f
+
+#define RTL8211F_INSR				0x1d
+
+#define RTL8211F_TX_DELAY			BIT(8)
+
+#define RTL8201F_ISR				0x1e
+#define RTL8201F_IER				0x13
 
 MODULE_DESCRIPTION("Realtek PHY driver");
 MODULE_AUTHOR("Johnson Leung");
 MODULE_LICENSE("GPL");
 
+static int rtl8211x_page_read(struct phy_device *phydev, u16 page, u16 address)
+{
+	int ret;
+
+	ret = phy_write(phydev, RTL821x_PAGE_SELECT, page);
+	if (ret)
+		return ret;
+
+	ret = phy_read(phydev, address);
+
+	/* restore to default page 0 */
+	phy_write(phydev, RTL821x_PAGE_SELECT, 0x0);
+
+	return ret;
+}
+
+static int rtl8211x_page_write(struct phy_device *phydev, u16 page,
+			       u16 address, u16 val)
+{
+	int ret;
+
+	ret = phy_write(phydev, RTL821x_PAGE_SELECT, page);
+	if (ret)
+		return ret;
+
+	ret = phy_write(phydev, address, val);
+
+	/* restore to default page 0 */
+	phy_write(phydev, RTL821x_PAGE_SELECT, 0x0);
+
+	return ret;
+}
+
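
These helpers centralise the select-page/access/restore-page sequence so callers cannot leave the PHY parked on a non-default page. A minimal read-modify-write sketch on top of them (page and register numbers hypothetical):

	/* Hypothetical: set bit 2 of register 0x10 on page 0x7 */
	static int rtl_example_set_bit(struct phy_device *phydev)
	{
		int ret = rtl8211x_page_read(phydev, 0x7, 0x10);

		if (ret < 0)
			return ret;

		return rtl8211x_page_write(phydev, 0x7, 0x10,
					   (ret & 0xffff) | BIT(2));
	}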
 static int rtl8201_ack_interrupt(struct phy_device *phydev)
 {
 	int err;
@@ -58,31 +96,21 @@ static int rtl8211f_ack_interrupt(struct phy_device *phydev)
 {
 	int err;
 
-	phy_write(phydev, RTL821x_PAGE_SELECT, 0xa43);
-	err = phy_read(phydev, RTL8211F_INSR);
-	/* restore to default page 0 */
-	phy_write(phydev, RTL821x_PAGE_SELECT, 0x0);
+	err = rtl8211x_page_read(phydev, 0xa43, RTL8211F_INSR);
 
 	return (err < 0) ? err : 0;
 }
 
 static int rtl8201_config_intr(struct phy_device *phydev)
 {
-	int err;
-
-	/* switch to page 7 */
-	phy_write(phydev, RTL821x_PAGE_SELECT, 0x7);
+	u16 val;
 
 	if (phydev->interrupts == PHY_INTERRUPT_ENABLED)
-		err = phy_write(phydev, RTL8201F_IER,
-				BIT(13) | BIT(12) | BIT(11));
+		val = BIT(13) | BIT(12) | BIT(11);
 	else
-		err = phy_write(phydev, RTL8201F_IER, 0);
+		val = 0;
 
-	/* restore to default page 0 */
-	phy_write(phydev, RTL821x_PAGE_SELECT, 0x0);
-
-	return err;
+	return rtl8211x_page_write(phydev, 0x7, RTL8201F_IER, val);
 }
 
 static int rtl8211b_config_intr(struct phy_device *phydev)
@@ -91,7 +119,7 @@ static int rtl8211b_config_intr(struct phy_device *phydev)
 
 	if (phydev->interrupts == PHY_INTERRUPT_ENABLED)
 		err = phy_write(phydev, RTL821x_INER,
-				RTL821x_INER_INIT);
+				RTL8211B_INER_INIT);
 	else
 		err = phy_write(phydev, RTL821x_INER, 0);
 
@@ -113,41 +141,41 @@ static int rtl8211e_config_intr(struct phy_device *phydev)
 
 static int rtl8211f_config_intr(struct phy_device *phydev)
 {
-	int err;
+	u16 val;
 
-	phy_write(phydev, RTL821x_PAGE_SELECT, 0xa42);
 	if (phydev->interrupts == PHY_INTERRUPT_ENABLED)
-		err = phy_write(phydev, RTL821x_INER,
-				RTL8211F_INER_LINK_STATUS);
+		val = RTL8211F_INER_LINK_STATUS;
 	else
-		err = phy_write(phydev, RTL821x_INER, 0);
-	phy_write(phydev, RTL821x_PAGE_SELECT, 0);
+		val = 0;
 
-	return err;
+	return rtl8211x_page_write(phydev, 0xa42, RTL821x_INER, val);
 }
 
 static int rtl8211f_config_init(struct phy_device *phydev)
 {
 	int ret;
-	u16 reg;
+	u16 val;
 
 	ret = genphy_config_init(phydev);
 	if (ret < 0)
 		return ret;
 
-	phy_write(phydev, RTL821x_PAGE_SELECT, 0xd08);
-	reg = phy_read(phydev, 0x11);
+	ret = rtl8211x_page_read(phydev, 0xd08, 0x11);
+	if (ret < 0)
+		return ret;
+
+	val = ret & 0xffff;
 
 	/* enable TX-delay for rgmii-id and rgmii-txid, otherwise disable it */
 	if (phydev->interface == PHY_INTERFACE_MODE_RGMII_ID ||
 	    phydev->interface == PHY_INTERFACE_MODE_RGMII_TXID)
-		reg |= RTL8211F_TX_DELAY;
+		val |= RTL8211F_TX_DELAY;
 	else
-		reg &= ~RTL8211F_TX_DELAY;
+		val &= ~RTL8211F_TX_DELAY;
 
-	phy_write(phydev, 0x11, reg);
-	/* restore to default page 0 */
-	phy_write(phydev, RTL821x_PAGE_SELECT, 0x0);
+	ret = rtl8211x_page_write(phydev, 0xd08, 0x11, val);
+	if (ret)
+		return ret;
 
 	return 0;
 }
@@ -159,16 +187,12 @@ static struct phy_driver realtek_drvs[] = {
 		.phy_id_mask    = 0x0000ffff,
 		.features       = PHY_BASIC_FEATURES,
 		.flags          = PHY_HAS_INTERRUPT,
-		.config_aneg    = &genphy_config_aneg,
-		.read_status    = &genphy_read_status,
 	}, {
 		.phy_id		= 0x001cc816,
 		.name		= "RTL8201F 10/100Mbps Ethernet",
 		.phy_id_mask	= 0x001fffff,
 		.features	= PHY_BASIC_FEATURES,
 		.flags		= PHY_HAS_INTERRUPT,
-		.config_aneg	= &genphy_config_aneg,
-		.read_status	= &genphy_read_status,
 		.ack_interrupt	= &rtl8201_ack_interrupt,
 		.config_intr	= &rtl8201_config_intr,
 		.suspend	= genphy_suspend,
@@ -179,8 +203,6 @@ static struct phy_driver realtek_drvs[] = {
 		.phy_id_mask	= 0x001fffff,
 		.features	= PHY_GBIT_FEATURES,
 		.flags		= PHY_HAS_INTERRUPT,
-		.config_aneg	= &genphy_config_aneg,
-		.read_status	= &genphy_read_status,
 		.ack_interrupt	= &rtl821x_ack_interrupt,
 		.config_intr	= &rtl8211b_config_intr,
 	}, {
@@ -189,8 +211,6 @@ static struct phy_driver realtek_drvs[] = {
 		.phy_id_mask	= 0x001fffff,
 		.features	= PHY_GBIT_FEATURES,
 		.flags		= PHY_HAS_INTERRUPT,
-		.config_aneg	= genphy_config_aneg,
-		.read_status	= genphy_read_status,
 		.ack_interrupt	= rtl821x_ack_interrupt,
 		.config_intr	= rtl8211e_config_intr,
 		.suspend	= genphy_suspend,
@@ -201,8 +221,6 @@ static struct phy_driver realtek_drvs[] = {
 		.phy_id_mask	= 0x001fffff,
 		.features	= PHY_GBIT_FEATURES,
 		.flags		= PHY_HAS_INTERRUPT,
-		.config_aneg	= &genphy_config_aneg,
-		.read_status	= &genphy_read_status,
 		.ack_interrupt	= &rtl821x_ack_interrupt,
 		.config_intr	= &rtl8211e_config_intr,
 		.suspend	= genphy_suspend,
@@ -213,9 +231,7 @@ static struct phy_driver realtek_drvs[] = {
 		.phy_id_mask	= 0x001fffff,
 		.features	= PHY_GBIT_FEATURES,
 		.flags		= PHY_HAS_INTERRUPT,
-		.config_aneg	= &genphy_config_aneg,
 		.config_init	= &rtl8211f_config_init,
-		.read_status	= &genphy_read_status,
 		.ack_interrupt	= &rtl8211f_ack_interrupt,
 		.config_intr	= &rtl8211f_config_intr,
 		.suspend	= genphy_suspend,
diff --git a/drivers/net/phy/rockchip.c b/drivers/net/phy/rockchip.c
index c092af1..f1da70b 100644
--- a/drivers/net/phy/rockchip.c
+++ b/drivers/net/phy/rockchip.c
@@ -213,7 +213,6 @@ static struct phy_driver rockchip_phy_driver[] = {
 	.soft_reset		= genphy_soft_reset,
 	.config_init		= rockchip_integrated_phy_config_init,
 	.config_aneg		= rockchip_config_aneg,
-	.read_status		= genphy_read_status,
 	.suspend		= genphy_suspend,
 	.resume			= rockchip_phy_resume,
 },
diff --git a/drivers/net/phy/sfp-bus.c b/drivers/net/phy/sfp-bus.c
index ab64a14..bdc4bb3 100644
--- a/drivers/net/phy/sfp-bus.c
+++ b/drivers/net/phy/sfp-bus.c
@@ -8,10 +8,14 @@
 
 #include "sfp.h"
 
+/**
+ * struct sfp_bus - internal representation of an sfp bus
+ */
 struct sfp_bus {
+	/* private: */
 	struct kref kref;
 	struct list_head node;
-	struct device_node *device_node;
+	struct fwnode_handle *fwnode;
 
 	const struct sfp_socket_ops *socket_ops;
 	struct device *sfp_dev;
@@ -26,6 +30,20 @@ struct sfp_bus {
 	bool started;
 };
 
+/**
+ * sfp_parse_port() - Parse the EEPROM base ID, setting the port type
+ * @bus: a pointer to the &struct sfp_bus structure for the sfp module
+ * @id: a pointer to the module's &struct sfp_eeprom_id
+ * @support: optional pointer to an array of unsigned long for the
+ *   ethtool support mask
+ *
+ * Parse the EEPROM identification given in @id, and return one of
+ * %PORT_TP, %PORT_FIBRE, %PORT_DA or %PORT_OTHER. If @support is non-%NULL,
+ * also set the ethtool %ETHTOOL_LINK_MODE_xxx_BIT corresponding with
+ * the connector type.
+ *
+ * If the port type is not known, returns %PORT_OTHER.
+ */
 int sfp_parse_port(struct sfp_bus *bus, const struct sfp_eeprom_id *id,
 		   unsigned long *support)
 {
@@ -39,21 +57,19 @@ int sfp_parse_port(struct sfp_bus *bus, const struct sfp_eeprom_id *id,
 	case SFP_CONNECTOR_MT_RJ:
 	case SFP_CONNECTOR_MU:
 	case SFP_CONNECTOR_OPTICAL_PIGTAIL:
-		if (support)
-			phylink_set(support, FIBRE);
 		port = PORT_FIBRE;
 		break;
 
 	case SFP_CONNECTOR_RJ45:
-		if (support)
-			phylink_set(support, TP);
 		port = PORT_TP;
 		break;
 
+	case SFP_CONNECTOR_COPPER_PIGTAIL:
+		port = PORT_DA;
+		break;
+
 	case SFP_CONNECTOR_UNSPEC:
 		if (id->base.e1000_base_t) {
-			if (support)
-				phylink_set(support, TP);
 			port = PORT_TP;
 			break;
 		}
@@ -62,7 +78,6 @@ int sfp_parse_port(struct sfp_bus *bus, const struct sfp_eeprom_id *id,
 	case SFP_CONNECTOR_MPO_1X12:
 	case SFP_CONNECTOR_MPO_2X16:
 	case SFP_CONNECTOR_HSSDC_II:
-	case SFP_CONNECTOR_COPPER_PIGTAIL:
 	case SFP_CONNECTOR_NOSEPARATE:
 	case SFP_CONNECTOR_MXC_2X16:
 		port = PORT_OTHER;
@@ -74,10 +89,40 @@ int sfp_parse_port(struct sfp_bus *bus, const struct sfp_eeprom_id *id,
 		break;
 	}
 
+	if (support) {
+		switch (port) {
+		case PORT_FIBRE:
+			phylink_set(support, FIBRE);
+			break;
+
+		case PORT_TP:
+			phylink_set(support, TP);
+			break;
+		}
+	}
+
 	return port;
 }
 EXPORT_SYMBOL_GPL(sfp_parse_port);
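
A hedged sketch of how an upstream consumes this helper (phylink does the equivalent on module insertion; the function name here is hypothetical):

	/* Hypothetical upstream use: port type plus connector link-mode bits */
	static int example_module_port(struct sfp_bus *bus,
				       const struct sfp_eeprom_id *id)
	{
		__ETHTOOL_DECLARE_LINK_MODE_MASK(support) = { 0, };

		/* sets the FIBRE/TP bits in @support as a side effect */
		return sfp_parse_port(bus, id, support);
	}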
 
+/**
+ * sfp_parse_interface() - Parse the phy_interface_t
+ * @bus: a pointer to the &struct sfp_bus structure for the sfp module
+ * @id: a pointer to the module's &struct sfp_eeprom_id
+ *
+ * Derive the phy_interface_t mode from the information found in the
+ * module's identifying EEPROM. There is no standard or defined way
+ * to derive this information, so we use some heuristics.
+ *
+ * If the encoding is 64b66b, then the module must be >= 10G, so
+ * return %PHY_INTERFACE_MODE_10GKR.
+ *
+ * If it's 8b10b, then it's 1G or slower. If it's definitely a fibre
+ * module, return %PHY_INTERFACE_MODE_1000BASEX mode, otherwise return
+ * %PHY_INTERFACE_MODE_SGMII mode.
+ *
+ * If the encoding is not known, return %PHY_INTERFACE_MODE_NA.
+ */
 phy_interface_t sfp_parse_interface(struct sfp_bus *bus,
 				    const struct sfp_eeprom_id *id)
 {
@@ -107,6 +152,11 @@ phy_interface_t sfp_parse_interface(struct sfp_bus *bus,
 		break;
 
 	default:
+		if (id->base.e1000_base_cx) {
+			iface = PHY_INTERFACE_MODE_1000BASEX;
+			break;
+		}
+
 		iface = PHY_INTERFACE_MODE_NA;
 		dev_err(bus->sfp_dev,
 			"SFP module encoding does not support 8b10b nor 64b66b\n");
@@ -117,13 +167,38 @@ phy_interface_t sfp_parse_interface(struct sfp_bus *bus,
 }
 EXPORT_SYMBOL_GPL(sfp_parse_interface);
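
Concretely, the heuristic plays out as follows (illustrative mappings, assuming the standard SFF-8024 encoding byte values):

	/* encoding 0x06 (64b/66b)                    -> PHY_INTERFACE_MODE_10GKR
	 * encoding 0x01 (8b/10b), 1000BASE-SX module -> PHY_INTERFACE_MODE_1000BASEX
	 * encoding 0x01 (8b/10b), 1000BASE-T module  -> PHY_INTERFACE_MODE_SGMII
	 */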
 
+/**
+ * sfp_parse_support() - Parse the eeprom id for supported link modes
+ * @bus: a pointer to the &struct sfp_bus structure for the sfp module
+ * @id: a pointer to the module's &struct sfp_eeprom_id
+ * @support: pointer to an array of unsigned long for the ethtool support mask
+ *
+ * Parse the EEPROM identification information and derive the supported
+ * ethtool link modes for the module.
+ */
 void sfp_parse_support(struct sfp_bus *bus, const struct sfp_eeprom_id *id,
 		       unsigned long *support)
 {
+	unsigned int br_min, br_nom, br_max;
+
 	phylink_set(support, Autoneg);
 	phylink_set(support, Pause);
 	phylink_set(support, Asym_Pause);
 
+	/* Decode the bitrate information to MBd */
+	br_min = br_nom = br_max = 0;
+	if (id->base.br_nominal) {
+		if (id->base.br_nominal != 255) {
+			br_nom = id->base.br_nominal * 100;
+			br_min = br_nom - id->base.br_nominal * id->ext.br_min;
+			br_max = br_nom + id->base.br_nominal * id->ext.br_max;
+		} else if (id->ext.br_max) {
+			br_nom = 250 * id->ext.br_max;
+			br_max = br_nom + br_nom * id->ext.br_min / 100;
+			br_min = br_nom - br_nom * id->ext.br_min / 100;
+		}
+	}
+
 	/* Set ethtool support from the compliance fields. */
 	if (id->base.e10g_base_sr)
 		phylink_set(support, 10000baseSR_Full);
@@ -142,6 +217,34 @@ void sfp_parse_support(struct sfp_bus *bus, const struct sfp_eeprom_id *id,
 		phylink_set(support, 1000baseT_Full);
 	}
 
+	/* 1000Base-PX or 1000Base-BX10 */
+	if ((id->base.e_base_px || id->base.e_base_bx10) &&
+	    br_min <= 1300 && br_max >= 1200)
+		phylink_set(support, 1000baseX_Full);
+
+	/* For active or passive cables, select the link modes
+	 * based on the bit rates and the cable compliance bytes.
+	 */
+	if ((id->base.sfp_ct_passive || id->base.sfp_ct_active) && br_nom) {
+		/* This may look odd, but some manufacturers use 12000MBd */
+		if (br_min <= 12000 && br_max >= 10300)
+			phylink_set(support, 10000baseCR_Full);
+		if (br_min <= 3200 && br_max >= 3100)
+			phylink_set(support, 2500baseX_Full);
+		if (br_min <= 1300 && br_max >= 1200)
+			phylink_set(support, 1000baseX_Full);
+	}
+	if (id->base.sfp_ct_passive) {
+		if (id->base.passive.sff8431_app_e)
+			phylink_set(support, 10000baseCR_Full);
+	}
+	if (id->base.sfp_ct_active) {
+		if (id->base.active.sff8431_app_e ||
+		    id->base.active.sff8431_lim) {
+			phylink_set(support, 10000baseCR_Full);
+		}
+	}
+
 	switch (id->base.extended_cc) {
 	case 0x00: /* Unspecified */
 		break;
@@ -175,35 +278,6 @@ void sfp_parse_support(struct sfp_bus *bus, const struct sfp_eeprom_id *id,
 		if (id->base.br_nominal >= 12)
 			phylink_set(support, 1000baseX_Full);
 	}
-
-	switch (id->base.connector) {
-	case SFP_CONNECTOR_SC:
-	case SFP_CONNECTOR_FIBERJACK:
-	case SFP_CONNECTOR_LC:
-	case SFP_CONNECTOR_MT_RJ:
-	case SFP_CONNECTOR_MU:
-	case SFP_CONNECTOR_OPTICAL_PIGTAIL:
-		break;
-
-	case SFP_CONNECTOR_UNSPEC:
-		if (id->base.e1000_base_t)
-			break;
-
-	case SFP_CONNECTOR_SG: /* guess */
-	case SFP_CONNECTOR_MPO_1X12:
-	case SFP_CONNECTOR_MPO_2X16:
-	case SFP_CONNECTOR_HSSDC_II:
-	case SFP_CONNECTOR_COPPER_PIGTAIL:
-	case SFP_CONNECTOR_NOSEPARATE:
-	case SFP_CONNECTOR_MXC_2X16:
-	default:
-		/* a guess at the supported link modes */
-		dev_warn(bus->sfp_dev,
-			 "Guessing link modes, please report...\n");
-		phylink_set(support, 1000baseT_Half);
-		phylink_set(support, 1000baseT_Full);
-		break;
-	}
 }
 EXPORT_SYMBOL_GPL(sfp_parse_support);
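
A worked example of the bitrate decode above (hypothetical EEPROM values):

	/* br_nominal = 103, ext.br_min = ext.br_max = 5 (percent of nominal):
	 *   br_nom = 103 * 100       = 10300 MBd
	 *   br_min = 10300 - 103 * 5 =  9785 MBd
	 *   br_max = 10300 + 103 * 5 = 10815 MBd
	 *
	 * br_nominal = 255 (escape value), ext.br_max = 103, ext.br_min = 2:
	 *   br_nom = 250 * 103 = 25750 MBd, +/- 2% => 25235..26265 MBd
	 */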
 
@@ -215,7 +289,7 @@ static const struct sfp_upstream_ops *sfp_get_upstream_ops(struct sfp_bus *bus)
 	return bus->registered ? bus->upstream_ops : NULL;
 }
 
-static struct sfp_bus *sfp_bus_get(struct device_node *np)
+static struct sfp_bus *sfp_bus_get(struct fwnode_handle *fwnode)
 {
 	struct sfp_bus *sfp, *new, *found = NULL;
 
@@ -224,7 +298,7 @@ static struct sfp_bus *sfp_bus_get(struct device_node *np)
 	mutex_lock(&sfp_mutex);
 
 	list_for_each_entry(sfp, &sfp_buses, node) {
-		if (sfp->device_node == np) {
+		if (sfp->fwnode == fwnode) {
 			kref_get(&sfp->kref);
 			found = sfp;
 			break;
@@ -233,7 +307,7 @@ static struct sfp_bus *sfp_bus_get(struct device_node *np)
 
 	if (!found && new) {
 		kref_init(&new->kref);
-		new->device_node = np;
+		new->fwnode = fwnode;
 		list_add(&new->node, &sfp_buses);
 		found = new;
 		new = NULL;
@@ -246,7 +320,7 @@ static struct sfp_bus *sfp_bus_get(struct device_node *np)
 	return found;
 }
 
-static void sfp_bus_release(struct kref *kref) __releases(sfp_mutex)
+static void sfp_bus_release(struct kref *kref)
 {
 	struct sfp_bus *bus = container_of(kref, struct sfp_bus, kref);
 
@@ -293,6 +367,16 @@ static void sfp_unregister_bus(struct sfp_bus *bus)
 	bus->registered = false;
 }
 
+/**
+ * sfp_get_module_info() - Get the ethtool_modinfo for a SFP module
+ * @bus: a pointer to the &struct sfp_bus structure for the sfp module
+ * @modinfo: a &struct ethtool_modinfo
+ *
+ * Fill in the type and eeprom_len parameters in @modinfo for a module on
+ * the sfp bus specified by @bus.
+ *
+ * Returns 0 on success or a negative errno number.
+ */
 int sfp_get_module_info(struct sfp_bus *bus, struct ethtool_modinfo *modinfo)
 {
 	if (!bus->registered)
@@ -301,6 +385,17 @@ int sfp_get_module_info(struct sfp_bus *bus, struct ethtool_modinfo *modinfo)
 }
 EXPORT_SYMBOL_GPL(sfp_get_module_info);
 
+/**
+ * sfp_get_module_eeprom() - Read the SFP module EEPROM
+ * @bus: a pointer to the &struct sfp_bus structure for the sfp module
+ * @ee: a &struct ethtool_eeprom
+ * @data: buffer to contain the EEPROM data (must be at least @ee->len bytes)
+ *
+ * Read the EEPROM as specified by the supplied @ee. See the documentation
+ * for &struct ethtool_eeprom for the region to be read.
+ *
+ * Returns 0 on success or a negative errno number.
+ */
 int sfp_get_module_eeprom(struct sfp_bus *bus, struct ethtool_eeprom *ee,
 			  u8 *data)
 {
@@ -310,6 +405,15 @@ int sfp_get_module_eeprom(struct sfp_bus *bus, struct ethtool_eeprom *ee,
 }
 EXPORT_SYMBOL_GPL(sfp_get_module_eeprom);
 
+/**
+ * sfp_upstream_start() - Inform the SFP that the network device is up
+ * @bus: a pointer to the &struct sfp_bus structure for the sfp module
+ *
+ * Inform the SFP socket that the network device is now up, so that the
+ * module can be enabled by allowing TX_DISABLE to be deasserted. This
+ * should be called from the network device driver's &struct net_device_ops
+ * ndo_open() method.
+ */
 void sfp_upstream_start(struct sfp_bus *bus)
 {
 	if (bus->registered)
@@ -318,6 +422,15 @@ void sfp_upstream_start(struct sfp_bus *bus)
 }
 EXPORT_SYMBOL_GPL(sfp_upstream_start);
 
+/**
+ * sfp_upstream_stop() - Inform the SFP that the network device is down
+ * @bus: a pointer to the &struct sfp_bus structure for the sfp module
+ *
+ * Inform the SFP socket that the network device is now down, so that the
+ * module can be disabled by asserting TX_DISABLE, disabling the laser
+ * in optical modules. This should be called from the network device
+ * driver's &struct net_device_ops ndo_stop() method.
+ */
 void sfp_upstream_stop(struct sfp_bus *bus)
 {
 	if (bus->registered)
@@ -326,11 +439,24 @@ void sfp_upstream_stop(struct sfp_bus *bus)
 }
 EXPORT_SYMBOL_GPL(sfp_upstream_stop);
 
-struct sfp_bus *sfp_register_upstream(struct device_node *np,
+/**
+ * sfp_register_upstream() - Register the neighbouring device
+ * @fwnode: firmware node for the SFP bus
+ * @ndev: network device associated with the interface
+ * @upstream: the upstream private data
+ * @ops: the upstream's &struct sfp_upstream_ops
+ *
+ * Register the upstream device (eg, PHY) with the SFP bus. MAC drivers
+ * should use phylink, which will call this function for them. Returns
+ * a pointer to the allocated &struct sfp_bus.
+ *
+ * On error, returns %NULL.
+ */
+struct sfp_bus *sfp_register_upstream(struct fwnode_handle *fwnode,
 				      struct net_device *ndev, void *upstream,
 				      const struct sfp_upstream_ops *ops)
 {
-	struct sfp_bus *bus = sfp_bus_get(np);
+	struct sfp_bus *bus = sfp_bus_get(fwnode);
 	int ret = 0;
 
 	if (bus) {
@@ -353,6 +479,13 @@ struct sfp_bus *sfp_register_upstream(struct device_node *np,
 }
 EXPORT_SYMBOL_GPL(sfp_register_upstream);
 
+/**
+ * sfp_unregister_upstream() - Unregister sfp bus
+ * @bus: a pointer to the &struct sfp_bus structure for the sfp module
+ *
+ * Unregister a previously registered upstream connection for the SFP
+ * module. @bus is returned from sfp_register_upstream().
+ */
 void sfp_unregister_upstream(struct sfp_bus *bus)
 {
 	rtnl_lock();
@@ -434,7 +567,7 @@ EXPORT_SYMBOL_GPL(sfp_module_remove);
 struct sfp_bus *sfp_register_socket(struct device *dev, struct sfp *sfp,
 				    const struct sfp_socket_ops *ops)
 {
-	struct sfp_bus *bus = sfp_bus_get(dev->of_node);
+	struct sfp_bus *bus = sfp_bus_get(dev->fwnode);
 	int ret = 0;
 
 	if (bus) {
diff --git a/drivers/net/phy/sfp.c b/drivers/net/phy/sfp.c
index 9dfc1c4..6c7d928 100644
--- a/drivers/net/phy/sfp.c
+++ b/drivers/net/phy/sfp.c
@@ -98,12 +98,18 @@ static const enum gpiod_flags gpio_flags[] = {
 
 static DEFINE_MUTEX(sfp_mutex);
 
+struct sff_data {
+	unsigned int gpios;
+	bool (*module_supported)(const struct sfp_eeprom_id *id);
+};
+
 struct sfp {
 	struct device *dev;
 	struct i2c_adapter *i2c;
 	struct mii_bus *i2c_mii;
 	struct sfp_bus *sfp_bus;
 	struct phy_device *mod_phy;
+	const struct sff_data *type;
 
 	unsigned int (*get_state)(struct sfp *);
 	void (*set_state)(struct sfp *, unsigned int);
@@ -123,6 +129,36 @@ struct sfp {
 	struct sfp_eeprom_id id;
 };
 
+static bool sff_module_supported(const struct sfp_eeprom_id *id)
+{
+	return id->base.phys_id == SFP_PHYS_ID_SFF &&
+	       id->base.phys_ext_id == SFP_PHYS_EXT_ID_SFP;
+}
+
+static const struct sff_data sff_data = {
+	.gpios = SFP_F_LOS | SFP_F_TX_FAULT | SFP_F_TX_DISABLE,
+	.module_supported = sff_module_supported,
+};
+
+static bool sfp_module_supported(const struct sfp_eeprom_id *id)
+{
+	return id->base.phys_id == SFP_PHYS_ID_SFP &&
+	       id->base.phys_ext_id == SFP_PHYS_EXT_ID_SFP;
+}
+
+static const struct sff_data sfp_data = {
+	.gpios = SFP_F_PRESENT | SFP_F_LOS | SFP_F_TX_FAULT |
+		 SFP_F_TX_DISABLE | SFP_F_RATE_SELECT,
+	.module_supported = sfp_module_supported,
+};
+
+static const struct of_device_id sfp_of_match[] = {
+	{ .compatible = "sff,sff", .data = &sff_data, },
+	{ .compatible = "sff,sfp", .data = &sfp_data, },
+	{ },
+};
+MODULE_DEVICE_TABLE(of, sfp_of_match);
+
 static unsigned long poll_jiffies;
 
 static unsigned int sfp_gpio_get_state(struct sfp *sfp)
@@ -141,6 +177,11 @@ static unsigned int sfp_gpio_get_state(struct sfp *sfp)
 	return state;
 }
 
+static unsigned int sff_gpio_get_state(struct sfp *sfp)
+{
+	return sfp_gpio_get_state(sfp) | SFP_F_PRESENT;
+}
+
 static void sfp_gpio_set_state(struct sfp *sfp, unsigned int state)
 {
 	if (state & SFP_F_PRESENT) {
@@ -315,12 +356,12 @@ static void sfp_sm_probe_phy(struct sfp *sfp)
 	msleep(T_PHY_RESET_MS);
 
 	phy = mdiobus_scan(sfp->i2c_mii, SFP_PHY_ADDR);
-	if (IS_ERR(phy)) {
-		dev_err(sfp->dev, "mdiobus scan returned %ld\n", PTR_ERR(phy));
+	if (phy == ERR_PTR(-ENODEV)) {
+		dev_info(sfp->dev, "no PHY detected\n");
 		return;
 	}
-	if (!phy) {
-		dev_info(sfp->dev, "no PHY detected\n");
+	if (IS_ERR(phy)) {
+		dev_err(sfp->dev, "mdiobus scan returned %ld\n", PTR_ERR(phy));
 		return;
 	}
 
@@ -425,11 +466,6 @@ static int sfp_sm_mod_probe(struct sfp *sfp)
 {
 	/* SFP module inserted - read I2C data */
 	struct sfp_eeprom_id id;
-	char vendor[17];
-	char part[17];
-	char sn[17];
-	char date[9];
-	char rev[5];
 	u8 check;
 	int err;
 
@@ -465,24 +501,17 @@ static int sfp_sm_mod_probe(struct sfp *sfp)
 
 	sfp->id = id;
 
-	memcpy(vendor, sfp->id.base.vendor_name, 16);
-	vendor[16] = '\0';
-	memcpy(part, sfp->id.base.vendor_pn, 16);
-	part[16] = '\0';
-	memcpy(rev, sfp->id.base.vendor_rev, 4);
-	rev[4] = '\0';
-	memcpy(sn, sfp->id.ext.vendor_sn, 16);
-	sn[16] = '\0';
-	memcpy(date, sfp->id.ext.datecode, 8);
-	date[8] = '\0';
+	dev_info(sfp->dev, "module %.*s %.*s rev %.*s sn %.*s dc %.*s\n",
+		 (int)sizeof(id.base.vendor_name), id.base.vendor_name,
+		 (int)sizeof(id.base.vendor_pn), id.base.vendor_pn,
+		 (int)sizeof(id.base.vendor_rev), id.base.vendor_rev,
+		 (int)sizeof(id.ext.vendor_sn), id.ext.vendor_sn,
+		 (int)sizeof(id.ext.datecode), id.ext.datecode);
 
-	dev_info(sfp->dev, "module %s %s rev %s sn %s dc %s\n",
-		 vendor, part, rev, sn, date);
-
-	/* We only support SFP modules, not the legacy GBIC modules. */
-	if (sfp->id.base.phys_id != SFP_PHYS_ID_SFP ||
-	    sfp->id.base.phys_ext_id != SFP_PHYS_EXT_ID_SFP) {
-		dev_err(sfp->dev, "module is not SFP - phys id 0x%02x 0x%02x\n",
+	/* Check whether we support this module */
+	if (!sfp->type->module_supported(&sfp->id)) {
+		dev_err(sfp->dev,
+			"module is not supported - phys id 0x%02x 0x%02x\n",
 			sfp->id.base.phys_id, sfp->id.base.phys_ext_id);
 		return -EINVAL;
 	}
@@ -683,20 +712,19 @@ static int sfp_module_eeprom(struct sfp *sfp, struct ethtool_eeprom *ee,
 		len = min_t(unsigned int, last, ETH_MODULE_SFF_8079_LEN);
 		len -= first;
 
-		ret = sfp->read(sfp, false, first, data, len);
+		ret = sfp_read(sfp, false, first, data, len);
 		if (ret < 0)
 			return ret;
 
 		first += len;
 		data += len;
 	}
-	if (first >= ETH_MODULE_SFF_8079_LEN &&
-	    first < ETH_MODULE_SFF_8472_LEN) {
+	if (first < ETH_MODULE_SFF_8472_LEN && last > ETH_MODULE_SFF_8079_LEN) {
 		len = min_t(unsigned int, last, ETH_MODULE_SFF_8472_LEN);
 		len -= first;
 		first -= ETH_MODULE_SFF_8079_LEN;
 
-		ret = sfp->read(sfp, true, first, data, len);
+		ret = sfp_read(sfp, true, first, data, len);
 		if (ret < 0)
 			return ret;
 	}
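
A worked example of the two-range split above (hypothetical request): with ee->offset = 200 and ee->len = 200, last = 400, so the read spans the A0h/A2h boundary:

	/* pass 1: len = min(400, 256) - 200 = 56   -> A0h bytes 200..255
	 * pass 2: len = min(400, 512) - 256 = 144,
	 *         first -= ETH_MODULE_SFF_8079_LEN -> A2h bytes 0..143
	 */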
@@ -801,6 +829,7 @@ static void sfp_cleanup(void *data)
 
 static int sfp_probe(struct platform_device *pdev)
 {
+	const struct sff_data *sff;
 	struct sfp *sfp;
 	bool poll = false;
 	int irq, err, i;
@@ -815,10 +844,19 @@ static int sfp_probe(struct platform_device *pdev)
 	if (err < 0)
 		return err;
 
+	sff = sfp->type = &sfp_data;
+
 	if (pdev->dev.of_node) {
 		struct device_node *node = pdev->dev.of_node;
+		const struct of_device_id *id;
 		struct device_node *np;
 
+		id = of_match_node(sfp_of_match, node);
+		if (WARN_ON(!id))
+			return -EINVAL;
+
+		sff = sfp->type = id->data;
+
 		np = of_parse_phandle(node, "i2c-bus", 0);
 		if (np) {
 			struct i2c_adapter *i2c;
@@ -834,17 +872,22 @@ static int sfp_probe(struct platform_device *pdev)
 				return err;
 			}
 		}
+	}
 
-		for (i = 0; i < GPIO_MAX; i++) {
+	for (i = 0; i < GPIO_MAX; i++)
+		if (sff->gpios & BIT(i)) {
 			sfp->gpio[i] = devm_gpiod_get_optional(sfp->dev,
 					   gpio_of_names[i], gpio_flags[i]);
 			if (IS_ERR(sfp->gpio[i]))
 				return PTR_ERR(sfp->gpio[i]);
 		}
 
-		sfp->get_state = sfp_gpio_get_state;
-		sfp->set_state = sfp_gpio_set_state;
-	}
+	sfp->get_state = sfp_gpio_get_state;
+	sfp->set_state = sfp_gpio_set_state;
+
+	/* Modules that have no detect signal are always present */
+	if (!(sfp->gpio[GPIO_MODDEF0]))
+		sfp->get_state = sff_gpio_get_state;
 
 	sfp->sfp_bus = sfp_register_socket(sfp->dev, sfp, &sfp_module_ops);
 	if (!sfp->sfp_bus)
@@ -899,12 +942,6 @@ static int sfp_remove(struct platform_device *pdev)
 	return 0;
 }
 
-static const struct of_device_id sfp_of_match[] = {
-	{ .compatible = "sff,sfp", },
-	{ },
-};
-MODULE_DEVICE_TABLE(of, sfp_of_match);
-
 static struct platform_driver sfp_driver = {
 	.probe = sfp_probe,
 	.remove = sfp_remove,
diff --git a/drivers/net/phy/smsc.c b/drivers/net/phy/smsc.c
index 2306bfa..be399d6 100644
--- a/drivers/net/phy/smsc.c
+++ b/drivers/net/phy/smsc.c
@@ -227,8 +227,6 @@ static struct phy_driver smsc_phy_driver[] = {
 	.probe		= smsc_phy_probe,
 
 	/* basic functions */
-	.config_aneg	= genphy_config_aneg,
-	.read_status	= genphy_read_status,
 	.config_init	= smsc_phy_config_init,
 	.soft_reset	= smsc_phy_reset,
 
@@ -249,8 +247,6 @@ static struct phy_driver smsc_phy_driver[] = {
 	.probe		= smsc_phy_probe,
 
 	/* basic functions */
-	.config_aneg	= genphy_config_aneg,
-	.read_status	= genphy_read_status,
 	.config_init	= smsc_phy_config_init,
 	.soft_reset	= smsc_phy_reset,
 
@@ -276,7 +272,6 @@ static struct phy_driver smsc_phy_driver[] = {
 	.probe		= smsc_phy_probe,
 
 	/* basic functions */
-	.config_aneg	= genphy_config_aneg,
 	.read_status	= lan87xx_read_status,
 	.config_init	= smsc_phy_config_init,
 	.soft_reset	= smsc_phy_reset,
@@ -303,8 +298,6 @@ static struct phy_driver smsc_phy_driver[] = {
 	.probe		= smsc_phy_probe,
 
 	/* basic functions */
-	.config_aneg	= genphy_config_aneg,
-	.read_status	= genphy_read_status,
 	.config_init	= lan911x_config_init,
 
 	/* IRQ related */
@@ -319,12 +312,11 @@ static struct phy_driver smsc_phy_driver[] = {
 	.name		= "SMSC LAN8710/LAN8720",
 
 	.features	= PHY_BASIC_FEATURES,
-	.flags		= PHY_HAS_INTERRUPT,
+	.flags		= PHY_HAS_INTERRUPT | PHY_RST_AFTER_CLK_EN,
 
 	.probe		= smsc_phy_probe,
 
 	/* basic functions */
-	.config_aneg	= genphy_config_aneg,
 	.read_status	= lan87xx_read_status,
 	.config_init	= smsc_phy_config_init,
 	.soft_reset	= smsc_phy_reset,
@@ -351,7 +343,6 @@ static struct phy_driver smsc_phy_driver[] = {
 	.probe		= smsc_phy_probe,
 
 	/* basic functions */
-	.config_aneg	= genphy_config_aneg,
 	.read_status	= lan87xx_read_status,
 	.config_init	= smsc_phy_config_init,
 	.soft_reset	= smsc_phy_reset,
diff --git a/drivers/net/phy/ste10Xp.c b/drivers/net/phy/ste10Xp.c
index d00cfb6..fbd548a 100644
--- a/drivers/net/phy/ste10Xp.c
+++ b/drivers/net/phy/ste10Xp.c
@@ -89,8 +89,6 @@ static struct phy_driver ste10xp_pdriver[] = {
 	.features = PHY_BASIC_FEATURES | SUPPORTED_Pause,
 	.flags = PHY_HAS_INTERRUPT,
 	.config_init = ste10Xp_config_init,
-	.config_aneg = genphy_config_aneg,
-	.read_status = genphy_read_status,
 	.ack_interrupt = ste10Xp_ack_interrupt,
 	.config_intr = ste10Xp_config_intr,
 	.suspend = genphy_suspend,
@@ -102,8 +100,6 @@ static struct phy_driver ste10xp_pdriver[] = {
 	.features = PHY_BASIC_FEATURES | SUPPORTED_Pause,
 	.flags = PHY_HAS_INTERRUPT,
 	.config_init = ste10Xp_config_init,
-	.config_aneg = genphy_config_aneg,
-	.read_status = genphy_read_status,
 	.ack_interrupt = ste10Xp_ack_interrupt,
 	.config_intr = ste10Xp_config_intr,
 	.suspend = genphy_suspend,
diff --git a/drivers/net/phy/uPD60620.c b/drivers/net/phy/uPD60620.c
index 96b3347..55f48ee 100644
--- a/drivers/net/phy/uPD60620.c
+++ b/drivers/net/phy/uPD60620.c
@@ -95,7 +95,6 @@ static struct phy_driver upd60620_driver[1] = { {
 	.features       = PHY_BASIC_FEATURES,
 	.flags          = 0,
 	.config_init    = upd60620_config_init,
-	.config_aneg    = genphy_config_aneg,
 	.read_status    = upd60620_read_status,
 } };
 
diff --git a/drivers/net/phy/vitesse.c b/drivers/net/phy/vitesse.c
index f78ff02..d9dd8fb 100644
--- a/drivers/net/phy/vitesse.c
+++ b/drivers/net/phy/vitesse.c
@@ -267,7 +267,6 @@ static struct phy_driver vsc82xx_driver[] = {
 	.flags          = PHY_HAS_INTERRUPT,
 	.config_init    = &vsc824x_config_init,
 	.config_aneg    = &vsc82x4_config_aneg,
-	.read_status    = &genphy_read_status,
 	.ack_interrupt  = &vsc824x_ack_interrupt,
 	.config_intr    = &vsc82xx_config_intr,
 }, {
@@ -278,7 +277,6 @@ static struct phy_driver vsc82xx_driver[] = {
 	.flags		= PHY_HAS_INTERRUPT,
 	.config_init	= &vsc824x_config_init,
 	.config_aneg	= &vsc82x4_config_aneg,
-	.read_status	= &genphy_read_status,
 	.ack_interrupt	= &vsc824x_ack_interrupt,
 	.config_intr	= &vsc82xx_config_intr,
 }, {
@@ -289,7 +287,6 @@ static struct phy_driver vsc82xx_driver[] = {
 	.flags		= PHY_HAS_INTERRUPT,
 	.config_init	= &vsc824x_config_init,
 	.config_aneg	= &vsc82x4_config_aneg,
-	.read_status	= &genphy_read_status,
 	.ack_interrupt	= &vsc824x_ack_interrupt,
 	.config_intr	= &vsc82xx_config_intr,
 }, {
@@ -300,7 +297,6 @@ static struct phy_driver vsc82xx_driver[] = {
 	.flags          = PHY_HAS_INTERRUPT,
 	.config_init    = &vsc824x_config_init,
 	.config_aneg    = &vsc82x4_config_aneg,
-	.read_status    = &genphy_read_status,
 	.ack_interrupt  = &vsc824x_ack_interrupt,
 	.config_intr    = &vsc82xx_config_intr,
 }, {
@@ -311,7 +307,6 @@ static struct phy_driver vsc82xx_driver[] = {
 	.flags          = PHY_HAS_INTERRUPT,
 	.config_init    = &vsc824x_config_init,
 	.config_aneg    = &vsc82x4_config_aneg,
-	.read_status    = &genphy_read_status,
 	.ack_interrupt  = &vsc824x_ack_interrupt,
 	.config_intr    = &vsc82xx_config_intr,
 }, {
@@ -321,8 +316,6 @@ static struct phy_driver vsc82xx_driver[] = {
 	.features       = PHY_GBIT_FEATURES,
 	.flags          = PHY_HAS_INTERRUPT,
 	.config_init    = &vsc8601_config_init,
-	.config_aneg    = &genphy_config_aneg,
-	.read_status    = &genphy_read_status,
 	.ack_interrupt  = &vsc824x_ack_interrupt,
 	.config_intr    = &vsc82xx_config_intr,
 }, {
@@ -333,7 +326,6 @@ static struct phy_driver vsc82xx_driver[] = {
 	.flags          = PHY_HAS_INTERRUPT,
 	.config_init    = &vsc824x_config_init,
 	.config_aneg    = &vsc82x4_config_aneg,
-	.read_status    = &genphy_read_status,
 	.ack_interrupt  = &vsc824x_ack_interrupt,
 	.config_intr    = &vsc82xx_config_intr,
 }, {
@@ -344,8 +336,6 @@ static struct phy_driver vsc82xx_driver[] = {
 	.features	= PHY_GBIT_FEATURES,
 	.flags		= PHY_HAS_INTERRUPT,
 	.config_init	= &vsc8221_config_init,
-	.config_aneg	= &genphy_config_aneg,
-	.read_status	= &genphy_read_status,
 	.ack_interrupt	= &vsc824x_ack_interrupt,
 	.config_intr	= &vsc82xx_config_intr,
 }, {
@@ -356,8 +346,6 @@ static struct phy_driver vsc82xx_driver[] = {
 	.features	= PHY_GBIT_FEATURES,
 	.flags		= PHY_HAS_INTERRUPT,
 	.config_init	= &vsc8221_config_init,
-	.config_aneg	= &genphy_config_aneg,
-	.read_status	= &genphy_read_status,
 	.ack_interrupt	= &vsc824x_ack_interrupt,
 	.config_intr	= &vsc82xx_config_intr,
 } };
diff --git a/drivers/net/slip/slip.c b/drivers/net/slip/slip.c
index cc63102..8940417 100644
--- a/drivers/net/slip/slip.c
+++ b/drivers/net/slip/slip.c
@@ -731,7 +731,7 @@ static void sl_sync(void)
 
 
 /* Find a free SLIP channel, and link in this `tty' line. */
-static struct slip *sl_alloc(dev_t line)
+static struct slip *sl_alloc(void)
 {
 	int i;
 	char name[IFNAMSIZ];
@@ -809,7 +809,7 @@ static int slip_open(struct tty_struct *tty)
 
 	/* OK.  Find a free SLIP channel to use. */
 	err = -ENFILE;
-	sl = sl_alloc(tty_devnum(tty));
+	sl = sl_alloc();
 	if (sl == NULL)
 		goto err_exit;
 
diff --git a/drivers/net/tap.c b/drivers/net/tap.c
index 0a886fda..7c38659 100644
--- a/drivers/net/tap.c
+++ b/drivers/net/tap.c
@@ -330,7 +330,7 @@ rx_handler_result_t tap_handle_frame(struct sk_buff **pskb)
 	if (!q)
 		return RX_HANDLER_PASS;
 
-	if (__skb_array_full(&q->skb_array))
+	if (__ptr_ring_full(&q->ring))
 		goto drop;
 
 	skb_push(skb, ETH_HLEN);
@@ -348,7 +348,7 @@ rx_handler_result_t tap_handle_frame(struct sk_buff **pskb)
 			goto drop;
 
 		if (!segs) {
-			if (skb_array_produce(&q->skb_array, skb))
+			if (ptr_ring_produce(&q->ring, skb))
 				goto drop;
 			goto wake_up;
 		}
@@ -358,7 +358,7 @@ rx_handler_result_t tap_handle_frame(struct sk_buff **pskb)
 			struct sk_buff *nskb = segs->next;
 
 			segs->next = NULL;
-			if (skb_array_produce(&q->skb_array, segs)) {
+			if (ptr_ring_produce(&q->ring, segs)) {
 				kfree_skb(segs);
 				kfree_skb_list(nskb);
 				break;
@@ -375,7 +375,7 @@ rx_handler_result_t tap_handle_frame(struct sk_buff **pskb)
 		    !(features & NETIF_F_CSUM_MASK) &&
 		    skb_checksum_help(skb))
 			goto drop;
-		if (skb_array_produce(&q->skb_array, skb))
+		if (ptr_ring_produce(&q->ring, skb))
 			goto drop;
 	}
 
@@ -497,7 +497,7 @@ static void tap_sock_destruct(struct sock *sk)
 {
 	struct tap_queue *q = container_of(sk, struct tap_queue, sk);
 
-	skb_array_cleanup(&q->skb_array);
+	ptr_ring_cleanup(&q->ring, __skb_array_destroy_skb);
 }
 
 static int tap_open(struct inode *inode, struct file *file)
@@ -517,7 +517,7 @@ static int tap_open(struct inode *inode, struct file *file)
 					     &tap_proto, 0);
 	if (!q)
 		goto err;
-	if (skb_array_init(&q->skb_array, tap->dev->tx_queue_len, GFP_KERNEL)) {
+	if (ptr_ring_init(&q->ring, tap->dev->tx_queue_len, GFP_KERNEL)) {
 		sk_free(&q->sk);
 		goto err;
 	}
@@ -546,7 +546,7 @@ static int tap_open(struct inode *inode, struct file *file)
 
 	err = tap_set_queue(tap, file, q);
 	if (err) {
-		/* tap_sock_destruct() will take care of freeing skb_array */
+		/* tap_sock_destruct() will take care of freeing ptr_ring */
 		goto err_put;
 	}
 
@@ -583,7 +583,7 @@ static unsigned int tap_poll(struct file *file, poll_table *wait)
 	mask = 0;
 	poll_wait(file, &q->wq.wait, wait);
 
-	if (!skb_array_empty(&q->skb_array))
+	if (!ptr_ring_empty(&q->ring))
 		mask |= POLLIN | POLLRDNORM;
 
 	if (sock_writeable(&q->sk) ||
@@ -844,7 +844,7 @@ static ssize_t tap_do_read(struct tap_queue *q,
 					TASK_INTERRUPTIBLE);
 
 		/* Read frames from the queue */
-		skb = skb_array_consume(&q->skb_array);
+		skb = ptr_ring_consume(&q->ring);
 		if (skb)
 			break;
 		if (noblock) {
@@ -1176,7 +1176,7 @@ static int tap_peek_len(struct socket *sock)
 {
 	struct tap_queue *q = container_of(sock, struct tap_queue,
 					       sock);
-	return skb_array_peek_len(&q->skb_array);
+	return PTR_RING_PEEK_CALL(&q->ring, __skb_array_len_with_tag);
 }
 
 /* Ops structure to mimic raw sockets with tun */
@@ -1202,7 +1202,7 @@ struct socket *tap_get_socket(struct file *file)
 }
 EXPORT_SYMBOL_GPL(tap_get_socket);
 
-struct skb_array *tap_get_skb_array(struct file *file)
+struct ptr_ring *tap_get_ptr_ring(struct file *file)
 {
 	struct tap_queue *q;
 
@@ -1211,29 +1211,30 @@ struct skb_array *tap_get_skb_array(struct file *file)
 	q = file->private_data;
 	if (!q)
 		return ERR_PTR(-EBADFD);
-	return &q->skb_array;
+	return &q->ring;
 }
-EXPORT_SYMBOL_GPL(tap_get_skb_array);
+EXPORT_SYMBOL_GPL(tap_get_ptr_ring);
 
 int tap_queue_resize(struct tap_dev *tap)
 {
 	struct net_device *dev = tap->dev;
 	struct tap_queue *q;
-	struct skb_array **arrays;
+	struct ptr_ring **rings;
 	int n = tap->numqueues;
 	int ret, i = 0;
 
-	arrays = kmalloc_array(n, sizeof(*arrays), GFP_KERNEL);
-	if (!arrays)
+	rings = kmalloc_array(n, sizeof(*rings), GFP_KERNEL);
+	if (!rings)
 		return -ENOMEM;
 
 	list_for_each_entry(q, &tap->queue_list, next)
-		arrays[i++] = &q->skb_array;
+		rings[i++] = &q->ring;
 
-	ret = skb_array_resize_multiple(arrays, n,
-					dev->tx_queue_len, GFP_KERNEL);
+	ret = ptr_ring_resize_multiple(rings, n,
+				       dev->tx_queue_len, GFP_KERNEL,
+				       __skb_array_destroy_skb);
 
-	kfree(arrays);
+	kfree(rings);
 	return ret;
 }
 EXPORT_SYMBOL_GPL(tap_queue_resize);
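
With the skb_array wrappers gone, tap uses the underlying ptr_ring directly; the main API difference is that cleanup and resize take a destructor for entries still queued. A minimal lifecycle sketch (ring size hypothetical):

	/* Hypothetical ptr_ring lifecycle mirroring the tap queue usage above */
	static int example_ring_lifecycle(struct sk_buff *skb)
	{
		struct ptr_ring ring;
		int err = ptr_ring_init(&ring, 256, GFP_KERNEL);

		if (err)
			return err;

		if (ptr_ring_produce(&ring, skb))	/* non-zero: ring full */
			kfree_skb(skb);

		/* the destructor runs for every entry still in the ring */
		ptr_ring_cleanup(&ring, __skb_array_destroy_skb);
		return 0;
	}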
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index 4f4a842..2fba3be 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -179,7 +179,8 @@ struct tun_file {
 	struct mutex napi_mutex;	/* Protects access to the above napi */
 	struct list_head next;
 	struct tun_struct *detached;
-	struct skb_array tx_array;
+	struct ptr_ring tx_ring;
+	struct xdp_rxq_info xdp_rxq;
 };
 
 struct tun_flow_entry {
@@ -195,6 +196,11 @@ struct tun_flow_entry {
 
 #define TUN_NUM_FLOW_ENTRIES 1024
 
+struct tun_steering_prog {
+	struct rcu_head rcu;
+	struct bpf_prog *prog;
+};
+
 /* Since the socket was moved to tun_file, to preserve the behavior of a
  * persist device, the socket filter, sndbuf and vnet header size are
  * restored when the file is attached to a persist device.
@@ -232,8 +238,27 @@ struct tun_struct {
 	u32 rx_batched;
 	struct tun_pcpu_stats __percpu *pcpu_stats;
 	struct bpf_prog __rcu *xdp_prog;
+	struct tun_steering_prog __rcu *steering_prog;
 };
 
+bool tun_is_xdp_buff(void *ptr)
+{
+	return (unsigned long)ptr & TUN_XDP_FLAG;
+}
+EXPORT_SYMBOL(tun_is_xdp_buff);
+
+void *tun_xdp_to_ptr(void *ptr)
+{
+	return (void *)((unsigned long)ptr | TUN_XDP_FLAG);
+}
+EXPORT_SYMBOL(tun_xdp_to_ptr);
+
+void *tun_ptr_to_xdp(void *ptr)
+{
+	return (void *)((unsigned long)ptr & ~TUN_XDP_FLAG);
+}
+EXPORT_SYMBOL(tun_ptr_to_xdp);
+
 static int tun_napi_receive(struct napi_struct *napi, int budget)
 {
 	struct tun_file *tfile = container_of(napi, struct tun_file, napi);
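
The tag lives in bit 0 of the queued pointer, which assumes both sk_buffs and the copied xdp_buffs are at least 2-byte aligned. A hedged consumer-side sketch of the round trip (the real demux happens in tun_do_read()/tun_ptr_free() below):

	/* Hypothetical demux for entries pulled off the tx ring */
	static void example_demux(void *ptr)
	{
		if (tun_is_xdp_buff(ptr)) {
			struct xdp_buff *xdp = tun_ptr_to_xdp(ptr);

			/* handle the raw XDP buffer, then release its page */
			put_page(virt_to_head_page(xdp->data));
		} else {
			consume_skb(ptr);
		}
	}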
@@ -537,15 +562,12 @@ static inline void tun_flow_save_rps_rxhash(struct tun_flow_entry *e, u32 hash)
  * different rxq no. here. If we could not get rxhash, then we would
  * hope the rxq no. may help here.
  */
-static u16 tun_select_queue(struct net_device *dev, struct sk_buff *skb,
-			    void *accel_priv, select_queue_fallback_t fallback)
+static u16 tun_automq_select_queue(struct tun_struct *tun, struct sk_buff *skb)
 {
-	struct tun_struct *tun = netdev_priv(dev);
 	struct tun_flow_entry *e;
 	u32 txq = 0;
 	u32 numqueues = 0;
 
-	rcu_read_lock();
 	numqueues = READ_ONCE(tun->numqueues);
 
 	txq = __skb_get_hash_symmetric(skb);
@@ -563,10 +585,37 @@ static u16 tun_select_queue(struct net_device *dev, struct sk_buff *skb,
 			txq -= numqueues;
 	}
 
-	rcu_read_unlock();
 	return txq;
 }
 
+static u16 tun_ebpf_select_queue(struct tun_struct *tun, struct sk_buff *skb)
+{
+	struct tun_steering_prog *prog;
+	u16 ret = 0;
+
+	prog = rcu_dereference(tun->steering_prog);
+	if (prog)
+		ret = bpf_prog_run_clear_cb(prog->prog, skb);
+
+	return ret % tun->numqueues;
+}
+
+static u16 tun_select_queue(struct net_device *dev, struct sk_buff *skb,
+			    void *accel_priv, select_queue_fallback_t fallback)
+{
+	struct tun_struct *tun = netdev_priv(dev);
+	u16 ret;
+
+	rcu_read_lock();
+	if (rcu_dereference(tun->steering_prog))
+		ret = tun_ebpf_select_queue(tun, skb);
+	else
+		ret = tun_automq_select_queue(tun, skb);
+	rcu_read_unlock();
+
+	return ret;
+}
+
 static inline bool tun_not_capable(struct tun_struct *tun)
 {
 	const struct cred *cred = current_cred();
@@ -600,12 +649,25 @@ static struct tun_struct *tun_enable_queue(struct tun_file *tfile)
 	return tun;
 }
 
+static void tun_ptr_free(void *ptr)
+{
+	if (!ptr)
+		return;
+	if (tun_is_xdp_buff(ptr)) {
+		struct xdp_buff *xdp = tun_ptr_to_xdp(ptr);
+
+		put_page(virt_to_head_page(xdp->data));
+	} else {
+		__skb_array_destroy_skb(ptr);
+	}
+}
+
 static void tun_queue_purge(struct tun_file *tfile)
 {
-	struct sk_buff *skb;
+	void *ptr;
 
-	while ((skb = skb_array_consume(&tfile->tx_array)) != NULL)
-		kfree_skb(skb);
+	while ((ptr = ptr_ring_consume(&tfile->tx_ring)) != NULL)
+		tun_ptr_free(ptr);
 
 	skb_queue_purge(&tfile->sk.sk_write_queue);
 	skb_queue_purge(&tfile->sk.sk_error_queue);
@@ -657,8 +719,10 @@ static void __tun_detach(struct tun_file *tfile, bool clean)
 			    tun->dev->reg_state == NETREG_REGISTERED)
 				unregister_netdevice(tun->dev);
 		}
-		if (tun)
-			skb_array_cleanup(&tfile->tx_array);
+		if (tun) {
+			ptr_ring_cleanup(&tfile->tx_ring, tun_ptr_free);
+			xdp_rxq_info_unreg(&tfile->xdp_rxq);
+		}
 		sock_put(&tfile->sk);
 	}
 }
@@ -673,7 +737,6 @@ static void tun_detach(struct tun_file *tfile, bool clean)
 static void tun_detach_all(struct net_device *dev)
 {
 	struct tun_struct *tun = netdev_priv(dev);
-	struct bpf_prog *xdp_prog = rtnl_dereference(tun->xdp_prog);
 	struct tun_file *tfile, *tmp;
 	int i, n = tun->numqueues;
 
@@ -699,18 +762,17 @@ static void tun_detach_all(struct net_device *dev)
 		tun_napi_del(tun, tfile);
 		/* Drop read queue */
 		tun_queue_purge(tfile);
+		xdp_rxq_info_unreg(&tfile->xdp_rxq);
 		sock_put(&tfile->sk);
 	}
 	list_for_each_entry_safe(tfile, tmp, &tun->disabled, next) {
 		tun_enable_queue(tfile);
 		tun_queue_purge(tfile);
+		xdp_rxq_info_unreg(&tfile->xdp_rxq);
 		sock_put(&tfile->sk);
 	}
 	BUG_ON(tun->numdisabled != 0);
 
-	if (xdp_prog)
-		bpf_prog_put(xdp_prog);
-
 	if (tun->flags & IFF_PERSIST)
 		module_put(THIS_MODULE);
 }
@@ -751,13 +813,29 @@ static int tun_attach(struct tun_struct *tun, struct file *file,
 	}
 
 	if (!tfile->detached &&
-	    skb_array_init(&tfile->tx_array, dev->tx_queue_len, GFP_KERNEL)) {
+	    ptr_ring_init(&tfile->tx_ring, dev->tx_queue_len, GFP_KERNEL)) {
 		err = -ENOMEM;
 		goto out;
 	}
 
 	tfile->queue_index = tun->numqueues;
 	tfile->socket.sk->sk_shutdown &= ~RCV_SHUTDOWN;
+
+	if (tfile->detached) {
+		/* Re-attach detached tfile, updating XDP queue_index */
+		WARN_ON(!xdp_rxq_info_is_reg(&tfile->xdp_rxq));
+
+		if (tfile->xdp_rxq.queue_index    != tfile->queue_index)
+			tfile->xdp_rxq.queue_index = tfile->queue_index;
+	} else {
+		/* Set up XDP RX-queue info for the new tfile being attached */
+		err = xdp_rxq_info_reg(&tfile->xdp_rxq,
+				       tun->dev, tfile->queue_index);
+		if (err < 0)
+			goto out;
+		err = 0;
+	}
+
 	rcu_assign_pointer(tfile->tun, tun);
 	rcu_assign_pointer(tun->tfiles[tun->numqueues], tfile);
 	tun->numqueues++;
@@ -937,23 +1015,10 @@ static int tun_net_close(struct net_device *dev)
 }
 
 /* Net device start xmit */
-static netdev_tx_t tun_net_xmit(struct sk_buff *skb, struct net_device *dev)
+static void tun_automq_xmit(struct tun_struct *tun, struct sk_buff *skb)
 {
-	struct tun_struct *tun = netdev_priv(dev);
-	int txq = skb->queue_mapping;
-	struct tun_file *tfile;
-	u32 numqueues = 0;
-
-	rcu_read_lock();
-	tfile = rcu_dereference(tun->tfiles[txq]);
-	numqueues = READ_ONCE(tun->numqueues);
-
-	/* Drop packet if interface is not attached */
-	if (txq >= numqueues)
-		goto drop;
-
 #ifdef CONFIG_RPS
-	if (numqueues == 1 && static_key_false(&rps_needed)) {
+	if (tun->numqueues == 1 && static_key_false(&rps_needed)) {
 		/* Select queue was not called for the skbuff, so we extract the
 		 * RPS hash and save it into the flow_table here.
 		 */
@@ -969,6 +1034,24 @@ static netdev_tx_t tun_net_xmit(struct sk_buff *skb, struct net_device *dev)
 		}
 	}
 #endif
+}
+
+/* Net device start xmit */
+static netdev_tx_t tun_net_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+	struct tun_struct *tun = netdev_priv(dev);
+	int txq = skb->queue_mapping;
+	struct tun_file *tfile;
+
+	rcu_read_lock();
+	tfile = rcu_dereference(tun->tfiles[txq]);
+
+	/* Drop packet if interface is not attached */
+	if (txq >= tun->numqueues)
+		goto drop;
+
+	if (!rcu_dereference(tun->steering_prog))
+		tun_automq_xmit(tun, skb);
 
 	tun_debug(KERN_INFO, tun, "tun_net_xmit %d\n", skb->len);
 
@@ -996,7 +1079,7 @@ static netdev_tx_t tun_net_xmit(struct sk_buff *skb, struct net_device *dev)
 
 	nf_reset(skb);
 
-	if (skb_array_produce(&tfile->tx_array, skb))
+	if (ptr_ring_produce(&tfile->tx_ring, skb))
 		goto drop;
 
 	/* Notify and wake up reader process */
@@ -1169,6 +1252,67 @@ static const struct net_device_ops tun_netdev_ops = {
 	.ndo_get_stats64	= tun_net_get_stats64,
 };
 
+static int tun_xdp_xmit(struct net_device *dev, struct xdp_buff *xdp)
+{
+	struct tun_struct *tun = netdev_priv(dev);
+	struct xdp_buff *buff = xdp->data_hard_start;
+	int headroom = xdp->data - xdp->data_hard_start;
+	struct tun_file *tfile;
+	u32 numqueues;
+	int ret = 0;
+
+	/* Ensure headroom is available and buff is properly aligned */
+	if (unlikely(headroom < sizeof(*xdp) || tun_is_xdp_buff(xdp)))
+		return -ENOSPC;
+
+	*buff = *xdp;
+
+	rcu_read_lock();
+
+	numqueues = READ_ONCE(tun->numqueues);
+	if (!numqueues) {
+		ret = -ENOSPC;
+		goto out;
+	}
+
+	tfile = rcu_dereference(tun->tfiles[smp_processor_id() %
+					    numqueues]);
+	/* Encode the XDP flag into the lowest bit so the consumer can
+	 * distinguish an XDP buffer from an sk_buff.
+	 */
+	if (ptr_ring_produce(&tfile->tx_ring, tun_xdp_to_ptr(buff))) {
+		this_cpu_inc(tun->pcpu_stats->tx_dropped);
+		ret = -ENOSPC;
+	}
+
+out:
+	rcu_read_unlock();
+	return ret;
+}
+
+static void tun_xdp_flush(struct net_device *dev)
+{
+	struct tun_struct *tun = netdev_priv(dev);
+	struct tun_file *tfile;
+	u32 numqueues;
+
+	rcu_read_lock();
+
+	numqueues = READ_ONCE(tun->numqueues);
+	if (!numqueues)
+		goto out;
+
+	tfile = rcu_dereference(tun->tfiles[smp_processor_id() %
+					    numqueues]);
+	/* Notify and wake up reader process */
+	if (tfile->flags & TUN_FASYNC)
+		kill_fasync(&tfile->fasync, SIGIO, POLL_IN);
+	tfile->socket.sk->sk_data_ready(tfile->socket.sk);
+
+out:
+	rcu_read_unlock();
+}
+
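
For reference, a simplified sketch of how the core redirect path is expected to drive this pair of ops at the time of this series, queueing via ndo_xdp_xmit and kicking the reader with ndo_xdp_flush (illustrative, not the exact core code):

	static int example_xdp_redirect(struct net_device *dev,
					struct xdp_buff *xdp)
	{
		int err = dev->netdev_ops->ndo_xdp_xmit(dev, xdp);

		if (err)
			return err;

		dev->netdev_ops->ndo_xdp_flush(dev);
		return 0;
	}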
 static const struct net_device_ops tap_netdev_ops = {
 	.ndo_uninit		= tun_net_uninit,
 	.ndo_open		= tun_net_open,
@@ -1186,6 +1330,8 @@ static const struct net_device_ops tap_netdev_ops = {
 	.ndo_set_rx_headroom	= tun_set_headroom,
 	.ndo_get_stats64	= tun_net_get_stats64,
 	.ndo_bpf		= tun_xdp,
+	.ndo_xdp_xmit		= tun_xdp_xmit,
+	.ndo_xdp_flush		= tun_xdp_flush,
 };
 
 static void tun_flow_init(struct tun_struct *tun)
@@ -1264,7 +1410,7 @@ static unsigned int tun_chr_poll(struct file *file, poll_table *wait)
 
 	poll_wait(file, sk_sleep(sk), wait);
 
-	if (!skb_array_empty(&tfile->tx_array))
+	if (!ptr_ring_empty(&tfile->tx_ring))
 		mask |= POLLIN | POLLRDNORM;
 
 	if (tun->dev->flags & IFF_UP &&
@@ -1477,6 +1623,7 @@ static struct sk_buff *tun_build_skb(struct tun_struct *tun,
 		xdp.data = buf + pad;
 		xdp_set_data_meta_invalid(&xdp);
 		xdp.data_end = xdp.data + len;
+		xdp.rxq = &tfile->xdp_rxq;
 		orig_data = xdp.data;
 		act = bpf_prog_run_xdp(xdp_prog, &xdp);
 
@@ -1551,7 +1698,7 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
 	int copylen;
 	bool zerocopy = false;
 	int err;
-	u32 rxhash;
+	u32 rxhash = 0;
 	int skb_xdp = 1;
 	bool frags = tun_napi_frags_enabled(tun);
 
@@ -1739,7 +1886,10 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
 		rcu_read_unlock();
 	}
 
-	rxhash = __skb_get_hash_symmetric(skb);
+	rcu_read_lock();
+	if (!rcu_dereference(tun->steering_prog))
+		rxhash = __skb_get_hash_symmetric(skb);
+	rcu_read_unlock();
 
 	if (frags) {
 		/* Exercise flow dissector code path. */
@@ -1783,7 +1933,9 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
 	u64_stats_update_end(&stats->syncp);
 	put_cpu_ptr(stats);
 
-	tun_flow_update(tun, rxhash, tfile);
+	if (rxhash)
+		tun_flow_update(tun, rxhash, tfile);
+
 	return total_len;
 }
 
@@ -1804,6 +1956,40 @@ static ssize_t tun_chr_write_iter(struct kiocb *iocb, struct iov_iter *from)
 	return result;
 }
 
+static ssize_t tun_put_user_xdp(struct tun_struct *tun,
+				struct tun_file *tfile,
+				struct xdp_buff *xdp,
+				struct iov_iter *iter)
+{
+	int vnet_hdr_sz = 0;
+	size_t size = xdp->data_end - xdp->data;
+	struct tun_pcpu_stats *stats;
+	size_t ret;
+
+	if (tun->flags & IFF_VNET_HDR) {
+		struct virtio_net_hdr gso = { 0 };
+
+		vnet_hdr_sz = READ_ONCE(tun->vnet_hdr_sz);
+		if (unlikely(iov_iter_count(iter) < vnet_hdr_sz))
+			return -EINVAL;
+		if (unlikely(copy_to_iter(&gso, sizeof(gso), iter) !=
+			     sizeof(gso)))
+			return -EFAULT;
+		iov_iter_advance(iter, vnet_hdr_sz - sizeof(gso));
+	}
+
+	ret = copy_to_iter(xdp->data, size, iter) + vnet_hdr_sz;
+
+	stats = get_cpu_ptr(tun->pcpu_stats);
+	u64_stats_update_begin(&stats->syncp);
+	stats->tx_packets++;
+	stats->tx_bytes += ret;
+	u64_stats_update_end(&stats->syncp);
+	put_cpu_ptr(tun->pcpu_stats);
+
+	return ret;
+}
+
 /* Put packet to the user space buffer */
 static ssize_t tun_put_user(struct tun_struct *tun,
 			    struct tun_file *tfile,
@@ -1901,15 +2087,14 @@ static ssize_t tun_put_user(struct tun_struct *tun,
 	return total;
 }
 
-static struct sk_buff *tun_ring_recv(struct tun_file *tfile, int noblock,
-				     int *err)
+static void *tun_ring_recv(struct tun_file *tfile, int noblock, int *err)
 {
 	DECLARE_WAITQUEUE(wait, current);
-	struct sk_buff *skb = NULL;
+	void *ptr = NULL;
 	int error = 0;
 
-	skb = skb_array_consume(&tfile->tx_array);
-	if (skb)
+	ptr = ptr_ring_consume(&tfile->tx_ring);
+	if (ptr)
 		goto out;
 	if (noblock) {
 		error = -EAGAIN;
@@ -1920,8 +2105,8 @@ static struct sk_buff *tun_ring_recv(struct tun_file *tfile, int noblock,
 	current->state = TASK_INTERRUPTIBLE;
 
 	while (1) {
-		skb = skb_array_consume(&tfile->tx_array);
-		if (skb)
+		ptr = ptr_ring_consume(&tfile->tx_ring);
+		if (ptr)
 			break;
 		if (signal_pending(current)) {
 			error = -ERESTARTSYS;
@@ -1940,12 +2125,12 @@ static struct sk_buff *tun_ring_recv(struct tun_file *tfile, int noblock,
 
 out:
 	*err = error;
-	return skb;
+	return ptr;
 }
 
 static ssize_t tun_do_read(struct tun_struct *tun, struct tun_file *tfile,
 			   struct iov_iter *to,
-			   int noblock, struct sk_buff *skb)
+			   int noblock, void *ptr)
 {
 	ssize_t ret;
 	int err;
@@ -1953,23 +2138,31 @@ static ssize_t tun_do_read(struct tun_struct *tun, struct tun_file *tfile,
 	tun_debug(KERN_INFO, tun, "tun_do_read\n");
 
 	if (!iov_iter_count(to)) {
-		if (skb)
-			kfree_skb(skb);
+		tun_ptr_free(ptr);
 		return 0;
 	}
 
-	if (!skb) {
+	if (!ptr) {
 		/* Read frames from ring */
-		skb = tun_ring_recv(tfile, noblock, &err);
-		if (!skb)
+		ptr = tun_ring_recv(tfile, noblock, &err);
+		if (!ptr)
 			return err;
 	}
 
-	ret = tun_put_user(tun, tfile, skb, to);
-	if (unlikely(ret < 0))
-		kfree_skb(skb);
-	else
-		consume_skb(skb);
+	if (tun_is_xdp_buff(ptr)) {
+		struct xdp_buff *xdp = tun_ptr_to_xdp(ptr);
+
+		ret = tun_put_user_xdp(tun, tfile, xdp, to);
+		put_page(virt_to_head_page(xdp->data));
+	} else {
+		struct sk_buff *skb = ptr;
+
+		ret = tun_put_user(tun, tfile, skb, to);
+		if (unlikely(ret < 0))
+			kfree_skb(skb);
+		else
+			consume_skb(skb);
+	}
 
 	return ret;
 }
@@ -1991,6 +2184,39 @@ static ssize_t tun_chr_read_iter(struct kiocb *iocb, struct iov_iter *to)
 	return ret;
 }
 
+static void tun_steering_prog_free(struct rcu_head *rcu)
+{
+	struct tun_steering_prog *prog = container_of(rcu,
+					 struct tun_steering_prog, rcu);
+
+	bpf_prog_destroy(prog->prog);
+	kfree(prog);
+}
+
+static int __tun_set_steering_ebpf(struct tun_struct *tun,
+				   struct bpf_prog *prog)
+{
+	struct tun_steering_prog *old, *new = NULL;
+
+	if (prog) {
+		new = kmalloc(sizeof(*new), GFP_KERNEL);
+		if (!new)
+			return -ENOMEM;
+		new->prog = prog;
+	}
+
+	spin_lock_bh(&tun->lock);
+	old = rcu_dereference_protected(tun->steering_prog,
+					lockdep_is_held(&tun->lock));
+	rcu_assign_pointer(tun->steering_prog, new);
+	spin_unlock_bh(&tun->lock);
+
+	if (old)
+		call_rcu(&old->rcu, tun_steering_prog_free);
+
+	return 0;
+}
+
 static void tun_free_netdev(struct net_device *dev)
 {
 	struct tun_struct *tun = netdev_priv(dev);
@@ -1999,6 +2225,7 @@ static void tun_free_netdev(struct net_device *dev)
 	free_percpu(tun->pcpu_stats);
 	tun_flow_uninit(tun);
 	security_tun_dev_free_security(tun->security);
+	__tun_set_steering_ebpf(tun, NULL);
 }
 
 static void tun_setup(struct net_device *dev)
@@ -2072,12 +2299,12 @@ static int tun_recvmsg(struct socket *sock, struct msghdr *m, size_t total_len,
 {
 	struct tun_file *tfile = container_of(sock, struct tun_file, socket);
 	struct tun_struct *tun = tun_get(tfile);
-	struct sk_buff *skb = m->msg_control;
+	void *ptr = m->msg_control;
 	int ret;
 
 	if (!tun) {
 		ret = -EBADFD;
-		goto out_free_skb;
+		goto out_free;
 	}
 
 	if (flags & ~(MSG_DONTWAIT|MSG_TRUNC|MSG_ERRQUEUE)) {
@@ -2089,7 +2316,7 @@ static int tun_recvmsg(struct socket *sock, struct msghdr *m, size_t total_len,
 					 SOL_PACKET, TUN_TX_TIMESTAMP);
 		goto out;
 	}
-	ret = tun_do_read(tun, tfile, &m->msg_iter, flags & MSG_DONTWAIT, skb);
+	ret = tun_do_read(tun, tfile, &m->msg_iter, flags & MSG_DONTWAIT, ptr);
 	if (ret > (ssize_t)total_len) {
 		m->msg_flags |= MSG_TRUNC;
 		ret = flags & MSG_TRUNC ? ret : total_len;
@@ -2100,12 +2327,25 @@ static int tun_recvmsg(struct socket *sock, struct msghdr *m, size_t total_len,
 
 out_put_tun:
 	tun_put(tun);
-out_free_skb:
-	if (skb)
-		kfree_skb(skb);
+out_free:
+	tun_ptr_free(ptr);
 	return ret;
 }
 
+static int tun_ptr_peek_len(void *ptr)
+{
+	if (likely(ptr)) {
+		if (tun_is_xdp_buff(ptr)) {
+			struct xdp_buff *xdp = tun_ptr_to_xdp(ptr);
+
+			return xdp->data_end - xdp->data;
+		}
+		return __skb_array_len_with_tag(ptr);
+	} else {
+		return 0;
+	}
+}
+
 static int tun_peek_len(struct socket *sock)
 {
 	struct tun_file *tfile = container_of(sock, struct tun_file, socket);
@@ -2116,7 +2356,7 @@ static int tun_peek_len(struct socket *sock)
 	if (!tun)
 		return 0;
 
-	ret = skb_array_peek_len(&tfile->tx_array);
+	ret = PTR_RING_PEEK_CALL(&tfile->tx_ring, tun_ptr_peek_len);
 	tun_put(tun);
 
 	return ret;
@@ -2287,6 +2527,7 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr)
 		tun->filter_attached = false;
 		tun->sndbuf = tfile->socket.sk->sk_sndbuf;
 		tun->rx_batched = 0;
+		RCU_INIT_POINTER(tun->steering_prog, NULL);
 
 		tun->pcpu_stats = netdev_alloc_pcpu_stats(struct tun_pcpu_stats);
 		if (!tun->pcpu_stats) {
@@ -2479,6 +2720,25 @@ static int tun_set_queue(struct file *file, struct ifreq *ifr)
 	return ret;
 }
 
+static int tun_set_steering_ebpf(struct tun_struct *tun, void __user *data)
+{
+	struct bpf_prog *prog;
+	int fd;
+
+	if (copy_from_user(&fd, data, sizeof(fd)))
+		return -EFAULT;
+
+	if (fd == -1) {
+		prog = NULL;
+	} else {
+		prog = bpf_prog_get_type(fd, BPF_PROG_TYPE_SOCKET_FILTER);
+		if (IS_ERR(prog))
+			return PTR_ERR(prog);
+	}
+
+	return __tun_set_steering_ebpf(tun, prog);
+}
+
 static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
 			    unsigned long arg, int ifreq_len)
 {
@@ -2755,6 +3015,10 @@ static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
 		ret = 0;
 		break;
 
+	case TUNSETSTEERINGEBPF:
+		ret = tun_set_steering_ebpf(tun, argp);
+		break;
+
 	default:
 		ret = -EINVAL;
 		break;
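
[Editor's note: the new TUNSETSTEERINGEBPF case takes a pointer to an int holding a BPF program fd, with -1 meaning detach (see tun_set_steering_ebpf() above). A hedged usage sketch: load_steering_prog() is a hypothetical helper returning a BPF_PROG_TYPE_SOCKET_FILTER fd from bpf(BPF_PROG_LOAD, ...), and the ioctl number is assumed to be exported through linux/if_tun.h on kernels carrying this series:

	#include <stdio.h>
	#include <sys/ioctl.h>
	#include <linux/if_tun.h>

	extern int load_steering_prog(void); /* hypothetical BPF_PROG_LOAD wrapper */

	static int attach_steering(int tun_fd)
	{
		int prog_fd = load_steering_prog();

		if (prog_fd < 0)
			return -1;
		if (ioctl(tun_fd, TUNSETSTEERINGEBPF, &prog_fd) < 0) {
			perror("TUNSETSTEERINGEBPF");
			return -1;
		}
		return 0;
	}

	static int detach_steering(int tun_fd)
	{
		int none = -1;   /* fd == -1 clears the program */

		return ioctl(tun_fd, TUNSETSTEERINGEBPF, &none);
	}
]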
@@ -2998,25 +3262,26 @@ static int tun_queue_resize(struct tun_struct *tun)
 {
 	struct net_device *dev = tun->dev;
 	struct tun_file *tfile;
-	struct skb_array **arrays;
+	struct ptr_ring **rings;
 	int n = tun->numqueues + tun->numdisabled;
 	int ret, i;
 
-	arrays = kmalloc_array(n, sizeof(*arrays), GFP_KERNEL);
-	if (!arrays)
+	rings = kmalloc_array(n, sizeof(*rings), GFP_KERNEL);
+	if (!rings)
 		return -ENOMEM;
 
 	for (i = 0; i < tun->numqueues; i++) {
 		tfile = rtnl_dereference(tun->tfiles[i]);
-		arrays[i] = &tfile->tx_array;
+		rings[i] = &tfile->tx_ring;
 	}
 	list_for_each_entry(tfile, &tun->disabled, next)
-		arrays[i++] = &tfile->tx_array;
+		rings[i++] = &tfile->tx_ring;
 
-	ret = skb_array_resize_multiple(arrays, n,
-					dev->tx_queue_len, GFP_KERNEL);
+	ret = ptr_ring_resize_multiple(rings, n,
+				       dev->tx_queue_len, GFP_KERNEL,
+				       tun_ptr_free);
 
-	kfree(arrays);
+	kfree(rings);
 	return ret;
 }
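
[Editor's note: the skb_array -> ptr_ring conversion throughout this file leans on ptr_ring's core invariant: slots hold bare non-NULL pointers and a NULL slot means empty, which is what makes the lockless consumer-side peek (PTR_RING_PEEK_CALL above) and the per-entry destructor on resize (tun_ptr_free passed to ptr_ring_resize_multiple()) possible. A toy single-threaded sketch of that invariant; the real, concurrency-safe version lives in linux/ptr_ring.h:

	#include <stdio.h>

	#define RING_SIZE 8

	struct toy_ring {
		void *queue[RING_SIZE];
		unsigned int producer, consumer;
	};

	static int ring_produce(struct toy_ring *r, void *ptr)
	{
		if (!ptr || r->queue[r->producer])  /* non-NULL slot == full */
			return -1;
		r->queue[r->producer] = ptr;
		r->producer = (r->producer + 1) % RING_SIZE;
		return 0;
	}

	static void *ring_consume(struct toy_ring *r)
	{
		void *ptr = r->queue[r->consumer];  /* NULL slot == empty */

		if (ptr) {
			r->queue[r->consumer] = NULL;
			r->consumer = (r->consumer + 1) % RING_SIZE;
		}
		return ptr;
	}

	int main(void)
	{
		static struct toy_ring r;
		int x = 42;

		ring_produce(&r, &x);
		printf("consumed %d\n", *(int *)ring_consume(&r));
		return 0;
	}
]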
 
@@ -3102,7 +3367,7 @@ struct socket *tun_get_socket(struct file *file)
 }
 EXPORT_SYMBOL_GPL(tun_get_socket);
 
-struct skb_array *tun_get_skb_array(struct file *file)
+struct ptr_ring *tun_get_tx_ring(struct file *file)
 {
 	struct tun_file *tfile;
 
@@ -3111,9 +3376,9 @@ struct skb_array *tun_get_skb_array(struct file *file)
 	tfile = file->private_data;
 	if (!tfile)
 		return ERR_PTR(-EBADFD);
-	return &tfile->tx_array;
+	return &tfile->tx_ring;
 }
-EXPORT_SYMBOL_GPL(tun_get_skb_array);
+EXPORT_SYMBOL_GPL(tun_get_tx_ring);
 
 module_init(tun_init);
 module_exit(tun_cleanup);
diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
index 728819f..ae0580b 100644
--- a/drivers/net/usb/qmi_wwan.c
+++ b/drivers/net/usb/qmi_wwan.c
@@ -826,7 +826,7 @@ static int qmi_wwan_resume(struct usb_interface *intf)
 
 static const struct driver_info	qmi_wwan_info = {
 	.description	= "WWAN/QMI device",
-	.flags		= FLAG_WWAN,
+	.flags		= FLAG_WWAN | FLAG_SEND_ZLP,
 	.bind		= qmi_wwan_bind,
 	.unbind		= qmi_wwan_unbind,
 	.manage_power	= qmi_wwan_manage_power,
@@ -835,7 +835,7 @@ static const struct driver_info	qmi_wwan_info = {
 
 static const struct driver_info	qmi_wwan_info_quirk_dtr = {
 	.description	= "WWAN/QMI device",
-	.flags		= FLAG_WWAN,
+	.flags		= FLAG_WWAN | FLAG_SEND_ZLP,
 	.bind		= qmi_wwan_bind,
 	.unbind		= qmi_wwan_unbind,
 	.manage_power	= qmi_wwan_manage_power,
diff --git a/drivers/net/veth.c b/drivers/net/veth.c
index f5438d0..a69ad39 100644
--- a/drivers/net/veth.c
+++ b/drivers/net/veth.c
@@ -410,6 +410,9 @@ static int veth_newlink(struct net *src_net, struct net_device *dev,
 	if (ifmp && (dev->ifindex != 0))
 		peer->ifindex = ifmp->ifi_index;
 
+	peer->gso_max_size = dev->gso_max_size;
+	peer->gso_max_segs = dev->gso_max_segs;
+
 	err = register_netdevice(peer);
 	put_net(net);
 	net = NULL;
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 559b215..12dfc5f 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -31,6 +31,7 @@
 #include <linux/average.h>
 #include <linux/filter.h>
 #include <net/route.h>
+#include <net/xdp.h>
 
 static int napi_weight = NAPI_POLL_WEIGHT;
 module_param(napi_weight, int, 0444);
@@ -115,6 +116,8 @@ struct receive_queue {
 
 	/* Name of this receive queue: input.$index */
 	char name[40];
+
+	struct xdp_rxq_info xdp_rxq;
 };
 
 struct virtnet_info {
@@ -261,9 +264,12 @@ static void virtqueue_napi_complete(struct napi_struct *napi,
 	int opaque;
 
 	opaque = virtqueue_enable_cb_prepare(vq);
-	if (napi_complete_done(napi, processed) &&
-	    unlikely(virtqueue_poll(vq, opaque)))
-		virtqueue_napi_schedule(napi, vq);
+	if (napi_complete_done(napi, processed)) {
+		if (unlikely(virtqueue_poll(vq, opaque)))
+			virtqueue_napi_schedule(napi, vq);
+	} else {
+		virtqueue_disable_cb(vq);
+	}
 }
 
 static void skb_xmit_done(struct virtqueue *vq)
@@ -556,6 +562,7 @@ static struct sk_buff *receive_small(struct net_device *dev,
 		xdp.data = xdp.data_hard_start + xdp_headroom;
 		xdp_set_data_meta_invalid(&xdp);
 		xdp.data_end = xdp.data + len;
+		xdp.rxq = &rq->xdp_rxq;
 		orig_data = xdp.data;
 		act = bpf_prog_run_xdp(xdp_prog, &xdp);
 
@@ -689,6 +696,8 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
 		xdp.data = data + vi->hdr_len;
 		xdp_set_data_meta_invalid(&xdp);
 		xdp.data_end = xdp.data + (len - vi->hdr_len);
+		xdp.rxq = &rq->xdp_rxq;
+
 		act = bpf_prog_run_xdp(xdp_prog, &xdp);
 
 		if (act != XDP_PASS)
@@ -1222,13 +1231,18 @@ static int virtnet_poll(struct napi_struct *napi, int budget)
 static int virtnet_open(struct net_device *dev)
 {
 	struct virtnet_info *vi = netdev_priv(dev);
-	int i;
+	int i, err;
 
 	for (i = 0; i < vi->max_queue_pairs; i++) {
 		if (i < vi->curr_queue_pairs)
 			/* Make sure we have some buffers: if oom use wq. */
 			if (!try_fill_recv(vi, &vi->rq[i], GFP_KERNEL))
 				schedule_delayed_work(&vi->refill, 0);
+
+		err = xdp_rxq_info_reg(&vi->rq[i].xdp_rxq, dev, i);
+		if (err < 0)
+			return err;
+
 		virtnet_napi_enable(vi->rq[i].vq, &vi->rq[i].napi);
 		virtnet_napi_tx_enable(vi, vi->sq[i].vq, &vi->sq[i].napi);
 	}
@@ -1557,6 +1571,7 @@ static int virtnet_close(struct net_device *dev)
 	cancel_delayed_work_sync(&vi->refill);
 
 	for (i = 0; i < vi->max_queue_pairs; i++) {
+		xdp_rxq_info_unreg(&vi->rq[i].xdp_rxq);
 		napi_disable(&vi->rq[i].napi);
 		virtnet_napi_tx_disable(&vi->sq[i].napi);
 	}
@@ -1891,6 +1906,24 @@ static void virtnet_init_settings(struct net_device *dev)
 	vi->duplex = DUPLEX_UNKNOWN;
 }
 
+static void virtnet_update_settings(struct virtnet_info *vi)
+{
+	u32 speed;
+	u8 duplex;
+
+	if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_SPEED_DUPLEX))
+		return;
+
+	speed = virtio_cread32(vi->vdev, offsetof(struct virtio_net_config,
+						  speed));
+	if (ethtool_validate_speed(speed))
+		vi->speed = speed;
+	duplex = virtio_cread8(vi->vdev, offsetof(struct virtio_net_config,
+						  duplex));
+	if (ethtool_validate_duplex(duplex))
+		vi->duplex = duplex;
+}
+
 static const struct ethtool_ops virtnet_ethtool_ops = {
 	.get_drvinfo = virtnet_get_drvinfo,
 	.get_link = ethtool_op_get_link,
@@ -2144,6 +2177,7 @@ static void virtnet_config_changed_work(struct work_struct *work)
 	vi->status = v;
 
 	if (vi->status & VIRTIO_NET_S_LINK_UP) {
+		virtnet_update_settings(vi);
 		netif_carrier_on(vi->dev);
 		netif_tx_wake_all_queues(vi->dev);
 	} else {
@@ -2692,6 +2726,7 @@ static int virtnet_probe(struct virtio_device *vdev)
 		schedule_work(&vi->config_work);
 	} else {
 		vi->status = VIRTIO_NET_S_LINK_UP;
+		virtnet_update_settings(vi);
 		netif_carrier_on(dev);
 	}
 
@@ -2793,7 +2828,8 @@ static struct virtio_device_id id_table[] = {
 	VIRTIO_NET_F_CTRL_RX, VIRTIO_NET_F_CTRL_VLAN, \
 	VIRTIO_NET_F_GUEST_ANNOUNCE, VIRTIO_NET_F_MQ, \
 	VIRTIO_NET_F_CTRL_MAC_ADDR, \
-	VIRTIO_NET_F_MTU, VIRTIO_NET_F_CTRL_GUEST_OFFLOADS
+	VIRTIO_NET_F_MTU, VIRTIO_NET_F_CTRL_GUEST_OFFLOADS, \
+	VIRTIO_NET_F_SPEED_DUPLEX
 
 static unsigned int features[] = {
 	VIRTNET_FEATURES,
diff --git a/drivers/net/vmxnet3/vmxnet3_int.h b/drivers/net/vmxnet3/vmxnet3_int.h
index 9c51b8b..5ba2229 100644
--- a/drivers/net/vmxnet3/vmxnet3_int.h
+++ b/drivers/net/vmxnet3/vmxnet3_int.h
@@ -69,10 +69,10 @@
 /*
  * Version numbers
  */
-#define VMXNET3_DRIVER_VERSION_STRING   "1.4.a.0-k"
+#define VMXNET3_DRIVER_VERSION_STRING   "1.4.11.0-k"
 
 /* a 32-bit int, each byte encodes a version number in VMXNET3_DRIVER_VERSION */
-#define VMXNET3_DRIVER_VERSION_NUM      0x01040a00
+#define VMXNET3_DRIVER_VERSION_NUM      0x01040b00
 
 #if defined(CONFIG_PCI_MSI)
 	/* RSS only makes sense if MSI-X is supported. */
@@ -416,8 +416,8 @@ struct vmxnet3_adapter {
 
 /* must be a multiple of VMXNET3_RING_SIZE_ALIGN */
 #define VMXNET3_DEF_TX_RING_SIZE    512
-#define VMXNET3_DEF_RX_RING_SIZE    256
-#define VMXNET3_DEF_RX_RING2_SIZE   128
+#define VMXNET3_DEF_RX_RING_SIZE    1024
+#define VMXNET3_DEF_RX_RING2_SIZE   256
 
 #define VMXNET3_DEF_RXDATA_DESC_SIZE 128
 
diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
index 31f4b79..82090ae 100644
--- a/drivers/net/vxlan.c
+++ b/drivers/net/vxlan.c
@@ -3711,18 +3711,16 @@ static __net_init int vxlan_init_net(struct net *net)
 	return 0;
 }
 
-static void __net_exit vxlan_exit_net(struct net *net)
+static void vxlan_destroy_tunnels(struct net *net, struct list_head *head)
 {
 	struct vxlan_net *vn = net_generic(net, vxlan_net_id);
 	struct vxlan_dev *vxlan, *next;
 	struct net_device *dev, *aux;
 	unsigned int h;
-	LIST_HEAD(list);
 
-	rtnl_lock();
 	for_each_netdev_safe(net, dev, aux)
 		if (dev->rtnl_link_ops == &vxlan_link_ops)
-			unregister_netdevice_queue(dev, &list);
+			unregister_netdevice_queue(dev, head);
 
 	list_for_each_entry_safe(vxlan, next, &vn->vxlan_list, next) {
 		/* If vxlan->dev is in the same netns, it has already been added
@@ -3730,20 +3728,30 @@ static void __net_exit vxlan_exit_net(struct net *net)
 		 */
 		if (!net_eq(dev_net(vxlan->dev), net)) {
 			gro_cells_destroy(&vxlan->gro_cells);
-			unregister_netdevice_queue(vxlan->dev, &list);
+			unregister_netdevice_queue(vxlan->dev, head);
 		}
 	}
 
-	unregister_netdevice_many(&list);
-	rtnl_unlock();
-
 	for (h = 0; h < PORT_HASH_SIZE; ++h)
 		WARN_ON_ONCE(!hlist_empty(&vn->sock_list[h]));
 }
 
+static void __net_exit vxlan_exit_batch_net(struct list_head *net_list)
+{
+	struct net *net;
+	LIST_HEAD(list);
+
+	rtnl_lock();
+	list_for_each_entry(net, net_list, exit_list)
+		vxlan_destroy_tunnels(net, &list);
+
+	unregister_netdevice_many(&list);
+	rtnl_unlock();
+}
+
 static struct pernet_operations vxlan_net_ops = {
 	.init = vxlan_init_net,
-	.exit = vxlan_exit_net,
+	.exit_batch = vxlan_exit_batch_net,
 	.id   = &vxlan_net_id,
 	.size = sizeof(struct vxlan_net),
 };
diff --git a/drivers/net/wireless/ath/ath10k/Kconfig b/drivers/net/wireless/ath/ath10k/Kconfig
index 87f56d0..deb5ae2 100644
--- a/drivers/net/wireless/ath/ath10k/Kconfig
+++ b/drivers/net/wireless/ath/ath10k/Kconfig
@@ -47,12 +47,19 @@
 config ATH10K_DEBUGFS
 	bool "Atheros ath10k debugfs support"
 	depends on ATH10K && DEBUG_FS
-	select RELAY
 	---help---
 	  Enable debugfs support
 
 	  If unsure, say Y to make it easier to debug problems.
 
+config ATH10K_SPECTRAL
+	bool "Atheros ath10k spectral scan support"
+	depends on ATH10K_DEBUGFS
+	select RELAY
+	default n
+	---help---
+	  Say Y to enable access to the FFT/spectral data via debugfs.
+
 config ATH10K_TRACING
 	bool "Atheros ath10k tracing support"
 	depends on ATH10K
diff --git a/drivers/net/wireless/ath/ath10k/Makefile b/drivers/net/wireless/ath/ath10k/Makefile
index 9492177..8d9a59b 100644
--- a/drivers/net/wireless/ath/ath10k/Makefile
+++ b/drivers/net/wireless/ath/ath10k/Makefile
@@ -15,7 +15,7 @@
 		 p2p.o \
 		 swap.o
 
-ath10k_core-$(CONFIG_ATH10K_DEBUGFS) += spectral.o
+ath10k_core-$(CONFIG_ATH10K_SPECTRAL) += spectral.o
 ath10k_core-$(CONFIG_NL80211_TESTMODE) += testmode.o
 ath10k_core-$(CONFIG_ATH10K_TRACING) += trace.o
 ath10k_core-$(CONFIG_THERMAL) += thermal.o
diff --git a/drivers/net/wireless/ath/ath10k/core.c b/drivers/net/wireless/ath/ath10k/core.c
index b29fdbd..6d065f8d 100644
--- a/drivers/net/wireless/ath/ath10k/core.c
+++ b/drivers/net/wireless/ath/ath10k/core.c
@@ -75,6 +75,9 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
 		.vht160_mcs_rx_highest = 0,
 		.vht160_mcs_tx_highest = 0,
 		.n_cipher_suites = 8,
+		.num_peers = TARGET_TLV_NUM_PEERS,
+		.ast_skid_limit = 0x10,
+		.num_wds_entries = 0x20,
 	},
 	{
 		.id = QCA9887_HW_1_0_VERSION,
@@ -99,6 +102,9 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
 		.vht160_mcs_rx_highest = 0,
 		.vht160_mcs_tx_highest = 0,
 		.n_cipher_suites = 8,
+		.num_peers = TARGET_TLV_NUM_PEERS,
+		.ast_skid_limit = 0x10,
+		.num_wds_entries = 0x20,
 	},
 	{
 		.id = QCA6174_HW_2_1_VERSION,
@@ -122,6 +128,9 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
 		.vht160_mcs_rx_highest = 0,
 		.vht160_mcs_tx_highest = 0,
 		.n_cipher_suites = 8,
+		.num_peers = TARGET_TLV_NUM_PEERS,
+		.ast_skid_limit = 0x10,
+		.num_wds_entries = 0x20,
 	},
 	{
 		.id = QCA6174_HW_2_1_VERSION,
@@ -145,6 +154,9 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
 		.vht160_mcs_rx_highest = 0,
 		.vht160_mcs_tx_highest = 0,
 		.n_cipher_suites = 8,
+		.num_peers = TARGET_TLV_NUM_PEERS,
+		.ast_skid_limit = 0x10,
+		.num_wds_entries = 0x20,
 	},
 	{
 		.id = QCA6174_HW_3_0_VERSION,
@@ -168,6 +180,9 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
 		.vht160_mcs_rx_highest = 0,
 		.vht160_mcs_tx_highest = 0,
 		.n_cipher_suites = 8,
+		.num_peers = TARGET_TLV_NUM_PEERS,
+		.ast_skid_limit = 0x10,
+		.num_wds_entries = 0x20,
 	},
 	{
 		.id = QCA6174_HW_3_2_VERSION,
@@ -194,6 +209,9 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
 		.vht160_mcs_rx_highest = 0,
 		.vht160_mcs_tx_highest = 0,
 		.n_cipher_suites = 8,
+		.num_peers = TARGET_TLV_NUM_PEERS,
+		.ast_skid_limit = 0x10,
+		.num_wds_entries = 0x20,
 	},
 	{
 		.id = QCA99X0_HW_2_0_DEV_VERSION,
@@ -223,6 +241,9 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
 		.vht160_mcs_rx_highest = 0,
 		.vht160_mcs_tx_highest = 0,
 		.n_cipher_suites = 11,
+		.num_peers = TARGET_TLV_NUM_PEERS,
+		.ast_skid_limit = 0x10,
+		.num_wds_entries = 0x20,
 	},
 	{
 		.id = QCA9984_HW_1_0_DEV_VERSION,
@@ -257,6 +278,9 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
 		.vht160_mcs_rx_highest = 1560,
 		.vht160_mcs_tx_highest = 1560,
 		.n_cipher_suites = 11,
+		.num_peers = TARGET_TLV_NUM_PEERS,
+		.ast_skid_limit = 0x10,
+		.num_wds_entries = 0x20,
 	},
 	{
 		.id = QCA9888_HW_2_0_DEV_VERSION,
@@ -290,6 +314,9 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
 		.vht160_mcs_rx_highest = 780,
 		.vht160_mcs_tx_highest = 780,
 		.n_cipher_suites = 11,
+		.num_peers = TARGET_TLV_NUM_PEERS,
+		.ast_skid_limit = 0x10,
+		.num_wds_entries = 0x20,
 	},
 	{
 		.id = QCA9377_HW_1_0_DEV_VERSION,
@@ -313,6 +340,9 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
 		.vht160_mcs_rx_highest = 0,
 		.vht160_mcs_tx_highest = 0,
 		.n_cipher_suites = 8,
+		.num_peers = TARGET_TLV_NUM_PEERS,
+		.ast_skid_limit = 0x10,
+		.num_wds_entries = 0x20,
 	},
 	{
 		.id = QCA9377_HW_1_1_DEV_VERSION,
@@ -338,6 +368,9 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
 		.vht160_mcs_rx_highest = 0,
 		.vht160_mcs_tx_highest = 0,
 		.n_cipher_suites = 8,
+		.num_peers = TARGET_TLV_NUM_PEERS,
+		.ast_skid_limit = 0x10,
+		.num_wds_entries = 0x20,
 	},
 	{
 		.id = QCA4019_HW_1_0_DEV_VERSION,
@@ -368,6 +401,27 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
 		.vht160_mcs_rx_highest = 0,
 		.vht160_mcs_tx_highest = 0,
 		.n_cipher_suites = 11,
+		.num_peers = TARGET_TLV_NUM_PEERS,
+		.ast_skid_limit = 0x10,
+		.num_wds_entries = 0x20,
+	},
+	{
+		.id = WCN3990_HW_1_0_DEV_VERSION,
+		.dev_id = 0,
+		.name = "wcn3990 hw1.0",
+		.continuous_frag_desc = true,
+		.tx_chain_mask = 0x7,
+		.rx_chain_mask = 0x7,
+		.max_spatial_stream = 4,
+		.fw = {
+			.dir = WCN3990_HW_1_0_FW_DIR,
+		},
+		.sw_decrypt_mcast_mgmt = true,
+		.hw_ops = &wcn3990_ops,
+		.decap_align_bytes = 1,
+		.num_peers = TARGET_HL_10_TLV_NUM_PEERS,
+		.ast_skid_limit = TARGET_HL_10_TLV_AST_SKID_LIMIT,
+		.num_wds_entries = TARGET_HL_10_TLV_NUM_WDS_ENTRIES,
 	},
 };
 
@@ -390,6 +444,7 @@ static const char *const ath10k_core_fw_feature_str[] = {
 	[ATH10K_FW_FEATURE_SKIP_NULL_FUNC_WAR] = "skip-null-func-war",
 	[ATH10K_FW_FEATURE_ALLOWS_MESH_BCAST] = "allows-mesh-bcast",
 	[ATH10K_FW_FEATURE_NO_PS] = "no-ps",
+	[ATH10K_FW_FEATURE_MGMT_TX_BY_REF] = "mgmt-tx-by-reference",
 };
 
 static unsigned int ath10k_core_get_fw_feature_str(char *buf,
@@ -860,6 +915,28 @@ static int ath10k_core_check_smbios(struct ath10k *ar)
 	return 0;
 }
 
+static int ath10k_core_check_dt(struct ath10k *ar)
+{
+	struct device_node *node;
+	const char *variant = NULL;
+
+	node = ar->dev->of_node;
+	if (!node)
+		return -ENOENT;
+
+	of_property_read_string(node, "qcom,ath10k-calibration-variant",
+				&variant);
+	if (!variant)
+		return -ENODATA;
+
+	if (strscpy(ar->id.bdf_ext, variant, sizeof(ar->id.bdf_ext)) < 0)
+		ath10k_dbg(ar, ATH10K_DBG_BOOT,
+			   "bdf variant string is longer than the buffer can accommodate (variant: %s)\n",
+			    variant);
+
+	return 0;
+}
+
 static int ath10k_download_and_run_otp(struct ath10k *ar)
 {
 	u32 result, address = ar->hw_params.patch_load_addr;
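
[Editor's note: ath10k_core_check_dt() lets firmware description supply the calibration variant that was previously SMBIOS-only. A hypothetical device-tree fragment; only the qcom,ath10k-calibration-variant property name comes from the code above, while the node name and compatible string are illustrative:

	wifi@18800000 {
		compatible = "qcom,wcn3990-wifi";  /* illustrative */
		qcom,ath10k-calibration-variant = "som-vendor-board";
	};
]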
@@ -1231,19 +1308,19 @@ static int ath10k_core_create_board_name(struct ath10k *ar, char *name,
 	/* strlen(',variant=') + strlen(ar->id.bdf_ext) */
 	char variant[9 + ATH10K_SMBIOS_BDF_EXT_STR_LENGTH] = { 0 };
 
-	if (ar->id.bmi_ids_valid) {
-		scnprintf(name, name_len,
-			  "bus=%s,bmi-chip-id=%d,bmi-board-id=%d",
-			  ath10k_bus_str(ar->hif.bus),
-			  ar->id.bmi_chip_id,
-			  ar->id.bmi_board_id);
-		goto out;
-	}
-
 	if (ar->id.bdf_ext[0] != '\0')
 		scnprintf(variant, sizeof(variant), ",variant=%s",
 			  ar->id.bdf_ext);
 
+	if (ar->id.bmi_ids_valid) {
+		scnprintf(name, name_len,
+			  "bus=%s,bmi-chip-id=%d,bmi-board-id=%d%s",
+			  ath10k_bus_str(ar->hif.bus),
+			  ar->id.bmi_chip_id,
+			  ar->id.bmi_board_id, variant);
+		goto out;
+	}
+
 	scnprintf(name, name_len,
 		  "bus=%s,vendor=%04x,device=%04x,subsystem-vendor=%04x,subsystem-device=%04x%s",
 		  ath10k_bus_str(ar->hif.bus),
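
[Editor's note: the reorder above fixes a real gap: on the BMI-ID path the function used to goto out before the ",variant=..." suffix was even formatted, so only the PCI-ID-style name could carry it. After the change both shapes can, e.g. (all values illustrative):

	bus=pci,bmi-chip-id=0,bmi-board-id=1,variant=som-vendor-board
	bus=pci,vendor=168c,device=003e,subsystem-vendor=1028,subsystem-device=0310,variant=som-vendor-board
]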
@@ -2343,7 +2420,11 @@ static int ath10k_core_probe_fw(struct ath10k *ar)
 
 	ret = ath10k_core_check_smbios(ar);
 	if (ret)
-		ath10k_dbg(ar, ATH10K_DBG_BOOT, "bdf variant name not set.\n");
+		ath10k_dbg(ar, ATH10K_DBG_BOOT, "SMBIOS bdf variant name not set.\n");
+
+	ret = ath10k_core_check_dt(ar);
+	if (ret)
+		ath10k_dbg(ar, ATH10K_DBG_BOOT, "DT bdf variant name not set.\n");
 
 	ret = ath10k_core_fetch_board_file(ar);
 	if (ret) {
diff --git a/drivers/net/wireless/ath/ath10k/core.h b/drivers/net/wireless/ath/ath10k/core.h
index 643041e..631df21 100644
--- a/drivers/net/wireless/ath/ath10k/core.h
+++ b/drivers/net/wireless/ath/ath10k/core.h
@@ -67,7 +67,6 @@
 
 /* NAPI poll budget */
 #define ATH10K_NAPI_BUDGET      64
-#define ATH10K_NAPI_QUOTA_LIMIT 60
 
 /* SMBIOS type containing Board Data File Name Extension */
 #define ATH10K_SMBIOS_BDF_EXT_TYPE 0xF8
@@ -364,11 +363,11 @@ struct ath10k_sta {
 	struct rate_info txrate;
 
 	struct work_struct update_wk;
+	u64 rx_duration;
 
 #ifdef CONFIG_MAC80211_DEBUGFS
 	/* protected by conf_mutex */
 	bool aggr_mode;
-	u64 rx_duration;
 #endif
 };
 
@@ -463,7 +462,7 @@ struct ath10k_fw_crash_data {
 	bool crashed_since_read;
 
 	guid_t guid;
-	struct timespec timestamp;
+	struct timespec64 timestamp;
 	__le32 registers[REG_DUMP_COUNT_QCA988X];
 	struct ath10k_ce_crash_data ce_crash_data[CE_COUNT_MAX];
 };
@@ -488,7 +487,6 @@ struct ath10k_debug {
 	/* protected by conf_mutex */
 	u64 fw_dbglog_mask;
 	u32 fw_dbglog_level;
-	u32 pktlog_filter;
 	u32 reg_addr;
 	u32 nf_cal_period;
 	void *cal_data;
@@ -615,6 +613,9 @@ enum ath10k_fw_features {
 	/* Firmware does not support power save in station mode. */
 	ATH10K_FW_FEATURE_NO_PS = 17,
 
+	/* Firmware allows management tx by reference instead of by value. */
+	ATH10K_FW_FEATURE_MGMT_TX_BY_REF = 18,
+
 	/* keep last */
 	ATH10K_FW_FEATURE_COUNT,
 };
@@ -963,6 +964,7 @@ struct ath10k {
 	} spectral;
 #endif
 
+	u32 pktlog_filter;
 	struct {
 		/* protected by conf_mutex */
 		struct ath10k_fw_components utf_mode_fw;
diff --git a/drivers/net/wireless/ath/ath10k/debug.c b/drivers/net/wireless/ath/ath10k/debug.c
index df51450..181fd8e 100644
--- a/drivers/net/wireless/ath/ath10k/debug.c
+++ b/drivers/net/wireless/ath/ath10k/debug.c
@@ -720,7 +720,7 @@ ath10k_debug_get_new_fw_crash_data(struct ath10k *ar)
 
 	crash_data->crashed_since_read = true;
 	guid_gen(&crash_data->guid);
-	getnstimeofday(&crash_data->timestamp);
+	ktime_get_real_ts64(&crash_data->timestamp);
 
 	return crash_data;
 }
@@ -1950,14 +1950,14 @@ int ath10k_debug_start(struct ath10k *ar)
 				    ret);
 	}
 
-	if (ar->debug.pktlog_filter) {
+	if (ar->pktlog_filter) {
 		ret = ath10k_wmi_pdev_pktlog_enable(ar,
-						    ar->debug.pktlog_filter);
+						    ar->pktlog_filter);
 		if (ret)
 			/* not serious */
 			ath10k_warn(ar,
 				    "failed to enable pktlog filter %x: %d\n",
-				    ar->debug.pktlog_filter, ret);
+				    ar->pktlog_filter, ret);
 	} else {
 		ret = ath10k_wmi_pdev_pktlog_disable(ar);
 		if (ret)
@@ -2097,12 +2097,12 @@ static ssize_t ath10k_write_pktlog_filter(struct file *file,
 	mutex_lock(&ar->conf_mutex);
 
 	if (ar->state != ATH10K_STATE_ON) {
-		ar->debug.pktlog_filter = filter;
+		ar->pktlog_filter = filter;
 		ret = count;
 		goto out;
 	}
 
-	if (filter == ar->debug.pktlog_filter) {
+	if (filter == ar->pktlog_filter) {
 		ret = count;
 		goto out;
 	}
@@ -2111,7 +2111,7 @@ static ssize_t ath10k_write_pktlog_filter(struct file *file,
 		ret = ath10k_wmi_pdev_pktlog_enable(ar, filter);
 		if (ret) {
 			ath10k_warn(ar, "failed to enable pktlog filter %x: %d\n",
-				    ar->debug.pktlog_filter, ret);
+				    ar->pktlog_filter, ret);
 			goto out;
 		}
 	} else {
@@ -2122,7 +2122,7 @@ static ssize_t ath10k_write_pktlog_filter(struct file *file,
 		}
 	}
 
-	ar->debug.pktlog_filter = filter;
+	ar->pktlog_filter = filter;
 	ret = count;
 
 out:
@@ -2139,7 +2139,7 @@ static ssize_t ath10k_read_pktlog_filter(struct file *file, char __user *ubuf,
 
 	mutex_lock(&ar->conf_mutex);
 	len = scnprintf(buf, sizeof(buf) - len, "%08x\n",
-			ar->debug.pktlog_filter);
+			ar->pktlog_filter);
 	mutex_unlock(&ar->conf_mutex);
 
 	return simple_read_from_buffer(ubuf, count, ppos, buf, len);
diff --git a/drivers/net/wireless/ath/ath10k/debug.h b/drivers/net/wireless/ath/ath10k/debug.h
index 548ad54..5e66299 100644
--- a/drivers/net/wireless/ath/ath10k/debug.h
+++ b/drivers/net/wireless/ath/ath10k/debug.h
@@ -51,7 +51,8 @@ enum ath10k_pktlog_filter {
 	ATH10K_PKTLOG_RCFIND     = 0x000000004,
 	ATH10K_PKTLOG_RCUPDATE   = 0x000000008,
 	ATH10K_PKTLOG_DBG_PRINT  = 0x000000010,
-	ATH10K_PKTLOG_ANY        = 0x00000001f,
+	ATH10K_PKTLOG_PEER_STATS = 0x000000040,
+	ATH10K_PKTLOG_ANY        = 0x00000005f,
 };
 
 enum ath10k_dbg_aggr_mode {
@@ -60,6 +61,21 @@ enum ath10k_dbg_aggr_mode {
 	ATH10K_DBG_AGGR_MODE_MAX,
 };
 
+/* Types of packet log events */
+enum ath_pktlog_type {
+	ATH_PKTLOG_TYPE_TX_CTRL = 1,
+	ATH_PKTLOG_TYPE_TX_STAT,
+};
+
+struct ath10k_pktlog_hdr {
+	__le16 flags;
+	__le16 missed_cnt;
+	__le16 log_type; /* Type of log information following this header */
+	__le16 size; /* Size of variable length log information in bytes */
+	__le32 timestamp;
+	u8 payload[0];
+} __packed;
+
 /* FIXME: How to calculate the buffer size sanely? */
 #define ATH10K_FW_STATS_BUF_SIZE (1024 * 1024)
 
@@ -190,9 +206,6 @@ void ath10k_sta_add_debugfs(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
 			    struct ieee80211_sta *sta, struct dentry *dir);
 void ath10k_sta_update_rx_duration(struct ath10k *ar,
 				   struct ath10k_fw_stats *stats);
-void ath10k_sta_statistics(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
-			   struct ieee80211_sta *sta,
-			   struct station_info *sinfo);
 #else
 static inline
 void ath10k_sta_update_rx_duration(struct ath10k *ar,
diff --git a/drivers/net/wireless/ath/ath10k/debugfs_sta.c b/drivers/net/wireless/ath/ath10k/debugfs_sta.c
index d59ac6b..ff96f70 100644
--- a/drivers/net/wireless/ath/ath10k/debugfs_sta.c
+++ b/drivers/net/wireless/ath/ath10k/debugfs_sta.c
@@ -65,33 +65,6 @@ void ath10k_sta_update_rx_duration(struct ath10k *ar,
 		ath10k_sta_update_stats_rx_duration(ar, stats);
 }
 
-void ath10k_sta_statistics(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
-			   struct ieee80211_sta *sta,
-			   struct station_info *sinfo)
-{
-	struct ath10k_sta *arsta = (struct ath10k_sta *)sta->drv_priv;
-	struct ath10k *ar = arsta->arvif->ar;
-
-	if (!ath10k_peer_stats_enabled(ar))
-		return;
-
-	sinfo->rx_duration = arsta->rx_duration;
-	sinfo->filled |= 1ULL << NL80211_STA_INFO_RX_DURATION;
-
-	if (!arsta->txrate.legacy && !arsta->txrate.nss)
-		return;
-
-	if (arsta->txrate.legacy) {
-		sinfo->txrate.legacy = arsta->txrate.legacy;
-	} else {
-		sinfo->txrate.mcs = arsta->txrate.mcs;
-		sinfo->txrate.nss = arsta->txrate.nss;
-		sinfo->txrate.bw = arsta->txrate.bw;
-	}
-	sinfo->txrate.flags = arsta->txrate.flags;
-	sinfo->filled |= 1ULL << NL80211_STA_INFO_TX_BITRATE;
-}
-
 static ssize_t ath10k_dbg_sta_read_aggr_mode(struct file *file,
 					     char __user *user_buf,
 					     size_t count, loff_t *ppos)
diff --git a/drivers/net/wireless/ath/ath10k/htt.h b/drivers/net/wireless/ath/ath10k/htt.h
index 6305308..7bd93d6 100644
--- a/drivers/net/wireless/ath/ath10k/htt.h
+++ b/drivers/net/wireless/ath/ath10k/htt.h
@@ -1497,6 +1497,23 @@ struct htt_peer_tx_stats {
 	u8 payload[0];
 } __packed;
 
+#define ATH10K_10_2_TX_STATS_OFFSET	136
+#define PEER_STATS_FOR_NO_OF_PPDUS	4
+
+struct ath10k_10_2_peer_tx_stats {
+	u8 ratecode[PEER_STATS_FOR_NO_OF_PPDUS];
+	u8 success_pkts[PEER_STATS_FOR_NO_OF_PPDUS];
+	__le16 success_bytes[PEER_STATS_FOR_NO_OF_PPDUS];
+	u8 retry_pkts[PEER_STATS_FOR_NO_OF_PPDUS];
+	__le16 retry_bytes[PEER_STATS_FOR_NO_OF_PPDUS];
+	u8 failed_pkts[PEER_STATS_FOR_NO_OF_PPDUS];
+	__le16 failed_bytes[PEER_STATS_FOR_NO_OF_PPDUS];
+	u8 flags[PEER_STATS_FOR_NO_OF_PPDUS];
+	__le32 tx_duration;
+	u8 tx_ppdu_cnt;
+	u8 peer_id;
+} __packed;
+
 union htt_rx_pn_t {
 	/* WEP: 24-bit PN */
 	u32 pn24;
@@ -1695,7 +1712,7 @@ struct ath10k_htt {
 	/* This is used to group tx/rx completions separately and process them
 	 * in batches to reduce cache stalls
 	 */
-	struct sk_buff_head rx_compl_q;
+	struct sk_buff_head rx_msdus_q;
 	struct sk_buff_head rx_in_ord_compl_q;
 	struct sk_buff_head tx_fetch_ind_q;
 
diff --git a/drivers/net/wireless/ath/ath10k/htt_rx.c b/drivers/net/wireless/ath/ath10k/htt_rx.c
index 7d295ee..620ed7d 100644
--- a/drivers/net/wireless/ath/ath10k/htt_rx.c
+++ b/drivers/net/wireless/ath/ath10k/htt_rx.c
@@ -227,7 +227,7 @@ void ath10k_htt_rx_free(struct ath10k_htt *htt)
 {
 	del_timer_sync(&htt->rx_ring.refill_retry_timer);
 
-	skb_queue_purge(&htt->rx_compl_q);
+	skb_queue_purge(&htt->rx_msdus_q);
 	skb_queue_purge(&htt->rx_in_ord_compl_q);
 	skb_queue_purge(&htt->tx_fetch_ind_q);
 
@@ -515,7 +515,7 @@ int ath10k_htt_rx_alloc(struct ath10k_htt *htt)
 	htt->rx_ring.sw_rd_idx.msdu_payld = 0;
 	hash_init(htt->rx_ring.skb_table);
 
-	skb_queue_head_init(&htt->rx_compl_q);
+	skb_queue_head_init(&htt->rx_msdus_q);
 	skb_queue_head_init(&htt->rx_in_ord_compl_q);
 	skb_queue_head_init(&htt->tx_fetch_ind_q);
 	atomic_set(&htt->num_mpdus_ready, 0);
@@ -974,16 +974,25 @@ static char *ath10k_get_tid(struct ieee80211_hdr *hdr, char *out, size_t size)
 	return out;
 }
 
-static void ath10k_process_rx(struct ath10k *ar,
-			      struct ieee80211_rx_status *rx_status,
-			      struct sk_buff *skb)
+static void ath10k_htt_rx_h_queue_msdu(struct ath10k *ar,
+				       struct ieee80211_rx_status *rx_status,
+				       struct sk_buff *skb)
+{
+	struct ieee80211_rx_status *status;
+
+	status = IEEE80211_SKB_RXCB(skb);
+	*status = *rx_status;
+
+	__skb_queue_tail(&ar->htt.rx_msdus_q, skb);
+}
+
+static void ath10k_process_rx(struct ath10k *ar, struct sk_buff *skb)
 {
 	struct ieee80211_rx_status *status;
 	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
 	char tid[32];
 
 	status = IEEE80211_SKB_RXCB(skb);
-	*status = *rx_status;
 
 	ath10k_dbg(ar, ATH10K_DBG_DATA,
 		   "rx skb %pK len %u peer %pM %s %s sn %u %s%s%s%s%s%s %srate_idx %u vht_nss %u freq %u band %u flag 0x%x fcs-err %i mic-err %i amsdu-more %i\n",
@@ -1517,7 +1526,7 @@ static void ath10k_htt_rx_h_mpdu(struct ath10k *ar,
 	}
 }
 
-static void ath10k_htt_rx_h_deliver(struct ath10k *ar,
+static void ath10k_htt_rx_h_enqueue(struct ath10k *ar,
 				    struct sk_buff_head *amsdu,
 				    struct ieee80211_rx_status *status)
 {
@@ -1540,7 +1549,7 @@ static void ath10k_htt_rx_h_deliver(struct ath10k *ar,
 			status->flag |= RX_FLAG_ALLOW_SAME_PN;
 		}
 
-		ath10k_process_rx(ar, status, msdu);
+		ath10k_htt_rx_h_queue_msdu(ar, status, msdu);
 	}
 }
 
@@ -1652,7 +1661,7 @@ static int ath10k_htt_rx_handle_amsdu(struct ath10k_htt *htt)
 	struct ath10k *ar = htt->ar;
 	struct ieee80211_rx_status *rx_status = &htt->rx_status;
 	struct sk_buff_head amsdu;
-	int ret, num_msdus;
+	int ret;
 
 	__skb_queue_head_init(&amsdu);
 
@@ -1674,7 +1683,6 @@ static int ath10k_htt_rx_handle_amsdu(struct ath10k_htt *htt)
 		return ret;
 	}
 
-	num_msdus = skb_queue_len(&amsdu);
 	ath10k_htt_rx_h_ppdu(ar, &amsdu, rx_status, 0xffff);
 
 	/* only for ret = 1 indicates chained msdus */
@@ -1683,9 +1691,9 @@ static int ath10k_htt_rx_handle_amsdu(struct ath10k_htt *htt)
 
 	ath10k_htt_rx_h_filter(ar, &amsdu, rx_status);
 	ath10k_htt_rx_h_mpdu(ar, &amsdu, rx_status, true);
-	ath10k_htt_rx_h_deliver(ar, &amsdu, rx_status);
+	ath10k_htt_rx_h_enqueue(ar, &amsdu, rx_status);
 
-	return num_msdus;
+	return 0;
 }
 
 static void ath10k_htt_rx_proc_rx_ind(struct ath10k_htt *htt,
@@ -1893,15 +1901,14 @@ static void ath10k_htt_rx_h_rx_offload_prot(struct ieee80211_rx_status *status,
 			RX_FLAG_MMIC_STRIPPED;
 }
 
-static int ath10k_htt_rx_h_rx_offload(struct ath10k *ar,
-				      struct sk_buff_head *list)
+static void ath10k_htt_rx_h_rx_offload(struct ath10k *ar,
+				       struct sk_buff_head *list)
 {
 	struct ath10k_htt *htt = &ar->htt;
 	struct ieee80211_rx_status *status = &htt->rx_status;
 	struct htt_rx_offload_msdu *rx;
 	struct sk_buff *msdu;
 	size_t offset;
-	int num_msdu = 0;
 
 	while ((msdu = __skb_dequeue(list))) {
 		/* Offloaded frames don't have Rx descriptor. Instead they have
@@ -1940,10 +1947,8 @@ static int ath10k_htt_rx_h_rx_offload(struct ath10k *ar,
 
 		ath10k_htt_rx_h_rx_offload_prot(status, msdu);
 		ath10k_htt_rx_h_channel(ar, status, NULL, rx->vdev_id);
-		ath10k_process_rx(ar, status, msdu);
-		num_msdu++;
+		ath10k_htt_rx_h_queue_msdu(ar, status, msdu);
 	}
-	return num_msdu;
 }
 
 static int ath10k_htt_rx_in_ord_ind(struct ath10k *ar, struct sk_buff *skb)
@@ -1959,7 +1964,7 @@ static int ath10k_htt_rx_in_ord_ind(struct ath10k *ar, struct sk_buff *skb)
 	u8 tid;
 	bool offload;
 	bool frag;
-	int ret, num_msdus = 0;
+	int ret;
 
 	lockdep_assert_held(&htt->rx_ring.lock);
 
@@ -2001,7 +2006,7 @@ static int ath10k_htt_rx_in_ord_ind(struct ath10k *ar, struct sk_buff *skb)
 	 * separately.
 	 */
 	if (offload)
-		num_msdus = ath10k_htt_rx_h_rx_offload(ar, &list);
+		ath10k_htt_rx_h_rx_offload(ar, &list);
 
 	while (!skb_queue_empty(&list)) {
 		__skb_queue_head_init(&amsdu);
@@ -2014,11 +2019,10 @@ static int ath10k_htt_rx_in_ord_ind(struct ath10k *ar, struct sk_buff *skb)
 			 * better to report something than nothing though. This
 			 * should still give an idea about rx rate to the user.
 			 */
-			num_msdus += skb_queue_len(&amsdu);
 			ath10k_htt_rx_h_ppdu(ar, &amsdu, status, vdev_id);
 			ath10k_htt_rx_h_filter(ar, &amsdu, status);
 			ath10k_htt_rx_h_mpdu(ar, &amsdu, status, false);
-			ath10k_htt_rx_h_deliver(ar, &amsdu, status);
+			ath10k_htt_rx_h_enqueue(ar, &amsdu, status);
 			break;
 		case -EAGAIN:
 			/* fall through */
@@ -2030,7 +2034,7 @@ static int ath10k_htt_rx_in_ord_ind(struct ath10k *ar, struct sk_buff *skb)
 			return -EIO;
 		}
 	}
-	return num_msdus;
+	return ret;
 }
 
 static void ath10k_htt_rx_tx_fetch_resp_id_confirm(struct ath10k *ar,
@@ -2449,6 +2453,62 @@ static void ath10k_htt_fetch_peer_stats(struct ath10k *ar,
 	rcu_read_unlock();
 }
 
+static void ath10k_fetch_10_2_tx_stats(struct ath10k *ar, u8 *data)
+{
+	struct ath10k_pktlog_hdr *hdr = (struct ath10k_pktlog_hdr *)data;
+	struct ath10k_per_peer_tx_stats *p_tx_stats = &ar->peer_tx_stats;
+	struct ath10k_10_2_peer_tx_stats *tx_stats;
+	struct ieee80211_sta *sta;
+	struct ath10k_peer *peer;
+	u16 log_type = __le16_to_cpu(hdr->log_type);
+	u32 peer_id = 0, i;
+
+	if (log_type != ATH_PKTLOG_TYPE_TX_STAT)
+		return;
+
+	tx_stats = (struct ath10k_10_2_peer_tx_stats *)((hdr->payload) +
+		    ATH10K_10_2_TX_STATS_OFFSET);
+
+	if (!tx_stats->tx_ppdu_cnt)
+		return;
+
+	peer_id = tx_stats->peer_id;
+
+	rcu_read_lock();
+	spin_lock_bh(&ar->data_lock);
+	peer = ath10k_peer_find_by_id(ar, peer_id);
+	if (!peer) {
+		ath10k_warn(ar, "Invalid peer id %d in peer stats buffer\n",
+			    peer_id);
+		goto out;
+	}
+
+	sta = peer->sta;
+	for (i = 0; i < tx_stats->tx_ppdu_cnt; i++) {
+		p_tx_stats->succ_bytes =
+			__le16_to_cpu(tx_stats->success_bytes[i]);
+		p_tx_stats->retry_bytes =
+			__le16_to_cpu(tx_stats->retry_bytes[i]);
+		p_tx_stats->failed_bytes =
+			__le16_to_cpu(tx_stats->failed_bytes[i]);
+		p_tx_stats->ratecode = tx_stats->ratecode[i];
+		p_tx_stats->flags = tx_stats->flags[i];
+		p_tx_stats->succ_pkts = tx_stats->success_pkts[i];
+		p_tx_stats->retry_pkts = tx_stats->retry_pkts[i];
+		p_tx_stats->failed_pkts = tx_stats->failed_pkts[i];
+
+		ath10k_update_per_peer_tx_stats(ar, sta, p_tx_stats);
+	}
+	spin_unlock_bh(&ar->data_lock);
+	rcu_read_unlock();
+
+	return;
+
+out:
+	spin_unlock_bh(&ar->data_lock);
+	rcu_read_unlock();
+}
+
 bool ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb)
 {
 	struct ath10k_htt *htt = &ar->htt;
@@ -2566,6 +2626,10 @@ bool ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb)
 					skb->len -
 					offsetof(struct htt_resp,
 						 pktlog_msg.payload));
+
+		if (ath10k_peer_stats_enabled(ar))
+			ath10k_fetch_10_2_tx_stats(ar,
+						   resp->pktlog_msg.payload);
 		break;
 	}
 	case HTT_T2H_MSG_TYPE_RX_FLUSH: {
@@ -2631,6 +2695,24 @@ void ath10k_htt_rx_pktlog_completion_handler(struct ath10k *ar,
 }
 EXPORT_SYMBOL(ath10k_htt_rx_pktlog_completion_handler);
 
+static int ath10k_htt_rx_deliver_msdu(struct ath10k *ar, int quota, int budget)
+{
+	struct sk_buff *skb;
+
+	while (quota < budget) {
+		if (skb_queue_empty(&ar->htt.rx_msdus_q))
+			break;
+
+		skb = __skb_dequeue(&ar->htt.rx_msdus_q);
+		if (!skb)
+			break;
+		ath10k_process_rx(ar, skb);
+		quota++;
+	}
+
+	return quota;
+}
+
 int ath10k_htt_txrx_compl_task(struct ath10k *ar, int budget)
 {
 	struct ath10k_htt *htt = &ar->htt;
@@ -2638,63 +2720,44 @@ int ath10k_htt_txrx_compl_task(struct ath10k *ar, int budget)
 	struct sk_buff_head tx_ind_q;
 	struct sk_buff *skb;
 	unsigned long flags;
-	int quota = 0, done, num_rx_msdus;
+	int quota = 0, done, ret;
 	bool resched_napi = false;
 
 	__skb_queue_head_init(&tx_ind_q);
 
-	/* Since in-ord-ind can deliver more than 1 A-MSDU in single event,
-	 * process it first to utilize full available quota.
+	/* Process pending frames before dequeuing more data
+	 * from hardware.
 	 */
-	while (quota < budget) {
-		if (skb_queue_empty(&htt->rx_in_ord_compl_q))
-			break;
+	quota = ath10k_htt_rx_deliver_msdu(ar, quota, budget);
+	if (quota == budget) {
+		resched_napi = true;
+		goto exit;
+	}
 
-		skb = __skb_dequeue(&htt->rx_in_ord_compl_q);
-		if (!skb) {
-			resched_napi = true;
-			goto exit;
-		}
-
+	while ((skb = __skb_dequeue(&htt->rx_in_ord_compl_q))) {
 		spin_lock_bh(&htt->rx_ring.lock);
-		num_rx_msdus = ath10k_htt_rx_in_ord_ind(ar, skb);
+		ret = ath10k_htt_rx_in_ord_ind(ar, skb);
 		spin_unlock_bh(&htt->rx_ring.lock);
-		if (num_rx_msdus < 0) {
-			resched_napi = true;
-			goto exit;
-		}
 
 		dev_kfree_skb_any(skb);
-		if (num_rx_msdus > 0)
-			quota += num_rx_msdus;
-
-		if ((quota > ATH10K_NAPI_QUOTA_LIMIT) &&
-		    !skb_queue_empty(&htt->rx_in_ord_compl_q)) {
+		if (ret == -EIO) {
 			resched_napi = true;
 			goto exit;
 		}
 	}
 
-	while (quota < budget) {
-		/* no more data to receive */
-		if (!atomic_read(&htt->num_mpdus_ready))
-			break;
-
-		num_rx_msdus = ath10k_htt_rx_handle_amsdu(htt);
-		if (num_rx_msdus < 0) {
+	while (atomic_read(&htt->num_mpdus_ready)) {
+		ret = ath10k_htt_rx_handle_amsdu(htt);
+		if (ret == -EIO) {
 			resched_napi = true;
 			goto exit;
 		}
-
-		quota += num_rx_msdus;
 		atomic_dec(&htt->num_mpdus_ready);
-		if ((quota > ATH10K_NAPI_QUOTA_LIMIT) &&
-		    atomic_read(&htt->num_mpdus_ready)) {
-			resched_napi = true;
-			goto exit;
-		}
 	}
 
+	/* Deliver received data after processing data from hardware */
+	quota = ath10k_htt_rx_deliver_msdu(ar, quota, budget);
+
 	/* From NAPI documentation:
 	 *  The napi poll() function may also process TX completions, in which
 	 *  case if it processes the entire TX ring then it should count that
diff --git a/drivers/net/wireless/ath/ath10k/hw.c b/drivers/net/wireless/ath/ath10k/hw.c
index 88955bb..c31eea6 100644
--- a/drivers/net/wireless/ath/ath10k/hw.c
+++ b/drivers/net/wireless/ath/ath10k/hw.c
@@ -931,3 +931,5 @@ const struct ath10k_hw_ops qca6174_ops = {
 	.set_coverage_class = ath10k_hw_qca988x_set_coverage_class,
 	.enable_pll_clk = ath10k_hw_qca6174_enable_pll_clock,
 };
+
+const struct ath10k_hw_ops wcn3990_ops = {};
diff --git a/drivers/net/wireless/ath/ath10k/hw.h b/drivers/net/wireless/ath/ath10k/hw.h
index 05f26e5..90ad39b 100644
--- a/drivers/net/wireless/ath/ath10k/hw.h
+++ b/drivers/net/wireless/ath/ath10k/hw.h
@@ -128,6 +128,10 @@ enum qca9377_chip_id_rev {
 #define QCA4019_HW_1_0_BOARD_DATA_FILE "board.bin"
 #define QCA4019_HW_1_0_PATCH_LOAD_ADDR  0x1234
 
+/* WCN3990 1.0 definitions */
+#define WCN3990_HW_1_0_DEV_VERSION	ATH10K_HW_WCN3990
+#define WCN3990_HW_1_0_FW_DIR		ATH10K_FW_DIR "/WCN3990/hw3.0"
+
 #define ATH10K_FW_FILE_BASE		"firmware"
 #define ATH10K_FW_API_MAX		6
 #define ATH10K_FW_API_MIN		2
@@ -553,6 +557,10 @@ struct ath10k_hw_params {
 
 	/* Number of ciphers supported (i.e First N) in cipher_suites array */
 	int n_cipher_suites;
+
+	u32 num_peers;
+	u32 ast_skid_limit;
+	u32 num_wds_entries;
 };
 
 struct htt_rx_desc;
@@ -567,6 +575,7 @@ struct ath10k_hw_ops {
 extern const struct ath10k_hw_ops qca988x_ops;
 extern const struct ath10k_hw_ops qca99x0_ops;
 extern const struct ath10k_hw_ops qca6174_ops;
+extern const struct ath10k_hw_ops wcn3990_ops;
 
 extern const struct ath10k_hw_clk_params qca6174_clk[];
 
@@ -663,6 +672,11 @@ ath10k_rx_desc_get_l3_pad_bytes(struct ath10k_hw_params *hw,
 #define TARGET_TLV_NUM_MSDU_DESC		(1024 + 32)
 #define TARGET_TLV_NUM_WOW_PATTERNS		22
 
+/* Target specific defines for WMI-HL-1.0 firmware */
+#define TARGET_HL_10_TLV_NUM_PEERS		14
+#define TARGET_HL_10_TLV_AST_SKID_LIMIT		6
+#define TARGET_HL_10_TLV_NUM_WDS_ENTRIES	2
+
 /* Diagnostic Window */
 #define CE_DIAG_PIPE	7
 
diff --git a/drivers/net/wireless/ath/ath10k/mac.c b/drivers/net/wireless/ath/ath10k/mac.c
index 0a947ee..75726f1 100644
--- a/drivers/net/wireless/ath/ath10k/mac.c
+++ b/drivers/net/wireless/ath/ath10k/mac.c
@@ -2563,7 +2563,7 @@ static void ath10k_peer_assoc_h_qos(struct ath10k *ar,
 		}
 		break;
 	case WMI_VDEV_TYPE_STA:
-		if (vif->bss_conf.qos)
+		if (sta->wme)
 			arg->peer_flags |= arvif->ar->wmi.peer_flags->qos;
 		break;
 	case WMI_VDEV_TYPE_IBSS:
@@ -3574,7 +3574,9 @@ ath10k_mac_tx_h_get_txpath(struct ath10k *ar,
 		return ATH10K_MAC_TX_HTT;
 	case ATH10K_HW_TXRX_MGMT:
 		if (test_bit(ATH10K_FW_FEATURE_HAS_WMI_MGMT_TX,
-			     ar->running_fw->fw_file.fw_features))
+			     ar->running_fw->fw_file.fw_features) ||
+			     test_bit(WMI_SERVICE_MGMT_TX_WMI,
+				      ar->wmi.svc_map))
 			return ATH10K_MAC_TX_WMI_MGMT;
 		else if (ar->htt.target_version_major >= 3)
 			return ATH10K_MAC_TX_HTT;
@@ -6201,6 +6203,16 @@ static int ath10k_sta_state(struct ieee80211_hw *hw,
 			   "mac vdev %d peer delete %pM sta %pK (sta gone)\n",
 			   arvif->vdev_id, sta->addr, sta);
 
+		if (sta->tdls) {
+			ret = ath10k_mac_tdls_peer_update(ar, arvif->vdev_id,
+							  sta,
+							  WMI_TDLS_PEER_STATE_TEARDOWN);
+			if (ret)
+				ath10k_warn(ar, "failed to update tdls peer state for %pM state %d: %i\n",
+					    sta->addr,
+					    WMI_TDLS_PEER_STATE_TEARDOWN, ret);
+		}
+
 		ret = ath10k_peer_delete(ar, arvif->vdev_id, sta->addr);
 		if (ret)
 			ath10k_warn(ar, "failed to delete peer %pM for vdev %d: %i\n",
@@ -7536,6 +7548,16 @@ ath10k_mac_op_assign_vif_chanctx(struct ieee80211_hw *hw,
 				    arvif->vdev_id, ret);
 	}
 
+	if (ath10k_peer_stats_enabled(ar)) {
+		ar->pktlog_filter |= ATH10K_PKTLOG_PEER_STATS;
+		ret = ath10k_wmi_pdev_pktlog_enable(ar,
+						    ar->pktlog_filter);
+		if (ret) {
+			ath10k_warn(ar, "failed to enable pktlog %d\n", ret);
+			goto err_stop;
+		}
+	}
+
 	mutex_unlock(&ar->conf_mutex);
 	return 0;
 
@@ -7620,6 +7642,34 @@ static void ath10k_mac_op_sta_pre_rcu_remove(struct ieee80211_hw *hw,
 			peer->removed = true;
 }
 
+static void ath10k_sta_statistics(struct ieee80211_hw *hw,
+				  struct ieee80211_vif *vif,
+				  struct ieee80211_sta *sta,
+				  struct station_info *sinfo)
+{
+	struct ath10k_sta *arsta = (struct ath10k_sta *)sta->drv_priv;
+	struct ath10k *ar = arsta->arvif->ar;
+
+	if (!ath10k_peer_stats_enabled(ar))
+		return;
+
+	sinfo->rx_duration = arsta->rx_duration;
+	sinfo->filled |= 1ULL << NL80211_STA_INFO_RX_DURATION;
+
+	if (!arsta->txrate.legacy && !arsta->txrate.nss)
+		return;
+
+	if (arsta->txrate.legacy) {
+		sinfo->txrate.legacy = arsta->txrate.legacy;
+	} else {
+		sinfo->txrate.mcs = arsta->txrate.mcs;
+		sinfo->txrate.nss = arsta->txrate.nss;
+		sinfo->txrate.bw = arsta->txrate.bw;
+	}
+	sinfo->txrate.flags = arsta->txrate.flags;
+	sinfo->filled |= 1ULL << NL80211_STA_INFO_TX_BITRATE;
+}
+
 static const struct ieee80211_ops ath10k_ops = {
 	.tx				= ath10k_mac_op_tx,
 	.wake_tx_queue			= ath10k_mac_op_wake_tx_queue,
@@ -7661,6 +7711,7 @@ static const struct ieee80211_ops ath10k_ops = {
 	.unassign_vif_chanctx		= ath10k_mac_op_unassign_vif_chanctx,
 	.switch_vif_chanctx		= ath10k_mac_op_switch_vif_chanctx,
 	.sta_pre_rcu_remove		= ath10k_mac_op_sta_pre_rcu_remove,
+	.sta_statistics			= ath10k_sta_statistics,
 
 	CFG80211_TESTMODE_CMD(ath10k_tm_cmd)
 
@@ -7671,7 +7722,6 @@ static const struct ieee80211_ops ath10k_ops = {
 #endif
 #ifdef CONFIG_MAC80211_DEBUGFS
 	.sta_add_debugfs		= ath10k_sta_add_debugfs,
-	.sta_statistics			= ath10k_sta_statistics,
 #endif
 };
 
@@ -8329,15 +8379,6 @@ int ath10k_mac_register(struct ath10k *ar)
 			ath10k_warn(ar, "failed to initialise DFS pattern detector\n");
 	}
 
-	/* Current wake_tx_queue implementation imposes a significant
-	 * performance penalty in some setups. The tx scheduling code needs
-	 * more work anyway so disable the wake_tx_queue unless firmware
-	 * supports the pull-push mechanism.
-	 */
-	if (!test_bit(ATH10K_FW_FEATURE_PEER_FLOW_CONTROL,
-		      ar->running_fw->fw_file.fw_features))
-		ar->ops->wake_tx_queue = NULL;
-
 	ret = ath10k_mac_init_rd(ar);
 	if (ret) {
 		ath10k_err(ar, "failed to derive regdom: %d\n", ret);
diff --git a/drivers/net/wireless/ath/ath10k/spectral.h b/drivers/net/wireless/ath/ath10k/spectral.h
index 89b0ad7..b2a2e8a 100644
--- a/drivers/net/wireless/ath/ath10k/spectral.h
+++ b/drivers/net/wireless/ath/ath10k/spectral.h
@@ -44,7 +44,7 @@ enum ath10k_spectral_mode {
 	SPECTRAL_MANUAL,
 };
 
-#ifdef CONFIG_ATH10K_DEBUGFS
+#ifdef CONFIG_ATH10K_SPECTRAL
 
 int ath10k_spectral_process_fft(struct ath10k *ar,
 				struct wmi_phyerr_ev_arg *phyerr,
@@ -85,6 +85,6 @@ static inline void ath10k_spectral_destroy(struct ath10k *ar)
 {
 }
 
-#endif /* CONFIG_ATH10K_DEBUGFS */
+#endif /* CONFIG_ATH10K_SPECTRAL */
 
 #endif /* SPECTRAL_H */
diff --git a/drivers/net/wireless/ath/ath10k/wmi-ops.h b/drivers/net/wireless/ath/ath10k/wmi-ops.h
index 2fc3f24..41eef94 100644
--- a/drivers/net/wireless/ath/ath10k/wmi-ops.h
+++ b/drivers/net/wireless/ath/ath10k/wmi-ops.h
@@ -377,6 +377,7 @@ ath10k_wmi_mgmt_tx(struct ath10k *ar, struct sk_buff *msdu)
 	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(msdu);
 	struct sk_buff *skb;
 	int ret;
+	u32 mgmt_tx_cmdid;
 
 	if (!ar->wmi.ops->gen_mgmt_tx)
 		return -EOPNOTSUPP;
@@ -385,7 +386,13 @@ ath10k_wmi_mgmt_tx(struct ath10k *ar, struct sk_buff *msdu)
 	if (IS_ERR(skb))
 		return PTR_ERR(skb);
 
-	ret = ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->mgmt_tx_cmdid);
+	if (test_bit(ATH10K_FW_FEATURE_MGMT_TX_BY_REF,
+		     ar->running_fw->fw_file.fw_features))
+		mgmt_tx_cmdid = ar->wmi.cmd->mgmt_tx_send_cmdid;
+	else
+		mgmt_tx_cmdid = ar->wmi.cmd->mgmt_tx_cmdid;
+
+	ret = ath10k_wmi_cmd_send(ar, skb, mgmt_tx_cmdid);
 	if (ret)
 		return ret;
 
diff --git a/drivers/net/wireless/ath/ath10k/wmi-tlv.c b/drivers/net/wireless/ath/ath10k/wmi-tlv.c
index 7616c1c4..8d53063b 100644
--- a/drivers/net/wireless/ath/ath10k/wmi-tlv.c
+++ b/drivers/net/wireless/ath/ath10k/wmi-tlv.c
@@ -917,33 +917,69 @@ ath10k_wmi_tlv_parse_mem_reqs(struct ath10k *ar, u16 tag, u16 len,
 	return -ENOMEM;
 }
 
-static int ath10k_wmi_tlv_op_pull_svc_rdy_ev(struct ath10k *ar,
-					     struct sk_buff *skb,
-					     struct wmi_svc_rdy_ev_arg *arg)
-{
-	const void **tb;
+struct wmi_tlv_svc_rdy_parse {
 	const struct hal_reg_capabilities *reg;
 	const struct wmi_tlv_svc_rdy_ev *ev;
 	const __le32 *svc_bmap;
 	const struct wlan_host_mem_req *mem_reqs;
+	bool svc_bmap_done;
+	bool dbs_hw_mode_done;
+};
+
+static int ath10k_wmi_tlv_svc_rdy_parse(struct ath10k *ar, u16 tag, u16 len,
+					const void *ptr, void *data)
+{
+	struct wmi_tlv_svc_rdy_parse *svc_rdy = data;
+
+	switch (tag) {
+	case WMI_TLV_TAG_STRUCT_SERVICE_READY_EVENT:
+		svc_rdy->ev = ptr;
+		break;
+	case WMI_TLV_TAG_STRUCT_HAL_REG_CAPABILITIES:
+		svc_rdy->reg = ptr;
+		break;
+	case WMI_TLV_TAG_ARRAY_STRUCT:
+		svc_rdy->mem_reqs = ptr;
+		break;
+	case WMI_TLV_TAG_ARRAY_UINT32:
+		if (!svc_rdy->svc_bmap_done) {
+			svc_rdy->svc_bmap_done = true;
+			svc_rdy->svc_bmap = ptr;
+		} else if (!svc_rdy->dbs_hw_mode_done) {
+			svc_rdy->dbs_hw_mode_done = true;
+		}
+		break;
+	default:
+		break;
+	}
+	return 0;
+}
+
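
[Editor's note: the switch from ath10k_wmi_tlv_parse_alloc() to a streaming ath10k_wmi_tlv_iter() callback avoids a per-event allocation and, unlike a tag-indexed table, can tell apart repeated tags (the two WMI_TLV_TAG_ARRAY_UINT32 blobs: service bitmap first, DBS HW modes second). A self-contained sketch of such a tag/length/value walk; the header layout is illustrative, ath10k's real one is in wmi-tlv.h:

	#include <stdint.h>
	#include <stdio.h>
	#include <string.h>

	struct tlv_hdr {                 /* illustrative: 16-bit tag, 16-bit len */
		uint16_t tag;
		uint16_t len;
	};

	typedef int (*tlv_cb)(uint16_t tag, uint16_t len, const void *value,
			      void *data);

	static int tlv_iter(const uint8_t *buf, size_t buf_len, tlv_cb cb,
			    void *data)
	{
		while (buf_len >= sizeof(struct tlv_hdr)) {
			struct tlv_hdr hdr;
			int ret;

			memcpy(&hdr, buf, sizeof(hdr));
			buf += sizeof(hdr);
			buf_len -= sizeof(hdr);
			if (hdr.len > buf_len)
				return -1;      /* truncated TLV */
			ret = cb(hdr.tag, hdr.len, buf, data);
			if (ret)
				return ret;     /* callback may stop the walk */
			buf += hdr.len;
			buf_len -= hdr.len;
		}
		return buf_len ? -1 : 0;
	}

	static int count_tlvs(uint16_t tag, uint16_t len, const void *value,
			      void *data)
	{
		(void)tag; (void)len; (void)value;
		++*(int *)data;
		return 0;
	}

	int main(void)
	{
		/* two TLVs; little-endian host assumed for the raw bytes */
		uint8_t buf[] = { 1, 0, 2, 0, 0xaa, 0xbb,  /* tag 1, len 2 */
				  2, 0, 0, 0 };            /* tag 2, len 0 */
		int n = 0;

		if (!tlv_iter(buf, sizeof(buf), count_tlvs, &n))
			printf("%d tlvs\n", n);            /* prints: 2 tlvs */
		return 0;
	}
]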
+static int ath10k_wmi_tlv_op_pull_svc_rdy_ev(struct ath10k *ar,
+					     struct sk_buff *skb,
+					     struct wmi_svc_rdy_ev_arg *arg)
+{
+	const struct hal_reg_capabilities *reg;
+	const struct wmi_tlv_svc_rdy_ev *ev;
+	const __le32 *svc_bmap;
+	const struct wlan_host_mem_req *mem_reqs;
+	struct wmi_tlv_svc_rdy_parse svc_rdy = { };
 	int ret;
 
-	tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC);
-	if (IS_ERR(tb)) {
-		ret = PTR_ERR(tb);
+	ret = ath10k_wmi_tlv_iter(ar, skb->data, skb->len,
+				  ath10k_wmi_tlv_svc_rdy_parse, &svc_rdy);
+	if (ret) {
 		ath10k_warn(ar, "failed to parse tlv: %d\n", ret);
 		return ret;
 	}
 
-	ev = tb[WMI_TLV_TAG_STRUCT_SERVICE_READY_EVENT];
-	reg = tb[WMI_TLV_TAG_STRUCT_HAL_REG_CAPABILITIES];
-	svc_bmap = tb[WMI_TLV_TAG_ARRAY_UINT32];
-	mem_reqs = tb[WMI_TLV_TAG_ARRAY_STRUCT];
+	ev = svc_rdy.ev;
+	reg = svc_rdy.reg;
+	svc_bmap = svc_rdy.svc_bmap;
+	mem_reqs = svc_rdy.mem_reqs;
 
-	if (!ev || !reg || !svc_bmap || !mem_reqs) {
-		kfree(tb);
+	if (!ev || !reg || !svc_bmap || !mem_reqs)
 		return -EPROTO;
-	}
 
 	/* This is an internal ABI compatibility check for WMI TLV so check it
 	 * here instead of the generic WMI code.
@@ -961,7 +997,6 @@ static int ath10k_wmi_tlv_op_pull_svc_rdy_ev(struct ath10k *ar,
 	    __le32_to_cpu(ev->abi.abi_ver_ns1) != WMI_TLV_ABI_VER_NS1 ||
 	    __le32_to_cpu(ev->abi.abi_ver_ns2) != WMI_TLV_ABI_VER_NS2 ||
 	    __le32_to_cpu(ev->abi.abi_ver_ns3) != WMI_TLV_ABI_VER_NS3) {
-		kfree(tb);
 		return -ENOTSUPP;
 	}
 
@@ -982,12 +1017,10 @@ static int ath10k_wmi_tlv_op_pull_svc_rdy_ev(struct ath10k *ar,
 	ret = ath10k_wmi_tlv_iter(ar, mem_reqs, ath10k_wmi_tlv_len(mem_reqs),
 				  ath10k_wmi_tlv_parse_mem_reqs, arg);
 	if (ret) {
-		kfree(tb);
 		ath10k_warn(ar, "failed to parse mem_reqs tlv: %d\n", ret);
 		return ret;
 	}
 
-	kfree(tb);
 	return 0;
 }
 
@@ -1406,7 +1439,10 @@ static struct sk_buff *ath10k_wmi_tlv_op_gen_init(struct ath10k *ar)
 	cmd->num_host_mem_chunks = __cpu_to_le32(ar->wmi.num_mem_chunks);
 
 	cfg->num_vdevs = __cpu_to_le32(TARGET_TLV_NUM_VDEVS);
-	cfg->num_peers = __cpu_to_le32(TARGET_TLV_NUM_PEERS);
+
+	cfg->num_peers = __cpu_to_le32(ar->hw_params.num_peers);
+	cfg->ast_skid_limit = __cpu_to_le32(ar->hw_params.ast_skid_limit);
+	cfg->num_wds_entries = __cpu_to_le32(ar->hw_params.num_wds_entries);
 
 	if (test_bit(WMI_SERVICE_RX_FULL_REORDER, ar->wmi.svc_map)) {
 		cfg->num_offload_peers = __cpu_to_le32(TARGET_TLV_NUM_VDEVS);
@@ -1418,7 +1454,6 @@ static struct sk_buff *ath10k_wmi_tlv_op_gen_init(struct ath10k *ar)
 
 	cfg->num_peer_keys = __cpu_to_le32(2);
 	cfg->num_tids = __cpu_to_le32(TARGET_TLV_NUM_TIDS);
-	cfg->ast_skid_limit = __cpu_to_le32(0x10);
 	cfg->tx_chain_mask = __cpu_to_le32(0x7);
 	cfg->rx_chain_mask = __cpu_to_le32(0x7);
 	cfg->rx_timeout_pri[0] = __cpu_to_le32(0x64);
@@ -1434,7 +1469,6 @@ static struct sk_buff *ath10k_wmi_tlv_op_gen_init(struct ath10k *ar)
 	cfg->num_mcast_table_elems = __cpu_to_le32(0);
 	cfg->mcast2ucast_mode = __cpu_to_le32(0);
 	cfg->tx_dbg_log_size = __cpu_to_le32(0x400);
-	cfg->num_wds_entries = __cpu_to_le32(0x20);
 	cfg->dma_burst_size = __cpu_to_le32(0);
 	cfg->mac_aggr_delim = __cpu_to_le32(0);
 	cfg->rx_skip_defrag_timeout_dup_detection_check = __cpu_to_le32(0);
@@ -2450,6 +2484,82 @@ ath10k_wmi_tlv_op_gen_request_stats(struct ath10k *ar, u32 stats_mask)
 }
 
 static struct sk_buff *
+ath10k_wmi_tlv_op_gen_mgmt_tx(struct ath10k *ar, struct sk_buff *msdu)
+{
+	struct ath10k_skb_cb *cb = ATH10K_SKB_CB(msdu);
+	struct wmi_tlv_mgmt_tx_cmd *cmd;
+	struct wmi_tlv *tlv;
+	struct ieee80211_hdr *hdr;
+	struct sk_buff *skb;
+	void *ptr;
+	int len;
+	u32 buf_len = msdu->len;
+	u16 fc;
+	struct ath10k_vif *arvif;
+	dma_addr_t mgmt_frame_dma;
+	u32 vdev_id;
+
+	if (!cb->vif)
+		return ERR_PTR(-EINVAL);
+
+	hdr = (struct ieee80211_hdr *)msdu->data;
+	fc = le16_to_cpu(hdr->frame_control);
+	arvif = (void *)cb->vif->drv_priv;
+	vdev_id = arvif->vdev_id;
+
+	if (WARN_ON_ONCE(!ieee80211_is_mgmt(hdr->frame_control)))
+		return ERR_PTR(-EINVAL);
+
+	len = sizeof(*cmd) + 2 * sizeof(*tlv);
+
+	if ((ieee80211_is_action(hdr->frame_control) ||
+	     ieee80211_is_deauth(hdr->frame_control) ||
+	     ieee80211_is_disassoc(hdr->frame_control)) &&
+	     ieee80211_has_protected(hdr->frame_control)) {
+		len += IEEE80211_CCMP_MIC_LEN;
+		buf_len += IEEE80211_CCMP_MIC_LEN;
+	}
+
+	buf_len = min_t(u32, buf_len, WMI_TLV_MGMT_TX_FRAME_MAX_LEN);
+	buf_len = round_up(buf_len, 4);
+
+	len += buf_len;
+	len = round_up(len, 4);
+	skb = ath10k_wmi_alloc_skb(ar, len);
+	if (!skb)
+		return ERR_PTR(-ENOMEM);
+
+	ptr = (void *)skb->data;
+	tlv = ptr;
+	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_MGMT_TX_CMD);
+	tlv->len = __cpu_to_le16(sizeof(*cmd));
+	cmd = (void *)tlv->value;
+	cmd->vdev_id = __cpu_to_le32(vdev_id);
+	cmd->desc_id = 0;
+	cmd->chanfreq = 0;
+	cmd->buf_len = __cpu_to_le32(buf_len);
+	cmd->frame_len = __cpu_to_le32(msdu->len);
+	mgmt_frame_dma = dma_map_single(arvif->ar->dev, msdu->data,
+					msdu->len, DMA_TO_DEVICE);
+	if (!mgmt_frame_dma)
+		return ERR_PTR(-ENOMEM);
+
+	cmd->paddr = __cpu_to_le64(mgmt_frame_dma);
+
+	ptr += sizeof(*tlv);
+	ptr += sizeof(*cmd);
+
+	tlv = ptr;
+	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_BYTE);
+	tlv->len = __cpu_to_le16(buf_len);
+
+	ptr += sizeof(*tlv);
+	memcpy(ptr, msdu->data, buf_len);
+
+	return skb;
+}
+
+static struct sk_buff *
 ath10k_wmi_tlv_op_gen_force_fw_hang(struct ath10k *ar,
 				    enum wmi_force_fw_hang_type type,
 				    u32 delay_ms)
@@ -3258,6 +3368,7 @@ static struct wmi_cmd_map wmi_tlv_cmd_map = {
 	.bcn_filter_rx_cmdid = WMI_TLV_BCN_FILTER_RX_CMDID,
 	.prb_req_filter_rx_cmdid = WMI_TLV_PRB_REQ_FILTER_RX_CMDID,
 	.mgmt_tx_cmdid = WMI_TLV_MGMT_TX_CMDID,
+	.mgmt_tx_send_cmdid = WMI_TLV_MGMT_TX_SEND_CMD,
 	.prb_tmpl_cmdid = WMI_TLV_PRB_TMPL_CMDID,
 	.addba_clear_resp_cmdid = WMI_TLV_ADDBA_CLEAR_RESP_CMDID,
 	.addba_send_cmdid = WMI_TLV_ADDBA_SEND_CMDID,
@@ -3592,6 +3703,7 @@ static const struct wmi_ops wmi_tlv_ops = {
 	.gen_request_stats = ath10k_wmi_tlv_op_gen_request_stats,
 	.gen_force_fw_hang = ath10k_wmi_tlv_op_gen_force_fw_hang,
 	/* .gen_mgmt_tx = not implemented; HTT is used */
+	.gen_mgmt_tx = ath10k_wmi_tlv_op_gen_mgmt_tx,
 	.gen_dbglog_cfg = ath10k_wmi_tlv_op_gen_dbglog_cfg,
 	.gen_pktlog_enable = ath10k_wmi_tlv_op_gen_pktlog_enable,
 	.gen_pktlog_disable = ath10k_wmi_tlv_op_gen_pktlog_disable,
diff --git a/drivers/net/wireless/ath/ath10k/wmi-tlv.h b/drivers/net/wireless/ath/ath10k/wmi-tlv.h
index 22cf011..4faaa64 100644
--- a/drivers/net/wireless/ath/ath10k/wmi-tlv.h
+++ b/drivers/net/wireless/ath/ath10k/wmi-tlv.h
@@ -22,6 +22,7 @@
 #define WMI_TLV_CMD_UNSUPPORTED 0
 #define WMI_TLV_PDEV_PARAM_UNSUPPORTED 0
 #define WMI_TLV_VDEV_PARAM_UNSUPPORTED 0
+#define WMI_TLV_MGMT_TX_FRAME_MAX_LEN	64
 
 enum wmi_tlv_grp_id {
 	WMI_TLV_GRP_START = 0x3,
@@ -132,6 +133,7 @@ enum wmi_tlv_cmd_id {
 	WMI_TLV_PRB_REQ_FILTER_RX_CMDID,
 	WMI_TLV_MGMT_TX_CMDID,
 	WMI_TLV_PRB_TMPL_CMDID,
+	WMI_TLV_MGMT_TX_SEND_CMD,
 	WMI_TLV_ADDBA_CLEAR_RESP_CMDID = WMI_TLV_CMD(WMI_TLV_GRP_BA_NEG),
 	WMI_TLV_ADDBA_SEND_CMDID,
 	WMI_TLV_ADDBA_STATUS_CMDID,
@@ -890,6 +892,63 @@ enum wmi_tlv_tag {
 	WMI_TLV_TAG_STRUCT_SAP_OFL_DEL_STA_EVENT,
 	WMI_TLV_TAG_STRUCT_APFIND_CMD_PARAM,
 	WMI_TLV_TAG_STRUCT_APFIND_EVENT_HDR,
+	WMI_TLV_TAG_STRUCT_OCB_SET_SCHED_CMD,
+	WMI_TLV_TAG_STRUCT_OCB_SET_SCHED_EVENT,
+	WMI_TLV_TAG_STRUCT_OCB_SET_CONFIG_CMD,
+	WMI_TLV_TAG_STRUCT_OCB_SET_CONFIG_RESP_EVENT,
+	WMI_TLV_TAG_STRUCT_OCB_SET_UTC_TIME_CMD,
+	WMI_TLV_TAG_STRUCT_OCB_START_TIMING_ADVERT_CMD,
+	WMI_TLV_TAG_STRUCT_OCB_STOP_TIMING_ADVERT_CMD,
+	WMI_TLV_TAG_STRUCT_OCB_GET_TSF_TIMER_CMD,
+	WMI_TLV_TAG_STRUCT_OCB_GET_TSF_TIMER_RESP_EVENT,
+	WMI_TLV_TAG_STRUCT_DCC_GET_STATS_CMD,
+	WMI_TLV_TAG_STRUCT_DCC_CHANNEL_STATS_REQUEST,
+	WMI_TLV_TAG_STRUCT_DCC_GET_STATS_RESP_EVENT,
+	WMI_TLV_TAG_STRUCT_DCC_CLEAR_STATS_CMD,
+	WMI_TLV_TAG_STRUCT_DCC_UPDATE_NDL_CMD,
+	WMI_TLV_TAG_STRUCT_DCC_UPDATE_NDL_RESP_EVENT,
+	WMI_TLV_TAG_STRUCT_DCC_STATS_EVENT,
+	WMI_TLV_TAG_STRUCT_OCB_CHANNEL,
+	WMI_TLV_TAG_STRUCT_OCB_SCHEDULE_ELEMENT,
+	WMI_TLV_TAG_STRUCT_DCC_NDL_STATS_PER_CHANNEL,
+	WMI_TLV_TAG_STRUCT_DCC_NDL_CHAN,
+	WMI_TLV_TAG_STRUCT_QOS_PARAMETER,
+	WMI_TLV_TAG_STRUCT_DCC_NDL_ACTIVE_STATE_CONFIG,
+	WMI_TLV_TAG_STRUCT_ROAM_SCAN_EXTENDED_THRESHOLD_PARAM,
+	WMI_TLV_TAG_STRUCT_ROAM_FILTER_FIXED_PARAM,
+	WMI_TLV_TAG_STRUCT_PASSPOINT_CONFIG_CMD,
+	WMI_TLV_TAG_STRUCT_PASSPOINT_EVENT_HDR,
+	WMI_TLV_TAG_STRUCT_EXTSCAN_CONFIGURE_HOTLIST_SSID_MONITOR_CMD,
+	WMI_TLV_TAG_STRUCT_EXTSCAN_HOTLIST_SSID_MATCH_EVENT,
+	WMI_TLV_TAG_STRUCT_VDEV_TSF_TSTAMP_ACTION_CMD,
+	WMI_TLV_TAG_STRUCT_VDEV_TSF_REPORT_EVENT,
+	WMI_TLV_TAG_STRUCT_GET_FW_MEM_DUMP,
+	WMI_TLV_TAG_STRUCT_UPDATE_FW_MEM_DUMP,
+	WMI_TLV_TAG_STRUCT_FW_MEM_DUMP_PARAMS,
+	WMI_TLV_TAG_STRUCT_DEBUG_MESG_FLUSH,
+	WMI_TLV_TAG_STRUCT_DEBUG_MESG_FLUSH_COMPLETE,
+	WMI_TLV_TAG_STRUCT_PEER_SET_RATE_REPORT_CONDITION,
+	WMI_TLV_TAG_STRUCT_ROAM_SUBNET_CHANGE_CONFIG,
+	WMI_TLV_TAG_STRUCT_VDEV_SET_IE_CMD,
+	WMI_TLV_TAG_STRUCT_RSSI_BREACH_MONITOR_CONFIG,
+	WMI_TLV_TAG_STRUCT_RSSI_BREACH_EVENT,
+	WMI_TLV_TAG_STRUCT_EVENT_INITIAL_WAKEUP,
+	WMI_TLV_TAG_STRUCT_SOC_SET_PCL_CMD,
+	WMI_TLV_TAG_STRUCT_SOC_SET_HW_MODE_CMD,
+	WMI_TLV_TAG_STRUCT_SOC_SET_HW_MODE_RESPONSE_EVENT,
+	WMI_TLV_TAG_STRUCT_SOC_HW_MODE_TRANSITION_EVENT,
+	WMI_TLV_TAG_STRUCT_VDEV_TXRX_STREAMS,
+	WMI_TLV_TAG_STRUCT_SOC_SET_HW_MODE_RESPONSE_VDEV_MAC_ENTRY,
+	WMI_TLV_TAG_STRUCT_SOC_SET_DUAL_MAC_CONFIG_CMD,
+	WMI_TLV_TAG_STRUCT_SOC_SET_DUAL_MAC_CONFIG_RESPONSE_EVENT,
+	WMI_TLV_TAG_STRUCT_IOAC_SOCK_PATTERN_T,
+	WMI_TLV_TAG_STRUCT_WOW_ENABLE_ICMPV6_NA_FLT_CMD,
+	WMI_TLV_TAG_STRUCT_DIAG_EVENT_LOG_CONFIG,
+	WMI_TLV_TAG_STRUCT_DIAG_EVENT_LOG_SUPPORTED_EVENT,
+	WMI_TLV_TAG_STRUCT_PACKET_FILTER_CONFIG,
+	WMI_TLV_TAG_STRUCT_PACKET_FILTER_ENABLE,
+	WMI_TLV_TAG_STRUCT_SAP_SET_BLACKLIST_PARAM_CMD,
+	WMI_TLV_TAG_STRUCT_MGMT_TX_CMD,
 
 	WMI_TLV_TAG_MAX
 };
@@ -965,6 +1024,50 @@ enum wmi_tlv_service {
 	WMI_TLV_SERVICE_STA_RX_IPA_OFFLOAD_SUPPORT,
 	WMI_TLV_SERVICE_MDNS_OFFLOAD,
 	WMI_TLV_SERVICE_SAP_AUTH_OFFLOAD,
+	WMI_TLV_SERVICE_DUAL_BAND_SIMULTANEOUS_SUPPORT,
+	WMI_TLV_SERVICE_OCB,
+	WMI_TLV_SERVICE_AP_ARPNS_OFFLOAD,
+	WMI_TLV_SERVICE_PER_BAND_CHAINMASK_SUPPORT,
+	WMI_TLV_SERVICE_PACKET_FILTER_OFFLOAD,
+	WMI_TLV_SERVICE_MGMT_TX_HTT,
+	WMI_TLV_SERVICE_MGMT_TX_WMI,
+	WMI_TLV_SERVICE_EXT_MSG,
+	WMI_TLV_SERVICE_MAWC,
+	WMI_TLV_SERVICE_PEER_ASSOC_CONF,
+	WMI_TLV_SERVICE_EGAP,
+	WMI_TLV_SERVICE_STA_PMF_OFFLOAD,
+	WMI_TLV_SERVICE_UNIFIED_WOW_CAPABILITY,
+	WMI_TLV_SERVICE_ENHANCED_PROXY_STA,
+	WMI_TLV_SERVICE_ATF,
+	WMI_TLV_SERVICE_COEX_GPIO,
+	WMI_TLV_SERVICE_AUX_SPECTRAL_INTF,
+	WMI_TLV_SERVICE_AUX_CHAN_LOAD_INTF,
+	WMI_TLV_SERVICE_BSS_CHANNEL_INFO_64,
+	WMI_TLV_SERVICE_ENTERPRISE_MESH,
+	WMI_TLV_SERVICE_RESTRT_CHNL_SUPPORT,
+	WMI_TLV_SERVICE_BPF_OFFLOAD,
+	WMI_TLV_SERVICE_SYNC_DELETE_CMDS,
+	WMI_TLV_SERVICE_SMART_ANTENNA_SW_SUPPORT,
+	WMI_TLV_SERVICE_SMART_ANTENNA_HW_SUPPORT,
+	WMI_TLV_SERVICE_RATECTRL_LIMIT_MAX_MIN_RATES,
+	WMI_TLV_SERVICE_NAN_DATA,
+	WMI_TLV_SERVICE_NAN_RTT,
+	WMI_TLV_SERVICE_11AX,
+	WMI_TLV_SERVICE_DEPRECATED_REPLACE,
+	WMI_TLV_SERVICE_TDLS_CONN_TRACKER_IN_HOST_MODE,
+	WMI_TLV_SERVICE_ENHANCED_MCAST_FILTER,
+	WMI_TLV_SERVICE_PERIODIC_CHAN_STAT_SUPPORT,
+	WMI_TLV_SERVICE_MESH_11S,
+	WMI_TLV_SERVICE_HALF_RATE_QUARTER_RATE_SUPPORT,
+	WMI_TLV_SERVICE_VDEV_RX_FILTER,
+	WMI_TLV_SERVICE_P2P_LISTEN_OFFLOAD_SUPPORT,
+	WMI_TLV_SERVICE_MARK_FIRST_WAKEUP_PACKET,
+	WMI_TLV_SERVICE_MULTIPLE_MCAST_FILTER_SET,
+	WMI_TLV_SERVICE_HOST_MANAGED_RX_REORDER,
+	WMI_TLV_SERVICE_FLASH_RDWR_SUPPORT,
+	WMI_TLV_SERVICE_WLAN_STATS_REPORT,
+	WMI_TLV_SERVICE_TX_MSDU_ID_NEW_PARTITION_SUPPORT,
+	WMI_TLV_SERVICE_DFS_PHYERR_OFFLOAD,
 };
 
 #define WMI_SERVICE_IS_ENABLED(wmi_svc_bmap, svc_id, len) \
@@ -1121,6 +1224,8 @@ wmi_tlv_svc_map(const __le32 *in, unsigned long *out, size_t len)
 	       WMI_SERVICE_MDNS_OFFLOAD, len);
 	SVCMAP(WMI_TLV_SERVICE_SAP_AUTH_OFFLOAD,
 	       WMI_SERVICE_SAP_AUTH_OFFLOAD, len);
+	SVCMAP(WMI_TLV_SERVICE_MGMT_TX_WMI,
+	       WMI_SERVICE_MGMT_TX_WMI, len);
 }
 
 #undef SVCMAP
@@ -1643,4 +1748,12 @@ struct wmi_tlv_tx_pause_ev {
 
 void ath10k_wmi_tlv_attach(struct ath10k *ar);
 
+struct wmi_tlv_mgmt_tx_cmd {
+	__le32 vdev_id;
+	__le32 desc_id;
+	__le32 chanfreq;
+	__le64 paddr;
+	__le32 frame_len;
+	__le32 buf_len;
+} __packed;
 #endif
diff --git a/drivers/net/wireless/ath/ath10k/wmi.c b/drivers/net/wireless/ath/ath10k/wmi.c
index cad2e42..b6cbc02 100644
--- a/drivers/net/wireless/ath/ath10k/wmi.c
+++ b/drivers/net/wireless/ath/ath10k/wmi.c
@@ -29,6 +29,7 @@
 #include "p2p.h"
 #include "hw.h"
 #include "hif.h"
+#include "txrx.h"
 
 #define ATH10K_WMI_BARRIER_ECHO_ID 0xBA991E9
 #define ATH10K_WMI_BARRIER_TIMEOUT_HZ (3 * HZ)
@@ -4456,6 +4457,74 @@ void ath10k_wmi_event_pdev_tpc_config(struct ath10k *ar, struct sk_buff *skb)
 		   __le32_to_cpu(ev->rate_max));
 }
 
+static void
+ath10k_wmi_handle_tdls_peer_event(struct ath10k *ar, struct sk_buff *skb)
+{
+	struct wmi_tdls_peer_event *ev;
+	struct ath10k_peer *peer;
+	struct ath10k_vif *arvif;
+	int vdev_id;
+	int peer_status;
+	int peer_reason;
+	u8 reason;
+
+	if (skb->len < sizeof(*ev)) {
+		ath10k_err(ar, "received tdls peer event with invalid size (%d bytes)\n",
+			   skb->len);
+		return;
+	}
+
+	ev = (struct wmi_tdls_peer_event *)skb->data;
+	vdev_id = __le32_to_cpu(ev->vdev_id);
+	peer_status = __le32_to_cpu(ev->peer_status);
+	peer_reason = __le32_to_cpu(ev->peer_reason);
+
+	spin_lock_bh(&ar->data_lock);
+	peer = ath10k_peer_find(ar, vdev_id, ev->peer_macaddr.addr);
+	spin_unlock_bh(&ar->data_lock);
+
+	if (!peer) {
+		ath10k_warn(ar, "failed to find peer entry for %pM\n",
+			    ev->peer_macaddr.addr);
+		return;
+	}
+
+	switch (peer_status) {
+	case WMI_TDLS_SHOULD_TEARDOWN:
+		switch (peer_reason) {
+		case WMI_TDLS_TEARDOWN_REASON_PTR_TIMEOUT:
+		case WMI_TDLS_TEARDOWN_REASON_NO_RESPONSE:
+		case WMI_TDLS_TEARDOWN_REASON_RSSI:
+			reason = WLAN_REASON_TDLS_TEARDOWN_UNREACHABLE;
+			break;
+		default:
+			reason = WLAN_REASON_TDLS_TEARDOWN_UNSPECIFIED;
+			break;
+		}
+
+		arvif = ath10k_get_arvif(ar, vdev_id);
+		if (!arvif) {
+			ath10k_warn(ar, "received tdls peer event for invalid vdev id %u\n",
+				    vdev_id);
+			return;
+		}
+
+		ieee80211_tdls_oper_request(arvif->vif, ev->peer_macaddr.addr,
+					    NL80211_TDLS_TEARDOWN, reason,
+					    GFP_ATOMIC);
+
+		ath10k_dbg(ar, ATH10K_DBG_WMI,
+			   "received tdls teardown event for peer %pM reason %u\n",
+			   ev->peer_macaddr.addr, peer_reason);
+		break;
+	default:
+		ath10k_dbg(ar, ATH10K_DBG_WMI,
+			   "received unknown tdls peer event %u\n",
+			   peer_status);
+		break;
+	}
+}
+
 void ath10k_wmi_event_pdev_ftm_intg(struct ath10k *ar, struct sk_buff *skb)
 {
 	ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_PDEV_FTM_INTG_EVENTID\n");
@@ -5477,6 +5546,9 @@ static void ath10k_wmi_10_4_op_rx(struct ath10k *ar, struct sk_buff *skb)
 	case WMI_10_4_PDEV_TPC_CONFIG_EVENTID:
 		ath10k_wmi_event_pdev_tpc_config(ar, skb);
 		break;
+	case WMI_10_4_TDLS_PEER_EVENTID:
+		ath10k_wmi_handle_tdls_peer_event(ar, skb);
+		break;
 	default:
 		ath10k_warn(ar, "Unknown eventid: %d\n", id);
 		break;
diff --git a/drivers/net/wireless/ath/ath10k/wmi.h b/drivers/net/wireless/ath/ath10k/wmi.h
index c02b21c..f6d60dc 100644
--- a/drivers/net/wireless/ath/ath10k/wmi.h
+++ b/drivers/net/wireless/ath/ath10k/wmi.h
@@ -195,6 +195,7 @@ enum wmi_service {
 	WMI_SERVICE_SMART_LOGGING_SUPPORT,
 	WMI_SERVICE_TDLS_CONN_TRACKER_IN_HOST_MODE,
 	WMI_SERVICE_TDLS_EXPLICIT_MODE_ONLY,
+	WMI_SERVICE_MGMT_TX_WMI,
 
 	/* keep last */
 	WMI_SERVICE_MAX,
@@ -797,6 +798,7 @@ struct wmi_cmd_map {
 	u32 bcn_filter_rx_cmdid;
 	u32 prb_req_filter_rx_cmdid;
 	u32 mgmt_tx_cmdid;
+	u32 mgmt_tx_send_cmdid;
 	u32 prb_tmpl_cmdid;
 	u32 addba_clear_resp_cmdid;
 	u32 addba_send_cmdid;
@@ -5236,7 +5238,8 @@ enum wmi_10_4_vdev_param {
 #define WMI_VDEV_PARAM_TXBF_MU_TX_BFER BIT(3)
 
 #define WMI_TXBF_STS_CAP_OFFSET_LSB	4
-#define WMI_TXBF_STS_CAP_OFFSET_MASK	0xf0
+#define WMI_TXBF_STS_CAP_OFFSET_MASK	0x70
+#define WMI_TXBF_CONF_IMPLICIT_BF       BIT(7)
 #define WMI_BF_SOUND_DIM_OFFSET_LSB	8
 #define WMI_BF_SOUND_DIM_OFFSET_MASK	0xf00
 
diff --git a/drivers/net/wireless/ath/ath6kl/cfg80211.c b/drivers/net/wireless/ath/ath6kl/cfg80211.c
index b53eb2b..2ba8cf3 100644
--- a/drivers/net/wireless/ath/ath6kl/cfg80211.c
+++ b/drivers/net/wireless/ath/ath6kl/cfg80211.c
@@ -2766,7 +2766,6 @@ static int ath6kl_start_ap(struct wiphy *wiphy, struct net_device *dev,
 	struct ieee80211_mgmt *mgmt;
 	bool hidden = false;
 	u8 *ies;
-	int ies_len;
 	struct wmi_connect_cmd p;
 	int res;
 	int i, ret;
@@ -2804,7 +2803,6 @@ static int ath6kl_start_ap(struct wiphy *wiphy, struct net_device *dev,
 	ies = mgmt->u.beacon.variable;
 	if (ies > info->beacon.head + info->beacon.head_len)
 		return -EINVAL;
-	ies_len = info->beacon.head + info->beacon.head_len - ies;
 
 	if (info->ssid == NULL)
 		return -EINVAL;
diff --git a/drivers/net/wireless/ath/ath6kl/txrx.c b/drivers/net/wireless/ath/ath6kl/txrx.c
index 1379906..8da9506 100644
--- a/drivers/net/wireless/ath/ath6kl/txrx.c
+++ b/drivers/net/wireless/ath/ath6kl/txrx.c
@@ -1001,7 +1001,7 @@ static void aggr_slice_amsdu(struct aggr_info *p_aggr,
 
 	while (amsdu_len > mac_hdr_len) {
 		hdr = (struct ethhdr *) framep;
-		payload_8023_len = ntohs(hdr->h_proto);
+		payload_8023_len = be16_to_cpu(hdr->h_proto);
 
 		if (payload_8023_len < MIN_MSDU_SUBFRAME_PAYLOAD_LEN ||
 		    payload_8023_len > MAX_MSDU_SUBFRAME_PAYLOAD_LEN) {
diff --git a/drivers/net/wireless/ath/ath9k/Kconfig b/drivers/net/wireless/ath/ath9k/Kconfig
index 783a38f..1f352301 100644
--- a/drivers/net/wireless/ath/ath9k/Kconfig
+++ b/drivers/net/wireless/ath/ath9k/Kconfig
@@ -61,13 +61,12 @@
 	depends on ATH9K && DEBUG_FS
 	select MAC80211_DEBUGFS
 	select ATH9K_COMMON_DEBUG
-	select RELAY
 	---help---
 	  Say Y, if you need access to ath9k's statistics for
 	  interrupts, rate control, etc.
 
-	  Also required for changing debug message flags at run time.
-	  As well as access to the FFT/spectral data and TX99.
+	  Also required for changing debug message flags at run time and for
+	  TX99.
 
 config ATH9K_STATION_STATISTICS
 	bool "Detailed station statistics"
@@ -177,7 +176,6 @@
 	bool "Atheros ath9k_htc debugging"
 	depends on ATH9K_HTC && DEBUG_FS
 	select ATH9K_COMMON_DEBUG
-	select RELAY
 	---help---
 	  Say Y, if you need access to ath9k_htc's statistics.
 	  As well as access to the FFT/spectral data.
@@ -192,3 +190,11 @@
 
 	  Say Y, feeds the entropy directly from the WiFi driver to the input
 	  pool.
+
+config ATH9K_COMMON_SPECTRAL
+	bool "Atheros ath9k/ath9k_htc spectral scan support"
+	depends on ATH9K_DEBUGFS || ATH9K_HTC_DEBUGFS
+	select RELAY
+	default n
+	---help---
+	  Say Y to enable access to the FFT/spectral data via debugfs.
diff --git a/drivers/net/wireless/ath/ath9k/Makefile b/drivers/net/wireless/ath/ath9k/Makefile
index d804ce7..f71b2ad 100644
--- a/drivers/net/wireless/ath/ath9k/Makefile
+++ b/drivers/net/wireless/ath/ath9k/Makefile
@@ -62,8 +62,8 @@
 			common-init.o \
 			common-beacon.o \
 
-ath9k_common-$(CONFIG_ATH9K_COMMON_DEBUG) += common-debug.o \
-					     common-spectral.o
+ath9k_common-$(CONFIG_ATH9K_COMMON_DEBUG) += common-debug.o
+ath9k_common-$(CONFIG_ATH9K_COMMON_SPECTRAL) += common-spectral.o
 
 ath9k_htc-y +=	htc_hst.o \
 		hif_usb.o \
diff --git a/drivers/net/wireless/ath/ath9k/common-spectral.h b/drivers/net/wireless/ath/ath9k/common-spectral.h
index 5d1a51d..303ab47 100644
--- a/drivers/net/wireless/ath/ath9k/common-spectral.h
+++ b/drivers/net/wireless/ath/ath9k/common-spectral.h
@@ -151,7 +151,7 @@ static inline u8 spectral_bitmap_weight(u8 *bins)
 	return bins[0] & 0x3f;
 }
 
-#ifdef CONFIG_ATH9K_COMMON_DEBUG
+#ifdef CONFIG_ATH9K_COMMON_SPECTRAL
 void ath9k_cmn_spectral_init_debug(struct ath_spec_scan_priv *spec_priv, struct dentry *debugfs_phy);
 void ath9k_cmn_spectral_deinit_debug(struct ath_spec_scan_priv *spec_priv);
 
@@ -183,6 +183,6 @@ static inline int ath_cmn_process_fft(struct ath_spec_scan_priv *spec_priv,
 {
 	return 0;
 }
-#endif /* CONFIG_ATH9K_COMMON_DEBUG */
+#endif /* CONFIG_ATH9K_COMMON_SPECTRAL */
 
 #endif /* SPECTRAL_H */
diff --git a/drivers/net/wireless/ath/ath9k/dfs.c b/drivers/net/wireless/ath/ath9k/dfs.c
index 40a397f..6fee9a4 100644
--- a/drivers/net/wireless/ath/ath9k/dfs.c
+++ b/drivers/net/wireless/ath/ath9k/dfs.c
@@ -123,11 +123,9 @@ static bool ath9k_check_chirping(struct ath_softc *sc, u8 *data,
 			fft = (struct ath9k_dfs_fft_40 *) (data + 2);
 			ath_dbg(common, DFS, "fixing datalen by 2\n");
 		}
-		if (IS_CHAN_HT40MINUS(ah->curchan)) {
-			int temp = is_ctl;
-			is_ctl = is_ext;
-			is_ext = temp;
-		}
+		if (IS_CHAN_HT40MINUS(ah->curchan))
+			swap(is_ctl, is_ext);
+
 		for (i = 0; i < FFT_NUM_SAMPLES; i++)
 			max_bin[i] = ath9k_get_max_index_ht40(fft + i, is_ctl,
 							      is_ext);
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_main.c b/drivers/net/wireless/ath/ath9k/htc_drv_main.c
index f808e58..a82ad73 100644
--- a/drivers/net/wireless/ath/ath9k/htc_drv_main.c
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_main.c
@@ -1683,6 +1683,10 @@ static int ath9k_htc_ampdu_action(struct ieee80211_hw *hw,
 		ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
 		break;
 	case IEEE80211_AMPDU_TX_OPERATIONAL:
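+		/* reject out-of-range TIDs before indexing ista->tid_state[] */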
+		if (tid >= ATH9K_HTC_MAX_TID) {
+			ret = -EINVAL;
+			break;
+		}
 		ista = (struct ath9k_htc_sta *) sta->drv_priv;
 		spin_lock_bh(&priv->tx.tx_lock);
 		ista->tid_state[tid] = AGGR_OPERATIONAL;
diff --git a/drivers/net/wireless/ath/wcn36xx/hal.h b/drivers/net/wireless/ath/wcn36xx/hal.h
index b765c64..1829635 100644
--- a/drivers/net/wireless/ath/wcn36xx/hal.h
+++ b/drivers/net/wireless/ath/wcn36xx/hal.h
@@ -348,6 +348,13 @@ enum wcn36xx_hal_host_msg_type {
 	WCN36XX_HAL_DHCP_START_IND = 189,
 	WCN36XX_HAL_DHCP_STOP_IND = 190,
 
+	/* Scan Offload (hw) APIs */
+	WCN36XX_HAL_START_SCAN_OFFLOAD_REQ = 204,
+	WCN36XX_HAL_START_SCAN_OFFLOAD_RSP = 205,
+	WCN36XX_HAL_STOP_SCAN_OFFLOAD_REQ = 206,
+	WCN36XX_HAL_STOP_SCAN_OFFLOAD_RSP = 207,
+	WCN36XX_HAL_SCAN_OFFLOAD_IND = 210,
+
 	WCN36XX_HAL_AVOID_FREQ_RANGE_IND = 233,
 
 	WCN36XX_HAL_PRINT_REG_INFO_IND = 259,
@@ -1115,6 +1122,101 @@ struct wcn36xx_hal_finish_scan_rsp_msg {
 
 } __packed;
 
+enum wcn36xx_hal_scan_type {
+	WCN36XX_HAL_SCAN_TYPE_PASSIVE = 0x00,
+	WCN36XX_HAL_SCAN_TYPE_ACTIVE = WCN36XX_HAL_MAX_ENUM_SIZE
+};
+
+struct wcn36xx_hal_mac_ssid {
+	u8 length;
+	u8 ssid[32];
+} __packed;
+
+struct wcn36xx_hal_start_scan_offload_req_msg {
+	struct wcn36xx_hal_msg_header header;
+
+	/* BSSIDs hot list */
+	u8 num_bssid;
+	u8 bssids[4][ETH_ALEN];
+
+	/* Directed probe-requests will be sent for listed SSIDs (max 10) */
+	u8 num_ssid;
+	struct wcn36xx_hal_mac_ssid ssids[10];
+
+	/* Report AP with hidden ssid */
+	u8 scan_hidden;
+
+	/* Self MAC address */
+	u8 mac[ETH_ALEN];
+
+	/* BSS type */
+	enum wcn36xx_hal_bss_type bss_type;
+
+	/* Scan type */
+	enum wcn36xx_hal_scan_type scan_type;
+
+	/* Minimum scanning time on each channel (ms) */
+	u32 min_ch_time;
+
+	/* Maximum scanning time on each channel */
+	u32 max_ch_time;
+
+	/* Is a p2p search */
+	u8 p2p_search;
+
+	/* Channels to scan */
+	u8 num_channel;
+	u8 channels[80];
+
+	/* IE field */
+	u16 ie_len;
+	u8 ie[0];
+} __packed;
+
+struct wcn36xx_hal_start_scan_offload_rsp_msg {
+	struct wcn36xx_hal_msg_header header;
+
+	/* success or failure */
+	u32 status;
+} __packed;
+
+enum wcn36xx_hal_scan_offload_ind_type {
+	/* Scan has been started */
+	WCN36XX_HAL_SCAN_IND_STARTED = 0x01,
+	/* Scan has been completed */
+	WCN36XX_HAL_SCAN_IND_COMPLETED = 0x02,
+	/* Moved to foreign channel */
+	WCN36XX_HAL_SCAN_IND_FOREIGN_CHANNEL = 0x08,
+	/* scan request has been dequeued */
+	WCN36XX_HAL_SCAN_IND_DEQUEUED = 0x10,
+	/* preempted by other high priority scan */
+	WCN36XX_HAL_SCAN_IND_PREEMPTED = 0x20,
+	/* scan start failed */
+	WCN36XX_HAL_SCAN_IND_FAILED = 0x40,
+	/* scan restarted */
+	WCN36XX_HAL_SCAN_IND_RESTARTED = 0x80,
+	WCN36XX_HAL_SCAN_IND_MAX = WCN36XX_HAL_MAX_ENUM_SIZE
+};
+
+struct wcn36xx_hal_scan_offload_ind {
+	struct wcn36xx_hal_msg_header header;
+
+	u32 type;
+	u32 channel_mhz;
+	u32 scan_id;
+} __packed;
+
+struct wcn36xx_hal_stop_scan_offload_req_msg {
+	struct wcn36xx_hal_msg_header header;
+} __packed;
+
+struct wcn36xx_hal_stop_scan_offload_rsp_msg {
+	struct wcn36xx_hal_msg_header header;
+
+	/* success or failure */
+	u32 status;
+} __packed;
+
 enum wcn36xx_hal_rate_index {
 	HW_RATE_INDEX_1MBPS	= 0x82,
 	HW_RATE_INDEX_2MBPS	= 0x84,
@@ -1507,11 +1609,6 @@ struct wcn36xx_hal_edca_param_record {
 	u16 txop_limit;
 } __packed;
 
-struct wcn36xx_hal_mac_ssid {
-	u8 length;
-	u8 ssid[32];
-} __packed;
-
 /* Concurrency role. These are generic IDs that identify the various roles
  *  in the software system. */
 enum wcn36xx_hal_con_mode {
diff --git a/drivers/net/wireless/ath/wcn36xx/main.c b/drivers/net/wireless/ath/wcn36xx/main.c
index 987f125..ab5be6d 100644
--- a/drivers/net/wireless/ath/wcn36xx/main.c
+++ b/drivers/net/wireless/ath/wcn36xx/main.c
@@ -641,7 +641,6 @@ static int wcn36xx_hw_scan(struct ieee80211_hw *hw,
 			   struct ieee80211_scan_request *hw_req)
 {
 	struct wcn36xx *wcn = hw->priv;
-
 	mutex_lock(&wcn->scan_lock);
 	if (wcn->scan_req) {
 		mutex_unlock(&wcn->scan_lock);
@@ -650,11 +649,16 @@ static int wcn36xx_hw_scan(struct ieee80211_hw *hw,
 
 	wcn->scan_aborted = false;
 	wcn->scan_req = &hw_req->req;
+
 	mutex_unlock(&wcn->scan_lock);
 
-	schedule_work(&wcn->scan_work);
+	if (!get_feat_caps(wcn->fw_feat_caps, SCAN_OFFLOAD)) {
+		/* legacy manual/sw scan */
+		schedule_work(&wcn->scan_work);
+		return 0;
+	}
 
-	return 0;
+	return wcn36xx_smd_start_hw_scan(wcn, vif, &hw_req->req);
 }
 
 static void wcn36xx_cancel_hw_scan(struct ieee80211_hw *hw,
@@ -662,6 +666,12 @@ static void wcn36xx_cancel_hw_scan(struct ieee80211_hw *hw,
 {
 	struct wcn36xx *wcn = hw->priv;
 
+	if (!wcn36xx_smd_stop_hw_scan(wcn)) {
+		struct cfg80211_scan_info scan_info = { .aborted = true };
+
+		ieee80211_scan_completed(wcn->hw, &scan_info);
+	}
+
 	mutex_lock(&wcn->scan_lock);
 	wcn->scan_aborted = true;
 	mutex_unlock(&wcn->scan_lock);
diff --git a/drivers/net/wireless/ath/wcn36xx/smd.c b/drivers/net/wireless/ath/wcn36xx/smd.c
index 9c6590d..2914618 100644
--- a/drivers/net/wireless/ath/wcn36xx/smd.c
+++ b/drivers/net/wireless/ath/wcn36xx/smd.c
@@ -73,6 +73,8 @@ static struct wcn36xx_cfg_val wcn36xx_cfg_vals[] = {
 	WCN36XX_CFG_VAL(TX_PWR_CTRL_ENABLE, 1),
 	WCN36XX_CFG_VAL(ENABLE_CLOSE_LOOP, 1),
 	WCN36XX_CFG_VAL(ENABLE_LPWR_IMG_TRANSITION, 0),
+	WCN36XX_CFG_VAL(BTC_STATIC_LEN_LE_BT, 120000),
+	WCN36XX_CFG_VAL(BTC_STATIC_LEN_LE_WLAN, 30000),
 	WCN36XX_CFG_VAL(MAX_ASSOC_LIMIT, 10),
 	WCN36XX_CFG_VAL(ENABLE_MCC_ADAPTIVE_SCHEDULER, 0),
 };
@@ -613,6 +615,85 @@ int wcn36xx_smd_finish_scan(struct wcn36xx *wcn,
 	return ret;
 }
 
+int wcn36xx_smd_start_hw_scan(struct wcn36xx *wcn, struct ieee80211_vif *vif,
+			      struct cfg80211_scan_request *req)
+{
+	struct wcn36xx_hal_start_scan_offload_req_msg msg_body;
+	int ret, i;
+
+	mutex_lock(&wcn->hal_mutex);
+	INIT_HAL_MSG(msg_body, WCN36XX_HAL_START_SCAN_OFFLOAD_REQ);
+
+	msg_body.scan_type = WCN36XX_HAL_SCAN_TYPE_ACTIVE;
+	msg_body.min_ch_time = 30;
+	msg_body.max_ch_time = 100;
+	msg_body.scan_hidden = 1;
+	memcpy(msg_body.mac, vif->addr, ETH_ALEN);
+	msg_body.p2p_search = vif->p2p;
+
+	msg_body.num_ssid = min_t(u8, req->n_ssids, ARRAY_SIZE(msg_body.ssids));
+	for (i = 0; i < msg_body.num_ssid; i++) {
+		msg_body.ssids[i].length = min_t(u8, req->ssids[i].ssid_len,
+						sizeof(msg_body.ssids[i].ssid));
+		memcpy(msg_body.ssids[i].ssid, req->ssids[i].ssid,
+		       msg_body.ssids[i].length);
+	}
+
+	msg_body.num_channel = min_t(u8, req->n_channels,
+				     sizeof(msg_body.channels));
+	for (i = 0; i < msg_body.num_channel; i++)
+		msg_body.channels[i] = req->channels[i]->hw_value;
+
+	PREPARE_HAL_BUF(wcn->hal_buf, msg_body);
+
+	wcn36xx_dbg(WCN36XX_DBG_HAL,
+		    "hal start hw-scan (channels: %u; ssids: %u; p2p: %s)\n",
+		    msg_body.num_channel, msg_body.num_ssid,
+		    msg_body.p2p_search ? "yes" : "no");
+
+	ret = wcn36xx_smd_send_and_wait(wcn, msg_body.header.len);
+	if (ret) {
+		wcn36xx_err("Sending hal_start_scan_offload failed\n");
+		goto out;
+	}
+	ret = wcn36xx_smd_rsp_status_check(wcn->hal_buf, wcn->hal_rsp_len);
+	if (ret) {
+		wcn36xx_err("hal_start_scan_offload response failed err=%d\n",
+			    ret);
+		goto out;
+	}
+out:
+	mutex_unlock(&wcn->hal_mutex);
+	return ret;
+}
+
+int wcn36xx_smd_stop_hw_scan(struct wcn36xx *wcn)
+{
+	struct wcn36xx_hal_stop_scan_offload_req_msg msg_body;
+	int ret;
+
+	mutex_lock(&wcn->hal_mutex);
+	INIT_HAL_MSG(msg_body, WCN36XX_HAL_STOP_SCAN_OFFLOAD_REQ);
+	PREPARE_HAL_BUF(wcn->hal_buf, msg_body);
+
+	wcn36xx_dbg(WCN36XX_DBG_HAL, "hal stop hw-scan\n");
+
+	ret = wcn36xx_smd_send_and_wait(wcn, msg_body.header.len);
+	if (ret) {
+		wcn36xx_err("Sending hal_stop_scan_offload failed\n");
+		goto out;
+	}
+	ret = wcn36xx_smd_rsp_status_check(wcn->hal_buf, wcn->hal_rsp_len);
+	if (ret) {
+		wcn36xx_err("hal_stop_scan_offload response failed err=%d\n",
+			    ret);
+		goto out;
+	}
+out:
+	mutex_unlock(&wcn->hal_mutex);
+	return ret;
+}
+
 static int wcn36xx_smd_switch_channel_rsp(void *buf, size_t len)
 {
 	struct wcn36xx_hal_switch_channel_rsp_msg *rsp;
@@ -2039,6 +2120,40 @@ static int wcn36xx_smd_tx_compl_ind(struct wcn36xx *wcn, void *buf, size_t len)
 	return 0;
 }
 
+static int wcn36xx_smd_hw_scan_ind(struct wcn36xx *wcn, void *buf, size_t len)
+{
+	struct wcn36xx_hal_scan_offload_ind *rsp = buf;
+	struct cfg80211_scan_info scan_info = {};
+
+	if (len != sizeof(*rsp)) {
+		wcn36xx_warn("Corrupted scan offload indication\n");
+		return -EIO;
+	}
+
+	wcn36xx_dbg(WCN36XX_DBG_HAL, "scan indication (type %x)\n", rsp->type);
+
+	switch (rsp->type) {
+	case WCN36XX_HAL_SCAN_IND_FAILED:
+		scan_info.aborted = true;
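+		/* fall through - a failed scan is reported as a completed, aborted scan */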
+	case WCN36XX_HAL_SCAN_IND_COMPLETED:
+		mutex_lock(&wcn->scan_lock);
+		wcn->scan_req = NULL;
+		mutex_unlock(&wcn->scan_lock);
+		ieee80211_scan_completed(wcn->hw, &scan_info);
+		break;
+	case WCN36XX_HAL_SCAN_IND_STARTED:
+	case WCN36XX_HAL_SCAN_IND_FOREIGN_CHANNEL:
+	case WCN36XX_HAL_SCAN_IND_DEQUEUED:
+	case WCN36XX_HAL_SCAN_IND_PREEMPTED:
+	case WCN36XX_HAL_SCAN_IND_RESTARTED:
+		break;
+	default:
+		wcn36xx_warn("Unknown scan indication type %x\n", rsp->type);
+	}
+
+	return 0;
+}
+
 static int wcn36xx_smd_missed_beacon_ind(struct wcn36xx *wcn,
 					 void *buf,
 					 size_t len)
@@ -2250,6 +2365,8 @@ int wcn36xx_smd_rsp_process(struct rpmsg_device *rpdev,
 	case WCN36XX_HAL_CH_SWITCH_RSP:
 	case WCN36XX_HAL_FEATURE_CAPS_EXCHANGE_RSP:
 	case WCN36XX_HAL_8023_MULTICAST_LIST_RSP:
+	case WCN36XX_HAL_START_SCAN_OFFLOAD_RSP:
+	case WCN36XX_HAL_STOP_SCAN_OFFLOAD_RSP:
 		memcpy(wcn->hal_buf, buf, len);
 		wcn->hal_rsp_len = len;
 		complete(&wcn->hal_rsp_compl);
@@ -2262,6 +2379,7 @@ int wcn36xx_smd_rsp_process(struct rpmsg_device *rpdev,
 	case WCN36XX_HAL_MISSED_BEACON_IND:
 	case WCN36XX_HAL_DELETE_STA_CONTEXT_IND:
 	case WCN36XX_HAL_PRINT_REG_INFO_IND:
+	case WCN36XX_HAL_SCAN_OFFLOAD_IND:
 		msg_ind = kmalloc(sizeof(*msg_ind) + len, GFP_ATOMIC);
 		if (!msg_ind) {
 			wcn36xx_err("Run out of memory while handling SMD_EVENT (%d)\n",
@@ -2298,6 +2416,8 @@ static void wcn36xx_ind_smd_work(struct work_struct *work)
 	hal_ind_msg = list_first_entry(&wcn->hal_ind_queue,
 				       struct wcn36xx_hal_ind_msg,
 				       list);
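+	/* dequeue and drop the spinlock before processing - handlers may sleep */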
+	list_del(wcn->hal_ind_queue.next);
+	spin_unlock_irqrestore(&wcn->hal_ind_lock, flags);
 
 	msg_header = (struct wcn36xx_hal_msg_header *)hal_ind_msg->msg;
 
@@ -2326,12 +2446,14 @@ static void wcn36xx_ind_smd_work(struct work_struct *work)
 					       hal_ind_msg->msg,
 					       hal_ind_msg->msg_len);
 		break;
+	case WCN36XX_HAL_SCAN_OFFLOAD_IND:
+		wcn36xx_smd_hw_scan_ind(wcn, hal_ind_msg->msg,
+					hal_ind_msg->msg_len);
+		break;
 	default:
 		wcn36xx_err("SMD_EVENT (%d) not supported\n",
 			      msg_header->msg_type);
 	}
-	list_del(wcn->hal_ind_queue.next);
-	spin_unlock_irqrestore(&wcn->hal_ind_lock, flags);
 	kfree(hal_ind_msg);
 }
 int wcn36xx_smd_open(struct wcn36xx *wcn)
diff --git a/drivers/net/wireless/ath/wcn36xx/smd.h b/drivers/net/wireless/ath/wcn36xx/smd.h
index 013fc95..8076edf 100644
--- a/drivers/net/wireless/ath/wcn36xx/smd.h
+++ b/drivers/net/wireless/ath/wcn36xx/smd.h
@@ -65,6 +65,9 @@ int wcn36xx_smd_end_scan(struct wcn36xx *wcn, u8 scan_channel);
 int wcn36xx_smd_finish_scan(struct wcn36xx *wcn,
 			    enum wcn36xx_hal_sys_mode mode);
 int wcn36xx_smd_update_scan_params(struct wcn36xx *wcn, u8 *channels, size_t channel_count);
+int wcn36xx_smd_start_hw_scan(struct wcn36xx *wcn, struct ieee80211_vif *vif,
+			      struct cfg80211_scan_request *req);
+int wcn36xx_smd_stop_hw_scan(struct wcn36xx *wcn);
 int wcn36xx_smd_add_sta_self(struct wcn36xx *wcn, struct ieee80211_vif *vif);
 int wcn36xx_smd_delete_sta_self(struct wcn36xx *wcn, u8 *addr);
 int wcn36xx_smd_delete_sta(struct wcn36xx *wcn, u8 sta_index);
diff --git a/drivers/net/wireless/ath/wil6210/cfg80211.c b/drivers/net/wireless/ath/wil6210/cfg80211.c
index 85d5c04..771a534 100644
--- a/drivers/net/wireless/ath/wil6210/cfg80211.c
+++ b/drivers/net/wireless/ath/wil6210/cfg80211.c
@@ -901,7 +901,7 @@ int wil_cfg80211_mgmt_tx(struct wiphy *wiphy, struct wireless_dev *wdev,
 			 u64 *cookie)
 {
 	const u8 *buf = params->buf;
-	size_t len = params->len;
+	size_t len = params->len, total;
 	struct wil6210_priv *wil = wiphy_to_wil(wiphy);
 	int rc;
 	bool tx_status = false;
@@ -926,7 +926,11 @@ int wil_cfg80211_mgmt_tx(struct wiphy *wiphy, struct wireless_dev *wdev,
 	if (len < sizeof(struct ieee80211_hdr_3addr))
 		return -EINVAL;
 
-	cmd = kmalloc(sizeof(*cmd) + len, GFP_KERNEL);
+	total = sizeof(*cmd) + len;
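+	/* reject the request if sizeof(*cmd) + len wrapped around */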
+	if (total < len)
+		return -EINVAL;
+
+	cmd = kmalloc(total, GFP_KERNEL);
 	if (!cmd) {
 		rc = -ENOMEM;
 		goto out;
@@ -936,7 +940,7 @@ int wil_cfg80211_mgmt_tx(struct wiphy *wiphy, struct wireless_dev *wdev,
 	cmd->len = cpu_to_le16(len);
 	memcpy(cmd->payload, buf, len);
 
-	rc = wmi_call(wil, WMI_SW_TX_REQ_CMDID, cmd, sizeof(*cmd) + len,
+	rc = wmi_call(wil, WMI_SW_TX_REQ_CMDID, cmd, total,
 		      WMI_SW_TX_COMPLETE_EVENTID, &evt, sizeof(evt), 2000);
 	if (rc == 0)
 		tx_status = !evt.evt.status;
@@ -1727,9 +1731,12 @@ static int wil_cfg80211_suspend(struct wiphy *wiphy,
 
 	wil_dbg_pm(wil, "suspending\n");
 
-	wil_p2p_stop_discovery(wil);
-
+	mutex_lock(&wil->mutex);
+	mutex_lock(&wil->p2p_wdev_mutex);
+	wil_p2p_stop_radio_operations(wil);
 	wil_abort_scan(wil, true);
+	mutex_unlock(&wil->p2p_wdev_mutex);
+	mutex_unlock(&wil->mutex);
 
 out:
 	return rc;
diff --git a/drivers/net/wireless/ath/wil6210/debugfs.c b/drivers/net/wireless/ath/wil6210/debugfs.c
index e58dc6d..4475937 100644
--- a/drivers/net/wireless/ath/wil6210/debugfs.c
+++ b/drivers/net/wireless/ath/wil6210/debugfs.c
@@ -242,12 +242,19 @@ static void wil_print_ring(struct seq_file *s, const char *prefix,
 static int wil_mbox_debugfs_show(struct seq_file *s, void *data)
 {
 	struct wil6210_priv *wil = s->private;
+	int ret;
+
+	ret = wil_pm_runtime_get(wil);
+	if (ret < 0)
+		return ret;
 
 	wil_print_ring(s, "tx", wil->csr + HOST_MBOX +
 		       offsetof(struct wil6210_mbox_ctl, tx));
 	wil_print_ring(s, "rx", wil->csr + HOST_MBOX +
 		       offsetof(struct wil6210_mbox_ctl, rx));
 
+	wil_pm_runtime_put(wil);
+
 	return 0;
 }
 
@@ -265,15 +272,37 @@ static const struct file_operations fops_mbox = {
 
 static int wil_debugfs_iomem_x32_set(void *data, u64 val)
 {
-	writel(val, (void __iomem *)data);
+	struct wil_debugfs_iomem_data *d = data;
+	struct wil6210_priv *wil = d->wil;
+	int ret;
+
+	ret = wil_pm_runtime_get(wil);
+	if (ret < 0)
+		return ret;
+
+	writel(val, (void __iomem *)d->offset);
 	wmb(); /* make sure write propagated to HW */
 
+	wil_pm_runtime_put(wil);
+
 	return 0;
 }
 
 static int wil_debugfs_iomem_x32_get(void *data, u64 *val)
 {
-	*val = readl((void __iomem *)data);
+	struct wil_debugfs_iomem_data *d = data;
+	struct wil6210_priv *wil = d->wil;
+	int ret;
+
+	ret = wil_pm_runtime_get(wil);
+	if (ret < 0)
+		return ret;
+
+	*val = readl((void __iomem *)d->offset);
+
+	wil_pm_runtime_put(wil);
 
 	return 0;
 }
@@ -284,10 +313,21 @@ DEFINE_SIMPLE_ATTRIBUTE(fops_iomem_x32, wil_debugfs_iomem_x32_get,
 static struct dentry *wil_debugfs_create_iomem_x32(const char *name,
 						   umode_t mode,
 						   struct dentry *parent,
-						   void *value)
+						   void *value,
+						   struct wil6210_priv *wil)
 {
-	return debugfs_create_file(name, mode, parent, value,
-				   &fops_iomem_x32);
+	struct dentry *file;
+	struct wil_debugfs_iomem_data *data = &wil->dbg_data.data_arr[
+					      wil->dbg_data.iomem_data_count];
+
+	data->wil = wil;
+	data->offset = value;
+
+	file = debugfs_create_file(name, mode, parent, data, &fops_iomem_x32);
+	if (!IS_ERR_OR_NULL(file))
+		wil->dbg_data.iomem_data_count++;
+
+	return file;
 }
 
 static int wil_debugfs_ulong_set(void *data, u64 val)
@@ -346,7 +386,8 @@ static void wil6210_debugfs_init_offset(struct wil6210_priv *wil,
 		case doff_io32:
 			f = wil_debugfs_create_iomem_x32(tbl[i].name,
 							 tbl[i].mode, dbg,
-							 base + tbl[i].off);
+							 base + tbl[i].off,
+							 wil);
 			break;
 		case doff_u8:
 			f = debugfs_create_u8(tbl[i].name, tbl[i].mode, dbg,
@@ -475,13 +516,22 @@ static int wil6210_debugfs_create_ITR_CNT(struct wil6210_priv *wil,
 static int wil_memread_debugfs_show(struct seq_file *s, void *data)
 {
 	struct wil6210_priv *wil = s->private;
-	void __iomem *a = wmi_buffer(wil, cpu_to_le32(mem_addr));
+	void __iomem *a;
+	int ret;
+
+	ret = wil_pm_runtime_get(wil);
+	if (ret < 0)
+		return ret;
+
+	a = wmi_buffer(wil, cpu_to_le32(mem_addr));
 
 	if (a)
 		seq_printf(s, "[0x%08x] = 0x%08x\n", mem_addr, readl(a));
 	else
 		seq_printf(s, "[0x%08x] = INVALID\n", mem_addr);
 
+	wil_pm_runtime_put(wil);
+
 	return 0;
 }
 
@@ -502,10 +552,12 @@ static ssize_t wil_read_file_ioblob(struct file *file, char __user *user_buf,
 {
 	enum { max_count = 4096 };
 	struct wil_blob_wrapper *wil_blob = file->private_data;
+	struct wil6210_priv *wil = wil_blob->wil;
 	loff_t pos = *ppos;
 	size_t available = wil_blob->blob.size;
 	void *buf;
 	size_t ret;
+	int rc;
 
 	if (test_bit(wil_status_suspending, wil_blob->wil->status) ||
 	    test_bit(wil_status_suspended, wil_blob->wil->status))
@@ -526,10 +578,19 @@ static ssize_t wil_read_file_ioblob(struct file *file, char __user *user_buf,
 	if (!buf)
 		return -ENOMEM;
 
+	rc = wil_pm_runtime_get(wil);
+	if (rc < 0) {
+		kfree(buf);
+		return rc;
+	}
+
 	wil_memcpy_fromio_32(buf, (const void __iomem *)
 			     wil_blob->blob.data + pos, count);
 
 	ret = copy_to_user(user_buf, buf, count);
+
+	wil_pm_runtime_put(wil);
+
 	kfree(buf);
 	if (ret == count)
 		return -EFAULT;
@@ -1571,8 +1632,6 @@ static ssize_t wil_write_suspend_stats(struct file *file,
 	struct wil6210_priv *wil = file->private_data;
 
 	memset(&wil->suspend_stats, 0, sizeof(wil->suspend_stats));
-	wil->suspend_stats.min_suspend_time = ULONG_MAX;
-	wil->suspend_stats.collection_start = ktime_get();
 
 	return len;
 }
@@ -1582,33 +1641,41 @@ static ssize_t wil_read_suspend_stats(struct file *file,
 				      size_t count, loff_t *ppos)
 {
 	struct wil6210_priv *wil = file->private_data;
-	static char text[400];
-	int n;
-	unsigned long long stats_collection_time =
-		ktime_to_us(ktime_sub(ktime_get(),
-				      wil->suspend_stats.collection_start));
+	char *text;
+	int n, ret, text_size = 500;
 
-	n = snprintf(text, sizeof(text),
-		     "Suspend statistics:\n"
+	text = kmalloc(text_size, GFP_KERNEL);
+	if (!text)
+		return -ENOMEM;
+
+	n = snprintf(text, text_size,
+		     "Radio on suspend statistics:\n"
 		     "successful suspends:%ld failed suspends:%ld\n"
 		     "successful resumes:%ld failed resumes:%ld\n"
-		     "rejected by host:%ld rejected by device:%ld\n"
-		     "total suspend time:%lld min suspend time:%lld\n"
-		     "max suspend time:%lld stats collection time: %lld\n",
-		     wil->suspend_stats.successful_suspends,
-		     wil->suspend_stats.failed_suspends,
-		     wil->suspend_stats.successful_resumes,
-		     wil->suspend_stats.failed_resumes,
-		     wil->suspend_stats.rejected_by_host,
+		     "rejected by device:%ld\n"
+		     "Radio off suspend statistics:\n"
+		     "successful suspends:%ld failed suspends:%ld\n"
+		     "successful resumes:%ld failed resumes:%ld\n"
+		     "General statistics:\n"
+		     "rejected by host:%ld\n",
+		     wil->suspend_stats.r_on.successful_suspends,
+		     wil->suspend_stats.r_on.failed_suspends,
+		     wil->suspend_stats.r_on.successful_resumes,
+		     wil->suspend_stats.r_on.failed_resumes,
 		     wil->suspend_stats.rejected_by_device,
-		     wil->suspend_stats.total_suspend_time,
-		     wil->suspend_stats.min_suspend_time,
-		     wil->suspend_stats.max_suspend_time,
-		     stats_collection_time);
+		     wil->suspend_stats.r_off.successful_suspends,
+		     wil->suspend_stats.r_off.failed_suspends,
+		     wil->suspend_stats.r_off.successful_resumes,
+		     wil->suspend_stats.r_off.failed_resumes,
+		     wil->suspend_stats.rejected_by_host);
 
-	n = min_t(int, n, sizeof(text));
+	n = min_t(int, n, text_size);
 
-	return simple_read_from_buffer(user_buf, count, ppos, text, n);
+	ret = simple_read_from_buffer(user_buf, count, ppos, text, n);
+
+	kfree(text);
+
+	return ret;
 }
 
 static const struct file_operations fops_suspend_stats = {
@@ -1736,14 +1803,31 @@ static const struct dbg_off dbg_statics[] = {
 	{},
 };
 
+static const int dbg_off_count = 4 * (ARRAY_SIZE(isr_off) - 1) +
+				ARRAY_SIZE(dbg_wil_regs) - 1 +
+				ARRAY_SIZE(pseudo_isr_off) - 1 +
+				ARRAY_SIZE(lgc_itr_cnt_off) - 1 +
+				ARRAY_SIZE(tx_itr_cnt_off) - 1 +
+				ARRAY_SIZE(rx_itr_cnt_off) - 1;
+
 int wil6210_debugfs_init(struct wil6210_priv *wil)
 {
 	struct dentry *dbg = wil->debug = debugfs_create_dir(WIL_NAME,
 			wil_to_wiphy(wil)->debugfsdir);
-
 	if (IS_ERR_OR_NULL(dbg))
 		return -ENODEV;
 
+	wil->dbg_data.data_arr = kcalloc(dbg_off_count,
+					 sizeof(struct wil_debugfs_iomem_data),
+					 GFP_KERNEL);
+	if (!wil->dbg_data.data_arr) {
+		debugfs_remove_recursive(dbg);
+		wil->debug = NULL;
+		return -ENOMEM;
+	}
+
+	wil->dbg_data.iomem_data_count = 0;
+
 	wil_pmc_init(wil);
 
 	wil6210_debugfs_init_files(wil, dbg);
@@ -1758,8 +1842,6 @@ int wil6210_debugfs_init(struct wil6210_priv *wil)
 
 	wil6210_debugfs_create_ITR_CNT(wil, dbg);
 
-	wil->suspend_stats.collection_start = ktime_get();
-
 	return 0;
 }
 
@@ -1768,6 +1850,8 @@ void wil6210_debugfs_remove(struct wil6210_priv *wil)
 	debugfs_remove_recursive(wil->debug);
 	wil->debug = NULL;
 
+	kfree(wil->dbg_data.data_arr);
+
 	/* free pmc memory without sending command to fw, as it will
 	 * be reset on the way down anyway
 	 */
diff --git a/drivers/net/wireless/ath/wil6210/ethtool.c b/drivers/net/wireless/ath/wil6210/ethtool.c
index adcfef4..66200f6 100644
--- a/drivers/net/wireless/ath/wil6210/ethtool.c
+++ b/drivers/net/wireless/ath/wil6210/ethtool.c
@@ -47,9 +47,14 @@ static int wil_ethtoolops_get_coalesce(struct net_device *ndev,
 	struct wil6210_priv *wil = ndev_to_wil(ndev);
 	u32 tx_itr_en, tx_itr_val = 0;
 	u32 rx_itr_en, rx_itr_val = 0;
+	int ret;
 
 	wil_dbg_misc(wil, "ethtoolops_get_coalesce\n");
 
+	ret = wil_pm_runtime_get(wil);
+	if (ret < 0)
+		return ret;
+
 	tx_itr_en = wil_r(wil, RGF_DMA_ITR_TX_CNT_CTL);
 	if (tx_itr_en & BIT_DMA_ITR_TX_CNT_CTL_EN)
 		tx_itr_val = wil_r(wil, RGF_DMA_ITR_TX_CNT_TRSH);
@@ -58,6 +63,8 @@ static int wil_ethtoolops_get_coalesce(struct net_device *ndev,
 	if (rx_itr_en & BIT_DMA_ITR_RX_CNT_CTL_EN)
 		rx_itr_val = wil_r(wil, RGF_DMA_ITR_RX_CNT_TRSH);
 
+	wil_pm_runtime_put(wil);
+
 	cp->tx_coalesce_usecs = tx_itr_val;
 	cp->rx_coalesce_usecs = rx_itr_val;
 	return 0;
@@ -67,6 +74,7 @@ static int wil_ethtoolops_set_coalesce(struct net_device *ndev,
 				       struct ethtool_coalesce *cp)
 {
 	struct wil6210_priv *wil = ndev_to_wil(ndev);
+	int ret;
 
 	wil_dbg_misc(wil, "ethtoolops_set_coalesce: rx %d usec, tx %d usec\n",
 		     cp->rx_coalesce_usecs, cp->tx_coalesce_usecs);
@@ -86,8 +94,15 @@ static int wil_ethtoolops_set_coalesce(struct net_device *ndev,
 
 	wil->tx_max_burst_duration = cp->tx_coalesce_usecs;
 	wil->rx_max_burst_duration = cp->rx_coalesce_usecs;
+
+	ret = wil_pm_runtime_get(wil);
+	if (ret < 0)
+		return ret;
+
 	wil_configure_interrupt_moderation(wil);
 
+	wil_pm_runtime_put(wil);
+
 	return 0;
 
 out_bad:
diff --git a/drivers/net/wireless/ath/wil6210/fw_inc.c b/drivers/net/wireless/ath/wil6210/fw_inc.c
index e01acac..77d1902 100644
--- a/drivers/net/wireless/ath/wil6210/fw_inc.c
+++ b/drivers/net/wireless/ath/wil6210/fw_inc.c
@@ -26,14 +26,17 @@
 					     prefix_type, rowsize,	\
 					     groupsize, buf, len, ascii)
 
-#define FW_ADDR_CHECK(ioaddr, val, msg) do { \
-		ioaddr = wmi_buffer(wil, val); \
-		if (!ioaddr) { \
-			wil_err_fw(wil, "bad " msg ": 0x%08x\n", \
-				   le32_to_cpu(val)); \
-			return -EINVAL; \
-		} \
-	} while (0)
+static bool wil_fw_addr_check(struct wil6210_priv *wil,
+			      void __iomem **ioaddr, __le32 val,
+			      u32 size, const char *msg)
+{
+	*ioaddr = wmi_buffer_block(wil, val, size);
+	if (!(*ioaddr)) {
+		wil_err_fw(wil, "bad %s: 0x%08x\n", msg, le32_to_cpu(val));
+		return false;
+	}
+	return true;
+}
 
 /**
  * wil_fw_verify - verify firmware file validity
@@ -124,24 +127,19 @@ static int fw_ignore_section(struct wil6210_priv *wil, const void *data,
 	return 0;
 }
 
-static int fw_handle_comment(struct wil6210_priv *wil, const void *data,
-			     size_t size)
-{
-	wil_hex_dump_fw("", DUMP_PREFIX_OFFSET, 16, 1, data, size, true);
-
-	return 0;
-}
-
 static int
-fw_handle_capabilities(struct wil6210_priv *wil, const void *data,
-		       size_t size)
+fw_handle_comment(struct wil6210_priv *wil, const void *data,
+		  size_t size)
 {
 	const struct wil_fw_record_capabilities *rec = data;
 	size_t capa_size;
 
 	if (size < sizeof(*rec) ||
-	    le32_to_cpu(rec->magic) != WIL_FW_CAPABILITIES_MAGIC)
+	    le32_to_cpu(rec->magic) != WIL_FW_CAPABILITIES_MAGIC) {
+		wil_hex_dump_fw("", DUMP_PREFIX_OFFSET, 16, 1,
+				data, size, true);
 		return 0;
+	}
 
 	capa_size = size - offsetof(struct wil_fw_record_capabilities,
 				    capabilities);
@@ -165,7 +163,8 @@ static int fw_handle_data(struct wil6210_priv *wil, const void *data,
 		return -EINVAL;
 	}
 
-	FW_ADDR_CHECK(dst, d->addr, "address");
+	if (!wil_fw_addr_check(wil, &dst, d->addr, s, "address"))
+		return -EINVAL;
 	wil_dbg_fw(wil, "write [0x%08x] <== %zu bytes\n", le32_to_cpu(d->addr),
 		   s);
 	wil_memcpy_toio_32(dst, d->data, s);
@@ -197,7 +196,8 @@ static int fw_handle_fill(struct wil6210_priv *wil, const void *data,
 		return -EINVAL;
 	}
 
-	FW_ADDR_CHECK(dst, d->addr, "address");
+	if (!wil_fw_addr_check(wil, &dst, d->addr, s, "address"))
+		return -EINVAL;
 
 	v = le32_to_cpu(d->value);
 	wil_dbg_fw(wil, "fill [0x%08x] <== 0x%08x, %zu bytes\n",
@@ -253,7 +253,8 @@ static int fw_handle_direct_write(struct wil6210_priv *wil, const void *data,
 		u32 v = le32_to_cpu(block[i].value);
 		u32 x, y;
 
-		FW_ADDR_CHECK(dst, block[i].addr, "address");
+		if (!wil_fw_addr_check(wil, &dst, block[i].addr, 0, "address"))
+			return -EINVAL;
 
 		x = readl(dst);
 		y = (x & m) | (v & ~m);
@@ -319,10 +320,15 @@ static int fw_handle_gateway_data(struct wil6210_priv *wil, const void *data,
 	wil_dbg_fw(wil, "gw write record [%3d] blocks, cmd 0x%08x\n",
 		   n, gw_cmd);
 
-	FW_ADDR_CHECK(gwa_addr, d->gateway_addr_addr, "gateway_addr_addr");
-	FW_ADDR_CHECK(gwa_val, d->gateway_value_addr, "gateway_value_addr");
-	FW_ADDR_CHECK(gwa_cmd, d->gateway_cmd_addr, "gateway_cmd_addr");
-	FW_ADDR_CHECK(gwa_ctl, d->gateway_ctrl_address, "gateway_ctrl_address");
+	if (!wil_fw_addr_check(wil, &gwa_addr, d->gateway_addr_addr, 0,
+			       "gateway_addr_addr") ||
+	    !wil_fw_addr_check(wil, &gwa_val, d->gateway_value_addr, 0,
+			       "gateway_value_addr") ||
+	    !wil_fw_addr_check(wil, &gwa_cmd, d->gateway_cmd_addr, 0,
+			       "gateway_cmd_addr") ||
+	    !wil_fw_addr_check(wil, &gwa_ctl, d->gateway_ctrl_address, 0,
+			       "gateway_ctrl_address"))
+		return -EINVAL;
 
 	wil_dbg_fw(wil, "gw addresses: addr 0x%08x val 0x%08x"
 		   " cmd 0x%08x ctl 0x%08x\n",
@@ -378,12 +384,19 @@ static int fw_handle_gateway_data4(struct wil6210_priv *wil, const void *data,
 	wil_dbg_fw(wil, "gw4 write record [%3d] blocks, cmd 0x%08x\n",
 		   n, gw_cmd);
 
-	FW_ADDR_CHECK(gwa_addr, d->gateway_addr_addr, "gateway_addr_addr");
+	if (!wil_fw_addr_check(wil, &gwa_addr, d->gateway_addr_addr, 0,
+			       "gateway_addr_addr"))
+		return -EINVAL;
 	for (k = 0; k < ARRAY_SIZE(block->value); k++)
-		FW_ADDR_CHECK(gwa_val[k], d->gateway_value_addr[k],
-			      "gateway_value_addr");
-	FW_ADDR_CHECK(gwa_cmd, d->gateway_cmd_addr, "gateway_cmd_addr");
-	FW_ADDR_CHECK(gwa_ctl, d->gateway_ctrl_address, "gateway_ctrl_address");
+		if (!wil_fw_addr_check(wil, &gwa_val[k],
+				       d->gateway_value_addr[k],
+				       0, "gateway_value_addr"))
+			return -EINVAL;
+	if (!wil_fw_addr_check(wil, &gwa_cmd, d->gateway_cmd_addr, 0,
+			       "gateway_cmd_addr") ||
+	    !wil_fw_addr_check(wil, &gwa_ctl, d->gateway_ctrl_address, 0,
+			       "gateway_ctrl_address"))
+		return -EINVAL;
 
 	wil_dbg_fw(wil, "gw4 addresses: addr 0x%08x cmd 0x%08x ctl 0x%08x\n",
 		   le32_to_cpu(d->gateway_addr_addr),
@@ -422,7 +435,7 @@ static const struct {
 	int (*parse_handler)(struct wil6210_priv *wil, const void *data,
 			     size_t size);
 } wil_fw_handlers[] = {
-	{wil_fw_type_comment, fw_handle_comment, fw_handle_capabilities},
+	{wil_fw_type_comment, fw_handle_comment, fw_handle_comment},
 	{wil_fw_type_data, fw_handle_data, fw_ignore_section},
 	{wil_fw_type_fill, fw_handle_fill, fw_ignore_section},
 	/* wil_fw_type_action */
@@ -517,7 +530,7 @@ int wil_request_firmware(struct wil6210_priv *wil, const char *name,
 
 	rc = request_firmware(&fw, name, wil_to_dev(wil));
 	if (rc) {
-		wil_err_fw(wil, "Failed to load firmware %s\n", name);
+		wil_err_fw(wil, "Failed to load firmware %s rc %d\n", name, rc);
 		return rc;
 	}
 	wil_dbg_fw(wil, "Loading <%s>, %zu bytes\n", name, fw->size);
diff --git a/drivers/net/wireless/ath/wil6210/interrupt.c b/drivers/net/wireless/ath/wil6210/interrupt.c
index 59def4f..5cf3417 100644
--- a/drivers/net/wireless/ath/wil6210/interrupt.c
+++ b/drivers/net/wireless/ath/wil6210/interrupt.c
@@ -358,6 +358,25 @@ static void wil_cache_mbox_regs(struct wil6210_priv *wil)
 	wil_mbox_ring_le2cpus(&wil->mbox_ctl.tx);
 }
 
+static bool wil_validate_mbox_regs(struct wil6210_priv *wil)
+{
+	size_t min_size = sizeof(struct wil6210_mbox_hdr) +
+		sizeof(struct wmi_cmd_hdr);
+
+	if (wil->mbox_ctl.rx.entry_size < min_size) {
+		wil_err(wil, "rx mbox entry too small (%d)\n",
+			wil->mbox_ctl.rx.entry_size);
+		return false;
+	}
+	if (wil->mbox_ctl.tx.entry_size < min_size) {
+		wil_err(wil, "tx mbox entry too small (%d)\n",
+			wil->mbox_ctl.tx.entry_size);
+		return false;
+	}
+
+	return true;
+}
+
 static irqreturn_t wil6210_irq_misc(int irq, void *cookie)
 {
 	struct wil6210_priv *wil = cookie;
@@ -393,7 +412,8 @@ static irqreturn_t wil6210_irq_misc(int irq, void *cookie)
 	if (isr & ISR_MISC_FW_READY) {
 		wil_dbg_irq(wil, "IRQ: FW ready\n");
 		wil_cache_mbox_regs(wil);
-		set_bit(wil_status_mbox_ready, wil->status);
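+		/* mark the mailbox usable only if its registers look sane */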
+		if (wil_validate_mbox_regs(wil))
+			set_bit(wil_status_mbox_ready, wil->status);
 		/**
 		 * Actual FW ready indicated by the
 		 * WMI_FW_READY_EVENTID
diff --git a/drivers/net/wireless/ath/wil6210/main.c b/drivers/net/wireless/ath/wil6210/main.c
index 885924a..1b53cd3 100644
--- a/drivers/net/wireless/ath/wil6210/main.c
+++ b/drivers/net/wireless/ath/wil6210/main.c
@@ -579,7 +579,6 @@ int wil_priv_init(struct wil6210_priv *wil)
 	wil->wakeup_trigger = WMI_WAKEUP_TRIGGER_UCAST |
 			      WMI_WAKEUP_TRIGGER_BCAST;
 	memset(&wil->suspend_stats, 0, sizeof(wil->suspend_stats));
-	wil->suspend_stats.min_suspend_time = ULONG_MAX;
 	wil->vring_idle_trsh = 16;
 
 	return 0;
@@ -760,6 +759,8 @@ static void wil_collect_fw_info(struct wil6210_priv *wil)
 	u8 retry_short;
 	int rc;
 
+	wil_refresh_fw_capabilities(wil);
+
 	rc = wmi_get_mgmt_retry(wil, &retry_short);
 	if (!rc) {
 		wiphy->retry_short = retry_short;
@@ -767,6 +768,25 @@ static void wil_collect_fw_info(struct wil6210_priv *wil)
 	}
 }
 
+void wil_refresh_fw_capabilities(struct wil6210_priv *wil)
+{
+	struct wiphy *wiphy = wil_to_wiphy(wil);
+
+	wil->keep_radio_on_during_sleep =
+		wil->platform_ops.keep_radio_on_during_sleep &&
+		wil->platform_ops.keep_radio_on_during_sleep(
+			wil->platform_handle) &&
+		test_bit(WMI_FW_CAPABILITY_D3_SUSPEND, wil->fw_capabilities);
+
+	wil_info(wil, "keep_radio_on_during_sleep (%d)\n",
+		 wil->keep_radio_on_during_sleep);
+
+	if (test_bit(WMI_FW_CAPABILITY_RSSI_REPORTING, wil->fw_capabilities))
+		wiphy->signal_type = CFG80211_SIGNAL_TYPE_MBM;
+	else
+		wiphy->signal_type = CFG80211_SIGNAL_TYPE_UNSPEC;
+}
+
 void wil_mbox_ring_le2cpus(struct wil6210_mbox_ring *r)
 {
 	le32_to_cpus(&r->base);
@@ -1071,11 +1091,11 @@ int wil_reset(struct wil6210_priv *wil, bool load_fw)
 			return rc;
 		}
 
+		wil_collect_fw_info(wil);
+
 		if (wil->ps_profile != WMI_PS_PROFILE_TYPE_DEFAULT)
 			wil_ps_update(wil, wil->ps_profile);
 
-		wil_collect_fw_info(wil);
-
 		if (wil->platform_ops.notify) {
 			rc = wil->platform_ops.notify(wil->platform_handle,
 						      WIL_PLATFORM_EVT_FW_RDY);
diff --git a/drivers/net/wireless/ath/wil6210/netdev.c b/drivers/net/wireless/ath/wil6210/netdev.c
index 4a6ab2d..b641ac1 100644
--- a/drivers/net/wireless/ath/wil6210/netdev.c
+++ b/drivers/net/wireless/ath/wil6210/netdev.c
@@ -21,6 +21,7 @@
 static int wil_open(struct net_device *ndev)
 {
 	struct wil6210_priv *wil = ndev_to_wil(ndev);
+	int rc;
 
 	wil_dbg_misc(wil, "open\n");
 
@@ -30,16 +31,29 @@ static int wil_open(struct net_device *ndev)
 		return -EINVAL;
 	}
 
-	return wil_up(wil);
+	rc = wil_pm_runtime_get(wil);
+	if (rc < 0)
+		return rc;
+
+	rc = wil_up(wil);
+	if (rc)
+		wil_pm_runtime_put(wil);
+
+	return rc;
 }
 
 static int wil_stop(struct net_device *ndev)
 {
 	struct wil6210_priv *wil = ndev_to_wil(ndev);
+	int rc;
 
 	wil_dbg_misc(wil, "stop\n");
 
-	return wil_down(wil);
+	rc = wil_down(wil);
+	if (!rc)
+		wil_pm_runtime_put(wil);
+
+	return rc;
 }
 
 static const struct net_device_ops wil_netdev_ops = {
diff --git a/drivers/net/wireless/ath/wil6210/pcie_bus.c b/drivers/net/wireless/ath/wil6210/pcie_bus.c
index 6a3ab4bf..42a5480 100644
--- a/drivers/net/wireless/ath/wil6210/pcie_bus.c
+++ b/drivers/net/wireless/ath/wil6210/pcie_bus.c
@@ -21,6 +21,7 @@
 #include <linux/suspend.h>
 #include "wil6210.h"
 #include <linux/rtnetlink.h>
+#include <linux/pm_runtime.h>
 
 static bool use_msi = true;
 module_param(use_msi, bool, 0444);
@@ -31,10 +32,8 @@ module_param(ftm_mode, bool, 0444);
 MODULE_PARM_DESC(ftm_mode, " Set factory test mode, default - false");
 
 #ifdef CONFIG_PM
-#ifdef CONFIG_PM_SLEEP
 static int wil6210_pm_notify(struct notifier_block *notify_block,
 			     unsigned long mode, void *unused);
-#endif /* CONFIG_PM_SLEEP */
 #endif /* CONFIG_PM */
 
 static
@@ -84,9 +83,7 @@ void wil_set_capabilities(struct wil6210_priv *wil)
 
 	/* extract FW capabilities from file without loading the FW */
 	wil_request_firmware(wil, wil->wil_fw_name, false);
-
-	if (test_bit(WMI_FW_CAPABILITY_RSSI_REPORTING, wil->fw_capabilities))
-		wil_to_wiphy(wil)->signal_type = CFG80211_SIGNAL_TYPE_MBM;
+	wil_refresh_fw_capabilities(wil);
 }
 
 void wil_disable_irq(struct wil6210_priv *wil)
@@ -296,15 +293,6 @@ static int wil_pcie_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 	wil_set_capabilities(wil);
 	wil6210_clear_irq(wil);
 
-	wil->keep_radio_on_during_sleep =
-		wil->platform_ops.keep_radio_on_during_sleep &&
-		wil->platform_ops.keep_radio_on_during_sleep(
-			wil->platform_handle) &&
-		test_bit(WMI_FW_CAPABILITY_D3_SUSPEND, wil->fw_capabilities);
-
-	wil_info(wil, "keep_radio_on_during_sleep (%d)\n",
-		 wil->keep_radio_on_during_sleep);
-
 	/* FW should raise IRQ when ready */
 	rc = wil_if_pcie_enable(wil);
 	if (rc) {
@@ -320,7 +308,6 @@ static int wil_pcie_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 	}
 
 #ifdef CONFIG_PM
-#ifdef CONFIG_PM_SLEEP
 	wil->pm_notify.notifier_call = wil6210_pm_notify;
 	rc = register_pm_notifier(&wil->pm_notify);
 	if (rc)
@@ -328,11 +315,11 @@ static int wil_pcie_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 		 * be prevented in a later phase if needed
 		 */
 		wil_err(wil, "register_pm_notifier failed: %d\n", rc);
-#endif /* CONFIG_PM_SLEEP */
 #endif /* CONFIG_PM */
 
 	wil6210_debugfs_init(wil);
 
+	wil_pm_runtime_allow(wil);
 
 	return 0;
 
@@ -360,11 +347,11 @@ static void wil_pcie_remove(struct pci_dev *pdev)
 	wil_dbg_misc(wil, "pcie_remove\n");
 
 #ifdef CONFIG_PM
-#ifdef CONFIG_PM_SLEEP
 	unregister_pm_notifier(&wil->pm_notify);
-#endif /* CONFIG_PM_SLEEP */
 #endif /* CONFIG_PM */
 
+	wil_pm_runtime_forbid(wil);
+
 	wil6210_debugfs_remove(wil);
 	rtnl_lock();
 	wil_p2p_wdev_free(wil);
@@ -386,13 +373,15 @@ static const struct pci_device_id wil6210_pcie_ids[] = {
 MODULE_DEVICE_TABLE(pci, wil6210_pcie_ids);
 
 #ifdef CONFIG_PM
-#ifdef CONFIG_PM_SLEEP
 
 static int wil6210_suspend(struct device *dev, bool is_runtime)
 {
 	int rc = 0;
 	struct pci_dev *pdev = to_pci_dev(dev);
 	struct wil6210_priv *wil = pci_get_drvdata(pdev);
+	struct net_device *ndev = wil_to_ndev(wil);
+	bool keep_radio_on = ndev->flags & IFF_UP &&
+			     wil->keep_radio_on_during_sleep;
 
 	wil_dbg_pm(wil, "suspend: %s\n", is_runtime ? "runtime" : "system");
 
@@ -400,16 +389,18 @@ static int wil6210_suspend(struct device *dev, bool is_runtime)
 	if (rc)
 		goto out;
 
-	rc = wil_suspend(wil, is_runtime);
+	rc = wil_suspend(wil, is_runtime, keep_radio_on);
 	if (!rc) {
-		wil->suspend_stats.successful_suspends++;
-
-		/* If platform device supports keep_radio_on_during_sleep
-		 * it will control PCIe master
+		/* In case radio stays on, platform device will control
+		 * PCIe master
 		 */
-		if (!wil->keep_radio_on_during_sleep)
+		if (!keep_radio_on) {
 			/* disable bus mastering */
 			pci_clear_master(pdev);
+			wil->suspend_stats.r_off.successful_suspends++;
+		} else {
+			wil->suspend_stats.r_on.successful_suspends++;
+		}
 	}
 out:
 	return rc;
@@ -420,23 +411,32 @@ static int wil6210_resume(struct device *dev, bool is_runtime)
 	int rc = 0;
 	struct pci_dev *pdev = to_pci_dev(dev);
 	struct wil6210_priv *wil = pci_get_drvdata(pdev);
+	struct net_device *ndev = wil_to_ndev(wil);
+	bool keep_radio_on = ndev->flags & IFF_UP &&
+			     wil->keep_radio_on_during_sleep;
 
 	wil_dbg_pm(wil, "resume: %s\n", is_runtime ? "runtime" : "system");
 
-	/* If platform device supports keep_radio_on_during_sleep it will
-	 * control PCIe master
+	/* In case radio stays on, platform device will control
+	 * PCIe master
 	 */
-	if (!wil->keep_radio_on_during_sleep)
+	if (!keep_radio_on)
 		/* allow master */
 		pci_set_master(pdev);
-	rc = wil_resume(wil, is_runtime);
+	rc = wil_resume(wil, is_runtime, keep_radio_on);
 	if (rc) {
 		wil_err(wil, "device failed to resume (%d)\n", rc);
-		wil->suspend_stats.failed_resumes++;
-		if (!wil->keep_radio_on_during_sleep)
+		if (!keep_radio_on) {
 			pci_clear_master(pdev);
+			wil->suspend_stats.r_off.failed_resumes++;
+		} else {
+			wil->suspend_stats.r_on.failed_resumes++;
+		}
 	} else {
-		wil->suspend_stats.successful_resumes++;
+		if (keep_radio_on)
+			wil->suspend_stats.r_on.successful_resumes++;
+		else
+			wil->suspend_stats.r_off.successful_resumes++;
 	}
 
 	return rc;
@@ -490,12 +490,43 @@ static int wil6210_pm_resume(struct device *dev)
 {
 	return wil6210_resume(dev, false);
 }
-#endif /* CONFIG_PM_SLEEP */
 
+static int wil6210_pm_runtime_idle(struct device *dev)
+{
+	struct pci_dev *pdev = to_pci_dev(dev);
+	struct wil6210_priv *wil = pci_get_drvdata(pdev);
+
+	wil_dbg_pm(wil, "Runtime idle\n");
+
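+	/* a non-zero (-EBUSY) result here blocks runtime suspend */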
+	return wil_can_suspend(wil, true);
+}
+
+static int wil6210_pm_runtime_resume(struct device *dev)
+{
+	return wil6210_resume(dev, true);
+}
+
+static int wil6210_pm_runtime_suspend(struct device *dev)
+{
+	struct pci_dev *pdev = to_pci_dev(dev);
+	struct wil6210_priv *wil = pci_get_drvdata(pdev);
+
+	if (test_bit(wil_status_suspended, wil->status)) {
+		wil_dbg_pm(wil, "trying to suspend while suspended\n");
+		return 1;
+	}
+
+	return wil6210_suspend(dev, true);
+}
 #endif /* CONFIG_PM */
 
 static const struct dev_pm_ops wil6210_pm_ops = {
+#ifdef CONFIG_PM
 	SET_SYSTEM_SLEEP_PM_OPS(wil6210_pm_suspend, wil6210_pm_resume)
+	SET_RUNTIME_PM_OPS(wil6210_pm_runtime_suspend,
+			   wil6210_pm_runtime_resume,
+			   wil6210_pm_runtime_idle)
+#endif /* CONFIG_PM */
 };
 
 static struct pci_driver wil6210_driver = {
diff --git a/drivers/net/wireless/ath/wil6210/pm.c b/drivers/net/wireless/ath/wil6210/pm.c
index 8f5d1b44..056b180 100644
--- a/drivers/net/wireless/ath/wil6210/pm.c
+++ b/drivers/net/wireless/ath/wil6210/pm.c
@@ -16,15 +16,30 @@
 
 #include "wil6210.h"
 #include <linux/jiffies.h>
+#include <linux/pm_runtime.h>
+
+#define WIL6210_AUTOSUSPEND_DELAY_MS (1000)
 
 int wil_can_suspend(struct wil6210_priv *wil, bool is_runtime)
 {
 	int rc = 0;
 	struct wireless_dev *wdev = wil->wdev;
 	struct net_device *ndev = wil_to_ndev(wil);
+	bool wmi_only = test_bit(WMI_FW_CAPABILITY_WMI_ONLY,
+				 wil->fw_capabilities);
 
 	wil_dbg_pm(wil, "can_suspend: %s\n", is_runtime ? "runtime" : "system");
 
+	if (wmi_only || debug_fw) {
+		wil_dbg_pm(wil, "Deny any suspend - %s mode\n",
+			   wmi_only ? "wmi_only" : "debug_fw");
+		rc = -EBUSY;
+		goto out;
+	}
+	if (is_runtime && !wil->platform_ops.suspend) {
+		rc = -EBUSY;
+		goto out;
+	}
 	if (!(ndev->flags & IFF_UP)) {
 		/* can always sleep when down */
 		wil_dbg_pm(wil, "Interface is down\n");
@@ -44,6 +59,10 @@ int wil_can_suspend(struct wil6210_priv *wil, bool is_runtime)
 	/* interface is running */
 	switch (wdev->iftype) {
 	case NL80211_IFTYPE_MONITOR:
+		wil_dbg_pm(wil, "Sniffer\n");
+		rc = -EBUSY;
+		goto out;
+	/* for STA-like interface, don't runtime suspend */
 	case NL80211_IFTYPE_STATION:
 	case NL80211_IFTYPE_P2P_CLIENT:
 		if (test_bit(wil_status_fwconnecting, wil->status)) {
@@ -51,6 +70,12 @@ int wil_can_suspend(struct wil6210_priv *wil, bool is_runtime)
 			rc = -EBUSY;
 			goto out;
 		}
+		/* Runtime pm not supported in case the interface is up */
+		if (is_runtime) {
+			wil_dbg_pm(wil, "STA-like interface\n");
+			rc = -EBUSY;
+			goto out;
+		}
 		break;
 	/* AP-like interface - can't suspend */
 	default:
@@ -158,7 +183,7 @@ static int wil_suspend_keep_radio_on(struct wil6210_priv *wil)
 					break;
 				wil_err(wil,
 					"TO waiting for idle RX, suspend failed\n");
-				wil->suspend_stats.failed_suspends++;
+				wil->suspend_stats.r_on.failed_suspends++;
 				goto resume_after_fail;
 			}
 			wil_dbg_ratelimited(wil, "rx vring is not empty -> NAPI\n");
@@ -174,7 +199,7 @@ static int wil_suspend_keep_radio_on(struct wil6210_priv *wil)
 	 */
 	if (!wil_is_wmi_idle(wil)) {
 		wil_err(wil, "suspend failed due to pending WMI events\n");
-		wil->suspend_stats.failed_suspends++;
+		wil->suspend_stats.r_on.failed_suspends++;
 		goto resume_after_fail;
 	}
 
@@ -188,7 +213,7 @@ static int wil_suspend_keep_radio_on(struct wil6210_priv *wil)
 		if (rc) {
 			wil_err(wil, "platform device failed to suspend (%d)\n",
 				rc);
-			wil->suspend_stats.failed_suspends++;
+			wil->suspend_stats.r_on.failed_suspends++;
 			wil_c(wil, RGF_USER_CLKS_CTL_0, BIT_USER_CLKS_RST_PWGD);
 			wil_unmask_irq(wil);
 			goto resume_after_fail;
@@ -235,6 +260,7 @@ static int wil_suspend_radio_off(struct wil6210_priv *wil)
 		rc = wil_down(wil);
 		if (rc) {
 			wil_err(wil, "wil_down : %d\n", rc);
+			wil->suspend_stats.r_off.failed_suspends++;
 			goto out;
 		}
 	}
@@ -247,6 +273,7 @@ static int wil_suspend_radio_off(struct wil6210_priv *wil)
 		rc = wil->platform_ops.suspend(wil->platform_handle, false);
 		if (rc) {
 			wil_enable_irq(wil);
+			wil->suspend_stats.r_off.failed_suspends++;
 			goto out;
 		}
 	}
@@ -279,12 +306,9 @@ static int wil_resume_radio_off(struct wil6210_priv *wil)
 	return rc;
 }
 
-int wil_suspend(struct wil6210_priv *wil, bool is_runtime)
+int wil_suspend(struct wil6210_priv *wil, bool is_runtime, bool keep_radio_on)
 {
 	int rc = 0;
-	struct net_device *ndev = wil_to_ndev(wil);
-	bool keep_radio_on = ndev->flags & IFF_UP &&
-			     wil->keep_radio_on_during_sleep;
 
 	wil_dbg_pm(wil, "suspend: %s\n", is_runtime ? "runtime" : "system");
 
@@ -301,19 +325,12 @@ int wil_suspend(struct wil6210_priv *wil, bool is_runtime)
 	wil_dbg_pm(wil, "suspend: %s => %d\n",
 		   is_runtime ? "runtime" : "system", rc);
 
-	if (!rc)
-		wil->suspend_stats.suspend_start_time = ktime_get();
-
 	return rc;
 }
 
-int wil_resume(struct wil6210_priv *wil, bool is_runtime)
+int wil_resume(struct wil6210_priv *wil, bool is_runtime, bool keep_radio_on)
 {
 	int rc = 0;
-	struct net_device *ndev = wil_to_ndev(wil);
-	bool keep_radio_on = ndev->flags & IFF_UP &&
-			     wil->keep_radio_on_during_sleep;
-	unsigned long long suspend_time_usec = 0;
 
 	wil_dbg_pm(wil, "resume: %s\n", is_runtime ? "runtime" : "system");
 
@@ -331,20 +348,49 @@ int wil_resume(struct wil6210_priv *wil, bool is_runtime)
 	else
 		rc = wil_resume_radio_off(wil);
 
-	if (rc)
-		goto out;
-
-	suspend_time_usec =
-		ktime_to_us(ktime_sub(ktime_get(),
-				      wil->suspend_stats.suspend_start_time));
-	wil->suspend_stats.total_suspend_time += suspend_time_usec;
-	if (suspend_time_usec < wil->suspend_stats.min_suspend_time)
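+	/* the stop/abort helpers below expect these mutexes to be held */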
-		wil->suspend_stats.min_suspend_time = suspend_time_usec;
-	if (suspend_time_usec > wil->suspend_stats.max_suspend_time)
-		wil->suspend_stats.max_suspend_time = suspend_time_usec;
-
 out:
-	wil_dbg_pm(wil, "resume: %s => %d, suspend time %lld usec\n",
-		   is_runtime ? "runtime" : "system", rc, suspend_time_usec);
+	wil_dbg_pm(wil, "resume: %s => %d\n", is_runtime ? "runtime" : "system",
+		   rc);
 	return rc;
 }
+
+void wil_pm_runtime_allow(struct wil6210_priv *wil)
+{
+	struct device *dev = wil_to_dev(wil);
+
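+	/* drop the usage reference taken by the PCI core at probe time */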
+	pm_runtime_put_noidle(dev);
+	pm_runtime_set_autosuspend_delay(dev, WIL6210_AUTOSUSPEND_DELAY_MS);
+	pm_runtime_use_autosuspend(dev);
+	pm_runtime_allow(dev);
+}
+
+void wil_pm_runtime_forbid(struct wil6210_priv *wil)
+{
+	struct device *dev = wil_to_dev(wil);
+
+	pm_runtime_forbid(dev);
+	pm_runtime_get_noresume(dev);
+}
+
+int wil_pm_runtime_get(struct wil6210_priv *wil)
+{
+	int rc;
+	struct device *dev = wil_to_dev(wil);
+
+	rc = pm_runtime_get_sync(dev);
+	if (rc < 0) {
+		wil_err(wil, "pm_runtime_get_sync() failed, rc = %d\n", rc);
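+		/* pm_runtime_get_sync() bumps the usage count even on failure */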
+		pm_runtime_put_noidle(dev);
+		return rc;
+	}
+
+	return 0;
+}
+
+void wil_pm_runtime_put(struct wil6210_priv *wil)
+{
+	struct device *dev = wil_to_dev(wil);
+
+	pm_runtime_mark_last_busy(dev);
+	pm_runtime_put_autosuspend(dev);
+}
diff --git a/drivers/net/wireless/ath/wil6210/wil6210.h b/drivers/net/wireless/ath/wil6210/wil6210.h
index 1e340d0..cf27d97 100644
--- a/drivers/net/wireless/ath/wil6210/wil6210.h
+++ b/drivers/net/wireless/ath/wil6210/wil6210.h
@@ -82,18 +82,18 @@ static inline u32 WIL_GET_BITS(u32 x, int b0, int b1)
  */
 #define WIL_MAX_MPDU_OVERHEAD	(62)
 
-struct wil_suspend_stats {
+struct wil_suspend_count_stats {
 	unsigned long successful_suspends;
-	unsigned long failed_suspends;
 	unsigned long successful_resumes;
+	unsigned long failed_suspends;
 	unsigned long failed_resumes;
-	unsigned long rejected_by_device;
+};
+
+struct wil_suspend_stats {
+	struct wil_suspend_count_stats r_off;
+	struct wil_suspend_count_stats r_on;
+	unsigned long rejected_by_device; /* tracked only for radio-on suspend */
 	unsigned long rejected_by_host;
-	unsigned long long total_suspend_time;
-	unsigned long long min_suspend_time;
-	unsigned long long max_suspend_time;
-	ktime_t collection_start;
-	ktime_t suspend_start_time;
 };
 
 /* Calculate MAC buffer size for the firmware. It includes all overhead,
@@ -616,6 +616,16 @@ struct blink_on_off_time {
 	u32 off_ms;
 };
 
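+/* context for a debugfs iomem register file: register offset and driver handle */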
+struct wil_debugfs_iomem_data {
+	void *offset;
+	struct wil6210_priv *wil;
+};
+
+struct wil_debugfs_data {
+	struct wil_debugfs_iomem_data *data_arr;
+	int iomem_data_count;
+};
+
 extern struct blink_on_off_time led_blink_time[WIL_LED_TIME_LAST];
 extern u8 led_id;
 extern u8 led_polarity;
@@ -708,6 +718,7 @@ struct wil6210_priv {
 	u8 abft_len;
 	u8 wakeup_trigger;
 	struct wil_suspend_stats suspend_stats;
+	struct wil_debugfs_data dbg_data;
 
 	void *platform_handle;
 	struct wil_platform_ops platform_ops;
@@ -732,9 +743,7 @@ struct wil6210_priv {
 	int fw_calib_result;
 
 #ifdef CONFIG_PM
-#ifdef CONFIG_PM_SLEEP
 	struct notifier_block pm_notify;
-#endif /* CONFIG_PM_SLEEP */
 #endif /* CONFIG_PM */
 
 	bool suspend_resp_rcvd;
@@ -861,10 +870,12 @@ int wil_up(struct wil6210_priv *wil);
 int __wil_up(struct wil6210_priv *wil);
 int wil_down(struct wil6210_priv *wil);
 int __wil_down(struct wil6210_priv *wil);
+void wil_refresh_fw_capabilities(struct wil6210_priv *wil);
 void wil_mbox_ring_le2cpus(struct wil6210_mbox_ring *r);
 int wil_find_cid(struct wil6210_priv *wil, const u8 *mac);
 void wil_set_ethtoolops(struct net_device *ndev);
 
+void __iomem *wmi_buffer_block(struct wil6210_priv *wil, __le32 ptr, u32 size);
 void __iomem *wmi_buffer(struct wil6210_priv *wil, __le32 ptr);
 void __iomem *wmi_addr(struct wil6210_priv *wil, u32 ptr);
 int wmi_read_hdr(struct wil6210_priv *wil, __le32 ptr,
@@ -999,9 +1010,14 @@ int wil_request_firmware(struct wil6210_priv *wil, const char *name,
 			 bool load);
 bool wil_fw_verify_file_exists(struct wil6210_priv *wil, const char *name);
 
+void wil_pm_runtime_allow(struct wil6210_priv *wil);
+void wil_pm_runtime_forbid(struct wil6210_priv *wil);
+int wil_pm_runtime_get(struct wil6210_priv *wil);
+void wil_pm_runtime_put(struct wil6210_priv *wil);
+
 int wil_can_suspend(struct wil6210_priv *wil, bool is_runtime);
-int wil_suspend(struct wil6210_priv *wil, bool is_runtime);
-int wil_resume(struct wil6210_priv *wil, bool is_runtime);
+int wil_suspend(struct wil6210_priv *wil, bool is_runtime, bool keep_radio_on);
+int wil_resume(struct wil6210_priv *wil, bool is_runtime, bool keep_radio_on);
 bool wil_is_wmi_idle(struct wil6210_priv *wil);
 int wmi_resume(struct wil6210_priv *wil);
 int wmi_suspend(struct wil6210_priv *wil);
diff --git a/drivers/net/wireless/ath/wil6210/wmi.c b/drivers/net/wireless/ath/wil6210/wmi.c
index ffdd2fa..8ace618 100644
--- a/drivers/net/wireless/ath/wil6210/wmi.c
+++ b/drivers/net/wireless/ath/wil6210/wmi.c
@@ -140,13 +140,15 @@ static u32 wmi_addr_remap(u32 x)
 /**
  * Check address validity for WMI buffer; remap if needed
  * @ptr - internal (linker) fw/ucode address
+ * @size - if non-zero, validate that the block does not
+ *  extend beyond the device memory (BAR)
  *
  * Valid buffer should be DWORD aligned
  *
  * return address for accessing buffer from the host;
  * if buffer is not valid, return NULL.
  */
-void __iomem *wmi_buffer(struct wil6210_priv *wil, __le32 ptr_)
+void __iomem *wmi_buffer_block(struct wil6210_priv *wil, __le32 ptr_, u32 size)
 {
 	u32 off;
 	u32 ptr = le32_to_cpu(ptr_);
@@ -161,10 +163,17 @@ void __iomem *wmi_buffer(struct wil6210_priv *wil, __le32 ptr_)
 	off = HOSTADDR(ptr);
 	if (off > wil->bar_size - 4)
 		return NULL;
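+	/* with a size, also reject blocks past the BAR end or u32 wrap-around */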
+	if (size && ((off + size > wil->bar_size) || (off + size < off)))
+		return NULL;
 
 	return wil->csr + off;
 }
 
+void __iomem *wmi_buffer(struct wil6210_priv *wil, __le32 ptr_)
+{
+	return wmi_buffer_block(wil, ptr_, 0);
+}
+
 /**
  * Check address validity
  */
@@ -198,6 +207,232 @@ int wmi_read_hdr(struct wil6210_priv *wil, __le32 ptr,
 	return 0;
 }
 
+static const char *cmdid2name(u16 cmdid)
+{
+	switch (cmdid) {
+	case WMI_NOTIFY_REQ_CMDID:
+		return "WMI_NOTIFY_REQ_CMD";
+	case WMI_START_SCAN_CMDID:
+		return "WMI_START_SCAN_CMD";
+	case WMI_CONNECT_CMDID:
+		return "WMI_CONNECT_CMD";
+	case WMI_DISCONNECT_CMDID:
+		return "WMI_DISCONNECT_CMD";
+	case WMI_SW_TX_REQ_CMDID:
+		return "WMI_SW_TX_REQ_CMD";
+	case WMI_GET_RF_SECTOR_PARAMS_CMDID:
+		return "WMI_GET_RF_SECTOR_PARAMS_CMD";
+	case WMI_SET_RF_SECTOR_PARAMS_CMDID:
+		return "WMI_SET_RF_SECTOR_PARAMS_CMD";
+	case WMI_GET_SELECTED_RF_SECTOR_INDEX_CMDID:
+		return "WMI_GET_SELECTED_RF_SECTOR_INDEX_CMD";
+	case WMI_SET_SELECTED_RF_SECTOR_INDEX_CMDID:
+		return "WMI_SET_SELECTED_RF_SECTOR_INDEX_CMD";
+	case WMI_BRP_SET_ANT_LIMIT_CMDID:
+		return "WMI_BRP_SET_ANT_LIMIT_CMD";
+	case WMI_TOF_SESSION_START_CMDID:
+		return "WMI_TOF_SESSION_START_CMD";
+	case WMI_AOA_MEAS_CMDID:
+		return "WMI_AOA_MEAS_CMD";
+	case WMI_PMC_CMDID:
+		return "WMI_PMC_CMD";
+	case WMI_TOF_GET_TX_RX_OFFSET_CMDID:
+		return "WMI_TOF_GET_TX_RX_OFFSET_CMD";
+	case WMI_TOF_SET_TX_RX_OFFSET_CMDID:
+		return "WMI_TOF_SET_TX_RX_OFFSET_CMD";
+	case WMI_VRING_CFG_CMDID:
+		return "WMI_VRING_CFG_CMD";
+	case WMI_BCAST_VRING_CFG_CMDID:
+		return "WMI_BCAST_VRING_CFG_CMD";
+	case WMI_TRAFFIC_SUSPEND_CMDID:
+		return "WMI_TRAFFIC_SUSPEND_CMD";
+	case WMI_TRAFFIC_RESUME_CMDID:
+		return "WMI_TRAFFIC_RESUME_CMD";
+	case WMI_ECHO_CMDID:
+		return "WMI_ECHO_CMD";
+	case WMI_SET_MAC_ADDRESS_CMDID:
+		return "WMI_SET_MAC_ADDRESS_CMD";
+	case WMI_LED_CFG_CMDID:
+		return "WMI_LED_CFG_CMD";
+	case WMI_PCP_START_CMDID:
+		return "WMI_PCP_START_CMD";
+	case WMI_PCP_STOP_CMDID:
+		return "WMI_PCP_STOP_CMD";
+	case WMI_SET_SSID_CMDID:
+		return "WMI_SET_SSID_CMD";
+	case WMI_GET_SSID_CMDID:
+		return "WMI_GET_SSID_CMD";
+	case WMI_SET_PCP_CHANNEL_CMDID:
+		return "WMI_SET_PCP_CHANNEL_CMD";
+	case WMI_GET_PCP_CHANNEL_CMDID:
+		return "WMI_GET_PCP_CHANNEL_CMD";
+	case WMI_P2P_CFG_CMDID:
+		return "WMI_P2P_CFG_CMD";
+	case WMI_START_LISTEN_CMDID:
+		return "WMI_START_LISTEN_CMD";
+	case WMI_START_SEARCH_CMDID:
+		return "WMI_START_SEARCH_CMD";
+	case WMI_DISCOVERY_STOP_CMDID:
+		return "WMI_DISCOVERY_STOP_CMD";
+	case WMI_DELETE_CIPHER_KEY_CMDID:
+		return "WMI_DELETE_CIPHER_KEY_CMD";
+	case WMI_ADD_CIPHER_KEY_CMDID:
+		return "WMI_ADD_CIPHER_KEY_CMD";
+	case WMI_SET_APPIE_CMDID:
+		return "WMI_SET_APPIE_CMD";
+	case WMI_CFG_RX_CHAIN_CMDID:
+		return "WMI_CFG_RX_CHAIN_CMD";
+	case WMI_TEMP_SENSE_CMDID:
+		return "WMI_TEMP_SENSE_CMD";
+	case WMI_DEL_STA_CMDID:
+		return "WMI_DEL_STA_CMD";
+	case WMI_DISCONNECT_STA_CMDID:
+		return "WMI_DISCONNECT_STA_CMD";
+	case WMI_VRING_BA_EN_CMDID:
+		return "WMI_VRING_BA_EN_CMD";
+	case WMI_VRING_BA_DIS_CMDID:
+		return "WMI_VRING_BA_DIS_CMD";
+	case WMI_RCP_DELBA_CMDID:
+		return "WMI_RCP_DELBA_CMD";
+	case WMI_RCP_ADDBA_RESP_CMDID:
+		return "WMI_RCP_ADDBA_RESP_CMD";
+	case WMI_PS_DEV_PROFILE_CFG_CMDID:
+		return "WMI_PS_DEV_PROFILE_CFG_CMD";
+	case WMI_SET_MGMT_RETRY_LIMIT_CMDID:
+		return "WMI_SET_MGMT_RETRY_LIMIT_CMD";
+	case WMI_GET_MGMT_RETRY_LIMIT_CMDID:
+		return "WMI_GET_MGMT_RETRY_LIMIT_CMD";
+	case WMI_ABORT_SCAN_CMDID:
+		return "WMI_ABORT_SCAN_CMD";
+	case WMI_NEW_STA_CMDID:
+		return "WMI_NEW_STA_CMD";
+	case WMI_SET_THERMAL_THROTTLING_CFG_CMDID:
+		return "WMI_SET_THERMAL_THROTTLING_CFG_CMD";
+	case WMI_GET_THERMAL_THROTTLING_CFG_CMDID:
+		return "WMI_GET_THERMAL_THROTTLING_CFG_CMD";
+	case WMI_LINK_MAINTAIN_CFG_WRITE_CMDID:
+		return "WMI_LINK_MAINTAIN_CFG_WRITE_CMD";
+	case WMI_LO_POWER_CALIB_FROM_OTP_CMDID:
+		return "WMI_LO_POWER_CALIB_FROM_OTP_CMD";
+	default:
+		return "Untracked CMD";
+	}
+}
+
+static const char *eventid2name(u16 eventid)
+{
+	switch (eventid) {
+	case WMI_NOTIFY_REQ_DONE_EVENTID:
+		return "WMI_NOTIFY_REQ_DONE_EVENT";
+	case WMI_DISCONNECT_EVENTID:
+		return "WMI_DISCONNECT_EVENT";
+	case WMI_SW_TX_COMPLETE_EVENTID:
+		return "WMI_SW_TX_COMPLETE_EVENT";
+	case WMI_GET_RF_SECTOR_PARAMS_DONE_EVENTID:
+		return "WMI_GET_RF_SECTOR_PARAMS_DONE_EVENT";
+	case WMI_SET_RF_SECTOR_PARAMS_DONE_EVENTID:
+		return "WMI_SET_RF_SECTOR_PARAMS_DONE_EVENT";
+	case WMI_GET_SELECTED_RF_SECTOR_INDEX_DONE_EVENTID:
+		return "WMI_GET_SELECTED_RF_SECTOR_INDEX_DONE_EVENT";
+	case WMI_SET_SELECTED_RF_SECTOR_INDEX_DONE_EVENTID:
+		return "WMI_SET_SELECTED_RF_SECTOR_INDEX_DONE_EVENT";
+	case WMI_BRP_SET_ANT_LIMIT_EVENTID:
+		return "WMI_BRP_SET_ANT_LIMIT_EVENT";
+	case WMI_FW_READY_EVENTID:
+		return "WMI_FW_READY_EVENT";
+	case WMI_TRAFFIC_RESUME_EVENTID:
+		return "WMI_TRAFFIC_RESUME_EVENT";
+	case WMI_TOF_GET_TX_RX_OFFSET_EVENTID:
+		return "WMI_TOF_GET_TX_RX_OFFSET_EVENT";
+	case WMI_TOF_SET_TX_RX_OFFSET_EVENTID:
+		return "WMI_TOF_SET_TX_RX_OFFSET_EVENT";
+	case WMI_VRING_CFG_DONE_EVENTID:
+		return "WMI_VRING_CFG_DONE_EVENT";
+	case WMI_READY_EVENTID:
+		return "WMI_READY_EVENT";
+	case WMI_RX_MGMT_PACKET_EVENTID:
+		return "WMI_RX_MGMT_PACKET_EVENT";
+	case WMI_TX_MGMT_PACKET_EVENTID:
+		return "WMI_TX_MGMT_PACKET_EVENT";
+	case WMI_SCAN_COMPLETE_EVENTID:
+		return "WMI_SCAN_COMPLETE_EVENT";
+	case WMI_ACS_PASSIVE_SCAN_COMPLETE_EVENTID:
+		return "WMI_ACS_PASSIVE_SCAN_COMPLETE_EVENT";
+	case WMI_CONNECT_EVENTID:
+		return "WMI_CONNECT_EVENT";
+	case WMI_EAPOL_RX_EVENTID:
+		return "WMI_EAPOL_RX_EVENT";
+	case WMI_BA_STATUS_EVENTID:
+		return "WMI_BA_STATUS_EVENT";
+	case WMI_RCP_ADDBA_REQ_EVENTID:
+		return "WMI_RCP_ADDBA_REQ_EVENT";
+	case WMI_DELBA_EVENTID:
+		return "WMI_DELBA_EVENT";
+	case WMI_VRING_EN_EVENTID:
+		return "WMI_VRING_EN_EVENT";
+	case WMI_DATA_PORT_OPEN_EVENTID:
+		return "WMI_DATA_PORT_OPEN_EVENT";
+	case WMI_AOA_MEAS_EVENTID:
+		return "WMI_AOA_MEAS_EVENT";
+	case WMI_TOF_SESSION_END_EVENTID:
+		return "WMI_TOF_SESSION_END_EVENT";
+	case WMI_TOF_GET_CAPABILITIES_EVENTID:
+		return "WMI_TOF_GET_CAPABILITIES_EVENT";
+	case WMI_TOF_SET_LCR_EVENTID:
+		return "WMI_TOF_SET_LCR_EVENT";
+	case WMI_TOF_SET_LCI_EVENTID:
+		return "WMI_TOF_SET_LCI_EVENT";
+	case WMI_TOF_FTM_PER_DEST_RES_EVENTID:
+		return "WMI_TOF_FTM_PER_DEST_RES_EVENT";
+	case WMI_TOF_CHANNEL_INFO_EVENTID:
+		return "WMI_TOF_CHANNEL_INFO_EVENT";
+	case WMI_TRAFFIC_SUSPEND_EVENTID:
+		return "WMI_TRAFFIC_SUSPEND_EVENT";
+	case WMI_ECHO_RSP_EVENTID:
+		return "WMI_ECHO_RSP_EVENT";
+	case WMI_LED_CFG_DONE_EVENTID:
+		return "WMI_LED_CFG_DONE_EVENT";
+	case WMI_PCP_STARTED_EVENTID:
+		return "WMI_PCP_STARTED_EVENT";
+	case WMI_PCP_STOPPED_EVENTID:
+		return "WMI_PCP_STOPPED_EVENT";
+	case WMI_GET_SSID_EVENTID:
+		return "WMI_GET_SSID_EVENT";
+	case WMI_GET_PCP_CHANNEL_EVENTID:
+		return "WMI_GET_PCP_CHANNEL_EVENT";
+	case WMI_P2P_CFG_DONE_EVENTID:
+		return "WMI_P2P_CFG_DONE_EVENT";
+	case WMI_LISTEN_STARTED_EVENTID:
+		return "WMI_LISTEN_STARTED_EVENT";
+	case WMI_SEARCH_STARTED_EVENTID:
+		return "WMI_SEARCH_STARTED_EVENT";
+	case WMI_DISCOVERY_STOPPED_EVENTID:
+		return "WMI_DISCOVERY_STOPPED_EVENT";
+	case WMI_CFG_RX_CHAIN_DONE_EVENTID:
+		return "WMI_CFG_RX_CHAIN_DONE_EVENT";
+	case WMI_TEMP_SENSE_DONE_EVENTID:
+		return "WMI_TEMP_SENSE_DONE_EVENT";
+	case WMI_RCP_ADDBA_RESP_SENT_EVENTID:
+		return "WMI_RCP_ADDBA_RESP_SENT_EVENT";
+	case WMI_PS_DEV_PROFILE_CFG_EVENTID:
+		return "WMI_PS_DEV_PROFILE_CFG_EVENT";
+	case WMI_SET_MGMT_RETRY_LIMIT_EVENTID:
+		return "WMI_SET_MGMT_RETRY_LIMIT_EVENT";
+	case WMI_GET_MGMT_RETRY_LIMIT_EVENTID:
+		return "WMI_GET_MGMT_RETRY_LIMIT_EVENT";
+	case WMI_SET_THERMAL_THROTTLING_CFG_EVENTID:
+		return "WMI_SET_THERMAL_THROTTLING_CFG_EVENT";
+	case WMI_GET_THERMAL_THROTTLING_CFG_EVENTID:
+		return "WMI_GET_THERMAL_THROTTLING_CFG_EVENT";
+	case WMI_LINK_MAINTAIN_CFG_WRITE_DONE_EVENTID:
+		return "WMI_LINK_MAINTAIN_CFG_WRITE_DONE_EVENT";
+	case WMI_LO_POWER_CALIB_FROM_OTP_EVENTID:
+		return "WMI_LO_POWER_CALIB_FROM_OTP_EVENT";
+	default:
+		return "Untracked EVENT";
+	}
+}
+
 static int __wmi_send(struct wil6210_priv *wil, u16 cmdid, void *buf, u16 len)
 {
 	struct {
@@ -222,7 +457,7 @@ static int __wmi_send(struct wil6210_priv *wil, u16 cmdid, void *buf, u16 len)
 	uint retry;
 	int rc = 0;
 
-	if (sizeof(cmd) + len > r->entry_size) {
+	if (len > r->entry_size - sizeof(cmd)) {
 		wil_err(wil, "WMI size too large: %d bytes, max is %d\n",
 			(int)(sizeof(cmd) + len), r->entry_size);
 		return -ERANGE;
@@ -294,7 +529,8 @@ static int __wmi_send(struct wil6210_priv *wil, u16 cmdid, void *buf, u16 len)
 	}
 	cmd.hdr.seq = cpu_to_le16(++wil->wmi_seq);
 	/* set command */
-	wil_dbg_wmi(wil, "WMI command 0x%04x [%d]\n", cmdid, len);
+	wil_dbg_wmi(wil, "sending %s (0x%04x) [%d]\n",
+		    cmdid2name(cmdid), cmdid, len);
 	wil_hex_dump_wmi("Cmd ", DUMP_PREFIX_OFFSET, 16, 1, &cmd,
 			 sizeof(cmd), true);
 	wil_hex_dump_wmi("cmd ", DUMP_PREFIX_OFFSET, 16, 1, buf,
@@ -963,8 +1199,8 @@ void wmi_recv_cmd(struct wil6210_priv *wil)
 			}
 			spin_unlock_irqrestore(&wil->wmi_ev_lock, flags);
 
-			wil_dbg_wmi(wil, "WMI event 0x%04x MID %d @%d msec\n",
-				    id, wmi->mid, tstamp);
+			wil_dbg_wmi(wil, "recv %s (0x%04x) MID %d @%d msec\n",
+				    eventid2name(id), id, wmi->mid, tstamp);
 			trace_wil6210_wmi_event(wmi, &wmi[1],
 						len - sizeof(*wmi));
 		}
@@ -1380,8 +1616,14 @@ int wmi_set_ie(struct wil6210_priv *wil, u8 type, u16 ie_len, const void *ie)
 	};
 	int rc;
 	u16 len = sizeof(struct wmi_set_appie_cmd) + ie_len;
-	struct wmi_set_appie_cmd *cmd = kzalloc(len, GFP_KERNEL);
+	struct wmi_set_appie_cmd *cmd;
 
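+	/* len is u16: if the addition wrapped, len is now smaller than ie_len */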
+	if (len < ie_len) {
+		rc = -EINVAL;
+		goto out;
+	}
+
+	cmd = kzalloc(len, GFP_KERNEL);
 	if (!cmd) {
 		rc = -ENOMEM;
 		goto out;
@@ -1801,6 +2043,16 @@ void wmi_event_flush(struct wil6210_priv *wil)
 	spin_unlock_irqrestore(&wil->wmi_ev_lock, flags);
 }
 
+static const char *suspend_status2name(u8 status)
+{
+	switch (status) {
+	case WMI_TRAFFIC_SUSPEND_REJECTED_LINK_NOT_IDLE:
+		return "LINK_NOT_IDLE";
+	default:
+		return "Untracked status";
+	}
+}
+
 int wmi_suspend(struct wil6210_priv *wil)
 {
 	int rc;
@@ -1816,7 +2068,7 @@ int wmi_suspend(struct wil6210_priv *wil)
 	wil->suspend_resp_rcvd = false;
 	wil->suspend_resp_comp = false;
 
-	reply.evt.status = WMI_TRAFFIC_SUSPEND_REJECTED;
+	reply.evt.status = WMI_TRAFFIC_SUSPEND_REJECTED_LINK_NOT_IDLE;
 
 	rc = wmi_call(wil, WMI_TRAFFIC_SUSPEND_CMDID, &cmd, sizeof(cmd),
 		      WMI_TRAFFIC_SUSPEND_EVENTID, &reply, sizeof(reply),
@@ -1848,8 +2100,9 @@ int wmi_suspend(struct wil6210_priv *wil)
 	}
 
 	wil_dbg_wmi(wil, "suspend_response_completed rcvd\n");
-	if (reply.evt.status == WMI_TRAFFIC_SUSPEND_REJECTED) {
-		wil_dbg_pm(wil, "device rejected the suspend\n");
+	if (reply.evt.status != WMI_TRAFFIC_SUSPEND_APPROVED) {
+		wil_dbg_pm(wil, "device rejected the suspend, %s\n",
+			   suspend_status2name(reply.evt.status));
 		wil->suspend_stats.rejected_by_device++;
 	}
 	rc = reply.evt.status;
@@ -1861,21 +2114,50 @@ int wmi_suspend(struct wil6210_priv *wil)
 	return rc;
 }
 
+static void resume_triggers2string(u32 triggers, char *string, int str_size)
+{
+	string[0] = '\0';
+
+	if (!triggers) {
+		strlcat(string, " UNKNOWN", str_size);
+		return;
+	}
+
+	if (triggers & WMI_RESUME_TRIGGER_HOST)
+		strlcat(string, " HOST", str_size);
+
+	if (triggers & WMI_RESUME_TRIGGER_UCAST_RX)
+		strlcat(string, " UCAST_RX", str_size);
+
+	if (triggers & WMI_RESUME_TRIGGER_BCAST_RX)
+		strlcat(string, " BCAST_RX", str_size);
+
+	if (triggers & WMI_RESUME_TRIGGER_WMI_EVT)
+		strlcat(string, " WMI_EVT", str_size);
+}
+
 int wmi_resume(struct wil6210_priv *wil)
 {
 	int rc;
+	char string[100];
 	struct {
 		struct wmi_cmd_hdr wmi;
 		struct wmi_traffic_resume_event evt;
 	} __packed reply;
 
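+	/* preset the reply in case the event carries less data */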
 	reply.evt.status = WMI_TRAFFIC_RESUME_FAILED;
+	reply.evt.resume_triggers = WMI_RESUME_TRIGGER_UNKNOWN;
 
 	rc = wmi_call(wil, WMI_TRAFFIC_RESUME_CMDID, NULL, 0,
 		      WMI_TRAFFIC_RESUME_EVENTID, &reply, sizeof(reply),
 		      WIL_WAIT_FOR_SUSPEND_RESUME_COMP);
 	if (rc)
 		return rc;
+	resume_triggers2string(le32_to_cpu(reply.evt.resume_triggers), string,
+			       sizeof(string));
+	wil_dbg_pm(wil, "device resume %s, resume triggers:%s (0x%x)\n",
+		   reply.evt.status ? "failed" : "passed", string,
+		   le32_to_cpu(reply.evt.resume_triggers));
 
 	return reply.evt.status;
 }
@@ -1906,8 +2188,8 @@ static void wmi_event_handle(struct wil6210_priv *wil,
 		void *evt_data = (void *)(&wmi[1]);
 		u16 id = le16_to_cpu(wmi->command_id);
 
-		wil_dbg_wmi(wil, "Handle WMI 0x%04x (reply_id 0x%04x)\n",
-			    id, wil->reply_id);
+		wil_dbg_wmi(wil, "Handle %s (0x%04x) (reply_id 0x%04x)\n",
+			    eventid2name(id), id, wil->reply_id);
 		/* check if someone waits for this event */
 		if (wil->reply_id && wil->reply_id == id) {
 			WARN_ON(wil->reply_buf);
diff --git a/drivers/net/wireless/ath/wil6210/wmi.h b/drivers/net/wireless/ath/wil6210/wmi.h
index 5263ee71..d9e220a 100644
--- a/drivers/net/wireless/ath/wil6210/wmi.h
+++ b/drivers/net/wireless/ath/wil6210/wmi.h
@@ -2267,8 +2267,8 @@ struct wmi_link_maintain_cfg_read_done_event {
 } __packed;
 
 enum wmi_traffic_suspend_status {
-	WMI_TRAFFIC_SUSPEND_APPROVED	= 0x0,
-	WMI_TRAFFIC_SUSPEND_REJECTED	= 0x1,
+	WMI_TRAFFIC_SUSPEND_APPROVED			= 0x0,
+	WMI_TRAFFIC_SUSPEND_REJECTED_LINK_NOT_IDLE	= 0x1,
 };
 
 /* WMI_TRAFFIC_SUSPEND_EVENTID */
@@ -2282,10 +2282,21 @@ enum wmi_traffic_resume_status {
 	WMI_TRAFFIC_RESUME_FAILED	= 0x1,
 };
 
+enum wmi_resume_trigger {
+	WMI_RESUME_TRIGGER_UNKNOWN	= 0x0,
+	WMI_RESUME_TRIGGER_HOST		= 0x1,
+	WMI_RESUME_TRIGGER_UCAST_RX	= 0x2,
+	WMI_RESUME_TRIGGER_BCAST_RX	= 0x4,
+	WMI_RESUME_TRIGGER_WMI_EVT	= 0x8,
+};
+
 /* WMI_TRAFFIC_RESUME_EVENTID */
 struct wmi_traffic_resume_event {
-	/* enum wmi_traffic_resume_status_e */
+	/* enum wmi_traffic_resume_status */
 	u8 status;
+	u8 reserved[3];
+	/* enum wmi_resume_trigger bitmap */
+	__le32 resume_triggers;
 } __packed;
 
 /* Power Save command completion status codes */
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c
index cd58732..f8b47c1 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c
@@ -137,27 +137,27 @@ int brcmf_sdiod_intr_register(struct brcmf_sdio_dev *sdiodev)
 		if (sdiodev->bus_if->chip == BRCM_CC_43362_CHIP_ID) {
 			/* assign GPIO to SDIO core */
 			addr = CORE_CC_REG(SI_ENUM_BASE, gpiocontrol);
-			gpiocontrol = brcmf_sdiod_regrl(sdiodev, addr, &ret);
+			gpiocontrol = brcmf_sdiod_readl(sdiodev, addr, &ret);
 			gpiocontrol |= 0x2;
-			brcmf_sdiod_regwl(sdiodev, addr, gpiocontrol, &ret);
+			brcmf_sdiod_writel(sdiodev, addr, gpiocontrol, &ret);
 
-			brcmf_sdiod_regwb(sdiodev, SBSDIO_GPIO_SELECT, 0xf,
-					  &ret);
-			brcmf_sdiod_regwb(sdiodev, SBSDIO_GPIO_OUT, 0, &ret);
-			brcmf_sdiod_regwb(sdiodev, SBSDIO_GPIO_EN, 0x2, &ret);
+			brcmf_sdiod_writeb(sdiodev, SBSDIO_GPIO_SELECT,
+					   0xf, &ret);
+			brcmf_sdiod_writeb(sdiodev, SBSDIO_GPIO_OUT, 0, &ret);
+			brcmf_sdiod_writeb(sdiodev, SBSDIO_GPIO_EN, 0x2, &ret);
 		}
 
 		/* must configure SDIO_CCCR_IENx to enable irq */
-		data = brcmf_sdiod_regrb(sdiodev, SDIO_CCCR_IENx, &ret);
+		data = brcmf_sdiod_func0_rb(sdiodev, SDIO_CCCR_IENx, &ret);
 		data |= 1 << SDIO_FUNC_1 | 1 << SDIO_FUNC_2 | 1;
-		brcmf_sdiod_regwb(sdiodev, SDIO_CCCR_IENx, data, &ret);
+		brcmf_sdiod_func0_wb(sdiodev, SDIO_CCCR_IENx, data, &ret);
 
 		/* redirect, configure and enable io for interrupt signal */
-		data = SDIO_SEPINT_MASK | SDIO_SEPINT_OE;
+		data = SDIO_CCCR_BRCM_SEPINT_MASK | SDIO_CCCR_BRCM_SEPINT_OE;
 		if (pdata->oob_irq_flags & IRQF_TRIGGER_HIGH)
-			data |= SDIO_SEPINT_ACT_HI;
-		brcmf_sdiod_regwb(sdiodev, SDIO_CCCR_BRCM_SEPINT, data, &ret);
-
+			data |= SDIO_CCCR_BRCM_SEPINT_ACT_HI;
+		brcmf_sdiod_func0_wb(sdiodev, SDIO_CCCR_BRCM_SEPINT,
+				     data, &ret);
 		sdio_release_host(sdiodev->func[1]);
 	} else {
 		brcmf_dbg(SDIO, "Entering\n");
@@ -183,8 +183,8 @@ void brcmf_sdiod_intr_unregister(struct brcmf_sdio_dev *sdiodev)
 
 		pdata = &sdiodev->settings->bus.sdio;
 		sdio_claim_host(sdiodev->func[1]);
-		brcmf_sdiod_regwb(sdiodev, SDIO_CCCR_BRCM_SEPINT, 0, NULL);
-		brcmf_sdiod_regwb(sdiodev, SDIO_CCCR_IENx, 0, NULL);
+		brcmf_sdiod_func0_wb(sdiodev, SDIO_CCCR_BRCM_SEPINT, 0, NULL);
+		brcmf_sdiod_func0_wb(sdiodev, SDIO_CCCR_IENx, 0, NULL);
 		sdio_release_host(sdiodev->func[1]);
 
 		sdiodev->oob_irq_requested = false;
@@ -230,244 +230,69 @@ void brcmf_sdiod_change_state(struct brcmf_sdio_dev *sdiodev,
 	sdiodev->state = state;
 }
 
-static inline int brcmf_sdiod_f0_writeb(struct sdio_func *func,
-					uint regaddr, u8 byte)
+static int brcmf_sdiod_set_backplane_window(struct brcmf_sdio_dev *sdiodev,
+					    u32 addr)
 {
-	int err_ret;
-
-	/*
-	 * Can only directly write to some F0 registers.
-	 * Handle CCCR_IENx and CCCR_ABORT command
-	 * as a special case.
-	 */
-	if ((regaddr == SDIO_CCCR_ABORT) ||
-	    (regaddr == SDIO_CCCR_IENx))
-		sdio_writeb(func, byte, regaddr, &err_ret);
-	else
-		sdio_f0_writeb(func, byte, regaddr, &err_ret);
-
-	return err_ret;
-}
-
-static int brcmf_sdiod_request_data(struct brcmf_sdio_dev *sdiodev, u8 fn,
-				    u32 addr, u8 regsz, void *data, bool write)
-{
-	struct sdio_func *func;
-	int ret = -EINVAL;
-
-	brcmf_dbg(SDIO, "rw=%d, func=%d, addr=0x%05x, nbytes=%d\n",
-		  write, fn, addr, regsz);
-
-	/* only allow byte access on F0 */
-	if (WARN_ON(regsz > 1 && !fn))
-		return -EINVAL;
-	func = sdiodev->func[fn];
-
-	switch (regsz) {
-	case sizeof(u8):
-		if (write) {
-			if (fn)
-				sdio_writeb(func, *(u8 *)data, addr, &ret);
-			else
-				ret = brcmf_sdiod_f0_writeb(func, addr,
-							    *(u8 *)data);
-		} else {
-			if (fn)
-				*(u8 *)data = sdio_readb(func, addr, &ret);
-			else
-				*(u8 *)data = sdio_f0_readb(func, addr, &ret);
-		}
-		break;
-	case sizeof(u16):
-		if (write)
-			sdio_writew(func, *(u16 *)data, addr, &ret);
-		else
-			*(u16 *)data = sdio_readw(func, addr, &ret);
-		break;
-	case sizeof(u32):
-		if (write)
-			sdio_writel(func, *(u32 *)data, addr, &ret);
-		else
-			*(u32 *)data = sdio_readl(func, addr, &ret);
-		break;
-	default:
-		brcmf_err("invalid size: %d\n", regsz);
-		break;
-	}
-
-	if (ret)
-		brcmf_dbg(SDIO, "failed to %s data F%d@0x%05x, err: %d\n",
-			  write ? "write" : "read", fn, addr, ret);
-
-	return ret;
-}
-
-static int brcmf_sdiod_regrw_helper(struct brcmf_sdio_dev *sdiodev, u32 addr,
-				   u8 regsz, void *data, bool write)
-{
-	u8 func;
-	s32 retry = 0;
-	int ret;
-
-	if (sdiodev->state == BRCMF_SDIOD_NOMEDIUM)
-		return -ENOMEDIUM;
-
-	/*
-	 * figure out how to read the register based on address range
-	 * 0x00 ~ 0x7FF: function 0 CCCR and FBR
-	 * 0x10000 ~ 0x1FFFF: function 1 miscellaneous registers
-	 * The rest: function 1 silicon backplane core registers
-	 */
-	if ((addr & ~REG_F0_REG_MASK) == 0)
-		func = SDIO_FUNC_0;
-	else
-		func = SDIO_FUNC_1;
-
-	do {
-		if (!write)
-			memset(data, 0, regsz);
-		/* for retry wait for 1 ms till bus get settled down */
-		if (retry)
-			usleep_range(1000, 2000);
-		ret = brcmf_sdiod_request_data(sdiodev, func, addr, regsz,
-					       data, write);
-	} while (ret != 0 && ret != -ENOMEDIUM &&
-		 retry++ < SDIOH_API_ACCESS_RETRY_LIMIT);
-
-	if (ret == -ENOMEDIUM)
-		brcmf_sdiod_change_state(sdiodev, BRCMF_SDIOD_NOMEDIUM);
-	else if (ret != 0) {
-		/*
-		 * SleepCSR register access can fail when
-		 * waking up the device so reduce this noise
-		 * in the logs.
-		 */
-		if (addr != SBSDIO_FUNC1_SLEEPCSR)
-			brcmf_err("failed to %s data F%d@0x%05x, err: %d\n",
-				  write ? "write" : "read", func, addr, ret);
-		else
-			brcmf_dbg(SDIO, "failed to %s data F%d@0x%05x, err: %d\n",
-				  write ? "write" : "read", func, addr, ret);
-	}
-	return ret;
-}
-
-static int
-brcmf_sdiod_set_sbaddr_window(struct brcmf_sdio_dev *sdiodev, u32 address)
-{
+	u32 v, bar0 = addr & SBSDIO_SBWINDOW_MASK;
 	int err = 0, i;
-	u8 addr[3];
 
-	if (sdiodev->state == BRCMF_SDIOD_NOMEDIUM)
-		return -ENOMEDIUM;
+	if (bar0 == sdiodev->sbwad)
+		return 0;
 
-	addr[0] = (address >> 8) & SBSDIO_SBADDRLOW_MASK;
-	addr[1] = (address >> 16) & SBSDIO_SBADDRMID_MASK;
-	addr[2] = (address >> 24) & SBSDIO_SBADDRHIGH_MASK;
+	v = bar0 >> 8;
 
-	for (i = 0; i < 3; i++) {
-		err = brcmf_sdiod_regrw_helper(sdiodev,
-					       SBSDIO_FUNC1_SBADDRLOW + i,
-					       sizeof(u8), &addr[i], true);
-		if (err) {
-			brcmf_err("failed at addr: 0x%0x\n",
-				  SBSDIO_FUNC1_SBADDRLOW + i);
-			break;
-		}
-	}
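+	/* write window bits 8..31, one byte each, to SBADDR LOW/MID/HIGH */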
+	for (i = 0; i < 3 && !err; i++, v >>= 8)
+		brcmf_sdiod_writeb(sdiodev, SBSDIO_FUNC1_SBADDRLOW + i,
+				   v & 0xff, &err);
+
+	if (!err)
+		sdiodev->sbwad = bar0;
 
 	return err;
 }
 
-static int
-brcmf_sdiod_addrprep(struct brcmf_sdio_dev *sdiodev, uint width, u32 *addr)
-{
-	uint bar0 = *addr & ~SBSDIO_SB_OFT_ADDR_MASK;
-	int err = 0;
-
-	if (bar0 != sdiodev->sbwad) {
-		err = brcmf_sdiod_set_sbaddr_window(sdiodev, bar0);
-		if (err)
-			return err;
-
-		sdiodev->sbwad = bar0;
-	}
-
-	*addr &= SBSDIO_SB_OFT_ADDR_MASK;
-
-	if (width == 4)
-		*addr |= SBSDIO_SB_ACCESS_2_4B_FLAG;
-
-	return 0;
-}
-
-u8 brcmf_sdiod_regrb(struct brcmf_sdio_dev *sdiodev, u32 addr, int *ret)
-{
-	u8 data;
-	int retval;
-
-	brcmf_dbg(SDIO, "addr:0x%08x\n", addr);
-	retval = brcmf_sdiod_regrw_helper(sdiodev, addr, sizeof(data), &data,
-					  false);
-	brcmf_dbg(SDIO, "data:0x%02x\n", data);
-
-	if (ret)
-		*ret = retval;
-
-	return data;
-}
-
-u32 brcmf_sdiod_regrl(struct brcmf_sdio_dev *sdiodev, u32 addr, int *ret)
+u32 brcmf_sdiod_readl(struct brcmf_sdio_dev *sdiodev, u32 addr, int *ret)
 {
 	u32 data = 0;
 	int retval;
 
-	brcmf_dbg(SDIO, "addr:0x%08x\n", addr);
-	retval = brcmf_sdiod_addrprep(sdiodev, sizeof(data), &addr);
+	retval = brcmf_sdiod_set_backplane_window(sdiodev, addr);
 	if (retval)
-		goto done;
-	retval = brcmf_sdiod_regrw_helper(sdiodev, addr, sizeof(data), &data,
-					  false);
-	brcmf_dbg(SDIO, "data:0x%08x\n", data);
+		goto out;
 
-done:
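+	/* keep the in-window offset and request a 4-byte (32-bit) access */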
+	addr &= SBSDIO_SB_OFT_ADDR_MASK;
+	addr |= SBSDIO_SB_ACCESS_2_4B_FLAG;
+
+	data = sdio_readl(sdiodev->func[1], addr, &retval);
+
+out:
 	if (ret)
 		*ret = retval;
 
 	return data;
 }
 
-void brcmf_sdiod_regwb(struct brcmf_sdio_dev *sdiodev, u32 addr,
-		      u8 data, int *ret)
+void brcmf_sdiod_writel(struct brcmf_sdio_dev *sdiodev, u32 addr,
+			u32 data, int *ret)
 {
 	int retval;
 
-	brcmf_dbg(SDIO, "addr:0x%08x, data:0x%02x\n", addr, data);
-	retval = brcmf_sdiod_regrw_helper(sdiodev, addr, sizeof(data), &data,
-					  true);
-	if (ret)
-		*ret = retval;
-}
-
-void brcmf_sdiod_regwl(struct brcmf_sdio_dev *sdiodev, u32 addr,
-		      u32 data, int *ret)
-{
-	int retval;
-
-	brcmf_dbg(SDIO, "addr:0x%08x, data:0x%08x\n", addr, data);
-	retval = brcmf_sdiod_addrprep(sdiodev, sizeof(data), &addr);
+	retval = brcmf_sdiod_set_backplane_window(sdiodev, addr);
 	if (retval)
-		goto done;
-	retval = brcmf_sdiod_regrw_helper(sdiodev, addr, sizeof(data), &data,
-					  true);
+		goto out;
 
-done:
+	addr &= SBSDIO_SB_OFT_ADDR_MASK;
+	addr |= SBSDIO_SB_ACCESS_2_4B_FLAG;
+
+	sdio_writel(sdiodev->func[1], data, addr, &retval);
+
+out:
 	if (ret)
 		*ret = retval;
 }
 
-static int brcmf_sdiod_buffrw(struct brcmf_sdio_dev *sdiodev, uint fn,
-			     bool write, u32 addr, struct sk_buff *pkt)
+static int brcmf_sdiod_buff_read(struct brcmf_sdio_dev *sdiodev, uint fn,
+				 u32 addr, struct sk_buff *pkt)
 {
 	unsigned int req_sz;
 	int err;
@@ -476,18 +301,36 @@ static int brcmf_sdiod_buffrw(struct brcmf_sdio_dev *sdiodev, uint fn,
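+	/* round the transfer size up to a 4-byte boundary */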
 	req_sz = pkt->len + 3;
 	req_sz &= (uint)~3;
 
-	if (write)
-		err = sdio_memcpy_toio(sdiodev->func[fn], addr,
-				       ((u8 *)(pkt->data)), req_sz);
-	else if (fn == 1)
-		err = sdio_memcpy_fromio(sdiodev->func[fn], ((u8 *)(pkt->data)),
-					 addr, req_sz);
+	if (fn == 1)
+		err = sdio_memcpy_fromio(sdiodev->func[fn],
+					 ((u8 *)(pkt->data)), addr, req_sz);
 	else
 		/* function 2 read is FIFO operation */
-		err = sdio_readsb(sdiodev->func[fn], ((u8 *)(pkt->data)), addr,
-				  req_sz);
+		err = sdio_readsb(sdiodev->func[fn],
+				  ((u8 *)(pkt->data)), addr, req_sz);
+
 	if (err == -ENOMEDIUM)
 		brcmf_sdiod_change_state(sdiodev, BRCMF_SDIOD_NOMEDIUM);
+
+	return err;
+}
+
+static int brcmf_sdiod_buff_write(struct brcmf_sdio_dev *sdiodev, uint fn,
+				  u32 addr, struct sk_buff *pkt)
+{
+	unsigned int req_sz;
+	int err;
+
+	/* A single skb uses the standard mmc interface */
+	req_sz = pkt->len + 3;
+	req_sz &= (uint)~3;
+
+	err = sdio_memcpy_toio(sdiodev->func[fn], addr,
+			       ((u8 *)(pkt->data)), req_sz);
+
+	if (err == -ENOMEDIUM)
+		brcmf_sdiod_change_state(sdiodev, BRCMF_SDIOD_NOMEDIUM);
+
 	return err;
 }
 
@@ -691,11 +534,14 @@ int brcmf_sdiod_recv_pkt(struct brcmf_sdio_dev *sdiodev, struct sk_buff *pkt)
 
 	brcmf_dbg(SDIO, "addr = 0x%x, size = %d\n", addr, pkt->len);
 
-	err = brcmf_sdiod_addrprep(sdiodev, 4, &addr);
+	err = brcmf_sdiod_set_backplane_window(sdiodev, addr);
 	if (err)
 		goto done;
 
-	err = brcmf_sdiod_buffrw(sdiodev, SDIO_FUNC_2, false, addr, pkt);
+	addr &= SBSDIO_SB_OFT_ADDR_MASK;
+	addr |= SBSDIO_SB_ACCESS_2_4B_FLAG;
+
+	err = brcmf_sdiod_buff_read(sdiodev, SDIO_FUNC_2, addr, pkt);
 
 done:
 	return err;
@@ -712,19 +558,22 @@ int brcmf_sdiod_recv_chain(struct brcmf_sdio_dev *sdiodev,
 	brcmf_dbg(SDIO, "addr = 0x%x, size = %d\n",
 		  addr, pktq->qlen);
 
-	err = brcmf_sdiod_addrprep(sdiodev, 4, &addr);
+	err = brcmf_sdiod_set_backplane_window(sdiodev, addr);
 	if (err)
 		goto done;
 
+	addr &= SBSDIO_SB_OFT_ADDR_MASK;
+	addr |= SBSDIO_SB_ACCESS_2_4B_FLAG;
+
 	if (pktq->qlen == 1)
-		err = brcmf_sdiod_buffrw(sdiodev, SDIO_FUNC_2, false, addr,
-					 pktq->next);
+		err = brcmf_sdiod_buff_read(sdiodev, SDIO_FUNC_2, addr,
+					    pktq->next);
 	else if (!sdiodev->sg_support) {
 		glom_skb = brcmu_pkt_buf_get_skb(totlen);
 		if (!glom_skb)
 			return -ENOMEM;
-		err = brcmf_sdiod_buffrw(sdiodev, SDIO_FUNC_2, false, addr,
-					 glom_skb);
+		err = brcmf_sdiod_buff_read(sdiodev, SDIO_FUNC_2, addr,
+					    glom_skb);
 		if (err)
 			goto done;
 
@@ -748,6 +597,7 @@ int brcmf_sdiod_send_buf(struct brcmf_sdio_dev *sdiodev, u8 *buf, uint nbytes)
 	int err;
 
 	mypkt = brcmu_pkt_buf_get_skb(nbytes);
+
 	if (!mypkt) {
 		brcmf_err("brcmu_pkt_buf_get_skb failed: len %d\n",
 			  nbytes);
@@ -756,15 +606,19 @@ int brcmf_sdiod_send_buf(struct brcmf_sdio_dev *sdiodev, u8 *buf, uint nbytes)
 
 	memcpy(mypkt->data, buf, nbytes);
 
-	err = brcmf_sdiod_addrprep(sdiodev, 4, &addr);
+	err = brcmf_sdiod_set_backplane_window(sdiodev, addr);
+	if (err) {
+		brcmu_pkt_buf_free_skb(mypkt);
+		return err;
+	}
+
+	addr &= SBSDIO_SB_OFT_ADDR_MASK;
+	addr |= SBSDIO_SB_ACCESS_2_4B_FLAG;
 
-	if (!err)
-		err = brcmf_sdiod_buffrw(sdiodev, SDIO_FUNC_2, true, addr,
-					 mypkt);
+	err = brcmf_sdiod_buff_write(sdiodev, SDIO_FUNC_2, addr, mypkt);
 
 	brcmu_pkt_buf_free_skb(mypkt);
-	return err;
 
+	return err;
 }
 
 int brcmf_sdiod_send_pkt(struct brcmf_sdio_dev *sdiodev,
@@ -776,20 +630,24 @@ int brcmf_sdiod_send_pkt(struct brcmf_sdio_dev *sdiodev,
 
 	brcmf_dbg(SDIO, "addr = 0x%x, size = %d\n", addr, pktq->qlen);
 
-	err = brcmf_sdiod_addrprep(sdiodev, 4, &addr);
+	err = brcmf_sdiod_set_backplane_window(sdiodev, addr);
 	if (err)
 		return err;
 
-	if (pktq->qlen == 1 || !sdiodev->sg_support)
+	addr &= SBSDIO_SB_OFT_ADDR_MASK;
+	addr |= SBSDIO_SB_ACCESS_2_4B_FLAG;
+
+	if (pktq->qlen == 1 || !sdiodev->sg_support) {
 		skb_queue_walk(pktq, skb) {
-			err = brcmf_sdiod_buffrw(sdiodev, SDIO_FUNC_2, true,
-						 addr, skb);
+			err = brcmf_sdiod_buff_write(sdiodev, SDIO_FUNC_2,
+						     addr, skb);
 			if (err)
 				break;
 		}
-	else
+	} else {
 		err = brcmf_sdiod_sglist_rw(sdiodev, SDIO_FUNC_2, true, addr,
 					    pktq);
+	}
 
 	return err;
 }
@@ -798,7 +656,7 @@ int
 brcmf_sdiod_ramrw(struct brcmf_sdio_dev *sdiodev, bool write, u32 address,
 		  u8 *data, uint size)
 {
-	int bcmerror = 0;
+	int err = 0;
 	struct sk_buff *pkt;
 	u32 sdaddr;
 	uint dsize;
@@ -823,8 +681,8 @@ brcmf_sdiod_ramrw(struct brcmf_sdio_dev *sdiodev, bool write, u32 address,
 	/* Do the transfer(s) */
 	while (size) {
 		/* Set the backplane window to include the start address */
-		bcmerror = brcmf_sdiod_set_sbaddr_window(sdiodev, address);
-		if (bcmerror)
+		err = brcmf_sdiod_set_backplane_window(sdiodev, address);
+		if (err)
 			break;
 
 		brcmf_dbg(SDIO, "%s %d bytes at offset 0x%08x in window 0x%08x\n",
@@ -835,11 +693,17 @@ brcmf_sdiod_ramrw(struct brcmf_sdio_dev *sdiodev, bool write, u32 address,
 		sdaddr |= SBSDIO_SB_ACCESS_2_4B_FLAG;
 
 		skb_put(pkt, dsize);
-		if (write)
+
+		if (write) {
 			memcpy(pkt->data, data, dsize);
-		bcmerror = brcmf_sdiod_buffrw(sdiodev, SDIO_FUNC_1, write,
-					      sdaddr, pkt);
-		if (bcmerror) {
+			err = brcmf_sdiod_buff_write(sdiodev, SDIO_FUNC_1,
+						     sdaddr, pkt);
+		} else {
+			err = brcmf_sdiod_buff_read(sdiodev, SDIO_FUNC_1,
+						    sdaddr, pkt);
+		}
+
+		if (err) {
 			brcmf_err("membytes transfer failed\n");
 			break;
 		}
@@ -859,24 +723,17 @@ brcmf_sdiod_ramrw(struct brcmf_sdio_dev *sdiodev, bool write, u32 address,
 
 	dev_kfree_skb(pkt);
 
-	/* Return the window to backplane enumeration space for core access */
-	if (brcmf_sdiod_set_sbaddr_window(sdiodev, sdiodev->sbwad))
-		brcmf_err("FAILED to set window back to 0x%x\n",
-			  sdiodev->sbwad);
-
 	sdio_release_host(sdiodev->func[1]);
 
-	return bcmerror;
+	return err;
 }
 
-int brcmf_sdiod_abort(struct brcmf_sdio_dev *sdiodev, uint fn)
+int brcmf_sdiod_abort(struct brcmf_sdio_dev *sdiodev, u8 fn)
 {
-	char t_func = (char)fn;
 	brcmf_dbg(SDIO, "Enter\n");
 
-	/* issue abort cmd52 command through F0 */
-	brcmf_sdiod_request_data(sdiodev, SDIO_FUNC_0, SDIO_CCCR_ABORT,
-				 sizeof(t_func), &t_func, true);
+	/* Issue abort cmd52 command through F0 */
+	brcmf_sdiod_func0_wb(sdiodev, SDIO_CCCR_ABORT, fn, NULL);
 
 	brcmf_dbg(SDIO, "Exit\n");
 	return 0;
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.c
index 53ae302..47de35a 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.c
@@ -130,13 +130,19 @@ static void brcmf_feat_iovar_data_set(struct brcmf_if *ifp,
 	}
 }
 
+#define MAX_CAPS_BUFFER_SIZE	512
 static void brcmf_feat_firmware_capabilities(struct brcmf_if *ifp)
 {
-	char caps[256];
+	char caps[MAX_CAPS_BUFFER_SIZE];
 	enum brcmf_feat_id id;
-	int i;
+	int i, err;
 
-	brcmf_fil_iovar_data_get(ifp, "cap", caps, sizeof(caps));
+	err = brcmf_fil_iovar_data_get(ifp, "cap", caps, sizeof(caps));
+	if (err) {
+		brcmf_err("could not get firmware cap (%d)\n", err);
+		return;
+	}
+
 	brcmf_dbg(INFO, "[ %s]\n", caps);
 
 	for (i = 0; i < ARRAY_SIZE(brcmf_fwcap_map); i++) {
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c
index cdf9e41..5cc2d69 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c
@@ -159,8 +159,8 @@ struct rte_console {
 /* manfid tuple length, include tuple, link bytes */
 #define SBSDIO_CIS_MANFID_TUPLE_LEN	6
 
-#define CORE_BUS_REG(base, field) \
-		(base + offsetof(struct sdpcmd_regs, field))
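+/* byte offset of a register in struct sdpcmd_regs; callers add the core base */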
+#define SD_REG(field) \
+		(offsetof(struct sdpcmd_regs, field))
 
 /* SDIO function 1 register CHIPCLKCSR */
 /* Force ALP request to backplane */
@@ -436,6 +436,7 @@ struct brcmf_sdio_count {
 struct brcmf_sdio {
 	struct brcmf_sdio_dev *sdiodev;	/* sdio device handler */
 	struct brcmf_chip *ci;	/* Chip info struct */
+	struct brcmf_core *sdio_core; /* sdio core info struct */
 
 	u32 hostintmask;	/* Copy of Host Interrupt Mask */
 	atomic_t intstatus;	/* Intstatus bits (events) pending */
@@ -665,22 +666,20 @@ static bool data_ok(struct brcmf_sdio *bus)
  */
 static int r_sdreg32(struct brcmf_sdio *bus, u32 *regvar, u32 offset)
 {
-	struct brcmf_core *core;
+	struct brcmf_core *core = bus->sdio_core;
 	int ret;
 
-	core = brcmf_chip_get_core(bus->ci, BCMA_CORE_SDIO_DEV);
-	*regvar = brcmf_sdiod_regrl(bus->sdiodev, core->base + offset, &ret);
+	*regvar = brcmf_sdiod_readl(bus->sdiodev, core->base + offset, &ret);
 
 	return ret;
 }
 
 static int w_sdreg32(struct brcmf_sdio *bus, u32 regval, u32 reg_offset)
 {
-	struct brcmf_core *core;
+	struct brcmf_core *core = bus->sdio_core;
 	int ret;
 
-	core = brcmf_chip_get_core(bus->ci, BCMA_CORE_SDIO_DEV);
-	brcmf_sdiod_regwl(bus->sdiodev, core->base + reg_offset, regval, &ret);
+	brcmf_sdiod_writel(bus->sdiodev, core->base + reg_offset, regval, &ret);
 
 	return ret;
 }
@@ -697,8 +696,7 @@ brcmf_sdio_kso_control(struct brcmf_sdio *bus, bool on)
 
 	wr_val = (on << SBSDIO_FUNC1_SLEEPCSR_KSO_SHIFT);
 	/* 1st KSO write goes to AOS wake up core if device is asleep  */
-	brcmf_sdiod_regwb(bus->sdiodev, SBSDIO_FUNC1_SLEEPCSR,
-			  wr_val, &err);
+	brcmf_sdiod_writeb(bus->sdiodev, SBSDIO_FUNC1_SLEEPCSR, wr_val, &err);
 
 	if (on) {
 		/* device WAKEUP through KSO:
@@ -724,7 +722,7 @@ brcmf_sdio_kso_control(struct brcmf_sdio *bus, bool on)
 		 * just one write attempt may fail,
 		 * read it back until it matches written value
 		 */
-		rd_val = brcmf_sdiod_regrb(bus->sdiodev, SBSDIO_FUNC1_SLEEPCSR,
+		rd_val = brcmf_sdiod_readb(bus->sdiodev, SBSDIO_FUNC1_SLEEPCSR,
 					   &err);
 		if (!err) {
 			if ((rd_val & bmask) == cmp_val)
@@ -734,9 +732,11 @@ brcmf_sdio_kso_control(struct brcmf_sdio *bus, bool on)
 		/* bail out upon subsequent access errors */
 		if (err && (err_cnt++ > BRCMF_SDIO_MAX_ACCESS_ERRORS))
 			break;
+
 		udelay(KSO_WAIT_US);
-		brcmf_sdiod_regwb(bus->sdiodev, SBSDIO_FUNC1_SLEEPCSR,
-				  wr_val, &err);
+		brcmf_sdiod_writeb(bus->sdiodev, SBSDIO_FUNC1_SLEEPCSR, wr_val,
+				   &err);
+
 	} while (try_cnt++ < MAX_KSO_ATTEMPTS);
 
 	if (try_cnt > 2)
@@ -772,15 +772,15 @@ static int brcmf_sdio_htclk(struct brcmf_sdio *bus, bool on, bool pendok)
 		clkreq =
 		    bus->alp_only ? SBSDIO_ALP_AVAIL_REQ : SBSDIO_HT_AVAIL_REQ;
 
-		brcmf_sdiod_regwb(bus->sdiodev, SBSDIO_FUNC1_CHIPCLKCSR,
-				  clkreq, &err);
+		brcmf_sdiod_writeb(bus->sdiodev, SBSDIO_FUNC1_CHIPCLKCSR,
+				   clkreq, &err);
 		if (err) {
 			brcmf_err("HT Avail request error: %d\n", err);
 			return -EBADE;
 		}
 
 		/* Check current status */
-		clkctl = brcmf_sdiod_regrb(bus->sdiodev,
+		clkctl = brcmf_sdiod_readb(bus->sdiodev,
 					   SBSDIO_FUNC1_CHIPCLKCSR, &err);
 		if (err) {
 			brcmf_err("HT Avail read error: %d\n", err);
@@ -790,35 +790,34 @@ static int brcmf_sdio_htclk(struct brcmf_sdio *bus, bool on, bool pendok)
 		/* Go to pending and await interrupt if appropriate */
 		if (!SBSDIO_CLKAV(clkctl, bus->alp_only) && pendok) {
 			/* Allow only clock-available interrupt */
-			devctl = brcmf_sdiod_regrb(bus->sdiodev,
+			devctl = brcmf_sdiod_readb(bus->sdiodev,
 						   SBSDIO_DEVICE_CTL, &err);
 			if (err) {
-				brcmf_err("Devctl error setting CA: %d\n",
-					  err);
+				brcmf_err("Devctl error setting CA: %d\n", err);
 				return -EBADE;
 			}
 
 			devctl |= SBSDIO_DEVCTL_CA_INT_ONLY;
-			brcmf_sdiod_regwb(bus->sdiodev, SBSDIO_DEVICE_CTL,
-					  devctl, &err);
+			brcmf_sdiod_writeb(bus->sdiodev, SBSDIO_DEVICE_CTL,
+					   devctl, &err);
 			brcmf_dbg(SDIO, "CLKCTL: set PENDING\n");
 			bus->clkstate = CLK_PENDING;
 
 			return 0;
 		} else if (bus->clkstate == CLK_PENDING) {
 			/* Cancel CA-only interrupt filter */
-			devctl = brcmf_sdiod_regrb(bus->sdiodev,
+			devctl = brcmf_sdiod_readb(bus->sdiodev,
 						   SBSDIO_DEVICE_CTL, &err);
 			devctl &= ~SBSDIO_DEVCTL_CA_INT_ONLY;
-			brcmf_sdiod_regwb(bus->sdiodev, SBSDIO_DEVICE_CTL,
-					  devctl, &err);
+			brcmf_sdiod_writeb(bus->sdiodev, SBSDIO_DEVICE_CTL,
+					   devctl, &err);
 		}
 
 		/* Otherwise, wait here (polling) for HT Avail */
 		timeout = jiffies +
 			  msecs_to_jiffies(PMU_MAX_TRANSITION_DLY/1000);
 		while (!SBSDIO_CLKAV(clkctl, bus->alp_only)) {
-			clkctl = brcmf_sdiod_regrb(bus->sdiodev,
+			clkctl = brcmf_sdiod_readb(bus->sdiodev,
 						   SBSDIO_FUNC1_CHIPCLKCSR,
 						   &err);
 			if (time_after(jiffies, timeout))
@@ -852,16 +851,16 @@ static int brcmf_sdio_htclk(struct brcmf_sdio *bus, bool on, bool pendok)
 
 		if (bus->clkstate == CLK_PENDING) {
 			/* Cancel CA-only interrupt filter */
-			devctl = brcmf_sdiod_regrb(bus->sdiodev,
+			devctl = brcmf_sdiod_readb(bus->sdiodev,
 						   SBSDIO_DEVICE_CTL, &err);
 			devctl &= ~SBSDIO_DEVCTL_CA_INT_ONLY;
-			brcmf_sdiod_regwb(bus->sdiodev, SBSDIO_DEVICE_CTL,
-					  devctl, &err);
+			brcmf_sdiod_writeb(bus->sdiodev, SBSDIO_DEVICE_CTL,
+					   devctl, &err);
 		}
 
 		bus->clkstate = CLK_SDONLY;
-		brcmf_sdiod_regwb(bus->sdiodev, SBSDIO_FUNC1_CHIPCLKCSR,
-				  clkreq, &err);
+		brcmf_sdiod_writeb(bus->sdiodev, SBSDIO_FUNC1_CHIPCLKCSR,
+				   clkreq, &err);
 		brcmf_dbg(SDIO, "CLKCTL: turned OFF\n");
 		if (err) {
 			brcmf_err("Failed access turning clock off: %d\n",
@@ -951,14 +950,14 @@ brcmf_sdio_bus_sleep(struct brcmf_sdio *bus, bool sleep, bool pendok)
 
 		/* Going to sleep */
 		if (sleep) {
-			clkcsr = brcmf_sdiod_regrb(bus->sdiodev,
+			clkcsr = brcmf_sdiod_readb(bus->sdiodev,
 						   SBSDIO_FUNC1_CHIPCLKCSR,
 						   &err);
 			if ((clkcsr & SBSDIO_CSR_MASK) == 0) {
 				brcmf_dbg(SDIO, "no clock, set ALP\n");
-				brcmf_sdiod_regwb(bus->sdiodev,
-						  SBSDIO_FUNC1_CHIPCLKCSR,
-						  SBSDIO_ALP_AVAIL_REQ, &err);
+				brcmf_sdiod_writeb(bus->sdiodev,
+						   SBSDIO_FUNC1_CHIPCLKCSR,
+						   SBSDIO_ALP_AVAIL_REQ, &err);
 			}
 			err = brcmf_sdio_kso_control(bus, false);
 		} else {
@@ -1087,12 +1086,10 @@ static u32 brcmf_sdio_hostmail(struct brcmf_sdio *bus)
 	brcmf_dbg(SDIO, "Enter\n");
 
 	/* Read mailbox data and ack that we did so */
-	ret = r_sdreg32(bus, &hmb_data,
-			offsetof(struct sdpcmd_regs, tohostmailboxdata));
+	ret = r_sdreg32(bus, &hmb_data,	SD_REG(tohostmailboxdata));
 
 	if (ret == 0)
-		w_sdreg32(bus, SMB_INT_ACK,
-			  offsetof(struct sdpcmd_regs, tosbmailbox));
+		w_sdreg32(bus, SMB_INT_ACK, SD_REG(tosbmailbox));
 	bus->sdcnt.f1regdata += 2;
 
 	/* dongle indicates the firmware has halted/crashed */
@@ -1178,16 +1175,16 @@ static void brcmf_sdio_rxfail(struct brcmf_sdio *bus, bool abort, bool rtx)
 	if (abort)
 		brcmf_sdiod_abort(bus->sdiodev, SDIO_FUNC_2);
 
-	brcmf_sdiod_regwb(bus->sdiodev, SBSDIO_FUNC1_FRAMECTRL,
-			  SFC_RF_TERM, &err);
+	brcmf_sdiod_writeb(bus->sdiodev, SBSDIO_FUNC1_FRAMECTRL, SFC_RF_TERM,
+			   &err);
 	bus->sdcnt.f1regdata++;
 
 	/* Wait until the packet has been flushed (device/FIFO stable) */
 	for (lastrbc = retries = 0xffff; retries > 0; retries--) {
-		hi = brcmf_sdiod_regrb(bus->sdiodev,
-				       SBSDIO_FUNC1_RFRAMEBCHI, &err);
-		lo = brcmf_sdiod_regrb(bus->sdiodev,
-				       SBSDIO_FUNC1_RFRAMEBCLO, &err);
+		hi = brcmf_sdiod_readb(bus->sdiodev, SBSDIO_FUNC1_RFRAMEBCHI,
+				       &err);
+		lo = brcmf_sdiod_readb(bus->sdiodev, SBSDIO_FUNC1_RFRAMEBCLO,
+				       &err);
 		bus->sdcnt.f1regdata += 2;
 
 		if ((hi == 0) && (lo == 0))
@@ -1207,8 +1204,7 @@ static void brcmf_sdio_rxfail(struct brcmf_sdio *bus, bool abort, bool rtx)
 
 	if (rtx) {
 		bus->sdcnt.rxrtx++;
-		err = w_sdreg32(bus, SMB_NAK,
-				offsetof(struct sdpcmd_regs, tosbmailbox));
+		err = w_sdreg32(bus, SMB_NAK, SD_REG(tosbmailbox));
 
 		bus->sdcnt.f1regdata++;
 		if (err == 0)
@@ -1229,12 +1225,12 @@ static void brcmf_sdio_txfail(struct brcmf_sdio *bus)
 	bus->sdcnt.tx_sderrs++;
 
 	brcmf_sdiod_abort(sdiodev, SDIO_FUNC_2);
-	brcmf_sdiod_regwb(sdiodev, SBSDIO_FUNC1_FRAMECTRL, SFC_WF_TERM, NULL);
+	brcmf_sdiod_writeb(sdiodev, SBSDIO_FUNC1_FRAMECTRL, SFC_WF_TERM, NULL);
 	bus->sdcnt.f1regdata++;
 
 	for (i = 0; i < 3; i++) {
-		hi = brcmf_sdiod_regrb(sdiodev, SBSDIO_FUNC1_WFRAMEBCHI, NULL);
-		lo = brcmf_sdiod_regrb(sdiodev, SBSDIO_FUNC1_WFRAMEBCLO, NULL);
+		hi = brcmf_sdiod_readb(sdiodev, SBSDIO_FUNC1_WFRAMEBCHI, NULL);
+		lo = brcmf_sdiod_readb(sdiodev, SBSDIO_FUNC1_WFRAMEBCLO, NULL);
 		bus->sdcnt.f1regdata += 2;
 		if ((hi == 0) && (lo == 0))
 			break;
@@ -2333,9 +2329,7 @@ static uint brcmf_sdio_sendfromq(struct brcmf_sdio *bus, uint maxframes)
 		if (!bus->intr) {
 			/* Check device status, signal pending interrupt */
 			sdio_claim_host(bus->sdiodev->func[1]);
-			ret = r_sdreg32(bus, &intstatus,
-					offsetof(struct sdpcmd_regs,
-						 intstatus));
+			ret = r_sdreg32(bus, &intstatus, SD_REG(intstatus));
 			sdio_release_host(bus->sdiodev->func[1]);
 			bus->sdcnt.f2txdata++;
 			if (ret != 0)
@@ -2441,16 +2435,16 @@ static void brcmf_sdio_bus_stop(struct device *dev)
 		brcmf_sdio_bus_sleep(bus, false, false);
 
 		/* Disable and clear interrupts at the chip level also */
-		w_sdreg32(bus, 0, offsetof(struct sdpcmd_regs, hostintmask));
+		w_sdreg32(bus, 0, SD_REG(hostintmask));
 		local_hostintmask = bus->hostintmask;
 		bus->hostintmask = 0;
 
 		/* Force backplane clocks to assure F2 interrupt propagates */
-		saveclk = brcmf_sdiod_regrb(sdiodev, SBSDIO_FUNC1_CHIPCLKCSR,
+		saveclk = brcmf_sdiod_readb(sdiodev, SBSDIO_FUNC1_CHIPCLKCSR,
 					    &err);
 		if (!err)
-			brcmf_sdiod_regwb(sdiodev, SBSDIO_FUNC1_CHIPCLKCSR,
-					  (saveclk | SBSDIO_FORCE_HT), &err);
+			brcmf_sdiod_writeb(sdiodev, SBSDIO_FUNC1_CHIPCLKCSR,
+					   (saveclk | SBSDIO_FORCE_HT), &err);
 		if (err)
 			brcmf_err("Failed to force clock for F2: err %d\n",
 				  err);
@@ -2460,8 +2454,7 @@ static void brcmf_sdio_bus_stop(struct device *dev)
 		sdio_disable_func(sdiodev->func[SDIO_FUNC_2]);
 
 		/* Clear any pending interrupts now that F2 is disabled */
-		w_sdreg32(bus, local_hostintmask,
-			  offsetof(struct sdpcmd_regs, intstatus));
+		w_sdreg32(bus, local_hostintmask, SD_REG(intstatus));
 
 		sdio_release_host(sdiodev->func[1]);
 	}
@@ -2501,15 +2494,14 @@ static inline void brcmf_sdio_clrintr(struct brcmf_sdio *bus)
 
 static int brcmf_sdio_intr_rstatus(struct brcmf_sdio *bus)
 {
-	struct brcmf_core *buscore;
+	struct brcmf_core *buscore = bus->sdio_core;
 	u32 addr;
 	unsigned long val;
 	int ret;
 
-	buscore = brcmf_chip_get_core(bus->ci, BCMA_CORE_SDIO_DEV);
-	addr = buscore->base + offsetof(struct sdpcmd_regs, intstatus);
+	addr = buscore->base + SD_REG(intstatus);
 
-	val = brcmf_sdiod_regrl(bus->sdiodev, addr, &ret);
+	val = brcmf_sdiod_readl(bus->sdiodev, addr, &ret);
 	bus->sdcnt.f1regdata++;
 	if (ret != 0)
 		return ret;
@@ -2519,7 +2511,7 @@ static int brcmf_sdio_intr_rstatus(struct brcmf_sdio *bus)
 
 	/* Clear interrupts */
 	if (val) {
-		brcmf_sdiod_regwl(bus->sdiodev, addr, val, &ret);
+		brcmf_sdiod_writel(bus->sdiodev, addr, val, &ret);
 		bus->sdcnt.f1regdata++;
 		atomic_or(val, &bus->intstatus);
 	}
@@ -2545,23 +2537,23 @@ static void brcmf_sdio_dpc(struct brcmf_sdio *bus)
 
 #ifdef DEBUG
 		/* Check for inconsistent device control */
-		devctl = brcmf_sdiod_regrb(bus->sdiodev,
-					   SBSDIO_DEVICE_CTL, &err);
+		devctl = brcmf_sdiod_readb(bus->sdiodev, SBSDIO_DEVICE_CTL,
+					   &err);
 #endif				/* DEBUG */
 
 		/* Read CSR, if clock on switch to AVAIL, else ignore */
-		clkctl = brcmf_sdiod_regrb(bus->sdiodev,
+		clkctl = brcmf_sdiod_readb(bus->sdiodev,
 					   SBSDIO_FUNC1_CHIPCLKCSR, &err);
 
 		brcmf_dbg(SDIO, "DPC: PENDING, devctl 0x%02x clkctl 0x%02x\n",
 			  devctl, clkctl);
 
 		if (SBSDIO_HTAV(clkctl)) {
-			devctl = brcmf_sdiod_regrb(bus->sdiodev,
+			devctl = brcmf_sdiod_readb(bus->sdiodev,
 						   SBSDIO_DEVICE_CTL, &err);
 			devctl &= ~SBSDIO_DEVCTL_CA_INT_ONLY;
-			brcmf_sdiod_regwb(bus->sdiodev, SBSDIO_DEVICE_CTL,
-					  devctl, &err);
+			brcmf_sdiod_writeb(bus->sdiodev,
+					   SBSDIO_DEVICE_CTL, devctl, &err);
 			bus->clkstate = CLK_AVAIL;
 		}
 	}
@@ -2584,11 +2576,9 @@ static void brcmf_sdio_dpc(struct brcmf_sdio *bus)
 	 */
 	if (intstatus & I_HMB_FC_CHANGE) {
 		intstatus &= ~I_HMB_FC_CHANGE;
-		err = w_sdreg32(bus, I_HMB_FC_CHANGE,
-				offsetof(struct sdpcmd_regs, intstatus));
+		err = w_sdreg32(bus, I_HMB_FC_CHANGE, SD_REG(intstatus));
 
-		err = r_sdreg32(bus, &newstatus,
-				offsetof(struct sdpcmd_regs, intstatus));
+		err = r_sdreg32(bus, &newstatus, SD_REG(intstatus));
 		bus->sdcnt.f1regdata += 2;
 		atomic_set(&bus->fcstate,
 			   !!(newstatus & (I_HMB_FC_STATE | I_HMB_FC_CHANGE)));
@@ -3347,31 +3337,31 @@ static void brcmf_sdio_sr_init(struct brcmf_sdio *bus)
 
 	brcmf_dbg(TRACE, "Enter\n");
 
-	val = brcmf_sdiod_regrb(bus->sdiodev, SBSDIO_FUNC1_WAKEUPCTRL, &err);
+	val = brcmf_sdiod_readb(bus->sdiodev, SBSDIO_FUNC1_WAKEUPCTRL, &err);
 	if (err) {
 		brcmf_err("error reading SBSDIO_FUNC1_WAKEUPCTRL\n");
 		return;
 	}
 
 	val |= 1 << SBSDIO_FUNC1_WCTRL_HTWAIT_SHIFT;
-	brcmf_sdiod_regwb(bus->sdiodev, SBSDIO_FUNC1_WAKEUPCTRL, val, &err);
+	brcmf_sdiod_writeb(bus->sdiodev, SBSDIO_FUNC1_WAKEUPCTRL, val, &err);
 	if (err) {
 		brcmf_err("error writing SBSDIO_FUNC1_WAKEUPCTRL\n");
 		return;
 	}
 
 	/* Add CMD14 Support */
-	brcmf_sdiod_regwb(bus->sdiodev, SDIO_CCCR_BRCM_CARDCAP,
-			  (SDIO_CCCR_BRCM_CARDCAP_CMD14_SUPPORT |
-			   SDIO_CCCR_BRCM_CARDCAP_CMD14_EXT),
-			  &err);
+	brcmf_sdiod_func0_wb(bus->sdiodev, SDIO_CCCR_BRCM_CARDCAP,
+			     (SDIO_CCCR_BRCM_CARDCAP_CMD14_SUPPORT |
+			      SDIO_CCCR_BRCM_CARDCAP_CMD14_EXT),
+			     &err);
 	if (err) {
 		brcmf_err("error writing SDIO_CCCR_BRCM_CARDCAP\n");
 		return;
 	}
 
-	brcmf_sdiod_regwb(bus->sdiodev, SBSDIO_FUNC1_CHIPCLKCSR,
-			  SBSDIO_FORCE_HT, &err);
+	brcmf_sdiod_writeb(bus->sdiodev, SBSDIO_FUNC1_CHIPCLKCSR,
+			   SBSDIO_FORCE_HT, &err);
 	if (err) {
 		brcmf_err("error writing SBSDIO_FUNC1_CHIPCLKCSR\n");
 		return;
@@ -3385,16 +3375,17 @@ static void brcmf_sdio_sr_init(struct brcmf_sdio *bus)
 /* enable KSO bit */
 static int brcmf_sdio_kso_init(struct brcmf_sdio *bus)
 {
+	struct brcmf_core *core = bus->sdio_core;
 	u8 val;
 	int err = 0;
 
 	brcmf_dbg(TRACE, "Enter\n");
 
 	/* KSO bit added in SDIO core rev 12 */
-	if (brcmf_chip_get_core(bus->ci, BCMA_CORE_SDIO_DEV)->rev < 12)
+	if (core->rev < 12)
 		return 0;
 
-	val = brcmf_sdiod_regrb(bus->sdiodev, SBSDIO_FUNC1_SLEEPCSR, &err);
+	val = brcmf_sdiod_readb(bus->sdiodev, SBSDIO_FUNC1_SLEEPCSR, &err);
 	if (err) {
 		brcmf_err("error reading SBSDIO_FUNC1_SLEEPCSR\n");
 		return err;
@@ -3403,8 +3394,8 @@ static int brcmf_sdio_kso_init(struct brcmf_sdio *bus)
 	if (!(val & SBSDIO_FUNC1_SLEEPCSR_KSO_MASK)) {
 		val |= (SBSDIO_FUNC1_SLEEPCSR_KSO_EN <<
 			SBSDIO_FUNC1_SLEEPCSR_KSO_SHIFT);
-		brcmf_sdiod_regwb(bus->sdiodev, SBSDIO_FUNC1_SLEEPCSR,
-				  val, &err);
+		brcmf_sdiod_writeb(bus->sdiodev, SBSDIO_FUNC1_SLEEPCSR,
+				   val, &err);
 		if (err) {
 			brcmf_err("error writing SBSDIO_FUNC1_SLEEPCSR\n");
 			return err;
@@ -3420,6 +3411,7 @@ static int brcmf_sdio_bus_preinit(struct device *dev)
 	struct brcmf_bus *bus_if = dev_get_drvdata(dev);
 	struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv.sdio;
 	struct brcmf_sdio *bus = sdiodev->bus;
+	struct brcmf_core *core = bus->sdio_core;
 	uint pad_size;
 	u32 value;
 	int err;
@@ -3428,7 +3420,7 @@ static int brcmf_sdio_bus_preinit(struct device *dev)
 	 * a device perspective, ie. bus:txglom affects the
 	 * bus transfers from device to host.
 	 */
-	if (brcmf_chip_get_core(bus->ci, BCMA_CORE_SDIO_DEV)->rev < 12) {
+	if (core->rev < 12) {
 		/* for sdio core rev < 12, disable txgloming */
 		value = 0;
 		err = brcmf_iovar_data_set(dev, "bus:txglom", &value,
@@ -3565,9 +3557,9 @@ static void brcmf_sdio_bus_watchdog(struct brcmf_sdio *bus)
 				u8 devpend;
 
 				sdio_claim_host(bus->sdiodev->func[1]);
-				devpend = brcmf_sdiod_regrb(bus->sdiodev,
-							    SDIO_CCCR_INTx,
-							    NULL);
+				devpend = brcmf_sdiod_func0_rb(bus->sdiodev,
+							       SDIO_CCCR_INTx,
+							       NULL);
 				sdio_release_host(bus->sdiodev->func[1]);
 				intstatus = devpend & (INTR_STATUS_FUNC1 |
 						       INTR_STATUS_FUNC2);
@@ -3705,12 +3697,12 @@ brcmf_sdio_drivestrengthinit(struct brcmf_sdio_dev *sdiodev,
 			}
 		}
 		addr = CORE_CC_REG(pmu->base, chipcontrol_addr);
-		brcmf_sdiod_regwl(sdiodev, addr, 1, NULL);
-		cc_data_temp = brcmf_sdiod_regrl(sdiodev, addr, NULL);
+		brcmf_sdiod_writel(sdiodev, addr, 1, NULL);
+		cc_data_temp = brcmf_sdiod_readl(sdiodev, addr, NULL);
 		cc_data_temp &= ~str_mask;
 		drivestrength_sel <<= str_shift;
 		cc_data_temp |= drivestrength_sel;
-		brcmf_sdiod_regwl(sdiodev, addr, cc_data_temp, NULL);
+		brcmf_sdiod_writel(sdiodev, addr, cc_data_temp, NULL);
 
 		brcmf_dbg(INFO, "SDIO: %d mA (req=%d mA) drive strength selected, set to 0x%08x\n",
 			  str_tab[i].strength, drivestrength, cc_data_temp);
@@ -3725,7 +3717,7 @@ static int brcmf_sdio_buscoreprep(void *ctx)
 
 	/* Try forcing SDIO core to do ALPAvail request only */
 	clkset = SBSDIO_FORCE_HW_CLKREQ_OFF | SBSDIO_ALP_AVAIL_REQ;
-	brcmf_sdiod_regwb(sdiodev, SBSDIO_FUNC1_CHIPCLKCSR, clkset, &err);
+	brcmf_sdiod_writeb(sdiodev, SBSDIO_FUNC1_CHIPCLKCSR, clkset, &err);
 	if (err) {
 		brcmf_err("error writing for HT off\n");
 		return err;
@@ -3733,8 +3725,7 @@ static int brcmf_sdio_buscoreprep(void *ctx)
 
 	/* If register supported, wait for ALPAvail and then force ALP */
 	/* This may take up to 15 milliseconds */
-	clkval = brcmf_sdiod_regrb(sdiodev,
-				   SBSDIO_FUNC1_CHIPCLKCSR, NULL);
+	clkval = brcmf_sdiod_readb(sdiodev, SBSDIO_FUNC1_CHIPCLKCSR, NULL);
 
 	if ((clkval & ~SBSDIO_AVBITS) != clkset) {
 		brcmf_err("ChipClkCSR access: wrote 0x%02x read 0x%02x\n",
@@ -3742,10 +3733,11 @@ static int brcmf_sdio_buscoreprep(void *ctx)
 		return -EACCES;
 	}
 
-	SPINWAIT(((clkval = brcmf_sdiod_regrb(sdiodev,
-					      SBSDIO_FUNC1_CHIPCLKCSR, NULL)),
-			!SBSDIO_ALPAV(clkval)),
-			PMU_MAX_TRANSITION_DLY);
+	SPINWAIT(((clkval = brcmf_sdiod_readb(sdiodev, SBSDIO_FUNC1_CHIPCLKCSR,
+					      NULL)),
+		 !SBSDIO_ALPAV(clkval)),
+		 PMU_MAX_TRANSITION_DLY);
+
 	if (!SBSDIO_ALPAV(clkval)) {
 		brcmf_err("timeout on ALPAV wait, clkval 0x%02x\n",
 			  clkval);
@@ -3753,11 +3745,11 @@ static int brcmf_sdio_buscoreprep(void *ctx)
 	}
 
 	clkset = SBSDIO_FORCE_HW_CLKREQ_OFF | SBSDIO_FORCE_ALP;
-	brcmf_sdiod_regwb(sdiodev, SBSDIO_FUNC1_CHIPCLKCSR, clkset, &err);
+	brcmf_sdiod_writeb(sdiodev, SBSDIO_FUNC1_CHIPCLKCSR, clkset, &err);
 	udelay(65);
 
 	/* Also, disable the extra SDIO pull-ups */
-	brcmf_sdiod_regwb(sdiodev, SBSDIO_FUNC1_SDIOPULLUP, 0, NULL);
+	brcmf_sdiod_writeb(sdiodev, SBSDIO_FUNC1_SDIOPULLUP, 0, NULL);
 
 	return 0;
 }
@@ -3766,13 +3758,12 @@ static void brcmf_sdio_buscore_activate(void *ctx, struct brcmf_chip *chip,
 					u32 rstvec)
 {
 	struct brcmf_sdio_dev *sdiodev = ctx;
-	struct brcmf_core *core;
+	struct brcmf_core *core = sdiodev->bus->sdio_core;
 	u32 reg_addr;
 
 	/* clear all interrupts */
-	core = brcmf_chip_get_core(chip, BCMA_CORE_SDIO_DEV);
-	reg_addr = core->base + offsetof(struct sdpcmd_regs, intstatus);
-	brcmf_sdiod_regwl(sdiodev, reg_addr, 0xFFFFFFFF, NULL);
+	reg_addr = core->base + SD_REG(intstatus);
+	brcmf_sdiod_writel(sdiodev, reg_addr, 0xFFFFFFFF, NULL);
 
 	if (rstvec)
 		/* Write reset vector to address 0 */
@@ -3785,7 +3776,7 @@ static u32 brcmf_sdio_buscore_read32(void *ctx, u32 addr)
 	struct brcmf_sdio_dev *sdiodev = ctx;
 	u32 val, rev;
 
-	val = brcmf_sdiod_regrl(sdiodev, addr, NULL);
+	val = brcmf_sdiod_readl(sdiodev, addr, NULL);
 	if ((sdiodev->func[0]->device == SDIO_DEVICE_ID_BROADCOM_4335_4339 ||
 	     sdiodev->func[0]->device == SDIO_DEVICE_ID_BROADCOM_4339) &&
 	    addr == CORE_CC_REG(SI_ENUM_BASE, chipid)) {
@@ -3802,7 +3793,7 @@ static void brcmf_sdio_buscore_write32(void *ctx, u32 addr, u32 val)
 {
 	struct brcmf_sdio_dev *sdiodev = ctx;
 
-	brcmf_sdiod_regwl(sdiodev, addr, val, NULL);
+	brcmf_sdiod_writel(sdiodev, addr, val, NULL);
 }
 
 static const struct brcmf_buscore_ops brcmf_sdio_buscore_ops = {
@@ -3826,18 +3817,18 @@ brcmf_sdio_probe_attach(struct brcmf_sdio *bus)
 	sdio_claim_host(sdiodev->func[1]);
 
 	pr_debug("F1 signature read @0x18000000=0x%4x\n",
-		 brcmf_sdiod_regrl(sdiodev, SI_ENUM_BASE, NULL));
+		 brcmf_sdiod_readl(sdiodev, SI_ENUM_BASE, NULL));
 
 	/*
 	 * Force PLL off until brcmf_chip_attach()
 	 * programs PLL control regs
 	 */
 
-	brcmf_sdiod_regwb(sdiodev, SBSDIO_FUNC1_CHIPCLKCSR,
-			  BRCMF_INIT_CLKCTL1, &err);
+	brcmf_sdiod_writeb(sdiodev, SBSDIO_FUNC1_CHIPCLKCSR, BRCMF_INIT_CLKCTL1,
+			   &err);
 	if (!err)
-		clkctl = brcmf_sdiod_regrb(sdiodev,
-					   SBSDIO_FUNC1_CHIPCLKCSR, &err);
+		clkctl = brcmf_sdiod_readb(sdiodev, SBSDIO_FUNC1_CHIPCLKCSR,
+					   &err);
 
 	if (err || ((clkctl & ~SBSDIO_AVBITS) != BRCMF_INIT_CLKCTL1)) {
 		brcmf_err("ChipClkCSR access: err %d wrote 0x%02x read 0x%02x\n",
@@ -3851,6 +3842,12 @@ brcmf_sdio_probe_attach(struct brcmf_sdio *bus)
 		bus->ci = NULL;
 		goto fail;
 	}
+
+	/* Pick up the SDIO core info struct from chip.c */
+	bus->sdio_core = brcmf_chip_get_core(bus->ci, BCMA_CORE_SDIO_DEV);
+	if (!bus->sdio_core)
+		goto fail;
+
 	sdiodev->settings = brcmf_get_module_param(sdiodev->dev,
 						   BRCMF_BUSTYPE_SDIO,
 						   bus->ci->chip,
@@ -3897,25 +3894,25 @@ brcmf_sdio_probe_attach(struct brcmf_sdio *bus)
 	brcmf_sdio_drivestrengthinit(sdiodev, bus->ci, drivestrength);
 
 	/* Set card control so an SDIO card reset does a WLAN backplane reset */
-	reg_val = brcmf_sdiod_regrb(sdiodev, SDIO_CCCR_BRCM_CARDCTRL, &err);
+	reg_val = brcmf_sdiod_func0_rb(sdiodev, SDIO_CCCR_BRCM_CARDCTRL, &err);
 	if (err)
 		goto fail;
 
 	reg_val |= SDIO_CCCR_BRCM_CARDCTRL_WLANRESET;
 
-	brcmf_sdiod_regwb(sdiodev, SDIO_CCCR_BRCM_CARDCTRL, reg_val, &err);
+	brcmf_sdiod_func0_wb(sdiodev, SDIO_CCCR_BRCM_CARDCTRL, reg_val, &err);
 	if (err)
 		goto fail;
 
 	/* set PMUControl so a backplane reset does PMU state reload */
 	reg_addr = CORE_CC_REG(brcmf_chip_get_pmu(bus->ci)->base, pmucontrol);
-	reg_val = brcmf_sdiod_regrl(sdiodev, reg_addr, &err);
+	reg_val = brcmf_sdiod_readl(sdiodev, reg_addr, &err);
 	if (err)
 		goto fail;
 
 	reg_val |= (BCMA_CC_PMU_CTL_RES_RELOAD << BCMA_CC_PMU_CTL_RES_SHIFT);
 
-	brcmf_sdiod_regwl(sdiodev, reg_addr, reg_val, &err);
+	brcmf_sdiod_writel(sdiodev, reg_addr, reg_val, &err);
 	if (err)
 		goto fail;
 
@@ -4055,10 +4052,10 @@ static void brcmf_sdio_firmware_callback(struct device *dev, int err,
 		goto release;
 
 	/* Force clocks on backplane to be sure F2 interrupt propagates */
-	saveclk = brcmf_sdiod_regrb(sdiodev, SBSDIO_FUNC1_CHIPCLKCSR, &err);
+	saveclk = brcmf_sdiod_readb(sdiodev, SBSDIO_FUNC1_CHIPCLKCSR, &err);
 	if (!err) {
-		brcmf_sdiod_regwb(sdiodev, SBSDIO_FUNC1_CHIPCLKCSR,
-				  (saveclk | SBSDIO_FORCE_HT), &err);
+		brcmf_sdiod_writeb(sdiodev, SBSDIO_FUNC1_CHIPCLKCSR,
+				   (saveclk | SBSDIO_FORCE_HT), &err);
 	}
 	if (err) {
 		brcmf_err("Failed to force clock for F2: err %d\n", err);
@@ -4067,7 +4064,7 @@ static void brcmf_sdio_firmware_callback(struct device *dev, int err,
 
 	/* Enable function 2 (frame transfers) */
 	w_sdreg32(bus, SDPCM_PROT_VERSION << SMB_DATA_VERSION_SHIFT,
-		  offsetof(struct sdpcmd_regs, tosbmailboxdata));
+		  SD_REG(tosbmailboxdata));
 	err = sdio_enable_func(sdiodev->func[SDIO_FUNC_2]);
 
 
@@ -4077,10 +4074,9 @@ static void brcmf_sdio_firmware_callback(struct device *dev, int err,
 	if (!err) {
 		/* Set up the interrupt mask and enable interrupts */
 		bus->hostintmask = HOSTINTMASK;
-		w_sdreg32(bus, bus->hostintmask,
-			  offsetof(struct sdpcmd_regs, hostintmask));
+		w_sdreg32(bus, bus->hostintmask, SD_REG(hostintmask));
 
-		brcmf_sdiod_regwb(sdiodev, SBSDIO_WATERMARK, 8, &err);
+		brcmf_sdiod_writeb(sdiodev, SBSDIO_WATERMARK, 8, &err);
 	} else {
 		/* Disable F2 again */
 		sdio_disable_func(sdiodev->func[SDIO_FUNC_2]);
@@ -4091,8 +4087,8 @@ static void brcmf_sdio_firmware_callback(struct device *dev, int err,
 		brcmf_sdio_sr_init(bus);
 	} else {
 		/* Restore previous clock setting */
-		brcmf_sdiod_regwb(sdiodev, SBSDIO_FUNC1_CHIPCLKCSR,
-				  saveclk, &err);
+		brcmf_sdiod_writeb(sdiodev, SBSDIO_FUNC1_CHIPCLKCSR,
+				   saveclk, &err);
 	}
 
 	if (err == 0) {
@@ -4224,7 +4220,7 @@ struct brcmf_sdio *brcmf_sdio_probe(struct brcmf_sdio_dev *sdiodev)
 	bus->rxflow = false;
 
 	/* Done with backplane-dependent accesses, can drop clock... */
-	brcmf_sdiod_regwb(bus->sdiodev, SBSDIO_FUNC1_CHIPCLKCSR, 0, NULL);
+	brcmf_sdiod_writeb(bus->sdiodev, SBSDIO_FUNC1_CHIPCLKCSR, 0, NULL);
 
 	sdio_release_host(bus->sdiodev->func[1]);
 
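The ALP wait above leans on the SPINWAIT() polling macro, which is easy
to misread now that the register read is folded into the condition. As a
rough sketch (paraphrasing the brcmu_utils.h macro, not part of this
patch), it re-evaluates the expression in 10 us steps until the time
budget is spent; the caller then re-tests the condition to tell success
from timeout, exactly as the ALPAV check does:

#define EXAMPLE_SPINWAIT(exp, us)				\
do {								\
	unsigned int countdown = (us);				\
	/* keep polling while the condition still holds */	\
	while ((exp) && countdown >= 10) {			\
		udelay(10);					\
		countdown -= 10;				\
	}							\
} while (0)
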
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.h
index f3da32f..01def16 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.h
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.h
@@ -50,17 +50,19 @@
 #define SBSDIO_NUM_FUNCTION		3
 
 /* function 0 vendor specific CCCR registers */
-#define SDIO_CCCR_BRCM_CARDCAP			0xf0
-#define SDIO_CCCR_BRCM_CARDCAP_CMD14_SUPPORT	0x02
-#define SDIO_CCCR_BRCM_CARDCAP_CMD14_EXT	0x04
-#define SDIO_CCCR_BRCM_CARDCAP_CMD_NODEC	0x08
-#define SDIO_CCCR_BRCM_CARDCTRL		0xf1
-#define SDIO_CCCR_BRCM_CARDCTRL_WLANRESET	0x02
-#define SDIO_CCCR_BRCM_SEPINT			0xf2
 
-#define  SDIO_SEPINT_MASK		0x01
-#define  SDIO_SEPINT_OE			0x02
-#define  SDIO_SEPINT_ACT_HI		0x04
+#define SDIO_CCCR_BRCM_CARDCAP			0xf0
+#define SDIO_CCCR_BRCM_CARDCAP_CMD14_SUPPORT	BIT(1)
+#define SDIO_CCCR_BRCM_CARDCAP_CMD14_EXT	BIT(2)
+#define SDIO_CCCR_BRCM_CARDCAP_CMD_NODEC	BIT(3)
+
+#define SDIO_CCCR_BRCM_CARDCTRL			0xf1
+#define SDIO_CCCR_BRCM_CARDCTRL_WLANRESET	BIT(1)
+
+#define SDIO_CCCR_BRCM_SEPINT			0xf2
+#define SDIO_CCCR_BRCM_SEPINT_MASK		BIT(0)
+#define SDIO_CCCR_BRCM_SEPINT_OE		BIT(1)
+#define SDIO_CCCR_BRCM_SEPINT_ACT_HI		BIT(2)
 
 /* function 1 miscellaneous registers */
 
@@ -131,11 +133,6 @@
 /* with b15, maps to 32-bit SB access */
 #define SBSDIO_SB_ACCESS_2_4B_FLAG	0x08000
 
-/* valid bits in SBSDIO_FUNC1_SBADDRxxx regs */
-
-#define SBSDIO_SBADDRLOW_MASK		0x80	/* Valid bits in SBADDRLOW */
-#define SBSDIO_SBADDRMID_MASK		0xff	/* Valid bits in SBADDRMID */
-#define SBSDIO_SBADDRHIGH_MASK		0xffU	/* Valid bits in SBADDRHIGH */
 /* Address bits from SBADDR regs */
 #define SBSDIO_SBWINDOW_MASK		0xffff8000
 
@@ -296,13 +293,24 @@ struct sdpcmd_regs {
 int brcmf_sdiod_intr_register(struct brcmf_sdio_dev *sdiodev);
 void brcmf_sdiod_intr_unregister(struct brcmf_sdio_dev *sdiodev);
 
-/* sdio device register access interface */
-u8 brcmf_sdiod_regrb(struct brcmf_sdio_dev *sdiodev, u32 addr, int *ret);
-u32 brcmf_sdiod_regrl(struct brcmf_sdio_dev *sdiodev, u32 addr, int *ret);
-void brcmf_sdiod_regwb(struct brcmf_sdio_dev *sdiodev, u32 addr, u8 data,
-		       int *ret);
-void brcmf_sdiod_regwl(struct brcmf_sdio_dev *sdiodev, u32 addr, u32 data,
-		       int *ret);
+/* SDIO device register access interface */
+/* Accessors for SDIO Function 0 */
+#define brcmf_sdiod_func0_rb(sdiodev, addr, r) \
+	sdio_readb((sdiodev)->func[0], (addr), (r))
+
+#define brcmf_sdiod_func0_wb(sdiodev, addr, v, ret) \
+	sdio_writeb((sdiodev)->func[0], (v), (addr), (ret))
+
+/* Accessors for SDIO Function 1 */
+#define brcmf_sdiod_readb(sdiodev, addr, r) \
+	sdio_readb((sdiodev)->func[1], (addr), (r))
+
+#define brcmf_sdiod_writeb(sdiodev, addr, v, ret) \
+	sdio_writeb((sdiodev)->func[1], (v), (addr), (ret))
+
+u32 brcmf_sdiod_readl(struct brcmf_sdio_dev *sdiodev, u32 addr, int *ret);
+void brcmf_sdiod_writel(struct brcmf_sdio_dev *sdiodev, u32 addr, u32 data,
+			int *ret);
 
 /* Buffer transfer to/from device (client) core via cmd53.
  *   fn:       function number
@@ -342,7 +350,7 @@ int brcmf_sdiod_ramrw(struct brcmf_sdio_dev *sdiodev, bool write, u32 address,
 		      u8 *data, uint size);
 
 /* Issue an abort to the specified function */
-int brcmf_sdiod_abort(struct brcmf_sdio_dev *sdiodev, uint fn);
+int brcmf_sdiod_abort(struct brcmf_sdio_dev *sdiodev, u8 fn);
 void brcmf_sdiod_sgtable_alloc(struct brcmf_sdio_dev *sdiodev);
 void brcmf_sdiod_change_state(struct brcmf_sdio_dev *sdiodev,
 			      enum brcmf_sdiod_state state);
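
The net effect of the rename is easiest to see in this header: byte
access on SDIO functions 0 and 1 no longer goes through driver wrappers
at all, while 32-bit access keeps an out-of-line helper because the
backplane window has to be programmed first. A minimal sketch of a
caller (hypothetical helper, not part of this patch):

/* Force the ALP clock via the new byte accessors;
 * brcmf_sdiod_readb()/_writeb() expand directly to
 * sdio_readb()/sdio_writeb() on SDIO function 1.
 */
static int example_force_alp(struct brcmf_sdio_dev *sdiodev)
{
	int err = 0;
	u8 clkcsr;

	clkcsr = brcmf_sdiod_readb(sdiodev, SBSDIO_FUNC1_CHIPCLKCSR, &err);
	if (err)
		return err;

	brcmf_sdiod_writeb(sdiodev, SBSDIO_FUNC1_CHIPCLKCSR,
			   clkcsr | SBSDIO_FORCE_ALP, &err);
	return err;
}
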
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_n.c b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_n.c
index 763e8ba..7e01981b 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_n.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_n.c
@@ -16049,8 +16049,7 @@ static void wlc_phy_workarounds_nphy_gainctrl(struct brcms_phy *pi)
 		wlc_phy_set_rfseq_nphy(pi, NPHY_RFSEQ_UPDATEGAINU,
 				       rfseq_updategainu_events,
 				       rfseq_updategainu_dlys,
-				       sizeof(rfseq_updategainu_events) /
-				       sizeof(rfseq_updategainu_events[0]));
+				       ARRAY_SIZE(rfseq_updategainu_events));
 
 		mod_phy_reg(pi, 0x153, (0xff << 8), (90 << 8));
 
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phytbl_n.c b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phytbl_n.c
index dbf50ef..533bd4b 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phytbl_n.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phytbl_n.c
@@ -14,6 +14,7 @@
  * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
  */
 
+#include <linux/kernel.h>
 #include <types.h>
 #include "phytbl_n.h"
 
@@ -4437,109 +4438,39 @@ static const u16 loft_lut_core1_rev0[] = {
 };
 
 const struct phytbl_info mimophytbl_info_rev0_volatile[] = {
-	{&bdi_tbl_rev0, sizeof(bdi_tbl_rev0) / sizeof(bdi_tbl_rev0[0]), 21, 0,
-	 16}
-	,
-	{&pltlut_tbl_rev0, sizeof(pltlut_tbl_rev0) / sizeof(pltlut_tbl_rev0[0]),
-	 20, 0, 32}
-	,
-	{&gainctrl_lut_core0_rev0,
-	 sizeof(gainctrl_lut_core0_rev0) / sizeof(gainctrl_lut_core0_rev0[0]),
-	 26, 192, 32}
-	,
-	{&gainctrl_lut_core1_rev0,
-	 sizeof(gainctrl_lut_core1_rev0) / sizeof(gainctrl_lut_core1_rev0[0]),
-	 27, 192, 32}
-	,
-
-	{&est_pwr_lut_core0_rev0,
-	 sizeof(est_pwr_lut_core0_rev0) / sizeof(est_pwr_lut_core0_rev0[0]), 26,
-	 0, 8}
-	,
-	{&est_pwr_lut_core1_rev0,
-	 sizeof(est_pwr_lut_core1_rev0) / sizeof(est_pwr_lut_core1_rev0[0]), 27,
-	 0, 8}
-	,
-	{&adj_pwr_lut_core0_rev0,
-	 sizeof(adj_pwr_lut_core0_rev0) / sizeof(adj_pwr_lut_core0_rev0[0]), 26,
-	 64, 8}
-	,
-	{&adj_pwr_lut_core1_rev0,
-	 sizeof(adj_pwr_lut_core1_rev0) / sizeof(adj_pwr_lut_core1_rev0[0]), 27,
-	 64, 8}
-	,
-	{&iq_lut_core0_rev0,
-	 sizeof(iq_lut_core0_rev0) / sizeof(iq_lut_core0_rev0[0]), 26, 320, 32}
-	,
-	{&iq_lut_core1_rev0,
-	 sizeof(iq_lut_core1_rev0) / sizeof(iq_lut_core1_rev0[0]), 27, 320, 32}
-	,
-	{&loft_lut_core0_rev0,
-	 sizeof(loft_lut_core0_rev0) / sizeof(loft_lut_core0_rev0[0]), 26, 448,
-	 16}
-	,
-	{&loft_lut_core1_rev0,
-	 sizeof(loft_lut_core1_rev0) / sizeof(loft_lut_core1_rev0[0]), 27, 448,
-	 16}
-	,
+	{&bdi_tbl_rev0, ARRAY_SIZE(bdi_tbl_rev0), 21, 0, 16},
+	{&pltlut_tbl_rev0, ARRAY_SIZE(pltlut_tbl_rev0), 20, 0, 32},
+	{&gainctrl_lut_core0_rev0, ARRAY_SIZE(gainctrl_lut_core0_rev0), 26, 192, 32},
+	{&gainctrl_lut_core1_rev0, ARRAY_SIZE(gainctrl_lut_core1_rev0), 27, 192, 32},
+	{&est_pwr_lut_core0_rev0, ARRAY_SIZE(est_pwr_lut_core0_rev0), 26, 0, 8},
+	{&est_pwr_lut_core1_rev0, ARRAY_SIZE(est_pwr_lut_core1_rev0), 27, 0, 8},
+	{&adj_pwr_lut_core0_rev0, ARRAY_SIZE(adj_pwr_lut_core0_rev0), 26, 64, 8},
+	{&adj_pwr_lut_core1_rev0, ARRAY_SIZE(adj_pwr_lut_core1_rev0), 27, 64, 8},
+	{&iq_lut_core0_rev0, ARRAY_SIZE(iq_lut_core0_rev0), 26, 320, 32},
+	{&iq_lut_core1_rev0, ARRAY_SIZE(iq_lut_core1_rev0), 27, 320, 32},
+	{&loft_lut_core0_rev0, ARRAY_SIZE(loft_lut_core0_rev0), 26, 448, 16},
+	{&loft_lut_core1_rev0, ARRAY_SIZE(loft_lut_core1_rev0), 27, 448, 16},
 };
 
 const struct phytbl_info mimophytbl_info_rev0[] = {
-	{&frame_struct_rev0,
-	 sizeof(frame_struct_rev0) / sizeof(frame_struct_rev0[0]), 10, 0, 32}
-	,
-	{&frame_lut_rev0, sizeof(frame_lut_rev0) / sizeof(frame_lut_rev0[0]),
-	 24, 0, 8}
-	,
-	{&tmap_tbl_rev0, sizeof(tmap_tbl_rev0) / sizeof(tmap_tbl_rev0[0]), 12,
-	 0, 32}
-	,
-	{&tdtrn_tbl_rev0, sizeof(tdtrn_tbl_rev0) / sizeof(tdtrn_tbl_rev0[0]),
-	 14, 0, 32}
-	,
-	{&intlv_tbl_rev0, sizeof(intlv_tbl_rev0) / sizeof(intlv_tbl_rev0[0]),
-	 13, 0, 32}
-	,
-	{&pilot_tbl_rev0, sizeof(pilot_tbl_rev0) / sizeof(pilot_tbl_rev0[0]),
-	 11, 0, 16}
-	,
-	{&tdi_tbl20_ant0_rev0,
-	 sizeof(tdi_tbl20_ant0_rev0) / sizeof(tdi_tbl20_ant0_rev0[0]), 19, 128,
-	 32}
-	,
-	{&tdi_tbl20_ant1_rev0,
-	 sizeof(tdi_tbl20_ant1_rev0) / sizeof(tdi_tbl20_ant1_rev0[0]), 19, 256,
-	 32}
-	,
-	{&tdi_tbl40_ant0_rev0,
-	 sizeof(tdi_tbl40_ant0_rev0) / sizeof(tdi_tbl40_ant0_rev0[0]), 19, 640,
-	 32}
-	,
-	{&tdi_tbl40_ant1_rev0,
-	 sizeof(tdi_tbl40_ant1_rev0) / sizeof(tdi_tbl40_ant1_rev0[0]), 19, 768,
-	 32}
-	,
-	{&chanest_tbl_rev0,
-	 sizeof(chanest_tbl_rev0) / sizeof(chanest_tbl_rev0[0]), 22, 0, 32}
-	,
-	{&mcs_tbl_rev0, sizeof(mcs_tbl_rev0) / sizeof(mcs_tbl_rev0[0]), 18, 0,
-	 8}
-	,
-	{&noise_var_tbl0_rev0,
-	 sizeof(noise_var_tbl0_rev0) / sizeof(noise_var_tbl0_rev0[0]), 16, 0,
-	 32}
-	,
-	{&noise_var_tbl1_rev0,
-	 sizeof(noise_var_tbl1_rev0) / sizeof(noise_var_tbl1_rev0[0]), 16, 128,
-	 32}
-	,
+	{&frame_struct_rev0, ARRAY_SIZE(frame_struct_rev0), 10, 0, 32},
+	{&frame_lut_rev0, ARRAY_SIZE(frame_lut_rev0), 24, 0, 8},
+	{&tmap_tbl_rev0, ARRAY_SIZE(tmap_tbl_rev0), 12, 0, 32},
+	{&tdtrn_tbl_rev0, ARRAY_SIZE(tdtrn_tbl_rev0), 14, 0, 32},
+	{&intlv_tbl_rev0, ARRAY_SIZE(intlv_tbl_rev0), 13, 0, 32},
+	{&pilot_tbl_rev0, ARRAY_SIZE(pilot_tbl_rev0), 11, 0, 16},
+	{&tdi_tbl20_ant0_rev0, ARRAY_SIZE(tdi_tbl20_ant0_rev0), 19, 128, 32},
+	{&tdi_tbl20_ant1_rev0, ARRAY_SIZE(tdi_tbl20_ant1_rev0), 19, 256, 32},
+	{&tdi_tbl40_ant0_rev0, ARRAY_SIZE(tdi_tbl40_ant0_rev0), 19, 640, 32},
+	{&tdi_tbl40_ant1_rev0, ARRAY_SIZE(tdi_tbl40_ant1_rev0), 19, 768, 32},
+	{&chanest_tbl_rev0, ARRAY_SIZE(chanest_tbl_rev0), 22, 0, 32},
+	{&mcs_tbl_rev0, ARRAY_SIZE(mcs_tbl_rev0), 18, 0, 8},
+	{&noise_var_tbl0_rev0, ARRAY_SIZE(noise_var_tbl0_rev0), 16, 0, 32},
+	{&noise_var_tbl1_rev0, ARRAY_SIZE(noise_var_tbl1_rev0), 16, 128, 32},
 };
 
-const u32 mimophytbl_info_sz_rev0 =
-	sizeof(mimophytbl_info_rev0) / sizeof(mimophytbl_info_rev0[0]);
-const u32 mimophytbl_info_sz_rev0_volatile =
-	sizeof(mimophytbl_info_rev0_volatile) /
-	sizeof(mimophytbl_info_rev0_volatile[0]);
+const u32 mimophytbl_info_sz_rev0 = ARRAY_SIZE(mimophytbl_info_rev0);
+const u32 mimophytbl_info_sz_rev0_volatile = ARRAY_SIZE(mimophytbl_info_rev0_volatile);
 
 static const u16 ant_swctrl_tbl_rev3[] = {
 	0x0082,
@@ -9363,132 +9294,53 @@ static const u32 papd_cal_scalars_tbl_core1_rev3[] = {
 };
 
 const struct phytbl_info mimophytbl_info_rev3_volatile[] = {
-	{&ant_swctrl_tbl_rev3,
-	 sizeof(ant_swctrl_tbl_rev3) / sizeof(ant_swctrl_tbl_rev3[0]), 9, 0, 16}
-	,
+	{&ant_swctrl_tbl_rev3, ARRAY_SIZE(ant_swctrl_tbl_rev3), 9, 0, 16},
 };
 
 const struct phytbl_info mimophytbl_info_rev3_volatile1[] = {
-	{&ant_swctrl_tbl_rev3_1,
-	 sizeof(ant_swctrl_tbl_rev3_1) / sizeof(ant_swctrl_tbl_rev3_1[0]), 9, 0,
-	 16}
-	,
+	{&ant_swctrl_tbl_rev3_1, ARRAY_SIZE(ant_swctrl_tbl_rev3_1), 9, 0, 16},
 };
 
 const struct phytbl_info mimophytbl_info_rev3_volatile2[] = {
-	{&ant_swctrl_tbl_rev3_2,
-	 sizeof(ant_swctrl_tbl_rev3_2) / sizeof(ant_swctrl_tbl_rev3_2[0]), 9, 0,
-	 16}
-	,
+	{&ant_swctrl_tbl_rev3_2, ARRAY_SIZE(ant_swctrl_tbl_rev3_2), 9, 0, 16},
 };
 
 const struct phytbl_info mimophytbl_info_rev3_volatile3[] = {
-	{&ant_swctrl_tbl_rev3_3,
-	 sizeof(ant_swctrl_tbl_rev3_3) / sizeof(ant_swctrl_tbl_rev3_3[0]), 9, 0,
-	 16}
-	,
+	{&ant_swctrl_tbl_rev3_3, ARRAY_SIZE(ant_swctrl_tbl_rev3_3), 9, 0, 16},
 };
 
 const struct phytbl_info mimophytbl_info_rev3[] = {
-	{&frame_struct_rev3,
-	 sizeof(frame_struct_rev3) / sizeof(frame_struct_rev3[0]), 10, 0, 32}
-	,
-	{&pilot_tbl_rev3, sizeof(pilot_tbl_rev3) / sizeof(pilot_tbl_rev3[0]),
-	 11, 0, 16}
-	,
-	{&tmap_tbl_rev3, sizeof(tmap_tbl_rev3) / sizeof(tmap_tbl_rev3[0]), 12,
-	 0, 32}
-	,
-	{&intlv_tbl_rev3, sizeof(intlv_tbl_rev3) / sizeof(intlv_tbl_rev3[0]),
-	 13, 0, 32}
-	,
-	{&tdtrn_tbl_rev3, sizeof(tdtrn_tbl_rev3) / sizeof(tdtrn_tbl_rev3[0]),
-	 14, 0, 32}
-	,
-	{&noise_var_tbl_rev3,
-	 sizeof(noise_var_tbl_rev3) / sizeof(noise_var_tbl_rev3[0]), 16, 0, 32}
-	,
-	{&mcs_tbl_rev3, sizeof(mcs_tbl_rev3) / sizeof(mcs_tbl_rev3[0]), 18, 0,
-	 16}
-	,
-	{&tdi_tbl20_ant0_rev3,
-	 sizeof(tdi_tbl20_ant0_rev3) / sizeof(tdi_tbl20_ant0_rev3[0]), 19, 128,
-	 32}
-	,
-	{&tdi_tbl20_ant1_rev3,
-	 sizeof(tdi_tbl20_ant1_rev3) / sizeof(tdi_tbl20_ant1_rev3[0]), 19, 256,
-	 32}
-	,
-	{&tdi_tbl40_ant0_rev3,
-	 sizeof(tdi_tbl40_ant0_rev3) / sizeof(tdi_tbl40_ant0_rev3[0]), 19, 640,
-	 32}
-	,
-	{&tdi_tbl40_ant1_rev3,
-	 sizeof(tdi_tbl40_ant1_rev3) / sizeof(tdi_tbl40_ant1_rev3[0]), 19, 768,
-	 32}
-	,
-	{&pltlut_tbl_rev3, sizeof(pltlut_tbl_rev3) / sizeof(pltlut_tbl_rev3[0]),
-	 20, 0, 32}
-	,
-	{&chanest_tbl_rev3,
-	 sizeof(chanest_tbl_rev3) / sizeof(chanest_tbl_rev3[0]), 22, 0, 32}
-	,
-	{&frame_lut_rev3, sizeof(frame_lut_rev3) / sizeof(frame_lut_rev3[0]),
-	 24, 0, 8}
-	,
-	{&est_pwr_lut_core0_rev3,
-	 sizeof(est_pwr_lut_core0_rev3) / sizeof(est_pwr_lut_core0_rev3[0]), 26,
-	 0, 8}
-	,
-	{&est_pwr_lut_core1_rev3,
-	 sizeof(est_pwr_lut_core1_rev3) / sizeof(est_pwr_lut_core1_rev3[0]), 27,
-	 0, 8}
-	,
-	{&adj_pwr_lut_core0_rev3,
-	 sizeof(adj_pwr_lut_core0_rev3) / sizeof(adj_pwr_lut_core0_rev3[0]), 26,
-	 64, 8}
-	,
-	{&adj_pwr_lut_core1_rev3,
-	 sizeof(adj_pwr_lut_core1_rev3) / sizeof(adj_pwr_lut_core1_rev3[0]), 27,
-	 64, 8}
-	,
-	{&gainctrl_lut_core0_rev3,
-	 sizeof(gainctrl_lut_core0_rev3) / sizeof(gainctrl_lut_core0_rev3[0]),
-	 26, 192, 32}
-	,
-	{&gainctrl_lut_core1_rev3,
-	 sizeof(gainctrl_lut_core1_rev3) / sizeof(gainctrl_lut_core1_rev3[0]),
-	 27, 192, 32}
-	,
-	{&iq_lut_core0_rev3,
-	 sizeof(iq_lut_core0_rev3) / sizeof(iq_lut_core0_rev3[0]), 26, 320, 32}
-	,
-	{&iq_lut_core1_rev3,
-	 sizeof(iq_lut_core1_rev3) / sizeof(iq_lut_core1_rev3[0]), 27, 320, 32}
-	,
-	{&loft_lut_core0_rev3,
-	 sizeof(loft_lut_core0_rev3) / sizeof(loft_lut_core0_rev3[0]), 26, 448,
-	 16}
-	,
-	{&loft_lut_core1_rev3,
-	 sizeof(loft_lut_core1_rev3) / sizeof(loft_lut_core1_rev3[0]), 27, 448,
-	 16}
+	{&frame_struct_rev3, ARRAY_SIZE(frame_struct_rev3), 10, 0, 32},
+	{&pilot_tbl_rev3, ARRAY_SIZE(pilot_tbl_rev3), 11, 0, 16},
+	{&tmap_tbl_rev3, ARRAY_SIZE(tmap_tbl_rev3), 12, 0, 32},
+	{&intlv_tbl_rev3, ARRAY_SIZE(intlv_tbl_rev3), 13, 0, 32},
+	{&tdtrn_tbl_rev3, ARRAY_SIZE(tdtrn_tbl_rev3), 14, 0, 32},
+	{&noise_var_tbl_rev3, ARRAY_SIZE(noise_var_tbl_rev3), 16, 0, 32},
+	{&mcs_tbl_rev3, ARRAY_SIZE(mcs_tbl_rev3), 18, 0, 16},
+	{&tdi_tbl20_ant0_rev3, ARRAY_SIZE(tdi_tbl20_ant0_rev3), 19, 128, 32},
+	{&tdi_tbl20_ant1_rev3, ARRAY_SIZE(tdi_tbl20_ant1_rev3), 19, 256, 32},
+	{&tdi_tbl40_ant0_rev3, ARRAY_SIZE(tdi_tbl40_ant0_rev3), 19, 640, 32},
+	{&tdi_tbl40_ant1_rev3, ARRAY_SIZE(tdi_tbl40_ant1_rev3), 19, 768, 32},
+	{&pltlut_tbl_rev3, ARRAY_SIZE(pltlut_tbl_rev3), 20, 0, 32},
+	{&chanest_tbl_rev3, ARRAY_SIZE(chanest_tbl_rev3), 22, 0, 32},
+	{&frame_lut_rev3, ARRAY_SIZE(frame_lut_rev3), 24, 0, 8},
+	{&est_pwr_lut_core0_rev3, ARRAY_SIZE(est_pwr_lut_core0_rev3), 26, 0, 8},
+	{&est_pwr_lut_core1_rev3, ARRAY_SIZE(est_pwr_lut_core1_rev3), 27, 0, 8},
+	{&adj_pwr_lut_core0_rev3, ARRAY_SIZE(adj_pwr_lut_core0_rev3), 26, 64, 8},
+	{&adj_pwr_lut_core1_rev3, ARRAY_SIZE(adj_pwr_lut_core1_rev3), 27, 64, 8},
+	{&gainctrl_lut_core0_rev3, ARRAY_SIZE(gainctrl_lut_core0_rev3), 26, 192, 32},
+	{&gainctrl_lut_core1_rev3, ARRAY_SIZE(gainctrl_lut_core1_rev3), 27, 192, 32},
+	{&iq_lut_core0_rev3, ARRAY_SIZE(iq_lut_core0_rev3), 26, 320, 32},
+	{&iq_lut_core1_rev3, ARRAY_SIZE(iq_lut_core1_rev3), 27, 320, 32},
+	{&loft_lut_core0_rev3, ARRAY_SIZE(loft_lut_core0_rev3), 26, 448, 16},
+	{&loft_lut_core1_rev3, ARRAY_SIZE(loft_lut_core1_rev3), 27, 448, 16}
 };
 
-const u32 mimophytbl_info_sz_rev3 =
-	sizeof(mimophytbl_info_rev3) / sizeof(mimophytbl_info_rev3[0]);
-const u32 mimophytbl_info_sz_rev3_volatile =
-	sizeof(mimophytbl_info_rev3_volatile) /
-	sizeof(mimophytbl_info_rev3_volatile[0]);
-const u32 mimophytbl_info_sz_rev3_volatile1 =
-	sizeof(mimophytbl_info_rev3_volatile1) /
-	sizeof(mimophytbl_info_rev3_volatile1[0]);
-const u32 mimophytbl_info_sz_rev3_volatile2 =
-	sizeof(mimophytbl_info_rev3_volatile2) /
-	sizeof(mimophytbl_info_rev3_volatile2[0]);
-const u32 mimophytbl_info_sz_rev3_volatile3 =
-	sizeof(mimophytbl_info_rev3_volatile3) /
-	sizeof(mimophytbl_info_rev3_volatile3[0]);
+const u32 mimophytbl_info_sz_rev3 = ARRAY_SIZE(mimophytbl_info_rev3);
+const u32 mimophytbl_info_sz_rev3_volatile = ARRAY_SIZE(mimophytbl_info_rev3_volatile);
+const u32 mimophytbl_info_sz_rev3_volatile1 = ARRAY_SIZE(mimophytbl_info_rev3_volatile1);
+const u32 mimophytbl_info_sz_rev3_volatile2 = ARRAY_SIZE(mimophytbl_info_rev3_volatile2);
+const u32 mimophytbl_info_sz_rev3_volatile3 = ARRAY_SIZE(mimophytbl_info_rev3_volatile3);
 
 static const u32 tmap_tbl_rev7[] = {
 	0x8a88aa80,
@@ -10469,162 +10321,58 @@ static const u32 papd_cal_scalars_tbl_core1_rev7[] = {
 };
 
 const struct phytbl_info mimophytbl_info_rev7[] = {
-	{&frame_struct_rev3,
-	 sizeof(frame_struct_rev3) / sizeof(frame_struct_rev3[0]), 10, 0, 32}
-	,
-	{&pilot_tbl_rev3, sizeof(pilot_tbl_rev3) / sizeof(pilot_tbl_rev3[0]),
-	 11, 0, 16}
-	,
-	{&tmap_tbl_rev7, sizeof(tmap_tbl_rev7) / sizeof(tmap_tbl_rev7[0]), 12,
-	 0, 32}
-	,
-	{&intlv_tbl_rev3, sizeof(intlv_tbl_rev3) / sizeof(intlv_tbl_rev3[0]),
-	 13, 0, 32}
-	,
-	{&tdtrn_tbl_rev3, sizeof(tdtrn_tbl_rev3) / sizeof(tdtrn_tbl_rev3[0]),
-	 14, 0, 32}
-	,
-	{&noise_var_tbl_rev7,
-	 sizeof(noise_var_tbl_rev7) / sizeof(noise_var_tbl_rev7[0]), 16, 0, 32}
-	,
-	{&mcs_tbl_rev3, sizeof(mcs_tbl_rev3) / sizeof(mcs_tbl_rev3[0]), 18, 0,
-	 16}
-	,
-	{&tdi_tbl20_ant0_rev3,
-	 sizeof(tdi_tbl20_ant0_rev3) / sizeof(tdi_tbl20_ant0_rev3[0]), 19, 128,
-	 32}
-	,
-	{&tdi_tbl20_ant1_rev3,
-	 sizeof(tdi_tbl20_ant1_rev3) / sizeof(tdi_tbl20_ant1_rev3[0]), 19, 256,
-	 32}
-	,
-	{&tdi_tbl40_ant0_rev3,
-	 sizeof(tdi_tbl40_ant0_rev3) / sizeof(tdi_tbl40_ant0_rev3[0]), 19, 640,
-	 32}
-	,
-	{&tdi_tbl40_ant1_rev3,
-	 sizeof(tdi_tbl40_ant1_rev3) / sizeof(tdi_tbl40_ant1_rev3[0]), 19, 768,
-	 32}
-	,
-	{&pltlut_tbl_rev3, sizeof(pltlut_tbl_rev3) / sizeof(pltlut_tbl_rev3[0]),
-	 20, 0, 32}
-	,
-	{&chanest_tbl_rev3,
-	 sizeof(chanest_tbl_rev3) / sizeof(chanest_tbl_rev3[0]), 22, 0, 32}
-	,
-	{&frame_lut_rev3, sizeof(frame_lut_rev3) / sizeof(frame_lut_rev3[0]),
-	 24, 0, 8}
-	,
-	{&est_pwr_lut_core0_rev3,
-	 sizeof(est_pwr_lut_core0_rev3) / sizeof(est_pwr_lut_core0_rev3[0]), 26,
-	 0, 8}
-	,
-	{&est_pwr_lut_core1_rev3,
-	 sizeof(est_pwr_lut_core1_rev3) / sizeof(est_pwr_lut_core1_rev3[0]), 27,
-	 0, 8}
-	,
-	{&adj_pwr_lut_core0_rev3,
-	 sizeof(adj_pwr_lut_core0_rev3) / sizeof(adj_pwr_lut_core0_rev3[0]), 26,
-	 64, 8}
-	,
-	{&adj_pwr_lut_core1_rev3,
-	 sizeof(adj_pwr_lut_core1_rev3) / sizeof(adj_pwr_lut_core1_rev3[0]), 27,
-	 64, 8}
-	,
-	{&gainctrl_lut_core0_rev3,
-	 sizeof(gainctrl_lut_core0_rev3) / sizeof(gainctrl_lut_core0_rev3[0]),
-	 26, 192, 32}
-	,
-	{&gainctrl_lut_core1_rev3,
-	 sizeof(gainctrl_lut_core1_rev3) / sizeof(gainctrl_lut_core1_rev3[0]),
-	 27, 192, 32}
-	,
-	{&iq_lut_core0_rev3,
-	 sizeof(iq_lut_core0_rev3) / sizeof(iq_lut_core0_rev3[0]), 26, 320, 32}
-	,
-	{&iq_lut_core1_rev3,
-	 sizeof(iq_lut_core1_rev3) / sizeof(iq_lut_core1_rev3[0]), 27, 320, 32}
-	,
-	{&loft_lut_core0_rev3,
-	 sizeof(loft_lut_core0_rev3) / sizeof(loft_lut_core0_rev3[0]), 26, 448,
-	 16}
-	,
-	{&loft_lut_core1_rev3,
-	 sizeof(loft_lut_core1_rev3) / sizeof(loft_lut_core1_rev3[0]), 27, 448,
-	 16}
-	,
+	{&frame_struct_rev3, ARRAY_SIZE(frame_struct_rev3), 10, 0, 32},
+	{&pilot_tbl_rev3, ARRAY_SIZE(pilot_tbl_rev3), 11, 0, 16},
+	{&tmap_tbl_rev7, ARRAY_SIZE(tmap_tbl_rev7), 12, 0, 32},
+	{&intlv_tbl_rev3, ARRAY_SIZE(intlv_tbl_rev3), 13, 0, 32},
+	{&tdtrn_tbl_rev3, ARRAY_SIZE(tdtrn_tbl_rev3), 14, 0, 32},
+	{&noise_var_tbl_rev7, ARRAY_SIZE(noise_var_tbl_rev7), 16, 0, 32},
+	{&mcs_tbl_rev3, ARRAY_SIZE(mcs_tbl_rev3), 18, 0, 16},
+	{&tdi_tbl20_ant0_rev3, ARRAY_SIZE(tdi_tbl20_ant0_rev3), 19, 128, 32},
+	{&tdi_tbl20_ant1_rev3, ARRAY_SIZE(tdi_tbl20_ant1_rev3), 19, 256, 32},
+	{&tdi_tbl40_ant0_rev3, ARRAY_SIZE(tdi_tbl40_ant0_rev3), 19, 640, 32},
+	{&tdi_tbl40_ant1_rev3, ARRAY_SIZE(tdi_tbl40_ant1_rev3), 19, 768, 32},
+	{&pltlut_tbl_rev3, ARRAY_SIZE(pltlut_tbl_rev3), 20, 0, 32},
+	{&chanest_tbl_rev3, ARRAY_SIZE(chanest_tbl_rev3), 22, 0, 32},
+	{&frame_lut_rev3, ARRAY_SIZE(frame_lut_rev3), 24, 0, 8},
+	{&est_pwr_lut_core0_rev3, ARRAY_SIZE(est_pwr_lut_core0_rev3), 26, 0, 8},
+	{&est_pwr_lut_core1_rev3, ARRAY_SIZE(est_pwr_lut_core1_rev3), 27, 0, 8},
+	{&adj_pwr_lut_core0_rev3, ARRAY_SIZE(adj_pwr_lut_core0_rev3), 26, 64, 8},
+	{&adj_pwr_lut_core1_rev3, ARRAY_SIZE(adj_pwr_lut_core1_rev3), 27, 64, 8},
+	{&gainctrl_lut_core0_rev3, ARRAY_SIZE(gainctrl_lut_core0_rev3), 26, 192, 32},
+	{&gainctrl_lut_core1_rev3, ARRAY_SIZE(gainctrl_lut_core1_rev3), 27, 192, 32},
+	{&iq_lut_core0_rev3, ARRAY_SIZE(iq_lut_core0_rev3), 26, 320, 32},
+	{&iq_lut_core1_rev3, ARRAY_SIZE(iq_lut_core1_rev3), 27, 320, 32},
+	{&loft_lut_core0_rev3, ARRAY_SIZE(loft_lut_core0_rev3), 26, 448, 16},
+	{&loft_lut_core1_rev3, ARRAY_SIZE(loft_lut_core1_rev3), 27, 448, 16},
 	{&papd_comp_rfpwr_tbl_core0_rev3,
-	 sizeof(papd_comp_rfpwr_tbl_core0_rev3) /
-	 sizeof(papd_comp_rfpwr_tbl_core0_rev3[0]), 26, 576, 16}
-	,
+	 ARRAY_SIZE(papd_comp_rfpwr_tbl_core0_rev3), 26, 576, 16},
 	{&papd_comp_rfpwr_tbl_core1_rev3,
-	 sizeof(papd_comp_rfpwr_tbl_core1_rev3) /
-	 sizeof(papd_comp_rfpwr_tbl_core1_rev3[0]), 27, 576, 16}
-	,
+	 ARRAY_SIZE(papd_comp_rfpwr_tbl_core1_rev3), 27, 576, 16},
 	{&papd_comp_epsilon_tbl_core0_rev7,
-	 sizeof(papd_comp_epsilon_tbl_core0_rev7) /
-	 sizeof(papd_comp_epsilon_tbl_core0_rev7[0]), 31, 0, 32}
-	,
+	 ARRAY_SIZE(papd_comp_epsilon_tbl_core0_rev7), 31, 0, 32},
 	{&papd_cal_scalars_tbl_core0_rev7,
-	 sizeof(papd_cal_scalars_tbl_core0_rev7) /
-	 sizeof(papd_cal_scalars_tbl_core0_rev7[0]), 32, 0, 32}
-	,
+	 ARRAY_SIZE(papd_cal_scalars_tbl_core0_rev7), 32, 0, 32},
 	{&papd_comp_epsilon_tbl_core1_rev7,
-	 sizeof(papd_comp_epsilon_tbl_core1_rev7) /
-	 sizeof(papd_comp_epsilon_tbl_core1_rev7[0]), 33, 0, 32}
-	,
+	 ARRAY_SIZE(papd_comp_epsilon_tbl_core1_rev7), 33, 0, 32},
 	{&papd_cal_scalars_tbl_core1_rev7,
-	 sizeof(papd_cal_scalars_tbl_core1_rev7) /
-	 sizeof(papd_cal_scalars_tbl_core1_rev7[0]), 34, 0, 32}
-	,
+	 ARRAY_SIZE(papd_cal_scalars_tbl_core1_rev7), 34, 0, 32},
 };
 
-const u32 mimophytbl_info_sz_rev7 =
-	sizeof(mimophytbl_info_rev7) / sizeof(mimophytbl_info_rev7[0]);
+const u32 mimophytbl_info_sz_rev7 = ARRAY_SIZE(mimophytbl_info_rev7);
 
 const struct phytbl_info mimophytbl_info_rev16[] = {
-	{&noise_var_tbl_rev7,
-	 sizeof(noise_var_tbl_rev7) / sizeof(noise_var_tbl_rev7[0]), 16, 0, 32}
-	,
-	{&est_pwr_lut_core0_rev3,
-	 sizeof(est_pwr_lut_core0_rev3) / sizeof(est_pwr_lut_core0_rev3[0]), 26,
-	 0, 8}
-	,
-	{&est_pwr_lut_core1_rev3,
-	 sizeof(est_pwr_lut_core1_rev3) / sizeof(est_pwr_lut_core1_rev3[0]), 27,
-	 0, 8}
-	,
-	{&adj_pwr_lut_core0_rev3,
-	 sizeof(adj_pwr_lut_core0_rev3) / sizeof(adj_pwr_lut_core0_rev3[0]), 26,
-	 64, 8}
-	,
-	{&adj_pwr_lut_core1_rev3,
-	 sizeof(adj_pwr_lut_core1_rev3) / sizeof(adj_pwr_lut_core1_rev3[0]), 27,
-	 64, 8}
-	,
-	{&gainctrl_lut_core0_rev3,
-	 sizeof(gainctrl_lut_core0_rev3) / sizeof(gainctrl_lut_core0_rev3[0]),
-	 26, 192, 32}
-	,
-	{&gainctrl_lut_core1_rev3,
-	 sizeof(gainctrl_lut_core1_rev3) / sizeof(gainctrl_lut_core1_rev3[0]),
-	 27, 192, 32}
-	,
-	{&iq_lut_core0_rev3,
-	 sizeof(iq_lut_core0_rev3) / sizeof(iq_lut_core0_rev3[0]), 26, 320, 32}
-	,
-	{&iq_lut_core1_rev3,
-	 sizeof(iq_lut_core1_rev3) / sizeof(iq_lut_core1_rev3[0]), 27, 320, 32}
-	,
-	{&loft_lut_core0_rev3,
-	 sizeof(loft_lut_core0_rev3) / sizeof(loft_lut_core0_rev3[0]), 26, 448,
-	 16}
-	,
-	{&loft_lut_core1_rev3,
-	 sizeof(loft_lut_core1_rev3) / sizeof(loft_lut_core1_rev3[0]), 27, 448,
-	 16}
-	,
+	{&noise_var_tbl_rev7, ARRAY_SIZE(noise_var_tbl_rev7), 16, 0, 32},
+	{&est_pwr_lut_core0_rev3, ARRAY_SIZE(est_pwr_lut_core0_rev3), 26, 0, 8},
+	{&est_pwr_lut_core1_rev3, ARRAY_SIZE(est_pwr_lut_core1_rev3), 27, 0, 8},
+	{&adj_pwr_lut_core0_rev3, ARRAY_SIZE(adj_pwr_lut_core0_rev3), 26, 64, 8},
+	{&adj_pwr_lut_core1_rev3, ARRAY_SIZE(adj_pwr_lut_core1_rev3), 27, 64, 8},
+	{&gainctrl_lut_core0_rev3, ARRAY_SIZE(gainctrl_lut_core0_rev3), 26, 192, 32},
+	{&gainctrl_lut_core1_rev3, ARRAY_SIZE(gainctrl_lut_core1_rev3), 27, 192, 32},
+	{&iq_lut_core0_rev3, ARRAY_SIZE(iq_lut_core0_rev3), 26, 320, 32},
+	{&iq_lut_core1_rev3, ARRAY_SIZE(iq_lut_core1_rev3), 27, 320, 32},
+	{&loft_lut_core0_rev3, ARRAY_SIZE(loft_lut_core0_rev3), 26, 448, 16},
+	{&loft_lut_core1_rev3, ARRAY_SIZE(loft_lut_core1_rev3), 27, 448, 16},
 };
 
-const u32 mimophytbl_info_sz_rev16 =
-	sizeof(mimophytbl_info_rev16) / sizeof(mimophytbl_info_rev16[0]);
+const u32 mimophytbl_info_sz_rev16 = ARRAY_SIZE(mimophytbl_info_rev16);
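
The conversions above are mechanical: ARRAY_SIZE() from <linux/kernel.h>
(hence the new include) is defined as the same sizeof division, but it
also fails to compile when handed a pointer rather than an array. A
small illustration with a made-up table:

#include <linux/kernel.h>

static const u16 example_tbl[] = { 0x0082, 0x0086, 0x014c };

/* Identical values; the macro form additionally rejects pointers
 * at build time.
 */
static const u32 example_sz_open = sizeof(example_tbl) / sizeof(example_tbl[0]);
static const u32 example_sz_macro = ARRAY_SIZE(example_tbl);
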
diff --git a/drivers/net/wireless/intel/iwlwifi/Makefile b/drivers/net/wireless/intel/iwlwifi/Makefile
index ff136f2..e2c151a 100644
--- a/drivers/net/wireless/intel/iwlwifi/Makefile
+++ b/drivers/net/wireless/intel/iwlwifi/Makefile
@@ -9,7 +9,7 @@
 iwlwifi-objs		+= pcie/drv.o pcie/rx.o pcie/tx.o pcie/trans.o
 iwlwifi-objs		+= pcie/ctxt-info.o pcie/trans-gen2.o pcie/tx-gen2.o
 iwlwifi-$(CONFIG_IWLDVM) += cfg/1000.o cfg/2000.o cfg/5000.o cfg/6000.o
-iwlwifi-$(CONFIG_IWLMVM) += cfg/7000.o cfg/8000.o cfg/9000.o cfg/a000.o
+iwlwifi-$(CONFIG_IWLMVM) += cfg/7000.o cfg/8000.o cfg/9000.o cfg/22000.o
 iwlwifi-objs		+= iwl-trans.o
 iwlwifi-objs		+= fw/notif-wait.o
 iwlwifi-$(CONFIG_IWLMVM) += fw/paging.o fw/smem.o fw/init.o fw/dbg.o
diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/22000.c b/drivers/net/wireless/intel/iwlwifi/cfg/22000.c
new file mode 100644
index 0000000..48f6f80
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/cfg/22000.c
@@ -0,0 +1,216 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license.  When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2015-2017 Intel Deutschland GmbH
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2015-2017 Intel Deutschland GmbH
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ *  * Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in
+ *    the documentation and/or other materials provided with the
+ *    distribution.
+ *  * Neither the name Intel Corporation nor the names of its
+ *    contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *****************************************************************************/
+
+#include <linux/module.h>
+#include <linux/stringify.h>
+#include "iwl-config.h"
+#include "iwl-agn-hw.h"
+
+/* Highest firmware API version supported */
+#define IWL_22000_UCODE_API_MAX	36
+
+/* Lowest firmware API version supported */
+#define IWL_22000_UCODE_API_MIN	24
+
+/* NVM versions */
+#define IWL_22000_NVM_VERSION		0x0a1d
+#define IWL_22000_TX_POWER_VERSION	0xffff /* meaningless */
+
+/* Memory offsets and lengths */
+#define IWL_22000_DCCM_OFFSET		0x800000 /* LMAC1 */
+#define IWL_22000_DCCM_LEN		0x10000 /* LMAC1 */
+#define IWL_22000_DCCM2_OFFSET		0x880000
+#define IWL_22000_DCCM2_LEN		0x8000
+#define IWL_22000_SMEM_OFFSET		0x400000
+#define IWL_22000_SMEM_LEN		0xD0000
+
+#define IWL_22000_JF_FW_PRE	"iwlwifi-Qu-a0-jf-b0-"
+#define IWL_22000_HR_FW_PRE	"iwlwifi-Qu-a0-hr-a0-"
+#define IWL_22000_HR_CDB_FW_PRE	"iwlwifi-QuIcp-z0-hrcdb-a0-"
+#define IWL_22000_HR_F0_FW_PRE	"iwlwifi-QuQnj-f0-hr-a0-"
+#define IWL_22000_JF_B0_FW_PRE	"iwlwifi-QuQnj-a0-jf-b0-"
+#define IWL_22000_HR_A0_FW_PRE	"iwlwifi-QuQnj-a0-hr-a0-"
+
+#define IWL_22000_HR_MODULE_FIRMWARE(api) \
+	IWL_22000_HR_FW_PRE __stringify(api) ".ucode"
+#define IWL_22000_JF_MODULE_FIRMWARE(api) \
+	IWL_22000_JF_FW_PRE __stringify(api) ".ucode"
+#define IWL_22000_HR_F0_QNJ_MODULE_FIRMWARE(api) \
+	IWL_22000_HR_F0_FW_PRE __stringify(api) ".ucode"
+#define IWL_22000_JF_B0_QNJ_MODULE_FIRMWARE(api) \
+	IWL_22000_JF_B0_FW_PRE __stringify(api) ".ucode"
+#define IWL_22000_HR_A0_QNJ_MODULE_FIRMWARE(api) \
+	IWL_22000_HR_A0_FW_PRE __stringify(api) ".ucode"
+
+#define NVM_HW_SECTION_NUM_FAMILY_22000		10
+
+static const struct iwl_base_params iwl_22000_base_params = {
+	.eeprom_size = OTP_LOW_IMAGE_SIZE_FAMILY_22000,
+	.num_of_queues = 512,
+	.shadow_ram_support = true,
+	.led_compensation = 57,
+	.wd_timeout = IWL_LONG_WD_TIMEOUT,
+	.max_event_log_size = 512,
+	.shadow_reg_enable = true,
+	.pcie_l1_allowed = true,
+};
+
+static const struct iwl_ht_params iwl_22000_ht_params = {
+	.stbc = true,
+	.ldpc = true,
+	.ht40_bands = BIT(NL80211_BAND_2GHZ) | BIT(NL80211_BAND_5GHZ),
+};
+
+#define IWL_DEVICE_22000						\
+	.ucode_api_max = IWL_22000_UCODE_API_MAX,			\
+	.ucode_api_min = IWL_22000_UCODE_API_MIN,			\
+	.device_family = IWL_DEVICE_FAMILY_22000,			\
+	.max_inst_size = IWL60_RTC_INST_SIZE,				\
+	.max_data_size = IWL60_RTC_DATA_SIZE,				\
+	.base_params = &iwl_22000_base_params,				\
+	.led_mode = IWL_LED_RF_STATE,					\
+	.nvm_hw_section_num = NVM_HW_SECTION_NUM_FAMILY_22000,		\
+	.non_shared_ant = ANT_A,					\
+	.dccm_offset = IWL_22000_DCCM_OFFSET,				\
+	.dccm_len = IWL_22000_DCCM_LEN,					\
+	.dccm2_offset = IWL_22000_DCCM2_OFFSET,				\
+	.dccm2_len = IWL_22000_DCCM2_LEN,				\
+	.smem_offset = IWL_22000_SMEM_OFFSET,				\
+	.smem_len = IWL_22000_SMEM_LEN,					\
+	.features = IWL_TX_CSUM_NETIF_FLAGS | NETIF_F_RXCSUM,		\
+	.apmg_not_supported = true,					\
+	.mq_rx_supported = true,					\
+	.vht_mu_mimo_supported = true,					\
+	.mac_addr_from_csr = true,					\
+	.use_tfh = true,						\
+	.rf_id = true,							\
+	.gen2 = true,							\
+	.nvm_type = IWL_NVM_EXT,					\
+	.dbgc_supported = true,						\
+	.tx_cmd_queue_size = 32,					\
+	.min_umac_error_event_table = 0x400000
+
+const struct iwl_cfg iwl22000_2ac_cfg_hr = {
+	.name = "Intel(R) Dual Band Wireless AC 22000",
+	.fw_name_pre = IWL_22000_HR_FW_PRE,
+	IWL_DEVICE_22000,
+	.ht_params = &iwl_22000_ht_params,
+	.nvm_ver = IWL_22000_NVM_VERSION,
+	.nvm_calib_ver = IWL_22000_TX_POWER_VERSION,
+	.max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K,
+};
+
+const struct iwl_cfg iwl22000_2ac_cfg_hr_cdb = {
+	.name = "Intel(R) Dual Band Wireless AC 22000",
+	.fw_name_pre = IWL_22000_HR_CDB_FW_PRE,
+	IWL_DEVICE_22000,
+	.ht_params = &iwl_22000_ht_params,
+	.nvm_ver = IWL_22000_NVM_VERSION,
+	.nvm_calib_ver = IWL_22000_TX_POWER_VERSION,
+	.max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K,
+	.cdb = true,
+};
+
+const struct iwl_cfg iwl22000_2ac_cfg_jf = {
+	.name = "Intel(R) Dual Band Wireless AC 22000",
+	.fw_name_pre = IWL_22000_JF_FW_PRE,
+	IWL_DEVICE_22000,
+	.ht_params = &iwl_22000_ht_params,
+	.nvm_ver = IWL_22000_NVM_VERSION,
+	.nvm_calib_ver = IWL_22000_TX_POWER_VERSION,
+	.max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K,
+};
+
+const struct iwl_cfg iwl22000_2ax_cfg_hr = {
+	.name = "Intel(R) Dual Band Wireless AX 22000",
+	.fw_name_pre = IWL_22000_HR_FW_PRE,
+	IWL_DEVICE_22000,
+	.ht_params = &iwl_22000_ht_params,
+	.nvm_ver = IWL_22000_NVM_VERSION,
+	.nvm_calib_ver = IWL_22000_TX_POWER_VERSION,
+	.max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K,
+};
+
+const struct iwl_cfg iwl22000_2ax_cfg_qnj_hr_f0 = {
+	.name = "Intel(R) Dual Band Wireless AX 22000",
+	.fw_name_pre = IWL_22000_HR_F0_FW_PRE,
+	IWL_DEVICE_22000,
+	.ht_params = &iwl_22000_ht_params,
+	.nvm_ver = IWL_22000_NVM_VERSION,
+	.nvm_calib_ver = IWL_22000_TX_POWER_VERSION,
+	.max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K,
+};
+
+const struct iwl_cfg iwl22000_2ax_cfg_qnj_jf_b0 = {
+	.name = "Intel(R) Dual Band Wireless AX 22000",
+	.fw_name_pre = IWL_22000_JF_B0_FW_PRE,
+	IWL_DEVICE_22000,
+	.ht_params = &iwl_22000_ht_params,
+	.nvm_ver = IWL_22000_NVM_VERSION,
+	.nvm_calib_ver = IWL_22000_TX_POWER_VERSION,
+	.max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K,
+};
+
+const struct iwl_cfg iwl22000_2ax_cfg_qnj_hr_a0 = {
+	.name = "Intel(R) Dual Band Wireless AX 22000",
+	.fw_name_pre = IWL_22000_HR_A0_FW_PRE,
+	IWL_DEVICE_22000,
+	.ht_params = &iwl_22000_ht_params,
+	.nvm_ver = IWL_22000_NVM_VERSION,
+	.nvm_calib_ver = IWL_22000_TX_POWER_VERSION,
+	.max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K,
+};
+
+MODULE_FIRMWARE(IWL_22000_HR_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
+MODULE_FIRMWARE(IWL_22000_JF_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
+MODULE_FIRMWARE(IWL_22000_HR_F0_QNJ_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
+MODULE_FIRMWARE(IWL_22000_JF_B0_QNJ_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
+MODULE_FIRMWARE(IWL_22000_HR_A0_QNJ_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
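
The firmware file names requested by these MODULE_FIRMWARE() lines are
built purely by string-literal pasting; for example, with
IWL_22000_UCODE_API_MAX at 36:

#include <linux/stringify.h>

#define IWL_22000_HR_FW_PRE	"iwlwifi-Qu-a0-hr-a0-"
#define IWL_22000_HR_MODULE_FIRMWARE(api) \
	IWL_22000_HR_FW_PRE __stringify(api) ".ucode"

/* __stringify(36) yields "36", and adjacent string literals are
 * concatenated, so this expands to "iwlwifi-Qu-a0-hr-a0-36.ucode".
 */
static const char *example_fw = IWL_22000_HR_MODULE_FIRMWARE(36);
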
diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/8000.c b/drivers/net/wireless/intel/iwlwifi/cfg/8000.c
index 9bb7c19..3f4d9ba 100644
--- a/drivers/net/wireless/intel/iwlwifi/cfg/8000.c
+++ b/drivers/net/wireless/intel/iwlwifi/cfg/8000.c
@@ -70,8 +70,8 @@
 #include "iwl-agn-hw.h"
 
 /* Highest firmware API version supported */
-#define IWL8000_UCODE_API_MAX	34
-#define IWL8265_UCODE_API_MAX	34
+#define IWL8000_UCODE_API_MAX	36
+#define IWL8265_UCODE_API_MAX	36
 
 /* Lowest firmware API version supported */
 #define IWL8000_UCODE_API_MIN	22
diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/9000.c b/drivers/net/wireless/intel/iwlwifi/cfg/9000.c
index e7e75b4..90a1d14 100644
--- a/drivers/net/wireless/intel/iwlwifi/cfg/9000.c
+++ b/drivers/net/wireless/intel/iwlwifi/cfg/9000.c
@@ -55,7 +55,7 @@
 #include "iwl-agn-hw.h"
 
 /* Highest firmware API version supported */
-#define IWL9000_UCODE_API_MAX	34
+#define IWL9000_UCODE_API_MAX	36
 
 /* Lowest firmware API version supported */
 #define IWL9000_UCODE_API_MIN	30
diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/a000.c b/drivers/net/wireless/intel/iwlwifi/cfg/a000.c
deleted file mode 100644
index 705f83b0..0000000
--- a/drivers/net/wireless/intel/iwlwifi/cfg/a000.c
+++ /dev/null
@@ -1,216 +0,0 @@
-/******************************************************************************
- *
- * This file is provided under a dual BSD/GPLv2 license.  When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- *
- * Copyright(c) 2015-2017 Intel Deutschland GmbH
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
- * General Public License for more details.
- *
- * BSD LICENSE
- *
- * Copyright(c) 2015-2017 Intel Deutschland GmbH
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- *  * Redistributions of source code must retain the above copyright
- *    notice, this list of conditions and the following disclaimer.
- *  * Redistributions in binary form must reproduce the above copyright
- *    notice, this list of conditions and the following disclaimer in
- *    the documentation and/or other materials provided with the
- *    distribution.
- *  * Neither the name Intel Corporation nor the names of its
- *    contributors may be used to endorse or promote products derived
- *    from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- *****************************************************************************/
-
-#include <linux/module.h>
-#include <linux/stringify.h>
-#include "iwl-config.h"
-#include "iwl-agn-hw.h"
-
-/* Highest firmware API version supported */
-#define IWL_A000_UCODE_API_MAX	34
-
-/* Lowest firmware API version supported */
-#define IWL_A000_UCODE_API_MIN	24
-
-/* NVM versions */
-#define IWL_A000_NVM_VERSION		0x0a1d
-#define IWL_A000_TX_POWER_VERSION	0xffff /* meaningless */
-
-/* Memory offsets and lengths */
-#define IWL_A000_DCCM_OFFSET		0x800000 /* LMAC1 */
-#define IWL_A000_DCCM_LEN		0x10000 /* LMAC1 */
-#define IWL_A000_DCCM2_OFFSET		0x880000
-#define IWL_A000_DCCM2_LEN		0x8000
-#define IWL_A000_SMEM_OFFSET		0x400000
-#define IWL_A000_SMEM_LEN		0xD0000
-
-#define IWL_A000_JF_FW_PRE	"iwlwifi-Qu-a0-jf-b0-"
-#define IWL_A000_HR_FW_PRE	"iwlwifi-Qu-a0-hr-a0-"
-#define IWL_A000_HR_CDB_FW_PRE	"iwlwifi-QuIcp-z0-hrcdb-a0-"
-#define IWL_A000_HR_F0_FW_PRE	"iwlwifi-QuQnj-f0-hr-a0-"
-#define IWL_A000_JF_B0_FW_PRE	"iwlwifi-QuQnj-a0-jf-b0-"
-#define IWL_A000_HR_A0_FW_PRE	"iwlwifi-QuQnj-a0-hr-a0-"
-
-#define IWL_A000_HR_MODULE_FIRMWARE(api) \
-	IWL_A000_HR_FW_PRE __stringify(api) ".ucode"
-#define IWL_A000_JF_MODULE_FIRMWARE(api) \
-	IWL_A000_JF_FW_PRE __stringify(api) ".ucode"
-#define IWL_A000_HR_F0_QNJ_MODULE_FIRMWARE(api) \
-	IWL_A000_HR_F0_FW_PRE __stringify(api) ".ucode"
-#define IWL_A000_JF_B0_QNJ_MODULE_FIRMWARE(api) \
-	IWL_A000_JF_B0_FW_PRE __stringify(api) ".ucode"
-#define IWL_A000_HR_A0_QNJ_MODULE_FIRMWARE(api) \
-	IWL_A000_HR_A0_FW_PRE __stringify(api) ".ucode"
-
-#define NVM_HW_SECTION_NUM_FAMILY_A000		10
-
-static const struct iwl_base_params iwl_a000_base_params = {
-	.eeprom_size = OTP_LOW_IMAGE_SIZE_FAMILY_A000,
-	.num_of_queues = 512,
-	.shadow_ram_support = true,
-	.led_compensation = 57,
-	.wd_timeout = IWL_LONG_WD_TIMEOUT,
-	.max_event_log_size = 512,
-	.shadow_reg_enable = true,
-	.pcie_l1_allowed = true,
-};
-
-static const struct iwl_ht_params iwl_a000_ht_params = {
-	.stbc = true,
-	.ldpc = true,
-	.ht40_bands = BIT(NL80211_BAND_2GHZ) | BIT(NL80211_BAND_5GHZ),
-};
-
-#define IWL_DEVICE_A000							\
-	.ucode_api_max = IWL_A000_UCODE_API_MAX,			\
-	.ucode_api_min = IWL_A000_UCODE_API_MIN,			\
-	.device_family = IWL_DEVICE_FAMILY_A000,			\
-	.max_inst_size = IWL60_RTC_INST_SIZE,				\
-	.max_data_size = IWL60_RTC_DATA_SIZE,				\
-	.base_params = &iwl_a000_base_params,				\
-	.led_mode = IWL_LED_RF_STATE,					\
-	.nvm_hw_section_num = NVM_HW_SECTION_NUM_FAMILY_A000,		\
-	.non_shared_ant = ANT_A,					\
-	.dccm_offset = IWL_A000_DCCM_OFFSET,				\
-	.dccm_len = IWL_A000_DCCM_LEN,					\
-	.dccm2_offset = IWL_A000_DCCM2_OFFSET,				\
-	.dccm2_len = IWL_A000_DCCM2_LEN,				\
-	.smem_offset = IWL_A000_SMEM_OFFSET,				\
-	.smem_len = IWL_A000_SMEM_LEN,					\
-	.features = IWL_TX_CSUM_NETIF_FLAGS | NETIF_F_RXCSUM,		\
-	.apmg_not_supported = true,					\
-	.mq_rx_supported = true,					\
-	.vht_mu_mimo_supported = true,					\
-	.mac_addr_from_csr = true,					\
-	.use_tfh = true,						\
-	.rf_id = true,							\
-	.gen2 = true,							\
-	.nvm_type = IWL_NVM_EXT,					\
-	.dbgc_supported = true,						\
-	.tx_cmd_queue_size = 32,					\
-	.min_umac_error_event_table = 0x400000
-
-const struct iwl_cfg iwla000_2ac_cfg_hr = {
-	.name = "Intel(R) Dual Band Wireless AC a000",
-	.fw_name_pre = IWL_A000_HR_FW_PRE,
-	IWL_DEVICE_A000,
-	.ht_params = &iwl_a000_ht_params,
-	.nvm_ver = IWL_A000_NVM_VERSION,
-	.nvm_calib_ver = IWL_A000_TX_POWER_VERSION,
-	.max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K,
-};
-
-const struct iwl_cfg iwla000_2ac_cfg_hr_cdb = {
-	.name = "Intel(R) Dual Band Wireless AC a000",
-	.fw_name_pre = IWL_A000_HR_CDB_FW_PRE,
-	IWL_DEVICE_A000,
-	.ht_params = &iwl_a000_ht_params,
-	.nvm_ver = IWL_A000_NVM_VERSION,
-	.nvm_calib_ver = IWL_A000_TX_POWER_VERSION,
-	.max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K,
-	.cdb = true,
-};
-
-const struct iwl_cfg iwla000_2ac_cfg_jf = {
-	.name = "Intel(R) Dual Band Wireless AC a000",
-	.fw_name_pre = IWL_A000_JF_FW_PRE,
-	IWL_DEVICE_A000,
-	.ht_params = &iwl_a000_ht_params,
-	.nvm_ver = IWL_A000_NVM_VERSION,
-	.nvm_calib_ver = IWL_A000_TX_POWER_VERSION,
-	.max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K,
-};
-
-const struct iwl_cfg iwla000_2ax_cfg_hr = {
-	.name = "Intel(R) Dual Band Wireless AX a000",
-	.fw_name_pre = IWL_A000_HR_FW_PRE,
-	IWL_DEVICE_A000,
-	.ht_params = &iwl_a000_ht_params,
-	.nvm_ver = IWL_A000_NVM_VERSION,
-	.nvm_calib_ver = IWL_A000_TX_POWER_VERSION,
-	.max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K,
-};
-
-const struct iwl_cfg iwla000_2ax_cfg_qnj_hr_f0 = {
-	.name = "Intel(R) Dual Band Wireless AX a000",
-	.fw_name_pre = IWL_A000_HR_F0_FW_PRE,
-	IWL_DEVICE_A000,
-	.ht_params = &iwl_a000_ht_params,
-	.nvm_ver = IWL_A000_NVM_VERSION,
-	.nvm_calib_ver = IWL_A000_TX_POWER_VERSION,
-	.max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K,
-};
-
-const struct iwl_cfg iwla000_2ax_cfg_qnj_jf_b0 = {
-	.name = "Intel(R) Dual Band Wireless AX a000",
-	.fw_name_pre = IWL_A000_JF_B0_FW_PRE,
-	IWL_DEVICE_A000,
-	.ht_params = &iwl_a000_ht_params,
-	.nvm_ver = IWL_A000_NVM_VERSION,
-	.nvm_calib_ver = IWL_A000_TX_POWER_VERSION,
-	.max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K,
-};
-
-const struct iwl_cfg iwla000_2ax_cfg_qnj_hr_a0 = {
-	.name = "Intel(R) Dual Band Wireless AX a000",
-	.fw_name_pre = IWL_A000_HR_A0_FW_PRE,
-	IWL_DEVICE_A000,
-	.ht_params = &iwl_a000_ht_params,
-	.nvm_ver = IWL_A000_NVM_VERSION,
-	.nvm_calib_ver = IWL_A000_TX_POWER_VERSION,
-	.max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K,
-};
-
-MODULE_FIRMWARE(IWL_A000_HR_MODULE_FIRMWARE(IWL_A000_UCODE_API_MAX));
-MODULE_FIRMWARE(IWL_A000_JF_MODULE_FIRMWARE(IWL_A000_UCODE_API_MAX));
-MODULE_FIRMWARE(IWL_A000_HR_F0_QNJ_MODULE_FIRMWARE(IWL_A000_UCODE_API_MAX));
-MODULE_FIRMWARE(IWL_A000_JF_B0_QNJ_MODULE_FIRMWARE(IWL_A000_UCODE_API_MAX));
-MODULE_FIRMWARE(IWL_A000_HR_A0_QNJ_MODULE_FIRMWARE(IWL_A000_UCODE_API_MAX));
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/alive.h b/drivers/net/wireless/intel/iwlwifi/fw/api/alive.h
index 3684a3e..007bfe7 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/alive.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/alive.h
@@ -95,8 +95,8 @@ enum {
 #define IWL_ALIVE_FLG_RFKILL	BIT(0)
 
 struct iwl_lmac_alive {
-	__le32 ucode_minor;
 	__le32 ucode_major;
+	__le32 ucode_minor;
 	u8 ver_subtype;
 	u8 ver_type;
 	u8 mac;
@@ -113,8 +113,8 @@ struct iwl_lmac_alive {
 } __packed; /* UCODE_ALIVE_NTFY_API_S_VER_3 */
 
 struct iwl_umac_alive {
-	__le32 umac_minor;		/* UMAC version: minor */
 	__le32 umac_major;		/* UMAC version: major */
+	__le32 umac_minor;		/* UMAC version: minor */
 	__le32 error_info_addr;		/* SRAM address for UMAC error log */
 	__le32 dbg_print_buff_addr;
 } __packed; /* UMAC_ALIVE_DATA_API_S_VER_2 */
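
Swapping the major/minor members matters because these structs are
overlaid byte-for-byte on the firmware's alive notification, and the
firmware sends the major version first. A minimal sketch of consuming
the corrected layout (hypothetical helper, not part of this patch):

static void example_print_ucode_ver(const struct iwl_lmac_alive *lmac)
{
	/* major is first on the wire, minor second */
	pr_info("ucode %u.%u\n",
		le32_to_cpu(lmac->ucode_major),
		le32_to_cpu(lmac->ucode_minor));
}
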
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/coex.h b/drivers/net/wireless/intel/iwlwifi/fw/api/coex.h
index d09555a..87c1dde 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/coex.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/coex.h
@@ -188,11 +188,6 @@ enum iwl_bt_mxbox_dw3 {
 	BT_MBOX(3, UPDATE_REQUEST, 21, 1),
 };
 
-enum iwl_bt_mxbox_dw4 {
-	BT_MBOX(4, ATS_BT_INTERVAL, 0, 7),
-	BT_MBOX(4, ATS_BT_ACTIVE_MAX_TH, 7, 7),
-};
-
 #define BT_MBOX_MSG(_notif, _num, _field)				     \
 	((le32_to_cpu((_notif)->mbox_msg[(_num)]) & BT_MBOX##_num##_##_field)\
 	>> BT_MBOX##_num##_##_field##_POS)
@@ -232,31 +227,6 @@ enum iwl_bt_ci_compliance {
  * @reserved: reserved
  */
 struct iwl_bt_coex_profile_notif {
-	__le32 mbox_msg[8];
-	__le32 msg_idx;
-	__le32 bt_ci_compliance;
-
-	__le32 primary_ch_lut;
-	__le32 secondary_ch_lut;
-	__le32 bt_activity_grading;
-	u8 ttc_status;
-	u8 rrc_status;
-	__le16 reserved;
-} __packed; /* BT_COEX_PROFILE_NTFY_API_S_VER_5 */
-
-/**
- * struct iwl_bt_coex_profile_notif - notification about BT coex
- * @mbox_msg: message from BT to WiFi
- * @msg_idx: the index of the message
- * @bt_ci_compliance: enum %iwl_bt_ci_compliance
- * @primary_ch_lut: LUT used for primary channel &enum iwl_bt_coex_lut_type
- * @secondary_ch_lut: LUT used for secondary channel &enum iwl_bt_coex_lut_type
- * @bt_activity_grading: the activity of BT &enum iwl_bt_activity_grading
- * @ttc_status: is TTC enabled - one bit per PHY
- * @rrc_status: is RRC enabled - one bit per PHY
- * @reserved: reserved
- */
-struct iwl_bt_coex_profile_notif_v4 {
 	__le32 mbox_msg[4];
 	__le32 msg_idx;
 	__le32 bt_ci_compliance;
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/commands.h b/drivers/net/wireless/intel/iwlwifi/fw/api/commands.h
index 7ebbf09..f285bac 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/commands.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/commands.h
@@ -215,7 +215,7 @@ enum iwl_legacy_cmds {
 	/**
 	 * @SCD_QUEUE_CFG: &struct iwl_scd_txq_cfg_cmd for older hardware,
 	 *	&struct iwl_tx_queue_cfg_cmd with &struct iwl_tx_queue_cfg_rsp
-	 *	for newer (A000) hardware.
+	 *	for newer (22000) hardware.
 	 */
 	SCD_QUEUE_CFG = 0x1d,
 
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/datapath.h b/drivers/net/wireless/intel/iwlwifi/fw/api/datapath.h
index aa76dcc..a57c722 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/datapath.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/datapath.h
@@ -83,6 +83,21 @@ enum iwl_data_path_subcmd_ids {
 	TRIGGER_RX_QUEUES_NOTIF_CMD = 0x2,
 
 	/**
+	 * @TLC_MNG_CONFIG_CMD: &struct iwl_tlc_config_cmd
+	 */
+	TLC_MNG_CONFIG_CMD = 0xF,
+
+	/**
+	 * @TLC_MNG_NOTIF_REQ_CMD: &struct iwl_tlc_notif_req_config_cmd
+	 */
+	TLC_MNG_NOTIF_REQ_CMD = 0x10,
+
+	/**
+	 * @TLC_MNG_UPDATE_NOTIF: &struct iwl_tlc_update_notif
+	 */
+	TLC_MNG_UPDATE_NOTIF = 0xF7,
+
+	/**
 	 * @STA_PM_NOTIF: &struct iwl_mvm_pm_state_notification
 	 */
 	STA_PM_NOTIF = 0xFD,
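
Because these TLC opcodes live inside a command group, a sender must
form the wide command ID from the group and the opcode. A rough sketch
(hypothetical caller, assuming the usual iwl_mvm helpers and the
struct iwl_tlc_config_cmd defined in rs.h below):

static int example_send_tlc_config(struct iwl_mvm *mvm, u8 sta_id)
{
	struct iwl_tlc_config_cmd cfg = {
		.sta_id = sta_id,
		.max_supp_ch_width = IWL_TLC_MNG_MAX_CH_WIDTH_80MHZ,
		.mode = IWL_TLC_MNG_MODE_VHT,
	};

	/* group + opcode -> 16-bit wide command id */
	return iwl_mvm_send_cmd_pdu(mvm,
				    iwl_cmd_id(TLC_MNG_CONFIG_CMD,
					       DATA_PATH_GROUP, 0),
				    0, sizeof(cfg), &cfg);
}
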
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/rs.h b/drivers/net/wireless/intel/iwlwifi/fw/api/rs.h
index a13fd8a..e9a6e56 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/rs.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/rs.h
@@ -62,6 +62,267 @@
 
 #include "mac.h"
 
+/**
+ * enum iwl_tlc_mng_cfg_flags_enum - options for TLC config flags
+ * @IWL_TLC_MNG_CFG_FLAGS_CCK_MSK: CCK support
+ * @IWL_TLC_MNG_CFG_FLAGS_DD_MSK: enable DD
+ * @IWL_TLC_MNG_CFG_FLAGS_STBC_MSK: enable STBC
+ * @IWL_TLC_MNG_CFG_FLAGS_LDPC_MSK: enable LDPC
+ * @IWL_TLC_MNG_CFG_FLAGS_BF_MSK: enable BFER
+ * @IWL_TLC_MNG_CFG_FLAGS_DCM_MSK: enable DCM
+ */
+enum iwl_tlc_mng_cfg_flags_enum {
+	IWL_TLC_MNG_CFG_FLAGS_CCK_MSK	= BIT(0),
+	IWL_TLC_MNG_CFG_FLAGS_DD_MSK	= BIT(1),
+	IWL_TLC_MNG_CFG_FLAGS_STBC_MSK	= BIT(2),
+	IWL_TLC_MNG_CFG_FLAGS_LDPC_MSK	= BIT(3),
+	IWL_TLC_MNG_CFG_FLAGS_BF_MSK	= BIT(4),
+	IWL_TLC_MNG_CFG_FLAGS_DCM_MSK	= BIT(5),
+};
+
+/**
+ * enum iwl_tlc_mng_cfg_cw_enum - channel width options
+ * @IWL_TLC_MNG_MAX_CH_WIDTH_20MHZ: 20MHZ channel
+ * @IWL_TLC_MNG_MAX_CH_WIDTH_40MHZ: 40MHZ channel
+ * @IWL_TLC_MNG_MAX_CH_WIDTH_80MHZ: 80MHZ channel
+ * @IWL_TLC_MNG_MAX_CH_WIDTH_160MHZ: 160MHZ channel
+ * @IWL_TLC_MNG_MAX_CH_WIDTH_LAST: maximum value
+ */
+enum iwl_tlc_mng_cfg_cw_enum {
+	IWL_TLC_MNG_MAX_CH_WIDTH_20MHZ,
+	IWL_TLC_MNG_MAX_CH_WIDTH_40MHZ,
+	IWL_TLC_MNG_MAX_CH_WIDTH_80MHZ,
+	IWL_TLC_MNG_MAX_CH_WIDTH_160MHZ,
+	IWL_TLC_MNG_MAX_CH_WIDTH_LAST = IWL_TLC_MNG_MAX_CH_WIDTH_160MHZ,
+};
+
+/**
+ * enum iwl_tlc_mng_cfg_chains_enum - possible chains
+ * @IWL_TLC_MNG_CHAIN_A_MSK: chain A
+ * @IWL_TLC_MNG_CHAIN_B_MSK: chain B
+ * @IWL_TLC_MNG_CHAIN_C_MSK: chain C
+ */
+enum iwl_tlc_mng_cfg_chains_enum {
+	IWL_TLC_MNG_CHAIN_A_MSK = BIT(0),
+	IWL_TLC_MNG_CHAIN_B_MSK = BIT(1),
+	IWL_TLC_MNG_CHAIN_C_MSK = BIT(2),
+};
+
+/**
+ * enum iwl_tlc_mng_cfg_gi_enum - guard interval options
+ * @IWL_TLC_MNG_SGI_20MHZ_MSK: enable short GI for 20MHZ
+ * @IWL_TLC_MNG_SGI_40MHZ_MSK: enable short GI for 40MHZ
+ * @IWL_TLC_MNG_SGI_80MHZ_MSK: enable short GI for 80MHZ
+ * @IWL_TLC_MNG_SGI_160MHZ_MSK: enable short GI for 160MHZ
+ */
+enum iwl_tlc_mng_cfg_gi_enum {
+	IWL_TLC_MNG_SGI_20MHZ_MSK  = BIT(0),
+	IWL_TLC_MNG_SGI_40MHZ_MSK  = BIT(1),
+	IWL_TLC_MNG_SGI_80MHZ_MSK  = BIT(2),
+	IWL_TLC_MNG_SGI_160MHZ_MSK = BIT(3),
+};
+
+/**
+ * enum iwl_tlc_mng_cfg_mode_enum - supported modes
+ * @IWL_TLC_MNG_MODE_CCK: enable CCK
+ * @IWL_TLC_MNG_MODE_OFDM_NON_HT: enable OFDM (non HT)
+ * @IWL_TLC_MNG_MODE_NON_HT: enable non HT
+ * @IWL_TLC_MNG_MODE_HT: enable HT
+ * @IWL_TLC_MNG_MODE_VHT: enable VHT
+ * @IWL_TLC_MNG_MODE_HE: enable HE
+ * @IWL_TLC_MNG_MODE_INVALID: invalid value
+ * @IWL_TLC_MNG_MODE_NUM: a count of possible modes
+ */
+enum iwl_tlc_mng_cfg_mode_enum {
+	IWL_TLC_MNG_MODE_CCK = 0,
+	IWL_TLC_MNG_MODE_OFDM_NON_HT = IWL_TLC_MNG_MODE_CCK,
+	IWL_TLC_MNG_MODE_NON_HT = IWL_TLC_MNG_MODE_CCK,
+	IWL_TLC_MNG_MODE_HT,
+	IWL_TLC_MNG_MODE_VHT,
+	IWL_TLC_MNG_MODE_HE,
+	IWL_TLC_MNG_MODE_INVALID,
+	IWL_TLC_MNG_MODE_NUM = IWL_TLC_MNG_MODE_INVALID,
+};
+
+/**
+ * enum iwl_tlc_mng_vht_he_types_enum - VHT HE types
+ * @IWL_TLC_MNG_VALID_VHT_HE_TYPES_SU: VHT/HE single user
+ * @IWL_TLC_MNG_VALID_VHT_HE_TYPES_SU_EXT: VHT/HE single user extended
+ * @IWL_TLC_MNG_VALID_VHT_HE_TYPES_MU: VHT/HE multiple users
+ * @IWL_TLC_MNG_VALID_VHT_HE_TYPES_TRIG_BASED: trigger based
+ * @IWL_TLC_MNG_VALID_VHT_HE_TYPES_NUM: a count of possible types
+ */
+enum iwl_tlc_mng_vht_he_types_enum {
+	IWL_TLC_MNG_VALID_VHT_HE_TYPES_SU = 0,
+	IWL_TLC_MNG_VALID_VHT_HE_TYPES_SU_EXT,
+	IWL_TLC_MNG_VALID_VHT_HE_TYPES_MU,
+	IWL_TLC_MNG_VALID_VHT_HE_TYPES_TRIG_BASED,
+	IWL_TLC_MNG_VALID_VHT_HE_TYPES_NUM =
+		IWL_TLC_MNG_VALID_VHT_HE_TYPES_TRIG_BASED,
+
+};
+
+/**
+ * enum iwl_tlc_mng_ht_rates_enum - HT/VHT rates
+ * @IWL_TLC_MNG_HT_RATE_MCS0: index of MCS0
+ * @IWL_TLC_MNG_HT_RATE_MCS1: index of MCS1
+ * @IWL_TLC_MNG_HT_RATE_MCS2: index of MCS2
+ * @IWL_TLC_MNG_HT_RATE_MCS3: index of MCS3
+ * @IWL_TLC_MNG_HT_RATE_MCS4: index of MCS4
+ * @IWL_TLC_MNG_HT_RATE_MCS5: index of MCS5
+ * @IWL_TLC_MNG_HT_RATE_MCS6: index of MCS6
+ * @IWL_TLC_MNG_HT_RATE_MCS7: index of MCS7
+ * @IWL_TLC_MNG_HT_RATE_MCS8: index of MCS8
+ * @IWL_TLC_MNG_HT_RATE_MCS9: index of MCS9
+ * @IWL_TLC_MNG_HT_RATE_MAX: maximal rate for HT/VHT
+ */
+enum iwl_tlc_mng_ht_rates_enum {
+	IWL_TLC_MNG_HT_RATE_MCS0 = 0,
+	IWL_TLC_MNG_HT_RATE_MCS1,
+	IWL_TLC_MNG_HT_RATE_MCS2,
+	IWL_TLC_MNG_HT_RATE_MCS3,
+	IWL_TLC_MNG_HT_RATE_MCS4,
+	IWL_TLC_MNG_HT_RATE_MCS5,
+	IWL_TLC_MNG_HT_RATE_MCS6,
+	IWL_TLC_MNG_HT_RATE_MCS7,
+	IWL_TLC_MNG_HT_RATE_MCS8,
+	IWL_TLC_MNG_HT_RATE_MCS9,
+	IWL_TLC_MNG_HT_RATE_MAX = IWL_TLC_MNG_HT_RATE_MCS9,
+};
+
+/* Maximum supported tx antennas number */
+#define MAX_RS_ANT_NUM 3
+
+/**
+ * struct iwl_tlc_config_cmd - TLC configuration
+ * @sta_id: station id
+ * @reserved1: reserved
+ * @max_supp_ch_width: channel width
+ * @flags: bitmask of %IWL_TLC_MNG_CFG_FLAGS_\*
+ * @chains: bitmask of %IWL_TLC_MNG_CHAIN_\*
+ * @max_supp_ss: valid values are 0-3, 0 - spatial streams are not supported
+ * @valid_vht_he_types: bitmap of %IWL_TLC_MNG_VALID_VHT_HE_TYPES_\*
+ * @non_ht_supp_rates: bitmap of supported legacy rates
+ * @ht_supp_rates: bitmap of supported HT/VHT rates, valid bits are 0-9
+ * @mode: modulation type %IWL_TLC_MNG_MODE_\*
+ * @reserved2: reserved
+ * @he_supp_rates: bitmap of supported HE rates
+ * @sgi_ch_width_supp: bitmap of SGI support per channel width
+ * @he_gi_support: 11ax HE guard interval
+ * @max_ampdu_cnt: max AMPDU size (frames count)
+ */
+struct iwl_tlc_config_cmd {
+	u8 sta_id;
+	u8 reserved1[3];
+	u8 max_supp_ch_width;
+	u8 chains;
+	u8 max_supp_ss;
+	u8 valid_vht_he_types;
+	__le16 flags;
+	__le16 non_ht_supp_rates;
+	__le16 ht_supp_rates[MAX_RS_ANT_NUM];
+	u8 mode;
+	u8 reserved2;
+	__le16 he_supp_rates;
+	u8 sgi_ch_width_supp;
+	u8 he_gi_support;
+	__le32 max_ampdu_cnt;
+} __packed; /* TLC_MNG_CONFIG_CMD_API_S_VER_1 */
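+
+/*
+ * Illustrative sketch only -- the real construction is done by
+ * rs_fw_rate_init(), added later in this patch.  A minimal fill for a
+ * 2x2 VHT station could look like this (sta_id and A-MPDU count are
+ * made-up values):
+ *
+ *	struct iwl_tlc_config_cmd cmd = {
+ *		.sta_id = 0,
+ *		.max_supp_ch_width = IWL_TLC_MNG_MAX_CH_WIDTH_80MHZ,
+ *		.chains = IWL_TLC_MNG_CHAIN_A_MSK | IWL_TLC_MNG_CHAIN_B_MSK,
+ *		.max_supp_ss = 2,
+ *		.mode = IWL_TLC_MNG_MODE_VHT,
+ *		.sgi_ch_width_supp = IWL_TLC_MNG_SGI_80MHZ_MSK,
+ *		.max_ampdu_cnt = cpu_to_le32(64),
+ *	};
+ */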
+
+#define IWL_TLC_NOTIF_INIT_RATE_POS 0
+#define IWL_TLC_NOTIF_INIT_RATE_MSK BIT(IWL_TLC_NOTIF_INIT_RATE_POS)
+#define IWL_TLC_NOTIF_REQ_INTERVAL (500) /* msec */
+
+/**
+ * struct iwl_tlc_notif_req_config_cmd - request notif on specific changes
+ * @sta_id: relevant station
+ * @reserved1: reserved
+ * @flags: bitmap of requested notifications %IWL_TLC_NOTIF_INIT_\*
+ * @interval: minimum time between notifications from TLC to the driver (msec)
+ * @reserved2: reserved
+ */
+struct iwl_tlc_notif_req_config_cmd {
+	u8 sta_id;
+	u8 reserved1;
+	__le16 flags;
+	__le16 interval;
+	__le16 reserved2;
+} __packed; /* TLC_MNG_NOTIF_REQ_CMD_API_S_VER_1 */
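+
+/*
+ * Note: this command is sent as a plain PDU via iwl_mvm_send_cmd_pdu();
+ * see rs_fw_tlc_mng_notif_req_config(), added later in this patch, for
+ * how @flags and @interval are filled in practice.
+ */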
+
+/**
+ * struct iwl_tlc_update_notif - TLC notification from FW
+ * @sta_id: station id
+ * @reserved: reserved
+ * @flags: bitmap of notifications reported
+ * @values: one value per flag bit, indexed by the flag's bit position in
+ *	&struct iwl_tlc_notif_req_config_cmd
+ */
+struct iwl_tlc_update_notif {
+	u8 sta_id;
+	u8 reserved;
+	__le16 flags;
+	__le32 values[16];
+} __packed; /* TLC_MNG_UPDATE_NTFY_API_S_VER_1 */
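+
+/*
+ * Layout note: for each bit set in @flags, the entry at the matching
+ * index in @values is valid.  E.g. when IWL_TLC_NOTIF_INIT_RATE_MSK is
+ * set, values[IWL_TLC_NOTIF_INIT_RATE_POS] carries the initial
+ * rate_n_flags; this is how iwl_mvm_tlc_update_notif(), added later in
+ * this patch, consumes the notification.
+ */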
+
+/**
+ * enum iwl_tlc_debug_flags - debug options
+ * @IWL_TLC_DEBUG_FIXED_RATE: set fixed rate for rate scaling
+ * @IWL_TLC_DEBUG_STATS_TH: threshold for sending statistics to the driver, in
+ *	frames
+ * @IWL_TLC_DEBUG_STATS_TIME_TH: threshold for sending statistics to the
+ *	driver, in msec
+ * @IWL_TLC_DEBUG_AGG_TIME_LIM: time limit for a BA session
+ * @IWL_TLC_DEBUG_AGG_DIS_START_TH: frame with try-count greater than this
+ *	threshold should not start an aggregation session
+ * @IWL_TLC_DEBUG_AGG_FRAME_CNT_LIM: set max number of frames in an aggregation
+ * @IWL_TLC_DEBUG_RENEW_ADDBA_DELAY: delay between retries of ADD BA
+ * @IWL_TLC_DEBUG_START_AC_RATE_IDX: frames per second to start a BA session
+ * @IWL_TLC_DEBUG_NO_FAR_RANGE_TWEAK: disable BW scaling
+ */
+enum iwl_tlc_debug_flags {
+	IWL_TLC_DEBUG_FIXED_RATE,
+	IWL_TLC_DEBUG_STATS_TH,
+	IWL_TLC_DEBUG_STATS_TIME_TH,
+	IWL_TLC_DEBUG_AGG_TIME_LIM,
+	IWL_TLC_DEBUG_AGG_DIS_START_TH,
+	IWL_TLC_DEBUG_AGG_FRAME_CNT_LIM,
+	IWL_TLC_DEBUG_RENEW_ADDBA_DELAY,
+	IWL_TLC_DEBUG_START_AC_RATE_IDX,
+	IWL_TLC_DEBUG_NO_FAR_RANGE_TWEAK,
+}; /* TLC_MNG_DEBUG_FLAGS_API_E_VER_1 */
+
+/**
+ * struct iwl_dhc_tlc_cmd - fixed debug config
+ * @sta_id: bit 0 - enable/disable, bits 1 - 7 hold station id
+ * @reserved1: reserved
+ * @flags: bitmap of %IWL_TLC_DEBUG_\*
+ * @fixed_rate: rate value
+ * @stats_threshold: if number of tx-ed frames is greater, send statistics
+ * @time_threshold: statistics threshold in usec
+ * @agg_time_lim: max agg time
+ * @agg_dis_start_threshold: frames with try-count greater than this count will
+ *			     not be aggregated
+ * @agg_frame_count_lim: agg size
+ * @addba_retry_delay: delay between retries of ADD BA
+ * @start_ac_rate_idx: frames per second to start a BA session
+ * @no_far_range_tweak: disable BW scaling
+ * @reserved2: reserved
+ */
+struct iwl_dhc_tlc_cmd {
+	u8 sta_id;
+	u8 reserved1[3];
+	__le32 flags;
+	__le32 fixed_rate;
+	__le16 stats_threshold;
+	__le16 time_threshold;
+	__le16 agg_time_lim;
+	__le16 agg_dis_start_threshold;
+	__le16 agg_frame_count_lim;
+	__le16 addba_retry_delay;
+	u8 start_ac_rate_idx[IEEE80211_NUM_ACS];
+	u8 no_far_range_tweak;
+	u8 reserved2[3];
+} __packed;
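+
+/*
+ * Assumption based on the matching names: each field after @flags
+ * corresponds to one %IWL_TLC_DEBUG_\* bit, and is presumably only
+ * meaningful when that bit is set in @flags.
+ */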
+
 /*
  * These serve as indexes into
  * struct iwl_rate_info fw_rate_idx_to_plcp[IWL_RATE_COUNT];
@@ -253,7 +514,6 @@ enum {
 #define RATE_MCS_ANT_ABC_MSK		(RATE_MCS_ANT_AB_MSK | \
 					 RATE_MCS_ANT_C_MSK)
 #define RATE_MCS_ANT_MSK		RATE_MCS_ANT_ABC_MSK
-#define RATE_MCS_ANT_NUM 3
 
 /* Bit 17: (0) SS, (1) SS*2 */
 #define RATE_MCS_STBC_POS		17
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/tx.h b/drivers/net/wireless/intel/iwlwifi/fw/api/tx.h
index f5d5ba7..a2a40b5 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/tx.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/tx.h
@@ -121,7 +121,7 @@ enum iwl_tx_flags {
 }; /* TX_FLAGS_BITS_API_S_VER_1 */
 
 /**
- * enum iwl_tx_cmd_flags - bitmasks for tx_flags in TX command for a000
+ * enum iwl_tx_cmd_flags - bitmasks for tx_flags in TX command for 22000
  * @IWL_TX_FLAGS_CMD_RATE: use rate from the TX command
  * @IWL_TX_FLAGS_ENCRYPT_DIS: frame should not be encrypted, even if it belongs
  *	to a secured STA
@@ -301,7 +301,7 @@ struct iwl_dram_sec_info {
 } __packed; /* DRAM_SEC_INFO_API_S_VER_1 */
 
 /**
- * struct iwl_tx_cmd_gen2 - TX command struct to FW for a000 devices
+ * struct iwl_tx_cmd_gen2 - TX command struct to FW for 22000 devices
  * ( TX_CMD = 0x1c )
  * @len: in bytes of the payload, see below for details
  * @offload_assist: TX offload configuration
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/dbg.c b/drivers/net/wireless/intel/iwlwifi/fw/dbg.c
index 8106fd4..67aefc8 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/dbg.c
+++ b/drivers/net/wireless/intel/iwlwifi/fw/dbg.c
@@ -964,7 +964,20 @@ int iwl_fw_dbg_collect_desc(struct iwl_fw_runtime *fwrt,
 	if (trigger)
 		delay = msecs_to_jiffies(le32_to_cpu(trigger->stop_delay));
 
-	if (WARN(fwrt->trans->state == IWL_TRANS_NO_FW,
+	/*
+	 * If the loading of the FW completed successfully, the next step is to
+	 * get the SMEM config data. Thus, if fwrt->smem_cfg.num_lmacs is
+	 * non-zero, the FW was already loaded successfully. If the state is "NO_FW"
+	 * in such a case - WARN and exit, since FW may be dead. Otherwise, we
+	 * can try to collect the data, since FW might just not be fully
+	 * loaded (no "ALIVE" yet), and the debug data is accessible.
+	 *
+	 * Corner case: got the FW alive but crashed before getting the SMEM
+	 *	config. In such a case, due to HW access problems, we might
+	 *	collect garbage.
+	 */
+	if (WARN((fwrt->trans->state == IWL_TRANS_NO_FW) &&
+		 fwrt->smem_cfg.num_lmacs,
 		 "Can't collect dbg data when FW isn't alive\n"))
 		return -EIO;
 
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/file.h b/drivers/net/wireless/intel/iwlwifi/fw/file.h
index 37a5c5b..4687d01 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/file.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/file.h
@@ -246,8 +246,6 @@ typedef unsigned int __bitwise iwl_ucode_tlv_api_t;
  * @IWL_UCODE_TLV_API_STA_TYPE: This ucode supports station type assignment.
  * @IWL_UCODE_TLV_API_NAN2_VER2: This ucode supports NAN API version 2
  * @IWL_UCODE_TLV_API_NEW_RX_STATS: should new RX STATISTICS API be used
- * @IWL_UCODE_TLV_API_ATS_COEX_EXTERNAL: the coex notification is enlared to
- *	include information about ACL time sharing.
  * @IWL_UCODE_TLV_API_QUOTA_LOW_LATENCY: Quota command includes a field
  *	indicating low latency direction.
  *
@@ -267,7 +265,6 @@ enum iwl_ucode_tlv_api {
 	IWL_UCODE_TLV_API_ADAPTIVE_DWELL	= (__force iwl_ucode_tlv_api_t)32,
 	IWL_UCODE_TLV_API_NEW_BEACON_TEMPLATE	= (__force iwl_ucode_tlv_api_t)34,
 	IWL_UCODE_TLV_API_NEW_RX_STATS		= (__force iwl_ucode_tlv_api_t)35,
-	IWL_UCODE_TLV_API_COEX_ATS_EXTERNAL	= (__force iwl_ucode_tlv_api_t)37,
 	IWL_UCODE_TLV_API_QUOTA_LOW_LATENCY	= (__force iwl_ucode_tlv_api_t)38,
 
 	NUM_IWL_UCODE_TLV_API
@@ -313,6 +310,7 @@ typedef unsigned int __bitwise iwl_ucode_tlv_capa_t;
  * @IWL_UCODE_TLV_CAPA_BT_COEX_RRC: supports BT Coex RRC
  * @IWL_UCODE_TLV_CAPA_GSCAN_SUPPORT: supports gscan
  * @IWL_UCODE_TLV_CAPA_STA_PM_NOTIF: firmware will send STA PM notification
+ * @IWL_UCODE_TLV_CAPA_TLC_OFFLOAD: firmware implements rate scaling algorithm
  * @IWL_UCODE_TLV_CAPA_EXTENDED_DTS_MEASURE: extended DTS measurement
  * @IWL_UCODE_TLV_CAPA_SHORT_PM_TIMEOUTS: supports short PM timeouts
  * @IWL_UCODE_TLV_CAPA_BT_MPLUT_SUPPORT: supports bt-coex Multi-priority LUT
@@ -367,6 +365,7 @@ enum iwl_ucode_tlv_capa {
 	IWL_UCODE_TLV_CAPA_BINDING_CDB_SUPPORT		= (__force iwl_ucode_tlv_capa_t)39,
 	IWL_UCODE_TLV_CAPA_CDB_SUPPORT			= (__force iwl_ucode_tlv_capa_t)40,
 	IWL_UCODE_TLV_CAPA_D0I3_END_FIRST		= (__force iwl_ucode_tlv_capa_t)41,
+	IWL_UCODE_TLV_CAPA_TLC_OFFLOAD                  = (__force iwl_ucode_tlv_capa_t)43,
 	IWL_UCODE_TLV_CAPA_EXTENDED_DTS_MEASURE		= (__force iwl_ucode_tlv_capa_t)64,
 	IWL_UCODE_TLV_CAPA_SHORT_PM_TIMEOUTS		= (__force iwl_ucode_tlv_capa_t)65,
 	IWL_UCODE_TLV_CAPA_BT_MPLUT_SUPPORT		= (__force iwl_ucode_tlv_capa_t)67,
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/smem.c b/drivers/net/wireless/intel/iwlwifi/fw/smem.c
index 7667573..fb4b644 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/smem.c
+++ b/drivers/net/wireless/intel/iwlwifi/fw/smem.c
@@ -63,8 +63,8 @@
 #include "runtime.h"
 #include "fw/api/commands.h"
 
-static void iwl_parse_shared_mem_a000(struct iwl_fw_runtime *fwrt,
-				      struct iwl_rx_packet *pkt)
+static void iwl_parse_shared_mem_22000(struct iwl_fw_runtime *fwrt,
+				       struct iwl_rx_packet *pkt)
 {
 	struct iwl_shared_mem_cfg *mem_cfg = (void *)pkt->data;
 	int i, lmac;
@@ -143,8 +143,8 @@ void iwl_get_shared_mem_conf(struct iwl_fw_runtime *fwrt)
 		return;
 
 	pkt = cmd.resp_pkt;
-	if (fwrt->trans->cfg->device_family == IWL_DEVICE_FAMILY_A000)
-		iwl_parse_shared_mem_a000(fwrt, pkt);
+	if (fwrt->trans->cfg->device_family == IWL_DEVICE_FAMILY_22000)
+		iwl_parse_shared_mem_22000(fwrt, pkt);
 	else
 		iwl_parse_shared_mem(fwrt, pkt);
 
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-config.h b/drivers/net/wireless/intel/iwlwifi/iwl-config.h
index e21e46c..258d439 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-config.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-config.h
@@ -89,7 +89,7 @@ enum iwl_device_family {
 	IWL_DEVICE_FAMILY_7000,
 	IWL_DEVICE_FAMILY_8000,
 	IWL_DEVICE_FAMILY_9000,
-	IWL_DEVICE_FAMILY_A000,
+	IWL_DEVICE_FAMILY_22000,
 };
 
 /*
@@ -266,7 +266,7 @@ struct iwl_tt_params {
 #define OTP_LOW_IMAGE_SIZE_FAMILY_7000	(16 * 512 * sizeof(u16)) /* 16 KB */
 #define OTP_LOW_IMAGE_SIZE_FAMILY_8000	(32 * 512 * sizeof(u16)) /* 32 KB */
 #define OTP_LOW_IMAGE_SIZE_FAMILY_9000	OTP_LOW_IMAGE_SIZE_FAMILY_8000
-#define OTP_LOW_IMAGE_SIZE_FAMILY_A000	OTP_LOW_IMAGE_SIZE_FAMILY_9000
+#define OTP_LOW_IMAGE_SIZE_FAMILY_22000	OTP_LOW_IMAGE_SIZE_FAMILY_9000
 
 struct iwl_eeprom_params {
 	const u8 regulatory_bands[7];
@@ -330,7 +330,7 @@ struct iwl_pwr_tx_backoff {
  * @vht_mu_mimo_supported: VHT MU-MIMO support
  * @rf_id: need to read rf_id to determine the firmware image
  * @integrated: discrete or integrated
- * @gen2: a000 and on transport operation
+ * @gen2: 22000 and on transport operation
  * @cdb: CDB support
  * @nvm_type: see &enum iwl_nvm_type
  * @tx_cmd_queue_size: size of the cmd queue. If zero, use the same value as
@@ -477,13 +477,13 @@ extern const struct iwl_cfg iwl9460_2ac_cfg_soc;
 extern const struct iwl_cfg iwl9461_2ac_cfg_soc;
 extern const struct iwl_cfg iwl9462_2ac_cfg_soc;
 extern const struct iwl_cfg iwl9560_2ac_cfg_soc;
-extern const struct iwl_cfg iwla000_2ac_cfg_hr;
-extern const struct iwl_cfg iwla000_2ac_cfg_hr_cdb;
-extern const struct iwl_cfg iwla000_2ac_cfg_jf;
-extern const struct iwl_cfg iwla000_2ax_cfg_hr;
-extern const struct iwl_cfg iwla000_2ax_cfg_qnj_hr_f0;
-extern const struct iwl_cfg iwla000_2ax_cfg_qnj_jf_b0;
-extern const struct iwl_cfg iwla000_2ax_cfg_qnj_hr_a0;
+extern const struct iwl_cfg iwl22000_2ac_cfg_hr;
+extern const struct iwl_cfg iwl22000_2ac_cfg_hr_cdb;
+extern const struct iwl_cfg iwl22000_2ac_cfg_jf;
+extern const struct iwl_cfg iwl22000_2ax_cfg_hr;
+extern const struct iwl_cfg iwl22000_2ax_cfg_qnj_hr_f0;
+extern const struct iwl_cfg iwl22000_2ax_cfg_qnj_jf_b0;
+extern const struct iwl_cfg iwl22000_2ax_cfg_qnj_hr_a0;
 #endif /* CONFIG_IWLMVM */
 
 #endif /* __IWL_CONFIG_H__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-drv.c b/drivers/net/wireless/intel/iwlwifi/iwl-drv.c
index 4b224d7..de8f6ae 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-drv.c
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-drv.c
@@ -919,9 +919,14 @@ static int iwl_parse_tlv_firmware(struct iwl_drv *drv,
 			minor = le32_to_cpup(ptr++);
 			local_comp = le32_to_cpup(ptr);
 
-			snprintf(drv->fw.fw_version,
-				 sizeof(drv->fw.fw_version), "%u.%u.%u",
-				 major, minor, local_comp);
+			if (major >= 35)
+				snprintf(drv->fw.fw_version,
+					 sizeof(drv->fw.fw_version),
+					"%u.%08x.%u", major, minor, local_comp);
+			else
+				snprintf(drv->fw.fw_version,
+					 sizeof(drv->fw.fw_version),
+					"%u.%u.%u", major, minor, local_comp);
 			break;
 			}
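+		/*
+		 * Illustrative: major=34, minor=1, local_comp=0 renders as
+		 * "34.1.0" above, while major=35 with the same values renders
+		 * as "35.00000001.0" (minor in zero-padded hex).
+		 */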
 		case IWL_UCODE_TLV_FW_DBG_DEST: {
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-fh.h b/drivers/net/wireless/intel/iwlwifi/iwl-fh.h
index 66e5db41..11789ff 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-fh.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-fh.h
@@ -121,7 +121,7 @@
 #define FH_MEM_CBBC_16_19_UPPER_BOUND		(FH_MEM_LOWER_BOUND + 0xC00)
 #define FH_MEM_CBBC_20_31_LOWER_BOUND		(FH_MEM_LOWER_BOUND + 0xB20)
 #define FH_MEM_CBBC_20_31_UPPER_BOUND		(FH_MEM_LOWER_BOUND + 0xB80)
-/* a000 TFD table address, 64 bit */
+/* 22000 TFD table address, 64 bit */
 #define TFH_TFDQ_CBB_TABLE			(0x1C00)
 
 /* Find TFD CB base pointer for given queue */
@@ -140,7 +140,7 @@ static inline unsigned int FH_MEM_CBBC_QUEUE(struct iwl_trans *trans,
 	return FH_MEM_CBBC_20_31_LOWER_BOUND + 4 * (chnl - 20);
 }
 
-/* a000 configuration registers */
+/* 22000 configuration registers */
 
 /*
  * TFH Configuration register.
@@ -697,8 +697,8 @@ struct iwl_tfh_tb {
  * Each Tx queue uses a circular buffer of 256 TFDs stored in host DRAM.
  * Both driver and device share these circular buffers, each of which must be
  * contiguous 256 TFDs.
- * For pre a000 HW it is 256 x 128 bytes-per-TFD = 32 KBytes
- * For a000 HW and on it is 256 x 256 bytes-per-TFD = 65 KBytes
+ * For pre 22000 HW it is 256 x 128 bytes-per-TFD = 32 KBytes
+ * For 22000 HW and on it is 256 x 256 bytes-per-TFD = 64 KBytes
  *
  * Driver must indicate the physical address of the base of each
  * circular buffer via the FH_MEM_CBBC_QUEUE registers.
@@ -750,10 +750,10 @@ struct iwl_tfh_tfd {
 /**
  * struct iwlagn_schedq_bc_tbl - scheduler byte count table
  *	base physical address provided by SCD_DRAM_BASE_ADDR
- * For devices up to a000:
+ * For devices up to 22000:
  * @tfd_offset  0-12 - tx command byte count
  *		12-16 - station index
- * For a000 and on:
+ * For 22000 and on:
  * @tfd_offset  0-12 - tx command byte count
  *		12-13 - number of 64 byte chunks
  *		14-16 - reserved
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-trans.h b/drivers/net/wireless/intel/iwlwifi/iwl-trans.h
index 921cab9..84ae1e2 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-trans.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-trans.h
@@ -551,7 +551,7 @@ struct iwl_trans_ops {
 			   unsigned int queue_wdg_timeout);
 	void (*txq_disable)(struct iwl_trans *trans, int queue,
 			    bool configure_scd);
-	/* a000 functions */
+	/* 22000 functions */
 	int (*txq_alloc)(struct iwl_trans *trans,
 			 struct iwl_tx_queue_cfg_cmd *cmd,
 			 int cmd_id,
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/Makefile b/drivers/net/wireless/intel/iwlwifi/mvm/Makefile
index a47635c..9ffd219 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/Makefile
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/Makefile
@@ -2,7 +2,7 @@
 obj-$(CONFIG_IWLMVM)   += iwlmvm.o
 iwlmvm-y += fw.o mac80211.o nvm.o ops.o phy-ctxt.o mac-ctxt.o
 iwlmvm-y += utils.o rx.o rxmq.o tx.o binding.o quota.o sta.o sf.o
-iwlmvm-y += scan.o time-event.o rs.o
+iwlmvm-y += scan.o time-event.o rs.o rs-fw.o
 iwlmvm-y += power.o coex.o
 iwlmvm-y += tt.o offloading.o tdls.o
 iwlmvm-$(CONFIG_IWLWIFI_DEBUGFS) += debugfs.o debugfs-vif.o
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/coex.c b/drivers/net/wireless/intel/iwlwifi/mvm/coex.c
index 79c80f1..890dbff 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/coex.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/coex.c
@@ -7,7 +7,6 @@
  *
  * Copyright(c) 2013 - 2014 Intel Corporation. All rights reserved.
  * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
- * Copyright(c) 2017        Intel Deutschland GmbH
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of version 2 of the GNU General Public License as
@@ -34,7 +33,6 @@
  *
  * Copyright(c) 2013 - 2014 Intel Corporation. All rights reserved.
  * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
- * Copyright(c) 2017        Intel Deutschland GmbH
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -514,36 +512,17 @@ void iwl_mvm_rx_bt_coex_notif(struct iwl_mvm *mvm,
 	struct iwl_rx_packet *pkt = rxb_addr(rxb);
 	struct iwl_bt_coex_profile_notif *notif = (void *)pkt->data;
 
-	if (!iwl_mvm_has_new_ats_coex_api(mvm)) {
-		struct iwl_bt_coex_profile_notif_v4 *v4 = (void *)pkt->data;
-
-		mvm->last_bt_notif.mbox_msg[0] = v4->mbox_msg[0];
-		mvm->last_bt_notif.mbox_msg[1] = v4->mbox_msg[1];
-		mvm->last_bt_notif.mbox_msg[2] = v4->mbox_msg[2];
-		mvm->last_bt_notif.mbox_msg[3] = v4->mbox_msg[3];
-		mvm->last_bt_notif.msg_idx = v4->msg_idx;
-		mvm->last_bt_notif.bt_ci_compliance = v4->bt_ci_compliance;
-		mvm->last_bt_notif.primary_ch_lut = v4->primary_ch_lut;
-		mvm->last_bt_notif.secondary_ch_lut = v4->secondary_ch_lut;
-		mvm->last_bt_notif.bt_activity_grading =
-			v4->bt_activity_grading;
-		mvm->last_bt_notif.ttc_status = v4->ttc_status;
-		mvm->last_bt_notif.rrc_status = v4->rrc_status;
-	} else {
-		/* save this notification for future use: rssi fluctuations */
-		memcpy(&mvm->last_bt_notif, notif, sizeof(mvm->last_bt_notif));
-	}
-
 	IWL_DEBUG_COEX(mvm, "BT Coex Notification received\n");
-	IWL_DEBUG_COEX(mvm, "\tBT ci compliance %d\n",
-		       mvm->last_bt_notif.bt_ci_compliance);
+	IWL_DEBUG_COEX(mvm, "\tBT ci compliance %d\n", notif->bt_ci_compliance);
 	IWL_DEBUG_COEX(mvm, "\tBT primary_ch_lut %d\n",
-		       le32_to_cpu(mvm->last_bt_notif.primary_ch_lut));
+		       le32_to_cpu(notif->primary_ch_lut));
 	IWL_DEBUG_COEX(mvm, "\tBT secondary_ch_lut %d\n",
-		       le32_to_cpu(mvm->last_bt_notif.secondary_ch_lut));
+		       le32_to_cpu(notif->secondary_ch_lut));
 	IWL_DEBUG_COEX(mvm, "\tBT activity grading %d\n",
-		       le32_to_cpu(mvm->last_bt_notif.bt_activity_grading));
+		       le32_to_cpu(notif->bt_activity_grading));
 
+	/* remember this notification for future use: rssi fluctuations */
+	memcpy(&mvm->last_bt_notif, notif, sizeof(mvm->last_bt_notif));
 
 	iwl_mvm_bt_coex_notif_handle(mvm);
 }
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/d3.c b/drivers/net/wireless/intel/iwlwifi/mvm/d3.c
index b1f73dc..0e6cf39 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/d3.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/d3.c
@@ -429,231 +429,6 @@ static int iwl_mvm_send_patterns(struct iwl_mvm *mvm,
 	return err;
 }
 
-enum iwl_mvm_tcp_packet_type {
-	MVM_TCP_TX_SYN,
-	MVM_TCP_RX_SYNACK,
-	MVM_TCP_TX_DATA,
-	MVM_TCP_RX_ACK,
-	MVM_TCP_RX_WAKE,
-	MVM_TCP_TX_FIN,
-};
-
-static __le16 pseudo_hdr_check(int len, __be32 saddr, __be32 daddr)
-{
-	__sum16 check = tcp_v4_check(len, saddr, daddr, 0);
-	return cpu_to_le16(be16_to_cpu((__force __be16)check));
-}
-
-static void iwl_mvm_build_tcp_packet(struct ieee80211_vif *vif,
-				     struct cfg80211_wowlan_tcp *tcp,
-				     void *_pkt, u8 *mask,
-				     __le16 *pseudo_hdr_csum,
-				     enum iwl_mvm_tcp_packet_type ptype)
-{
-	struct {
-		struct ethhdr eth;
-		struct iphdr ip;
-		struct tcphdr tcp;
-		u8 data[];
-	} __packed *pkt = _pkt;
-	u16 ip_tot_len = sizeof(struct iphdr) + sizeof(struct tcphdr);
-	int i;
-
-	pkt->eth.h_proto = cpu_to_be16(ETH_P_IP),
-	pkt->ip.version = 4;
-	pkt->ip.ihl = 5;
-	pkt->ip.protocol = IPPROTO_TCP;
-
-	switch (ptype) {
-	case MVM_TCP_TX_SYN:
-	case MVM_TCP_TX_DATA:
-	case MVM_TCP_TX_FIN:
-		memcpy(pkt->eth.h_dest, tcp->dst_mac, ETH_ALEN);
-		memcpy(pkt->eth.h_source, vif->addr, ETH_ALEN);
-		pkt->ip.ttl = 128;
-		pkt->ip.saddr = tcp->src;
-		pkt->ip.daddr = tcp->dst;
-		pkt->tcp.source = cpu_to_be16(tcp->src_port);
-		pkt->tcp.dest = cpu_to_be16(tcp->dst_port);
-		/* overwritten for TX SYN later */
-		pkt->tcp.doff = sizeof(struct tcphdr) / 4;
-		pkt->tcp.window = cpu_to_be16(65000);
-		break;
-	case MVM_TCP_RX_SYNACK:
-	case MVM_TCP_RX_ACK:
-	case MVM_TCP_RX_WAKE:
-		memcpy(pkt->eth.h_dest, vif->addr, ETH_ALEN);
-		memcpy(pkt->eth.h_source, tcp->dst_mac, ETH_ALEN);
-		pkt->ip.saddr = tcp->dst;
-		pkt->ip.daddr = tcp->src;
-		pkt->tcp.source = cpu_to_be16(tcp->dst_port);
-		pkt->tcp.dest = cpu_to_be16(tcp->src_port);
-		break;
-	default:
-		WARN_ON(1);
-		return;
-	}
-
-	switch (ptype) {
-	case MVM_TCP_TX_SYN:
-		/* firmware assumes 8 option bytes - 8 NOPs for now */
-		memset(pkt->data, 0x01, 8);
-		ip_tot_len += 8;
-		pkt->tcp.doff = (sizeof(struct tcphdr) + 8) / 4;
-		pkt->tcp.syn = 1;
-		break;
-	case MVM_TCP_TX_DATA:
-		ip_tot_len += tcp->payload_len;
-		memcpy(pkt->data, tcp->payload, tcp->payload_len);
-		pkt->tcp.psh = 1;
-		pkt->tcp.ack = 1;
-		break;
-	case MVM_TCP_TX_FIN:
-		pkt->tcp.fin = 1;
-		pkt->tcp.ack = 1;
-		break;
-	case MVM_TCP_RX_SYNACK:
-		pkt->tcp.syn = 1;
-		pkt->tcp.ack = 1;
-		break;
-	case MVM_TCP_RX_ACK:
-		pkt->tcp.ack = 1;
-		break;
-	case MVM_TCP_RX_WAKE:
-		ip_tot_len += tcp->wake_len;
-		pkt->tcp.psh = 1;
-		pkt->tcp.ack = 1;
-		memcpy(pkt->data, tcp->wake_data, tcp->wake_len);
-		break;
-	}
-
-	switch (ptype) {
-	case MVM_TCP_TX_SYN:
-	case MVM_TCP_TX_DATA:
-	case MVM_TCP_TX_FIN:
-		pkt->ip.tot_len = cpu_to_be16(ip_tot_len);
-		pkt->ip.check = ip_fast_csum(&pkt->ip, pkt->ip.ihl);
-		break;
-	case MVM_TCP_RX_WAKE:
-		for (i = 0; i < DIV_ROUND_UP(tcp->wake_len, 8); i++) {
-			u8 tmp = tcp->wake_mask[i];
-			mask[i + 6] |= tmp << 6;
-			if (i + 1 < DIV_ROUND_UP(tcp->wake_len, 8))
-				mask[i + 7] = tmp >> 2;
-		}
-		/* fall through for ethernet/IP/TCP headers mask */
-	case MVM_TCP_RX_SYNACK:
-	case MVM_TCP_RX_ACK:
-		mask[0] = 0xff; /* match ethernet */
-		/*
-		 * match ethernet, ip.version, ip.ihl
-		 * the ip.ihl half byte is really masked out by firmware
-		 */
-		mask[1] = 0x7f;
-		mask[2] = 0x80; /* match ip.protocol */
-		mask[3] = 0xfc; /* match ip.saddr, ip.daddr */
-		mask[4] = 0x3f; /* match ip.daddr, tcp.source, tcp.dest */
-		mask[5] = 0x80; /* match tcp flags */
-		/* leave rest (0 or set for MVM_TCP_RX_WAKE) */
-		break;
-	};
-
-	*pseudo_hdr_csum = pseudo_hdr_check(ip_tot_len - sizeof(struct iphdr),
-					    pkt->ip.saddr, pkt->ip.daddr);
-}
-
-static int iwl_mvm_send_remote_wake_cfg(struct iwl_mvm *mvm,
-					struct ieee80211_vif *vif,
-					struct cfg80211_wowlan_tcp *tcp)
-{
-	struct iwl_wowlan_remote_wake_config *cfg;
-	struct iwl_host_cmd cmd = {
-		.id = REMOTE_WAKE_CONFIG_CMD,
-		.len = { sizeof(*cfg), },
-		.dataflags = { IWL_HCMD_DFL_NOCOPY, },
-	};
-	int ret;
-
-	if (!tcp)
-		return 0;
-
-	cfg = kzalloc(sizeof(*cfg), GFP_KERNEL);
-	if (!cfg)
-		return -ENOMEM;
-	cmd.data[0] = cfg;
-
-	cfg->max_syn_retries = 10;
-	cfg->max_data_retries = 10;
-	cfg->tcp_syn_ack_timeout = 1; /* seconds */
-	cfg->tcp_ack_timeout = 1; /* seconds */
-
-	/* SYN (TX) */
-	iwl_mvm_build_tcp_packet(
-		vif, tcp, cfg->syn_tx.data, NULL,
-		&cfg->syn_tx.info.tcp_pseudo_header_checksum,
-		MVM_TCP_TX_SYN);
-	cfg->syn_tx.info.tcp_payload_length = 0;
-
-	/* SYN/ACK (RX) */
-	iwl_mvm_build_tcp_packet(
-		vif, tcp, cfg->synack_rx.data, cfg->synack_rx.rx_mask,
-		&cfg->synack_rx.info.tcp_pseudo_header_checksum,
-		MVM_TCP_RX_SYNACK);
-	cfg->synack_rx.info.tcp_payload_length = 0;
-
-	/* KEEPALIVE/ACK (TX) */
-	iwl_mvm_build_tcp_packet(
-		vif, tcp, cfg->keepalive_tx.data, NULL,
-		&cfg->keepalive_tx.info.tcp_pseudo_header_checksum,
-		MVM_TCP_TX_DATA);
-	cfg->keepalive_tx.info.tcp_payload_length =
-		cpu_to_le16(tcp->payload_len);
-	cfg->sequence_number_offset = tcp->payload_seq.offset;
-	/* length must be 0..4, the field is little endian */
-	cfg->sequence_number_length = tcp->payload_seq.len;
-	cfg->initial_sequence_number = cpu_to_le32(tcp->payload_seq.start);
-	cfg->keepalive_interval = cpu_to_le16(tcp->data_interval);
-	if (tcp->payload_tok.len) {
-		cfg->token_offset = tcp->payload_tok.offset;
-		cfg->token_length = tcp->payload_tok.len;
-		cfg->num_tokens =
-			cpu_to_le16(tcp->tokens_size % tcp->payload_tok.len);
-		memcpy(cfg->tokens, tcp->payload_tok.token_stream,
-		       tcp->tokens_size);
-	} else {
-		/* set tokens to max value to almost never run out */
-		cfg->num_tokens = cpu_to_le16(65535);
-	}
-
-	/* ACK (RX) */
-	iwl_mvm_build_tcp_packet(
-		vif, tcp, cfg->keepalive_ack_rx.data,
-		cfg->keepalive_ack_rx.rx_mask,
-		&cfg->keepalive_ack_rx.info.tcp_pseudo_header_checksum,
-		MVM_TCP_RX_ACK);
-	cfg->keepalive_ack_rx.info.tcp_payload_length = 0;
-
-	/* WAKEUP (RX) */
-	iwl_mvm_build_tcp_packet(
-		vif, tcp, cfg->wake_rx.data, cfg->wake_rx.rx_mask,
-		&cfg->wake_rx.info.tcp_pseudo_header_checksum,
-		MVM_TCP_RX_WAKE);
-	cfg->wake_rx.info.tcp_payload_length =
-		cpu_to_le16(tcp->wake_len);
-
-	/* FIN */
-	iwl_mvm_build_tcp_packet(
-		vif, tcp, cfg->fin_tx.data, NULL,
-		&cfg->fin_tx.info.tcp_pseudo_header_checksum,
-		MVM_TCP_TX_FIN);
-	cfg->fin_tx.info.tcp_payload_length = 0;
-
-	ret = iwl_mvm_send_cmd(mvm, &cmd);
-	kfree(cfg);
-
-	return ret;
-}
-
 static int iwl_mvm_d3_reprogram(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
 				struct ieee80211_sta *ap_sta)
 {
@@ -1082,12 +857,7 @@ iwl_mvm_wowlan_config(struct iwl_mvm *mvm,
 	if (ret)
 		return ret;
 
-	ret = iwl_mvm_send_proto_offload(mvm, vif, false, true, 0);
-	if (ret)
-		return ret;
-
-	ret = iwl_mvm_send_remote_wake_cfg(mvm, vif, wowlan->tcp);
-	return ret;
+	return iwl_mvm_send_proto_offload(mvm, vif, false, true, 0);
 }
 
 static int
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c
index 2ff594f..270781e 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c
@@ -425,6 +425,50 @@ static ssize_t iwl_dbgfs_stations_read(struct file *file, char __user *user_buf,
 	return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
 }
 
+static ssize_t iwl_dbgfs_rs_data_read(struct file *file, char __user *user_buf,
+				      size_t count, loff_t *ppos)
+{
+	struct ieee80211_sta *sta = file->private_data;
+	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
+	struct iwl_lq_sta_rs_fw *lq_sta = &mvmsta->lq_sta.rs_fw;
+	struct iwl_mvm *mvm = lq_sta->pers.drv;
+	static const size_t bufsz = 2048;
+	char *buff;
+	int desc = 0;
+	ssize_t ret;
+
+	buff = kmalloc(bufsz, GFP_KERNEL);
+	if (!buff)
+		return -ENOMEM;
+
+	mutex_lock(&mvm->mutex);
+
+	desc += scnprintf(buff + desc, bufsz - desc, "sta_id %d\n",
+			  lq_sta->pers.sta_id);
+	desc += scnprintf(buff + desc, bufsz - desc,
+			  "fixed rate 0x%X\n",
+			  lq_sta->pers.dbg_fixed_rate);
+	desc += scnprintf(buff + desc, bufsz - desc,
+			  "A-MPDU size limit %d\n",
+			  lq_sta->pers.dbg_agg_frame_count_lim);
+	desc += scnprintf(buff + desc, bufsz - desc,
+			  "valid_tx_ant %s%s%s\n",
+		(iwl_mvm_get_valid_tx_ant(mvm) & ANT_A) ? "ANT_A," : "",
+		(iwl_mvm_get_valid_tx_ant(mvm) & ANT_B) ? "ANT_B," : "",
+		(iwl_mvm_get_valid_tx_ant(mvm) & ANT_C) ? "ANT_C" : "");
+	desc += scnprintf(buff + desc, bufsz - desc,
+			  "last tx rate=0x%X ",
+			  lq_sta->last_rate_n_flags);
+
+	desc += rs_pretty_print_rate(buff + desc, bufsz - desc,
+				     lq_sta->last_rate_n_flags);
+	mutex_unlock(&mvm->mutex);
+
+	ret = simple_read_from_buffer(user_buf, count, ppos, buff, desc);
+	kfree(buff);
+	return ret;
+}
+
 static ssize_t iwl_dbgfs_disable_power_off_read(struct file *file,
 						char __user *user_buf,
 						size_t count, loff_t *ppos)
@@ -470,8 +514,7 @@ static ssize_t iwl_dbgfs_disable_power_off_write(struct iwl_mvm *mvm, char *buf,
 }
 
 static
-int iwl_mvm_coex_dump_mbox(struct iwl_mvm *mvm,
-			   struct iwl_bt_coex_profile_notif *notif, char *buf,
+int iwl_mvm_coex_dump_mbox(struct iwl_bt_coex_profile_notif *notif, char *buf,
 			   int pos, int bufsz)
 {
 	pos += scnprintf(buf+pos, bufsz-pos, "MBOX dw0:\n");
@@ -525,12 +568,7 @@ int iwl_mvm_coex_dump_mbox(struct iwl_mvm *mvm,
 	BT_MBOX_PRINT(3, INBAND_P, false);
 	BT_MBOX_PRINT(3, MSG_TYPE_2, false);
 	BT_MBOX_PRINT(3, SSN_2, false);
-	BT_MBOX_PRINT(3, UPDATE_REQUEST, !iwl_mvm_has_new_ats_coex_api(mvm));
-
-	if (iwl_mvm_has_new_ats_coex_api(mvm)) {
-		BT_MBOX_PRINT(4, ATS_BT_INTERVAL, false);
-		BT_MBOX_PRINT(4, ATS_BT_ACTIVE_MAX_TH, true);
-	}
+	BT_MBOX_PRINT(3, UPDATE_REQUEST, true);
 
 	return pos;
 }
@@ -549,7 +587,7 @@ static ssize_t iwl_dbgfs_bt_notif_read(struct file *file, char __user *user_buf,
 
 	mutex_lock(&mvm->mutex);
 
-	pos += iwl_mvm_coex_dump_mbox(mvm, notif, buf, pos, bufsz);
+	pos += iwl_mvm_coex_dump_mbox(notif, buf, pos, bufsz);
 
 	pos += scnprintf(buf + pos, bufsz - pos, "bt_ci_compliance = %d\n",
 			 notif->bt_ci_compliance);
@@ -721,6 +759,9 @@ static ssize_t iwl_dbgfs_fw_rx_stats_read(struct file *file,
 
 	mutex_lock(&mvm->mutex);
 
+	if (iwl_mvm_firmware_running(mvm))
+		iwl_mvm_request_statistics(mvm, false);
+
 	pos += scnprintf(buf + pos, bufsz - pos, fmt_header,
 			 "Statistics_Rx - OFDM");
 	if (!iwl_mvm_has_new_rx_stats_api(mvm)) {
@@ -936,7 +977,8 @@ static ssize_t iwl_dbgfs_frame_stats_read(struct iwl_mvm *mvm,
 			continue;
 		pos += scnprintf(pos, endpos - pos, "Rate[%d]: ",
 				 (int)(ARRAY_SIZE(stats->last_rates) - i));
-		pos += rs_pretty_print_rate(pos, stats->last_rates[idx]);
+		pos += rs_pretty_print_rate(pos, endpos - pos,
+					    stats->last_rates[idx]);
 	}
 	spin_unlock_bh(&mvm->drv_stats_lock);
 
@@ -1603,6 +1645,19 @@ static ssize_t iwl_dbgfs_d0i3_refs_write(struct iwl_mvm *mvm, char *buf,
 #define MVM_DEBUGFS_ADD_FILE(name, parent, mode) \
 	MVM_DEBUGFS_ADD_FILE_ALIAS(#name, name, parent, mode)
 
+#define MVM_DEBUGFS_WRITE_STA_FILE_OPS(name, bufsz) \
+	_MVM_DEBUGFS_WRITE_FILE_OPS(name, bufsz, struct ieee80211_sta)
+#define MVM_DEBUGFS_READ_WRITE_STA_FILE_OPS(name, bufsz) \
+	_MVM_DEBUGFS_READ_WRITE_FILE_OPS(name, bufsz, struct ieee80211_sta)
+
+#define MVM_DEBUGFS_ADD_STA_FILE_ALIAS(alias, name, parent, mode) do {	\
+		if (!debugfs_create_file(alias, mode, parent, sta,	\
+					 &iwl_dbgfs_##name##_ops))	\
+			goto err;					\
+	} while (0)
+#define MVM_DEBUGFS_ADD_STA_FILE(name, parent, mode) \
+	MVM_DEBUGFS_ADD_STA_FILE_ALIAS(#name, name, parent, mode)
+
 static ssize_t
 iwl_dbgfs_prph_reg_read(struct file *file,
 			char __user *user_buf,
@@ -1687,6 +1742,7 @@ MVM_DEBUGFS_READ_WRITE_FILE_OPS(sram, 64);
 MVM_DEBUGFS_READ_WRITE_FILE_OPS(set_nic_temperature, 64);
 MVM_DEBUGFS_READ_FILE_OPS(nic_temp);
 MVM_DEBUGFS_READ_FILE_OPS(stations);
+MVM_DEBUGFS_READ_FILE_OPS(rs_data);
 MVM_DEBUGFS_READ_FILE_OPS(bt_notif);
 MVM_DEBUGFS_READ_FILE_OPS(bt_cmd);
 MVM_DEBUGFS_READ_WRITE_FILE_OPS(disable_power_off, 64);
@@ -1851,6 +1907,21 @@ static const struct file_operations iwl_dbgfs_mem_ops = {
 	.llseek = default_llseek,
 };
 
+void iwl_mvm_sta_add_debugfs(struct ieee80211_hw *hw,
+			     struct ieee80211_vif *vif,
+			     struct ieee80211_sta *sta,
+			     struct dentry *dir)
+{
+	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+
+	if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_TLC_OFFLOAD))
+		MVM_DEBUGFS_ADD_STA_FILE(rs_data, dir, S_IRUSR);
+
+	return;
+err:
+	IWL_ERR(mvm, "Can't create the mvm station debugfs entry\n");
+}
+
 int iwl_mvm_dbgfs_register(struct iwl_mvm *mvm, struct dentry *dbgfs_dir)
 {
 	struct dentry *bcast_dir __maybe_unused;
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
index c0de7bb..0920be6 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
@@ -297,7 +297,7 @@ static int iwl_mvm_load_ucode_wait_alive(struct iwl_mvm *mvm,
 	if (ret) {
 		struct iwl_trans *trans = mvm->trans;
 
-		if (trans->cfg->device_family == IWL_DEVICE_FAMILY_A000)
+		if (trans->cfg->device_family == IWL_DEVICE_FAMILY_22000)
 			IWL_ERR(mvm,
 				"SecBoot CPU1 Status: 0x%x, CPU2 Status: 0x%x\n",
 				iwl_read_prph(trans, UMAG_SB_CPU_1_STATUS),
@@ -923,11 +923,11 @@ static int iwl_mvm_load_rt_fw(struct iwl_mvm *mvm)
 
 	ret = iwl_run_init_mvm_ucode(mvm, false);
 
-	if (iwlmvm_mod_params.init_dbg)
-		return 0;
-
 	if (ret) {
 		IWL_ERR(mvm, "Failed to run INIT ucode: %d\n", ret);
+
+		if (iwlmvm_mod_params.init_dbg)
+			return 0;
 		return ret;
 	}
 
@@ -998,9 +998,9 @@ int iwl_mvm_up(struct iwl_mvm *mvm)
 		goto error;
 
 	/* Init RSS configuration */
-	/* TODO - remove a000 disablement when we have RXQ config API */
+	/* TODO - remove 22000 disablement when we have RXQ config API */
 	if (iwl_mvm_has_new_rx_api(mvm) &&
-	    mvm->trans->cfg->device_family != IWL_DEVICE_FAMILY_A000) {
+	    mvm->trans->cfg->device_family != IWL_DEVICE_FAMILY_22000) {
 		ret = iwl_send_rss_cfg_cmd(mvm);
 		if (ret) {
 			IWL_ERR(mvm, "Failed to configure RSS queues: %d\n",
@@ -1111,7 +1111,7 @@ int iwl_mvm_up(struct iwl_mvm *mvm)
 	IWL_DEBUG_INFO(mvm, "RT uCode started.\n");
 	return 0;
  error:
-	if (!iwlmvm_mod_params.init_dbg)
+	if (!iwlmvm_mod_params.init_dbg || !ret)
 		iwl_mvm_stop_device(mvm);
 	return ret;
 }
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
index 3e92a11..4f568652 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
@@ -114,29 +114,6 @@ static const struct ieee80211_iface_combination iwl_mvm_iface_combinations[] = {
 	},
 };
 
-#ifdef CONFIG_PM_SLEEP
-static const struct nl80211_wowlan_tcp_data_token_feature
-iwl_mvm_wowlan_tcp_token_feature = {
-	.min_len = 0,
-	.max_len = 255,
-	.bufsize = IWL_WOWLAN_REMOTE_WAKE_MAX_TOKENS,
-};
-
-static const struct wiphy_wowlan_tcp_support iwl_mvm_wowlan_tcp_support = {
-	.tok = &iwl_mvm_wowlan_tcp_token_feature,
-	.data_payload_max = IWL_WOWLAN_TCP_MAX_PACKET_LEN -
-			    sizeof(struct ethhdr) -
-			    sizeof(struct iphdr) -
-			    sizeof(struct tcphdr),
-	.data_interval_max = 65535, /* __le16 in API */
-	.wake_payload_max = IWL_WOWLAN_REMOTE_WAKE_MAX_PACKET_LEN -
-			    sizeof(struct ethhdr) -
-			    sizeof(struct iphdr) -
-			    sizeof(struct tcphdr),
-	.seq = true,
-};
-#endif
-
 #ifdef CONFIG_IWLWIFI_BCAST_FILTERING
 /*
  * Use the reserved field to indicate magic values.
@@ -443,6 +420,12 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
 	ieee80211_hw_set(hw, SUPPORTS_CLONED_SKBS);
 	ieee80211_hw_set(hw, SUPPORTS_AMSDU_IN_AMPDU);
 	ieee80211_hw_set(hw, NEEDS_UNIQUE_STA_ADDR);
+
+	if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_TLC_OFFLOAD)) {
+		ieee80211_hw_set(hw, TX_AMPDU_SETUP_IN_HW);
+		ieee80211_hw_set(hw, HAS_RATE_CONTROL);
+	}
+
 	if (iwl_mvm_has_new_rx_api(mvm))
 		ieee80211_hw_set(hw, SUPPORTS_REORDERING_BUFFER);
 
@@ -477,7 +460,9 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
 	/* this is the case for CCK frames, it's better (only 8) for OFDM */
 	hw->radiotap_timestamp.accuracy = 22;
 
-	hw->rate_control_algorithm = "iwl-mvm-rs";
+	if (!fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_TLC_OFFLOAD))
+		hw->rate_control_algorithm = RS_NAME;
+
 	hw->uapsd_queues = IWL_MVM_UAPSD_QUEUES;
 	hw->uapsd_max_sp_len = IWL_UAPSD_MAX_SP;
 
@@ -702,7 +687,6 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
 		mvm->wowlan.pattern_min_len = IWL_WOWLAN_MIN_PATTERN_LEN;
 		mvm->wowlan.pattern_max_len = IWL_WOWLAN_MAX_PATTERN_LEN;
 		mvm->wowlan.max_nd_match_sets = IWL_SCAN_MAX_PROFILES;
-		mvm->wowlan.tcp = &iwl_mvm_wowlan_tcp_support;
 		hw->wiphy->wowlan = &mvm->wowlan;
 	}
 #endif
@@ -3216,6 +3200,10 @@ static int iwl_mvm_roc(struct ieee80211_hw *hw,
 	IWL_DEBUG_MAC80211(mvm, "enter (%d, %d, %d)\n", channel->hw_value,
 			   duration, type);
 
+	/*
+	 * Flush the done work, just in case it's still pending, so that
+	 * the work it does can complete and we can accept new frames.
+	 */
 	flush_work(&mvm->roc_done_wk);
 
 	mutex_lock(&mvm->mutex);
@@ -4301,7 +4289,7 @@ void iwl_mvm_sync_rx_queues_internal(struct iwl_mvm *mvm,
 			   mvm->trans->num_rx_queues);
 
 	/* TODO - remove this when we have RXQ config API */
-	if (mvm->trans->cfg->device_family == IWL_DEVICE_FAMILY_A000) {
+	if (mvm->trans->cfg->device_family == IWL_DEVICE_FAMILY_22000) {
 		qmask = BIT(0);
 		if (notif->sync)
 			atomic_set(&mvm->queue_sync_counter, 1);
@@ -4414,4 +4402,7 @@ const struct ieee80211_ops iwl_mvm_hw_ops = {
 #endif
 	.get_survey = iwl_mvm_mac_get_survey,
 	.sta_statistics = iwl_mvm_mac_sta_statistics,
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+	.sta_add_debugfs = iwl_mvm_sta_add_debugfs,
+#endif
 };
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
index 55ab534..5ecba2b 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
@@ -1248,7 +1248,7 @@ static inline bool iwl_mvm_has_new_tx_api(struct iwl_mvm *mvm)
 static inline bool iwl_mvm_has_unified_ucode(struct iwl_mvm *mvm)
 {
 	/* TODO - better define this */
-	return mvm->trans->cfg->device_family >= IWL_DEVICE_FAMILY_A000;
+	return mvm->trans->cfg->device_family >= IWL_DEVICE_FAMILY_22000;
 }
 
 static inline bool iwl_mvm_is_cdb_supported(struct iwl_mvm *mvm)
@@ -1272,12 +1272,6 @@ static inline bool iwl_mvm_has_new_rx_stats_api(struct iwl_mvm *mvm)
 			  IWL_UCODE_TLV_API_NEW_RX_STATS);
 }
 
-static inline bool iwl_mvm_has_new_ats_coex_api(struct iwl_mvm *mvm)
-{
-	return fw_has_api(&mvm->fw->ucode_capa,
-			  IWL_UCODE_TLV_API_COEX_ATS_EXTERNAL);
-}
-
 static inline bool iwl_mvm_has_quota_low_latency(struct iwl_mvm *mvm)
 {
 	return fw_has_api(&mvm->fw->ucode_capa,
@@ -1600,9 +1594,9 @@ iwl_mvm_vif_dbgfs_clean(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
 /* rate scaling */
 int iwl_mvm_send_lq_cmd(struct iwl_mvm *mvm, struct iwl_lq_cmd *lq, bool init);
 void iwl_mvm_update_frame_stats(struct iwl_mvm *mvm, u32 rate, bool agg);
-int rs_pretty_print_rate(char *buf, const u32 rate);
+int rs_pretty_print_rate(char *buf, int bufsz, const u32 rate);
 void rs_update_last_rssi(struct iwl_mvm *mvm,
-			 struct iwl_lq_sta *lq_sta,
+			 struct iwl_mvm_sta *mvmsta,
 			 struct ieee80211_rx_status *rx_status);
 
 /* power management */
@@ -1882,5 +1876,11 @@ void iwl_mvm_event_frame_timeout_callback(struct iwl_mvm *mvm,
 
 int iwl_mvm_sar_select_profile(struct iwl_mvm *mvm, int prof_a, int prof_b);
 int iwl_mvm_get_sar_geo_profile(struct iwl_mvm *mvm);
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+void iwl_mvm_sta_add_debugfs(struct ieee80211_hw *hw,
+			     struct ieee80211_vif *vif,
+			     struct ieee80211_sta *sta,
+			     struct dentry *dir);
+#endif
 
 #endif /* __IWL_MVM_H__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
index 45470b6..aab4aea 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
@@ -127,11 +127,8 @@ static int __init iwl_mvm_init(void)
 	}
 
 	ret = iwl_opmode_register("iwlmvm", &iwl_mvm_ops);
-
-	if (ret) {
+	if (ret)
 		pr_err("Unable to register MVM op_mode: %d\n", ret);
-		iwl_mvm_rate_control_unregister();
-	}
 
 	return ret;
 }
@@ -750,7 +747,7 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
 	mutex_lock(&mvm->mutex);
 	iwl_mvm_ref(mvm, IWL_MVM_REF_INIT_UCODE);
 	err = iwl_run_init_mvm_ucode(mvm, true);
-	if (!iwlmvm_mod_params.init_dbg)
+	if (!iwlmvm_mod_params.init_dbg || !err)
 		iwl_mvm_stop_device(mvm);
 	iwl_mvm_unref(mvm, IWL_MVM_REF_INIT_UCODE);
 	mutex_unlock(&mvm->mutex);
@@ -1021,6 +1018,8 @@ static void iwl_mvm_rx_mq(struct iwl_op_mode *op_mode,
 		iwl_mvm_rx_queue_notif(mvm, rxb, 0);
 	else if (cmd == WIDE_ID(LEGACY_GROUP, FRAME_RELEASE))
 		iwl_mvm_rx_frame_release(mvm, napi, rxb, 0);
+	else if (cmd == WIDE_ID(DATA_PATH_GROUP, TLC_MNG_UPDATE_NOTIF))
+		iwl_mvm_tlc_update_notif(mvm, pkt);
 	else
 		iwl_mvm_rx_common(mvm, rxb, pkt);
 }
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rs-fw.c b/drivers/net/wireless/intel/iwlwifi/mvm/rs-fw.c
new file mode 100644
index 0000000..55d1274
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/rs-fw.c
@@ -0,0 +1,314 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license.  When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2017        Intel Deutschland GmbH
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called COPYING.
+ *
+ * Contact Information:
+ *  Intel Linux Wireless <linuxwifi@intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2017        Intel Deutschland GmbH
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ *  * Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in
+ *    the documentation and/or other materials provided with the
+ *    distribution.
+ *  * Neither the name Intel Corporation nor the names of its
+ *    contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *****************************************************************************/
+#include "rs.h"
+#include "fw-api.h"
+#include "sta.h"
+#include "iwl-op-mode.h"
+#include "mvm.h"
+
+static u8 rs_fw_bw_from_sta_bw(struct ieee80211_sta *sta)
+{
+	switch (sta->bandwidth) {
+	case IEEE80211_STA_RX_BW_160:
+		return IWL_TLC_MNG_MAX_CH_WIDTH_160MHZ;
+	case IEEE80211_STA_RX_BW_80:
+		return IWL_TLC_MNG_MAX_CH_WIDTH_80MHZ;
+	case IEEE80211_STA_RX_BW_40:
+		return IWL_TLC_MNG_MAX_CH_WIDTH_40MHZ;
+	case IEEE80211_STA_RX_BW_20:
+	default:
+		return IWL_TLC_MNG_MAX_CH_WIDTH_20MHZ;
+	}
+}
+
+static u8 rs_fw_set_active_chains(u8 chains)
+{
+	u8 fw_chains = 0;
+
+	if (chains & ANT_A)
+		fw_chains |= IWL_TLC_MNG_CHAIN_A_MSK;
+	if (chains & ANT_B)
+		fw_chains |= IWL_TLC_MNG_CHAIN_B_MSK;
+	if (chains & ANT_C)
+		fw_chains |= IWL_TLC_MNG_CHAIN_C_MSK;
+
+	return fw_chains;
+}
+
+static u8 rs_fw_sgi_cw_support(struct ieee80211_sta *sta)
+{
+	struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap;
+	struct ieee80211_sta_vht_cap *vht_cap = &sta->vht_cap;
+	u8 supp = 0;
+
+	if (ht_cap->cap & IEEE80211_HT_CAP_SGI_20)
+		supp |= IWL_TLC_MNG_SGI_20MHZ_MSK;
+	if (ht_cap->cap & IEEE80211_HT_CAP_SGI_40)
+		supp |= IWL_TLC_MNG_SGI_40MHZ_MSK;
+	if (vht_cap->cap & IEEE80211_VHT_CAP_SHORT_GI_80)
+		supp |= IWL_TLC_MNG_SGI_80MHZ_MSK;
+	if (vht_cap->cap & IEEE80211_VHT_CAP_SHORT_GI_160)
+		supp |= IWL_TLC_MNG_SGI_160MHZ_MSK;
+
+	return supp;
+}
+
+static u16 rs_fw_set_config_flags(struct iwl_mvm *mvm,
+				  struct ieee80211_sta *sta)
+{
+	struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap;
+	struct ieee80211_sta_vht_cap *vht_cap = &sta->vht_cap;
+	bool vht_ena = vht_cap && vht_cap->vht_supported;
+	u16 flags = IWL_TLC_MNG_CFG_FLAGS_CCK_MSK |
+		    IWL_TLC_MNG_CFG_FLAGS_DCM_MSK |
+		    IWL_TLC_MNG_CFG_FLAGS_DD_MSK;
+
+	if (mvm->cfg->ht_params->stbc &&
+	    (num_of_ant(iwl_mvm_get_valid_tx_ant(mvm)) > 1) &&
+	    ((ht_cap && (ht_cap->cap & IEEE80211_HT_CAP_RX_STBC)) ||
+	     (vht_ena && (vht_cap->cap & IEEE80211_VHT_CAP_RXSTBC_MASK))))
+		flags |= IWL_TLC_MNG_CFG_FLAGS_STBC_MSK;
+
+	if (mvm->cfg->ht_params->ldpc &&
+	    ((ht_cap && (ht_cap->cap & IEEE80211_HT_CAP_LDPC_CODING)) ||
+	     (vht_ena && (vht_cap->cap & IEEE80211_VHT_CAP_RXLDPC))))
+		flags |= IWL_TLC_MNG_CFG_FLAGS_LDPC_MSK;
+
+	if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_BEAMFORMER) &&
+	    (num_of_ant(iwl_mvm_get_valid_tx_ant(mvm)) > 1) &&
+	    (vht_cap->cap & IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE))
+		flags |= IWL_TLC_MNG_CFG_FLAGS_BF_MSK;
+
+	return flags;
+}
+
+static
+int rs_fw_vht_highest_rx_mcs_index(struct ieee80211_sta_vht_cap *vht_cap,
+				   int nss)
+{
+	u16 rx_mcs = le16_to_cpu(vht_cap->vht_mcs.rx_mcs_map) &
+		(0x3 << (2 * (nss - 1)));
+	rx_mcs >>= (2 * (nss - 1));
+
+	switch (rx_mcs) {
+	case IEEE80211_VHT_MCS_SUPPORT_0_7:
+		return IWL_TLC_MNG_HT_RATE_MCS7;
+	case IEEE80211_VHT_MCS_SUPPORT_0_8:
+		return IWL_TLC_MNG_HT_RATE_MCS8;
+	case IEEE80211_VHT_MCS_SUPPORT_0_9:
+		return IWL_TLC_MNG_HT_RATE_MCS9;
+	default:
+		WARN_ON_ONCE(1);
+		break;
+	}
+
+	return 0;
+}
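+
+/*
+ * Note: the VHT rx_mcs_map packs 2 bits per spatial stream, so for
+ * nss == 2 the code above isolates bits 2-3.  E.g. a map of 0xfffa
+ * yields 0x2 == IEEE80211_VHT_MCS_SUPPORT_0_9 for both the first and
+ * second streams.
+ */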
+
+static void rs_fw_vht_set_enabled_rates(struct ieee80211_sta *sta,
+					struct ieee80211_sta_vht_cap *vht_cap,
+					struct iwl_tlc_config_cmd *cmd)
+{
+	u16 supp;
+	int i, highest_mcs;
+
+	for (i = 0; i < sta->rx_nss; i++) {
+		if (i == MAX_RS_ANT_NUM)
+			break;
+
+		highest_mcs = rs_fw_vht_highest_rx_mcs_index(vht_cap, i + 1);
+		if (!highest_mcs)
+			continue;
+
+		supp = BIT(highest_mcs + 1) - 1;
+		if (sta->bandwidth == IEEE80211_STA_RX_BW_20)
+			supp &= ~BIT(IWL_TLC_MNG_HT_RATE_MCS9);
+
+		cmd->ht_supp_rates[i] = cpu_to_le16(supp);
+	}
+}
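+
+/*
+ * E.g. highest_mcs == IWL_TLC_MNG_HT_RATE_MCS9 gives BIT(10) - 1 ==
+ * 0x3ff, i.e. a contiguous MCS0..MCS9 bitmap, with MCS9 stripped again
+ * for 20MHz stations (where VHT MCS9 is generally not valid).
+ */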
+
+static void rs_fw_set_supp_rates(struct ieee80211_sta *sta,
+				 struct ieee80211_supported_band *sband,
+				 struct iwl_tlc_config_cmd *cmd)
+{
+	int i;
+	unsigned long tmp;
+	unsigned long supp; /* must be unsigned long for for_each_set_bit */
+	struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap;
+	struct ieee80211_sta_vht_cap *vht_cap = &sta->vht_cap;
+
+	/* non HT rates */
+	supp = 0;
+	tmp = sta->supp_rates[sband->band];
+	for_each_set_bit(i, &tmp, BITS_PER_LONG)
+		supp |= BIT(sband->bitrates[i].hw_value);
+
+	cmd->non_ht_supp_rates = cpu_to_le16(supp);
+	cmd->mode = IWL_TLC_MNG_MODE_NON_HT;
+
+	/* HT/VHT rates */
+	if (vht_cap && vht_cap->vht_supported) {
+		cmd->mode = IWL_TLC_MNG_MODE_VHT;
+		rs_fw_vht_set_enabled_rates(sta, vht_cap, cmd);
+	} else if (ht_cap && ht_cap->ht_supported) {
+		cmd->mode = IWL_TLC_MNG_MODE_HT;
+		cmd->ht_supp_rates[0] = cpu_to_le16(ht_cap->mcs.rx_mask[0]);
+		cmd->ht_supp_rates[1] = cpu_to_le16(ht_cap->mcs.rx_mask[1]);
+	}
+}
+
+static void rs_fw_tlc_mng_notif_req_config(struct iwl_mvm *mvm, u8 sta_id)
+{
+	u32 cmd_id = iwl_cmd_id(TLC_MNG_NOTIF_REQ_CMD, DATA_PATH_GROUP, 0);
+	struct iwl_tlc_notif_req_config_cmd cfg_cmd = {
+		.sta_id = sta_id,
+		.flags = cpu_to_le16(IWL_TLC_NOTIF_INIT_RATE_MSK),
+		.interval = cpu_to_le16(IWL_TLC_NOTIF_REQ_INTERVAL),
+	};
+	int ret;
+
+	ret = iwl_mvm_send_cmd_pdu(mvm, cmd_id, 0, sizeof(cfg_cmd), &cfg_cmd);
+	if (ret)
+		IWL_ERR(mvm, "Failed to send TLC notif request (%d)\n", ret);
+}
+
+void iwl_mvm_tlc_update_notif(struct iwl_mvm *mvm, struct iwl_rx_packet *pkt)
+{
+	struct iwl_tlc_update_notif *notif;
+	struct iwl_mvm_sta *mvmsta;
+	struct iwl_lq_sta_rs_fw *lq_sta;
+
+	notif = (void *)pkt->data;
+	mvmsta = iwl_mvm_sta_from_staid_rcu(mvm, notif->sta_id);
+
+	if (!mvmsta) {
+		IWL_ERR(mvm, "Invalid sta id (%d) in FW TLC notification\n",
+			notif->sta_id);
+		return;
+	}
+
+	lq_sta = &mvmsta->lq_sta.rs_fw;
+
+	if (le16_to_cpu(notif->flags) & IWL_TLC_NOTIF_INIT_RATE_MSK) {
+		lq_sta->last_rate_n_flags =
+			le32_to_cpu(notif->values[IWL_TLC_NOTIF_INIT_RATE_POS]);
+		IWL_DEBUG_RATE(mvm, "new rate_n_flags: 0x%X\n",
+			       lq_sta->last_rate_n_flags);
+	}
+}
+
+void rs_fw_rate_init(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
+		     enum nl80211_band band)
+{
+	struct ieee80211_hw *hw = mvm->hw;
+	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
+	struct iwl_lq_sta_rs_fw *lq_sta = &mvmsta->lq_sta.rs_fw;
+	u32 cmd_id = iwl_cmd_id(TLC_MNG_CONFIG_CMD, DATA_PATH_GROUP, 0);
+	struct ieee80211_supported_band *sband;
+	struct iwl_tlc_config_cmd cfg_cmd = {
+		.sta_id = mvmsta->sta_id,
+		.max_supp_ch_width = rs_fw_bw_from_sta_bw(sta),
+		.flags = cpu_to_le16(rs_fw_set_config_flags(mvm, sta)),
+		.chains = rs_fw_set_active_chains(iwl_mvm_get_valid_tx_ant(mvm)),
+		.max_supp_ss = sta->rx_nss,
+		.max_ampdu_cnt = cpu_to_le32(mvmsta->max_agg_bufsize),
+		.sgi_ch_width_supp = rs_fw_sgi_cw_support(sta),
+	};
+	int ret;
+
+	memset(lq_sta, 0, offsetof(typeof(*lq_sta), pers));
+
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+	iwl_mvm_reset_frame_stats(mvm);
+#endif
+	sband = hw->wiphy->bands[band];
+	rs_fw_set_supp_rates(sta, sband, &cfg_cmd);
+
+	ret = iwl_mvm_send_cmd_pdu(mvm, cmd_id, 0, sizeof(cfg_cmd), &cfg_cmd);
+	if (ret)
+		IWL_ERR(mvm, "Failed to send rate scale config (%d)\n", ret);
+
+	rs_fw_tlc_mng_notif_req_config(mvm, cfg_cmd.sta_id);
+}
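+
+/*
+ * Note: rs_fw_rate_init() is the TLC-offload counterpart of the
+ * driver-side rs_drv_rate_init(); it is expected to run only when the
+ * firmware advertises IWL_UCODE_TLV_CAPA_TLC_OFFLOAD, in which case
+ * mac80211 rate control is bypassed (HAS_RATE_CONTROL is set earlier
+ * in this patch).
+ */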
+
+int rs_fw_tx_protection(struct iwl_mvm *mvm, struct iwl_mvm_sta *mvmsta,
+			bool enable)
+{
+	/* TODO: need to introduce a new FW cmd since LQ cmd is not relevant */
+	IWL_DEBUG_RATE(mvm, "tx protection - not implemented yet.\n");
+	return 0;
+}
+
+void iwl_mvm_rs_add_sta(struct iwl_mvm *mvm, struct iwl_mvm_sta *mvmsta)
+{
+	struct iwl_lq_sta_rs_fw *lq_sta = &mvmsta->lq_sta.rs_fw;
+
+	IWL_DEBUG_RATE(mvm, "create station rate scale window\n");
+
+	lq_sta->pers.drv = mvm;
+	lq_sta->pers.sta_id = mvmsta->sta_id;
+	lq_sta->pers.chains = 0;
+	memset(lq_sta->pers.chain_signal, 0, sizeof(lq_sta->pers.chain_signal));
+	lq_sta->pers.last_rssi = S8_MIN;
+	lq_sta->last_rate_n_flags = 0;
+
+#ifdef CONFIG_MAC80211_DEBUGFS
+	lq_sta->pers.dbg_fixed_rate = 0;
+#endif
+}
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rs.c b/drivers/net/wireless/intel/iwlwifi/mvm/rs.c
index c69515e..56b3cf1 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/rs.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/rs.c
@@ -42,8 +42,6 @@
 #include "mvm.h"
 #include "debugfs.h"
 
-#define RS_NAME "iwl-mvm-rs"
-
 #define IWL_RATE_MAX_WINDOW		62	/* # tx in history window */
 
 /* Calculations of success ratio are done in fixed point where 12800 is 100%.
@@ -809,7 +807,7 @@ static int rs_collect_tlc_data(struct iwl_mvm *mvm,
 		return -EINVAL;
 
 	if (tbl->column != RS_COLUMN_INVALID) {
-		struct lq_sta_pers *pers = &mvmsta->lq_sta.pers;
+		struct lq_sta_pers *pers = &mvmsta->lq_sta.rs_drv.pers;
 
 		pers->tx_stats[tbl->column][scale_index].total += attempts;
 		pers->tx_stats[tbl->column][scale_index].success += successes;
@@ -1206,7 +1204,7 @@ void iwl_mvm_rs_tx_status(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
 	u8 lq_color = RS_DRV_DATA_LQ_COLOR_GET(tlc_info);
 	u32 tx_resp_hwrate = (uintptr_t)info->status.status_driver_data[1];
 	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
-	struct iwl_lq_sta *lq_sta = &mvmsta->lq_sta;
+	struct iwl_lq_sta *lq_sta = &mvmsta->lq_sta.rs_drv;
 
 	/* Treat uninitialized rate scaling data same as non-existing. */
 	if (!lq_sta) {
@@ -1416,13 +1414,13 @@ void iwl_mvm_rs_tx_status(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
 /*
  * mac80211 sends us Tx status
  */
-static void rs_mac80211_tx_status(void *mvm_r,
-				  struct ieee80211_supported_band *sband,
-				  struct ieee80211_sta *sta, void *priv_sta,
-				  struct sk_buff *skb)
+static void rs_drv_mac80211_tx_status(void *mvm_r,
+				      struct ieee80211_supported_band *sband,
+				      struct ieee80211_sta *sta, void *priv_sta,
+				      struct sk_buff *skb)
 {
 	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
-	struct iwl_op_mode *op_mode = (struct iwl_op_mode *)mvm_r;
+	struct iwl_op_mode *op_mode = mvm_r;
 	struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
 	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
 
@@ -1877,12 +1875,10 @@ static int rs_switch_to_column(struct iwl_mvm *mvm,
 	struct rs_rate *rate = &search_tbl->rate;
 	const struct rs_tx_column *column = &rs_tx_columns[col_id];
 	const struct rs_tx_column *curr_column = &rs_tx_columns[tbl->column];
-	u32 sz = (sizeof(struct iwl_scale_tbl_info) -
-		  (sizeof(struct iwl_rate_scale_data) * IWL_RATE_COUNT));
 	unsigned long rate_mask = 0;
 	u32 rate_idx = 0;
 
-	memcpy(search_tbl, tbl, sz);
+	memcpy(search_tbl, tbl, offsetof(struct iwl_scale_tbl_info, win));
 
 	rate->sgi = column->sgi;
 	rate->ant = column->ant;
@@ -2787,9 +2783,10 @@ static void rs_get_initial_rate(struct iwl_mvm *mvm,
 
 /* Save info about RSSI of last Rx */
 void rs_update_last_rssi(struct iwl_mvm *mvm,
-			 struct iwl_lq_sta *lq_sta,
+			 struct iwl_mvm_sta *mvmsta,
 			 struct ieee80211_rx_status *rx_status)
 {
+	struct iwl_lq_sta *lq_sta = &mvmsta->lq_sta.rs_drv;
 	int i;
 
 	lq_sta->pers.chains = rx_status->chains;
@@ -2858,15 +2855,15 @@ static void rs_initialize_lq(struct iwl_mvm *mvm,
 	iwl_mvm_send_lq_cmd(mvm, &lq_sta->lq, init);
 }
 
-static void rs_get_rate(void *mvm_r, struct ieee80211_sta *sta, void *mvm_sta,
-			struct ieee80211_tx_rate_control *txrc)
+static void rs_drv_get_rate(void *mvm_r, struct ieee80211_sta *sta,
+			    void *mvm_sta,
+			    struct ieee80211_tx_rate_control *txrc)
 {
-	struct sk_buff *skb = txrc->skb;
-	struct iwl_op_mode *op_mode __maybe_unused =
-			(struct iwl_op_mode *)mvm_r;
+	struct iwl_op_mode *op_mode = mvm_r;
 	struct iwl_mvm *mvm __maybe_unused = IWL_OP_MODE_GET_MVM(op_mode);
+	struct sk_buff *skb = txrc->skb;
 	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
-	struct iwl_lq_sta *lq_sta = mvm_sta;
+	struct iwl_lq_sta *lq_sta;
 	struct rs_rate *optimal_rate;
 	u32 last_ucode_rate;
 
@@ -2878,18 +2875,14 @@ static void rs_get_rate(void *mvm_r, struct ieee80211_sta *sta, void *mvm_sta,
 		mvm_sta = NULL;
 	}
 
-	/* TODO: handle rate_idx_mask and rate_idx_mcs_mask */
-
-	/* Treat uninitialized rate scaling data same as non-existing. */
-	if (lq_sta && !lq_sta->pers.drv) {
-		IWL_DEBUG_RATE(mvm, "Rate scaling not initialized yet.\n");
-		mvm_sta = NULL;
-	}
-
 	/* Send management frames and NO_ACK data using lowest rate. */
 	if (rate_control_send_low(sta, mvm_sta, txrc))
 		return;
 
+	if (!mvm_sta)
+		return;
+
+	lq_sta = mvm_sta;
 	iwl_mvm_hwrate_to_tx_rate(lq_sta->last_rate_n_flags,
 				  info->band, &info->control.rates[0]);
 	info->control.rates[0].count = 1;
@@ -2906,13 +2899,13 @@ static void rs_get_rate(void *mvm_r, struct ieee80211_sta *sta, void *mvm_sta,
 	}
 }
 
-static void *rs_alloc_sta(void *mvm_rate, struct ieee80211_sta *sta,
-			  gfp_t gfp)
+static void *rs_drv_alloc_sta(void *mvm_rate, struct ieee80211_sta *sta,
+			      gfp_t gfp)
 {
 	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
 	struct iwl_op_mode *op_mode = (struct iwl_op_mode *)mvm_rate;
 	struct iwl_mvm *mvm  = IWL_OP_MODE_GET_MVM(op_mode);
-	struct iwl_lq_sta *lq_sta = &mvmsta->lq_sta;
+	struct iwl_lq_sta *lq_sta = &mvmsta->lq_sta.rs_drv;
 
 	IWL_DEBUG_RATE(mvm, "create station rate scale window\n");
 
@@ -2926,7 +2919,7 @@ static void *rs_alloc_sta(void *mvm_rate, struct ieee80211_sta *sta,
 	memset(lq_sta->pers.chain_signal, 0, sizeof(lq_sta->pers.chain_signal));
 	lq_sta->pers.last_rssi = S8_MIN;
 
-	return &mvmsta->lq_sta;
+	return lq_sta;
 }
 
 static int rs_vht_highest_rx_mcs_index(struct ieee80211_sta_vht_cap *vht_cap,
@@ -3043,7 +3036,7 @@ static void rs_vht_init(struct iwl_mvm *mvm,
 }
 
 #ifdef CONFIG_IWLWIFI_DEBUGFS
-static void iwl_mvm_reset_frame_stats(struct iwl_mvm *mvm)
+void iwl_mvm_reset_frame_stats(struct iwl_mvm *mvm)
 {
 	spin_lock_bh(&mvm->drv_stats_lock);
 	memset(&mvm->drv_rx_stats, 0, sizeof(mvm->drv_rx_stats));
@@ -3111,15 +3104,15 @@ void iwl_mvm_update_frame_stats(struct iwl_mvm *mvm, u32 rate, bool agg)
 /*
  * Called after adding a new station to initialize rate scaling
  */
-void iwl_mvm_rs_rate_init(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
-			  enum nl80211_band band, bool init)
+static void rs_drv_rate_init(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
+			     enum nl80211_band band, bool init)
 {
 	int i, j;
 	struct ieee80211_hw *hw = mvm->hw;
 	struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap;
 	struct ieee80211_sta_vht_cap *vht_cap = &sta->vht_cap;
 	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
-	struct iwl_lq_sta *lq_sta = &mvmsta->lq_sta;
+	struct iwl_lq_sta *lq_sta = &mvmsta->lq_sta.rs_drv;
 	struct ieee80211_supported_band *sband;
 	unsigned long supp; /* must be unsigned long for for_each_set_bit */
 
@@ -3194,16 +3187,15 @@ void iwl_mvm_rs_rate_init(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
 	rs_initialize_lq(mvm, sta, lq_sta, band, init);
 }
 
-static void rs_rate_update(void *mvm_r,
-			   struct ieee80211_supported_band *sband,
-			   struct cfg80211_chan_def *chandef,
-			   struct ieee80211_sta *sta, void *priv_sta,
-			   u32 changed)
+static void rs_drv_rate_update(void *mvm_r,
+			       struct ieee80211_supported_band *sband,
+			       struct cfg80211_chan_def *chandef,
+			       struct ieee80211_sta *sta,
+			       void *priv_sta, u32 changed)
 {
+	struct iwl_op_mode *op_mode = mvm_r;
+	struct iwl_mvm *mvm __maybe_unused = IWL_OP_MODE_GET_MVM(op_mode);
 	u8 tid;
-	struct iwl_op_mode *op_mode  =
-			(struct iwl_op_mode *)mvm_r;
-	struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
 
 	if (!iwl_mvm_sta_from_mac80211(sta)->vif)
 		return;
@@ -3385,7 +3377,7 @@ static void rs_bfer_active_iter(void *_data,
 {
 	struct rs_bfer_active_iter_data *data = _data;
 	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
-	struct iwl_lq_cmd *lq_cmd = &mvmsta->lq_sta.lq;
+	struct iwl_lq_cmd *lq_cmd = &mvmsta->lq_sta.rs_drv.lq;
 	u32 ss_params = le32_to_cpu(lq_cmd->ss_params);
 
 	if (sta == data->exclude_sta)
@@ -3497,7 +3489,8 @@ static void rs_set_lq_ss_params(struct iwl_mvm *mvm,
 
 	/* Disallow BFER on another STA if active and we're a higher priority */
 	if (rs_bfer_priority_cmp(mvmsta, bfer_mvmsta) > 0) {
-		struct iwl_lq_cmd *bfersta_lq_cmd = &bfer_mvmsta->lq_sta.lq;
+		struct iwl_lq_cmd *bfersta_lq_cmd =
+			&bfer_mvmsta->lq_sta.rs_drv.lq;
 		u32 bfersta_ss_params = le32_to_cpu(bfersta_lq_cmd->ss_params);
 
 		bfersta_ss_params &= ~LQ_SS_BFER_ALLOWED;
@@ -3569,14 +3562,14 @@ static void *rs_alloc(struct ieee80211_hw *hw, struct dentry *debugfsdir)
 {
 	return hw->priv;
 }
+
 /* rate scale requires free function to be implemented */
 static void rs_free(void *mvm_rate)
 {
 	return;
 }
 
-static void rs_free_sta(void *mvm_r, struct ieee80211_sta *sta,
-			void *mvm_sta)
+static void rs_free_sta(void *mvm_r, struct ieee80211_sta *sta, void *mvm_sta)
 {
 	struct iwl_op_mode *op_mode __maybe_unused = mvm_r;
 	struct iwl_mvm *mvm __maybe_unused = IWL_OP_MODE_GET_MVM(op_mode);
@@ -3586,7 +3579,7 @@ static void rs_free_sta(void *mvm_r, struct ieee80211_sta *sta,
 }
 
 #ifdef CONFIG_MAC80211_DEBUGFS
-int rs_pretty_print_rate(char *buf, const u32 rate)
+int rs_pretty_print_rate(char *buf, int bufsz, const u32 rate)
 {
 
 	char *type, *bw;
@@ -3597,10 +3590,10 @@ int rs_pretty_print_rate(char *buf, const u32 rate)
 	    !(rate & RATE_MCS_VHT_MSK)) {
 		int index = iwl_hwrate_to_plcp_idx(rate);
 
-		return sprintf(buf, "Legacy | ANT: %s Rate: %s Mbps\n",
-			       rs_pretty_ant(ant),
-			       index == IWL_RATE_INVALID ? "BAD" :
-			       iwl_rate_mcs[index].mbps);
+		return scnprintf(buf, bufsz, "Legacy | ANT: %s Rate: %s Mbps\n",
+				 rs_pretty_ant(ant),
+				 index == IWL_RATE_INVALID ? "BAD" :
+				 iwl_rate_mcs[index].mbps);
 	}
 
 	if (rate & RATE_MCS_VHT_MSK) {
@@ -3634,12 +3627,13 @@ int rs_pretty_print_rate(char *buf, const u32 rate)
 		bw = "BAD BW";
 	}
 
-	return sprintf(buf, "%s | ANT: %s BW: %s MCS: %d NSS: %d %s%s%s%s\n",
-		       type, rs_pretty_ant(ant), bw, mcs, nss,
-		       (rate & RATE_MCS_SGI_MSK) ? "SGI " : "NGI ",
-		       (rate & RATE_MCS_STBC_MSK) ? "STBC " : "",
-		       (rate & RATE_MCS_LDPC_MSK) ? "LDPC " : "",
-		       (rate & RATE_MCS_BF_MSK) ? "BF " : "");
+	return scnprintf(buf, bufsz,
+			 "%s | ANT: %s BW: %s MCS: %d NSS: %d %s%s%s%s\n",
+			 type, rs_pretty_ant(ant), bw, mcs, nss,
+			 (rate & RATE_MCS_SGI_MSK) ? "SGI " : "NGI ",
+			 (rate & RATE_MCS_STBC_MSK) ? "STBC " : "",
+			 (rate & RATE_MCS_LDPC_MSK) ? "LDPC " : "",
+			 (rate & RATE_MCS_BF_MSK) ? "BF " : "");
 }
 
 /**
@@ -3696,65 +3690,70 @@ static ssize_t rs_sta_dbgfs_scale_table_read(struct file *file,
 	int desc = 0;
 	int i = 0;
 	ssize_t ret;
+	static const size_t bufsz = 2048;
 
 	struct iwl_lq_sta *lq_sta = file->private_data;
 	struct iwl_mvm_sta *mvmsta =
-		container_of(lq_sta, struct iwl_mvm_sta, lq_sta);
+		container_of(lq_sta, struct iwl_mvm_sta, lq_sta.rs_drv);
 	struct iwl_mvm *mvm;
 	struct iwl_scale_tbl_info *tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
 	struct rs_rate *rate = &tbl->rate;
 	u32 ss_params;
 
 	mvm = lq_sta->pers.drv;
-	buff = kmalloc(2048, GFP_KERNEL);
+	buff = kmalloc(bufsz, GFP_KERNEL);
 	if (!buff)
 		return -ENOMEM;
 
-	desc += sprintf(buff+desc, "sta_id %d\n", lq_sta->lq.sta_id);
-	desc += sprintf(buff+desc, "failed=%d success=%d rate=0%lX\n",
-			lq_sta->total_failed, lq_sta->total_success,
-			lq_sta->active_legacy_rate);
-	desc += sprintf(buff+desc, "fixed rate 0x%X\n",
-			lq_sta->pers.dbg_fixed_rate);
-	desc += sprintf(buff+desc, "valid_tx_ant %s%s%s\n",
+	desc += scnprintf(buff + desc, bufsz - desc,
+			  "sta_id %d\n", lq_sta->lq.sta_id);
+	desc += scnprintf(buff + desc, bufsz - desc,
+			  "failed=%d success=%d rate=0%lX\n",
+			  lq_sta->total_failed, lq_sta->total_success,
+			  lq_sta->active_legacy_rate);
+	desc += scnprintf(buff + desc, bufsz - desc, "fixed rate 0x%X\n",
+			  lq_sta->pers.dbg_fixed_rate);
+	desc += scnprintf(buff + desc, bufsz - desc, "valid_tx_ant %s%s%s\n",
 	    (iwl_mvm_get_valid_tx_ant(mvm) & ANT_A) ? "ANT_A," : "",
 	    (iwl_mvm_get_valid_tx_ant(mvm) & ANT_B) ? "ANT_B," : "",
 	    (iwl_mvm_get_valid_tx_ant(mvm) & ANT_C) ? "ANT_C" : "");
-	desc += sprintf(buff+desc, "lq type %s\n",
-			(is_legacy(rate)) ? "legacy" :
-			is_vht(rate) ? "VHT" : "HT");
+	desc += scnprintf(buff + desc, bufsz - desc, "lq type %s\n",
+			  (is_legacy(rate)) ? "legacy" :
+			  is_vht(rate) ? "VHT" : "HT");
 	if (!is_legacy(rate)) {
-		desc += sprintf(buff + desc, " %s",
+		desc += scnprintf(buff + desc, bufsz - desc, " %s",
 		   (is_siso(rate)) ? "SISO" : "MIMO2");
-		desc += sprintf(buff + desc, " %s",
+		desc += scnprintf(buff + desc, bufsz - desc, " %s",
 				(is_ht20(rate)) ? "20MHz" :
 				(is_ht40(rate)) ? "40MHz" :
 				(is_ht80(rate)) ? "80MHz" :
 				(is_ht160(rate)) ? "160MHz" : "BAD BW");
-		desc += sprintf(buff + desc, " %s %s %s %s\n",
+		desc += scnprintf(buff + desc, bufsz - desc, " %s %s %s %s\n",
 				(rate->sgi) ? "SGI" : "NGI",
 				(rate->ldpc) ? "LDPC" : "BCC",
 				(lq_sta->is_agg) ? "AGG on" : "",
 				(mvmsta->tlc_amsdu) ? "AMSDU on" : "");
 	}
-	desc += sprintf(buff+desc, "last tx rate=0x%X\n",
+	desc += scnprintf(buff + desc, bufsz - desc, "last tx rate=0x%X\n",
 			lq_sta->last_rate_n_flags);
-	desc += sprintf(buff+desc,
+	desc += scnprintf(buff + desc, bufsz - desc,
 			"general: flags=0x%X mimo-d=%d s-ant=0x%x d-ant=0x%x\n",
 			lq_sta->lq.flags,
 			lq_sta->lq.mimo_delim,
 			lq_sta->lq.single_stream_ant_msk,
 			lq_sta->lq.dual_stream_ant_msk);
 
-	desc += sprintf(buff+desc,
+	desc += scnprintf(buff + desc, bufsz - desc,
 			"agg: time_limit=%d dist_start_th=%d frame_cnt_limit=%d\n",
 			le16_to_cpu(lq_sta->lq.agg_time_limit),
 			lq_sta->lq.agg_disable_start_th,
 			lq_sta->lq.agg_frame_cnt_limit);
 
-	desc += sprintf(buff+desc, "reduced tpc=%d\n", lq_sta->lq.reduced_tpc);
+	desc += scnprintf(buff + desc, bufsz - desc, "reduced tpc=%d\n",
+			  lq_sta->lq.reduced_tpc);
 	ss_params = le32_to_cpu(lq_sta->lq.ss_params);
-	desc += sprintf(buff+desc, "single stream params: %s%s%s%s\n",
+	desc += scnprintf(buff + desc, bufsz - desc,
+			"single stream params: %s%s%s%s\n",
 			(ss_params & LQ_SS_PARAMS_VALID) ?
 			"VALID" : "INVALID",
 			(ss_params & LQ_SS_BFER_ALLOWED) ?
@@ -3763,7 +3762,7 @@ static ssize_t rs_sta_dbgfs_scale_table_read(struct file *file,
 			", STBC" : "",
 			(ss_params & LQ_SS_FORCE) ?
 			", FORCE" : "");
-	desc += sprintf(buff+desc,
+	desc += scnprintf(buff + desc, bufsz - desc,
 			"Start idx [0]=0x%x [1]=0x%x [2]=0x%x [3]=0x%x\n",
 			lq_sta->lq.initial_rate_index[0],
 			lq_sta->lq.initial_rate_index[1],
@@ -3773,8 +3772,9 @@ static ssize_t rs_sta_dbgfs_scale_table_read(struct file *file,
 	for (i = 0; i < LINK_QUAL_MAX_RETRY_NUM; i++) {
 		u32 r = le32_to_cpu(lq_sta->lq.rs_table[i]);
 
-		desc += sprintf(buff+desc, " rate[%d] 0x%X ", i, r);
-		desc += rs_pretty_print_rate(buff+desc, r);
+		desc += scnprintf(buff + desc, bufsz - desc,
+				  " rate[%d] 0x%X ", i, r);
+		desc += rs_pretty_print_rate(buff + desc, bufsz - desc, r);
 	}
 
 	ret = simple_read_from_buffer(user_buf, count, ppos, buff, desc);
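
[Editor's note] The sprintf() to scnprintf() conversion in this debugfs dump is what makes the fixed bufsz honest: the kernel's scnprintf() returns the number of characters actually stored in the buffer (not counting the trailing NUL), so the accumulating 'desc += scnprintf(buff + desc, bufsz - desc, ...)' pattern can never advance desc past bufsz. Plain snprintf() would not be safe here, since it returns the length that would have been written. A hedged userspace stand-in for the semantics, for illustration only:

    #include <stdarg.h>
    #include <stdio.h>

    /* Approximation of the kernel's scnprintf() on top of vsnprintf(). */
    static int scnprintf_sketch(char *buf, size_t size, const char *fmt, ...)
    {
    	va_list args;
    	int n;

    	if (size == 0)
    		return 0;

    	va_start(args, fmt);
    	n = vsnprintf(buf, size, fmt, args);
    	va_end(args);

    	if (n < 0)
    		return 0;
    	/* clamp to what was actually stored, excluding the NUL */
    	return (size_t)n < size ? n : (int)(size - 1);
    }
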
@@ -3987,12 +3987,13 @@ static ssize_t iwl_dbgfs_ss_force_write(struct iwl_lq_sta *lq_sta, char *buf,
 
 MVM_DEBUGFS_READ_WRITE_FILE_OPS(ss_force, 32);
 
-static void rs_add_debugfs(void *mvm, void *priv_sta, struct dentry *dir)
+static void rs_drv_add_sta_debugfs(void *mvm, void *priv_sta,
+				   struct dentry *dir)
 {
 	struct iwl_lq_sta *lq_sta = priv_sta;
 	struct iwl_mvm_sta *mvmsta;
 
-	mvmsta = container_of(lq_sta, struct iwl_mvm_sta, lq_sta);
+	mvmsta = container_of(lq_sta, struct iwl_mvm_sta, lq_sta.rs_drv);
 
 	if (!mvmsta->vif)
 		return;
@@ -4014,7 +4015,7 @@ static void rs_add_debugfs(void *mvm, void *priv_sta, struct dentry *dir)
 	IWL_ERR((struct iwl_mvm *)mvm, "Can't create debugfs entity\n");
 }
 
-static void rs_remove_debugfs(void *mvm, void *mvm_sta)
+void rs_remove_sta_debugfs(void *mvm, void *mvm_sta)
 {
 }
 #endif
@@ -4024,50 +4025,53 @@ static void rs_remove_debugfs(void *mvm, void *mvm_sta)
  * the station is added. Since mac80211 calls this function before a
  * station is added we ignore it.
  */
-static void rs_rate_init_stub(void *mvm_r,
-			      struct ieee80211_supported_band *sband,
-			      struct cfg80211_chan_def *chandef,
-			      struct ieee80211_sta *sta, void *mvm_sta)
+static void rs_rate_init_ops(void *mvm_r,
+			     struct ieee80211_supported_band *sband,
+			     struct cfg80211_chan_def *chandef,
+			     struct ieee80211_sta *sta, void *mvm_sta)
 {
 }
 
-static const struct rate_control_ops rs_mvm_ops = {
+/* ops for rate scaling implemented in the driver */
+static const struct rate_control_ops rs_mvm_ops_drv = {
 	.name = RS_NAME,
-	.tx_status = rs_mac80211_tx_status,
-	.get_rate = rs_get_rate,
-	.rate_init = rs_rate_init_stub,
+	.tx_status = rs_drv_mac80211_tx_status,
+	.get_rate = rs_drv_get_rate,
+	.rate_init = rs_rate_init_ops,
 	.alloc = rs_alloc,
 	.free = rs_free,
-	.alloc_sta = rs_alloc_sta,
+	.alloc_sta = rs_drv_alloc_sta,
 	.free_sta = rs_free_sta,
-	.rate_update = rs_rate_update,
+	.rate_update = rs_drv_rate_update,
 #ifdef CONFIG_MAC80211_DEBUGFS
-	.add_sta_debugfs = rs_add_debugfs,
-	.remove_sta_debugfs = rs_remove_debugfs,
+	.add_sta_debugfs = rs_drv_add_sta_debugfs,
+	.remove_sta_debugfs = rs_remove_sta_debugfs,
 #endif
 };
 
+void iwl_mvm_rs_rate_init(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
+			  enum nl80211_band band, bool init)
+{
+	if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_TLC_OFFLOAD))
+		rs_fw_rate_init(mvm, sta, band);
+	else
+		rs_drv_rate_init(mvm, sta, band, init);
+}
+
 int iwl_mvm_rate_control_register(void)
 {
-	return ieee80211_rate_control_register(&rs_mvm_ops);
+	return ieee80211_rate_control_register(&rs_mvm_ops_drv);
 }
 
 void iwl_mvm_rate_control_unregister(void)
 {
-	ieee80211_rate_control_unregister(&rs_mvm_ops);
+	ieee80211_rate_control_unregister(&rs_mvm_ops_drv);
 }
 
-/**
- * iwl_mvm_tx_protection - Gets LQ command, change it to enable/disable
- * Tx protection, according to this request and previous requests,
- * and send the LQ command.
- * @mvmsta: The station
- * @enable: Enable Tx protection?
- */
-int iwl_mvm_tx_protection(struct iwl_mvm *mvm, struct iwl_mvm_sta *mvmsta,
-			  bool enable)
+static int rs_drv_tx_protection(struct iwl_mvm *mvm, struct iwl_mvm_sta *mvmsta,
+				bool enable)
 {
-	struct iwl_lq_cmd *lq = &mvmsta->lq_sta.lq;
+	struct iwl_lq_cmd *lq = &mvmsta->lq_sta.rs_drv.lq;
 
 	lockdep_assert_held(&mvm->mutex);
 
@@ -4083,3 +4087,17 @@ int iwl_mvm_tx_protection(struct iwl_mvm *mvm, struct iwl_mvm_sta *mvmsta,
 
 	return iwl_mvm_send_lq_cmd(mvm, lq, false);
 }
+
+/**
+ * iwl_mvm_tx_protection - ask FW to enable/disable RTS/CTS protection
+ * @mvmsta: The station
+ * @enable: Enable Tx protection?
+ */
+int iwl_mvm_tx_protection(struct iwl_mvm *mvm, struct iwl_mvm_sta *mvmsta,
+			  bool enable)
+{
+	if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_TLC_OFFLOAD))
+		return rs_fw_tx_protection(mvm, mvmsta, enable);
+	else
+		return rs_drv_tx_protection(mvm, mvmsta, enable);
+}
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rs.h b/drivers/net/wireless/intel/iwlwifi/mvm/rs.h
index 32b4d66..fb18cb8 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/rs.h
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/rs.h
@@ -36,6 +36,8 @@
 #include "fw-api.h"
 #include "iwl-trans.h"
 
+#define RS_NAME "iwl-mvm-rs"
+
 struct iwl_rs_rate_info {
 	u8 plcp;	  /* uCode API:  IWL_RATE_6M_PLCP, etc. */
 	u8 plcp_ht_siso;  /* uCode API:  IWL_RATE_SISO_6M_PLCP, etc. */
@@ -218,6 +220,38 @@ struct iwl_rate_mcs_info {
 };
 
 /**
+ * struct iwl_lq_sta_rs_fw - rate and related statistics for RS in FW
+ * @last_rate_n_flags: last rate reported by FW
+ * @sta_id: the id of the station
+ * @dbg_fixed_rate: for debug, use fixed rate if not 0
+ *	(only when CONFIG_MAC80211_DEBUGFS is set)
+ * @dbg_agg_frame_count_lim: for debug, max number of frames in A-MPDU
+ *	(only when CONFIG_MAC80211_DEBUGFS is set)
+ * @chains: bitmask of chains reported in %chain_signal
+ * @chain_signal: per chain signal strength
+ * @last_rssi: last rssi reported
+ * @drv: pointer back to the driver data
+ */
+
+struct iwl_lq_sta_rs_fw {
+	/* last tx rate_n_flags */
+	u32 last_rate_n_flags;
+
+	/* persistent fields - initialized only once - keep last! */
+	struct lq_sta_pers_rs_fw {
+		u32 sta_id;
+#ifdef CONFIG_MAC80211_DEBUGFS
+		u32 dbg_fixed_rate;
+		u16 dbg_agg_frame_count_lim;
+#endif
+		u8 chains;
+		s8 chain_signal[IEEE80211_MAX_CHAINS];
+		s8 last_rssi;
+		struct iwl_mvm *drv;
+	} pers;
+};
+
+/**
  * struct iwl_rate_scale_data -- tx success history for one rate
  */
 struct iwl_rate_scale_data {
@@ -407,4 +441,18 @@ struct iwl_mvm_sta;
 int iwl_mvm_tx_protection(struct iwl_mvm *mvm, struct iwl_mvm_sta *mvmsta,
 			  bool enable);
 
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+void iwl_mvm_reset_frame_stats(struct iwl_mvm *mvm);
+#endif
+
+#ifdef CONFIG_MAC80211_DEBUGFS
+void rs_remove_sta_debugfs(void *mvm, void *mvm_sta);
+#endif
+
+void iwl_mvm_rs_add_sta(struct iwl_mvm *mvm, struct iwl_mvm_sta *mvmsta);
+void rs_fw_rate_init(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
+		     enum nl80211_band band);
+int rs_fw_tx_protection(struct iwl_mvm *mvm, struct iwl_mvm_sta *mvmsta,
+			bool enable);
+void iwl_mvm_tlc_update_notif(struct iwl_mvm *mvm, struct iwl_rx_packet *pkt);
 #endif /* __rs__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rx.c b/drivers/net/wireless/intel/iwlwifi/mvm/rx.c
index d1a4068..63a57f0 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/rx.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/rx.c
@@ -383,7 +383,7 @@ void iwl_mvm_rx_rx_mpdu(struct iwl_mvm *mvm, struct napi_struct *napi,
 								 false);
 		}
 
-		rs_update_last_rssi(mvm, &mvmsta->lq_sta, rx_status);
+		rs_update_last_rssi(mvm, mvmsta, rx_status);
 
 		if (iwl_fw_dbg_trigger_enabled(mvm->fw, FW_DBG_TRIGGER_RSSI) &&
 		    ieee80211_is_beacon(hdr->frame_control)) {
@@ -439,7 +439,8 @@ void iwl_mvm_rx_rx_mpdu(struct iwl_mvm *mvm, struct napi_struct *napi,
 		rx_status->bw = RATE_INFO_BW_160;
 		break;
 	}
-	if (rate_n_flags & RATE_MCS_SGI_MSK)
+	if (!(rate_n_flags & RATE_MCS_CCK_MSK) &&
+	    rate_n_flags & RATE_MCS_SGI_MSK)
 		rx_status->enc_flags |= RX_ENC_FLAG_SHORT_GI;
 	if (rate_n_flags & RATE_HT_MCS_GF_MSK)
 		rx_status->enc_flags |= RX_ENC_FLAG_HT_GF;
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
index 3b8d443..4a70e62 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
@@ -943,7 +943,7 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi,
 								 false);
 		}
 
-		rs_update_last_rssi(mvm, &mvmsta->lq_sta, rx_status);
+		rs_update_last_rssi(mvm, mvmsta, rx_status);
 
 		if (iwl_fw_dbg_trigger_enabled(mvm->fw, FW_DBG_TRIGGER_RSSI) &&
 		    ieee80211_is_beacon(hdr->frame_control)) {
@@ -1020,7 +1020,9 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi,
 		rx_status->bw = RATE_INFO_BW_160;
 		break;
 	}
-	if (rate_n_flags & RATE_MCS_SGI_MSK)
+
+	if (!(rate_n_flags & RATE_MCS_CCK_MSK) &&
+	    rate_n_flags & RATE_MCS_SGI_MSK)
 		rx_status->enc_flags |= RX_ENC_FLAG_SHORT_GI;
 	if (rate_n_flags & RATE_HT_MCS_GF_MSK)
 		rx_status->enc_flags |= RX_ENC_FLAG_HT_GF;
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/scan.c b/drivers/net/wireless/intel/iwlwifi/mvm/scan.c
index e4fd476..356b16f 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/scan.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/scan.c
@@ -664,6 +664,22 @@ static u8 *iwl_mvm_copy_and_insert_ds_elem(struct iwl_mvm *mvm, const u8 *ies,
 	return newpos;
 }
 
+#define WFA_TPC_IE_LEN	9
+
+static void iwl_mvm_add_tpc_report_ie(u8 *pos)
+{
+	pos[0] = WLAN_EID_VENDOR_SPECIFIC;
+	pos[1] = WFA_TPC_IE_LEN - 2;
+	pos[2] = (WLAN_OUI_MICROSOFT >> 16) & 0xff;
+	pos[3] = (WLAN_OUI_MICROSOFT >> 8) & 0xff;
+	pos[4] = WLAN_OUI_MICROSOFT & 0xff;
+	pos[5] = WLAN_OUI_TYPE_MICROSOFT_TPC;
+	pos[6] = 0;
+	/* pos[7] - tx power will be inserted by the FW */
+	pos[7] = 0;
+	pos[8] = 0;
+}
+
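
[Editor's note] For firmware without IWL_UCODE_TLV_CAPA_WFA_TPC_REP_IE_SUPPORT, the driver now appends the WFA TPC Report element itself right after the common IEs (see the iwl_mvm_build_scan_probe() hunk below), leaving pos[7] for the firmware to fill with the actual tx power. As a sanity check, the nine bytes built above come out as follows; the byte values follow the kernel's ieee80211.h definitions, while the per-byte field names are our reading, not from the patch:

    static const unsigned char wfa_tpc_report_ie[WFA_TPC_IE_LEN] = {
    	0xdd,			/* WLAN_EID_VENDOR_SPECIFIC (221) */
    	0x07,			/* length: WFA_TPC_IE_LEN - 2 */
    	0x00, 0x50, 0xf2,	/* WLAN_OUI_MICROSOFT */
    	0x08,			/* WLAN_OUI_TYPE_MICROSOFT_TPC */
    	0x00,			/* OUI subtype */
    	0x00,			/* tx power, filled in by the firmware */
    	0x00,			/* link margin */
    };
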
 static void
 iwl_mvm_build_scan_probe(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
 			 struct ieee80211_scan_ies *ies,
@@ -716,7 +732,16 @@ iwl_mvm_build_scan_probe(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
 
 	memcpy(pos, ies->common_ies, ies->common_ie_len);
 	params->preq.common_data.offset = cpu_to_le16(pos - params->preq.buf);
-	params->preq.common_data.len = cpu_to_le16(ies->common_ie_len);
+
+	if (iwl_mvm_rrm_scan_needed(mvm) &&
+	    !fw_has_capa(&mvm->fw->ucode_capa,
+			 IWL_UCODE_TLV_CAPA_WFA_TPC_REP_IE_SUPPORT)) {
+		iwl_mvm_add_tpc_report_ie(pos + ies->common_ie_len);
+		params->preq.common_data.len = cpu_to_le16(ies->common_ie_len +
+							   WFA_TPC_IE_LEN);
+	} else {
+		params->preq.common_data.len = cpu_to_le16(ies->common_ie_len);
+	}
 }
 
 static void iwl_mvm_scan_lmac_dwell(struct iwl_mvm *mvm,
@@ -781,7 +806,9 @@ static int iwl_mvm_scan_lmac_flags(struct iwl_mvm *mvm,
 	if (params->type == IWL_SCAN_TYPE_FRAGMENTED)
 		flags |= IWL_MVM_LMAC_SCAN_FLAG_FRAGMENTED;
 
-	if (iwl_mvm_rrm_scan_needed(mvm))
+	if (iwl_mvm_rrm_scan_needed(mvm) &&
+	    fw_has_capa(&mvm->fw->ucode_capa,
+			IWL_UCODE_TLV_CAPA_WFA_TPC_REP_IE_SUPPORT))
 		flags |= IWL_MVM_LMAC_SCAN_FLAGS_RRM_ENABLED;
 
 	if (params->pass_all)
@@ -1183,7 +1210,9 @@ static u16 iwl_mvm_scan_umac_flags(struct iwl_mvm *mvm,
 			flags |= IWL_UMAC_SCAN_GEN_FLAGS_LMAC2_FRAGMENTED;
 	}
 
-	if (iwl_mvm_rrm_scan_needed(mvm))
+	if (iwl_mvm_rrm_scan_needed(mvm) &&
+	    fw_has_capa(&mvm->fw->ucode_capa,
+			IWL_UCODE_TLV_CAPA_WFA_TPC_REP_IE_SUPPORT))
 		flags |= IWL_UMAC_SCAN_GEN_FLAGS_RRM_ENABLED;
 
 	if (params->pass_all)
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
index 1add561..9d33f7a 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
@@ -1439,6 +1439,13 @@ int iwl_mvm_add_sta(struct iwl_mvm *mvm,
 			goto err;
 	}
 
+	/*
+	 * if rs is registered with mac80211, "add station" is handled via
+	 * the corresponding ops; otherwise we need to notify rate scaling here
+	 */
+	if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_TLC_OFFLOAD))
+		iwl_mvm_rs_add_sta(mvm, mvm_sta);
+
 update_fw:
 	ret = iwl_mvm_sta_send_to_fw(mvm, sta, sta_update, sta_flags);
 	if (ret)
@@ -1762,7 +1769,7 @@ int iwl_mvm_add_aux_sta(struct iwl_mvm *mvm)
 	}
 
 	/*
-	 * For a000 firmware and on we cannot add queue to a station unknown
+	 * For 22000 firmware and on we cannot add queue to a station unknown
 	 * to firmware so enable queue here - after the station was added
 	 */
 	if (iwl_mvm_has_new_tx_api(mvm))
@@ -1885,7 +1892,7 @@ int iwl_mvm_send_add_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
 		return ret;
 
 	/*
-	 * For a000 firmware and on we cannot add queue to a station unknown
+	 * For 22000 firmware and on we cannot add queue to a station unknown
 	 * to firmware so enable queue here - after the station was added
 	 */
 	if (iwl_mvm_has_new_tx_api(mvm)) {
@@ -2064,7 +2071,7 @@ int iwl_mvm_add_mcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
 
 	/*
 	 * Enable cab queue after the ADD_STA command is sent.
-	 * This is needed for a000 firmware which won't accept SCD_QUEUE_CFG
+	 * This is needed for 22000 firmware which won't accept SCD_QUEUE_CFG
 	 * command with unknown station id, and for FW that doesn't support
 	 * station API since the cab queue is not included in the
 	 * tfd_queue_mask.
@@ -2530,7 +2537,7 @@ int iwl_mvm_sta_tx_agg_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
 			    tid_data->next_reclaimed);
 
 	/*
-	 * In A000 HW, the next_reclaimed index is only 8 bit, so we'll need
+	 * In 22000 HW, the next_reclaimed index is only 8 bit, so we'll need
 	 * to align the wrap around of ssn so we compare relevant values.
 	 */
 	normalized_ssn = tid_data->ssn;
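
[Editor's note] This comment recurs in three hunks of the patch, so the arithmetic it hints at is worth spelling out once: 22000/gen2 firmware keeps next_reclaimed as an 8-bit counter while the IEEE sequence number is 12 bits wide, so the driver masks the SN down before comparing. A sketch of the normalization; the masking itself lives in the surrounding driver code rather than in this hunk, so treat the helper as illustrative:

    /* e.g. ssn = 0x312 normalizes to 0x12 on 22000/gen2 hardware,
     * making it comparable with the firmware's 8-bit next_reclaimed */
    static u16 normalize_ssn(u16 ssn, bool gen2_hw)
    {
    	return gen2_hw ? (ssn & 0xff) : ssn;
    }
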
@@ -2575,6 +2582,14 @@ int iwl_mvm_sta_tx_agg_oper(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
 		.aggregate = true,
 	};
 
+	/*
+	 * When the FW supports TLC_OFFLOAD, it also implements the Tx
+	 * aggregation manager, so this function should never be called.
+	 */
+	if (WARN_ON_ONCE(fw_has_capa(&mvm->fw->ucode_capa,
+				     IWL_UCODE_TLV_CAPA_TLC_OFFLOAD)))
+		return -EINVAL;
+
 	BUILD_BUG_ON((sizeof(mvmsta->agg_tids) * BITS_PER_BYTE)
 		     != IWL_MAX_TID_COUNT);
 
@@ -2672,12 +2687,12 @@ int iwl_mvm_sta_tx_agg_oper(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
 	 */
 	mvmsta->max_agg_bufsize =
 		min(mvmsta->max_agg_bufsize, buf_size);
-	mvmsta->lq_sta.lq.agg_frame_cnt_limit = mvmsta->max_agg_bufsize;
+	mvmsta->lq_sta.rs_drv.lq.agg_frame_cnt_limit = mvmsta->max_agg_bufsize;
 
 	IWL_DEBUG_HT(mvm, "Tx aggregation enabled on ra = %pM tid = %d\n",
 		     sta->addr, tid);
 
-	return iwl_mvm_send_lq_cmd(mvm, &mvmsta->lq_sta.lq, false);
+	return iwl_mvm_send_lq_cmd(mvm, &mvmsta->lq_sta.rs_drv.lq, false);
 }
 
 static void iwl_mvm_unreserve_agg_queue(struct iwl_mvm *mvm,
@@ -3615,7 +3630,7 @@ u16 iwl_mvm_tid_queued(struct iwl_mvm *mvm, struct iwl_mvm_tid_data *tid_data)
 	u16 sn = IEEE80211_SEQ_TO_SN(tid_data->seq_number);
 
 	/*
-	 * In A000 HW, the next_reclaimed index is only 8 bit, so we'll need
+	 * In 22000 HW, the next_reclaimed index is only 8 bit, so we'll need
 	 * to align the wrap around of ssn so we compare relevant values.
 	 */
 	if (mvm->trans->cfg->gen2)
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sta.h b/drivers/net/wireless/intel/iwlwifi/mvm/sta.h
index aedabe1..5ffd6ad 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/sta.h
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/sta.h
@@ -383,6 +383,8 @@ struct iwl_mvm_rxq_dup_data {
  * and from Tx response flow, it needs a spinlock.
  * @tid_data: per tid data + mgmt. Look at %iwl_mvm_tid_data.
  * @tid_to_baid: a simple map of TID to baid
+ * @lq_sta: holds the rate scaling data, either for the case when RS is
+ *	done in the driver (%rs_drv) or in the FW (%rs_fw).
  * @reserved_queue: the queue reserved for this STA for DQA purposes
 *	Every STA is given one reserved queue to allow it to operate. If no
  *	such queue can be guaranteed, the STA addition will fail.
@@ -417,7 +419,10 @@ struct iwl_mvm_sta {
 	spinlock_t lock;
 	struct iwl_mvm_tid_data tid_data[IWL_MAX_TID_COUNT + 1];
 	u8 tid_to_baid[IWL_MAX_TID_COUNT];
-	struct iwl_lq_sta lq_sta;
+	union {
+		struct iwl_lq_sta_rs_fw rs_fw;
+		struct iwl_lq_sta rs_drv;
+	} lq_sta;
 	struct ieee80211_vif *vif;
 	struct iwl_mvm_key_pn __rcu *ptk_pn[4];
 	struct iwl_mvm_rxq_dup_data *dup_data;
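
[Editor's note] The union is safe because exactly one rate-scaling implementation is active for the lifetime of the device: the choice is made once from the firmware's TLC_OFFLOAD capability, never per station or per frame, so no discriminator field is needed. A sketch of the dispatch pattern; the helper name here is invented, and the patch open-codes the fw_has_capa() test at each call site:

    static inline bool mvm_has_tlc_offload_sketch(const struct iwl_mvm *mvm)
    {
    	return fw_has_capa(&mvm->fw->ucode_capa,
    			   IWL_UCODE_TLV_CAPA_TLC_OFFLOAD);
    }

    /* offload firmware uses lq_sta.rs_fw, everything else lq_sta.rs_drv */
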
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c b/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c
index e25cda9..200ab50 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c
@@ -101,7 +101,6 @@ void iwl_mvm_te_clear_data(struct iwl_mvm *mvm,
 void iwl_mvm_roc_done_wk(struct work_struct *wk)
 {
 	struct iwl_mvm *mvm = container_of(wk, struct iwl_mvm, roc_done_wk);
-	u32 queues = 0;
 
 	/*
 	 * Clear the ROC_RUNNING/ROC_AUX_RUNNING status bits.
@@ -110,14 +109,10 @@ void iwl_mvm_roc_done_wk(struct work_struct *wk)
 	 * in the case that the time event actually completed in the firmware
 	 * (which is handled in iwl_mvm_te_handle_notif).
 	 */
-	if (test_and_clear_bit(IWL_MVM_STATUS_ROC_RUNNING, &mvm->status)) {
-		queues |= BIT(IWL_MVM_OFFCHANNEL_QUEUE);
+	if (test_and_clear_bit(IWL_MVM_STATUS_ROC_RUNNING, &mvm->status))
 		iwl_mvm_unref(mvm, IWL_MVM_REF_ROC);
-	}
-	if (test_and_clear_bit(IWL_MVM_STATUS_ROC_AUX_RUNNING, &mvm->status)) {
-		queues |= BIT(mvm->aux_queue);
+	if (test_and_clear_bit(IWL_MVM_STATUS_ROC_AUX_RUNNING, &mvm->status))
 		iwl_mvm_unref(mvm, IWL_MVM_REF_ROC_AUX);
-	}
 
 	synchronize_net();
 
@@ -777,12 +772,6 @@ int iwl_mvm_start_p2p_roc(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
 		return -EBUSY;
 	}
 
-	/*
-	 * Flush the done work, just in case it's still pending, so that
-	 * the work it does can complete and we can accept new frames.
-	 */
-	flush_work(&mvm->roc_done_wk);
-
 	time_cmd.action = cpu_to_le32(FW_CTXT_ACTION_ADD);
 	time_cmd.id_and_color =
 		cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id, mvmvif->color));
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
index 333bcb7..84d1652 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
@@ -1132,7 +1132,7 @@ static void iwl_mvm_check_ratid_empty(struct iwl_mvm *mvm,
 	}
 
 	/*
-	 * In A000 HW, the next_reclaimed index is only 8 bit, so we'll need
+	 * In 22000 HW, the next_reclaimed index is only 8 bit, so we'll need
 	 * to align the wrap around of ssn so we compare relevant values.
 	 */
 	normalized_ssn = tid_data->ssn;
@@ -1624,7 +1624,7 @@ static void iwl_mvm_tx_reclaim(struct iwl_mvm *mvm, int sta_id, int tid,
 	int freed;
 
 	if (WARN_ONCE(sta_id >= IWL_MVM_STATION_COUNT ||
-		      tid >= IWL_MAX_TID_COUNT,
+		      tid > IWL_MAX_TID_COUNT,
 		      "sta_id %d tid %d", sta_id, tid))
 		return;
 
@@ -1679,7 +1679,7 @@ static void iwl_mvm_tx_reclaim(struct iwl_mvm *mvm, int sta_id, int tid,
 		if (ieee80211_is_data_qos(hdr->frame_control))
 			freed++;
 		else
-			WARN_ON_ONCE(1);
+			WARN_ON_ONCE(tid != IWL_MAX_TID_COUNT);
 
 		iwl_trans_free_tx_cmd(mvm->trans, info->driver_data[1]);
 
@@ -1719,8 +1719,12 @@ static void iwl_mvm_tx_reclaim(struct iwl_mvm *mvm, int sta_id, int tid,
 		ba_info->band = chanctx_conf->def.chan->band;
 		iwl_mvm_hwrate_to_tx_status(rate, ba_info);
 
-		IWL_DEBUG_TX_REPLY(mvm, "No reclaim. Update rs directly\n");
-		iwl_mvm_rs_tx_status(mvm, sta, tid, ba_info, false);
+		if (!fw_has_capa(&mvm->fw->ucode_capa,
+				 IWL_UCODE_TLV_CAPA_TLC_OFFLOAD)) {
+			IWL_DEBUG_TX_REPLY(mvm,
+					   "No reclaim. Update rs directly\n");
+			iwl_mvm_rs_tx_status(mvm, sta, tid, ba_info, false);
+		}
 	}
 
 out:
@@ -1771,8 +1775,12 @@ void iwl_mvm_rx_ba_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb)
 			struct iwl_mvm_compressed_ba_tfd *ba_tfd =
 				&ba_res->tfd[i];
 
+			tid = ba_tfd->tid;
+			if (tid == IWL_MGMT_TID)
+				tid = IWL_MAX_TID_COUNT;
+
 			mvmsta->tid_data[i].lq_color = lq_color;
-			iwl_mvm_tx_reclaim(mvm, sta_id, ba_tfd->tid,
+			iwl_mvm_tx_reclaim(mvm, sta_id, tid,
 					   (int)(le16_to_cpu(ba_tfd->q_num)),
 					   le16_to_cpu(ba_tfd->tfd_index),
 					   &ba_info,
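
[Editor's note] The firmware reports block-acks for management frames under a dedicated sentinel TID, while the driver's tid_data[] array keeps its management slot at index IWL_MAX_TID_COUNT; the remap above, together with the relaxed 'tid > IWL_MAX_TID_COUNT' bound earlier in this file, makes that slot reachable. A sketch with the constant values assumed for illustration:

    #define IWL_MAX_TID_COUNT	8	/* QoS TIDs 0..7, slot 8 is mgmt */
    #define IWL_MGMT_TID		15	/* firmware sentinel for mgmt BAs */

    static int ba_tid_to_index(int fw_tid)
    {
    	return fw_tid == IWL_MGMT_TID ? IWL_MAX_TID_COUNT : fw_tid;
    }
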
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/utils.c b/drivers/net/wireless/intel/iwlwifi/mvm/utils.c
index 03ffd84..0b7e29b 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/utils.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/utils.c
@@ -278,8 +278,8 @@ u8 iwl_mvm_next_antenna(struct iwl_mvm *mvm, u8 valid, u8 last_idx)
 	u8 ind = last_idx;
 	int i;
 
-	for (i = 0; i < RATE_MCS_ANT_NUM; i++) {
-		ind = (ind + 1) % RATE_MCS_ANT_NUM;
+	for (i = 0; i < MAX_RS_ANT_NUM; i++) {
+		ind = (ind + 1) % MAX_RS_ANT_NUM;
 		if (valid & BIT(ind))
 			return ind;
 	}
@@ -595,6 +595,12 @@ static void iwl_mvm_dump_lmac_error_log(struct iwl_mvm *mvm, u32 base)
 
 void iwl_mvm_dump_nic_error_log(struct iwl_mvm *mvm)
 {
+	if (!test_bit(STATUS_DEVICE_ENABLED, &mvm->trans->status)) {
+		IWL_ERR(mvm,
+			"DEVICE_ENABLED bit is not set. Aborting dump.\n");
+		return;
+	}
+
 	iwl_mvm_dump_lmac_error_log(mvm, mvm->error_event_table[0]);
 
 	if (mvm->error_event_table[1])
@@ -906,7 +912,9 @@ int iwl_mvm_send_lq_cmd(struct iwl_mvm *mvm, struct iwl_lq_cmd *lq, bool init)
 		.data = { lq, },
 	};
 
-	if (WARN_ON(lq->sta_id == IWL_MVM_INVALID_STA))
+	if (WARN_ON(lq->sta_id == IWL_MVM_INVALID_STA ||
+		    fw_has_capa(&mvm->fw->ucode_capa,
+				IWL_UCODE_TLV_CAPA_TLC_OFFLOAD)))
 		return -EINVAL;
 
 	return iwl_mvm_send_cmd(mvm, &cmd);
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
index ccd7c33..56fc287 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
@@ -652,20 +652,20 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
 	{IWL_PCI_DEVICE(0xA370, 0x4034, iwl9560_2ac_cfg_soc)},
 	{IWL_PCI_DEVICE(0xA370, 0x40A4, iwl9462_2ac_cfg_soc)},
 
-/* a000 Series */
-	{IWL_PCI_DEVICE(0x2720, 0x0A10, iwla000_2ac_cfg_hr_cdb)},
-	{IWL_PCI_DEVICE(0x34F0, 0x0310, iwla000_2ac_cfg_jf)},
-	{IWL_PCI_DEVICE(0x2720, 0x0000, iwla000_2ax_cfg_hr)},
-	{IWL_PCI_DEVICE(0x34F0, 0x0070, iwla000_2ax_cfg_hr)},
-	{IWL_PCI_DEVICE(0x2720, 0x0078, iwla000_2ax_cfg_hr)},
-	{IWL_PCI_DEVICE(0x2720, 0x0070, iwla000_2ac_cfg_hr_cdb)},
-	{IWL_PCI_DEVICE(0x2720, 0x0030, iwla000_2ac_cfg_hr_cdb)},
-	{IWL_PCI_DEVICE(0x2720, 0x1080, iwla000_2ax_cfg_hr)},
-	{IWL_PCI_DEVICE(0x2720, 0x0090, iwla000_2ac_cfg_hr_cdb)},
-	{IWL_PCI_DEVICE(0x2720, 0x0310, iwla000_2ac_cfg_hr_cdb)},
-	{IWL_PCI_DEVICE(0x40C0, 0x0000, iwla000_2ax_cfg_hr)},
-	{IWL_PCI_DEVICE(0x40C0, 0x0A10, iwla000_2ax_cfg_hr)},
-	{IWL_PCI_DEVICE(0xA0F0, 0x0000, iwla000_2ax_cfg_hr)},
+/* 22000 Series */
+	{IWL_PCI_DEVICE(0x2720, 0x0A10, iwl22000_2ac_cfg_hr_cdb)},
+	{IWL_PCI_DEVICE(0x34F0, 0x0310, iwl22000_2ac_cfg_jf)},
+	{IWL_PCI_DEVICE(0x2720, 0x0000, iwl22000_2ax_cfg_hr)},
+	{IWL_PCI_DEVICE(0x34F0, 0x0070, iwl22000_2ax_cfg_hr)},
+	{IWL_PCI_DEVICE(0x2720, 0x0078, iwl22000_2ax_cfg_hr)},
+	{IWL_PCI_DEVICE(0x2720, 0x0070, iwl22000_2ac_cfg_hr_cdb)},
+	{IWL_PCI_DEVICE(0x2720, 0x0030, iwl22000_2ac_cfg_hr_cdb)},
+	{IWL_PCI_DEVICE(0x2720, 0x1080, iwl22000_2ax_cfg_hr)},
+	{IWL_PCI_DEVICE(0x2720, 0x0090, iwl22000_2ac_cfg_hr_cdb)},
+	{IWL_PCI_DEVICE(0x2720, 0x0310, iwl22000_2ac_cfg_hr_cdb)},
+	{IWL_PCI_DEVICE(0x40C0, 0x0000, iwl22000_2ax_cfg_hr)},
+	{IWL_PCI_DEVICE(0x40C0, 0x0A10, iwl22000_2ax_cfg_hr)},
+	{IWL_PCI_DEVICE(0xA0F0, 0x0000, iwl22000_2ax_cfg_hr)},
 
 #endif /* CONFIG_IWLMVM */
 
@@ -707,7 +707,7 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 		iwl_trans->cfg = cfg_7265d;
 	}
 
-	if (iwl_trans->cfg->rf_id && cfg == &iwla000_2ac_cfg_hr_cdb &&
+	if (iwl_trans->cfg->rf_id && cfg == &iwl22000_2ac_cfg_hr_cdb &&
 	    iwl_trans->hw_rev != CSR_HW_REV_TYPE_HR_CDB) {
 		u32 rf_id_chp = CSR_HW_RF_ID_TYPE_CHIP_ID(iwl_trans->hw_rf_id);
 		u32 jf_chp_id = CSR_HW_RF_ID_TYPE_CHIP_ID(CSR_HW_RF_ID_TYPE_JF);
@@ -715,14 +715,14 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 
 		if (rf_id_chp == jf_chp_id) {
 			if (iwl_trans->hw_rev == CSR_HW_REV_TYPE_QNJ)
-				cfg = &iwla000_2ax_cfg_qnj_jf_b0;
+				cfg = &iwl22000_2ax_cfg_qnj_jf_b0;
 			else
-				cfg = &iwla000_2ac_cfg_jf;
+				cfg = &iwl22000_2ac_cfg_jf;
 		} else if (rf_id_chp == hr_chp_id) {
 			if (iwl_trans->hw_rev == CSR_HW_REV_TYPE_QNJ)
-				cfg = &iwla000_2ax_cfg_qnj_hr_a0;
+				cfg = &iwl22000_2ax_cfg_qnj_hr_a0;
 			else
-				cfg = &iwla000_2ac_cfg_hr;
+				cfg = &iwl22000_2ac_cfg_hr;
 		}
 		iwl_trans->cfg = cfg;
 	}
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
index 4541c86..fbc4536 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
@@ -3250,9 +3250,9 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
 
 		hw_status = iwl_read_prph(trans, UMAG_GEN_HW_STATUS);
 		if (hw_status & UMAG_GEN_HW_IS_FPGA)
-			trans->cfg = &iwla000_2ax_cfg_qnj_hr_f0;
+			trans->cfg = &iwl22000_2ax_cfg_qnj_hr_f0;
 		else
-			trans->cfg = &iwla000_2ac_cfg_hr;
+			trans->cfg = &iwl22000_2ac_cfg_hr;
 	}
 #endif
 
diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
index e8189c0..7836737 100644
--- a/drivers/net/wireless/mac80211_hwsim.c
+++ b/drivers/net/wireless/mac80211_hwsim.c
@@ -728,16 +728,21 @@ static int hwsim_fops_ps_write(void *dat, u64 val)
 	    val != PS_MANUAL_POLL)
 		return -EINVAL;
 
+	if (val == PS_MANUAL_POLL) {
+		if (data->ps != PS_ENABLED)
+			return -EINVAL;
+		local_bh_disable();
+		ieee80211_iterate_active_interfaces_atomic(
+			data->hw, IEEE80211_IFACE_ITER_NORMAL,
+			hwsim_send_ps_poll, data);
+		local_bh_enable();
+		return 0;
+	}
 	old_ps = data->ps;
 	data->ps = val;
 
 	local_bh_disable();
-	if (val == PS_MANUAL_POLL) {
-		ieee80211_iterate_active_interfaces_atomic(
-			data->hw, IEEE80211_IFACE_ITER_NORMAL,
-			hwsim_send_ps_poll, data);
-		data->ps_poll_pending = true;
-	} else if (old_ps == PS_DISABLED && val != PS_DISABLED) {
+	if (old_ps == PS_DISABLED && val != PS_DISABLED) {
 		ieee80211_iterate_active_interfaces_atomic(
 			data->hw, IEEE80211_IFACE_ITER_NORMAL,
 			hwsim_send_nullfunc_ps, data);
diff --git a/drivers/net/wireless/marvell/mwifiex/cfg80211.c b/drivers/net/wireless/marvell/mwifiex/cfg80211.c
index 6e0d9a9..ce4432c 100644
--- a/drivers/net/wireless/marvell/mwifiex/cfg80211.c
+++ b/drivers/net/wireless/marvell/mwifiex/cfg80211.c
@@ -1116,6 +1116,12 @@ mwifiex_cfg80211_change_virtual_intf(struct wiphy *wiphy,
 	struct mwifiex_private *priv = mwifiex_netdev_get_priv(dev);
 	enum nl80211_iftype curr_iftype = dev->ieee80211_ptr->iftype;
 
+	if (priv->scan_request) {
+		mwifiex_dbg(priv->adapter, ERROR,
+			    "change virtual interface: scan in process\n");
+		return -EBUSY;
+	}
+
 	switch (curr_iftype) {
 	case NL80211_IFTYPE_ADHOC:
 		switch (type) {
@@ -1180,7 +1186,6 @@ mwifiex_cfg80211_change_virtual_intf(struct wiphy *wiphy,
 	case NL80211_IFTYPE_AP:
 		switch (type) {
 		case NL80211_IFTYPE_ADHOC:
-		case NL80211_IFTYPE_STATION:
 			return mwifiex_change_vif_to_sta_adhoc(dev, curr_iftype,
 							       type, params);
 			break;
diff --git a/drivers/net/wireless/marvell/mwl8k.c b/drivers/net/wireless/marvell/mwl8k.c
index e813b2c..8e4e9b6 100644
--- a/drivers/net/wireless/marvell/mwl8k.c
+++ b/drivers/net/wireless/marvell/mwl8k.c
@@ -199,7 +199,7 @@ struct mwl8k_priv {
 	struct ieee80211_channel channels_24[14];
 	struct ieee80211_rate rates_24[13];
 	struct ieee80211_supported_band band_50;
-	struct ieee80211_channel channels_50[4];
+	struct ieee80211_channel channels_50[9];
 	struct ieee80211_rate rates_50[8];
 	u32 ap_macids_supported;
 	u32 sta_macids_supported;
@@ -383,6 +383,11 @@ static const struct ieee80211_channel mwl8k_channels_50[] = {
 	{ .band = NL80211_BAND_5GHZ, .center_freq = 5200, .hw_value = 40, },
 	{ .band = NL80211_BAND_5GHZ, .center_freq = 5220, .hw_value = 44, },
 	{ .band = NL80211_BAND_5GHZ, .center_freq = 5240, .hw_value = 48, },
+	{ .band = NL80211_BAND_5GHZ, .center_freq = 5745, .hw_value = 149, },
+	{ .band = NL80211_BAND_5GHZ, .center_freq = 5765, .hw_value = 153, },
+	{ .band = NL80211_BAND_5GHZ, .center_freq = 5785, .hw_value = 157, },
+	{ .band = NL80211_BAND_5GHZ, .center_freq = 5805, .hw_value = 161, },
+	{ .band = NL80211_BAND_5GHZ, .center_freq = 5825, .hw_value = 165, },
 };
 
 static const struct ieee80211_rate mwl8k_rates_50[] = {
diff --git a/drivers/net/wireless/mediatek/Kconfig b/drivers/net/wireless/mediatek/Kconfig
index 28843fe..92ce406 100644
--- a/drivers/net/wireless/mediatek/Kconfig
+++ b/drivers/net/wireless/mediatek/Kconfig
@@ -11,4 +11,5 @@
 
 if WLAN_VENDOR_MEDIATEK
 source "drivers/net/wireless/mediatek/mt7601u/Kconfig"
+source "drivers/net/wireless/mediatek/mt76/Kconfig"
 endif # WLAN_VENDOR_MEDIATEK
diff --git a/drivers/net/wireless/mediatek/Makefile b/drivers/net/wireless/mediatek/Makefile
index 9d5f182..00f945f 100644
--- a/drivers/net/wireless/mediatek/Makefile
+++ b/drivers/net/wireless/mediatek/Makefile
@@ -1 +1,2 @@
 obj-$(CONFIG_MT7601U)	+= mt7601u/
+obj-$(CONFIG_MT76_CORE)	+= mt76/
diff --git a/drivers/net/wireless/mediatek/mt76/Kconfig b/drivers/net/wireless/mediatek/mt76/Kconfig
new file mode 100644
index 0000000..fc05d79
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/Kconfig
@@ -0,0 +1,10 @@
+config MT76_CORE
+	tristate
+
+config MT76x2E
+	tristate "MediaTek MT76x2E (PCIe) support"
+	select MT76_CORE
+	depends on MAC80211
+	depends on PCI
+	---help---
+	  This adds support for MT7612/MT7602/MT7662-based wireless PCIe devices.
diff --git a/drivers/net/wireless/mediatek/mt76/Makefile b/drivers/net/wireless/mediatek/mt76/Makefile
new file mode 100644
index 0000000..2bb9198
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/Makefile
@@ -0,0 +1,15 @@
+obj-$(CONFIG_MT76_CORE) += mt76.o
+obj-$(CONFIG_MT76x2E) += mt76x2e.o
+
+mt76-y := \
+	mmio.o util.o trace.o dma.o mac80211.o debugfs.o eeprom.o tx.o
+
+CFLAGS_trace.o := -I$(src)
+
+mt76x2e-y := \
+	mt76x2_pci.o mt76x2_dma.o \
+	mt76x2_main.o mt76x2_init.o mt76x2_debugfs.o mt76x2_tx.o \
+	mt76x2_core.o mt76x2_mac.o mt76x2_eeprom.o mt76x2_mcu.o mt76x2_phy.o \
+	mt76x2_dfs.o mt76x2_trace.o
+
+CFLAGS_mt76x2_trace.o := -I$(src)
diff --git a/drivers/net/wireless/mediatek/mt76/debugfs.c b/drivers/net/wireless/mediatek/mt76/debugfs.c
new file mode 100644
index 0000000..7c3612a
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/debugfs.c
@@ -0,0 +1,77 @@
+/*
+ * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+#include "mt76.h"
+
+static int
+mt76_reg_set(void *data, u64 val)
+{
+	struct mt76_dev *dev = data;
+
+	dev->bus->wr(dev, dev->debugfs_reg, val);
+	return 0;
+}
+
+static int
+mt76_reg_get(void *data, u64 *val)
+{
+	struct mt76_dev *dev = data;
+
+	*val = dev->bus->rr(dev, dev->debugfs_reg);
+	return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(fops_regval, mt76_reg_get, mt76_reg_set, "0x%08llx\n");
+
+static int
+mt76_queues_read(struct seq_file *s, void *data)
+{
+	struct mt76_dev *dev = dev_get_drvdata(s->private);
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(dev->q_tx); i++) {
+		struct mt76_queue *q = &dev->q_tx[i];
+
+		if (!q->ndesc)
+			continue;
+
+		seq_printf(s,
+			   "%d:	queued=%d head=%d tail=%d swq_queued=%d\n",
+			   i, q->queued, q->head, q->tail, q->swq_queued);
+	}
+
+	return 0;
+}
+
+struct dentry *mt76_register_debugfs(struct mt76_dev *dev)
+{
+	struct dentry *dir;
+
+	dir = debugfs_create_dir("mt76", dev->hw->wiphy->debugfsdir);
+	if (!dir)
+		return NULL;
+
+	debugfs_create_u8("led_pin", S_IRUSR | S_IWUSR, dir, &dev->led_pin);
+	debugfs_create_u32("regidx", S_IRUSR | S_IWUSR, dir, &dev->debugfs_reg);
+	debugfs_create_file("regval", S_IRUSR | S_IWUSR, dir, dev,
+			    &fops_regval);
+	debugfs_create_blob("eeprom", S_IRUSR, dir, &dev->eeprom);
+	if (dev->otp.data)
+		debugfs_create_blob("otp", S_IRUSR, dir, &dev->otp);
+	debugfs_create_devm_seqfile(dev->dev, "queues", dir, mt76_queues_read);
+
+	return dir;
+}
+EXPORT_SYMBOL_GPL(mt76_register_debugfs);
diff --git a/drivers/net/wireless/mediatek/mt76/dma.c b/drivers/net/wireless/mediatek/mt76/dma.c
new file mode 100644
index 0000000..ecd409a
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/dma.c
@@ -0,0 +1,451 @@
+/*
+ * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <linux/dma-mapping.h>
+#include "mt76.h"
+#include "dma.h"
+
+#define DMA_DUMMY_TXWI	((void *) ~0)
+
+static int
+mt76_dma_alloc_queue(struct mt76_dev *dev, struct mt76_queue *q)
+{
+	int size;
+	int i;
+
+	spin_lock_init(&q->lock);
+	INIT_LIST_HEAD(&q->swq);
+
+	size = q->ndesc * sizeof(struct mt76_desc);
+	q->desc = dmam_alloc_coherent(dev->dev, size, &q->desc_dma, GFP_KERNEL);
+	if (!q->desc)
+		return -ENOMEM;
+
+	size = q->ndesc * sizeof(*q->entry);
+	q->entry = devm_kzalloc(dev->dev, size, GFP_KERNEL);
+	if (!q->entry)
+		return -ENOMEM;
+
+	/* clear descriptors */
+	for (i = 0; i < q->ndesc; i++)
+		q->desc[i].ctrl = cpu_to_le32(MT_DMA_CTL_DMA_DONE);
+
+	iowrite32(q->desc_dma, &q->regs->desc_base);
+	iowrite32(0, &q->regs->cpu_idx);
+	iowrite32(0, &q->regs->dma_idx);
+	iowrite32(q->ndesc, &q->regs->ring_size);
+
+	return 0;
+}
+
+static int
+mt76_dma_add_buf(struct mt76_dev *dev, struct mt76_queue *q,
+		 struct mt76_queue_buf *buf, int nbufs, u32 info,
+		 struct sk_buff *skb, void *txwi)
+{
+	struct mt76_desc *desc;
+	u32 ctrl;
+	int i, idx = -1;
+
+	if (txwi)
+		q->entry[q->head].txwi = DMA_DUMMY_TXWI;
+
+	for (i = 0; i < nbufs; i += 2, buf += 2) {
+		u32 buf0 = buf[0].addr, buf1 = 0;
+
+		ctrl = FIELD_PREP(MT_DMA_CTL_SD_LEN0, buf[0].len);
+		if (i < nbufs - 1) {
+			buf1 = buf[1].addr;
+			ctrl |= FIELD_PREP(MT_DMA_CTL_SD_LEN1, buf[1].len);
+		}
+
+		if (i == nbufs - 1)
+			ctrl |= MT_DMA_CTL_LAST_SEC0;
+		else if (i == nbufs - 2)
+			ctrl |= MT_DMA_CTL_LAST_SEC1;
+
+		idx = q->head;
+		q->head = (q->head + 1) % q->ndesc;
+
+		desc = &q->desc[idx];
+
+		WRITE_ONCE(desc->buf0, cpu_to_le32(buf0));
+		WRITE_ONCE(desc->buf1, cpu_to_le32(buf1));
+		WRITE_ONCE(desc->info, cpu_to_le32(info));
+		WRITE_ONCE(desc->ctrl, cpu_to_le32(ctrl));
+
+		q->queued++;
+	}
+
+	q->entry[idx].txwi = txwi;
+	q->entry[idx].skb = skb;
+
+	return idx;
+}
+
+static void
+mt76_dma_tx_cleanup_idx(struct mt76_dev *dev, struct mt76_queue *q, int idx,
+			struct mt76_queue_entry *prev_e)
+{
+	struct mt76_queue_entry *e = &q->entry[idx];
+	__le32 __ctrl = READ_ONCE(q->desc[idx].ctrl);
+	u32 ctrl = le32_to_cpu(__ctrl);
+
+	if (!e->txwi || !e->skb) {
+		__le32 addr = READ_ONCE(q->desc[idx].buf0);
+		u32 len = FIELD_GET(MT_DMA_CTL_SD_LEN0, ctrl);
+
+		dma_unmap_single(dev->dev, le32_to_cpu(addr), len,
+				 DMA_TO_DEVICE);
+	}
+
+	if (!(ctrl & MT_DMA_CTL_LAST_SEC0)) {
+		__le32 addr = READ_ONCE(q->desc[idx].buf1);
+		u32 len = FIELD_GET(MT_DMA_CTL_SD_LEN1, ctrl);
+
+		dma_unmap_single(dev->dev, le32_to_cpu(addr), len,
+				 DMA_TO_DEVICE);
+	}
+
+	if (e->txwi == DMA_DUMMY_TXWI)
+		e->txwi = NULL;
+
+	*prev_e = *e;
+	memset(e, 0, sizeof(*e));
+}
+
+static void
+mt76_dma_sync_idx(struct mt76_dev *dev, struct mt76_queue *q)
+{
+	q->head = ioread32(&q->regs->dma_idx);
+	q->tail = q->head;
+	iowrite32(q->head, &q->regs->cpu_idx);
+}
+
+static void
+mt76_dma_tx_cleanup(struct mt76_dev *dev, enum mt76_txq_id qid, bool flush)
+{
+	struct mt76_queue *q = &dev->q_tx[qid];
+	struct mt76_queue_entry entry;
+	bool wake = false;
+	int last;
+
+	if (!q->ndesc)
+		return;
+
+	spin_lock_bh(&q->lock);
+	if (flush)
+		last = -1;
+	else
+		last = ioread32(&q->regs->dma_idx);
+
+	while (q->queued && q->tail != last) {
+		mt76_dma_tx_cleanup_idx(dev, q, q->tail, &entry);
+		if (entry.schedule)
+			q->swq_queued--;
+
+		if (entry.skb)
+			dev->drv->tx_complete_skb(dev, q, &entry, flush);
+
+		if (entry.txwi) {
+			mt76_put_txwi(dev, entry.txwi);
+			wake = true;
+		}
+
+		q->tail = (q->tail + 1) % q->ndesc;
+		q->queued--;
+
+		if (!flush && q->tail == last)
+			last = ioread32(&q->regs->dma_idx);
+	}
+
+	if (!flush)
+		mt76_txq_schedule(dev, q);
+	else
+		mt76_dma_sync_idx(dev, q);
+
+	wake = wake && qid < IEEE80211_NUM_ACS && q->queued < q->ndesc - 8;
+	spin_unlock_bh(&q->lock);
+
+	if (wake)
+		ieee80211_wake_queue(dev->hw, qid);
+}
+
+static void *
+mt76_dma_get_buf(struct mt76_dev *dev, struct mt76_queue *q, int idx,
+		 int *len, u32 *info, bool *more)
+{
+	struct mt76_queue_entry *e = &q->entry[idx];
+	struct mt76_desc *desc = &q->desc[idx];
+	dma_addr_t buf_addr;
+	void *buf = e->buf;
+	int buf_len = SKB_WITH_OVERHEAD(q->buf_size);
+
+	buf_addr = le32_to_cpu(READ_ONCE(desc->buf0));
+	if (len) {
+		u32 ctl = le32_to_cpu(READ_ONCE(desc->ctrl));
+		*len = FIELD_GET(MT_DMA_CTL_SD_LEN0, ctl);
+		*more = !(ctl & MT_DMA_CTL_LAST_SEC0);
+	}
+
+	if (info)
+		*info = le32_to_cpu(desc->info);
+
+	dma_unmap_single(dev->dev, buf_addr, buf_len, DMA_FROM_DEVICE);
+	e->buf = NULL;
+
+	return buf;
+}
+
+static void *
+mt76_dma_dequeue(struct mt76_dev *dev, struct mt76_queue *q, bool flush,
+		 int *len, u32 *info, bool *more)
+{
+	int idx = q->tail;
+
+	*more = false;
+	if (!q->queued)
+		return NULL;
+
+	if (!flush && !(q->desc[idx].ctrl & cpu_to_le32(MT_DMA_CTL_DMA_DONE)))
+		return NULL;
+
+	q->tail = (q->tail + 1) % q->ndesc;
+	q->queued--;
+
+	return mt76_dma_get_buf(dev, q, idx, len, info, more);
+}
+
+static void
+mt76_dma_kick_queue(struct mt76_dev *dev, struct mt76_queue *q)
+{
+	iowrite32(q->head, &q->regs->cpu_idx);
+}
+
+static int
+mt76_dma_rx_fill(struct mt76_dev *dev, struct mt76_queue *q, bool napi)
+{
+	dma_addr_t addr;
+	void *buf;
+	int frames = 0;
+	int len = SKB_WITH_OVERHEAD(q->buf_size);
+	int offset = q->buf_offset;
+	int idx;
+	void *(*alloc)(unsigned int fragsz);
+
+	if (napi)
+		alloc = napi_alloc_frag;
+	else
+		alloc = netdev_alloc_frag;
+
+	spin_lock_bh(&q->lock);
+
+	while (q->queued < q->ndesc - 1) {
+		struct mt76_queue_buf qbuf;
+
+		buf = alloc(q->buf_size);
+		if (!buf)
+			break;
+
+		addr = dma_map_single(dev->dev, buf, len, DMA_FROM_DEVICE);
+		if (dma_mapping_error(dev->dev, addr)) {
+			skb_free_frag(buf);
+			break;
+		}
+
+		qbuf.addr = addr + offset;
+		qbuf.len = len - offset;
+		idx = mt76_dma_add_buf(dev, q, &qbuf, 1, 0, buf, NULL);
+		frames++;
+	}
+
+	if (frames)
+		mt76_dma_kick_queue(dev, q);
+
+	spin_unlock_bh(&q->lock);
+
+	return frames;
+}
+
+static void
+mt76_dma_rx_cleanup(struct mt76_dev *dev, struct mt76_queue *q)
+{
+	void *buf;
+	bool more;
+
+	spin_lock_bh(&q->lock);
+	do {
+		buf = mt76_dma_dequeue(dev, q, true, NULL, NULL, &more);
+		if (!buf)
+			break;
+
+		skb_free_frag(buf);
+	} while (1);
+	spin_unlock_bh(&q->lock);
+}
+
+static void
+mt76_dma_rx_reset(struct mt76_dev *dev, enum mt76_rxq_id qid)
+{
+	struct mt76_queue *q = &dev->q_rx[qid];
+	int i;
+
+	for (i = 0; i < q->ndesc; i++)
+		q->desc[i].ctrl &= ~cpu_to_le32(MT_DMA_CTL_DMA_DONE);
+
+	mt76_dma_rx_cleanup(dev, q);
+	mt76_dma_sync_idx(dev, q);
+	mt76_dma_rx_fill(dev, q, false);
+}
+
+static void
+mt76_add_fragment(struct mt76_dev *dev, struct mt76_queue *q, void *data,
+		  int len, bool more)
+{
+	struct page *page = virt_to_head_page(data);
+	int offset = data - page_address(page);
+	struct sk_buff *skb = q->rx_head;
+
+	offset += q->buf_offset;
+	skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page, offset, len,
+			q->buf_size);
+
+	if (more)
+		return;
+
+	q->rx_head = NULL;
+	dev->drv->rx_skb(dev, q - dev->q_rx, skb);
+}
+
+static int
+mt76_dma_rx_process(struct mt76_dev *dev, struct mt76_queue *q, int budget)
+{
+	struct sk_buff *skb;
+	unsigned char *data;
+	int len;
+	int done = 0;
+	bool more;
+
+	while (done < budget) {
+		u32 info;
+
+		data = mt76_dma_dequeue(dev, q, false, &len, &info, &more);
+		if (!data)
+			break;
+
+		if (q->rx_head) {
+			mt76_add_fragment(dev, q, data, len, more);
+			continue;
+		}
+
+		skb = build_skb(data, q->buf_size);
+		if (!skb) {
+			skb_free_frag(data);
+			continue;
+		}
+
+		skb_reserve(skb, q->buf_offset);
+		if (skb->tail + len > skb->end) {
+			dev_kfree_skb(skb);
+			continue;
+		}
+
+		if (q == &dev->q_rx[MT_RXQ_MCU]) {
+			u32 *rxfce = (u32 *) skb->cb;
+			*rxfce = info;
+		}
+
+		__skb_put(skb, len);
+		done++;
+
+		if (more) {
+			q->rx_head = skb;
+			continue;
+		}
+
+		dev->drv->rx_skb(dev, q - dev->q_rx, skb);
+	}
+
+	mt76_dma_rx_fill(dev, q, true);
+	return done;
+}
+
+static int
+mt76_dma_rx_poll(struct napi_struct *napi, int budget)
+{
+	struct mt76_dev *dev;
+	int qid, done;
+
+	dev = container_of(napi->dev, struct mt76_dev, napi_dev);
+	qid = napi - dev->napi;
+
+	done = mt76_dma_rx_process(dev, &dev->q_rx[qid], budget);
+	if (done < budget) {
+		napi_complete(napi);
+		dev->drv->rx_poll_complete(dev, qid);
+	}
+	mt76_rx_complete(dev, qid);
+
+	return done;
+}
+
+static int
+mt76_dma_init(struct mt76_dev *dev)
+{
+	int i;
+
+	init_dummy_netdev(&dev->napi_dev);
+
+	for (i = 0; i < ARRAY_SIZE(dev->q_rx); i++) {
+		netif_napi_add(&dev->napi_dev, &dev->napi[i], mt76_dma_rx_poll,
+			       64);
+		mt76_dma_rx_fill(dev, &dev->q_rx[i], false);
+		skb_queue_head_init(&dev->rx_skb[i]);
+		napi_enable(&dev->napi[i]);
+	}
+
+	return 0;
+}
+
+static const struct mt76_queue_ops mt76_dma_ops = {
+	.init = mt76_dma_init,
+	.alloc = mt76_dma_alloc_queue,
+	.add_buf = mt76_dma_add_buf,
+	.tx_cleanup = mt76_dma_tx_cleanup,
+	.rx_reset = mt76_dma_rx_reset,
+	.kick = mt76_dma_kick_queue,
+};
+
+int mt76_dma_attach(struct mt76_dev *dev)
+{
+	dev->queue_ops = &mt76_dma_ops;
+	return 0;
+}
+EXPORT_SYMBOL_GPL(mt76_dma_attach);
+
+void mt76_dma_cleanup(struct mt76_dev *dev)
+{
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(dev->q_tx); i++)
+		mt76_dma_tx_cleanup(dev, i, true);
+
+	for (i = 0; i < ARRAY_SIZE(dev->q_rx); i++) {
+		netif_napi_del(&dev->napi[i]);
+		mt76_dma_rx_cleanup(dev, &dev->q_rx[i]);
+	}
+}
+EXPORT_SYMBOL_GPL(mt76_dma_cleanup);
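
[Editor's note] The queues in this file are plain producer/consumer rings: head is the CPU-owned write index (published to the hardware through cpu_idx), tail chases the device's dma_idx, and the rx fill loop deliberately stops one descriptor short of ndesc so a full ring is never mistaken for an empty one. A minimal sketch of that invariant, with the struct trimmed to the two fields the test needs:

    struct ring_sketch {
    	int queued;	/* descriptors currently handed to the device */
    	int ndesc;	/* ring size */
    };

    static inline bool ring_has_room(const struct ring_sketch *q)
    {
    	/* mirrors the 'q->queued < q->ndesc - 1' test in the fill loop */
    	return q->queued < q->ndesc - 1;
    }
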
diff --git a/drivers/net/wireless/mediatek/mt76/dma.h b/drivers/net/wireless/mediatek/mt76/dma.h
new file mode 100644
index 0000000..1dad396
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/dma.h
@@ -0,0 +1,38 @@
+/*
+ * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+#ifndef __MT76_DMA_H
+#define __MT76_DMA_H
+
+#define MT_RING_SIZE			0x10
+
+#define MT_DMA_CTL_SD_LEN1		GENMASK(13, 0)
+#define MT_DMA_CTL_LAST_SEC1		BIT(14)
+#define MT_DMA_CTL_BURST		BIT(15)
+#define MT_DMA_CTL_SD_LEN0		GENMASK(29, 16)
+#define MT_DMA_CTL_LAST_SEC0		BIT(30)
+#define MT_DMA_CTL_DMA_DONE		BIT(31)
+
+struct mt76_desc {
+	__le32 buf0;
+	__le32 ctrl;
+	__le32 buf1;
+	__le32 info;
+} __packed __aligned(4);
+
+int mt76_dma_attach(struct mt76_dev *dev);
+void mt76_dma_cleanup(struct mt76_dev *dev);
+
+#endif
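
[Editor's note] The ctrl word of struct mt76_desc packs two segment lengths plus three flag bits; FIELD_PREP()/FIELD_GET() from <linux/bitfield.h> derive the shift amounts from the GENMASK() definitions above, which is how dma.c builds and tears down descriptors. A hedged example of round-tripping one ctrl word:

    #include <linux/bitfield.h>

    static u32 pack_ctrl_example(void)
    {
    	/* a 1536-byte segment in buf0 and a final 256-byte one in buf1 */
    	return FIELD_PREP(MT_DMA_CTL_SD_LEN0, 1536) |
    	       FIELD_PREP(MT_DMA_CTL_SD_LEN1, 256) |
    	       MT_DMA_CTL_LAST_SEC1;
    }

    static u32 unpack_len0_example(u32 ctrl)
    {
    	return FIELD_GET(MT_DMA_CTL_SD_LEN0, ctrl);	/* 1536 here */
    }
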
diff --git a/drivers/net/wireless/mediatek/mt76/eeprom.c b/drivers/net/wireless/mediatek/mt76/eeprom.c
new file mode 100644
index 0000000..530e5593
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/eeprom.c
@@ -0,0 +1,112 @@
+/*
+ * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+#include <linux/of.h>
+#include <linux/of_net.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/partitions.h>
+#include <linux/etherdevice.h>
+#include "mt76.h"
+
+static int
+mt76_get_of_eeprom(struct mt76_dev *dev, int len)
+{
+#if defined(CONFIG_OF) && defined(CONFIG_MTD)
+	struct device_node *np = dev->dev->of_node;
+	struct mtd_info *mtd;
+	const __be32 *list;
+	const char *part;
+	phandle phandle;
+	int offset = 0;
+	int size;
+	size_t retlen;
+	int ret;
+
+	if (!np)
+		return -ENOENT;
+
+	list = of_get_property(np, "mediatek,mtd-eeprom", &size);
+	if (!list)
+		return -ENOENT;
+
+	phandle = be32_to_cpup(list++);
+	if (!phandle)
+		return -ENOENT;
+
+	np = of_find_node_by_phandle(phandle);
+	if (!np)
+		return -EINVAL;
+
+	part = of_get_property(np, "label", NULL);
+	if (!part)
+		part = np->name;
+
+	mtd = get_mtd_device_nm(part);
+	if (IS_ERR(mtd))
+		return PTR_ERR(mtd);
+
+	if (size <= sizeof(*list))
+		return -EINVAL;
+
+	offset = be32_to_cpup(list);
+	ret = mtd_read(mtd, offset, len, &retlen, dev->eeprom.data);
+	put_mtd_device(mtd);
+	if (ret)
+		return ret;
+
+	if (retlen < len)
+		return -EINVAL;
+
+	return 0;
+#else
+	return -ENOENT;
+#endif
+}
+
+void
+mt76_eeprom_override(struct mt76_dev *dev)
+{
+#ifdef CONFIG_OF
+	struct device_node *np = dev->dev->of_node;
+	const u8 *mac;
+
+	if (!np)
+		return;
+
+	mac = of_get_mac_address(np);
+	if (mac)
+		memcpy(dev->macaddr, mac, ETH_ALEN);
+#endif
+
+	if (!is_valid_ether_addr(dev->macaddr)) {
+		eth_random_addr(dev->macaddr);
+		dev_info(dev->dev,
+			 "Invalid MAC address, using random address %pM\n",
+			 dev->macaddr);
+	}
+}
+EXPORT_SYMBOL_GPL(mt76_eeprom_override);
+
+int
+mt76_eeprom_init(struct mt76_dev *dev, int len)
+{
+	dev->eeprom.size = len;
+	dev->eeprom.data = devm_kzalloc(dev->dev, len, GFP_KERNEL);
+	if (!dev->eeprom.data)
+		return -ENOMEM;
+
+	return !mt76_get_of_eeprom(dev, len);
+}
+EXPORT_SYMBOL_GPL(mt76_eeprom_init);
diff --git a/drivers/net/wireless/mediatek/mt76/mac80211.c b/drivers/net/wireless/mediatek/mt76/mac80211.c
new file mode 100644
index 0000000..3acf0e1
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mac80211.c
@@ -0,0 +1,393 @@
+/*
+ * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+#include <linux/of.h>
+#include "mt76.h"
+
+#define CHAN2G(_idx, _freq) {			\
+	.band = NL80211_BAND_2GHZ,		\
+	.center_freq = (_freq),			\
+	.hw_value = (_idx),			\
+	.max_power = 30,			\
+}
+
+#define CHAN5G(_idx, _freq) {			\
+	.band = NL80211_BAND_5GHZ,		\
+	.center_freq = (_freq),			\
+	.hw_value = (_idx),			\
+	.max_power = 30,			\
+}
+
+static const struct ieee80211_channel mt76_channels_2ghz[] = {
+	CHAN2G(1, 2412),
+	CHAN2G(2, 2417),
+	CHAN2G(3, 2422),
+	CHAN2G(4, 2427),
+	CHAN2G(5, 2432),
+	CHAN2G(6, 2437),
+	CHAN2G(7, 2442),
+	CHAN2G(8, 2447),
+	CHAN2G(9, 2452),
+	CHAN2G(10, 2457),
+	CHAN2G(11, 2462),
+	CHAN2G(12, 2467),
+	CHAN2G(13, 2472),
+	CHAN2G(14, 2484),
+};
+
+static const struct ieee80211_channel mt76_channels_5ghz[] = {
+	CHAN5G(36, 5180),
+	CHAN5G(40, 5200),
+	CHAN5G(44, 5220),
+	CHAN5G(48, 5240),
+
+	CHAN5G(52, 5260),
+	CHAN5G(56, 5280),
+	CHAN5G(60, 5300),
+	CHAN5G(64, 5320),
+
+	CHAN5G(100, 5500),
+	CHAN5G(104, 5520),
+	CHAN5G(108, 5540),
+	CHAN5G(112, 5560),
+	CHAN5G(116, 5580),
+	CHAN5G(120, 5600),
+	CHAN5G(124, 5620),
+	CHAN5G(128, 5640),
+	CHAN5G(132, 5660),
+	CHAN5G(136, 5680),
+	CHAN5G(140, 5700),
+
+	CHAN5G(149, 5745),
+	CHAN5G(153, 5765),
+	CHAN5G(157, 5785),
+	CHAN5G(161, 5805),
+	CHAN5G(165, 5825),
+};
+
+static const struct ieee80211_tpt_blink mt76_tpt_blink[] = {
+	{ .throughput =   0 * 1024, .blink_time = 334 },
+	{ .throughput =   1 * 1024, .blink_time = 260 },
+	{ .throughput =   5 * 1024, .blink_time = 220 },
+	{ .throughput =  10 * 1024, .blink_time = 190 },
+	{ .throughput =  20 * 1024, .blink_time = 170 },
+	{ .throughput =  50 * 1024, .blink_time = 150 },
+	{ .throughput =  70 * 1024, .blink_time = 130 },
+	{ .throughput = 100 * 1024, .blink_time = 110 },
+	{ .throughput = 200 * 1024, .blink_time =  80 },
+	{ .throughput = 300 * 1024, .blink_time =  50 },
+};
+
+static int mt76_led_init(struct mt76_dev *dev)
+{
+	struct device_node *np = dev->dev->of_node;
+	struct ieee80211_hw *hw = dev->hw;
+	u32 led_pin;
+
+	if (!dev->led_cdev.brightness_set && !dev->led_cdev.blink_set)
+		return 0;
+
+	snprintf(dev->led_name, sizeof(dev->led_name),
+		 "mt76-%s", wiphy_name(hw->wiphy));
+
+	dev->led_cdev.name = dev->led_name;
+	dev->led_cdev.default_trigger =
+		ieee80211_create_tpt_led_trigger(hw,
+					IEEE80211_TPT_LEDTRIG_FL_RADIO,
+					mt76_tpt_blink,
+					ARRAY_SIZE(mt76_tpt_blink));
+
+	np = of_get_child_by_name(np, "led");
+	if (np) {
+		if (!of_property_read_u32(np, "led-sources", &led_pin))
+			dev->led_pin = led_pin;
+		dev->led_al = of_property_read_bool(np, "led-active-low");
+	}
+
+	return devm_led_classdev_register(dev->dev, &dev->led_cdev);
+}
+
+static int
+mt76_init_sband(struct mt76_dev *dev, struct mt76_sband *msband,
+		const struct ieee80211_channel *chan, int n_chan,
+		struct ieee80211_rate *rates, int n_rates, bool vht)
+{
+	struct ieee80211_supported_band *sband = &msband->sband;
+	struct ieee80211_sta_ht_cap *ht_cap;
+	struct ieee80211_sta_vht_cap *vht_cap;
+	void *chanlist;
+	u16 mcs_map;
+	int size;
+
+	size = n_chan * sizeof(*chan);
+	chanlist = devm_kmemdup(dev->dev, chan, size, GFP_KERNEL);
+	if (!chanlist)
+		return -ENOMEM;
+
+	msband->chan = devm_kzalloc(dev->dev, n_chan * sizeof(*msband->chan),
+				    GFP_KERNEL);
+	if (!msband->chan)
+		return -ENOMEM;
+
+	sband->channels = chanlist;
+	sband->n_channels = n_chan;
+	sband->bitrates = rates;
+	sband->n_bitrates = n_rates;
+	dev->chandef.chan = &sband->channels[0];
+
+	ht_cap = &sband->ht_cap;
+	ht_cap->ht_supported = true;
+	ht_cap->cap |= IEEE80211_HT_CAP_SUP_WIDTH_20_40 |
+		       IEEE80211_HT_CAP_GRN_FLD |
+		       IEEE80211_HT_CAP_SGI_20 |
+		       IEEE80211_HT_CAP_SGI_40 |
+		       IEEE80211_HT_CAP_TX_STBC |
+		       (1 << IEEE80211_HT_CAP_RX_STBC_SHIFT);
+
+	ht_cap->mcs.rx_mask[0] = 0xff;
+	ht_cap->mcs.rx_mask[1] = 0xff;
+	ht_cap->mcs.tx_params = IEEE80211_HT_MCS_TX_DEFINED;
+	ht_cap->ampdu_factor = IEEE80211_HT_MAX_AMPDU_64K;
+	ht_cap->ampdu_density = IEEE80211_HT_MPDU_DENSITY_4;
+
+	if (!vht)
+		return 0;
+
+	vht_cap = &sband->vht_cap;
+	vht_cap->vht_supported = true;
+
+	mcs_map = (IEEE80211_VHT_MCS_SUPPORT_0_9 << (0 * 2)) |
+		  (IEEE80211_VHT_MCS_SUPPORT_0_9 << (1 * 2)) |
+		  (IEEE80211_VHT_MCS_NOT_SUPPORTED << (2 * 2)) |
+		  (IEEE80211_VHT_MCS_NOT_SUPPORTED << (3 * 2)) |
+		  (IEEE80211_VHT_MCS_NOT_SUPPORTED << (4 * 2)) |
+		  (IEEE80211_VHT_MCS_NOT_SUPPORTED << (5 * 2)) |
+		  (IEEE80211_VHT_MCS_NOT_SUPPORTED << (6 * 2)) |
+		  (IEEE80211_VHT_MCS_NOT_SUPPORTED << (7 * 2));
+
+	vht_cap->vht_mcs.rx_mcs_map = cpu_to_le16(mcs_map);
+	vht_cap->vht_mcs.tx_mcs_map = cpu_to_le16(mcs_map);
+	vht_cap->cap |= IEEE80211_VHT_CAP_RXLDPC |
+			IEEE80211_VHT_CAP_TXSTBC |
+			IEEE80211_VHT_CAP_RXSTBC_1 |
+			IEEE80211_VHT_CAP_SHORT_GI_80;
+
+	return 0;
+}
+
+static int
+mt76_init_sband_2g(struct mt76_dev *dev, struct ieee80211_rate *rates,
+		   int n_rates)
+{
+	dev->hw->wiphy->bands[NL80211_BAND_2GHZ] = &dev->sband_2g.sband;
+
+	return mt76_init_sband(dev, &dev->sband_2g,
+			       mt76_channels_2ghz,
+			       ARRAY_SIZE(mt76_channels_2ghz),
+			       rates, n_rates, false);
+}
+
+static int
+mt76_init_sband_5g(struct mt76_dev *dev, struct ieee80211_rate *rates,
+		   int n_rates, bool vht)
+{
+	dev->hw->wiphy->bands[NL80211_BAND_5GHZ] = &dev->sband_5g.sband;
+
+	return mt76_init_sband(dev, &dev->sband_5g,
+			       mt76_channels_5ghz,
+			       ARRAY_SIZE(mt76_channels_5ghz),
+			       rates, n_rates, vht);
+}
+
+static void
+mt76_check_sband(struct mt76_dev *dev, int band)
+{
+	struct ieee80211_supported_band *sband = dev->hw->wiphy->bands[band];
+	bool found = false;
+	int i;
+
+	if (!sband)
+		return;
+
+	for (i = 0; i < sband->n_channels; i++) {
+		if (sband->channels[i].flags & IEEE80211_CHAN_DISABLED)
+			continue;
+
+		found = true;
+		break;
+	}
+
+	if (found)
+		return;
+
+	sband->n_channels = 0;
+	dev->hw->wiphy->bands[band] = NULL;
+}
+
+int mt76_register_device(struct mt76_dev *dev, bool vht,
+			 struct ieee80211_rate *rates, int n_rates)
+{
+	struct ieee80211_hw *hw = dev->hw;
+	struct wiphy *wiphy = hw->wiphy;
+	int ret;
+
+	dev_set_drvdata(dev->dev, dev);
+
+	spin_lock_init(&dev->lock);
+	spin_lock_init(&dev->cc_lock);
+	INIT_LIST_HEAD(&dev->txwi_cache);
+
+	SET_IEEE80211_DEV(hw, dev->dev);
+	SET_IEEE80211_PERM_ADDR(hw, dev->macaddr);
+
+	wiphy->interface_modes =
+		BIT(NL80211_IFTYPE_STATION) |
+		BIT(NL80211_IFTYPE_AP) |
+#ifdef CONFIG_MAC80211_MESH
+		BIT(NL80211_IFTYPE_MESH_POINT) |
+#endif
+		BIT(NL80211_IFTYPE_ADHOC);
+
+	wiphy->features |= NL80211_FEATURE_ACTIVE_MONITOR;
+
+	hw->txq_data_size = sizeof(struct mt76_txq);
+	hw->max_tx_fragments = 16;
+
+	ieee80211_hw_set(hw, SIGNAL_DBM);
+	ieee80211_hw_set(hw, PS_NULLFUNC_STACK);
+	ieee80211_hw_set(hw, HOST_BROADCAST_PS_BUFFERING);
+	ieee80211_hw_set(hw, AMPDU_AGGREGATION);
+	ieee80211_hw_set(hw, SUPPORTS_RC_TABLE);
+	ieee80211_hw_set(hw, SUPPORT_FAST_XMIT);
+	ieee80211_hw_set(hw, SUPPORTS_CLONED_SKBS);
+	ieee80211_hw_set(hw, SUPPORTS_AMSDU_IN_AMPDU);
+	ieee80211_hw_set(hw, TX_AMSDU);
+	ieee80211_hw_set(hw, TX_FRAG_LIST);
+	ieee80211_hw_set(hw, MFP_CAPABLE);
+
+	wiphy->flags |= WIPHY_FLAG_IBSS_RSN;
+
+	if (dev->cap.has_2ghz) {
+		ret = mt76_init_sband_2g(dev, rates, n_rates);
+		if (ret)
+			return ret;
+	}
+
+	if (dev->cap.has_5ghz) {
+		ret = mt76_init_sband_5g(dev, rates + 4, n_rates - 4, vht);
+		if (ret)
+			return ret;
+	}
+
+	wiphy_read_of_freq_limits(wiphy);
+	mt76_check_sband(dev, NL80211_BAND_2GHZ);
+	mt76_check_sband(dev, NL80211_BAND_5GHZ);
+
+	ret = mt76_led_init(dev);
+	if (ret)
+		return ret;
+
+	return ieee80211_register_hw(hw);
+}
+EXPORT_SYMBOL_GPL(mt76_register_device);
+
+void mt76_unregister_device(struct mt76_dev *dev)
+{
+	struct ieee80211_hw *hw = dev->hw;
+
+	ieee80211_unregister_hw(hw);
+	mt76_tx_free(dev);
+}
+EXPORT_SYMBOL_GPL(mt76_unregister_device);
+
+void mt76_rx(struct mt76_dev *dev, enum mt76_rxq_id q, struct sk_buff *skb)
+{
+	if (!test_bit(MT76_STATE_RUNNING, &dev->state)) {
+		dev_kfree_skb(skb);
+		return;
+	}
+
+	__skb_queue_tail(&dev->rx_skb[q], skb);
+}
+EXPORT_SYMBOL_GPL(mt76_rx);
+
+void mt76_set_channel(struct mt76_dev *dev)
+{
+	struct ieee80211_hw *hw = dev->hw;
+	struct cfg80211_chan_def *chandef = &hw->conf.chandef;
+	struct mt76_channel_state *state;
+	bool offchannel = hw->conf.flags & IEEE80211_CONF_OFFCHANNEL;
+
+	if (dev->drv->update_survey)
+		dev->drv->update_survey(dev);
+
+	dev->chandef = *chandef;
+
+	if (!offchannel)
+		dev->main_chan = chandef->chan;
+
+	if (chandef->chan != dev->main_chan) {
+		state = mt76_channel_state(dev, chandef->chan);
+		memset(state, 0, sizeof(*state));
+	}
+}
+EXPORT_SYMBOL_GPL(mt76_set_channel);
+
+int mt76_get_survey(struct ieee80211_hw *hw, int idx,
+		    struct survey_info *survey)
+{
+	struct mt76_dev *dev = hw->priv;
+	struct mt76_sband *sband;
+	struct ieee80211_channel *chan;
+	struct mt76_channel_state *state;
+	int ret = 0;
+
+	if (idx == 0 && dev->drv->update_survey)
+		dev->drv->update_survey(dev);
+
+	sband = &dev->sband_2g;
+	if (idx >= sband->sband.n_channels) {
+		idx -= sband->sband.n_channels;
+		sband = &dev->sband_5g;
+	}
+
+	if (idx >= sband->sband.n_channels)
+		return -ENOENT;
+
+	chan = &sband->sband.channels[idx];
+	state = mt76_channel_state(dev, chan);
+
+	memset(survey, 0, sizeof(*survey));
+	survey->channel = chan;
+	survey->filled = SURVEY_INFO_TIME | SURVEY_INFO_TIME_BUSY;
+	if (chan == dev->main_chan)
+		survey->filled |= SURVEY_INFO_IN_USE;
+
+	spin_lock_bh(&dev->cc_lock);
+	survey->time = div_u64(state->cc_active, 1000);
+	survey->time_busy = div_u64(state->cc_busy, 1000);
+	spin_unlock_bh(&dev->cc_lock);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(mt76_get_survey);
+
+void mt76_rx_complete(struct mt76_dev *dev, enum mt76_rxq_id q)
+{
+	struct sk_buff *skb;
+
+	while ((skb = __skb_dequeue(&dev->rx_skb[q])) != NULL)
+		ieee80211_rx_napi(dev->hw, NULL, skb, &dev->napi[q]);
+}
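
A chip driver hands mt76_register_device() a single legacy-rate table; note
the rates + 4 / n_rates - 4 slice in the 5 GHz path above, which assumes the
first four entries are the CCK rates that exist only on 2.4 GHz. A sketch of
such a table (values abridged, initializers reduced to the essentials):

    static struct ieee80211_rate my_rates[] = {
    	{ .bitrate = 10,  .hw_value = 0 },	/* CCK 1M (2.4 GHz only) */
    	{ .bitrate = 20,  .hw_value = 1 },	/* CCK 2M */
    	{ .bitrate = 55,  .hw_value = 2 },	/* CCK 5.5M */
    	{ .bitrate = 110, .hw_value = 3 },	/* CCK 11M */
    	{ .bitrate = 60,  .hw_value = 0 },	/* OFDM 6M */
    	/* ... remaining OFDM rates ... */
    };

    err = mt76_register_device(&dev->mt76, true, my_rates,
    			       ARRAY_SIZE(my_rates));
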
diff --git a/drivers/net/wireless/mediatek/mt76/mmio.c b/drivers/net/wireless/mediatek/mt76/mmio.c
new file mode 100644
index 0000000..09a14de
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mmio.c
@@ -0,0 +1,61 @@
+/*
+ * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include "mt76.h"
+#include "trace.h"
+
+static u32 mt76_mmio_rr(struct mt76_dev *dev, u32 offset)
+{
+	u32 val;
+
+	val = ioread32(dev->regs + offset);
+	trace_reg_rr(dev, offset, val);
+
+	return val;
+}
+
+static void mt76_mmio_wr(struct mt76_dev *dev, u32 offset, u32 val)
+{
+	trace_reg_wr(dev, offset, val);
+	iowrite32(val, dev->regs + offset);
+}
+
+static u32 mt76_mmio_rmw(struct mt76_dev *dev, u32 offset, u32 mask, u32 val)
+{
+	val |= mt76_mmio_rr(dev, offset) & ~mask;
+	mt76_mmio_wr(dev, offset, val);
+	return val;
+}
+
+static void mt76_mmio_copy(struct mt76_dev *dev, u32 offset, const void *data,
+			   int len)
+{
+	__iowrite32_copy(dev->regs + offset, data, len >> 2);
+}
+
+void mt76_mmio_init(struct mt76_dev *dev, void __iomem *regs)
+{
+	static const struct mt76_bus_ops mt76_mmio_ops = {
+		.rr = mt76_mmio_rr,
+		.rmw = mt76_mmio_rmw,
+		.wr = mt76_mmio_wr,
+		.copy = mt76_mmio_copy,
+	};
+
+	dev->bus = &mt76_mmio_ops;
+	dev->regs = regs;
+}
+EXPORT_SYMBOL_GPL(mt76_mmio_init);
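
mt76_mmio_rmw() clears the bits named by mask, ORs in val, and returns the
value written; the mt76_set()/mt76_clear() wrappers in mt76.h build on it as
rmw(offset, 0, val) and rmw(offset, val, 0) respectively. Open-coded, one
field update through this path looks roughly like the following, where REG
and the GENMASK(7, 4) field layout are illustrative placeholders:

    u32 v;

    v = mt76_mmio_rr(dev, REG);		/* read                    */
    v &= ~GENMASK(7, 4);			/* drop the old field bits */
    v |= FIELD_PREP(GENMASK(7, 4), 5);	/* insert the new value    */
    mt76_mmio_wr(dev, REG, v);		/* write back              */
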
diff --git a/drivers/net/wireless/mediatek/mt76/mt76.h b/drivers/net/wireless/mediatek/mt76/mt76.h
new file mode 100644
index 0000000..aa0880b
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt76.h
@@ -0,0 +1,360 @@
+/*
+ * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef __MT76_H
+#define __MT76_H
+
+#include <linux/kernel.h>
+#include <linux/io.h>
+#include <linux/spinlock.h>
+#include <linux/skbuff.h>
+#include <linux/leds.h>
+#include <net/mac80211.h>
+#include "util.h"
+
+#define MT_TX_RING_SIZE     256
+#define MT_MCU_RING_SIZE    32
+#define MT_RX_BUF_SIZE      2048
+
+struct mt76_dev;
+
+struct mt76_bus_ops {
+	u32 (*rr)(struct mt76_dev *dev, u32 offset);
+	void (*wr)(struct mt76_dev *dev, u32 offset, u32 val);
+	u32 (*rmw)(struct mt76_dev *dev, u32 offset, u32 mask, u32 val);
+	void (*copy)(struct mt76_dev *dev, u32 offset, const void *data,
+		     int len);
+};
+
+enum mt76_txq_id {
+	MT_TXQ_VO = IEEE80211_AC_VO,
+	MT_TXQ_VI = IEEE80211_AC_VI,
+	MT_TXQ_BE = IEEE80211_AC_BE,
+	MT_TXQ_BK = IEEE80211_AC_BK,
+	MT_TXQ_PSD,
+	MT_TXQ_MCU,
+	MT_TXQ_BEACON,
+	MT_TXQ_CAB,
+	__MT_TXQ_MAX
+};
+
+enum mt76_rxq_id {
+	MT_RXQ_MAIN,
+	MT_RXQ_MCU,
+	__MT_RXQ_MAX
+};
+
+struct mt76_queue_buf {
+	dma_addr_t addr;
+	int len;
+};
+
+struct mt76_queue_entry {
+	union {
+		void *buf;
+		struct sk_buff *skb;
+	};
+	struct mt76_txwi_cache *txwi;
+	bool schedule;
+};
+
+struct mt76_queue_regs {
+	u32 desc_base;
+	u32 ring_size;
+	u32 cpu_idx;
+	u32 dma_idx;
+} __packed __aligned(4);
+
+struct mt76_queue {
+	struct mt76_queue_regs __iomem *regs;
+
+	spinlock_t lock;
+	struct mt76_queue_entry *entry;
+	struct mt76_desc *desc;
+
+	struct list_head swq;
+	int swq_queued;
+
+	u16 head;
+	u16 tail;
+	int ndesc;
+	int queued;
+	int buf_size;
+
+	u8 buf_offset;
+	u8 hw_idx;
+
+	dma_addr_t desc_dma;
+	struct sk_buff *rx_head;
+};
+
+struct mt76_queue_ops {
+	int (*init)(struct mt76_dev *dev);
+
+	int (*alloc)(struct mt76_dev *dev, struct mt76_queue *q);
+
+	int (*add_buf)(struct mt76_dev *dev, struct mt76_queue *q,
+		       struct mt76_queue_buf *buf, int nbufs, u32 info,
+		       struct sk_buff *skb, void *txwi);
+
+	void *(*dequeue)(struct mt76_dev *dev, struct mt76_queue *q, bool flush,
+			 int *len, u32 *info, bool *more);
+
+	void (*rx_reset)(struct mt76_dev *dev, enum mt76_rxq_id qid);
+
+	void (*tx_cleanup)(struct mt76_dev *dev, enum mt76_txq_id qid,
+			   bool flush);
+
+	void (*kick)(struct mt76_dev *dev, struct mt76_queue *q);
+};
+
+struct mt76_wcid {
+	u8 idx;
+	u8 hw_key_idx;
+
+	__le16 tx_rate;
+	bool tx_rate_set;
+	u8 tx_rate_nss;
+	s8 max_txpwr_adj;
+};
+
+struct mt76_txq {
+	struct list_head list;
+	struct mt76_queue *hwq;
+	struct mt76_wcid *wcid;
+
+	struct sk_buff_head retry_q;
+
+	u16 agg_ssn;
+	bool send_bar;
+	bool aggr;
+};
+
+struct mt76_txwi_cache {
+	u32 txwi[8];
+	dma_addr_t dma_addr;
+	struct list_head list;
+};
+
+enum {
+	MT76_STATE_INITIALIZED,
+	MT76_STATE_RUNNING,
+	MT76_SCANNING,
+	MT76_RESET,
+};
+
+struct mt76_hw_cap {
+	bool has_2ghz;
+	bool has_5ghz;
+};
+
+struct mt76_driver_ops {
+	u16 txwi_size;
+
+	void (*update_survey)(struct mt76_dev *dev);
+
+	int (*tx_prepare_skb)(struct mt76_dev *dev, void *txwi_ptr,
+			      struct sk_buff *skb, struct mt76_queue *q,
+			      struct mt76_wcid *wcid,
+			      struct ieee80211_sta *sta, u32 *tx_info);
+
+	void (*tx_complete_skb)(struct mt76_dev *dev, struct mt76_queue *q,
+				struct mt76_queue_entry *e, bool flush);
+
+	void (*rx_skb)(struct mt76_dev *dev, enum mt76_rxq_id q,
+		       struct sk_buff *skb);
+
+	void (*rx_poll_complete)(struct mt76_dev *dev, enum mt76_rxq_id q);
+};
+
+struct mt76_channel_state {
+	u64 cc_active;
+	u64 cc_busy;
+};
+
+struct mt76_sband {
+	struct ieee80211_supported_band sband;
+	struct mt76_channel_state *chan;
+};
+
+struct mt76_dev {
+	struct ieee80211_hw *hw;
+	struct cfg80211_chan_def chandef;
+	struct ieee80211_channel *main_chan;
+
+	spinlock_t lock;
+	spinlock_t cc_lock;
+	const struct mt76_bus_ops *bus;
+	const struct mt76_driver_ops *drv;
+	void __iomem *regs;
+	struct device *dev;
+
+	struct net_device napi_dev;
+	struct napi_struct napi[__MT_RXQ_MAX];
+	struct sk_buff_head rx_skb[__MT_RXQ_MAX];
+
+	struct list_head txwi_cache;
+	struct mt76_queue q_tx[__MT_TXQ_MAX];
+	struct mt76_queue q_rx[__MT_RXQ_MAX];
+	const struct mt76_queue_ops *queue_ops;
+
+	u8 macaddr[ETH_ALEN];
+	u32 rev;
+	unsigned long state;
+
+	struct mt76_sband sband_2g;
+	struct mt76_sband sband_5g;
+	struct debugfs_blob_wrapper eeprom;
+	struct debugfs_blob_wrapper otp;
+	struct mt76_hw_cap cap;
+
+	u32 debugfs_reg;
+
+	struct led_classdev led_cdev;
+	char led_name[32];
+	bool led_al;
+	u8 led_pin;
+};
+
+enum mt76_phy_type {
+	MT_PHY_TYPE_CCK,
+	MT_PHY_TYPE_OFDM,
+	MT_PHY_TYPE_HT,
+	MT_PHY_TYPE_HT_GF,
+	MT_PHY_TYPE_VHT,
+};
+
+struct mt76_rate_power {
+	union {
+		struct {
+			s8 cck[4];
+			s8 ofdm[8];
+			s8 ht[16];
+			s8 vht[10];
+		};
+		s8 all[38];
+	};
+};
+
+#define mt76_rr(dev, ...)	(dev)->mt76.bus->rr(&((dev)->mt76), __VA_ARGS__)
+#define mt76_wr(dev, ...)	(dev)->mt76.bus->wr(&((dev)->mt76), __VA_ARGS__)
+#define mt76_rmw(dev, ...)	(dev)->mt76.bus->rmw(&((dev)->mt76), __VA_ARGS__)
+#define mt76_wr_copy(dev, ...)	(dev)->mt76.bus->copy(&((dev)->mt76), __VA_ARGS__)
+
+#define mt76_set(dev, offset, val)	mt76_rmw(dev, offset, 0, val)
+#define mt76_clear(dev, offset, val)	mt76_rmw(dev, offset, val, 0)
+
+#define mt76_get_field(_dev, _reg, _field)		\
+	FIELD_GET(_field, mt76_rr(_dev, _reg))
+
+#define mt76_rmw_field(_dev, _reg, _field, _val)	\
+	mt76_rmw(_dev, _reg, _field, FIELD_PREP(_field, _val))
+
+#define mt76_hw(dev) (dev)->mt76.hw
+
+bool __mt76_poll(struct mt76_dev *dev, u32 offset, u32 mask, u32 val,
+		 int timeout);
+
+#define mt76_poll(dev, ...) __mt76_poll(&((dev)->mt76), __VA_ARGS__)
+
+bool __mt76_poll_msec(struct mt76_dev *dev, u32 offset, u32 mask, u32 val,
+		      int timeout);
+
+#define mt76_poll_msec(dev, ...) __mt76_poll_msec(&((dev)->mt76), __VA_ARGS__)
+
+void mt76_mmio_init(struct mt76_dev *dev, void __iomem *regs);
+
+static inline u16 mt76_chip(struct mt76_dev *dev)
+{
+	return dev->rev >> 16;
+}
+
+static inline u16 mt76_rev(struct mt76_dev *dev)
+{
+	return dev->rev & 0xffff;
+}
+
+#define mt76xx_chip(dev) mt76_chip(&((dev)->mt76))
+#define mt76xx_rev(dev) mt76_rev(&((dev)->mt76))
+
+#define mt76_init_queues(dev)		(dev)->mt76.queue_ops->init(&((dev)->mt76))
+#define mt76_queue_alloc(dev, ...)	(dev)->mt76.queue_ops->alloc(&((dev)->mt76), __VA_ARGS__)
+#define mt76_queue_add_buf(dev, ...)	(dev)->mt76.queue_ops->add_buf(&((dev)->mt76), __VA_ARGS__)
+#define mt76_queue_rx_reset(dev, ...)	(dev)->mt76.queue_ops->rx_reset(&((dev)->mt76), __VA_ARGS__)
+#define mt76_queue_tx_cleanup(dev, ...)	(dev)->mt76.queue_ops->tx_cleanup(&((dev)->mt76), __VA_ARGS__)
+#define mt76_queue_kick(dev, ...)	(dev)->mt76.queue_ops->kick(&((dev)->mt76), __VA_ARGS__)
+
+static inline struct mt76_channel_state *
+mt76_channel_state(struct mt76_dev *dev, struct ieee80211_channel *c)
+{
+	struct mt76_sband *msband;
+	int idx;
+
+	if (c->band == NL80211_BAND_2GHZ)
+		msband = &dev->sband_2g;
+	else
+		msband = &dev->sband_5g;
+
+	idx = c - &msband->sband.channels[0];
+	return &msband->chan[idx];
+}
+
+int mt76_register_device(struct mt76_dev *dev, bool vht,
+			 struct ieee80211_rate *rates, int n_rates);
+void mt76_unregister_device(struct mt76_dev *dev);
+
+struct dentry *mt76_register_debugfs(struct mt76_dev *dev);
+
+int mt76_eeprom_init(struct mt76_dev *dev, int len);
+void mt76_eeprom_override(struct mt76_dev *dev);
+
+static inline struct ieee80211_txq *
+mtxq_to_txq(struct mt76_txq *mtxq)
+{
+	void *ptr = mtxq;
+
+	return container_of(ptr, struct ieee80211_txq, drv_priv);
+}
+
+int mt76_tx_queue_skb(struct mt76_dev *dev, struct mt76_queue *q,
+		      struct sk_buff *skb, struct mt76_wcid *wcid,
+		      struct ieee80211_sta *sta);
+
+void mt76_rx(struct mt76_dev *dev, enum mt76_rxq_id q, struct sk_buff *skb);
+void mt76_tx(struct mt76_dev *dev, struct ieee80211_sta *sta,
+	     struct mt76_wcid *wcid, struct sk_buff *skb);
+void mt76_txq_init(struct mt76_dev *dev, struct ieee80211_txq *txq);
+void mt76_txq_remove(struct mt76_dev *dev, struct ieee80211_txq *txq);
+void mt76_wake_tx_queue(struct ieee80211_hw *hw, struct ieee80211_txq *txq);
+void mt76_stop_tx_queues(struct mt76_dev *dev, struct ieee80211_sta *sta,
+			 bool send_bar);
+void mt76_txq_schedule(struct mt76_dev *dev, struct mt76_queue *hwq);
+void mt76_txq_schedule_all(struct mt76_dev *dev);
+void mt76_release_buffered_frames(struct ieee80211_hw *hw,
+				  struct ieee80211_sta *sta,
+				  u16 tids, int nframes,
+				  enum ieee80211_frame_release_type reason,
+				  bool more_data);
+void mt76_set_channel(struct mt76_dev *dev);
+int mt76_get_survey(struct ieee80211_hw *hw, int idx,
+		    struct survey_info *survey);
+
+/* internal */
+void mt76_tx_free(struct mt76_dev *dev);
+void mt76_put_txwi(struct mt76_dev *dev, struct mt76_txwi_cache *t);
+void mt76_rx_complete(struct mt76_dev *dev, enum mt76_rxq_id q);
+
+#endif
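
The accessor macros in this header take the chip-level device rather than
struct mt76_dev itself and reach through its embedded mt76 member, so a
caller looks like the following sketch (my_chip_dev and MY_IRQ_REG are
illustrative only):

    struct my_chip_dev {
    	struct mt76_dev mt76;	/* must be first, see mt76x2.h */
    	u32 my_state;
    };

    static void my_chip_enable_irq(struct my_chip_dev *dev)
    {
    	/* expands to dev->mt76.bus->rmw(&dev->mt76, MY_IRQ_REG, 0, BIT(0)) */
    	mt76_set(dev, MY_IRQ_REG, BIT(0));
    }
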
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2.h b/drivers/net/wireless/mediatek/mt76/mt76x2.h
new file mode 100644
index 0000000..a12dfce
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2.h
@@ -0,0 +1,227 @@
+/*
+ * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef __MT76x2_H
+#define __MT76x2_H
+
+#include <linux/device.h>
+#include <linux/dma-mapping.h>
+#include <linux/spinlock.h>
+#include <linux/skbuff.h>
+#include <linux/netdevice.h>
+#include <linux/irq.h>
+#include <linux/interrupt.h>
+#include <linux/mutex.h>
+#include <linux/bitops.h>
+#include <linux/kfifo.h>
+
+#define MT7662_FIRMWARE		"mt7662.bin"
+#define MT7662_ROM_PATCH	"mt7662_rom_patch.bin"
+#define MT7662_EEPROM_SIZE	512
+
+#define MT76x2_RX_RING_SIZE	256
+#define MT_RX_HEADROOM		32
+
+#define MT_MAX_CHAINS		2
+
+#define MT_CALIBRATE_INTERVAL	HZ
+
+#include "mt76.h"
+#include "mt76x2_regs.h"
+#include "mt76x2_mac.h"
+#include "mt76x2_dfs.h"
+
+struct mt76x2_mcu {
+	struct mutex mutex;
+
+	wait_queue_head_t wait;
+	struct sk_buff_head res_q;
+
+	u32 msg_seq;
+};
+
+struct mt76x2_rx_freq_cal {
+	s8 high_gain[MT_MAX_CHAINS];
+	s8 rssi_offset[MT_MAX_CHAINS];
+	s8 lna_gain;
+	u32 mcu_gain;
+};
+
+struct mt76x2_calibration {
+	struct mt76x2_rx_freq_cal rx;
+
+	u8 agc_gain_init[MT_MAX_CHAINS];
+	u8 agc_gain_cur[MT_MAX_CHAINS];
+
+	int avg_rssi[MT_MAX_CHAINS];
+	int avg_rssi_all;
+
+	s8 agc_gain_adjust;
+	s8 low_gain;
+
+	u8 temp;
+
+	bool init_cal_done;
+	bool tssi_cal_done;
+	bool tssi_comp_pending;
+	bool dpd_cal_done;
+	bool channel_cal_done;
+};
+
+struct mt76x2_dev {
+	struct mt76_dev mt76; /* must be first */
+
+	struct mac_address macaddr_list[8];
+
+	struct mutex mutex;
+
+	const u16 *beacon_offsets;
+	unsigned long wcid_mask[128 / BITS_PER_LONG];
+
+	int txpower_conf;
+	int txpower_cur;
+
+	u8 txdone_seq;
+	DECLARE_KFIFO_PTR(txstatus_fifo, struct mt76x2_tx_status);
+
+	struct mt76x2_mcu mcu;
+	struct sk_buff *rx_head;
+
+	struct tasklet_struct tx_tasklet;
+	struct tasklet_struct pre_tbtt_tasklet;
+	struct delayed_work cal_work;
+	struct delayed_work mac_work;
+
+	u32 aggr_stats[32];
+
+	struct mt76_wcid global_wcid;
+	struct mt76_wcid __rcu *wcid[128];
+
+	spinlock_t irq_lock;
+	u32 irqmask;
+
+	struct sk_buff *beacons[8];
+	u8 beacon_mask;
+	u8 beacon_data_mask;
+
+	u32 rev;
+	u32 rxfilter;
+
+	u16 chainmask;
+
+	struct mt76x2_calibration cal;
+
+	s8 target_power;
+	s8 target_power_delta[2];
+	struct mt76_rate_power rate_power;
+	bool enable_tpc;
+
+	u8 coverage_class;
+	u8 slottime;
+
+	struct mt76x2_dfs_pattern_detector dfs_pd;
+};
+
+struct mt76x2_vif {
+	u8 idx;
+
+	struct mt76_wcid group_wcid;
+};
+
+struct mt76x2_sta {
+	struct mt76_wcid wcid; /* must be first */
+
+	struct mt76x2_tx_status status;
+	int n_frames;
+};
+
+static inline bool is_mt7612(struct mt76x2_dev *dev)
+{
+	return (dev->rev >> 16) == 0x7612;
+}
+
+void mt76x2_set_irq_mask(struct mt76x2_dev *dev, u32 clear, u32 set);
+
+static inline void mt76x2_irq_enable(struct mt76x2_dev *dev, u32 mask)
+{
+	mt76x2_set_irq_mask(dev, 0, mask);
+}
+
+static inline void mt76x2_irq_disable(struct mt76x2_dev *dev, u32 mask)
+{
+	mt76x2_set_irq_mask(dev, mask, 0);
+}
+
+extern const struct ieee80211_ops mt76x2_ops;
+
+struct mt76x2_dev *mt76x2_alloc_device(struct device *pdev);
+int mt76x2_register_device(struct mt76x2_dev *dev);
+void mt76x2_init_debugfs(struct mt76x2_dev *dev);
+
+irqreturn_t mt76x2_irq_handler(int irq, void *dev_instance);
+void mt76x2_phy_power_on(struct mt76x2_dev *dev);
+int mt76x2_init_hardware(struct mt76x2_dev *dev);
+void mt76x2_stop_hardware(struct mt76x2_dev *dev);
+int mt76x2_eeprom_init(struct mt76x2_dev *dev);
+int mt76x2_apply_calibration_data(struct mt76x2_dev *dev, int channel);
+void mt76x2_set_tx_ackto(struct mt76x2_dev *dev);
+
+int mt76x2_phy_start(struct mt76x2_dev *dev);
+int mt76x2_phy_set_channel(struct mt76x2_dev *dev,
+			   struct cfg80211_chan_def *chandef);
+int mt76x2_phy_get_rssi(struct mt76x2_dev *dev, s8 rssi, int chain);
+void mt76x2_phy_calibrate(struct work_struct *work);
+void mt76x2_phy_set_txpower(struct mt76x2_dev *dev);
+
+int mt76x2_mcu_init(struct mt76x2_dev *dev);
+int mt76x2_mcu_set_channel(struct mt76x2_dev *dev, u8 channel, u8 bw,
+			   u8 bw_index, bool scan);
+int mt76x2_mcu_set_radio_state(struct mt76x2_dev *dev, bool on);
+int mt76x2_mcu_load_cr(struct mt76x2_dev *dev, u8 type, u8 temp_level,
+		       u8 channel);
+int mt76x2_mcu_cleanup(struct mt76x2_dev *dev);
+
+int mt76x2_dma_init(struct mt76x2_dev *dev);
+void mt76x2_dma_cleanup(struct mt76x2_dev *dev);
+
+void mt76x2_cleanup(struct mt76x2_dev *dev);
+
+int mt76x2_tx_queue_mcu(struct mt76x2_dev *dev, enum mt76_txq_id qid,
+			struct sk_buff *skb, int cmd, int seq);
+void mt76x2_tx(struct ieee80211_hw *hw, struct ieee80211_tx_control *control,
+	       struct sk_buff *skb);
+void mt76x2_tx_complete(struct mt76x2_dev *dev, struct sk_buff *skb);
+int mt76x2_tx_prepare_skb(struct mt76_dev *mdev, void *txwi,
+			  struct sk_buff *skb, struct mt76_queue *q,
+			  struct mt76_wcid *wcid, struct ieee80211_sta *sta,
+			  u32 *tx_info);
+void mt76x2_tx_complete_skb(struct mt76_dev *mdev, struct mt76_queue *q,
+			    struct mt76_queue_entry *e, bool flush);
+
+void mt76x2_pre_tbtt_tasklet(unsigned long arg);
+
+void mt76x2_rx_poll_complete(struct mt76_dev *mdev, enum mt76_rxq_id q);
+void mt76x2_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q,
+			 struct sk_buff *skb);
+
+void mt76x2_update_channel(struct mt76_dev *mdev);
+
+s8 mt76x2_tx_get_max_txpwr_adj(struct mt76x2_dev *dev,
+			       const struct ieee80211_tx_rate *rate);
+s8 mt76x2_tx_get_txpwr_adj(struct mt76x2_dev *dev, s8 txpwr, s8 max_txpwr_adj);
+void mt76x2_tx_set_txpwr_auto(struct mt76x2_dev *dev, s8 txpwr);
+
+#endif
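
The /* must be first */ annotation on the embedded mt76 member is
load-bearing: hw->priv and the drvdata pointer refer to the same address
whether interpreted as struct mt76_dev * or struct mt76x2_dev *, so the core
can invoke the mt76_driver_ops callbacks with the former while the mt76x2
code recovers the latter. One way to assert the invariant at compile time (a
sketch, not present in the original), next to the recovery idiom used
throughout mt76x2:

    BUILD_BUG_ON(offsetof(struct mt76x2_dev, mt76) != 0);

    /* the usual recovery inside an mt76_driver_ops callback: */
    struct mt76x2_dev *dev = container_of(mdev, struct mt76x2_dev, mt76);
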
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_core.c b/drivers/net/wireless/mediatek/mt76/mt76x2_core.c
new file mode 100644
index 0000000..2629779
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2_core.c
@@ -0,0 +1,88 @@
+/*
+ * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <linux/delay.h>
+#include "mt76x2.h"
+#include "mt76x2_trace.h"
+
+void mt76x2_set_irq_mask(struct mt76x2_dev *dev, u32 clear, u32 set)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&dev->irq_lock, flags);
+	dev->irqmask &= ~clear;
+	dev->irqmask |= set;
+	mt76_wr(dev, MT_INT_MASK_CSR, dev->irqmask);
+	spin_unlock_irqrestore(&dev->irq_lock, flags);
+}
+
+void mt76x2_rx_poll_complete(struct mt76_dev *mdev, enum mt76_rxq_id q)
+{
+	struct mt76x2_dev *dev = container_of(mdev, struct mt76x2_dev, mt76);
+
+	mt76x2_irq_enable(dev, MT_INT_RX_DONE(q));
+}
+
+irqreturn_t mt76x2_irq_handler(int irq, void *dev_instance)
+{
+	struct mt76x2_dev *dev = dev_instance;
+	u32 intr;
+
+	intr = mt76_rr(dev, MT_INT_SOURCE_CSR);
+	mt76_wr(dev, MT_INT_SOURCE_CSR, intr);
+
+	if (!test_bit(MT76_STATE_INITIALIZED, &dev->mt76.state))
+		return IRQ_NONE;
+
+	trace_dev_irq(dev, intr, dev->irqmask);
+
+	intr &= dev->irqmask;
+
+	if (intr & MT_INT_TX_DONE_ALL) {
+		mt76x2_irq_disable(dev, MT_INT_TX_DONE_ALL);
+		tasklet_schedule(&dev->tx_tasklet);
+	}
+
+	if (intr & MT_INT_RX_DONE(0)) {
+		mt76x2_irq_disable(dev, MT_INT_RX_DONE(0));
+		napi_schedule(&dev->mt76.napi[0]);
+	}
+
+	if (intr & MT_INT_RX_DONE(1)) {
+		mt76x2_irq_disable(dev, MT_INT_RX_DONE(1));
+		napi_schedule(&dev->mt76.napi[1]);
+	}
+
+	if (intr & MT_INT_PRE_TBTT)
+		tasklet_schedule(&dev->pre_tbtt_tasklet);
+
+	/* send buffered multicast frames now */
+	if (intr & MT_INT_TBTT)
+		mt76_queue_kick(dev, &dev->mt76.q_tx[MT_TXQ_PSD]);
+
+	if (intr & MT_INT_TX_STAT) {
+		mt76x2_mac_poll_tx_status(dev, true);
+		tasklet_schedule(&dev->tx_tasklet);
+	}
+
+	if (intr & MT_INT_GPTIMER) {
+		mt76x2_irq_disable(dev, MT_INT_GPTIMER);
+		tasklet_schedule(&dev->dfs_pd.dfs_tasklet);
+	}
+
+	return IRQ_HANDLED;
+}
+
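
The interrupt handler above follows the usual ack-mask-defer pattern:
pending sources are acknowledged immediately, each handled source is masked
via mt76x2_irq_disable(), and the real work is deferred to a tasklet or NAPI
context, which re-arms the source when done (mt76x2_rx_poll_complete() is
that re-arm hook for RX). The deferred side looks roughly like this sketch,
where process_rx() stands in for the core's RX dequeue loop and dev/q are
assumed captured from the enclosing driver context:

    static int my_napi_poll(struct napi_struct *napi, int budget)
    {
    	int done = process_rx(napi, budget);	/* hypothetical */

    	if (done < budget) {
    		napi_complete_done(napi, done);
    		/* re-enable the source masked in the hard irq handler */
    		mt76x2_irq_enable(dev, MT_INT_RX_DONE(q));
    	}

    	return done;
    }
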
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_debugfs.c b/drivers/net/wireless/mediatek/mt76/mt76x2_debugfs.c
new file mode 100644
index 0000000..612feb5
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2_debugfs.c
@@ -0,0 +1,133 @@
+/*
+ * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <linux/debugfs.h>
+#include "mt76x2.h"
+
+static int
+mt76x2_ampdu_stat_read(struct seq_file *file, void *data)
+{
+	struct mt76x2_dev *dev = file->private;
+	int i, j;
+
+	for (i = 0; i < 4; i++) {
+		seq_puts(file, "Length: ");
+		for (j = 0; j < 8; j++)
+			seq_printf(file, "%8d | ", i * 8 + j + 1);
+		seq_puts(file, "\n");
+		seq_puts(file, "Count:  ");
+		for (j = 0; j < 8; j++)
+			seq_printf(file, "%8d | ", dev->aggr_stats[i * 8 + j]);
+		seq_puts(file, "\n");
+		seq_puts(file, "--------");
+		for (j = 0; j < 8; j++)
+			seq_puts(file, "-----------");
+		seq_puts(file, "\n");
+	}
+
+	return 0;
+}
+
+static int
+mt76x2_ampdu_stat_open(struct inode *inode, struct file *f)
+{
+	return single_open(f, mt76x2_ampdu_stat_read, inode->i_private);
+}
+
+static void
+seq_puts_array(struct seq_file *file, const char *str, s8 *val, int len)
+{
+	int i;
+
+	seq_printf(file, "%10s:", str);
+	for (i = 0; i < len; i++)
+		seq_printf(file, " %2d", val[i]);
+	seq_puts(file, "\n");
+}
+
+static int read_txpower(struct seq_file *file, void *data)
+{
+	struct mt76x2_dev *dev = dev_get_drvdata(file->private);
+
+	seq_printf(file, "Target power: %d\n", dev->target_power);
+
+	seq_puts_array(file, "Delta", dev->target_power_delta,
+		       ARRAY_SIZE(dev->target_power_delta));
+	seq_puts_array(file, "CCK", dev->rate_power.cck,
+		       ARRAY_SIZE(dev->rate_power.cck));
+	seq_puts_array(file, "OFDM", dev->rate_power.ofdm,
+		       ARRAY_SIZE(dev->rate_power.ofdm));
+	seq_puts_array(file, "HT", dev->rate_power.ht,
+		       ARRAY_SIZE(dev->rate_power.ht));
+	seq_puts_array(file, "VHT", dev->rate_power.vht,
+		       ARRAY_SIZE(dev->rate_power.vht));
+	return 0;
+}
+
+static const struct file_operations fops_ampdu_stat = {
+	.open = mt76x2_ampdu_stat_open,
+	.read = seq_read,
+	.llseek = seq_lseek,
+	.release = single_release,
+};
+
+static int
+mt76x2_dfs_stat_read(struct seq_file *file, void *data)
+{
+	int i;
+	struct mt76x2_dev *dev = file->private;
+	struct mt76x2_dfs_pattern_detector *dfs_pd = &dev->dfs_pd;
+
+	for (i = 0; i < MT_DFS_NUM_ENGINES; i++) {
+		seq_printf(file, "engine: %d\n", i);
+		seq_printf(file, "  hw pattern detected:\t%d\n",
+			   dfs_pd->stats[i].hw_pattern);
+		seq_printf(file, "  hw pulse discarded:\t%d\n",
+			   dfs_pd->stats[i].hw_pulse_discarded);
+	}
+
+	return 0;
+}
+
+static int
+mt76x2_dfs_stat_open(struct inode *inode, struct file *f)
+{
+	return single_open(f, mt76x2_dfs_stat_read, inode->i_private);
+}
+
+static const struct file_operations fops_dfs_stat = {
+	.open = mt76x2_dfs_stat_open,
+	.read = seq_read,
+	.llseek = seq_lseek,
+	.release = single_release,
+};
+
+void mt76x2_init_debugfs(struct mt76x2_dev *dev)
+{
+	struct dentry *dir;
+
+	dir = mt76_register_debugfs(&dev->mt76);
+	if (!dir)
+		return;
+
+	debugfs_create_u8("temperature", S_IRUSR, dir, &dev->cal.temp);
+	debugfs_create_bool("tpc", S_IRUSR | S_IWUSR, dir, &dev->enable_tpc);
+
+	debugfs_create_file("ampdu_stat", S_IRUSR, dir, dev, &fops_ampdu_stat);
+	debugfs_create_file("dfs_stats", S_IRUSR, dir, dev, &fops_dfs_stat);
+	debugfs_create_devm_seqfile(dev->mt76.dev, "txpower", dir,
+				    read_txpower);
+}
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_dfs.c b/drivers/net/wireless/mediatek/mt76/mt76x2_dfs.c
new file mode 100644
index 0000000..5b452a5
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2_dfs.c
@@ -0,0 +1,493 @@
+/*
+ * Copyright (C) 2016 Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include "mt76x2.h"
+
+#define RADAR_SPEC(m, len, el, eh, wl, wh,		\
+		   w_tolerance, tl, th, t_tolerance,	\
+		   bl, bh, event_exp, power_jmp)	\
+{							\
+	.mode = m,					\
+	.avg_len = len,					\
+	.e_low = el,					\
+	.e_high = eh,					\
+	.w_low = wl,					\
+	.w_high = wh,					\
+	.w_margin = w_tolerance,			\
+	.t_low = tl,					\
+	.t_high = th,					\
+	.t_margin = t_tolerance,			\
+	.b_low = bl,					\
+	.b_high = bh,					\
+	.event_expiration = event_exp,			\
+	.pwr_jmp = power_jmp				\
+}
+
+static const struct mt76x2_radar_specs etsi_radar_specs[] = {
+	/* 20MHz */
+	RADAR_SPEC(0, 8, 2, 15, 106, 150, 10, 4900, 100096, 10, 0,
+		   0x7fffffff, 0x155cc0, 0x19cc),
+	RADAR_SPEC(0, 40, 4, 59, 96, 380, 150, 4900, 100096, 40, 0,
+		   0x7fffffff, 0x155cc0, 0x19cc),
+	RADAR_SPEC(3, 60, 20, 46, 300, 640, 80, 4900, 10100, 80, 0,
+		   0x7fffffff, 0x155cc0, 0x19dd),
+	RADAR_SPEC(8, 8, 2, 9, 106, 150, 32, 4900, 296704, 32, 0,
+		   0x7fffffff, 0x2191c0, 0x15cc),
+	/* 40MHz */
+	RADAR_SPEC(0, 8, 2, 15, 106, 150, 10, 4900, 100096, 10, 0,
+		   0x7fffffff, 0x155cc0, 0x19cc),
+	RADAR_SPEC(0, 40, 4, 59, 96, 380, 150, 4900, 100096, 40, 0,
+		   0x7fffffff, 0x155cc0, 0x19cc),
+	RADAR_SPEC(3, 60, 20, 46, 300, 640, 80, 4900, 10100, 80, 0,
+		   0x7fffffff, 0x155cc0, 0x19dd),
+	RADAR_SPEC(8, 8, 2, 9, 106, 150, 32, 4900, 296704, 32, 0,
+		   0x7fffffff, 0x2191c0, 0x15cc),
+	/* 80MHz */
+	RADAR_SPEC(0, 8, 2, 15, 106, 150, 10, 4900, 100096, 10, 0,
+		   0x7fffffff, 0x155cc0, 0x19cc),
+	RADAR_SPEC(0, 40, 4, 59, 96, 380, 150, 4900, 100096, 40, 0,
+		   0x7fffffff, 0x155cc0, 0x19cc),
+	RADAR_SPEC(3, 60, 20, 46, 300, 640, 80, 4900, 10100, 80, 0,
+		   0x7fffffff, 0x155cc0, 0x19dd),
+	RADAR_SPEC(8, 8, 2, 9, 106, 150, 32, 4900, 296704, 32, 0,
+		   0x7fffffff, 0x2191c0, 0x15cc)
+};
+
+static const struct mt76x2_radar_specs fcc_radar_specs[] = {
+	/* 20MHz */
+	RADAR_SPEC(0, 8, 2, 12, 106, 150, 5, 2900, 80100, 5, 0,
+		   0x7fffffff, 0xfe808, 0x13dc),
+	RADAR_SPEC(0, 8, 2, 7, 106, 140, 5, 27600, 27900, 5, 0,
+		   0x7fffffff, 0xfe808, 0x19dd),
+	RADAR_SPEC(0, 40, 4, 54, 96, 480, 150, 2900, 80100, 40, 0,
+		   0x7fffffff, 0xfe808, 0x12cc),
+	RADAR_SPEC(2, 60, 15, 63, 640, 2080, 32, 19600, 40200, 32, 0,
+		   0x3938700, 0x57bcf00, 0x1289),
+	/* 40MHz */
+	RADAR_SPEC(0, 8, 2, 12, 106, 150, 5, 2900, 80100, 5, 0,
+		   0x7fffffff, 0xfe808, 0x13dc),
+	RADAR_SPEC(0, 8, 2, 7, 106, 140, 5, 27600, 27900, 5, 0,
+		   0x7fffffff, 0xfe808, 0x19dd),
+	RADAR_SPEC(0, 40, 4, 54, 96, 480, 150, 2900, 80100, 40, 0,
+		   0x7fffffff, 0xfe808, 0x12cc),
+	RADAR_SPEC(2, 60, 15, 63, 640, 2080, 32, 19600, 40200, 32, 0,
+		   0x3938700, 0x57bcf00, 0x1289),
+	/* 80MHz */
+	RADAR_SPEC(0, 8, 2, 14, 106, 150, 15, 2900, 80100, 15, 0,
+		   0x7fffffff, 0xfe808, 0x16cc),
+	RADAR_SPEC(0, 8, 2, 7, 106, 140, 5, 27600, 27900, 5, 0,
+		   0x7fffffff, 0xfe808, 0x19dd),
+	RADAR_SPEC(0, 40, 4, 54, 96, 480, 150, 2900, 80100, 40, 0,
+		   0x7fffffff, 0xfe808, 0x12cc),
+	RADAR_SPEC(2, 60, 15, 63, 640, 2080, 32, 19600, 40200, 32, 0,
+		   0x3938700, 0x57bcf00, 0x1289)
+};
+
+static const struct mt76x2_radar_specs jp_w56_radar_specs[] = {
+	/* 20MHz */
+	RADAR_SPEC(0, 8, 2, 7, 106, 150, 5, 2900, 80100, 5, 0,
+		   0x7fffffff, 0x14c080, 0x13dc),
+	RADAR_SPEC(0, 8, 2, 7, 106, 140, 5, 27600, 27900, 5, 0,
+		   0x7fffffff, 0x14c080, 0x19dd),
+	RADAR_SPEC(0, 40, 4, 44, 96, 480, 150, 2900, 80100, 40, 0,
+		   0x7fffffff, 0x14c080, 0x12cc),
+	RADAR_SPEC(2, 60, 15, 48, 940, 2080, 32, 19600, 40200, 32, 0,
+		   0x3938700, 0x57bcf00, 0x1289),
+	/* 40MHz */
+	RADAR_SPEC(0, 8, 2, 7, 106, 150, 5, 2900, 80100, 5, 0,
+		   0x7fffffff, 0x14c080, 0x13dc),
+	RADAR_SPEC(0, 8, 2, 7, 106, 140, 5, 27600, 27900, 5, 0,
+		   0x7fffffff, 0x14c080, 0x19dd),
+	RADAR_SPEC(0, 40, 4, 44, 96, 480, 150, 2900, 80100, 40, 0,
+		   0x7fffffff, 0x14c080, 0x12cc),
+	RADAR_SPEC(2, 60, 15, 48, 940, 2080, 32, 19600, 40200, 32, 0,
+		   0x3938700, 0x57bcf00, 0x1289),
+	/* 80MHz */
+	RADAR_SPEC(0, 8, 2, 9, 106, 150, 15, 2900, 80100, 15, 0,
+		   0x7fffffff, 0x14c080, 0x16cc),
+	RADAR_SPEC(0, 8, 2, 7, 106, 140, 5, 27600, 27900, 5, 0,
+		   0x7fffffff, 0x14c080, 0x19dd),
+	RADAR_SPEC(0, 40, 4, 44, 96, 480, 150, 2900, 80100, 40, 0,
+		   0x7fffffff, 0x14c080, 0x12cc),
+	RADAR_SPEC(2, 60, 15, 48, 940, 2080, 32, 19600, 40200, 32, 0,
+		   0x3938700, 0x57bcf00, 0x1289)
+};
+
+static const struct mt76x2_radar_specs jp_w53_radar_specs[] = {
+	/* 20MHz */
+	RADAR_SPEC(0, 8, 2, 9, 106, 150, 20, 28400, 77000, 20, 0,
+		   0x7fffffff, 0x14c080, 0x16cc),
+	{ 0 },
+	RADAR_SPEC(0, 40, 4, 44, 96, 200, 150, 28400, 77000, 60, 0,
+		   0x7fffffff, 0x14c080, 0x16cc),
+	{ 0 },
+	/* 40MHz */
+	RADAR_SPEC(0, 8, 2, 9, 106, 150, 20, 28400, 77000, 20, 0,
+		   0x7fffffff, 0x14c080, 0x16cc),
+	{ 0 },
+	RADAR_SPEC(0, 40, 4, 44, 96, 200, 150, 28400, 77000, 60, 0,
+		   0x7fffffff, 0x14c080, 0x16cc),
+	{ 0 },
+	/* 80MHz */
+	RADAR_SPEC(0, 8, 2, 9, 106, 150, 20, 28400, 77000, 20, 0,
+		   0x7fffffff, 0x14c080, 0x16cc),
+	{ 0 },
+	RADAR_SPEC(0, 40, 4, 44, 96, 200, 150, 28400, 77000, 60, 0,
+		   0x7fffffff, 0x14c080, 0x16cc),
+	{ 0 }
+};
+
+static void mt76x2_dfs_set_capture_mode_ctrl(struct mt76x2_dev *dev,
+					     u8 enable)
+{
+	u32 data;
+
+	data = (1 << 1) | enable;
+	mt76_wr(dev, MT_BBP(DFS, 36), data);
+}
+
+static bool mt76x2_dfs_check_chirp(struct mt76x2_dev *dev)
+{
+	bool ret = false;
+	u32 current_ts, delta_ts;
+	struct mt76x2_dfs_pattern_detector *dfs_pd = &dev->dfs_pd;
+
+	current_ts = mt76_rr(dev, MT_PBF_LIFE_TIMER);
+	delta_ts = current_ts - dfs_pd->chirp_pulse_ts;
+	dfs_pd->chirp_pulse_ts = current_ts;
+
+	/* 12 sec */
+	if (delta_ts <= (12 * (1 << 20))) {
+		if (++dfs_pd->chirp_pulse_cnt > 8)
+			ret = true;
+	} else {
+		dfs_pd->chirp_pulse_cnt = 1;
+	}
+
+	return ret;
+}
+
+static void mt76x2_dfs_get_hw_pulse(struct mt76x2_dev *dev,
+				    struct mt76x2_dfs_hw_pulse *pulse)
+{
+	u32 data;
+
+	/* select channel */
+	data = (MT_DFS_CH_EN << 16) | pulse->engine;
+	mt76_wr(dev, MT_BBP(DFS, 0), data);
+
+	/* reported period */
+	pulse->period = mt76_rr(dev, MT_BBP(DFS, 19));
+
+	/* reported width */
+	pulse->w1 = mt76_rr(dev, MT_BBP(DFS, 20));
+	pulse->w2 = mt76_rr(dev, MT_BBP(DFS, 23));
+
+	/* reported burst number */
+	pulse->burst = mt76_rr(dev, MT_BBP(DFS, 22));
+}
+
+static bool mt76x2_dfs_check_hw_pulse(struct mt76x2_dev *dev,
+				      struct mt76x2_dfs_hw_pulse *pulse)
+{
+	bool ret = false;
+
+	if (!pulse->period || !pulse->w1)
+		return false;
+
+	switch (dev->dfs_pd.region) {
+	case NL80211_DFS_FCC:
+		if (pulse->engine > 3)
+			break;
+
+		if (pulse->engine == 3) {
+			ret = mt76x2_dfs_check_chirp(dev);
+			break;
+		}
+
+		/* check short pulse */
+		if (pulse->w1 < 120)
+			ret = (pulse->period >= 2900 &&
+			       (pulse->period <= 4700 ||
+				pulse->period >= 6400) &&
+			       (pulse->period <= 6800 ||
+				pulse->period >= 10200) &&
+			       pulse->period <= 61600);
+		else if (pulse->w1 < 130) /* 120 - 130 */
+			ret = (pulse->period >= 2900 &&
+			       pulse->period <= 61600);
+		else
+			ret = (pulse->period >= 3500 &&
+			       pulse->period <= 10100);
+		break;
+	case NL80211_DFS_ETSI:
+		if (pulse->engine >= 3)
+			break;
+
+		ret = (pulse->period >= 4900 &&
+		       (pulse->period <= 10200 ||
+			pulse->period >= 12400) &&
+		       pulse->period <= 100100);
+		break;
+	case NL80211_DFS_JP:
+		if (dev->mt76.chandef.chan->center_freq >= 5250 &&
+		    dev->mt76.chandef.chan->center_freq <= 5350) {
+			/* JPW53 */
+			if (pulse->w1 <= 130)
+				ret = (pulse->period >= 28360 &&
+				       (pulse->period <= 28700 ||
+					pulse->period >= 76900) &&
+				       pulse->period <= 76940);
+			break;
+		}
+
+		if (pulse->engine > 3)
+			break;
+
+		if (pulse->engine == 3) {
+			ret = mt76x2_dfs_check_chirp(dev);
+			break;
+		}
+
+		/* check short pulse */
+		if (pulse->w1 < 120)
+			ret = (pulse->period >= 2900 &&
+			       (pulse->period <= 4700 ||
+				pulse->period >= 6400) &&
+			       (pulse->period <= 6800 ||
+				pulse->period >= 27560) &&
+			       (pulse->period <= 27960 ||
+				pulse->period >= 28360) &&
+			       (pulse->period <= 28700 ||
+				pulse->period >= 79900) &&
+			       pulse->period <= 80100);
+		else if (pulse->w1 < 130) /* 120 - 130 */
+			ret = (pulse->period >= 2900 &&
+			       (pulse->period <= 10100 ||
+				pulse->period >= 27560) &&
+			       (pulse->period <= 27960 ||
+				pulse->period >= 28360) &&
+			       (pulse->period <= 28700 ||
+				pulse->period >= 79900) &&
+			       pulse->period <= 80100);
+		else
+			ret = (pulse->period >= 3900 &&
+			       pulse->period <= 10100);
+		break;
+	case NL80211_DFS_UNSET:
+	default:
+		return false;
+	}
+
+	return ret;
+}
+
+static void mt76x2_dfs_tasklet(unsigned long arg)
+{
+	struct mt76x2_dev *dev = (struct mt76x2_dev *)arg;
+	struct mt76x2_dfs_pattern_detector *dfs_pd = &dev->dfs_pd;
+	u32 engine_mask;
+	int i;
+
+	if (test_bit(MT76_SCANNING, &dev->mt76.state))
+		goto out;
+
+	engine_mask = mt76_rr(dev, MT_BBP(DFS, 1));
+	if (!(engine_mask & 0xf))
+		goto out;
+
+	for (i = 0; i < MT_DFS_NUM_ENGINES; i++) {
+		struct mt76x2_dfs_hw_pulse pulse;
+
+		if (!(engine_mask & (1 << i)))
+			continue;
+
+		pulse.engine = i;
+		mt76x2_dfs_get_hw_pulse(dev, &pulse);
+
+		if (!mt76x2_dfs_check_hw_pulse(dev, &pulse)) {
+			dfs_pd->stats[i].hw_pulse_discarded++;
+			continue;
+		}
+
+		/* hw detector rx radar pattern */
+		dfs_pd->stats[i].hw_pattern++;
+		ieee80211_radar_detected(dev->mt76.hw);
+
+		/* reset hw detector */
+		mt76_wr(dev, MT_BBP(DFS, 1), 0xf);
+
+		return;
+	}
+
+	/* reset hw detector */
+	mt76_wr(dev, MT_BBP(DFS, 1), 0xf);
+
+out:
+	mt76x2_irq_enable(dev, MT_INT_GPTIMER);
+}
+
+static void mt76x2_dfs_set_bbp_params(struct mt76x2_dev *dev)
+{
+	u32 data;
+	u8 i, shift;
+	const struct mt76x2_radar_specs *radar_specs;
+
+	switch (dev->mt76.chandef.width) {
+	case NL80211_CHAN_WIDTH_40:
+		shift = MT_DFS_NUM_ENGINES;
+		break;
+	case NL80211_CHAN_WIDTH_80:
+		shift = 2 * MT_DFS_NUM_ENGINES;
+		break;
+	default:
+		shift = 0;
+		break;
+	}
+
+	switch (dev->dfs_pd.region) {
+	case NL80211_DFS_FCC:
+		radar_specs = &fcc_radar_specs[shift];
+		break;
+	case NL80211_DFS_ETSI:
+		radar_specs = &etsi_radar_specs[shift];
+		break;
+	case NL80211_DFS_JP:
+		if (dev->mt76.chandef.chan->center_freq >= 5250 &&
+		    dev->mt76.chandef.chan->center_freq <= 5350)
+			radar_specs = &jp_w53_radar_specs[shift];
+		else
+			radar_specs = &jp_w56_radar_specs[shift];
+		break;
+	case NL80211_DFS_UNSET:
+	default:
+		return;
+	}
+
+	data = (MT_DFS_VGA_MASK << 16) |
+	       (MT_DFS_PWR_GAIN_OFFSET << 12) |
+	       (MT_DFS_PWR_DOWN_TIME << 8) |
+	       (MT_DFS_SYM_ROUND << 4) |
+	       (MT_DFS_DELTA_DELAY & 0xf);
+	mt76_wr(dev, MT_BBP(DFS, 2), data);
+
+	data = (MT_DFS_RX_PE_MASK << 16) | MT_DFS_PKT_END_MASK;
+	mt76_wr(dev, MT_BBP(DFS, 3), data);
+
+	for (i = 0; i < MT_DFS_NUM_ENGINES; i++) {
+		/* configure engine */
+		mt76_wr(dev, MT_BBP(DFS, 0), i);
+
+		/* detection mode + avg_len */
+		data = ((radar_specs[i].avg_len & 0x1ff) << 16) |
+		       (radar_specs[i].mode & 0xf);
+		mt76_wr(dev, MT_BBP(DFS, 4), data);
+
+		/* dfs energy */
+		data = ((radar_specs[i].e_high & 0x0fff) << 16) |
+		       (radar_specs[i].e_low & 0x0fff);
+		mt76_wr(dev, MT_BBP(DFS, 5), data);
+
+		/* dfs period */
+		mt76_wr(dev, MT_BBP(DFS, 7), radar_specs[i].t_low);
+		mt76_wr(dev, MT_BBP(DFS, 9), radar_specs[i].t_high);
+
+		/* dfs burst */
+		mt76_wr(dev, MT_BBP(DFS, 11), radar_specs[i].b_low);
+		mt76_wr(dev, MT_BBP(DFS, 13), radar_specs[i].b_high);
+
+		/* dfs width */
+		data = ((radar_specs[i].w_high & 0x0fff) << 16) |
+		       (radar_specs[i].w_low & 0x0fff);
+		mt76_wr(dev, MT_BBP(DFS, 14), data);
+
+		/* dfs margins */
+		data = (radar_specs[i].w_margin << 16) |
+		       radar_specs[i].t_margin;
+		mt76_wr(dev, MT_BBP(DFS, 15), data);
+
+		/* dfs event expiration */
+		mt76_wr(dev, MT_BBP(DFS, 17), radar_specs[i].event_expiration);
+
+		/* dfs pwr adj */
+		mt76_wr(dev, MT_BBP(DFS, 30), radar_specs[i].pwr_jmp);
+	}
+
+	/* reset status */
+	mt76_wr(dev, MT_BBP(DFS, 1), 0xf);
+	mt76_wr(dev, MT_BBP(DFS, 36), 0x3);
+
+	/* enable detection */
+	mt76_wr(dev, MT_BBP(DFS, 0), MT_DFS_CH_EN << 16);
+	mt76_wr(dev, 0x212c, 0x0c350001);
+}
+
+void mt76x2_dfs_adjust_agc(struct mt76x2_dev *dev)
+{
+	u32 agc_r8, agc_r4, val_r8, val_r4, dfs_r31;
+
+	agc_r8 = mt76_rr(dev, MT_BBP(AGC, 8));
+	agc_r4 = mt76_rr(dev, MT_BBP(AGC, 4));
+
+	val_r8 = (agc_r8 & 0x00007e00) >> 9;
+	val_r4 = agc_r4 & ~0x1f000000;
+	val_r4 += (((val_r8 + 1) >> 1) << 24);
+	mt76_wr(dev, MT_BBP(AGC, 4), val_r4);
+
+	dfs_r31 = FIELD_GET(MT_BBP_AGC_LNA_HIGH_GAIN, val_r4);
+	dfs_r31 += val_r8;
+	dfs_r31 -= (agc_r8 & 0x00000038) >> 3;
+	dfs_r31 = (dfs_r31 << 16) | 0x00000307;
+	mt76_wr(dev, MT_BBP(DFS, 31), dfs_r31);
+
+	mt76_wr(dev, MT_BBP(DFS, 32), 0x00040071);
+}
+
+void mt76x2_dfs_init_params(struct mt76x2_dev *dev)
+{
+	struct cfg80211_chan_def *chandef = &dev->mt76.chandef;
+
+	tasklet_kill(&dev->dfs_pd.dfs_tasklet);
+	if (chandef->chan->flags & IEEE80211_CHAN_RADAR) {
+		mt76x2_dfs_set_bbp_params(dev);
+		/* enable debug mode */
+		mt76x2_dfs_set_capture_mode_ctrl(dev, true);
+
+		mt76x2_irq_enable(dev, MT_INT_GPTIMER);
+		mt76_rmw_field(dev, MT_INT_TIMER_EN,
+			       MT_INT_TIMER_EN_GP_TIMER_EN, 1);
+	} else {
+		/* disable hw detector */
+		mt76_wr(dev, MT_BBP(DFS, 0), 0);
+		/* clear detector status */
+		mt76_wr(dev, MT_BBP(DFS, 1), 0xf);
+		mt76_wr(dev, 0x212c, 0);
+
+		mt76x2_irq_disable(dev, MT_INT_GPTIMER);
+		mt76_rmw_field(dev, MT_INT_TIMER_EN,
+			       MT_INT_TIMER_EN_GP_TIMER_EN, 0);
+	}
+}
+
+void mt76x2_dfs_init_detector(struct mt76x2_dev *dev)
+{
+	struct mt76x2_dfs_pattern_detector *dfs_pd = &dev->dfs_pd;
+
+	dfs_pd->region = NL80211_DFS_UNSET;
+	tasklet_init(&dfs_pd->dfs_tasklet, mt76x2_dfs_tasklet,
+		     (unsigned long)dev);
+}
+
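
The chirp detector's 12-second window above works because MT_PBF_LIFE_TIMER
appears to tick at roughly 1 MHz (inferred from the /* 12 sec */ comment:
12 * (1 << 20) = 12582912 ticks, about 12.6 s), and because the u32
subtraction is wraparound-safe. A sketch of why the unsigned delta stays
correct across a counter wrap:

    u32 prev_ts    = 0xffffff00;		/* just before the wrap */
    u32 current_ts = 0x00000100;		/* just after the wrap  */
    u32 delta_ts   = current_ts - prev_ts;	/* 0x200: the true gap  */
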
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_dfs.h b/drivers/net/wireless/mediatek/mt76/mt76x2_dfs.h
new file mode 100644
index 0000000..9ac69b6
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2_dfs.h
@@ -0,0 +1,80 @@
+/*
+ * Copyright (C) 2016 Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef __MT76x2_DFS_H
+#define __MT76x2_DFS_H
+
+#include <linux/types.h>
+#include <linux/nl80211.h>
+
+#define MT_DFS_GP_INTERVAL		(10 << 4) /* 64 us unit */
+#define MT_DFS_NUM_ENGINES		4
+
+/* bbp params */
+#define MT_DFS_SYM_ROUND		0
+#define MT_DFS_DELTA_DELAY		2
+#define MT_DFS_VGA_MASK			0
+#define MT_DFS_PWR_GAIN_OFFSET		3
+#define MT_DFS_PWR_DOWN_TIME		0xf
+#define MT_DFS_RX_PE_MASK		0xff
+#define MT_DFS_PKT_END_MASK		0
+#define MT_DFS_CH_EN			0xf
+
+struct mt76x2_radar_specs {
+	u8 mode;
+	u16 avg_len;
+	u16 e_low;
+	u16 e_high;
+	u16 w_low;
+	u16 w_high;
+	u16 w_margin;
+	u32 t_low;
+	u32 t_high;
+	u16 t_margin;
+	u32 b_low;
+	u32 b_high;
+	u32 event_expiration;
+	u16 pwr_jmp;
+};
+
+struct mt76x2_dfs_hw_pulse {
+	u8 engine;
+	u32 period;
+	u32 w1;
+	u32 w2;
+	u32 burst;
+};
+
+struct mt76x2_dfs_engine_stats {
+	u32 hw_pattern;
+	u32 hw_pulse_discarded;
+};
+
+struct mt76x2_dfs_pattern_detector {
+	enum nl80211_dfs_regions region;
+
+	u8 chirp_pulse_cnt;
+	u32 chirp_pulse_ts;
+
+	struct mt76x2_dfs_engine_stats stats[MT_DFS_NUM_ENGINES];
+	struct tasklet_struct dfs_tasklet;
+};
+
+void mt76x2_dfs_init_params(struct mt76x2_dev *dev);
+void mt76x2_dfs_init_detector(struct mt76x2_dev *dev);
+void mt76x2_dfs_adjust_agc(struct mt76x2_dev *dev);
+
+#endif /* __MT76x2_DFS_H */
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_dma.c b/drivers/net/wireless/mediatek/mt76/mt76x2_dma.c
new file mode 100644
index 0000000..0a3f729
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2_dma.c
@@ -0,0 +1,183 @@
+/*
+ * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include "mt76x2.h"
+#include "mt76x2_dma.h"
+
+int
+mt76x2_tx_queue_mcu(struct mt76x2_dev *dev, enum mt76_txq_id qid,
+		    struct sk_buff *skb, int cmd, int seq)
+{
+	struct mt76_queue *q = &dev->mt76.q_tx[qid];
+	struct mt76_queue_buf buf;
+	dma_addr_t addr;
+	u32 tx_info;
+
+	tx_info = MT_MCU_MSG_TYPE_CMD |
+		  FIELD_PREP(MT_MCU_MSG_CMD_TYPE, cmd) |
+		  FIELD_PREP(MT_MCU_MSG_CMD_SEQ, seq) |
+		  FIELD_PREP(MT_MCU_MSG_PORT, CPU_TX_PORT) |
+		  FIELD_PREP(MT_MCU_MSG_LEN, skb->len);
+
+	addr = dma_map_single(dev->mt76.dev, skb->data, skb->len,
+			      DMA_TO_DEVICE);
+	if (dma_mapping_error(dev->mt76.dev, addr))
+		return -ENOMEM;
+
+	buf.addr = addr;
+	buf.len = skb->len;
+	spin_lock_bh(&q->lock);
+	mt76_queue_add_buf(dev, q, &buf, 1, tx_info, skb, NULL);
+	mt76_queue_kick(dev, q);
+	spin_unlock_bh(&q->lock);
+
+	return 0;
+}
+
+static int
+mt76x2_init_tx_queue(struct mt76x2_dev *dev, struct mt76_queue *q,
+		     int idx, int n_desc)
+{
+	int ret;
+
+	q->regs = dev->mt76.regs + MT_TX_RING_BASE + idx * MT_RING_SIZE;
+	q->ndesc = n_desc;
+
+	ret = mt76_queue_alloc(dev, q);
+	if (ret)
+		return ret;
+
+	mt76x2_irq_enable(dev, MT_INT_TX_DONE(idx));
+
+	return 0;
+}
+
+void mt76x2_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q,
+			 struct sk_buff *skb)
+{
+	struct mt76x2_dev *dev = container_of(mdev, struct mt76x2_dev, mt76);
+	void *rxwi = skb->data;
+
+	if (q == MT_RXQ_MCU) {
+		skb_queue_tail(&dev->mcu.res_q, skb);
+		wake_up(&dev->mcu.wait);
+		return;
+	}
+
+	skb_pull(skb, sizeof(struct mt76x2_rxwi));
+	if (mt76x2_mac_process_rx(dev, skb, rxwi)) {
+		dev_kfree_skb(skb);
+		return;
+	}
+
+	mt76_rx(&dev->mt76, q, skb);
+}
+
+static int
+mt76x2_init_rx_queue(struct mt76x2_dev *dev, struct mt76_queue *q,
+		     int idx, int n_desc, int bufsize)
+{
+	int ret;
+
+	q->regs = dev->mt76.regs + MT_RX_RING_BASE + idx * MT_RING_SIZE;
+	q->ndesc = n_desc;
+	q->buf_size = bufsize;
+
+	ret = mt76_queue_alloc(dev, q);
+	if (ret)
+		return ret;
+
+	mt76x2_irq_enable(dev, MT_INT_RX_DONE(idx));
+
+	return 0;
+}
+
+static void
+mt76x2_tx_tasklet(unsigned long data)
+{
+	struct mt76x2_dev *dev = (struct mt76x2_dev *) data;
+	int i;
+
+	mt76x2_mac_process_tx_status_fifo(dev);
+
+	for (i = MT_TXQ_MCU; i >= 0; i--)
+		mt76_queue_tx_cleanup(dev, i, false);
+
+	mt76x2_mac_poll_tx_status(dev, false);
+	mt76x2_irq_enable(dev, MT_INT_TX_DONE_ALL);
+}
+
+int mt76x2_dma_init(struct mt76x2_dev *dev)
+{
+	static const u8 wmm_queue_map[] = {
+		[IEEE80211_AC_BE] = 0,
+		[IEEE80211_AC_BK] = 1,
+		[IEEE80211_AC_VI] = 2,
+		[IEEE80211_AC_VO] = 3,
+	};
+	int ret;
+	int i;
+	struct mt76_txwi_cache __maybe_unused *t;
+	struct mt76_queue *q;
+
+	BUILD_BUG_ON(sizeof(t->txwi) < sizeof(struct mt76x2_txwi));
+	BUILD_BUG_ON(sizeof(struct mt76x2_rxwi) > MT_RX_HEADROOM);
+
+	mt76_dma_attach(&dev->mt76);
+
+	init_waitqueue_head(&dev->mcu.wait);
+	skb_queue_head_init(&dev->mcu.res_q);
+
+	tasklet_init(&dev->tx_tasklet, mt76x2_tx_tasklet, (unsigned long) dev);
+
+	mt76_wr(dev, MT_WPDMA_RST_IDX, ~0);
+
+	for (i = 0; i < ARRAY_SIZE(wmm_queue_map); i++) {
+		ret = mt76x2_init_tx_queue(dev, &dev->mt76.q_tx[i],
+					   wmm_queue_map[i], MT_TX_RING_SIZE);
+		if (ret)
+			return ret;
+	}
+
+	ret = mt76x2_init_tx_queue(dev, &dev->mt76.q_tx[MT_TXQ_PSD],
+				   MT_TX_HW_QUEUE_MGMT, MT_TX_RING_SIZE);
+	if (ret)
+		return ret;
+
+	ret = mt76x2_init_tx_queue(dev, &dev->mt76.q_tx[MT_TXQ_MCU],
+				   MT_TX_HW_QUEUE_MCU, MT_MCU_RING_SIZE);
+	if (ret)
+		return ret;
+
+	ret = mt76x2_init_rx_queue(dev, &dev->mt76.q_rx[MT_RXQ_MCU], 1,
+				   MT_MCU_RING_SIZE, MT_RX_BUF_SIZE);
+	if (ret)
+		return ret;
+
+	q = &dev->mt76.q_rx[MT_RXQ_MAIN];
+	q->buf_offset = MT_RX_HEADROOM - sizeof(struct mt76x2_rxwi);
+	ret = mt76x2_init_rx_queue(dev, q, 0, MT76x2_RX_RING_SIZE, MT_RX_BUF_SIZE);
+	if (ret)
+		return ret;
+
+	return mt76_init_queues(dev);
+}
+
+void mt76x2_dma_cleanup(struct mt76x2_dev *dev)
+{
+	tasklet_kill(&dev->tx_tasklet);
+	mt76_dma_cleanup(&dev->mt76);
+}
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_dma.h b/drivers/net/wireless/mediatek/mt76/mt76x2_dma.h
new file mode 100644
index 0000000..47f79d8
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2_dma.h
@@ -0,0 +1,68 @@
+/*
+ * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef __MT76x2_DMA_H
+#define __MT76x2_DMA_H
+
+#include "dma.h"
+
+#define MT_TXD_INFO_LEN			GENMASK(13, 0)
+#define MT_TXD_INFO_NEXT_VLD		BIT(16)
+#define MT_TXD_INFO_TX_BURST		BIT(17)
+#define MT_TXD_INFO_80211		BIT(19)
+#define MT_TXD_INFO_TSO			BIT(20)
+#define MT_TXD_INFO_CSO			BIT(21)
+#define MT_TXD_INFO_WIV			BIT(24)
+#define MT_TXD_INFO_QSEL		GENMASK(26, 25)
+#define MT_TXD_INFO_TCO			BIT(29)
+#define MT_TXD_INFO_UCO			BIT(30)
+#define MT_TXD_INFO_ICO			BIT(31)
+
+#define MT_RX_FCE_INFO_LEN		GENMASK(13, 0)
+#define MT_RX_FCE_INFO_SELF_GEN		BIT(15)
+#define MT_RX_FCE_INFO_CMD_SEQ		GENMASK(19, 16)
+#define MT_RX_FCE_INFO_EVT_TYPE		GENMASK(23, 20)
+#define MT_RX_FCE_INFO_PCIE_INTR	BIT(24)
+#define MT_RX_FCE_INFO_QSEL		GENMASK(26, 25)
+#define MT_RX_FCE_INFO_D_PORT		GENMASK(29, 27)
+#define MT_RX_FCE_INFO_TYPE		GENMASK(31, 30)
+
+/* MCU request message header */
+#define MT_MCU_MSG_LEN			GENMASK(15, 0)
+#define MT_MCU_MSG_CMD_SEQ		GENMASK(19, 16)
+#define MT_MCU_MSG_CMD_TYPE		GENMASK(26, 20)
+#define MT_MCU_MSG_PORT			GENMASK(29, 27)
+#define MT_MCU_MSG_TYPE			GENMASK(31, 30)
+#define MT_MCU_MSG_TYPE_CMD		BIT(30)
+
+enum mt76x2_qsel {
+	MT_QSEL_MGMT,
+	MT_QSEL_HCCA,
+	MT_QSEL_EDCA,
+	MT_QSEL_EDCA_2,
+};
+
+enum dma_msg_port {
+	WLAN_PORT,
+	CPU_RX_PORT,
+	CPU_TX_PORT,
+	HOST_PORT,
+	VIRTUAL_CPU_RX_PORT,
+	VIRTUAL_CPU_TX_PORT,
+	DISCARD,
+};
+
+#endif
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_eeprom.c b/drivers/net/wireless/mediatek/mt76/mt76x2_eeprom.c
new file mode 100644
index 0000000..440b7e7
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2_eeprom.c
@@ -0,0 +1,647 @@
+/*
+ * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <asm/unaligned.h>
+#include "mt76x2.h"
+#include "mt76x2_eeprom.h"
+
+#define EE_FIELD(_name, _value) [MT_EE_##_name] = (_value) | 1
+
+static int
+mt76x2_eeprom_copy(struct mt76x2_dev *dev, enum mt76x2_eeprom_field field,
+		   void *dest, int len)
+{
+	if (field + len > dev->mt76.eeprom.size)
+		return -1;
+
+	memcpy(dest, dev->mt76.eeprom.data + field, len);
+	return 0;
+}
+
+static int
+mt76x2_eeprom_get_macaddr(struct mt76x2_dev *dev)
+{
+	void *src = dev->mt76.eeprom.data + MT_EE_MAC_ADDR;
+
+	memcpy(dev->mt76.macaddr, src, ETH_ALEN);
+	return 0;
+}
+
+static void
+mt76x2_eeprom_parse_hw_cap(struct mt76x2_dev *dev)
+{
+	u16 val = mt76x2_eeprom_get(dev, MT_EE_NIC_CONF_0);
+
+	switch (FIELD_GET(MT_EE_NIC_CONF_0_BOARD_TYPE, val)) {
+	case BOARD_TYPE_5GHZ:
+		dev->mt76.cap.has_5ghz = true;
+		break;
+	case BOARD_TYPE_2GHZ:
+		dev->mt76.cap.has_2ghz = true;
+		break;
+	default:
+		dev->mt76.cap.has_2ghz = true;
+		dev->mt76.cap.has_5ghz = true;
+		break;
+	}
+}
+
+static int
+mt76x2_efuse_read(struct mt76x2_dev *dev, u16 addr, u8 *data)
+{
+	u32 val;
+	int i;
+
+	val = mt76_rr(dev, MT_EFUSE_CTRL);
+	val &= ~(MT_EFUSE_CTRL_AIN |
+		 MT_EFUSE_CTRL_MODE);
+	val |= FIELD_PREP(MT_EFUSE_CTRL_AIN, addr & ~0xf);
+	val |= MT_EFUSE_CTRL_KICK;
+	mt76_wr(dev, MT_EFUSE_CTRL, val);
+
+	if (!mt76_poll(dev, MT_EFUSE_CTRL, MT_EFUSE_CTRL_KICK, 0, 1000))
+		return -ETIMEDOUT;
+
+	udelay(2);
+
+	val = mt76_rr(dev, MT_EFUSE_CTRL);
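+	/* all bits set in AOUT: treat the efuse block as blank and
+	 * return all-ones data
+	 */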
+	if ((val & MT_EFUSE_CTRL_AOUT) == MT_EFUSE_CTRL_AOUT) {
+		memset(data, 0xff, 16);
+		return 0;
+	}
+
+	for (i = 0; i < 4; i++) {
+		val = mt76_rr(dev, MT_EFUSE_DATA(i));
+		put_unaligned_le32(val, data + 4 * i);
+	}
+
+	return 0;
+}
+
+static int
+mt76x2_get_efuse_data(struct mt76x2_dev *dev, void *buf, int len)
+{
+	int ret, i;
+
+	for (i = 0; i + 16 <= len; i += 16) {
+		ret = mt76x2_efuse_read(dev, i, buf + i);
+		if (ret)
+			return ret;
+	}
+
+	return 0;
+}
+
+static bool
+mt76x2_has_cal_free_data(struct mt76x2_dev *dev, u8 *efuse)
+{
+	u16 *efuse_w = (u16 *) efuse;
+
+	if (efuse_w[MT_EE_NIC_CONF_0] != 0)
+		return false;
+
+	if (efuse_w[MT_EE_XTAL_TRIM_1] == 0xffff)
+		return false;
+
+	if (efuse_w[MT_EE_TX_POWER_DELTA_BW40] != 0)
+		return false;
+
+	if (efuse_w[MT_EE_TX_POWER_0_START_2G] == 0xffff)
+		return false;
+
+	if (efuse_w[MT_EE_TX_POWER_0_GRP3_TX_POWER_DELTA] != 0)
+		return false;
+
+	if (efuse_w[MT_EE_TX_POWER_0_GRP4_TSSI_SLOPE] == 0xffff)
+		return false;
+
+	return true;
+}
+
+static void
+mt76x2_apply_cal_free_data(struct mt76x2_dev *dev, u8 *efuse)
+{
+#define GROUP_5G(_id)							   \
+	MT_EE_TX_POWER_0_START_5G + MT_TX_POWER_GROUP_SIZE_5G * (_id),	   \
+	MT_EE_TX_POWER_0_START_5G + MT_TX_POWER_GROUP_SIZE_5G * (_id) + 1, \
+	MT_EE_TX_POWER_1_START_5G + MT_TX_POWER_GROUP_SIZE_5G * (_id),	   \
+	MT_EE_TX_POWER_1_START_5G + MT_TX_POWER_GROUP_SIZE_5G * (_id) + 1
+
+	static const u8 cal_free_bytes[] = {
+		MT_EE_XTAL_TRIM_1,
+		MT_EE_TX_POWER_EXT_PA_5G + 1,
+		MT_EE_TX_POWER_0_START_2G,
+		MT_EE_TX_POWER_0_START_2G + 1,
+		MT_EE_TX_POWER_1_START_2G,
+		MT_EE_TX_POWER_1_START_2G + 1,
+		GROUP_5G(0),
+		GROUP_5G(1),
+		GROUP_5G(2),
+		GROUP_5G(3),
+		GROUP_5G(4),
+		GROUP_5G(5),
+		MT_EE_RF_2G_TSSI_OFF_TXPOWER,
+		MT_EE_RF_2G_RX_HIGH_GAIN + 1,
+		MT_EE_RF_5G_GRP0_1_RX_HIGH_GAIN,
+		MT_EE_RF_5G_GRP0_1_RX_HIGH_GAIN + 1,
+		MT_EE_RF_5G_GRP2_3_RX_HIGH_GAIN,
+		MT_EE_RF_5G_GRP2_3_RX_HIGH_GAIN + 1,
+		MT_EE_RF_5G_GRP4_5_RX_HIGH_GAIN,
+		MT_EE_RF_5G_GRP4_5_RX_HIGH_GAIN + 1,
+	};
+	u8 *eeprom = dev->mt76.eeprom.data;
+	u8 prev_grp0[4] = {
+		eeprom[MT_EE_TX_POWER_0_START_5G],
+		eeprom[MT_EE_TX_POWER_0_START_5G + 1],
+		eeprom[MT_EE_TX_POWER_1_START_5G],
+		eeprom[MT_EE_TX_POWER_1_START_5G + 1]
+	};
+	u16 val;
+	int i;
+
+	if (!mt76x2_has_cal_free_data(dev, efuse))
+		return;
+
+	for (i = 0; i < ARRAY_SIZE(cal_free_bytes); i++) {
+		int offset = cal_free_bytes[i];
+
+		eeprom[offset] = efuse[offset];
+	}
+
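+	/* restore the group 0 5 GHz power values if the efuse copies are empty */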
+	if (!(efuse[MT_EE_TX_POWER_0_START_5G] |
+	      efuse[MT_EE_TX_POWER_0_START_5G + 1]))
+		memcpy(eeprom + MT_EE_TX_POWER_0_START_5G, prev_grp0, 2);
+	if (!(efuse[MT_EE_TX_POWER_1_START_5G] |
+	      efuse[MT_EE_TX_POWER_1_START_5G + 1]))
+		memcpy(eeprom + MT_EE_TX_POWER_1_START_5G, prev_grp0 + 2, 2);
+
+	val = get_unaligned_le16(efuse + MT_EE_BT_RCAL_RESULT);
+	if (val != 0xffff)
+		eeprom[MT_EE_BT_RCAL_RESULT] = val & 0xff;
+
+	val = get_unaligned_le16(efuse + MT_EE_BT_VCDL_CALIBRATION);
+	if (val != 0xffff)
+		eeprom[MT_EE_BT_VCDL_CALIBRATION + 1] = val >> 8;
+
+	val = get_unaligned_le16(efuse + MT_EE_BT_PMUCFG);
+	if (val != 0xffff)
+		eeprom[MT_EE_BT_PMUCFG] = val & 0xff;
+}
+
+static int mt76x2_check_eeprom(struct mt76x2_dev *dev)
+{
+	u16 val = get_unaligned_le16(dev->mt76.eeprom.data);
+
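+	/* the chip ID word may be left blank; fall back to the PCI ID field */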
+	if (!val)
+		val = get_unaligned_le16(dev->mt76.eeprom.data + MT_EE_PCI_ID);
+
+	switch (val) {
+	case 0x7662:
+	case 0x7612:
+		return 0;
+	default:
+		dev_err(dev->mt76.dev, "EEPROM data check failed: %04x\n", val);
+		return -EINVAL;
+	}
+}
+
+static int
+mt76x2_eeprom_load(struct mt76x2_dev *dev)
+{
+	void *efuse;
+	int len = MT7662_EEPROM_SIZE;
+	bool found;
+	int ret;
+
+	ret = mt76_eeprom_init(&dev->mt76, len);
+	if (ret < 0)
+		return ret;
+
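+	/* a positive return value indicates the EEPROM contents were found */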
+	found = ret;
+	if (found)
+		found = !mt76x2_check_eeprom(dev);
+
+	dev->mt76.otp.data = devm_kzalloc(dev->mt76.dev, len, GFP_KERNEL);
+	dev->mt76.otp.size = len;
+	if (!dev->mt76.otp.data)
+		return -ENOMEM;
+
+	efuse = dev->mt76.otp.data;
+
+	if (mt76x2_get_efuse_data(dev, efuse, len))
+		goto out;
+
+	if (found) {
+		mt76x2_apply_cal_free_data(dev, efuse);
+	} else {
+		/* FIXME: check if efuse data is complete */
+		found = true;
+		memcpy(dev->mt76.eeprom.data, efuse, len);
+	}
+
+out:
+	if (!found)
+		return -ENOENT;
+
+	return 0;
+}
+
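+/*
+ * Several EEPROM fields use a sign-magnitude encoding: the top bit of the
+ * field holds the sign (set means positive) and the remaining bits the
+ * magnitude.
+ */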
+static inline int
+mt76x2_sign_extend(u32 val, unsigned int size)
+{
+	bool sign = val & BIT(size - 1);
+
+	val &= BIT(size - 1) - 1;
+
+	return sign ? val : -val;
+}
+
+static inline int
+mt76x2_sign_extend_optional(u32 val, unsigned int size)
+{
+	bool enable = val & BIT(size);
+
+	return enable ? mt76x2_sign_extend(val, size) : 0;
+}
+
+static bool
+field_valid(u8 val)
+{
+	return val != 0 && val != 0xff;
+}
+
+static void
+mt76x2_set_rx_gain_group(struct mt76x2_dev *dev, u8 val)
+{
+	s8 *dest = dev->cal.rx.high_gain;
+
+	if (!field_valid(val)) {
+		dest[0] = 0;
+		dest[1] = 0;
+		return;
+	}
+
+	dest[0] = mt76x2_sign_extend(val, 4);
+	dest[1] = mt76x2_sign_extend(val >> 4, 4);
+}
+
+static void
+mt76x2_set_rssi_offset(struct mt76x2_dev *dev, int chain, u8 val)
+{
+	s8 *dest = dev->cal.rx.rssi_offset;
+
+	if (!field_valid(val)) {
+		dest[chain] = 0;
+		return;
+	}
+
+	dest[chain] = mt76x2_sign_extend_optional(val, 7);
+}
+
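+/*
+ * The Japan band (channels 184-196) must be matched before the UNII
+ * ranges below, which only use upper-bound comparisons.
+ */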
+static enum mt76x2_cal_channel_group
+mt76x2_get_cal_channel_group(int channel)
+{
+	if (channel >= 184 && channel <= 196)
+		return MT_CH_5G_JAPAN;
+	if (channel <= 48)
+		return MT_CH_5G_UNII_1;
+	if (channel <= 64)
+		return MT_CH_5G_UNII_2;
+	if (channel <= 114)
+		return MT_CH_5G_UNII_2E_1;
+	if (channel <= 144)
+		return MT_CH_5G_UNII_2E_2;
+	return MT_CH_5G_UNII_3;
+}
+
+static u8
+mt76x2_get_5g_rx_gain(struct mt76x2_dev *dev, u8 channel)
+{
+	enum mt76x2_cal_channel_group group;
+
+	group = mt76x2_get_cal_channel_group(channel);
+	switch (group) {
+	case MT_CH_5G_JAPAN:
+		return mt76x2_eeprom_get(dev, MT_EE_RF_5G_GRP0_1_RX_HIGH_GAIN);
+	case MT_CH_5G_UNII_1:
+		return mt76x2_eeprom_get(dev, MT_EE_RF_5G_GRP0_1_RX_HIGH_GAIN) >> 8;
+	case MT_CH_5G_UNII_2:
+		return mt76x2_eeprom_get(dev, MT_EE_RF_5G_GRP2_3_RX_HIGH_GAIN);
+	case MT_CH_5G_UNII_2E_1:
+		return mt76x2_eeprom_get(dev, MT_EE_RF_5G_GRP2_3_RX_HIGH_GAIN) >> 8;
+	case MT_CH_5G_UNII_2E_2:
+		return mt76x2_eeprom_get(dev, MT_EE_RF_5G_GRP4_5_RX_HIGH_GAIN);
+	default:
+		return mt76x2_eeprom_get(dev, MT_EE_RF_5G_GRP4_5_RX_HIGH_GAIN) >> 8;
+	}
+}
+
+void mt76x2_read_rx_gain(struct mt76x2_dev *dev)
+{
+	struct ieee80211_channel *chan = dev->mt76.chandef.chan;
+	int channel = chan->hw_value;
+	s8 lna_5g[3], lna_2g;
+	u8 lna;
+	u16 val;
+
+	if (chan->band == NL80211_BAND_2GHZ)
+		val = mt76x2_eeprom_get(dev, MT_EE_RF_2G_RX_HIGH_GAIN) >> 8;
+	else
+		val = mt76x2_get_5g_rx_gain(dev, channel);
+
+	mt76x2_set_rx_gain_group(dev, val);
+
+	if (chan->band == NL80211_BAND_2GHZ) {
+		val = mt76x2_eeprom_get(dev, MT_EE_RSSI_OFFSET_2G_0);
+		mt76x2_set_rssi_offset(dev, 0, val);
+		mt76x2_set_rssi_offset(dev, 1, val >> 8);
+	} else {
+		val = mt76x2_eeprom_get(dev, MT_EE_RSSI_OFFSET_5G_0);
+		mt76x2_set_rssi_offset(dev, 0, val);
+		mt76x2_set_rssi_offset(dev, 1, val >> 8);
+	}
+
+	val = mt76x2_eeprom_get(dev, MT_EE_LNA_GAIN);
+	lna_2g = val & 0xff;
+	lna_5g[0] = val >> 8;
+
+	val = mt76x2_eeprom_get(dev, MT_EE_RSSI_OFFSET_2G_1);
+	lna_5g[1] = val >> 8;
+
+	val = mt76x2_eeprom_get(dev, MT_EE_RSSI_OFFSET_5G_1);
+	lna_5g[2] = val >> 8;
+
+	if (!field_valid(lna_5g[1]))
+		lna_5g[1] = lna_5g[0];
+
+	if (!field_valid(lna_5g[2]))
+		lna_5g[2] = lna_5g[0];
+
+	dev->cal.rx.mcu_gain =  (lna_2g & 0xff);
+	dev->cal.rx.mcu_gain |= (lna_5g[0] & 0xff) << 8;
+	dev->cal.rx.mcu_gain |= (lna_5g[1] & 0xff) << 16;
+	dev->cal.rx.mcu_gain |= (lna_5g[2] & 0xff) << 24;
+
+	val = mt76x2_eeprom_get(dev, MT_EE_NIC_CONF_1);
+	if (val & MT_EE_NIC_CONF_1_LNA_EXT_2G)
+		lna_2g = 0;
+	if (val & MT_EE_NIC_CONF_1_LNA_EXT_5G)
+		memset(lna_5g, 0, sizeof(lna_5g));
+
+	if (chan->band == NL80211_BAND_2GHZ)
+		lna = lna_2g;
+	else if (channel <= 64)
+		lna = lna_5g[0];
+	else if (channel <= 128)
+		lna = lna_5g[1];
+	else
+		lna = lna_5g[2];
+
+	if (lna == 0xff)
+		lna = 0;
+
+	dev->cal.rx.lna_gain = mt76x2_sign_extend(lna, 8);
+}
+
+static s8
+mt76x2_rate_power_val(u8 val)
+{
+	if (!field_valid(val))
+		return 0;
+
+	return mt76x2_sign_extend_optional(val, 7);
+}
+
+void mt76x2_get_rate_power(struct mt76x2_dev *dev, struct mt76_rate_power *t)
+{
+	bool is_5ghz;
+	u16 val;
+
+	is_5ghz = dev->mt76.chandef.chan->band == NL80211_BAND_5GHZ;
+
+	memset(t, 0, sizeof(*t));
+
+	val = mt76x2_eeprom_get(dev, MT_EE_TX_POWER_CCK);
+	t->cck[0] = t->cck[1] = mt76x2_rate_power_val(val);
+	t->cck[2] = t->cck[3] = mt76x2_rate_power_val(val >> 8);
+
+	if (is_5ghz)
+		val = mt76x2_eeprom_get(dev, MT_EE_TX_POWER_OFDM_5G_6M);
+	else
+		val = mt76x2_eeprom_get(dev, MT_EE_TX_POWER_OFDM_2G_6M);
+	t->ofdm[0] = t->ofdm[1] = mt76x2_rate_power_val(val);
+	t->ofdm[2] = t->ofdm[3] = mt76x2_rate_power_val(val >> 8);
+
+	if (is_5ghz)
+		val = mt76x2_eeprom_get(dev, MT_EE_TX_POWER_OFDM_5G_24M);
+	else
+		val = mt76x2_eeprom_get(dev, MT_EE_TX_POWER_OFDM_2G_24M);
+	t->ofdm[4] = t->ofdm[5] = mt76x2_rate_power_val(val);
+	t->ofdm[6] = t->ofdm[7] = mt76x2_rate_power_val(val >> 8);
+
+	val = mt76x2_eeprom_get(dev, MT_EE_TX_POWER_HT_MCS0);
+	t->ht[0] = t->ht[1] = mt76x2_rate_power_val(val);
+	t->ht[2] = t->ht[3] = mt76x2_rate_power_val(val >> 8);
+
+	val = mt76x2_eeprom_get(dev, MT_EE_TX_POWER_HT_MCS4);
+	t->ht[4] = t->ht[5] = mt76x2_rate_power_val(val);
+	t->ht[6] = t->ht[7] = mt76x2_rate_power_val(val >> 8);
+
+	val = mt76x2_eeprom_get(dev, MT_EE_TX_POWER_HT_MCS8);
+	t->ht[8] = t->ht[9] = mt76x2_rate_power_val(val);
+	t->ht[10] = t->ht[11] = mt76x2_rate_power_val(val >> 8);
+
+	val = mt76x2_eeprom_get(dev, MT_EE_TX_POWER_HT_MCS12);
+	t->ht[12] = t->ht[13] = mt76x2_rate_power_val(val);
+	t->ht[14] = t->ht[15] = mt76x2_rate_power_val(val >> 8);
+
+	val = mt76x2_eeprom_get(dev, MT_EE_TX_POWER_VHT_MCS0);
+	t->vht[0] = t->vht[1] = mt76x2_rate_power_val(val);
+	t->vht[2] = t->vht[3] = mt76x2_rate_power_val(val >> 8);
+
+	val = mt76x2_eeprom_get(dev, MT_EE_TX_POWER_VHT_MCS4);
+	t->vht[4] = t->vht[5] = mt76x2_rate_power_val(val);
+	t->vht[6] = t->vht[7] = mt76x2_rate_power_val(val >> 8);
+
+	val = mt76x2_eeprom_get(dev, MT_EE_TX_POWER_VHT_MCS8);
+	if (!is_5ghz)
+		val >>= 8;
+	t->vht[8] = t->vht[9] = mt76x2_rate_power_val(val >> 8);
+}
+
+static void
+mt76x2_get_power_info_2g(struct mt76x2_dev *dev, struct mt76x2_tx_power_info *t,
+			 int chain, int offset)
+{
+	int channel = dev->mt76.chandef.chan->hw_value;
+	int delta_idx;
+	u8 data[6];
+	u16 val;
+
+	if (channel < 6)
+		delta_idx = 3;
+	else if (channel < 11)
+		delta_idx = 4;
+	else
+		delta_idx = 5;
+
+	mt76x2_eeprom_copy(dev, offset, data, sizeof(data));
+
+	t->chain[chain].tssi_slope = data[0];
+	t->chain[chain].tssi_offset = data[1];
+	t->chain[chain].target_power = data[2];
+	t->chain[chain].delta = mt76x2_sign_extend_optional(data[delta_idx], 7);
+
+	val = mt76x2_eeprom_get(dev, MT_EE_RF_2G_TSSI_OFF_TXPOWER);
+	t->target_power = val >> 8;
+}
+
+static void
+mt76x2_get_power_info_5g(struct mt76x2_dev *dev, struct mt76x2_tx_power_info *t,
+			 int chain, int offset)
+{
+	int channel = dev->mt76.chandef.chan->hw_value;
+	enum mt76x2_cal_channel_group group;
+	int delta_idx;
+	u16 val;
+	u8 data[5];
+
+	group = mt76x2_get_cal_channel_group(channel);
+	offset += group * MT_TX_POWER_GROUP_SIZE_5G;
+
+	if (channel >= 192)
+		delta_idx = 4;
+	else if (channel >= 184)
+		delta_idx = 3;
+	else if (channel < 44)
+		delta_idx = 3;
+	else if (channel < 52)
+		delta_idx = 4;
+	else if (channel < 58)
+		delta_idx = 3;
+	else if (channel < 98)
+		delta_idx = 4;
+	else if (channel < 106)
+		delta_idx = 3;
+	else if (channel < 116)
+		delta_idx = 4;
+	else if (channel < 130)
+		delta_idx = 3;
+	else if (channel < 149)
+		delta_idx = 4;
+	else if (channel < 157)
+		delta_idx = 3;
+	else
+		delta_idx = 4;
+
+	mt76x2_eeprom_copy(dev, offset, data, sizeof(data));
+
+	t->chain[chain].tssi_slope = data[0];
+	t->chain[chain].tssi_offset = data[1];
+	t->chain[chain].target_power = data[2];
+	t->chain[chain].delta = mt76x2_sign_extend_optional(data[delta_idx], 7);
+
+	val = mt76x2_eeprom_get(dev, MT_EE_RF_2G_RX_HIGH_GAIN);
+	t->target_power = val & 0xff;
+}
+
+void mt76x2_get_power_info(struct mt76x2_dev *dev,
+			   struct mt76x2_tx_power_info *t)
+{
+	u16 bw40, bw80;
+
+	memset(t, 0, sizeof(*t));
+
+	bw40 = mt76x2_eeprom_get(dev, MT_EE_TX_POWER_DELTA_BW40);
+	bw80 = mt76x2_eeprom_get(dev, MT_EE_TX_POWER_DELTA_BW80);
+
+	if (dev->mt76.chandef.chan->band == NL80211_BAND_5GHZ) {
+		bw40 >>= 8;
+		mt76x2_get_power_info_5g(dev, t, 0, MT_EE_TX_POWER_0_START_5G);
+		mt76x2_get_power_info_5g(dev, t, 1, MT_EE_TX_POWER_1_START_5G);
+	} else {
+		mt76x2_get_power_info_2g(dev, t, 0, MT_EE_TX_POWER_0_START_2G);
+		mt76x2_get_power_info_2g(dev, t, 1, MT_EE_TX_POWER_1_START_2G);
+	}
+
+	if (mt76x2_tssi_enabled(dev) || !field_valid(t->target_power))
+		t->target_power = t->chain[0].target_power;
+
+	t->delta_bw40 = mt76x2_rate_power_val(bw40);
+	t->delta_bw80 = mt76x2_rate_power_val(bw80);
+}
+
+int mt76x2_get_temp_comp(struct mt76x2_dev *dev, struct mt76x2_temp_comp *t)
+{
+	enum nl80211_band band = dev->mt76.chandef.chan->band;
+	u16 val, slope;
+	u8 bounds;
+
+	memset(t, 0, sizeof(*t));
+
+	val = mt76x2_eeprom_get(dev, MT_EE_NIC_CONF_1);
+	if (!(val & MT_EE_NIC_CONF_1_TEMP_TX_ALC))
+		return -EINVAL;
+
+	if (!mt76x2_ext_pa_enabled(dev, band))
+		return -EINVAL;
+
+	val = mt76x2_eeprom_get(dev, MT_EE_TX_POWER_EXT_PA_5G) >> 8;
+	if (!(val & BIT(7)))
+		return -EINVAL;
+
+	t->temp_25_ref = val & 0x7f;
+	if (band == NL80211_BAND_5GHZ) {
+		slope = mt76x2_eeprom_get(dev, MT_EE_RF_TEMP_COMP_SLOPE_5G);
+		bounds = mt76x2_eeprom_get(dev, MT_EE_TX_POWER_EXT_PA_5G);
+	} else {
+		slope = mt76x2_eeprom_get(dev, MT_EE_RF_TEMP_COMP_SLOPE_2G);
+		bounds = mt76x2_eeprom_get(dev, MT_EE_TX_POWER_DELTA_BW80) >> 8;
+	}
+
+	t->high_slope = slope & 0xff;
+	t->low_slope = slope >> 8;
+	t->lower_bound = 0 - (bounds & 0xf);
+	t->upper_bound = (bounds >> 4) & 0xf;
+
+	return 0;
+}
+
+bool mt76x2_ext_pa_enabled(struct mt76x2_dev *dev, enum nl80211_band band)
+{
+	u16 conf0 = mt76x2_eeprom_get(dev, MT_EE_NIC_CONF_0);
+
+	if (band == NL80211_BAND_5GHZ)
+		return !(conf0 & MT_EE_NIC_CONF_0_PA_INT_5G);
+	else
+		return !(conf0 & MT_EE_NIC_CONF_0_PA_INT_2G);
+}
+
+int mt76x2_eeprom_init(struct mt76x2_dev *dev)
+{
+	int ret;
+
+	ret = mt76x2_eeprom_load(dev);
+	if (ret)
+		return ret;
+
+	mt76x2_eeprom_parse_hw_cap(dev);
+	mt76x2_eeprom_get_macaddr(dev);
+	mt76_eeprom_override(&dev->mt76);
+	dev->mt76.macaddr[0] &= ~BIT(1);
+
+	return 0;
+}
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_eeprom.h b/drivers/net/wireless/mediatek/mt76/mt76x2_eeprom.h
new file mode 100644
index 0000000..063d6c84
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2_eeprom.h
@@ -0,0 +1,182 @@
+/*
+ * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef __MT76x2_EEPROM_H
+#define __MT76x2_EEPROM_H
+
+#include "mt76x2.h"
+
+enum mt76x2_eeprom_field {
+	MT_EE_CHIP_ID =				0x000,
+	MT_EE_VERSION =				0x002,
+	MT_EE_MAC_ADDR =			0x004,
+	MT_EE_PCI_ID =				0x00A,
+	MT_EE_NIC_CONF_0 =			0x034,
+	MT_EE_NIC_CONF_1 =			0x036,
+	MT_EE_NIC_CONF_2 =			0x042,
+
+	MT_EE_XTAL_TRIM_1 =			0x03a,
+	MT_EE_XTAL_TRIM_2 =			0x09e,
+
+	MT_EE_LNA_GAIN =			0x044,
+	MT_EE_RSSI_OFFSET_2G_0 =		0x046,
+	MT_EE_RSSI_OFFSET_2G_1 =		0x048,
+	MT_EE_RSSI_OFFSET_5G_0 =		0x04a,
+	MT_EE_RSSI_OFFSET_5G_1 =		0x04c,
+
+	MT_EE_TX_POWER_DELTA_BW40 =		0x050,
+	MT_EE_TX_POWER_DELTA_BW80 =		0x052,
+
+	MT_EE_TX_POWER_EXT_PA_5G =		0x054,
+
+	MT_EE_TX_POWER_0_START_2G =		0x056,
+	MT_EE_TX_POWER_1_START_2G =		0x05c,
+
+	/* used as byte arrays */
+#define MT_TX_POWER_GROUP_SIZE_5G		5
+#define MT_TX_POWER_GROUPS_5G			6
+	MT_EE_TX_POWER_0_START_5G =		0x062,
+
+	MT_EE_TX_POWER_0_GRP3_TX_POWER_DELTA =	0x074,
+	MT_EE_TX_POWER_0_GRP4_TSSI_SLOPE =	0x076,
+
+	MT_EE_TX_POWER_1_START_5G =		0x080,
+
+	MT_EE_TX_POWER_CCK =			0x0a0,
+	MT_EE_TX_POWER_OFDM_2G_6M =		0x0a2,
+	MT_EE_TX_POWER_OFDM_2G_24M =		0x0a4,
+	MT_EE_TX_POWER_OFDM_5G_6M =		0x0b2,
+	MT_EE_TX_POWER_OFDM_5G_24M =		0x0b4,
+	MT_EE_TX_POWER_HT_MCS0 =		0x0a6,
+	MT_EE_TX_POWER_HT_MCS4 =		0x0a8,
+	MT_EE_TX_POWER_HT_MCS8 =		0x0aa,
+	MT_EE_TX_POWER_HT_MCS12 =		0x0ac,
+	MT_EE_TX_POWER_VHT_MCS0 =		0x0ba,
+	MT_EE_TX_POWER_VHT_MCS4 =		0x0bc,
+	MT_EE_TX_POWER_VHT_MCS8 =		0x0be,
+
+	MT_EE_RF_TEMP_COMP_SLOPE_5G =		0x0f2,
+	MT_EE_RF_TEMP_COMP_SLOPE_2G =		0x0f4,
+
+	MT_EE_RF_2G_TSSI_OFF_TXPOWER =		0x0f6,
+	MT_EE_RF_2G_RX_HIGH_GAIN =		0x0f8,
+	MT_EE_RF_5G_GRP0_1_RX_HIGH_GAIN =	0x0fa,
+	MT_EE_RF_5G_GRP2_3_RX_HIGH_GAIN =	0x0fc,
+	MT_EE_RF_5G_GRP4_5_RX_HIGH_GAIN =	0x0fe,
+
+	MT_EE_BT_RCAL_RESULT =			0x138,
+	MT_EE_BT_VCDL_CALIBRATION =		0x13c,
+	MT_EE_BT_PMUCFG =			0x13e,
+
+	__MT_EE_MAX
+};
+
+#define MT_EE_NIC_CONF_0_PA_INT_2G		BIT(8)
+#define MT_EE_NIC_CONF_0_PA_INT_5G		BIT(9)
+#define MT_EE_NIC_CONF_0_BOARD_TYPE		GENMASK(13, 12)
+
+#define MT_EE_NIC_CONF_1_TEMP_TX_ALC		BIT(1)
+#define MT_EE_NIC_CONF_1_LNA_EXT_2G		BIT(2)
+#define MT_EE_NIC_CONF_1_LNA_EXT_5G		BIT(3)
+#define MT_EE_NIC_CONF_1_TX_ALC_EN		BIT(13)
+
+#define MT_EE_NIC_CONF_2_RX_STREAM		GENMASK(3, 0)
+#define MT_EE_NIC_CONF_2_TX_STREAM		GENMASK(7, 4)
+#define MT_EE_NIC_CONF_2_HW_ANTDIV		BIT(8)
+#define MT_EE_NIC_CONF_2_XTAL_OPTION		GENMASK(10, 9)
+#define MT_EE_NIC_CONF_2_TEMP_DISABLE		BIT(11)
+#define MT_EE_NIC_CONF_2_COEX_METHOD		GENMASK(15, 13)
+
+enum mt76x2_board_type {
+	BOARD_TYPE_2GHZ = 1,
+	BOARD_TYPE_5GHZ = 2,
+};
+
+enum mt76x2_cal_channel_group {
+	MT_CH_5G_JAPAN,
+	MT_CH_5G_UNII_1,
+	MT_CH_5G_UNII_2,
+	MT_CH_5G_UNII_2E_1,
+	MT_CH_5G_UNII_2E_2,
+	MT_CH_5G_UNII_3,
+	__MT_CH_MAX
+};
+
+struct mt76x2_tx_power_info {
+	u8 target_power;
+
+	s8 delta_bw40;
+	s8 delta_bw80;
+
+	struct {
+		s8 tssi_slope;
+		s8 tssi_offset;
+		s8 target_power;
+		s8 delta;
+	} chain[MT_MAX_CHAINS];
+};
+
+struct mt76x2_temp_comp {
+	u8 temp_25_ref;
+	int lower_bound; /* J */
+	int upper_bound; /* J */
+	unsigned int high_slope; /* J / dB */
+	unsigned int low_slope; /* J / dB */
+};
+
+static inline int
+mt76x2_eeprom_get(struct mt76x2_dev *dev, enum mt76x2_eeprom_field field)
+{
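+	/* fields are 16-bit little-endian words at even offsets */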
+	if ((field & 1) || field >= __MT_EE_MAX)
+		return -1;
+
+	return get_unaligned_le16(dev->mt76.eeprom.data + field);
+}
+
+void mt76x2_get_rate_power(struct mt76x2_dev *dev, struct mt76_rate_power *t);
+void mt76x2_get_power_info(struct mt76x2_dev *dev,
+			   struct mt76x2_tx_power_info *t);
+int mt76x2_get_temp_comp(struct mt76x2_dev *dev, struct mt76x2_temp_comp *t);
+bool mt76x2_ext_pa_enabled(struct mt76x2_dev *dev, enum nl80211_band band);
+void mt76x2_read_rx_gain(struct mt76x2_dev *dev);
+
+static inline bool
+mt76x2_temp_tx_alc_enabled(struct mt76x2_dev *dev)
+{
+	return mt76x2_eeprom_get(dev, MT_EE_NIC_CONF_1) &
+	       MT_EE_NIC_CONF_1_TEMP_TX_ALC;
+}
+
+static inline bool
+mt76x2_tssi_enabled(struct mt76x2_dev *dev)
+{
+	return !mt76x2_temp_tx_alc_enabled(dev) &&
+	       (mt76x2_eeprom_get(dev, MT_EE_NIC_CONF_1) &
+		MT_EE_NIC_CONF_1_TX_ALC_EN);
+}
+
+static inline bool
+mt76x2_has_ext_lna(struct mt76x2_dev *dev)
+{
+	u32 val = mt76x2_eeprom_get(dev, MT_EE_NIC_CONF_1);
+
+	if (dev->mt76.chandef.chan->band == NL80211_BAND_2GHZ)
+		return val & MT_EE_NIC_CONF_1_LNA_EXT_2G;
+	else
+		return val & MT_EE_NIC_CONF_1_LNA_EXT_5G;
+}
+
+#endif
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_init.c b/drivers/net/wireless/mediatek/mt76/mt76x2_init.c
new file mode 100644
index 0000000..d3f03a8
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2_init.c
@@ -0,0 +1,839 @@
+/*
+ * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
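+	/* reclaim completed frames on all TX queues, MCU queue included */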
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <linux/delay.h>
+#include "mt76x2.h"
+#include "mt76x2_eeprom.h"
+#include "mt76x2_mcu.h"
+
+struct mt76x2_reg_pair {
+	u32 reg;
+	u32 value;
+};
+
+static bool
+mt76x2_wait_for_mac(struct mt76x2_dev *dev)
+{
+	int i;
+
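+	/* MT_MAC_CSR0 reads as 0 or ~0 until the MAC has left reset */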
+	for (i = 0; i < 500; i++) {
+		switch (mt76_rr(dev, MT_MAC_CSR0)) {
+		case 0:
+		case ~0:
+			break;
+		default:
+			return true;
+		}
+		usleep_range(5000, 10000);
+	}
+
+	return false;
+}
+
+static bool
+wait_for_wpdma(struct mt76x2_dev *dev)
+{
+	return mt76_poll(dev, MT_WPDMA_GLO_CFG,
+			 MT_WPDMA_GLO_CFG_TX_DMA_BUSY |
+			 MT_WPDMA_GLO_CFG_RX_DMA_BUSY,
+			 0, 1000);
+}
+
+static void
+mt76x2_mac_pbf_init(struct mt76x2_dev *dev)
+{
+	u32 val;
+
+	val = MT_PBF_SYS_CTRL_MCU_RESET |
+	      MT_PBF_SYS_CTRL_DMA_RESET |
+	      MT_PBF_SYS_CTRL_MAC_RESET |
+	      MT_PBF_SYS_CTRL_PBF_RESET |
+	      MT_PBF_SYS_CTRL_ASY_RESET;
+
+	mt76_set(dev, MT_PBF_SYS_CTRL, val);
+	mt76_clear(dev, MT_PBF_SYS_CTRL, val);
+
+	mt76_wr(dev, MT_PBF_TX_MAX_PCNT, 0xefef3f1f);
+	mt76_wr(dev, MT_PBF_RX_MAX_PCNT, 0xfebf);
+}
+
+static void
+mt76x2_write_reg_pairs(struct mt76x2_dev *dev,
+		       const struct mt76x2_reg_pair *data, int len)
+{
+	while (len > 0) {
+		mt76_wr(dev, data->reg, data->value);
+		len--;
+		data++;
+	}
+}
+
+static void
+mt76_write_mac_initvals(struct mt76x2_dev *dev)
+{
+#define DEFAULT_PROT_CFG				\
+	(FIELD_PREP(MT_PROT_CFG_RATE, 0x2004) |		\
+	 FIELD_PREP(MT_PROT_CFG_NAV, 1) |			\
+	 FIELD_PREP(MT_PROT_CFG_TXOP_ALLOW, 0x3f) |	\
+	 MT_PROT_CFG_RTS_THRESH)
+
+#define DEFAULT_PROT_CFG_20				\
+	(FIELD_PREP(MT_PROT_CFG_RATE, 0x2004) |		\
+	 FIELD_PREP(MT_PROT_CFG_CTRL, 1) |		\
+	 FIELD_PREP(MT_PROT_CFG_NAV, 1) |			\
+	 FIELD_PREP(MT_PROT_CFG_TXOP_ALLOW, 0x17))
+
+#define DEFAULT_PROT_CFG_40				\
+	(FIELD_PREP(MT_PROT_CFG_RATE, 0x2084) |		\
+	 FIELD_PREP(MT_PROT_CFG_CTRL, 1) |		\
+	 FIELD_PREP(MT_PROT_CFG_NAV, 1) |			\
+	 FIELD_PREP(MT_PROT_CFG_TXOP_ALLOW, 0x3f))
+
+	static const struct mt76x2_reg_pair vals[] = {
+		/* Copied from MediaTek reference source */
+		{ MT_PBF_SYS_CTRL,		0x00080c00 },
+		{ MT_PBF_CFG,			0x1efebcff },
+		{ MT_FCE_PSE_CTRL,		0x00000001 },
+		{ MT_MAC_SYS_CTRL,		0x0000000c },
+		{ MT_MAX_LEN_CFG,		0x003e3f00 },
+		{ MT_AMPDU_MAX_LEN_20M1S,	0xaaa99887 },
+		{ MT_AMPDU_MAX_LEN_20M2S,	0x000000aa },
+		{ MT_XIFS_TIME_CFG,		0x33a40d0a },
+		{ MT_BKOFF_SLOT_CFG,		0x00000209 },
+		{ MT_TBTT_SYNC_CFG,		0x00422010 },
+		{ MT_PWR_PIN_CFG,		0x00000000 },
+		{ 0x1238,			0x001700c8 },
+		{ MT_TX_SW_CFG0,		0x00101001 },
+		{ MT_TX_SW_CFG1,		0x00010000 },
+		{ MT_TX_SW_CFG2,		0x00000000 },
+		{ MT_TXOP_CTRL_CFG,		0x0400583f },
+		{ MT_TX_RTS_CFG,		0x00100020 },
+		{ MT_TX_TIMEOUT_CFG,		0x000a2290 },
+		{ MT_TX_RETRY_CFG,		0x47f01f0f },
+		{ MT_EXP_ACK_TIME,		0x002c00dc },
+		{ MT_TX_PROT_CFG6,		0xe3f42004 },
+		{ MT_TX_PROT_CFG7,		0xe3f42084 },
+		{ MT_TX_PROT_CFG8,		0xe3f42104 },
+		{ MT_PIFS_TX_CFG,		0x00060fff },
+		{ MT_RX_FILTR_CFG,		0x00015f97 },
+		{ MT_LEGACY_BASIC_RATE,		0x0000017f },
+		{ MT_HT_BASIC_RATE,		0x00004003 },
+		{ MT_PN_PAD_MODE,		0x00000002 },
+		{ MT_TXOP_HLDR_ET,		0x00000002 },
+		{ 0xa44,			0x00000000 },
+		{ MT_HEADER_TRANS_CTRL_REG,	0x00000000 },
+		{ MT_TSO_CTRL,			0x00000000 },
+		{ MT_AUX_CLK_CFG,		0x00000000 },
+		{ MT_DACCLK_EN_DLY_CFG,		0x00000000 },
+		{ MT_TX_ALC_CFG_4,		0x00000000 },
+		{ MT_TX_ALC_VGA3,		0x00000000 },
+		{ MT_TX_PWR_CFG_0,		0x3a3a3a3a },
+		{ MT_TX_PWR_CFG_1,		0x3a3a3a3a },
+		{ MT_TX_PWR_CFG_2,		0x3a3a3a3a },
+		{ MT_TX_PWR_CFG_3,		0x3a3a3a3a },
+		{ MT_TX_PWR_CFG_4,		0x3a3a3a3a },
+		{ MT_TX_PWR_CFG_7,		0x3a3a3a3a },
+		{ MT_TX_PWR_CFG_8,		0x0000003a },
+		{ MT_TX_PWR_CFG_9,		0x0000003a },
+		{ MT_EFUSE_CTRL,		0x0000d000 },
+		{ MT_PAUSE_ENABLE_CONTROL1,	0x0000000a },
+		{ MT_FCE_WLAN_FLOW_CONTROL1,	0x60401c18 },
+		{ MT_WPDMA_DELAY_INT_CFG,	0x94ff0000 },
+		{ MT_TX_SW_CFG3,		0x00000004 },
+		{ MT_HT_FBK_TO_LEGACY,		0x00001818 },
+		{ MT_VHT_HT_FBK_CFG1,		0xedcba980 },
+		{ MT_PROT_AUTO_TX_CFG,		0x00830083 },
+		{ MT_HT_CTRL_CFG,		0x000001ff },
+	};
+	struct mt76x2_reg_pair prot_vals[] = {
+		{ MT_CCK_PROT_CFG,		DEFAULT_PROT_CFG },
+		{ MT_OFDM_PROT_CFG,		DEFAULT_PROT_CFG },
+		{ MT_MM20_PROT_CFG,		DEFAULT_PROT_CFG_20 },
+		{ MT_MM40_PROT_CFG,		DEFAULT_PROT_CFG_40 },
+		{ MT_GF20_PROT_CFG,		DEFAULT_PROT_CFG_20 },
+		{ MT_GF40_PROT_CFG,		DEFAULT_PROT_CFG_40 },
+	};
+
+	mt76x2_write_reg_pairs(dev, vals, ARRAY_SIZE(vals));
+	mt76x2_write_reg_pairs(dev, prot_vals, ARRAY_SIZE(prot_vals));
+}
+
+static void
+mt76x2_fixup_xtal(struct mt76x2_dev *dev)
+{
+	u16 eep_val;
+	s8 offset = 0;
+
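+	/* low byte: trim offset, bits 0-6 magnitude, bit 7 sign (negative),
+	 * 0xff means unset
+	 */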
+	eep_val = mt76x2_eeprom_get(dev, MT_EE_XTAL_TRIM_2);
+
+	offset = eep_val & 0x7f;
+	if ((eep_val & 0xff) == 0xff)
+		offset = 0;
+	else if (eep_val & 0x80)
+		offset = 0 - offset;
+
+	eep_val >>= 8;
+	if (eep_val == 0x00 || eep_val == 0xff) {
+		eep_val = mt76x2_eeprom_get(dev, MT_EE_XTAL_TRIM_1);
+		eep_val &= 0xff;
+
+		if (eep_val == 0x00 || eep_val == 0xff)
+			eep_val = 0x14;
+	}
+
+	eep_val &= 0x7f;
+	mt76_rmw_field(dev, MT_XO_CTRL5, MT_XO_CTRL5_C2_VAL, eep_val + offset);
+	mt76_set(dev, MT_XO_CTRL6, MT_XO_CTRL6_C2_CTRL);
+
+	eep_val = mt76x2_eeprom_get(dev, MT_EE_NIC_CONF_2);
+	switch (FIELD_GET(MT_EE_NIC_CONF_2_XTAL_OPTION, eep_val)) {
+	case 0:
+		mt76_wr(dev, MT_XO_CTRL7, 0x5c1fee80);
+		break;
+	case 1:
+		mt76_wr(dev, MT_XO_CTRL7, 0x5c1feed0);
+		break;
+	default:
+		break;
+	}
+}
+
+static void
+mt76x2_init_beacon_offsets(struct mt76x2_dev *dev)
+{
+	u16 base = MT_BEACON_BASE;
+	u32 regs[4] = {};
+	int i;
+
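+	/* beacon offsets are programmed in 64-byte units, four per register */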
+	for (i = 0; i < 16; i++) {
+		u16 addr = dev->beacon_offsets[i];
+
+		regs[i / 4] |= ((addr - base) / 64) << (8 * (i % 4));
+	}
+
+	for (i = 0; i < 4; i++)
+		mt76_wr(dev, MT_BCN_OFFSET(i), regs[i]);
+}
+
+int mt76x2_mac_reset(struct mt76x2_dev *dev, bool hard)
+{
+	static const u8 null_addr[ETH_ALEN] = {};
+	const u8 *macaddr = dev->mt76.macaddr;
+	u32 val;
+	int i, k;
+
+	if (!mt76x2_wait_for_mac(dev))
+		return -ETIMEDOUT;
+
+	val = mt76_rr(dev, MT_WPDMA_GLO_CFG);
+
+	val &= ~(MT_WPDMA_GLO_CFG_TX_DMA_EN |
+		 MT_WPDMA_GLO_CFG_TX_DMA_BUSY |
+		 MT_WPDMA_GLO_CFG_RX_DMA_EN |
+		 MT_WPDMA_GLO_CFG_RX_DMA_BUSY |
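+	/* batch status reports for consecutive aggregated frames using the
+	 * same rate, flush when the rate or WCID changes
+	 */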
+		 MT_WPDMA_GLO_CFG_DMA_BURST_SIZE);
+	val |= FIELD_PREP(MT_WPDMA_GLO_CFG_DMA_BURST_SIZE, 3);
+
+	mt76_wr(dev, MT_WPDMA_GLO_CFG, val);
+
+	mt76x2_mac_pbf_init(dev);
+	mt76_write_mac_initvals(dev);
+	mt76x2_fixup_xtal(dev);
+
+	mt76_clear(dev, MT_MAC_SYS_CTRL,
+		   MT_MAC_SYS_CTRL_RESET_CSR |
+		   MT_MAC_SYS_CTRL_RESET_BBP);
+
+	if (is_mt7612(dev))
+		mt76_clear(dev, MT_COEXCFG0, MT_COEXCFG0_COEX_EN);
+
+	mt76_set(dev, MT_EXT_CCA_CFG, 0x0000f000);
+	mt76_clear(dev, MT_TX_ALC_CFG_4, BIT(31));
+
+	mt76_wr(dev, MT_RF_BYPASS_0, 0x06000000);
+	mt76_wr(dev, MT_RF_SETTING_0, 0x08800000);
+	usleep_range(5000, 10000);
+	mt76_wr(dev, MT_RF_BYPASS_0, 0x00000000);
+
+	mt76_wr(dev, MT_MCU_CLOCK_CTL, 0x1401);
+	mt76_clear(dev, MT_FCE_L2_STUFF, MT_FCE_L2_STUFF_WR_MPDU_LEN_EN);
+
+	mt76_wr(dev, MT_MAC_ADDR_DW0, get_unaligned_le32(macaddr));
+	mt76_wr(dev, MT_MAC_ADDR_DW1, get_unaligned_le16(macaddr + 4));
+
+	mt76_wr(dev, MT_MAC_BSSID_DW0, get_unaligned_le32(macaddr));
+	mt76_wr(dev, MT_MAC_BSSID_DW1, get_unaligned_le16(macaddr + 4) |
+		FIELD_PREP(MT_MAC_BSSID_DW1_MBSS_MODE, 3) | /* 8 beacons */
+		MT_MAC_BSSID_DW1_MBSS_LOCAL_BIT);
+
+	/* Fire a pre-TBTT interrupt 8 ms before TBTT */
+	mt76_rmw_field(dev, MT_INT_TIMER_CFG, MT_INT_TIMER_CFG_PRE_TBTT,
+		       8 << 4);
+	mt76_rmw_field(dev, MT_INT_TIMER_CFG, MT_INT_TIMER_CFG_GP_TIMER,
+		       MT_DFS_GP_INTERVAL);
+	mt76_wr(dev, MT_INT_TIMER_EN, 0);
+
+	mt76_wr(dev, MT_BCN_BYPASS_MASK, 0xffff);
+	if (!hard)
+		return 0;
+
+	for (i = 0; i < 256 / 32; i++)
+		mt76_wr(dev, MT_WCID_DROP_BASE + i * 4, 0);
+
+	for (i = 0; i < 256; i++)
+		mt76x2_mac_wcid_setup(dev, i, 0, NULL);
+
+	for (i = 0; i < 16; i++)
+		for (k = 0; k < 4; k++)
+			mt76x2_mac_shared_key_setup(dev, i, k, NULL);
+
+	for (i = 0; i < 8; i++) {
+		mt76x2_mac_set_bssid(dev, i, null_addr);
+		mt76x2_mac_set_beacon(dev, i, NULL);
+	}
+
+	for (i = 0; i < 16; i++)
+		mt76_rr(dev, MT_TX_STAT_FIFO);
+
+	mt76_set(dev, MT_MAC_APC_BSSID_H(0), MT_MAC_APC_BSSID0_H_EN);
+
+	mt76_wr(dev, MT_CH_TIME_CFG,
+		MT_CH_TIME_CFG_TIMER_EN |
+		MT_CH_TIME_CFG_TX_AS_BUSY |
+		MT_CH_TIME_CFG_RX_AS_BUSY |
+		MT_CH_TIME_CFG_NAV_AS_BUSY |
+		MT_CH_TIME_CFG_EIFS_AS_BUSY |
+		FIELD_PREP(MT_CH_TIME_CFG_CH_TIMER_CLR, 1));
+
+	mt76x2_init_beacon_offsets(dev);
+
+	mt76x2_set_tx_ackto(dev);
+
+	return 0;
+}
+
+int mt76x2_mac_start(struct mt76x2_dev *dev)
+{
+	int i;
+
+	for (i = 0; i < 16; i++)
+		mt76_rr(dev, MT_TX_AGG_CNT(i));
+
+	for (i = 0; i < 16; i++)
+		mt76_rr(dev, MT_TX_STAT_FIFO);
+
+	memset(dev->aggr_stats, 0, sizeof(dev->aggr_stats));
+
+	mt76_wr(dev, MT_MAC_SYS_CTRL, MT_MAC_SYS_CTRL_ENABLE_TX);
+	wait_for_wpdma(dev);
+	usleep_range(50, 100);
+
+	mt76_set(dev, MT_WPDMA_GLO_CFG,
+		 MT_WPDMA_GLO_CFG_TX_DMA_EN |
+		 MT_WPDMA_GLO_CFG_RX_DMA_EN);
+
+	mt76_clear(dev, MT_WPDMA_GLO_CFG, MT_WPDMA_GLO_CFG_TX_WRITEBACK_DONE);
+
+	mt76_wr(dev, MT_RX_FILTR_CFG, dev->rxfilter);
+
+	mt76_wr(dev, MT_MAC_SYS_CTRL,
+		MT_MAC_SYS_CTRL_ENABLE_TX |
+		MT_MAC_SYS_CTRL_ENABLE_RX);
+
+	mt76x2_irq_enable(dev, MT_INT_RX_DONE_ALL | MT_INT_TX_DONE_ALL |
+			       MT_INT_TX_STAT);
+
+	return 0;
+}
+
+void mt76x2_mac_stop(struct mt76x2_dev *dev, bool force)
+{
+	bool stopped = false;
+	u32 rts_cfg;
+	int i;
+
+	mt76_wr(dev, MT_MAC_SYS_CTRL, 0);
+
+	rts_cfg = mt76_rr(dev, MT_TX_RTS_CFG);
+	mt76_wr(dev, MT_TX_RTS_CFG, rts_cfg & ~MT_TX_RTS_CFG_RETRY_LIMIT);
+
+	/* Wait for MAC to become idle */
+	for (i = 0; i < 300; i++) {
+		if (mt76_rr(dev, MT_MAC_STATUS) &
+		    (MT_MAC_STATUS_RX | MT_MAC_STATUS_TX))
+			continue;
+
+		if (mt76_rr(dev, MT_BBP(IBI, 12)))
+			continue;
+
+		stopped = true;
+		break;
+	}
+
+	if (force && !stopped) {
+		mt76_set(dev, MT_BBP(CORE, 4), BIT(1));
+		mt76_clear(dev, MT_BBP(CORE, 4), BIT(1));
+
+		mt76_set(dev, MT_BBP(CORE, 4), BIT(0));
+		mt76_clear(dev, MT_BBP(CORE, 4), BIT(0));
+	}
+
+	mt76_wr(dev, MT_TX_RTS_CFG, rts_cfg);
+}
+
+void mt76x2_mac_resume(struct mt76x2_dev *dev)
+{
+	mt76_wr(dev, MT_MAC_SYS_CTRL,
+		MT_MAC_SYS_CTRL_ENABLE_TX |
+		MT_MAC_SYS_CTRL_ENABLE_RX);
+}
+
+static void
+mt76x2_power_on_rf_patch(struct mt76x2_dev *dev)
+{
+	mt76_set(dev, 0x10130, BIT(0) | BIT(16));
+	udelay(1);
+
+	mt76_clear(dev, 0x1001c, 0xff);
+	mt76_set(dev, 0x1001c, 0x30);
+
+	mt76_wr(dev, 0x10014, 0x484f);
+	udelay(1);
+
+	mt76_set(dev, 0x10130, BIT(17));
+	udelay(125);
+
+	mt76_clear(dev, 0x10130, BIT(16));
+	udelay(50);
+
+	mt76_set(dev, 0x1014c, BIT(19) | BIT(20));
+}
+
+static void
+mt76x2_power_on_rf(struct mt76x2_dev *dev, int unit)
+{
+	int shift = unit ? 8 : 0;
+
+	/* Enable RF BG */
+	mt76_set(dev, 0x10130, BIT(0) << shift);
+	udelay(10);
+
+	/* Enable RFDIG LDO/AFE/ABB/ADDA */
+	mt76_set(dev, 0x10130, (BIT(1) | BIT(3) | BIT(4) | BIT(5)) << shift);
+	udelay(10);
+
+	/* Switch RFDIG power to internal LDO */
+	mt76_clear(dev, 0x10130, BIT(2) << shift);
+	udelay(10);
+
+	mt76x2_power_on_rf_patch(dev);
+
+	mt76_set(dev, 0x530, 0xf);
+}
+
+static void
+mt76x2_power_on(struct mt76x2_dev *dev)
+{
+	u32 val;
+
+	/* Turn on WL MTCMOS */
+	mt76_set(dev, MT_WLAN_MTC_CTRL, MT_WLAN_MTC_CTRL_MTCMOS_PWR_UP);
+
+	val = MT_WLAN_MTC_CTRL_STATE_UP |
+	      MT_WLAN_MTC_CTRL_PWR_ACK |
+	      MT_WLAN_MTC_CTRL_PWR_ACK_S;
+
+	mt76_poll(dev, MT_WLAN_MTC_CTRL, val, val, 1000);
+
+	mt76_clear(dev, MT_WLAN_MTC_CTRL, 0x7f << 16);
+	udelay(10);
+
+	mt76_clear(dev, MT_WLAN_MTC_CTRL, 0xf << 24);
+	udelay(10);
+
+	mt76_set(dev, MT_WLAN_MTC_CTRL, 0xf << 24);
+	mt76_clear(dev, MT_WLAN_MTC_CTRL, 0xfff);
+
+	/* Turn on AD/DA power down */
+	mt76_clear(dev, 0x11204, BIT(3));
+
+	/* WLAN function enable */
+	mt76_set(dev, 0x10080, BIT(0));
+
+	/* Release BBP software reset */
+	mt76_clear(dev, 0x10064, BIT(18));
+
+	mt76x2_power_on_rf(dev, 0);
+	mt76x2_power_on_rf(dev, 1);
+}
+
+void mt76x2_set_tx_ackto(struct mt76x2_dev *dev)
+{
+	u8 ackto, sifs, slottime = dev->slottime;
+
+	slottime += 3 * dev->coverage_class;
+
+	sifs = mt76_get_field(dev, MT_XIFS_TIME_CFG,
+			      MT_XIFS_TIME_CFG_OFDM_SIFS);
+
+	ackto = slottime + sifs;
+	mt76_rmw_field(dev, MT_TX_TIMEOUT_CFG,
+		       MT_TX_TIMEOUT_CFG_ACKTO, ackto);
+}
+
+static void
+mt76x2_set_wlan_state(struct mt76x2_dev *dev, bool enable)
+{
+	u32 val = mt76_rr(dev, MT_WLAN_FUN_CTRL);
+
+	if (enable)
+		val |= (MT_WLAN_FUN_CTRL_WLAN_EN |
+			MT_WLAN_FUN_CTRL_WLAN_CLK_EN);
+	else
+		val &= ~(MT_WLAN_FUN_CTRL_WLAN_EN |
+			 MT_WLAN_FUN_CTRL_WLAN_CLK_EN);
+
+	mt76_wr(dev, MT_WLAN_FUN_CTRL, val);
+	udelay(20);
+}
+
+static void
+mt76x2_reset_wlan(struct mt76x2_dev *dev, bool enable)
+{
+	u32 val;
+
+	val = mt76_rr(dev, MT_WLAN_FUN_CTRL);
+
+	val &= ~MT_WLAN_FUN_CTRL_FRC_WL_ANT_SEL;
+
+	if (val & MT_WLAN_FUN_CTRL_WLAN_EN) {
+		val |= MT_WLAN_FUN_CTRL_WLAN_RESET_RF;
+		mt76_wr(dev, MT_WLAN_FUN_CTRL, val);
+		udelay(20);
+
+		val &= ~MT_WLAN_FUN_CTRL_WLAN_RESET_RF;
+	}
+
+	mt76_wr(dev, MT_WLAN_FUN_CTRL, val);
+	udelay(20);
+
+	mt76x2_set_wlan_state(dev, enable);
+}
+
+int mt76x2_init_hardware(struct mt76x2_dev *dev)
+{
+	static const u16 beacon_offsets[16] = {
+		/* 1024 bytes per beacon */
+		0xc000,
+		0xc400,
+		0xc800,
+		0xcc00,
+		0xd000,
+		0xd400,
+		0xd800,
+		0xdc00,
+
+		/* BSS idx 8-15 not used for beacons */
+		0xc000,
+		0xc000,
+		0xc000,
+		0xc000,
+		0xc000,
+		0xc000,
+		0xc000,
+		0xc000,
+	};
+	u32 val;
+	int ret;
+
+	dev->beacon_offsets = beacon_offsets;
+	tasklet_init(&dev->pre_tbtt_tasklet, mt76x2_pre_tbtt_tasklet,
+		     (unsigned long) dev);
+
+	dev->chainmask = 0x202;
+	dev->global_wcid.idx = 255;
+	dev->global_wcid.hw_key_idx = -1;
+	dev->slottime = 9;
+
+	val = mt76_rr(dev, MT_WPDMA_GLO_CFG);
+	val &= MT_WPDMA_GLO_CFG_DMA_BURST_SIZE |
+	       MT_WPDMA_GLO_CFG_BIG_ENDIAN |
+	       MT_WPDMA_GLO_CFG_HDR_SEG_LEN;
+	val |= MT_WPDMA_GLO_CFG_TX_WRITEBACK_DONE;
+	mt76_wr(dev, MT_WPDMA_GLO_CFG, val);
+
+	mt76x2_reset_wlan(dev, true);
+	mt76x2_power_on(dev);
+
+	ret = mt76x2_eeprom_init(dev);
+	if (ret)
+		return ret;
+
+	ret = mt76x2_mac_reset(dev, true);
+	if (ret)
+		return ret;
+
+	ret = mt76x2_dma_init(dev);
+	if (ret)
+		return ret;
+
+	set_bit(MT76_STATE_INITIALIZED, &dev->mt76.state);
+	ret = mt76x2_mac_start(dev);
+	if (ret)
+		return ret;
+
+	ret = mt76x2_mcu_init(dev);
+	if (ret)
+		return ret;
+
+	mt76x2_mac_stop(dev, false);
+	dev->rxfilter = mt76_rr(dev, MT_RX_FILTR_CFG);
+
+	return 0;
+}
+
+void mt76x2_stop_hardware(struct mt76x2_dev *dev)
+{
+	cancel_delayed_work_sync(&dev->cal_work);
+	cancel_delayed_work_sync(&dev->mac_work);
+	mt76x2_mcu_set_radio_state(dev, false);
+	mt76x2_mac_stop(dev, false);
+}
+
+void mt76x2_cleanup(struct mt76x2_dev *dev)
+{
+	mt76x2_stop_hardware(dev);
+	mt76x2_dma_cleanup(dev);
+	mt76x2_mcu_cleanup(dev);
+}
+
+struct mt76x2_dev *mt76x2_alloc_device(struct device *pdev)
+{
+	static const struct mt76_driver_ops drv_ops = {
+		.txwi_size = sizeof(struct mt76x2_txwi),
+		.update_survey = mt76x2_update_channel,
+		.tx_prepare_skb = mt76x2_tx_prepare_skb,
+		.tx_complete_skb = mt76x2_tx_complete_skb,
+		.rx_skb = mt76x2_queue_rx_skb,
+		.rx_poll_complete = mt76x2_rx_poll_complete,
+	};
+	struct ieee80211_hw *hw;
+	struct mt76x2_dev *dev;
+
+	hw = ieee80211_alloc_hw(sizeof(*dev), &mt76x2_ops);
+	if (!hw)
+		return NULL;
+
+	dev = hw->priv;
+	dev->mt76.dev = pdev;
+	dev->mt76.hw = hw;
+	dev->mt76.drv = &drv_ops;
+	mutex_init(&dev->mutex);
+	spin_lock_init(&dev->irq_lock);
+
+	return dev;
+}
+
+static void mt76x2_regd_notifier(struct wiphy *wiphy,
+				 struct regulatory_request *request)
+{
+	struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy);
+	struct mt76x2_dev *dev = hw->priv;
+
+	dev->dfs_pd.region = request->dfs_region;
+}
+
+#define CCK_RATE(_idx, _rate) {					\
+	.bitrate = _rate,					\
+	.flags = IEEE80211_RATE_SHORT_PREAMBLE,			\
+	.hw_value = (MT_PHY_TYPE_CCK << 8) | _idx,		\
+	.hw_value_short = (MT_PHY_TYPE_CCK << 8) | (8 + _idx),	\
+}
+
+#define OFDM_RATE(_idx, _rate) {				\
+	.bitrate = _rate,					\
+	.hw_value = (MT_PHY_TYPE_OFDM << 8) | _idx,		\
+	.hw_value_short = (MT_PHY_TYPE_OFDM << 8) | _idx,	\
+}
+
+static struct ieee80211_rate mt76x2_rates[] = {
+	CCK_RATE(0, 10),
+	CCK_RATE(1, 20),
+	CCK_RATE(2, 55),
+	CCK_RATE(3, 110),
+	OFDM_RATE(0, 60),
+	OFDM_RATE(1, 90),
+	OFDM_RATE(2, 120),
+	OFDM_RATE(3, 180),
+	OFDM_RATE(4, 240),
+	OFDM_RATE(5, 360),
+	OFDM_RATE(6, 480),
+	OFDM_RATE(7, 540),
+};
+
+static const struct ieee80211_iface_limit if_limits[] = {
+	{
+		.max = 1,
+		.types = BIT(NL80211_IFTYPE_ADHOC)
+	}, {
+		.max = 8,
+		.types = BIT(NL80211_IFTYPE_STATION) |
+#ifdef CONFIG_MAC80211_MESH
+			 BIT(NL80211_IFTYPE_MESH_POINT) |
+#endif
+			 BIT(NL80211_IFTYPE_AP)
+	 },
+};
+
+static const struct ieee80211_iface_combination if_comb[] = {
+	{
+		.limits = if_limits,
+		.n_limits = ARRAY_SIZE(if_limits),
+		.max_interfaces = 8,
+		.num_different_channels = 1,
+		.beacon_int_infra_match = true,
+		.radar_detect_widths = BIT(NL80211_CHAN_WIDTH_20_NOHT) |
+				       BIT(NL80211_CHAN_WIDTH_20) |
+				       BIT(NL80211_CHAN_WIDTH_40) |
+				       BIT(NL80211_CHAN_WIDTH_80),
+	}
+};
+
+static void mt76x2_led_set_config(struct mt76_dev *mt76, u8 delay_on,
+				  u8 delay_off)
+{
+	struct mt76x2_dev *dev = container_of(mt76, struct mt76x2_dev,
+					      mt76);
+	u32 val;
+
+	val = MT_LED_STATUS_DURATION(0xff) |
+	      MT_LED_STATUS_OFF(delay_off) |
+	      MT_LED_STATUS_ON(delay_on);
+
+	mt76_wr(dev, MT_LED_S0(mt76->led_pin), val);
+	mt76_wr(dev, MT_LED_S1(mt76->led_pin), val);
+
+	val = MT_LED_CTRL_REPLAY(mt76->led_pin) |
+	      MT_LED_CTRL_KICK(mt76->led_pin);
+	if (mt76->led_al)
+		val |= MT_LED_CTRL_POLARITY(mt76->led_pin);
+	mt76_wr(dev, MT_LED_CTRL, val);
+}
+
+static int mt76x2_led_set_blink(struct led_classdev *led_cdev,
+				unsigned long *delay_on,
+				unsigned long *delay_off)
+{
+	struct mt76_dev *mt76 = container_of(led_cdev, struct mt76_dev,
+					     led_cdev);
+	u8 delta_on, delta_off;
+
+	delta_off = max_t(u8, *delay_off / 10, 1);
+	delta_on = max_t(u8, *delay_on / 10, 1);
+
+	mt76x2_led_set_config(mt76, delta_on, delta_off);
+	return 0;
+}
+
+static void mt76x2_led_set_brightness(struct led_classdev *led_cdev,
+				      enum led_brightness brightness)
+{
+	struct mt76_dev *mt76 = container_of(led_cdev, struct mt76_dev,
+					     led_cdev);
+
+	if (!brightness)
+		mt76x2_led_set_config(mt76, 0, 0xff);
+	else
+		mt76x2_led_set_config(mt76, 0xff, 0);
+}
+
+int mt76x2_register_device(struct mt76x2_dev *dev)
+{
+	struct ieee80211_hw *hw = mt76_hw(dev);
+	struct wiphy *wiphy = hw->wiphy;
+	void *status_fifo;
+	int fifo_size;
+	int i, ret;
+
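+	/* kfifo requires a power-of-two buffer size */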
+	fifo_size = roundup_pow_of_two(32 * sizeof(struct mt76x2_tx_status));
+	status_fifo = devm_kzalloc(dev->mt76.dev, fifo_size, GFP_KERNEL);
+	if (!status_fifo)
+		return -ENOMEM;
+
+	kfifo_init(&dev->txstatus_fifo, status_fifo, fifo_size);
+
+	ret = mt76x2_init_hardware(dev);
+	if (ret)
+		return ret;
+
+	hw->queues = 4;
+	hw->max_rates = 1;
+	hw->max_report_rates = 7;
+	hw->max_rate_tries = 1;
+	hw->extra_tx_headroom = 2;
+
+	hw->sta_data_size = sizeof(struct mt76x2_sta);
+	hw->vif_data_size = sizeof(struct mt76x2_vif);
+
+	for (i = 0; i < ARRAY_SIZE(dev->macaddr_list); i++) {
+		u8 *addr = dev->macaddr_list[i].addr;
+
+		memcpy(addr, dev->mt76.macaddr, ETH_ALEN);
+
+		if (!i)
+			continue;
+
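+		/* derive extra addresses from the base one: set the
+		 * locally administered bit and encode the index
+		 */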
+		addr[0] |= BIT(1);
+		addr[0] ^= ((i - 1) << 2);
+	}
+	wiphy->addresses = dev->macaddr_list;
+	wiphy->n_addresses = ARRAY_SIZE(dev->macaddr_list);
+
+	wiphy->iface_combinations = if_comb;
+	wiphy->n_iface_combinations = ARRAY_SIZE(if_comb);
+
+	wiphy->reg_notifier = mt76x2_regd_notifier;
+
+	wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_VHT_IBSS);
+
+	ieee80211_hw_set(hw, SUPPORTS_HT_CCK_RATES);
+	INIT_DELAYED_WORK(&dev->cal_work, mt76x2_phy_calibrate);
+	INIT_DELAYED_WORK(&dev->mac_work, mt76x2_mac_work);
+
+	dev->mt76.sband_2g.sband.ht_cap.cap |= IEEE80211_HT_CAP_LDPC_CODING;
+	dev->mt76.sband_5g.sband.ht_cap.cap |= IEEE80211_HT_CAP_LDPC_CODING;
+
+	mt76x2_dfs_init_detector(dev);
+
+	/* init led callbacks */
+	dev->mt76.led_cdev.brightness_set = mt76x2_led_set_brightness;
+	dev->mt76.led_cdev.blink_set = mt76x2_led_set_blink;
+
+	ret = mt76_register_device(&dev->mt76, true, mt76x2_rates,
+				   ARRAY_SIZE(mt76x2_rates));
+	if (ret)
+		goto fail;
+
+	mt76x2_init_debugfs(dev);
+
+	return 0;
+
+fail:
+	mt76x2_stop_hardware(dev);
+	return ret;
+}
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_mac.c b/drivers/net/wireless/mediatek/mt76/mt76x2_mac.c
new file mode 100644
index 0000000..39fc1d7b
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2_mac.c
@@ -0,0 +1,755 @@
+/*
+ * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <linux/delay.h>
+#include "mt76x2.h"
+#include "mt76x2_mcu.h"
+#include "mt76x2_eeprom.h"
+#include "mt76x2_trace.h"
+
+void mt76x2_mac_set_bssid(struct mt76x2_dev *dev, u8 idx, const u8 *addr)
+{
+	idx &= 7;
+	mt76_wr(dev, MT_MAC_APC_BSSID_L(idx), get_unaligned_le32(addr));
+	mt76_rmw_field(dev, MT_MAC_APC_BSSID_H(idx), MT_MAC_APC_BSSID_H_ADDR,
+		       get_unaligned_le16(addr + 4));
+}
+
+static void
+mt76x2_mac_process_rate(struct ieee80211_rx_status *status, u16 rate)
+{
+	u8 idx = FIELD_GET(MT_RXWI_RATE_INDEX, rate);
+
+	switch (FIELD_GET(MT_RXWI_RATE_PHY, rate)) {
+	case MT_PHY_TYPE_OFDM:
+		if (idx >= 8)
+			idx = 0;
+
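+		/* the 2.4 GHz rate table has four CCK entries before OFDM */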
+		if (status->band == NL80211_BAND_2GHZ)
+			idx += 4;
+
+		status->rate_idx = idx;
+		return;
+	case MT_PHY_TYPE_CCK:
+		if (idx >= 8) {
+			idx -= 8;
+			status->enc_flags |= RX_ENC_FLAG_SHORTPRE;
+		}
+
+		if (idx >= 4)
+			idx = 0;
+
+		status->rate_idx = idx;
+		return;
+	case MT_PHY_TYPE_HT_GF:
+		status->enc_flags |= RX_ENC_FLAG_HT_GF;
+		/* fall through */
+	case MT_PHY_TYPE_HT:
+		status->encoding = RX_ENC_HT;
+		status->rate_idx = idx;
+		break;
+	case MT_PHY_TYPE_VHT:
+		status->encoding = RX_ENC_VHT;
+		status->rate_idx = FIELD_GET(MT_RATE_INDEX_VHT_IDX, idx);
+		status->nss = FIELD_GET(MT_RATE_INDEX_VHT_NSS, idx) + 1;
+		break;
+	default:
+		WARN_ON(1);
+		return;
+	}
+
+	if (rate & MT_RXWI_RATE_LDPC)
+		status->enc_flags |= RX_ENC_FLAG_LDPC;
+
+	if (rate & MT_RXWI_RATE_SGI)
+		status->enc_flags |= RX_ENC_FLAG_SHORT_GI;
+
+	if (rate & MT_RXWI_RATE_STBC)
+		status->enc_flags |= 1 << RX_ENC_FLAG_STBC_SHIFT;
+
+	switch (FIELD_GET(MT_RXWI_RATE_BW, rate)) {
+	case MT_PHY_BW_20:
+		break;
+	case MT_PHY_BW_40:
+		status->bw = RATE_INFO_BW_40;
+		break;
+	case MT_PHY_BW_80:
+		status->bw = RATE_INFO_BW_80;
+		break;
+	default:
+		break;
+	}
+}
+
+static __le16
+mt76x2_mac_tx_rate_val(struct mt76x2_dev *dev,
+		       const struct ieee80211_tx_rate *rate, u8 *nss_val)
+{
+	u16 rateval;
+	u8 phy, rate_idx;
+	u8 nss = 1;
+	u8 bw = 0;
+
+	if (rate->flags & IEEE80211_TX_RC_VHT_MCS) {
+		rate_idx = rate->idx;
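+		/* the VHT rate index carries the NSS in its upper bits */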
+		nss = 1 + (rate->idx >> 4);
+		phy = MT_PHY_TYPE_VHT;
+		if (rate->flags & IEEE80211_TX_RC_80_MHZ_WIDTH)
+			bw = 2;
+		else if (rate->flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
+			bw = 1;
+	} else if (rate->flags & IEEE80211_TX_RC_MCS) {
+		rate_idx = rate->idx;
+		nss = 1 + (rate->idx >> 3);
+		phy = MT_PHY_TYPE_HT;
+		if (rate->flags & IEEE80211_TX_RC_GREEN_FIELD)
+			phy = MT_PHY_TYPE_HT_GF;
+		if (rate->flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
+			bw = 1;
+	} else {
+		const struct ieee80211_rate *r;
+		int band = dev->mt76.chandef.chan->band;
+		u16 val;
+
+		r = &mt76_hw(dev)->wiphy->bands[band]->bitrates[rate->idx];
+		if (rate->flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE)
+			val = r->hw_value_short;
+		else
+			val = r->hw_value;
+
+		phy = val >> 8;
+		rate_idx = val & 0xff;
+		bw = 0;
+	}
+
+	rateval = FIELD_PREP(MT_RXWI_RATE_INDEX, rate_idx);
+	rateval |= FIELD_PREP(MT_RXWI_RATE_PHY, phy);
+	rateval |= FIELD_PREP(MT_RXWI_RATE_BW, bw);
+	if (rate->flags & IEEE80211_TX_RC_SHORT_GI)
+		rateval |= MT_RXWI_RATE_SGI;
+
+	*nss_val = nss;
+	return cpu_to_le16(rateval);
+}
+
+void mt76x2_mac_wcid_set_drop(struct mt76x2_dev *dev, u8 idx, bool drop)
+{
+	u32 val = mt76_rr(dev, MT_WCID_DROP(idx));
+	u32 bit = MT_WCID_DROP_MASK(idx);
+
+	/* prevent unnecessary writes */
+	if ((val & bit) != (bit * drop))
+		mt76_wr(dev, MT_WCID_DROP(idx), (val & ~bit) | (bit * drop));
+}
+
+void mt76x2_mac_wcid_set_rate(struct mt76x2_dev *dev, struct mt76_wcid *wcid,
+			      const struct ieee80211_tx_rate *rate)
+{
+	spin_lock_bh(&dev->mt76.lock);
+	wcid->tx_rate = mt76x2_mac_tx_rate_val(dev, rate, &wcid->tx_rate_nss);
+	wcid->tx_rate_set = true;
+	spin_unlock_bh(&dev->mt76.lock);
+}
+
+void mt76x2_mac_write_txwi(struct mt76x2_dev *dev, struct mt76x2_txwi *txwi,
+			   struct sk_buff *skb, struct mt76_wcid *wcid,
+			   struct ieee80211_sta *sta)
+{
+	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+	struct ieee80211_tx_rate *rate = &info->control.rates[0];
+	u16 rate_ht_mask = FIELD_PREP(MT_RXWI_RATE_PHY, BIT(1) | BIT(2));
+	u16 txwi_flags = 0;
+	u8 nss;
+	s8 txpwr_adj, max_txpwr_adj;
+
+	memset(txwi, 0, sizeof(*txwi));
+
+	if (wcid)
+		txwi->wcid = wcid->idx;
+	else
+		txwi->wcid = 0xff;
+
+	txwi->pktid = 1;
+
+	spin_lock_bh(&dev->mt76.lock);
+	if (rate->idx < 0 || !rate->count) {
+		txwi->rate = wcid->tx_rate;
+		max_txpwr_adj = wcid->max_txpwr_adj;
+		nss = wcid->tx_rate_nss;
+	} else {
+		txwi->rate = mt76x2_mac_tx_rate_val(dev, rate, &nss);
+		max_txpwr_adj = mt76x2_tx_get_max_txpwr_adj(dev, rate);
+	}
+	spin_unlock_bh(&dev->mt76.lock);
+
+	txpwr_adj = mt76x2_tx_get_txpwr_adj(dev, dev->txpower_conf,
+					    max_txpwr_adj);
+	txwi->ctl2 = FIELD_PREP(MT_TX_PWR_ADJ, txpwr_adj);
+
+	if (mt76xx_rev(dev) >= MT76XX_REV_E4)
+		txwi->txstream = 0x13;
+	else if (mt76xx_rev(dev) >= MT76XX_REV_E3 &&
+		 !(txwi->rate & cpu_to_le16(rate_ht_mask)))
+		txwi->txstream = 0x93;
+
+	if (info->flags & IEEE80211_TX_CTL_LDPC)
+		txwi->rate |= cpu_to_le16(MT_RXWI_RATE_LDPC);
+	if ((info->flags & IEEE80211_TX_CTL_STBC) && nss == 1)
+		txwi->rate |= cpu_to_le16(MT_RXWI_RATE_STBC);
+	if (nss > 1 && sta && sta->smps_mode == IEEE80211_SMPS_DYNAMIC)
+		txwi_flags |= MT_TXWI_FLAGS_MMPS;
+	if (!(info->flags & IEEE80211_TX_CTL_NO_ACK))
+		txwi->ack_ctl |= MT_TXWI_ACK_CTL_REQ;
+	if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ)
+		txwi->ack_ctl |= MT_TXWI_ACK_CTL_NSEQ;
+	if (info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE)
+		txwi->pktid |= MT_TXWI_PKTID_PROBE;
+	if ((info->flags & IEEE80211_TX_CTL_AMPDU) && sta) {
+		u8 ba_size = IEEE80211_MIN_AMPDU_BUF;
+
+		ba_size <<= sta->ht_cap.ampdu_factor;
+		ba_size = min_t(int, 63, ba_size - 1);
+		if (info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE)
+			ba_size = 0;
+		txwi->ack_ctl |= FIELD_PREP(MT_TXWI_ACK_CTL_BA_WINDOW, ba_size);
+
+		txwi_flags |= MT_TXWI_FLAGS_AMPDU |
+			 FIELD_PREP(MT_TXWI_FLAGS_MPDU_DENSITY,
+				    sta->ht_cap.ampdu_density);
+	}
+
+	txwi->flags |= cpu_to_le16(txwi_flags);
+	txwi->len_ctl = cpu_to_le16(skb->len);
+}
+
+static void mt76x2_remove_hdr_pad(struct sk_buff *skb)
+{
+	int len = ieee80211_get_hdrlen_from_skb(skb);
+
+	memmove(skb->data + 2, skb->data, len);
+	skb_pull(skb, 2);
+}
+
+int mt76x2_mac_process_rx(struct mt76x2_dev *dev, struct sk_buff *skb,
+			  void *rxi)
+{
+	struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
+	struct mt76x2_rxwi *rxwi = rxi;
+	u32 ctl = le32_to_cpu(rxwi->ctl);
+	u16 rate = le16_to_cpu(rxwi->rate);
+	int len;
+
+	if (rxwi->rxinfo & cpu_to_le32(MT_RXINFO_L2PAD))
+		mt76x2_remove_hdr_pad(skb);
+
+	if (rxwi->rxinfo & cpu_to_le32(MT_RXINFO_DECRYPT)) {
+		status->flag |= RX_FLAG_DECRYPTED;
+		status->flag |= RX_FLAG_IV_STRIPPED | RX_FLAG_MMIC_STRIPPED;
+	}
+
+	len = FIELD_GET(MT_RXWI_CTL_MPDU_LEN, ctl);
+	if (WARN_ON_ONCE(len > skb->len))
+		return -EINVAL;
+
+	pskb_trim(skb, len);
+	status->chains = BIT(0) | BIT(1);
+	status->chain_signal[0] = mt76x2_phy_get_rssi(dev, rxwi->rssi[0], 0);
+	status->chain_signal[1] = mt76x2_phy_get_rssi(dev, rxwi->rssi[1], 1);
+	status->signal = max(status->chain_signal[0], status->chain_signal[1]);
+	status->freq = dev->mt76.chandef.chan->center_freq;
+	status->band = dev->mt76.chandef.chan->band;
+
+	mt76x2_mac_process_rate(status, rate);
+
+	return 0;
+}
+
+static void
+mt76x2_mac_process_tx_rate(struct ieee80211_tx_rate *txrate, u16 rate,
+			   enum nl80211_band band)
+{
+	u8 idx = FIELD_GET(MT_RXWI_RATE_INDEX, rate);
+
+	txrate->idx = 0;
+	txrate->flags = 0;
+	txrate->count = 1;
+
+	switch (FIELD_GET(MT_RXWI_RATE_PHY, rate)) {
+	case MT_PHY_TYPE_OFDM:
+		if (band == NL80211_BAND_2GHZ)
+			idx += 4;
+
+		txrate->idx = idx;
+		return;
+	case MT_PHY_TYPE_CCK:
+		if (idx >= 8)
+			idx -= 8;
+
+		txrate->idx = idx;
+		return;
+	case MT_PHY_TYPE_HT_GF:
+		txrate->flags |= IEEE80211_TX_RC_GREEN_FIELD;
+		/* fall through */
+	case MT_PHY_TYPE_HT:
+		txrate->flags |= IEEE80211_TX_RC_MCS;
+		txrate->idx = idx;
+		break;
+	case MT_PHY_TYPE_VHT:
+		txrate->flags |= IEEE80211_TX_RC_VHT_MCS;
+		txrate->idx = idx;
+		break;
+	default:
+		WARN_ON(1);
+		return;
+	}
+
+	switch (FIELD_GET(MT_RXWI_RATE_BW, rate)) {
+	case MT_PHY_BW_20:
+		break;
+	case MT_PHY_BW_40:
+		txrate->flags |= IEEE80211_TX_RC_40_MHZ_WIDTH;
+		break;
+	case MT_PHY_BW_80:
+		txrate->flags |= IEEE80211_TX_RC_80_MHZ_WIDTH;
+		break;
+	default:
+		WARN_ON(1);
+		break;
+	}
+
+	if (rate & MT_RXWI_RATE_SGI)
+		txrate->flags |= IEEE80211_TX_RC_SHORT_GI;
+}
+
+static void
+mt76x2_mac_fill_tx_status(struct mt76x2_dev *dev,
+			  struct ieee80211_tx_info *info,
+			  struct mt76x2_tx_status *st, int n_frames)
+{
+	struct ieee80211_tx_rate *rate = info->status.rates;
+	int cur_idx, last_rate;
+	int i;
+
+	if (!n_frames)
+		return;
+
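+	/*
+	 * Only the final rate is reported by the hardware; reconstruct
+	 * the preceding attempts assuming the rate index dropped by one
+	 * for each retry.
+	 */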
+	last_rate = min_t(int, st->retry, IEEE80211_TX_MAX_RATES - 1);
+	mt76x2_mac_process_tx_rate(&rate[last_rate], st->rate,
+				 dev->mt76.chandef.chan->band);
+	if (last_rate < IEEE80211_TX_MAX_RATES - 1)
+		rate[last_rate + 1].idx = -1;
+
+	cur_idx = rate[last_rate].idx + st->retry;
+	for (i = 0; i <= last_rate; i++) {
+		rate[i].flags = rate[last_rate].flags;
+		rate[i].idx = max_t(int, 0, cur_idx - i);
+		rate[i].count = 1;
+	}
+
+	if (last_rate > 0)
+		rate[last_rate - 1].count = st->retry + 1 - last_rate;
+
+	info->status.ampdu_len = n_frames;
+	info->status.ampdu_ack_len = st->success ? n_frames : 0;
+
+	if (st->pktid & MT_TXWI_PKTID_PROBE)
+		info->flags |= IEEE80211_TX_CTL_RATE_CTRL_PROBE;
+
+	if (st->aggr)
+		info->flags |= IEEE80211_TX_CTL_AMPDU |
+			       IEEE80211_TX_STAT_AMPDU;
+
+	if (!st->ack_req)
+		info->flags |= IEEE80211_TX_CTL_NO_ACK;
+	else if (st->success)
+		info->flags |= IEEE80211_TX_STAT_ACK;
+}
+
+static void
+mt76x2_send_tx_status(struct mt76x2_dev *dev, struct mt76x2_tx_status *stat,
+		      u8 *update)
+{
+	struct ieee80211_tx_info info = {};
+	struct ieee80211_sta *sta = NULL;
+	struct mt76_wcid *wcid = NULL;
+	struct mt76x2_sta *msta = NULL;
+
+	rcu_read_lock();
+	if (stat->wcid < ARRAY_SIZE(dev->wcid))
+		wcid = rcu_dereference(dev->wcid[stat->wcid]);
+
+	if (wcid) {
+		void *priv;
+
+		priv = msta = container_of(wcid, struct mt76x2_sta, wcid);
+		sta = container_of(priv, struct ieee80211_sta,
+				   drv_priv);
+	}
+
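+	/*
+	 * Consecutive status reports with identical rate/retry info for
+	 * the same aggregate are batched (up to 32 frames) and pushed to
+	 * mac80211 in a single call.
+	 */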
+	if (msta && stat->aggr) {
+		u32 stat_val, stat_cache;
+
+		stat_val = stat->rate;
+		stat_val |= ((u32) stat->retry) << 16;
+		stat_cache = msta->status.rate;
+		stat_cache |= ((u32) msta->status.retry) << 16;
+
+		if (*update == 0 && stat_val == stat_cache &&
+		    stat->wcid == msta->status.wcid && msta->n_frames < 32) {
+			msta->n_frames++;
+			goto out;
+		}
+
+		mt76x2_mac_fill_tx_status(dev, &info, &msta->status,
+					  msta->n_frames);
+
+		msta->status = *stat;
+		msta->n_frames = 1;
+		*update = 0;
+	} else {
+		mt76x2_mac_fill_tx_status(dev, &info, stat, 1);
+		*update = 1;
+	}
+
+	ieee80211_tx_status_noskb(mt76_hw(dev), sta, &info);
+
+out:
+	rcu_read_unlock();
+}
+
+void mt76x2_mac_poll_tx_status(struct mt76x2_dev *dev, bool irq)
+{
+	struct mt76x2_tx_status stat = {};
+	unsigned long flags;
+	u8 update = 1;
+
+	if (!test_bit(MT76_STATE_RUNNING, &dev->mt76.state))
+		return;
+
+	trace_mac_txstat_poll(dev);
+
+	while (!irq || !kfifo_is_full(&dev->txstatus_fifo)) {
+		u32 stat1, stat2;
+
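+		/*
+		 * Reading MT_TX_STAT_FIFO pops the entry, so fetch the
+		 * extended status word first.
+		 */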
+		spin_lock_irqsave(&dev->irq_lock, flags);
+		stat2 = mt76_rr(dev, MT_TX_STAT_FIFO_EXT);
+		stat1 = mt76_rr(dev, MT_TX_STAT_FIFO);
+		if (!(stat1 & MT_TX_STAT_FIFO_VALID)) {
+			spin_unlock_irqrestore(&dev->irq_lock, flags);
+			break;
+		}
+
+		spin_unlock_irqrestore(&dev->irq_lock, flags);
+
+		stat.valid = 1;
+		stat.success = !!(stat1 & MT_TX_STAT_FIFO_SUCCESS);
+		stat.aggr = !!(stat1 & MT_TX_STAT_FIFO_AGGR);
+		stat.ack_req = !!(stat1 & MT_TX_STAT_FIFO_ACKREQ);
+		stat.wcid = FIELD_GET(MT_TX_STAT_FIFO_WCID, stat1);
+		stat.rate = FIELD_GET(MT_TX_STAT_FIFO_RATE, stat1);
+		stat.retry = FIELD_GET(MT_TX_STAT_FIFO_EXT_RETRY, stat2);
+		stat.pktid = FIELD_GET(MT_TX_STAT_FIFO_EXT_PKTID, stat2);
+		trace_mac_txstat_fetch(dev, &stat);
+
+		if (!irq) {
+			mt76x2_send_tx_status(dev, &stat, &update);
+			continue;
+		}
+
+		kfifo_put(&dev->txstatus_fifo, stat);
+	}
+}
+
+static void
+mt76x2_mac_queue_txdone(struct mt76x2_dev *dev, struct sk_buff *skb,
+			void *txwi_ptr)
+{
+	struct mt76x2_tx_info *txi = mt76x2_skb_tx_info(skb);
+	struct mt76x2_txwi *txwi = txwi_ptr;
+
+	mt76x2_mac_poll_tx_status(dev, false);
+
+	txi->tries = 0;
+	txi->jiffies = jiffies;
+	txi->wcid = txwi->wcid;
+	txi->pktid = txwi->pktid;
+	trace_mac_txdone_add(dev, txwi->wcid, txwi->pktid);
+	mt76x2_tx_complete(dev, skb);
+}
+
+void mt76x2_mac_process_tx_status_fifo(struct mt76x2_dev *dev)
+{
+	struct mt76x2_tx_status stat;
+	u8 update = 1;
+
+	while (kfifo_get(&dev->txstatus_fifo, &stat))
+		mt76x2_send_tx_status(dev, &stat, &update);
+}
+
+void mt76x2_tx_complete_skb(struct mt76_dev *mdev, struct mt76_queue *q,
+			    struct mt76_queue_entry *e, bool flush)
+{
+	struct mt76x2_dev *dev = container_of(mdev, struct mt76x2_dev, mt76);
+
+	if (e->txwi)
+		mt76x2_mac_queue_txdone(dev, e->skb, &e->txwi->txwi);
+	else
+		dev_kfree_skb_any(e->skb);
+}
+
+static enum mt76x2_cipher_type
+mt76x2_mac_get_key_info(struct ieee80211_key_conf *key, u8 *key_data)
+{
+	memset(key_data, 0, 32);
+	if (!key)
+		return MT_CIPHER_NONE;
+
+	if (key->keylen > 32)
+		return MT_CIPHER_NONE;
+
+	memcpy(key_data, key->key, key->keylen);
+
+	switch (key->cipher) {
+	case WLAN_CIPHER_SUITE_WEP40:
+		return MT_CIPHER_WEP40;
+	case WLAN_CIPHER_SUITE_WEP104:
+		return MT_CIPHER_WEP104;
+	case WLAN_CIPHER_SUITE_TKIP:
+		return MT_CIPHER_TKIP;
+	case WLAN_CIPHER_SUITE_CCMP:
+		return MT_CIPHER_AES_CCMP;
+	default:
+		return MT_CIPHER_NONE;
+	}
+}
+
+void mt76x2_mac_wcid_setup(struct mt76x2_dev *dev, u8 idx, u8 vif_idx, u8 *mac)
+{
+	struct mt76_wcid_addr addr = {};
+	u32 attr;
+
+	attr = FIELD_PREP(MT_WCID_ATTR_BSS_IDX, vif_idx & 7) |
+	       FIELD_PREP(MT_WCID_ATTR_BSS_IDX_EXT, !!(vif_idx & 8));
+
+	mt76_wr(dev, MT_WCID_ATTR(idx), attr);
+
+	mt76_wr(dev, MT_WCID_TX_RATE(idx), 0);
+	mt76_wr(dev, MT_WCID_TX_RATE(idx) + 4, 0);
+
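+	/* Only the first 128 WCID entries have a MAC address slot */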
+	if (idx >= 128)
+		return;
+
+	if (mac)
+		memcpy(addr.macaddr, mac, ETH_ALEN);
+
+	mt76_wr_copy(dev, MT_WCID_ADDR(idx), &addr, sizeof(addr));
+}
+
+int mt76x2_mac_wcid_set_key(struct mt76x2_dev *dev, u8 idx,
+			    struct ieee80211_key_conf *key)
+{
+	enum mt76x2_cipher_type cipher;
+	u8 key_data[32];
+	u8 iv_data[8];
+
+	cipher = mt76x2_mac_get_key_info(key, key_data);
+	if (cipher == MT_CIPHER_NONE && key)
+		return -EOPNOTSUPP;
+
+	mt76_rmw_field(dev, MT_WCID_ATTR(idx), MT_WCID_ATTR_PKEY_MODE, cipher);
+	mt76_wr_copy(dev, MT_WCID_KEY(idx), key_data, sizeof(key_data));
+
+	memset(iv_data, 0, sizeof(iv_data));
+	if (key) {
+		mt76_rmw_field(dev, MT_WCID_ATTR(idx), MT_WCID_ATTR_PAIRWISE,
+			       !!(key->flags & IEEE80211_KEY_FLAG_PAIRWISE));
+		iv_data[3] = key->keyidx << 6;
+		if (cipher >= MT_CIPHER_TKIP)
+			iv_data[3] |= 0x20;
+	}
+
+	mt76_wr_copy(dev, MT_WCID_IV(idx), iv_data, sizeof(iv_data));
+
+	return 0;
+}
+
+int mt76x2_mac_shared_key_setup(struct mt76x2_dev *dev, u8 vif_idx, u8 key_idx,
+			      struct ieee80211_key_conf *key)
+{
+	enum mt76x2_cipher_type cipher;
+	u8 key_data[32];
+	u32 val;
+
+	cipher = mt76x2_mac_get_key_info(key, key_data);
+	if (cipher == MT_CIPHER_NONE && key)
+		return -EOPNOTSUPP;
+
+	val = mt76_rr(dev, MT_SKEY_MODE(vif_idx));
+	val &= ~(MT_SKEY_MODE_MASK << MT_SKEY_MODE_SHIFT(vif_idx, key_idx));
+	val |= cipher << MT_SKEY_MODE_SHIFT(vif_idx, key_idx);
+	mt76_wr(dev, MT_SKEY_MODE(vif_idx), val);
+
+	mt76_wr_copy(dev, MT_SKEY(vif_idx, key_idx), key_data,
+		     sizeof(key_data));
+
+	return 0;
+}
+
+static int
+mt76_write_beacon(struct mt76x2_dev *dev, int offset, struct sk_buff *skb)
+{
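+	/*
+	 * Beacon slots are evenly spaced, so the gap between the first
+	 * two offsets gives the per-slot capacity.
+	 */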
+	int beacon_len = dev->beacon_offsets[1] - dev->beacon_offsets[0];
+	struct mt76x2_txwi txwi;
+
+	if (WARN_ON_ONCE(beacon_len < skb->len + sizeof(struct mt76x2_txwi)))
+		return -ENOSPC;
+
+	mt76x2_mac_write_txwi(dev, &txwi, skb, NULL, NULL);
+	txwi.flags |= cpu_to_le16(MT_TXWI_FLAGS_TS);
+
+	mt76_wr_copy(dev, offset, &txwi, sizeof(txwi));
+	offset += sizeof(txwi);
+
+	mt76_wr_copy(dev, offset, skb->data, skb->len);
+	return 0;
+}
+
+static int
+__mt76x2_mac_set_beacon(struct mt76x2_dev *dev, u8 bcn_idx, struct sk_buff *skb)
+{
+	int beacon_len = dev->beacon_offsets[1] - dev->beacon_offsets[0];
+	int beacon_addr = dev->beacon_offsets[bcn_idx];
+	int ret = 0;
+	int i;
+
+	/* Prevent corrupt transmissions during update */
+	mt76_set(dev, MT_BCN_BYPASS_MASK, BIT(bcn_idx));
+
+	if (skb) {
+		ret = mt76_write_beacon(dev, beacon_addr, skb);
+		if (!ret)
+			dev->beacon_data_mask |= BIT(bcn_idx) &
+						 dev->beacon_mask;
+	} else {
+		dev->beacon_data_mask &= ~BIT(bcn_idx);
+		for (i = 0; i < beacon_len; i += 4)
+			mt76_wr(dev, beacon_addr + i, 0);
+	}
+
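+	/*
+	 * Keep transmission bypassed for every slot without valid beacon
+	 * data; the upper byte covers the unused hardware slots.
+	 */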
+	mt76_wr(dev, MT_BCN_BYPASS_MASK, 0xff00 | ~dev->beacon_data_mask);
+
+	return ret;
+}
+
+int mt76x2_mac_set_beacon(struct mt76x2_dev *dev, u8 vif_idx,
+			  struct sk_buff *skb)
+{
+	bool force_update = false;
+	int bcn_idx = 0;
+	int i;
+
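+	/*
+	 * Beacons are packed into the lowest hardware slots. When a vif's
+	 * beacon appears or disappears, the slots of all following
+	 * interfaces are rewritten so the mapping stays contiguous.
+	 */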
+	for (i = 0; i < ARRAY_SIZE(dev->beacons); i++) {
+		if (vif_idx == i) {
+			force_update = !!dev->beacons[i] ^ !!skb;
+
+			if (dev->beacons[i])
+				dev_kfree_skb(dev->beacons[i]);
+
+			dev->beacons[i] = skb;
+			__mt76x2_mac_set_beacon(dev, bcn_idx, skb);
+		} else if (force_update && dev->beacons[i]) {
+			__mt76x2_mac_set_beacon(dev, bcn_idx, dev->beacons[i]);
+		}
+
+		bcn_idx += !!dev->beacons[i];
+	}
+
+	for (i = bcn_idx; i < ARRAY_SIZE(dev->beacons); i++) {
+		if (!(dev->beacon_data_mask & BIT(i)))
+			break;
+
+		__mt76x2_mac_set_beacon(dev, i, NULL);
+	}
+
+	mt76_rmw_field(dev, MT_MAC_BSSID_DW1, MT_MAC_BSSID_DW1_MBEACON_N,
+		       bcn_idx - 1);
+	return 0;
+}
+
+void mt76x2_mac_set_beacon_enable(struct mt76x2_dev *dev, u8 vif_idx, bool val)
+{
+	u8 old_mask = dev->beacon_mask;
+	bool en;
+	u32 reg;
+
+	if (val) {
+		dev->beacon_mask |= BIT(vif_idx);
+	} else {
+		dev->beacon_mask &= ~BIT(vif_idx);
+		mt76x2_mac_set_beacon(dev, vif_idx, NULL);
+	}
+
+	if (!!old_mask == !!dev->beacon_mask)
+		return;
+
+	en = dev->beacon_mask;
+
+	mt76_rmw_field(dev, MT_INT_TIMER_EN, MT_INT_TIMER_EN_PRE_TBTT_EN, en);
+	reg = MT_BEACON_TIME_CFG_BEACON_TX |
+	      MT_BEACON_TIME_CFG_TBTT_EN |
+	      MT_BEACON_TIME_CFG_TIMER_EN;
+	mt76_rmw(dev, MT_BEACON_TIME_CFG, reg, reg * en);
+
+	if (en)
+		mt76x2_irq_enable(dev, MT_INT_PRE_TBTT | MT_INT_TBTT);
+	else
+		mt76x2_irq_disable(dev, MT_INT_PRE_TBTT | MT_INT_TBTT);
+}
+
+void mt76x2_update_channel(struct mt76_dev *mdev)
+{
+	struct mt76x2_dev *dev = container_of(mdev, struct mt76x2_dev, mt76);
+	struct mt76_channel_state *state;
+	u32 active, busy;
+
+	state = mt76_channel_state(&dev->mt76, dev->mt76.chandef.chan);
+
+	busy = mt76_rr(dev, MT_CH_BUSY);
+	active = busy + mt76_rr(dev, MT_CH_IDLE);
+
+	spin_lock_bh(&dev->mt76.cc_lock);
+	state->cc_busy += busy;
+	state->cc_active += active;
+	spin_unlock_bh(&dev->mt76.cc_lock);
+}
+
+void mt76x2_mac_work(struct work_struct *work)
+{
+	struct mt76x2_dev *dev = container_of(work, struct mt76x2_dev,
+					    mac_work.work);
+	int i, idx;
+
+	mt76x2_update_channel(&dev->mt76);
+	for (i = 0, idx = 0; i < 16; i++) {
+		u32 val = mt76_rr(dev, MT_TX_AGG_CNT(i));
+
+		dev->aggr_stats[idx++] += val & 0xffff;
+		dev->aggr_stats[idx++] += val >> 16;
+	}
+
+	ieee80211_queue_delayed_work(mt76_hw(dev), &dev->mac_work,
+				     MT_CALIBRATE_INTERVAL);
+}
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_mac.h b/drivers/net/wireless/mediatek/mt76/mt76x2_mac.h
new file mode 100644
index 0000000..8a8a25e
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2_mac.h
@@ -0,0 +1,190 @@
+/*
+ * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef __MT76x2_MAC_H
+#define __MT76x2_MAC_H
+
+#include "mt76.h"
+
+struct mt76x2_dev;
+struct mt76x2_sta;
+struct mt76x2_vif;
+struct mt76x2_txwi;
+
+struct mt76x2_tx_status {
+	u8 valid:1;
+	u8 success:1;
+	u8 aggr:1;
+	u8 ack_req:1;
+	u8 wcid;
+	u8 pktid;
+	u8 retry;
+	u16 rate;
+} __packed __aligned(2);
+
+struct mt76x2_tx_info {
+	unsigned long jiffies;
+	u8 tries;
+
+	u8 wcid;
+	u8 pktid;
+	u8 retry;
+};
+
+struct mt76x2_rxwi {
+	__le32 rxinfo;
+
+	__le32 ctl;
+
+	__le16 tid_sn;
+	__le16 rate;
+
+	u8 rssi[4];
+
+	__le32 bbp_rxinfo[4];
+};
+
+#define MT_RXINFO_BA			BIT(0)
+#define MT_RXINFO_DATA			BIT(1)
+#define MT_RXINFO_NULL			BIT(2)
+#define MT_RXINFO_FRAG			BIT(3)
+#define MT_RXINFO_UNICAST		BIT(4)
+#define MT_RXINFO_MULTICAST		BIT(5)
+#define MT_RXINFO_BROADCAST		BIT(6)
+#define MT_RXINFO_MYBSS			BIT(7)
+#define MT_RXINFO_CRCERR		BIT(8)
+#define MT_RXINFO_ICVERR		BIT(9)
+#define MT_RXINFO_MICERR		BIT(10)
+#define MT_RXINFO_AMSDU			BIT(11)
+#define MT_RXINFO_HTC			BIT(12)
+#define MT_RXINFO_RSSI			BIT(13)
+#define MT_RXINFO_L2PAD			BIT(14)
+#define MT_RXINFO_AMPDU			BIT(15)
+#define MT_RXINFO_DECRYPT		BIT(16)
+#define MT_RXINFO_BSSIDX3		BIT(17)
+#define MT_RXINFO_WAPI_KEY		BIT(18)
+#define MT_RXINFO_PN_LEN		GENMASK(21, 19)
+#define MT_RXINFO_SW_FTYPE0		BIT(22)
+#define MT_RXINFO_SW_FTYPE1		BIT(23)
+#define MT_RXINFO_PROBE_RESP		BIT(24)
+#define MT_RXINFO_BEACON		BIT(25)
+#define MT_RXINFO_DISASSOC		BIT(26)
+#define MT_RXINFO_DEAUTH		BIT(27)
+#define MT_RXINFO_ACTION		BIT(28)
+#define MT_RXINFO_TCP_SUM_ERR		BIT(30)
+#define MT_RXINFO_IP_SUM_ERR		BIT(31)
+
+#define MT_RXWI_CTL_WCID		GENMASK(7, 0)
+#define MT_RXWI_CTL_KEY_IDX		GENMASK(9, 8)
+#define MT_RXWI_CTL_BSS_IDX		GENMASK(12, 10)
+#define MT_RXWI_CTL_UDF			GENMASK(15, 13)
+#define MT_RXWI_CTL_MPDU_LEN		GENMASK(29, 16)
+#define MT_RXWI_CTL_EOF			BIT(31)
+
+#define MT_RXWI_TID			GENMASK(3, 0)
+#define MT_RXWI_SN			GENMASK(15, 4)
+
+#define MT_RXWI_RATE_INDEX		GENMASK(5, 0)
+#define MT_RXWI_RATE_LDPC		BIT(6)
+#define MT_RXWI_RATE_BW			GENMASK(8, 7)
+#define MT_RXWI_RATE_SGI		BIT(9)
+#define MT_RXWI_RATE_STBC		BIT(10)
+#define MT_RXWI_RATE_LDPC_EXSYM		BIT(11)
+#define MT_RXWI_RATE_PHY		GENMASK(15, 13)
+
+#define MT_RATE_INDEX_VHT_IDX		GENMASK(3, 0)
+#define MT_RATE_INDEX_VHT_NSS		GENMASK(5, 4)
+
+#define MT_TX_PWR_ADJ			GENMASK(3, 0)
+
+enum mt76x2_phy_bandwidth {
+	MT_PHY_BW_20,
+	MT_PHY_BW_40,
+	MT_PHY_BW_80,
+};
+
+#define MT_TXWI_FLAGS_FRAG		BIT(0)
+#define MT_TXWI_FLAGS_MMPS		BIT(1)
+#define MT_TXWI_FLAGS_CFACK		BIT(2)
+#define MT_TXWI_FLAGS_TS		BIT(3)
+#define MT_TXWI_FLAGS_AMPDU		BIT(4)
+#define MT_TXWI_FLAGS_MPDU_DENSITY	GENMASK(7, 5)
+#define MT_TXWI_FLAGS_TXOP		GENMASK(9, 8)
+#define MT_TXWI_FLAGS_NDPS		BIT(10)
+#define MT_TXWI_FLAGS_RTSBWSIG		BIT(11)
+#define MT_TXWI_FLAGS_NDP_BW		GENMASK(13, 12)
+#define MT_TXWI_FLAGS_SOUND		BIT(14)
+#define MT_TXWI_FLAGS_TX_RATE_LUT	BIT(15)
+
+#define MT_TXWI_ACK_CTL_REQ		BIT(0)
+#define MT_TXWI_ACK_CTL_NSEQ		BIT(1)
+#define MT_TXWI_ACK_CTL_BA_WINDOW	GENMASK(7, 2)
+
+#define MT_TXWI_PKTID_PROBE		BIT(7)
+
+struct mt76x2_txwi {
+	__le16 flags;
+	__le16 rate;
+	u8 ack_ctl;
+	u8 wcid;
+	__le16 len_ctl;
+	__le32 iv;
+	__le32 eiv;
+	u8 aid;
+	u8 txstream;
+	u8 ctl2;
+	u8 pktid;
+} __packed __aligned(4);
+
+static inline struct mt76x2_tx_info *
+mt76x2_skb_tx_info(struct sk_buff *skb)
+{
+	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+
+	return (void *) info->status.status_driver_data;
+}
+
+int mt76x2_mac_reset(struct mt76x2_dev *dev, bool hard);
+int mt76x2_mac_start(struct mt76x2_dev *dev);
+void mt76x2_mac_stop(struct mt76x2_dev *dev, bool force);
+void mt76x2_mac_resume(struct mt76x2_dev *dev);
+void mt76x2_mac_set_bssid(struct mt76x2_dev *dev, u8 idx, const u8 *addr);
+
+int mt76x2_mac_process_rx(struct mt76x2_dev *dev, struct sk_buff *skb,
+			  void *rxi);
+void mt76x2_mac_write_txwi(struct mt76x2_dev *dev, struct mt76x2_txwi *txwi,
+			   struct sk_buff *skb, struct mt76_wcid *wcid,
+			   struct ieee80211_sta *sta);
+void mt76x2_mac_wcid_setup(struct mt76x2_dev *dev, u8 idx, u8 vif_idx, u8 *mac);
+int mt76x2_mac_wcid_set_key(struct mt76x2_dev *dev, u8 idx,
+			    struct ieee80211_key_conf *key);
+void mt76x2_mac_wcid_set_rate(struct mt76x2_dev *dev, struct mt76_wcid *wcid,
+			      const struct ieee80211_tx_rate *rate);
+void mt76x2_mac_wcid_set_drop(struct mt76x2_dev *dev, u8 idx, bool drop);
+
+int mt76x2_mac_shared_key_setup(struct mt76x2_dev *dev, u8 vif_idx, u8 key_idx,
+				struct ieee80211_key_conf *key);
+
+int mt76x2_mac_set_beacon(struct mt76x2_dev *dev, u8 vif_idx,
+			  struct sk_buff *skb);
+void mt76x2_mac_set_beacon_enable(struct mt76x2_dev *dev, u8 vif_idx, bool val);
+
+void mt76x2_mac_poll_tx_status(struct mt76x2_dev *dev, bool irq);
+void mt76x2_mac_process_tx_status_fifo(struct mt76x2_dev *dev);
+
+void mt76x2_mac_work(struct work_struct *work);
+
+#endif
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_main.c b/drivers/net/wireless/mediatek/mt76/mt76x2_main.c
new file mode 100644
index 0000000..2cef48e
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2_main.c
@@ -0,0 +1,545 @@
+/*
+ * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include "mt76x2.h"
+
+static int
+mt76x2_start(struct ieee80211_hw *hw)
+{
+	struct mt76x2_dev *dev = hw->priv;
+	int ret;
+
+	mutex_lock(&dev->mutex);
+
+	ret = mt76x2_mac_start(dev);
+	if (ret)
+		goto out;
+
+	ret = mt76x2_phy_start(dev);
+	if (ret)
+		goto out;
+
+	ieee80211_queue_delayed_work(mt76_hw(dev), &dev->mac_work,
+				     MT_CALIBRATE_INTERVAL);
+
+	set_bit(MT76_STATE_RUNNING, &dev->mt76.state);
+
+out:
+	mutex_unlock(&dev->mutex);
+	return ret;
+}
+
+static void
+mt76x2_stop(struct ieee80211_hw *hw)
+{
+	struct mt76x2_dev *dev = hw->priv;
+
+	mutex_lock(&dev->mutex);
+	clear_bit(MT76_STATE_RUNNING, &dev->mt76.state);
+	mt76x2_stop_hardware(dev);
+	mutex_unlock(&dev->mutex);
+}
+
+static void
+mt76x2_txq_init(struct mt76x2_dev *dev, struct ieee80211_txq *txq)
+{
+	struct mt76_txq *mtxq;
+
+	if (!txq)
+		return;
+
+	mtxq = (struct mt76_txq *) txq->drv_priv;
+	if (txq->sta) {
+		struct mt76x2_sta *sta;
+
+		sta = (struct mt76x2_sta *) txq->sta->drv_priv;
+		mtxq->wcid = &sta->wcid;
+	} else {
+		struct mt76x2_vif *mvif;
+
+		mvif = (struct mt76x2_vif *) txq->vif->drv_priv;
+		mtxq->wcid = &mvif->group_wcid;
+	}
+
+	mt76_txq_init(&dev->mt76, txq);
+}
+
+static int
+mt76x2_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
+{
+	struct mt76x2_dev *dev = hw->priv;
+	struct mt76x2_vif *mvif = (struct mt76x2_vif *) vif->drv_priv;
+	unsigned int idx = 0;
+	int ret = 0;
+
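+	/*
+	 * Interfaces with a locally administered MAC address are mapped
+	 * to one of the extended BSSID slots, derived from the bits in
+	 * which the address differs from the primary MAC address.
+	 */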
+	if (vif->addr[0] & BIT(1))
+		idx = 1 + (((dev->mt76.macaddr[0] ^ vif->addr[0]) >> 2) & 7);
+
+	/*
+	 * Client mode typically only has one configurable BSSID register,
+	 * which is used for bssidx=0. This is linked to the MAC address.
+	 * Since mac80211 allows changing interface types, and we cannot
+	 * force the use of the primary MAC address for a station mode
+	 * interface, we need some other way of configuring a per-interface
+	 * remote BSSID.
+	 * The hardware provides an AP-Client feature, where bssidx 0-7 are
+	 * used for AP mode and bssidx 8-15 for client mode.
+	 * We shift the station interface bss index by 8 to force the
+	 * hardware to recognize the BSSID.
+	 * The resulting bssidx mismatch for unicast frames is ignored by
+	 * the hardware.
+	 */
+	if (vif->type == NL80211_IFTYPE_STATION)
+		idx += 8;
+
+	mvif->idx = idx;
+	mvif->group_wcid.idx = 254 - idx;
+	mvif->group_wcid.hw_key_idx = -1;
+	mt76x2_txq_init(dev, vif->txq);
+
+	return ret;
+}
+
+static void
+mt76x2_remove_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
+{
+	struct mt76x2_dev *dev = hw->priv;
+
+	mt76_txq_remove(&dev->mt76, vif->txq);
+}
+
+static int
+mt76x2_set_channel(struct mt76x2_dev *dev, struct cfg80211_chan_def *chandef)
+{
+	int ret;
+
+	mt76_set_channel(&dev->mt76);
+
+	tasklet_disable(&dev->pre_tbtt_tasklet);
+	cancel_delayed_work_sync(&dev->cal_work);
+
+	mt76x2_mac_stop(dev, true);
+	ret = mt76x2_phy_set_channel(dev, chandef);
+
+	/*
+	 * The channel busy/idle counters are clear-on-read; read them
+	 * here to reset the statistics for the new channel.
+	 */
+	mt76_rr(dev, MT_CH_IDLE);
+	mt76_rr(dev, MT_CH_BUSY);
+
+	mt76x2_dfs_init_params(dev);
+
+	mt76x2_mac_resume(dev);
+	tasklet_enable(&dev->pre_tbtt_tasklet);
+
+	return ret;
+}
+
+static int
+mt76x2_config(struct ieee80211_hw *hw, u32 changed)
+{
+	struct mt76x2_dev *dev = hw->priv;
+	int ret = 0;
+
+	mutex_lock(&dev->mutex);
+
+	if (changed & IEEE80211_CONF_CHANGE_POWER) {
+		dev->txpower_conf = hw->conf.power_level * 2;
+
+		if (test_bit(MT76_STATE_RUNNING, &dev->mt76.state)) {
+			mt76x2_phy_set_txpower(dev);
+			mt76x2_tx_set_txpwr_auto(dev, dev->txpower_conf);
+		}
+	}
+
+	if (changed & IEEE80211_CONF_CHANGE_CHANNEL) {
+		ieee80211_stop_queues(hw);
+		ret = mt76x2_set_channel(dev, &hw->conf.chandef);
+		ieee80211_wake_queues(hw);
+	}
+
+	mutex_unlock(&dev->mutex);
+
+	return ret;
+}
+
+static void
+mt76x2_configure_filter(struct ieee80211_hw *hw, unsigned int changed_flags,
+			unsigned int *total_flags, u64 multicast)
+{
+	struct mt76x2_dev *dev = hw->priv;
+	u32 flags = 0;
+
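+	/*
+	 * A set bit in MT_RX_FILTR_CFG drops the matching frames; set it
+	 * unless mac80211 requested the corresponding FIF_* flag.
+	 */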
+#define MT76_FILTER(_flag, _hw) do { \
+		flags |= *total_flags & FIF_##_flag;			\
+		dev->rxfilter &= ~(_hw);				\
+		dev->rxfilter |= !(flags & FIF_##_flag) * (_hw);	\
+	} while (0)
+
+	mutex_lock(&dev->mutex);
+
+	dev->rxfilter &= ~MT_RX_FILTR_CFG_OTHER_BSS;
+
+	MT76_FILTER(FCSFAIL, MT_RX_FILTR_CFG_CRC_ERR);
+	MT76_FILTER(PLCPFAIL, MT_RX_FILTR_CFG_PHY_ERR);
+	MT76_FILTER(CONTROL, MT_RX_FILTR_CFG_ACK |
+			     MT_RX_FILTR_CFG_CTS |
+			     MT_RX_FILTR_CFG_CFEND |
+			     MT_RX_FILTR_CFG_CFACK |
+			     MT_RX_FILTR_CFG_BA |
+			     MT_RX_FILTR_CFG_CTRL_RSV);
+	MT76_FILTER(PSPOLL, MT_RX_FILTR_CFG_PSPOLL);
+
+	*total_flags = flags;
+	mt76_wr(dev, MT_RX_FILTR_CFG, dev->rxfilter);
+
+	mutex_unlock(&dev->mutex);
+}
+
+static void
+mt76x2_bss_info_changed(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+			struct ieee80211_bss_conf *info, u32 changed)
+{
+	struct mt76x2_dev *dev = hw->priv;
+	struct mt76x2_vif *mvif = (struct mt76x2_vif *) vif->drv_priv;
+
+	mutex_lock(&dev->mutex);
+
+	if (changed & BSS_CHANGED_BSSID)
+		mt76x2_mac_set_bssid(dev, mvif->idx, info->bssid);
+
+	if (changed & BSS_CHANGED_BEACON_INT)
+		mt76_rmw_field(dev, MT_BEACON_TIME_CFG,
+			       MT_BEACON_TIME_CFG_INTVAL,
+			       info->beacon_int << 4);
+
+	if (changed & BSS_CHANGED_BEACON_ENABLED) {
+		tasklet_disable(&dev->pre_tbtt_tasklet);
+		mt76x2_mac_set_beacon_enable(dev, mvif->idx,
+					     info->enable_beacon);
+		tasklet_enable(&dev->pre_tbtt_tasklet);
+	}
+
+	if (changed & BSS_CHANGED_ERP_SLOT) {
+		int slottime = info->use_short_slot ? 9 : 20;
+
+		dev->slottime = slottime;
+		mt76_rmw_field(dev, MT_BKOFF_SLOT_CFG,
+			       MT_BKOFF_SLOT_CFG_SLOTTIME, slottime);
+	}
+
+	mutex_unlock(&dev->mutex);
+}
+
+static int
+mt76x2_sta_add(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+	       struct ieee80211_sta *sta)
+{
+	struct mt76x2_dev *dev = hw->priv;
+	struct mt76x2_sta *msta = (struct mt76x2_sta *) sta->drv_priv;
+	struct mt76x2_vif *mvif = (struct mt76x2_vif *) vif->drv_priv;
+	int ret = 0;
+	int idx = 0;
+	int i;
+
+	mutex_lock(&dev->mutex);
+
+	idx = mt76_wcid_alloc(dev->wcid_mask, ARRAY_SIZE(dev->wcid));
+	if (idx < 0) {
+		ret = -ENOSPC;
+		goto out;
+	}
+
+	msta->wcid.idx = idx;
+	msta->wcid.hw_key_idx = -1;
+	mt76x2_mac_wcid_setup(dev, idx, mvif->idx, sta->addr);
+	mt76x2_mac_wcid_set_drop(dev, idx, false);
+	for (i = 0; i < ARRAY_SIZE(sta->txq); i++)
+		mt76x2_txq_init(dev, sta->txq[i]);
+
+	rcu_assign_pointer(dev->wcid[idx], &msta->wcid);
+
+out:
+	mutex_unlock(&dev->mutex);
+
+	return ret;
+}
+
+static int
+mt76x2_sta_remove(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+		  struct ieee80211_sta *sta)
+{
+	struct mt76x2_dev *dev = hw->priv;
+	struct mt76x2_sta *msta = (struct mt76x2_sta *) sta->drv_priv;
+	int idx = msta->wcid.idx;
+	int i;
+
+	mutex_lock(&dev->mutex);
+	rcu_assign_pointer(dev->wcid[idx], NULL);
+	for (i = 0; i < ARRAY_SIZE(sta->txq); i++)
+		mt76_txq_remove(&dev->mt76, sta->txq[i]);
+	mt76x2_mac_wcid_set_drop(dev, idx, true);
+	mt76_wcid_free(dev->wcid_mask, idx);
+	mt76x2_mac_wcid_setup(dev, idx, 0, NULL);
+	mutex_unlock(&dev->mutex);
+
+	return 0;
+}
+
+static void
+mt76x2_sta_notify(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+		  enum sta_notify_cmd cmd, struct ieee80211_sta *sta)
+{
+	struct mt76x2_sta *msta = (struct mt76x2_sta *) sta->drv_priv;
+	struct mt76x2_dev *dev = hw->priv;
+	int idx = msta->wcid.idx;
+
+	switch (cmd) {
+	case STA_NOTIFY_SLEEP:
+		mt76x2_mac_wcid_set_drop(dev, idx, true);
+		mt76_stop_tx_queues(&dev->mt76, sta, true);
+		break;
+	case STA_NOTIFY_AWAKE:
+		mt76x2_mac_wcid_set_drop(dev, idx, false);
+		break;
+	}
+}
+
+static int
+mt76x2_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
+	       struct ieee80211_vif *vif, struct ieee80211_sta *sta,
+	       struct ieee80211_key_conf *key)
+{
+	struct mt76x2_dev *dev = hw->priv;
+	struct mt76x2_vif *mvif = (struct mt76x2_vif *) vif->drv_priv;
+	struct mt76x2_sta *msta;
+	struct mt76_wcid *wcid;
+	int idx = key->keyidx;
+	int ret;
+
+	/*
+	 * The hardware does not support per-STA RX GTK; fall back
+	 * to software mode for these.
+	 */
+	if ((vif->type == NL80211_IFTYPE_ADHOC ||
+	     vif->type == NL80211_IFTYPE_MESH_POINT) &&
+	    (key->cipher == WLAN_CIPHER_SUITE_TKIP ||
+	     key->cipher == WLAN_CIPHER_SUITE_CCMP) &&
+	    !(key->flags & IEEE80211_KEY_FLAG_PAIRWISE))
+		return -EOPNOTSUPP;
+
+	msta = sta ? (struct mt76x2_sta *) sta->drv_priv : NULL;
+	wcid = msta ? &msta->wcid : &mvif->group_wcid;
+
+	if (cmd == SET_KEY) {
+		key->hw_key_idx = wcid->idx;
+		wcid->hw_key_idx = idx;
+	} else {
+		if (idx == wcid->hw_key_idx)
+			wcid->hw_key_idx = -1;
+
+		key = NULL;
+	}
+
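+	/*
+	 * Group keys without an associated station are programmed into
+	 * the per-vif shared key table as well as the vif's group WCID.
+	 */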
+	if (!msta) {
+		if (key || wcid->hw_key_idx == idx) {
+			ret = mt76x2_mac_wcid_set_key(dev, wcid->idx, key);
+			if (ret)
+				return ret;
+		}
+
+		return mt76x2_mac_shared_key_setup(dev, mvif->idx, idx, key);
+	}
+
+	return mt76x2_mac_wcid_set_key(dev, msta->wcid.idx, key);
+}
+
+static int
+mt76x2_conf_tx(struct ieee80211_hw *hw, struct ieee80211_vif *vif, u16 queue,
+	       const struct ieee80211_tx_queue_params *params)
+{
+	struct mt76x2_dev *dev = hw->priv;
+	u8 cw_min = 5, cw_max = 10;
+	u32 val;
+
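+	/* The hardware expects the contention windows as exponents */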
+	if (params->cw_min)
+		cw_min = fls(params->cw_min);
+	if (params->cw_max)
+		cw_max = fls(params->cw_max);
+
+	val = FIELD_PREP(MT_EDCA_CFG_TXOP, params->txop) |
+	      FIELD_PREP(MT_EDCA_CFG_AIFSN, params->aifs) |
+	      FIELD_PREP(MT_EDCA_CFG_CWMIN, cw_min) |
+	      FIELD_PREP(MT_EDCA_CFG_CWMAX, cw_max);
+	mt76_wr(dev, MT_EDCA_CFG_AC(queue), val);
+
+	val = mt76_rr(dev, MT_WMM_TXOP(queue));
+	val &= ~(MT_WMM_TXOP_MASK << MT_WMM_TXOP_SHIFT(queue));
+	val |= params->txop << MT_WMM_TXOP_SHIFT(queue);
+	mt76_wr(dev, MT_WMM_TXOP(queue), val);
+
+	val = mt76_rr(dev, MT_WMM_AIFSN);
+	val &= ~(MT_WMM_AIFSN_MASK << MT_WMM_AIFSN_SHIFT(queue));
+	val |= params->aifs << MT_WMM_AIFSN_SHIFT(queue);
+	mt76_wr(dev, MT_WMM_AIFSN, val);
+
+	val = mt76_rr(dev, MT_WMM_CWMIN);
+	val &= ~(MT_WMM_CWMIN_MASK << MT_WMM_CWMIN_SHIFT(queue));
+	val |= cw_min << MT_WMM_CWMIN_SHIFT(queue);
+	mt76_wr(dev, MT_WMM_CWMIN, val);
+
+	val = mt76_rr(dev, MT_WMM_CWMAX);
+	val &= ~(MT_WMM_CWMAX_MASK << MT_WMM_CWMAX_SHIFT(queue));
+	val |= cw_max << MT_WMM_CWMAX_SHIFT(queue);
+	mt76_wr(dev, MT_WMM_CWMAX, val);
+
+	return 0;
+}
+
+static void
+mt76x2_sw_scan(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+	       const u8 *mac)
+{
+	struct mt76x2_dev *dev = hw->priv;
+
+	tasklet_disable(&dev->pre_tbtt_tasklet);
+	set_bit(MT76_SCANNING, &dev->mt76.state);
+}
+
+static void
+mt76x2_sw_scan_complete(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
+{
+	struct mt76x2_dev *dev = hw->priv;
+
+	clear_bit(MT76_SCANNING, &dev->mt76.state);
+	tasklet_enable(&dev->pre_tbtt_tasklet);
+	mt76_txq_schedule_all(&dev->mt76);
+}
+
+static void
+mt76x2_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+	     u32 queues, bool drop)
+{
+}
+
+static int
+mt76x2_get_txpower(struct ieee80211_hw *hw, struct ieee80211_vif *vif, int *dbm)
+{
+	struct mt76x2_dev *dev = hw->priv;
+
+	*dbm = dev->txpower_cur / 2;
+	return 0;
+}
+
+static int
+mt76x2_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+		    struct ieee80211_ampdu_params *params)
+{
+	enum ieee80211_ampdu_mlme_action action = params->action;
+	struct ieee80211_sta *sta = params->sta;
+	struct mt76x2_dev *dev = hw->priv;
+	struct mt76x2_sta *msta = (struct mt76x2_sta *) sta->drv_priv;
+	struct ieee80211_txq *txq = sta->txq[params->tid];
+	struct mt76_txq *mtxq;
+	u16 tid = params->tid;
+	u16 *ssn = &params->ssn;
+
+	if (!txq)
+		return -EINVAL;
+
+	mtxq = (struct mt76_txq *) txq->drv_priv;
+
+	switch (action) {
+	case IEEE80211_AMPDU_RX_START:
+		mt76_set(dev, MT_WCID_ADDR(msta->wcid.idx) + 4, BIT(16 + tid));
+		break;
+	case IEEE80211_AMPDU_RX_STOP:
+		mt76_clear(dev, MT_WCID_ADDR(msta->wcid.idx) + 4,
+			   BIT(16 + tid));
+		break;
+	case IEEE80211_AMPDU_TX_OPERATIONAL:
+		mtxq->aggr = true;
+		mtxq->send_bar = false;
+		ieee80211_send_bar(vif, sta->addr, tid, mtxq->agg_ssn);
+		break;
+	case IEEE80211_AMPDU_TX_STOP_FLUSH:
+	case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT:
+		mtxq->aggr = false;
+		ieee80211_send_bar(vif, sta->addr, tid, mtxq->agg_ssn);
+		break;
+	case IEEE80211_AMPDU_TX_START:
+		mtxq->agg_ssn = *ssn << 4;
+		ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid);
+		break;
+	case IEEE80211_AMPDU_TX_STOP_CONT:
+		mtxq->aggr = false;
+		ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
+		break;
+	}
+
+	return 0;
+}
+
+static void
+mt76x2_sta_rate_tbl_update(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+			   struct ieee80211_sta *sta)
+{
+	struct mt76x2_dev *dev = hw->priv;
+	struct mt76x2_sta *msta = (struct mt76x2_sta *) sta->drv_priv;
+	struct ieee80211_sta_rates *rates;
+	struct ieee80211_tx_rate rate = {};
+
+	rcu_read_lock();
+	rates = rcu_dereference(sta->rates);
+	if (!rates) {
+		rcu_read_unlock();
+		return;
+	}
+
+	rate.idx = rates->rate[0].idx;
+	rate.flags = rates->rate[0].flags;
+	rcu_read_unlock();
+	mt76x2_mac_wcid_set_rate(dev, &msta->wcid, &rate);
+	msta->wcid.max_txpwr_adj = mt76x2_tx_get_max_txpwr_adj(dev, &rate);
+}
+
+static void mt76x2_set_coverage_class(struct ieee80211_hw *hw,
+				      s16 coverage_class)
+{
+	struct mt76x2_dev *dev = hw->priv;
+
+	mutex_lock(&dev->mutex);
+	dev->coverage_class = coverage_class;
+	mt76x2_set_tx_ackto(dev);
+	mutex_unlock(&dev->mutex);
+}
+
+const struct ieee80211_ops mt76x2_ops = {
+	.tx = mt76x2_tx,
+	.start = mt76x2_start,
+	.stop = mt76x2_stop,
+	.add_interface = mt76x2_add_interface,
+	.remove_interface = mt76x2_remove_interface,
+	.config = mt76x2_config,
+	.configure_filter = mt76x2_configure_filter,
+	.bss_info_changed = mt76x2_bss_info_changed,
+	.sta_add = mt76x2_sta_add,
+	.sta_remove = mt76x2_sta_remove,
+	.sta_notify = mt76x2_sta_notify,
+	.set_key = mt76x2_set_key,
+	.conf_tx = mt76x2_conf_tx,
+	.sw_scan_start = mt76x2_sw_scan,
+	.sw_scan_complete = mt76x2_sw_scan_complete,
+	.flush = mt76x2_flush,
+	.ampdu_action = mt76x2_ampdu_action,
+	.get_txpower = mt76x2_get_txpower,
+	.wake_tx_queue = mt76_wake_tx_queue,
+	.sta_rate_tbl_update = mt76x2_sta_rate_tbl_update,
+	.release_buffered_frames = mt76_release_buffered_frames,
+	.set_coverage_class = mt76x2_set_coverage_class,
+	.get_survey = mt76_get_survey,
+};
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_mcu.c b/drivers/net/wireless/mediatek/mt76/mt76x2_mcu.c
new file mode 100644
index 0000000..d45737e
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2_mcu.c
@@ -0,0 +1,451 @@
+/*
+ * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <linux/kernel.h>
+#include <linux/firmware.h>
+#include <linux/delay.h>
+
+#include "mt76x2.h"
+#include "mt76x2_mcu.h"
+#include "mt76x2_dma.h"
+#include "mt76x2_eeprom.h"
+
+struct mt76x2_fw_header {
+	__le32 ilm_len;
+	__le32 dlm_len;
+	__le16 build_ver;
+	__le16 fw_ver;
+	u8 pad[4];
+	char build_time[16];
+};
+
+struct mt76x2_patch_header {
+	char build_time[16];
+	char platform[4];
+	char hw_version[4];
+	char patch_version[4];
+	u8 pad[2];
+};
+
+static struct sk_buff *mt76x2_mcu_msg_alloc(const void *data, int len)
+{
+	struct sk_buff *skb;
+
+	skb = alloc_skb(len, GFP_KERNEL);
+	if (!skb)
+		return NULL;
+
+	memcpy(skb_put(skb, len), data, len);
+
+	return skb;
+}
+
+static struct sk_buff *
+mt76x2_mcu_get_response(struct mt76x2_dev *dev, unsigned long expires)
+{
+	unsigned long timeout;
+
+	if (!time_is_after_jiffies(expires))
+		return NULL;
+
+	timeout = expires - jiffies;
+	wait_event_timeout(dev->mcu.wait, !skb_queue_empty(&dev->mcu.res_q),
+			   timeout);
+	return skb_dequeue(&dev->mcu.res_q);
+}
+
+static int
+mt76x2_mcu_msg_send(struct mt76x2_dev *dev, struct sk_buff *skb,
+		    enum mcu_cmd cmd)
+{
+	unsigned long expires = jiffies + HZ;
+	int ret;
+	u8 seq;
+
+	if (!skb)
+		return -EINVAL;
+
+	mutex_lock(&dev->mcu.mutex);
+
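+	/*
+	 * Sequence numbers wrap at 16; 0 is skipped so that a pending
+	 * message always carries a non-zero sequence number.
+	 */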
+	seq = ++dev->mcu.msg_seq & 0xf;
+	if (!seq)
+		seq = ++dev->mcu.msg_seq & 0xf;
+
+	ret = mt76x2_tx_queue_mcu(dev, MT_TXQ_MCU, skb, cmd, seq);
+	if (ret)
+		goto out;
+
+	while (1) {
+		u32 *rxfce;
+		bool check_seq = false;
+
+		skb = mt76x2_mcu_get_response(dev, expires);
+		if (!skb) {
+			dev_err(dev->mt76.dev,
+				"MCU message %d (seq %d) timed out\n", cmd,
+				seq);
+			ret = -ETIMEDOUT;
+			break;
+		}
+
+		rxfce = (u32 *) skb->cb;
+
+		if (seq == FIELD_GET(MT_RX_FCE_INFO_CMD_SEQ, *rxfce))
+			check_seq = true;
+
+		dev_kfree_skb(skb);
+		if (check_seq)
+			break;
+	}
+
+out:
+	mutex_unlock(&dev->mcu.mutex);
+
+	return ret;
+}
+
+static int
+mt76pci_load_rom_patch(struct mt76x2_dev *dev)
+{
+	const struct firmware *fw = NULL;
+	struct mt76x2_patch_header *hdr;
+	bool rom_protect = !is_mt7612(dev);
+	int len, ret = 0;
+	__le32 *cur;
+	u32 patch_mask, patch_reg;
+
+	if (rom_protect && !mt76_poll(dev, MT_MCU_SEMAPHORE_03, 1, 1, 600)) {
+		dev_err(dev->mt76.dev,
+			"Could not get hardware semaphore for ROM PATCH\n");
+		return -ETIMEDOUT;
+	}
+
+	if (mt76xx_rev(dev) >= MT76XX_REV_E3) {
+		patch_mask = BIT(0);
+		patch_reg = MT_MCU_CLOCK_CTL;
+	} else {
+		patch_mask = BIT(1);
+		patch_reg = MT_MCU_COM_REG0;
+	}
+
+	if (rom_protect && (mt76_rr(dev, patch_reg) & patch_mask)) {
+		dev_info(dev->mt76.dev, "ROM patch already applied\n");
+		goto out;
+	}
+
+	ret = request_firmware(&fw, MT7662_ROM_PATCH, dev->mt76.dev);
+	if (ret)
+		goto out;
+
+	if (!fw || !fw->data || fw->size <= sizeof(*hdr)) {
+		ret = -EIO;
+		dev_err(dev->mt76.dev, "Failed to load firmware\n");
+		goto out;
+	}
+
+	hdr = (struct mt76x2_patch_header *) fw->data;
+	dev_info(dev->mt76.dev, "ROM patch build: %.15s\n", hdr->build_time);
+
+	mt76_wr(dev, MT_MCU_PCIE_REMAP_BASE4, MT_MCU_ROM_PATCH_OFFSET);
+
+	cur = (__le32 *) (fw->data + sizeof(*hdr));
+	len = fw->size - sizeof(*hdr);
+	mt76_wr_copy(dev, MT_MCU_ROM_PATCH_ADDR, cur, len);
+
+	mt76_wr(dev, MT_MCU_PCIE_REMAP_BASE4, 0);
+
+	/* Trigger ROM */
+	mt76_wr(dev, MT_MCU_INT_LEVEL, 4);
+
+	if (!mt76_poll_msec(dev, patch_reg, patch_mask, patch_mask, 2000)) {
+		dev_err(dev->mt76.dev, "Failed to load ROM patch\n");
+		ret = -ETIMEDOUT;
+	}
+
+out:
+	/* release semaphore */
+	if (rom_protect)
+		mt76_wr(dev, MT_MCU_SEMAPHORE_03, 1);
+	release_firmware(fw);
+	return ret;
+}
+
+static int
+mt76pci_load_firmware(struct mt76x2_dev *dev)
+{
+	const struct firmware *fw;
+	const struct mt76x2_fw_header *hdr;
+	int i, len, ret;
+	__le32 *cur;
+	u32 offset, val;
+
+	ret = request_firmware(&fw, MT7662_FIRMWARE, dev->mt76.dev);
+	if (ret)
+		return ret;
+
+	if (!fw || !fw->data || fw->size < sizeof(*hdr))
+		goto error;
+
+	hdr = (const struct mt76x2_fw_header *) fw->data;
+
+	len = sizeof(*hdr);
+	len += le32_to_cpu(hdr->ilm_len);
+	len += le32_to_cpu(hdr->dlm_len);
+
+	if (fw->size != len)
+		goto error;
+
+	val = le16_to_cpu(hdr->fw_ver);
+	dev_info(dev->mt76.dev, "Firmware Version: %d.%d.%02d\n",
+		 (val >> 12) & 0xf, (val >> 8) & 0xf, val & 0xf);
+
+	val = le16_to_cpu(hdr->build_ver);
+	dev_info(dev->mt76.dev, "Build: %x\n", val);
+	dev_info(dev->mt76.dev, "Build Time: %.16s\n", hdr->build_time);
+
+	cur = (__le32 *) (fw->data + sizeof(*hdr));
+	len = le32_to_cpu(hdr->ilm_len);
+
+	mt76_wr(dev, MT_MCU_PCIE_REMAP_BASE4, MT_MCU_ILM_OFFSET);
+	mt76_wr_copy(dev, MT_MCU_ILM_ADDR, cur, len);
+
+	cur += len / sizeof(*cur);
+	len = le32_to_cpu(hdr->dlm_len);
+
+	if (mt76xx_rev(dev) >= MT76XX_REV_E3)
+		offset = MT_MCU_DLM_ADDR_E3;
+	else
+		offset = MT_MCU_DLM_ADDR;
+
+	mt76_wr(dev, MT_MCU_PCIE_REMAP_BASE4, MT_MCU_DLM_OFFSET);
+	mt76_wr_copy(dev, offset, cur, len);
+
+	mt76_wr(dev, MT_MCU_PCIE_REMAP_BASE4, 0);
+
+	val = mt76x2_eeprom_get(dev, MT_EE_NIC_CONF_2);
+	if (FIELD_GET(MT_EE_NIC_CONF_2_XTAL_OPTION, val) == 1)
+		mt76_set(dev, MT_MCU_COM_REG0, BIT(30));
+
+	/* trigger firmware */
+	mt76_wr(dev, MT_MCU_INT_LEVEL, 2);
+	for (i = 200; i > 0; i--) {
+		val = mt76_rr(dev, MT_MCU_COM_REG0);
+
+		if (val & 1)
+			break;
+
+		msleep(10);
+	}
+
+	if (!i) {
+		dev_err(dev->mt76.dev, "Firmware failed to start\n");
+		release_firmware(fw);
+		return -ETIMEDOUT;
+	}
+
+	dev_info(dev->mt76.dev, "Firmware running!\n");
+
+	release_firmware(fw);
+
+	return ret;
+
+error:
+	dev_err(dev->mt76.dev, "Invalid firmware\n");
+	release_firmware(fw);
+	return -ENOENT;
+}
+
+static int
+mt76x2_mcu_function_select(struct mt76x2_dev *dev, enum mcu_function func,
+			   u32 val)
+{
+	struct sk_buff *skb;
+	struct {
+		__le32 id;
+		__le32 value;
+	} __packed __aligned(4) msg = {
+		.id = cpu_to_le32(func),
+		.value = cpu_to_le32(val),
+	};
+
+	skb = mt76x2_mcu_msg_alloc(&msg, sizeof(msg));
+	return mt76x2_mcu_msg_send(dev, skb, CMD_FUN_SET_OP);
+}
+
+int mt76x2_mcu_load_cr(struct mt76x2_dev *dev, u8 type, u8 temp_level,
+		       u8 channel)
+{
+	struct sk_buff *skb;
+	struct {
+		u8 cr_mode;
+		u8 temp;
+		u8 ch;
+		u8 _pad0;
+
+		__le32 cfg;
+	} __packed __aligned(4) msg = {
+		.cr_mode = type,
+		.temp = temp_level,
+		.ch = channel,
+	};
+	u32 val;
+
+	val = BIT(31);
+	val |= (mt76x2_eeprom_get(dev, MT_EE_NIC_CONF_0) >> 8) & 0x00ff;
+	val |= (mt76x2_eeprom_get(dev, MT_EE_NIC_CONF_1) << 8) & 0xff00;
+	msg.cfg = cpu_to_le32(val);
+
+	skb = mt76x2_mcu_msg_alloc(&msg, sizeof(msg));
+	return mt76x2_mcu_msg_send(dev, skb, CMD_LOAD_CR);
+}
+
+int mt76x2_mcu_set_channel(struct mt76x2_dev *dev, u8 channel, u8 bw,
+			   u8 bw_index, bool scan)
+{
+	struct sk_buff *skb;
+	struct {
+		u8 idx;
+		u8 scan;
+		u8 bw;
+		u8 _pad0;
+
+		__le16 chainmask;
+		u8 ext_chan;
+		u8 _pad1;
+
+	} __packed __aligned(4) msg = {
+		.idx = channel,
+		.scan = scan,
+		.bw = bw,
+		.chainmask = cpu_to_le16(dev->chainmask),
+	};
+
+	/* first set the channel without the extension channel info */
+	skb = mt76x2_mcu_msg_alloc(&msg, sizeof(msg));
+	mt76x2_mcu_msg_send(dev, skb, CMD_SWITCH_CHANNEL_OP);
+
+	usleep_range(5000, 10000);
+
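+	/* then send the message again with the extension channel set */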
+	msg.ext_chan = 0xe0 + bw_index;
+	skb = mt76x2_mcu_msg_alloc(&msg, sizeof(msg));
+	return mt76x2_mcu_msg_send(dev, skb, CMD_SWITCH_CHANNEL_OP);
+}
+
+int mt76x2_mcu_set_radio_state(struct mt76x2_dev *dev, bool on)
+{
+	struct sk_buff *skb;
+	struct {
+		__le32 mode;
+		__le32 level;
+	} __packed __aligned(4) msg = {
+		.mode = cpu_to_le32(on ? RADIO_ON : RADIO_OFF),
+		.level = cpu_to_le32(0),
+	};
+
+	skb = mt76x2_mcu_msg_alloc(&msg, sizeof(msg));
+	return mt76x2_mcu_msg_send(dev, skb, CMD_POWER_SAVING_OP);
+}
+
+int mt76x2_mcu_calibrate(struct mt76x2_dev *dev, enum mcu_calibration type,
+			 u32 param)
+{
+	struct sk_buff *skb;
+	struct {
+		__le32 id;
+		__le32 value;
+	} __packed __aligned(4) msg = {
+		.id = cpu_to_le32(type),
+		.value = cpu_to_le32(param),
+	};
+	int ret;
+
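+	/*
+	 * The firmware signals completion by setting BIT(31) in
+	 * MT_MCU_COM_REG0; clear it before issuing the command.
+	 */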
+	mt76_clear(dev, MT_MCU_COM_REG0, BIT(31));
+
+	skb = mt76x2_mcu_msg_alloc(&msg, sizeof(msg));
+	ret = mt76x2_mcu_msg_send(dev, skb, CMD_CALIBRATION_OP);
+	if (ret)
+		return ret;
+
+	if (WARN_ON(!mt76_poll_msec(dev, MT_MCU_COM_REG0,
+				    BIT(31), BIT(31), 100)))
+		return -ETIMEDOUT;
+
+	return 0;
+}
+
+int mt76x2_mcu_tssi_comp(struct mt76x2_dev *dev,
+			 struct mt76x2_tssi_comp *tssi_data)
+{
+	struct sk_buff *skb;
+	struct {
+		__le32 id;
+		struct mt76x2_tssi_comp data;
+	} __packed __aligned(4) msg = {
+		.id = cpu_to_le32(MCU_CAL_TSSI_COMP),
+		.data = *tssi_data,
+	};
+
+	skb = mt76x2_mcu_msg_alloc(&msg, sizeof(msg));
+	return mt76x2_mcu_msg_send(dev, skb, CMD_CALIBRATION_OP);
+}
+
+int mt76x2_mcu_init_gain(struct mt76x2_dev *dev, u8 channel, u32 gain,
+			 bool force)
+{
+	struct sk_buff *skb;
+	struct {
+		__le32 channel;
+		__le32 gain_val;
+	} __packed __aligned(4) msg = {
+		.channel = cpu_to_le32(channel),
+		.gain_val = cpu_to_le32(gain),
+	};
+
+	if (force)
+		msg.channel |= cpu_to_le32(BIT(31));
+
+	skb = mt76x2_mcu_msg_alloc(&msg, sizeof(msg));
+	return mt76x2_mcu_msg_send(dev, skb, CMD_INIT_GAIN_OP);
+}
+
+int mt76x2_mcu_init(struct mt76x2_dev *dev)
+{
+	int ret;
+
+	mutex_init(&dev->mcu.mutex);
+
+	ret = mt76pci_load_rom_patch(dev);
+	if (ret)
+		return ret;
+
+	ret = mt76pci_load_firmware(dev);
+	if (ret)
+		return ret;
+
+	mt76x2_mcu_function_select(dev, Q_SELECT, 1);
+	return 0;
+}
+
+int mt76x2_mcu_cleanup(struct mt76x2_dev *dev)
+{
+	struct sk_buff *skb;
+
+	mt76_wr(dev, MT_MCU_INT_LEVEL, 1);
+	usleep_range(20000, 30000);
+
+	while ((skb = skb_dequeue(&dev->mcu.res_q)) != NULL)
+		dev_kfree_skb(skb);
+
+	return 0;
+}
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_mcu.h b/drivers/net/wireless/mediatek/mt76/mt76x2_mcu.h
new file mode 100644
index 0000000..d7a7e83
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2_mcu.h
@@ -0,0 +1,155 @@
+/*
+ * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef __MT76x2_MCU_H
+#define __MT76x2_MCU_H
+
+/* Register definitions */
+#define MT_MCU_CPU_CTL			0x0704
+#define MT_MCU_CLOCK_CTL		0x0708
+#define MT_MCU_RESET_CTL		0x070C
+#define MT_MCU_INT_LEVEL		0x0718
+#define MT_MCU_COM_REG0			0x0730
+#define MT_MCU_COM_REG1			0x0734
+#define MT_MCU_COM_REG2			0x0738
+#define MT_MCU_COM_REG3			0x073C
+#define MT_MCU_PCIE_REMAP_BASE1		0x0740
+#define MT_MCU_PCIE_REMAP_BASE2		0x0744
+#define MT_MCU_PCIE_REMAP_BASE3		0x0748
+#define MT_MCU_PCIE_REMAP_BASE4		0x074C
+
+#define MT_LED_CTRL			0x0770
+#define MT_LED_CTRL_REPLAY(_n)		BIT(0 + (8 * (_n)))
+#define MT_LED_CTRL_POLARITY(_n)	BIT(1 + (8 * (_n)))
+#define MT_LED_CTRL_TX_BLINK_MODE(_n)	BIT(2 + (8 * (_n)))
+#define MT_LED_CTRL_KICK(_n)		BIT(7 + (8 * (_n)))
+
+#define MT_LED_TX_BLINK_0		0x0774
+#define MT_LED_TX_BLINK_1		0x0778
+
+#define MT_LED_S0_BASE			0x077C
+#define MT_LED_S0(_n)			(MT_LED_S0_BASE + 8 * (_n))
+#define MT_LED_S1_BASE			0x0780
+#define MT_LED_S1(_n)			(MT_LED_S1_BASE + 8 * (_n))
+#define MT_LED_STATUS_OFF_MASK		GENMASK(31, 24)
+#define MT_LED_STATUS_OFF(_v)		(((_v) << __ffs(MT_LED_STATUS_OFF_MASK)) & \
+					 MT_LED_STATUS_OFF_MASK)
+#define MT_LED_STATUS_ON_MASK		GENMASK(23, 16)
+#define MT_LED_STATUS_ON(_v)		(((_v) << __ffs(MT_LED_STATUS_ON_MASK)) & \
+					 MT_LED_STATUS_ON_MASK)
+#define MT_LED_STATUS_DURATION_MASK	GENMASK(15, 8)
+#define MT_LED_STATUS_DURATION(_v)	(((_v) << __ffs(MT_LED_STATUS_DURATION_MASK)) & \
+					 MT_LED_STATUS_DURATION_MASK)
+
+#define MT_MCU_SEMAPHORE_00		0x07B0
+#define MT_MCU_SEMAPHORE_01		0x07B4
+#define MT_MCU_SEMAPHORE_02		0x07B8
+#define MT_MCU_SEMAPHORE_03		0x07BC
+
+#define MT_MCU_ROM_PATCH_OFFSET		0x80000
+#define MT_MCU_ROM_PATCH_ADDR		0x90000
+
+#define MT_MCU_ILM_OFFSET		0x80000
+#define MT_MCU_ILM_ADDR			0x80000
+
+#define MT_MCU_DLM_OFFSET		0x100000
+#define MT_MCU_DLM_ADDR			0x90000
+#define MT_MCU_DLM_ADDR_E3		0x90800
+
+enum mcu_cmd {
+	CMD_FUN_SET_OP = 1,
+	CMD_LOAD_CR = 2,
+	CMD_INIT_GAIN_OP = 3,
+	CMD_DYNC_VGA_OP = 6,
+	CMD_TDLS_CH_SW = 7,
+	CMD_BURST_WRITE = 8,
+	CMD_READ_MODIFY_WRITE = 9,
+	CMD_RANDOM_READ = 10,
+	CMD_BURST_READ = 11,
+	CMD_RANDOM_WRITE = 12,
+	CMD_LED_MODE_OP = 16,
+	CMD_POWER_SAVING_OP = 20,
+	CMD_WOW_CONFIG = 21,
+	CMD_WOW_QUERY = 22,
+	CMD_WOW_FEATURE = 24,
+	CMD_CARRIER_DETECT_OP = 28,
+	CMD_RADOR_DETECT_OP = 29,
+	CMD_SWITCH_CHANNEL_OP = 30,
+	CMD_CALIBRATION_OP = 31,
+	CMD_BEACON_OP = 32,
+	CMD_ANTENNA_OP = 33,
+};
+
+enum mcu_function {
+	Q_SELECT = 1,
+	BW_SETTING = 2,
+	USB2_SW_DISCONNECT = 2,
+	USB3_SW_DISCONNECT = 3,
+	LOG_FW_DEBUG_MSG = 4,
+	GET_FW_VERSION = 5,
+};
+
+enum mcu_power_mode {
+	RADIO_OFF = 0x30,
+	RADIO_ON = 0x31,
+	RADIO_OFF_AUTO_WAKEUP = 0x32,
+	RADIO_OFF_ADVANCE = 0x33,
+	RADIO_ON_ADVANCE = 0x34,
+};
+
+enum mcu_calibration {
+	MCU_CAL_R = 1,
+	MCU_CAL_TEMP_SENSOR,
+	MCU_CAL_RXDCOC,
+	MCU_CAL_RC,
+	MCU_CAL_SX_LOGEN,
+	MCU_CAL_LC,
+	MCU_CAL_TX_LOFT,
+	MCU_CAL_TXIQ,
+	MCU_CAL_TSSI,
+	MCU_CAL_TSSI_COMP,
+	MCU_CAL_DPD,
+	MCU_CAL_RXIQC_FI,
+	MCU_CAL_RXIQC_FD,
+	MCU_CAL_PWRON,
+	MCU_CAL_TX_SHAPING,
+};
+
+enum mt76x2_mcu_cr_mode {
+	MT_RF_CR,
+	MT_BBP_CR,
+	MT_RF_BBP_CR,
+	MT_HL_TEMP_CR_UPDATE,
+};
+
+struct mt76x2_tssi_comp {
+	u8 pa_mode;
+	u8 cal_mode;
+	u16 pad;
+
+	u8 slope0;
+	u8 slope1;
+	u8 offset0;
+	u8 offset1;
+} __packed __aligned(4);
+
+int mt76x2_mcu_calibrate(struct mt76x2_dev *dev, enum mcu_calibration type,
+			 u32 param);
+int mt76x2_mcu_tssi_comp(struct mt76x2_dev *dev,
+			 struct mt76x2_tssi_comp *tssi_data);
+int mt76x2_mcu_init_gain(struct mt76x2_dev *dev, u8 channel, u32 gain,
+			 bool force);
+
+#endif
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_pci.c b/drivers/net/wireless/mediatek/mt76/mt76x2_pci.c
new file mode 100644
index 0000000..e66f047
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2_pci.c
@@ -0,0 +1,110 @@
+/*
+ * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+
+#include "mt76x2.h"
+#include "mt76x2_trace.h"
+
+static const struct pci_device_id mt76pci_device_table[] = {
+	{ PCI_DEVICE(0x14c3, 0x7662) },
+	{ PCI_DEVICE(0x14c3, 0x7612) },
+	{ PCI_DEVICE(0x14c3, 0x7602) },
+	{ },
+};
+
+static int
+mt76pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+	struct mt76x2_dev *dev;
+	int ret;
+
+	ret = pcim_enable_device(pdev);
+	if (ret)
+		return ret;
+
+	ret = pcim_iomap_regions(pdev, BIT(0), pci_name(pdev));
+	if (ret)
+		return ret;
+
+	pci_set_master(pdev);
+
+	ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+	if (ret)
+		return ret;
+
+	dev = mt76x2_alloc_device(&pdev->dev);
+	if (!dev)
+		return -ENOMEM;
+
+	mt76_mmio_init(&dev->mt76, pcim_iomap_table(pdev)[0]);
+
+	dev->mt76.rev = mt76_rr(dev, MT_ASIC_VERSION);
+	dev_info(dev->mt76.dev, "ASIC revision: %08x\n", dev->mt76.rev);
+
+	ret = devm_request_irq(dev->mt76.dev, pdev->irq, mt76x2_irq_handler,
+			       IRQF_SHARED, KBUILD_MODNAME, dev);
+	if (ret)
+		goto error;
+
+	ret = mt76x2_register_device(dev);
+	if (ret)
+		goto error;
+
+	/* Fix up ASPM configuration */
+
+	/* RG_SSUSB_G1_CDR_BIR_LTR = 0x9 */
+	mt76_rmw_field(dev, 0x15a10, 0x1f << 16, 0x9);
+
+	/* RG_SSUSB_G1_CDR_BIC_LTR = 0xf */
+	mt76_rmw_field(dev, 0x15a0c, 0xf << 28, 0xf);
+
+	/* RG_SSUSB_CDR_BR_PE1D = 0x3 */
+	mt76_rmw_field(dev, 0x15c58, 0x3 << 6, 0x3);
+
+	return 0;
+
+error:
+	ieee80211_free_hw(mt76_hw(dev));
+	return ret;
+}
+
+static void
+mt76pci_remove(struct pci_dev *pdev)
+{
+	struct mt76_dev *mdev = pci_get_drvdata(pdev);
+	struct mt76x2_dev *dev = container_of(mdev, struct mt76x2_dev, mt76);
+
+	mt76_unregister_device(mdev);
+	mt76x2_cleanup(dev);
+	ieee80211_free_hw(mdev->hw);
+}
+
+MODULE_DEVICE_TABLE(pci, mt76pci_device_table);
+MODULE_FIRMWARE(MT7662_FIRMWARE);
+MODULE_FIRMWARE(MT7662_ROM_PATCH);
+MODULE_LICENSE("Dual BSD/GPL");
+
+static struct pci_driver mt76pci_driver = {
+	.name		= KBUILD_MODNAME,
+	.id_table	= mt76pci_device_table,
+	.probe		= mt76pci_probe,
+	.remove		= mt76pci_remove,
+};
+
+module_pci_driver(mt76pci_driver);
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_phy.c b/drivers/net/wireless/mediatek/mt76/mt76x2_phy.c
new file mode 100644
index 0000000..1264971
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2_phy.c
@@ -0,0 +1,758 @@
+/*
+ * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <linux/delay.h>
+#include "mt76x2.h"
+#include "mt76x2_mcu.h"
+#include "mt76x2_eeprom.h"
+
+static void
+mt76x2_adjust_high_lna_gain(struct mt76x2_dev *dev, int reg, s8 offset)
+{
+	s8 gain;
+
+	gain = FIELD_GET(MT_BBP_AGC_LNA_HIGH_GAIN, mt76_rr(dev, MT_BBP(AGC, reg)));
+	gain -= offset / 2;
+	mt76_rmw_field(dev, MT_BBP(AGC, reg), MT_BBP_AGC_LNA_HIGH_GAIN, gain);
+}
+
+static void
+mt76x2_adjust_agc_gain(struct mt76x2_dev *dev, int reg, s8 offset)
+{
+	s8 gain;
+
+	gain = FIELD_GET(MT_BBP_AGC_GAIN, mt76_rr(dev, MT_BBP(AGC, reg)));
+	gain += offset;
+	mt76_rmw_field(dev, MT_BBP(AGC, reg), MT_BBP_AGC_GAIN, gain);
+}
+
+static void
+mt76x2_apply_gain_adj(struct mt76x2_dev *dev)
+{
+	s8 *gain_adj = dev->cal.rx.high_gain;
+
+	mt76x2_adjust_high_lna_gain(dev, 4, gain_adj[0]);
+	mt76x2_adjust_high_lna_gain(dev, 5, gain_adj[1]);
+
+	mt76x2_adjust_agc_gain(dev, 8, gain_adj[0]);
+	mt76x2_adjust_agc_gain(dev, 9, gain_adj[1]);
+}
+
+static u32
+mt76x2_tx_power_mask(u8 v1, u8 v2, u8 v3, u8 v4)
+{
+	u32 val = 0;
+
+	val |= (v1 & (BIT(6) - 1)) << 0;
+	val |= (v2 & (BIT(6) - 1)) << 8;
+	val |= (v3 & (BIT(6) - 1)) << 16;
+	val |= (v4 & (BIT(6) - 1)) << 24;
+	return val;
+}
+
+int mt76x2_phy_get_rssi(struct mt76x2_dev *dev, s8 rssi, int chain)
+{
+	struct mt76x2_rx_freq_cal *cal = &dev->cal.rx;
+
+	rssi += cal->rssi_offset[chain];
+	rssi -= cal->lna_gain;
+
+	return rssi;
+}
+
+static u8
+mt76x2_txpower_check(int value)
+{
+	if (value < 0)
+		return 0;
+	if (value > 0x2f)
+		return 0x2f;
+	return value;
+}
+
+static void
+mt76x2_add_rate_power_offset(struct mt76_rate_power *r, int offset)
+{
+	int i;
+
+	for (i = 0; i < sizeof(r->all); i++)
+		r->all[i] += offset;
+}
+
+static void
+mt76x2_limit_rate_power(struct mt76_rate_power *r, int limit)
+{
+	int i;
+
+	for (i = 0; i < sizeof(r->all); i++)
+		if (r->all[i] > limit)
+			r->all[i] = limit;
+}
+
+static int
+mt76x2_get_max_power(struct mt76_rate_power *r)
+{
+	int i;
+	s8 ret = 0;
+
+	for (i = 0; i < sizeof(r->all); i++)
+		ret = max(ret, r->all[i]);
+
+	return ret;
+}
+
+void mt76x2_phy_set_txpower(struct mt76x2_dev *dev)
+{
+	enum nl80211_chan_width width = dev->mt76.chandef.width;
+	struct mt76x2_tx_power_info txp;
+	int txp_0, txp_1, delta = 0;
+	struct mt76_rate_power t = {};
+
+	mt76x2_get_power_info(dev, &txp);
+
+	if (width == NL80211_CHAN_WIDTH_40)
+		delta = txp.delta_bw40;
+	else if (width == NL80211_CHAN_WIDTH_80)
+		delta = txp.delta_bw80;
+
+	if (txp.target_power > dev->txpower_conf)
+		delta -= txp.target_power - dev->txpower_conf;
+
+	mt76x2_get_rate_power(dev, &t);
+	mt76x2_add_rate_power_offset(&t, txp.chain[0].target_power +
+				   txp.chain[0].delta);
+	mt76x2_limit_rate_power(&t, dev->txpower_conf);
+	dev->txpower_cur = mt76x2_get_max_power(&t);
+	mt76x2_add_rate_power_offset(&t, -(txp.chain[0].target_power +
+					 txp.chain[0].delta + delta));
+	dev->target_power = txp.chain[0].target_power;
+	dev->target_power_delta[0] = txp.chain[0].delta + delta;
+	dev->target_power_delta[1] = txp.chain[1].delta + delta;
+	dev->rate_power = t;
+
+	txp_0 = mt76x2_txpower_check(txp.chain[0].target_power +
+				   txp.chain[0].delta + delta);
+
+	txp_1 = mt76x2_txpower_check(txp.chain[1].target_power +
+				   txp.chain[1].delta + delta);
+
+	mt76_rmw_field(dev, MT_TX_ALC_CFG_0, MT_TX_ALC_CFG_0_CH_INIT_0, txp_0);
+	mt76_rmw_field(dev, MT_TX_ALC_CFG_0, MT_TX_ALC_CFG_0_CH_INIT_1, txp_1);
+
+	mt76_wr(dev, MT_TX_PWR_CFG_0,
+		mt76x2_tx_power_mask(t.cck[0], t.cck[2], t.ofdm[0], t.ofdm[2]));
+	mt76_wr(dev, MT_TX_PWR_CFG_1,
+		mt76x2_tx_power_mask(t.ofdm[4], t.ofdm[6], t.ht[0], t.ht[2]));
+	mt76_wr(dev, MT_TX_PWR_CFG_2,
+		mt76x2_tx_power_mask(t.ht[4], t.ht[6], t.ht[8], t.ht[10]));
+	mt76_wr(dev, MT_TX_PWR_CFG_3,
+		mt76x2_tx_power_mask(t.ht[12], t.ht[14], t.ht[0], t.ht[2]));
+	mt76_wr(dev, MT_TX_PWR_CFG_4,
+		mt76x2_tx_power_mask(t.ht[4], t.ht[6], 0, 0));
+	mt76_wr(dev, MT_TX_PWR_CFG_7,
+		mt76x2_tx_power_mask(t.ofdm[6], t.vht[8], t.ht[6], t.vht[8]));
+	mt76_wr(dev, MT_TX_PWR_CFG_8,
+		mt76x2_tx_power_mask(t.ht[14], t.vht[8], t.vht[8], 0));
+	mt76_wr(dev, MT_TX_PWR_CFG_9,
+		mt76x2_tx_power_mask(t.ht[6], t.vht[8], t.vht[8], 0));
+}
+
+static bool
+mt76x2_channel_silent(struct mt76x2_dev *dev)
+{
+	struct ieee80211_channel *chan = dev->mt76.chandef.chan;
+
+	return ((chan->flags & IEEE80211_CHAN_RADAR) &&
+		chan->dfs_state != NL80211_DFS_AVAILABLE);
+}
+
+static bool
+mt76x2_phy_tssi_init_cal(struct mt76x2_dev *dev)
+{
+	struct ieee80211_channel *chan = dev->mt76.chandef.chan;
+	u32 flag = 0;
+
+	if (!mt76x2_tssi_enabled(dev))
+		return false;
+
+	if (mt76x2_channel_silent(dev))
+		return false;
+
+	if (chan->band == NL80211_BAND_2GHZ)
+		flag |= BIT(0);
+
+	if (mt76x2_ext_pa_enabled(dev, chan->band))
+		flag |= BIT(8);
+
+	mt76x2_mcu_calibrate(dev, MCU_CAL_TSSI, flag);
+	dev->cal.tssi_cal_done = true;
+	return true;
+}
+
+static void
+mt76x2_phy_channel_calibrate(struct mt76x2_dev *dev, bool mac_stopped)
+{
+	struct ieee80211_channel *chan = dev->mt76.chandef.chan;
+	bool is_5ghz = chan->band == NL80211_BAND_5GHZ;
+
+	if (dev->cal.channel_cal_done)
+		return;
+
+	if (mt76x2_channel_silent(dev))
+		return;
+
+	if (!dev->cal.tssi_cal_done)
+		mt76x2_phy_tssi_init_cal(dev);
+
+	if (!mac_stopped)
+		mt76x2_mac_stop(dev, false);
+
+	if (is_5ghz)
+		mt76x2_mcu_calibrate(dev, MCU_CAL_LC, 0);
+
+	mt76x2_mcu_calibrate(dev, MCU_CAL_TX_LOFT, is_5ghz);
+	mt76x2_mcu_calibrate(dev, MCU_CAL_TXIQ, is_5ghz);
+	mt76x2_mcu_calibrate(dev, MCU_CAL_RXIQC_FI, is_5ghz);
+	mt76x2_mcu_calibrate(dev, MCU_CAL_TEMP_SENSOR, 0);
+	mt76x2_mcu_calibrate(dev, MCU_CAL_TX_SHAPING, 0);
+
+	if (!mac_stopped)
+		mt76x2_mac_resume(dev);
+
+	mt76x2_apply_gain_adj(dev);
+
+	dev->cal.channel_cal_done = true;
+}
+
+static void
+mt76x2_phy_set_txpower_regs(struct mt76x2_dev *dev, enum nl80211_band band)
+{
+	u32 pa_mode[2];
+	u32 pa_mode_adj;
+
+	if (band == NL80211_BAND_2GHZ) {
+		pa_mode[0] = 0x010055ff;
+		pa_mode[1] = 0x00550055;
+
+		mt76_wr(dev, MT_TX_ALC_CFG_2, 0x35160a00);
+		mt76_wr(dev, MT_TX_ALC_CFG_3, 0x35160a06);
+
+		if (mt76x2_ext_pa_enabled(dev, band)) {
+			mt76_wr(dev, MT_RF_PA_MODE_ADJ0, 0x0000ec00);
+			mt76_wr(dev, MT_RF_PA_MODE_ADJ1, 0x0000ec00);
+		} else {
+			mt76_wr(dev, MT_RF_PA_MODE_ADJ0, 0xf4000200);
+			mt76_wr(dev, MT_RF_PA_MODE_ADJ1, 0xfa000200);
+		}
+	} else {
+		pa_mode[0] = 0x0000ffff;
+		pa_mode[1] = 0x00ff00ff;
+
+		if (mt76x2_ext_pa_enabled(dev, band)) {
+			mt76_wr(dev, MT_TX_ALC_CFG_2, 0x2f0f0400);
+			mt76_wr(dev, MT_TX_ALC_CFG_3, 0x2f0f0476);
+		} else {
+			mt76_wr(dev, MT_TX_ALC_CFG_2, 0x1b0f0400);
+			mt76_wr(dev, MT_TX_ALC_CFG_3, 0x1b0f0476);
+		}
+		mt76_wr(dev, MT_TX_ALC_CFG_4, 0);
+
+		if (mt76x2_ext_pa_enabled(dev, band))
+			pa_mode_adj = 0x04000000;
+		else
+			pa_mode_adj = 0;
+
+		mt76_wr(dev, MT_RF_PA_MODE_ADJ0, pa_mode_adj);
+		mt76_wr(dev, MT_RF_PA_MODE_ADJ1, pa_mode_adj);
+	}
+
+	mt76_wr(dev, MT_BB_PA_MODE_CFG0, pa_mode[0]);
+	mt76_wr(dev, MT_BB_PA_MODE_CFG1, pa_mode[1]);
+	mt76_wr(dev, MT_RF_PA_MODE_CFG0, pa_mode[0]);
+	mt76_wr(dev, MT_RF_PA_MODE_CFG1, pa_mode[1]);
+
+	if (mt76x2_ext_pa_enabled(dev, band)) {
+		u32 val;
+
+		if (band == NL80211_BAND_2GHZ)
+			val = 0x3c3c023c;
+		else
+			val = 0x363c023c;
+
+		mt76_wr(dev, MT_TX0_RF_GAIN_CORR, val);
+		mt76_wr(dev, MT_TX1_RF_GAIN_CORR, val);
+		mt76_wr(dev, MT_TX_ALC_CFG_4, 0x00001818);
+	} else {
+		if (band == NL80211_BAND_2GHZ) {
+			u32 val = 0x0f3c3c3c;
+
+			mt76_wr(dev, MT_TX0_RF_GAIN_CORR, val);
+			mt76_wr(dev, MT_TX1_RF_GAIN_CORR, val);
+			mt76_wr(dev, MT_TX_ALC_CFG_4, 0x00000606);
+		} else {
+			mt76_wr(dev, MT_TX0_RF_GAIN_CORR, 0x383c023c);
+			mt76_wr(dev, MT_TX1_RF_GAIN_CORR, 0x24282e28);
+			mt76_wr(dev, MT_TX_ALC_CFG_4, 0);
+		}
+	}
+}
+
+static void
+mt76x2_configure_tx_delay(struct mt76x2_dev *dev, enum nl80211_band band, u8 bw)
+{
+	u32 cfg0, cfg1;
+
+	if (mt76x2_ext_pa_enabled(dev, band)) {
+		cfg0 = bw ? 0x000b0c01 : 0x00101101;
+		cfg1 = 0x00011414;
+	} else {
+		cfg0 = bw ? 0x000b0b01 : 0x00101001;
+		cfg1 = 0x00021414;
+	}
+	mt76_wr(dev, MT_TX_SW_CFG0, cfg0);
+	mt76_wr(dev, MT_TX_SW_CFG1, cfg1);
+
+	mt76_rmw_field(dev, MT_XIFS_TIME_CFG, MT_XIFS_TIME_CFG_CCK_SIFS,
+		       13 + (bw ? 1 : 0));
+}
+
+static void
+mt76x2_phy_set_bw(struct mt76x2_dev *dev, int width, u8 ctrl)
+{
+	int core_val, agc_val;
+
+	switch (width) {
+	case NL80211_CHAN_WIDTH_80:
+		core_val = 3;
+		agc_val = 7;
+		break;
+	case NL80211_CHAN_WIDTH_40:
+		core_val = 2;
+		agc_val = 3;
+		break;
+	default:
+		core_val = 0;
+		agc_val = 1;
+		break;
+	}
+
+	mt76_rmw_field(dev, MT_BBP(CORE, 1), MT_BBP_CORE_R1_BW, core_val);
+	mt76_rmw_field(dev, MT_BBP(AGC, 0), MT_BBP_AGC_R0_BW, agc_val);
+	mt76_rmw_field(dev, MT_BBP(AGC, 0), MT_BBP_AGC_R0_CTRL_CHAN, ctrl);
+	mt76_rmw_field(dev, MT_BBP(TXBE, 0), MT_BBP_TXBE_R0_CTRL_CHAN, ctrl);
+}
+
+static void
+mt76x2_phy_set_band(struct mt76x2_dev *dev, int band, bool primary_upper)
+{
+	switch (band) {
+	case NL80211_BAND_2GHZ:
+		mt76_set(dev, MT_TX_BAND_CFG, MT_TX_BAND_CFG_2G);
+		mt76_clear(dev, MT_TX_BAND_CFG, MT_TX_BAND_CFG_5G);
+		break;
+	case NL80211_BAND_5GHZ:
+		mt76_clear(dev, MT_TX_BAND_CFG, MT_TX_BAND_CFG_2G);
+		mt76_set(dev, MT_TX_BAND_CFG, MT_TX_BAND_CFG_5G);
+		break;
+	}
+
+	mt76_rmw_field(dev, MT_TX_BAND_CFG, MT_TX_BAND_CFG_UPPER_40M,
+		       primary_upper);
+}
+
+static void
+mt76x2_set_rx_chains(struct mt76x2_dev *dev)
+{
+	u32 val;
+
+	val = mt76_rr(dev, MT_BBP(AGC, 0));
+	val &= ~(BIT(3) | BIT(4));
+
+	if (dev->chainmask & BIT(1))
+		val |= BIT(3);
+
+	mt76_wr(dev, MT_BBP(AGC, 0), val);
+}
+
+static void
+mt76x2_set_tx_dac(struct mt76x2_dev *dev)
+{
+	if (dev->chainmask & BIT(1))
+		mt76_set(dev, MT_BBP(TXBE, 5), 3);
+	else
+		mt76_clear(dev, MT_BBP(TXBE, 5), 3);
+}
+
+static void
+mt76x2_get_agc_gain(struct mt76x2_dev *dev, u8 *dest)
+{
+	dest[0] = mt76_get_field(dev, MT_BBP(AGC, 8), MT_BBP_AGC_GAIN);
+	dest[1] = mt76_get_field(dev, MT_BBP(AGC, 9), MT_BBP_AGC_GAIN);
+}
+
+static int
+mt76x2_get_rssi_gain_thresh(struct mt76x2_dev *dev)
+{
+	switch (dev->mt76.chandef.width) {
+	case NL80211_CHAN_WIDTH_80:
+		return -62;
+	case NL80211_CHAN_WIDTH_40:
+		return -65;
+	default:
+		return -68;
+	}
+}
+
+static int
+mt76x2_get_low_rssi_gain_thresh(struct mt76x2_dev *dev)
+{
+	switch (dev->mt76.chandef.width) {
+	case NL80211_CHAN_WIDTH_80:
+		return -76;
+	case NL80211_CHAN_WIDTH_40:
+		return -79;
+	default:
+		return -82;
+	}
+}
+
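+/*
+ * Program the AGC gain registers from the current gain minus the VGA
+ * adjustment; the upper register bits differ between 20 MHz and 40/80 MHz
+ * operation.
+ */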
+static void
+mt76x2_phy_set_gain_val(struct mt76x2_dev *dev)
+{
+	u32 val;
+	u8 gain_val[2];
+
+	gain_val[0] = dev->cal.agc_gain_cur[0] - dev->cal.agc_gain_adjust;
+	gain_val[1] = dev->cal.agc_gain_cur[1] - dev->cal.agc_gain_adjust;
+
+	if (dev->mt76.chandef.width >= NL80211_CHAN_WIDTH_40)
+		val = 0x1e42 << 16;
+	else
+		val = 0x1836 << 16;
+
+	val |= 0xf8;
+
+	mt76_wr(dev, MT_BBP(AGC, 8),
+		val | FIELD_PREP(MT_BBP_AGC_GAIN, gain_val[0]));
+	mt76_wr(dev, MT_BBP(AGC, 9),
+		val | FIELD_PREP(MT_BBP_AGC_GAIN, gain_val[1]));
+
+	if (dev->mt76.chandef.chan->flags & IEEE80211_CHAN_RADAR)
+		mt76x2_dfs_adjust_agc(dev);
+}
+
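+/*
+ * Nudge the VGA gain based on the false CCA counter: reduce sensitivity
+ * (in 2 dB steps) while false CCAs are frequent, restore it when they are
+ * rare, and leave the gain untouched in between.
+ */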
+static void
+mt76x2_phy_adjust_vga_gain(struct mt76x2_dev *dev)
+{
+	u32 false_cca;
+	u8 limit = dev->cal.low_gain > 1 ? 4 : 16;
+
+	false_cca = FIELD_GET(MT_RX_STAT_1_CCA_ERRORS, mt76_rr(dev, MT_RX_STAT_1));
+	if (false_cca > 800 && dev->cal.agc_gain_adjust < limit)
+		dev->cal.agc_gain_adjust += 2;
+	else if (false_cca < 10 && dev->cal.agc_gain_adjust > 0)
+		dev->cal.agc_gain_adjust -= 2;
+	else
+		return;
+
+	mt76x2_phy_set_gain_val(dev);
+}
+
+static void
+mt76x2_phy_update_channel_gain(struct mt76x2_dev *dev)
+{
+	u32 val = mt76_rr(dev, MT_BBP(AGC, 20));
+	int rssi0 = (s8) FIELD_GET(MT_BBP_AGC20_RSSI0, val);
+	int rssi1 = (s8) FIELD_GET(MT_BBP_AGC20_RSSI1, val);
+	u8 *gain = dev->cal.agc_gain_init;
+	u8 gain_delta;
+	int low_gain;
+
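+	/*
+	 * Track per-chain RSSI as an exponential moving average in 8.8 fixed
+	 * point (1/16 weight for new samples); dividing the two-chain sum by
+	 * 512 yields the average RSSI in dBm.
+	 */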
+	dev->cal.avg_rssi[0] = (dev->cal.avg_rssi[0] * 15) / 16 +
+			       (rssi0 << 8) / 16;
+	dev->cal.avg_rssi[1] = (dev->cal.avg_rssi[1] * 15) / 16 +
+			       (rssi1 << 8) / 16;
+	dev->cal.avg_rssi_all = (dev->cal.avg_rssi[0] +
+				 dev->cal.avg_rssi[1]) / 512;
+
+	low_gain = (dev->cal.avg_rssi_all > mt76x2_get_rssi_gain_thresh(dev)) +
+		   (dev->cal.avg_rssi_all > mt76x2_get_low_rssi_gain_thresh(dev));
+
+	if (dev->cal.low_gain == low_gain) {
+		mt76x2_phy_adjust_vga_gain(dev);
+		return;
+	}
+
+	dev->cal.low_gain = low_gain;
+
+	if (dev->mt76.chandef.width == NL80211_CHAN_WIDTH_80)
+		mt76_wr(dev, MT_BBP(RXO, 14), 0x00560211);
+	else
+		mt76_wr(dev, MT_BBP(RXO, 14), 0x00560423);
+
+	if (low_gain) {
+		mt76_wr(dev, MT_BBP(RXO, 18), 0xf000a991);
+		mt76_wr(dev, MT_BBP(AGC, 35), 0x08080808);
+		mt76_wr(dev, MT_BBP(AGC, 37), 0x08080808);
+		if (mt76x2_has_ext_lna(dev))
+			gain_delta = 10;
+		else
+			gain_delta = 14;
+	} else {
+		mt76_wr(dev, MT_BBP(RXO, 18), 0xf000a990);
+		if (dev->mt76.chandef.width == NL80211_CHAN_WIDTH_80)
+			mt76_wr(dev, MT_BBP(AGC, 35), 0x10101014);
+		else
+			mt76_wr(dev, MT_BBP(AGC, 35), 0x11111116);
+		mt76_wr(dev, MT_BBP(AGC, 37), 0x2121262C);
+		gain_delta = 0;
+	}
+
+	dev->cal.agc_gain_cur[0] = gain[0] - gain_delta;
+	dev->cal.agc_gain_cur[1] = gain[1] - gain_delta;
+	dev->cal.agc_gain_adjust = 0;
+	mt76x2_phy_set_gain_val(dev);
+}
+
+int mt76x2_phy_set_channel(struct mt76x2_dev *dev,
+			   struct cfg80211_chan_def *chandef)
+{
+	struct ieee80211_channel *chan = chandef->chan;
+	bool scan = test_bit(MT76_SCANNING, &dev->mt76.state);
+	enum nl80211_band band = chan->band;
+	u8 channel;
+
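+	/*
+	 * One entry per 20 MHz sub-channel position: remap which CCA engine
+	 * monitors the primary channel and which ones cover the extension
+	 * channels.
+	 */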
+	u32 ext_cca_chan[4] = {
+		[0] = FIELD_PREP(MT_EXT_CCA_CFG_CCA0, 0) |
+		      FIELD_PREP(MT_EXT_CCA_CFG_CCA1, 1) |
+		      FIELD_PREP(MT_EXT_CCA_CFG_CCA2, 2) |
+		      FIELD_PREP(MT_EXT_CCA_CFG_CCA3, 3) |
+		      FIELD_PREP(MT_EXT_CCA_CFG_CCA_MASK, BIT(0)),
+		[1] = FIELD_PREP(MT_EXT_CCA_CFG_CCA0, 1) |
+		      FIELD_PREP(MT_EXT_CCA_CFG_CCA1, 0) |
+		      FIELD_PREP(MT_EXT_CCA_CFG_CCA2, 2) |
+		      FIELD_PREP(MT_EXT_CCA_CFG_CCA3, 3) |
+		      FIELD_PREP(MT_EXT_CCA_CFG_CCA_MASK, BIT(1)),
+		[2] = FIELD_PREP(MT_EXT_CCA_CFG_CCA0, 2) |
+		      FIELD_PREP(MT_EXT_CCA_CFG_CCA1, 3) |
+		      FIELD_PREP(MT_EXT_CCA_CFG_CCA2, 1) |
+		      FIELD_PREP(MT_EXT_CCA_CFG_CCA3, 0) |
+		      FIELD_PREP(MT_EXT_CCA_CFG_CCA_MASK, BIT(2)),
+		[3] = FIELD_PREP(MT_EXT_CCA_CFG_CCA0, 3) |
+		      FIELD_PREP(MT_EXT_CCA_CFG_CCA1, 2) |
+		      FIELD_PREP(MT_EXT_CCA_CFG_CCA2, 1) |
+		      FIELD_PREP(MT_EXT_CCA_CFG_CCA3, 0) |
+		      FIELD_PREP(MT_EXT_CCA_CFG_CCA_MASK, BIT(3)),
+	};
+	int ch_group_index;
+	u8 bw, bw_index;
+	int freq, freq1;
+	int ret;
+	u8 sifs = 13;
+
+	dev->cal.channel_cal_done = false;
+	freq = chandef->chan->center_freq;
+	freq1 = chandef->center_freq1;
+	channel = chan->hw_value;
+
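+	/*
+	 * Derive the hardware bandwidth code, sub-channel index and center
+	 * channel number from the control channel and center frequency:
+	 * 40 MHz shifts the channel by +/-2, 80 MHz by +6/+2/-2/-6.
+	 */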
+	switch (chandef->width) {
+	case NL80211_CHAN_WIDTH_40:
+		bw = 1;
+		if (freq1 > freq) {
+			bw_index = 1;
+			ch_group_index = 0;
+		} else {
+			bw_index = 3;
+			ch_group_index = 1;
+		}
+		channel += 2 - ch_group_index * 4;
+		break;
+	case NL80211_CHAN_WIDTH_80:
+		ch_group_index = (freq - freq1 + 30) / 20;
+		if (WARN_ON(ch_group_index < 0 || ch_group_index > 3))
+			ch_group_index = 0;
+		bw = 2;
+		bw_index = ch_group_index;
+		channel += 6 - ch_group_index * 4;
+		break;
+	default:
+		bw = 0;
+		bw_index = 0;
+		ch_group_index = 0;
+		break;
+	}
+
+	mt76x2_read_rx_gain(dev);
+	mt76x2_phy_set_txpower_regs(dev, band);
+	mt76x2_configure_tx_delay(dev, band, bw);
+	mt76x2_phy_set_txpower(dev);
+
+	mt76x2_set_rx_chains(dev);
+	mt76x2_phy_set_band(dev, chan->band, ch_group_index & 1);
+	mt76x2_phy_set_bw(dev, chandef->width, ch_group_index);
+	mt76x2_set_tx_dac(dev);
+
+	mt76_rmw(dev, MT_EXT_CCA_CFG,
+		 (MT_EXT_CCA_CFG_CCA0 |
+		  MT_EXT_CCA_CFG_CCA1 |
+		  MT_EXT_CCA_CFG_CCA2 |
+		  MT_EXT_CCA_CFG_CCA3 |
+		  MT_EXT_CCA_CFG_CCA_MASK),
+		 ext_cca_chan[ch_group_index]);
+
+	if (chandef->width >= NL80211_CHAN_WIDTH_40)
+		sifs++;
+
+	mt76_rmw_field(dev, MT_XIFS_TIME_CFG, MT_XIFS_TIME_CFG_OFDM_SIFS, sifs);
+
+	ret = mt76x2_mcu_set_channel(dev, channel, bw, bw_index, scan);
+	if (ret)
+		return ret;
+
+	mt76x2_mcu_init_gain(dev, channel, dev->cal.rx.mcu_gain, true);
+
+	/* Enable LDPC Rx */
+	if (mt76xx_rev(dev) >= MT76XX_REV_E3)
+		mt76_set(dev, MT_BBP(RXO, 13), BIT(10));
+
+	if (!dev->cal.init_cal_done) {
+		u8 val = mt76x2_eeprom_get(dev, MT_EE_BT_RCAL_RESULT);
+
+		if (val != 0xff)
+			mt76x2_mcu_calibrate(dev, MCU_CAL_R, 0);
+	}
+
+	mt76x2_mcu_calibrate(dev, MCU_CAL_RXDCOC, channel);
+
+	/* Rx LPF calibration */
+	if (!dev->cal.init_cal_done)
+		mt76x2_mcu_calibrate(dev, MCU_CAL_RC, 0);
+
+	dev->cal.init_cal_done = true;
+
+	mt76_wr(dev, MT_BBP(AGC, 61), 0xFF64A4E2);
+	mt76_wr(dev, MT_BBP(AGC, 7), 0x08081010);
+	mt76_wr(dev, MT_BBP(AGC, 11), 0x00000404);
+	mt76_wr(dev, MT_BBP(AGC, 2), 0x00007070);
+	mt76_wr(dev, MT_TXOP_CTRL_CFG, 0x04101B3F);
+
+	if (scan)
+		return 0;
+
+	dev->cal.low_gain = -1;
+	mt76x2_phy_channel_calibrate(dev, true);
+	mt76x2_get_agc_gain(dev, dev->cal.agc_gain_init);
+	memcpy(dev->cal.agc_gain_cur, dev->cal.agc_gain_init,
+	       sizeof(dev->cal.agc_gain_cur));
+
+	ieee80211_queue_delayed_work(mt76_hw(dev), &dev->cal_work,
+				     MT_CALIBRATE_INTERVAL);
+
+	return 0;
+}
+
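+/*
+ * Two-step TSSI compensation: first trigger a measurement, then on a later
+ * pass (once the hardware busy bit clears) feed the per-chain slope/offset
+ * values back to the MCU. DPD calibration runs once afterwards, but only
+ * for the internal PA case.
+ */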
+static void
+mt76x2_phy_tssi_compensate(struct mt76x2_dev *dev)
+{
+	struct ieee80211_channel *chan = dev->mt76.chandef.chan;
+	struct mt76x2_tx_power_info txp;
+	struct mt76x2_tssi_comp t = {};
+
+	if (!dev->cal.tssi_cal_done)
+		return;
+
+	if (!dev->cal.tssi_comp_pending) {
+		/* TSSI trigger */
+		t.cal_mode = BIT(0);
+		mt76x2_mcu_tssi_comp(dev, &t);
+		dev->cal.tssi_comp_pending = true;
+	} else {
+		if (mt76_rr(dev, MT_BBP(CORE, 34)) & BIT(4))
+			return;
+
+		dev->cal.tssi_comp_pending = false;
+		mt76x2_get_power_info(dev, &txp);
+
+		if (mt76x2_ext_pa_enabled(dev, chan->band))
+			t.pa_mode = 1;
+
+		t.cal_mode = BIT(1);
+		t.slope0 = txp.chain[0].tssi_slope;
+		t.offset0 = txp.chain[0].tssi_offset;
+		t.slope1 = txp.chain[1].tssi_slope;
+		t.offset1 = txp.chain[1].tssi_offset;
+		mt76x2_mcu_tssi_comp(dev, &t);
+
+		if (t.pa_mode || dev->cal.dpd_cal_done)
+			return;
+
+		usleep_range(10000, 20000);
+		mt76x2_mcu_calibrate(dev, MCU_CAL_DPD, chan->hw_value);
+		dev->cal.dpd_cal_done = true;
+	}
+}
+
+static void
+mt76x2_phy_temp_compensate(struct mt76x2_dev *dev)
+{
+	struct mt76x2_temp_comp t;
+	int temp, db_diff;
+
+	if (mt76x2_get_temp_comp(dev, &t))
+		return;
+
+	temp = mt76_get_field(dev, MT_TEMP_SENSOR, MT_TEMP_SENSOR_VAL);
+	temp -= t.temp_25_ref;
+	temp = (temp * 1789) / 1000 + 25;
+	dev->cal.temp = temp;
+
+	if (temp > 25)
+		db_diff = (temp - 25) / t.high_slope;
+	else
+		db_diff = (25 - temp) / t.low_slope;
+
+	db_diff = min(db_diff, t.upper_bound);
+	db_diff = max(db_diff, t.lower_bound);
+
+	mt76_rmw_field(dev, MT_TX_ALC_CFG_1, MT_TX_ALC_CFG_1_TEMP_COMP,
+		       db_diff * 2);
+	mt76_rmw_field(dev, MT_TX_ALC_CFG_2, MT_TX_ALC_CFG_2_TEMP_COMP,
+		       db_diff * 2);
+}
+
+void mt76x2_phy_calibrate(struct work_struct *work)
+{
+	struct mt76x2_dev *dev;
+
+	dev = container_of(work, struct mt76x2_dev, cal_work.work);
+	mt76x2_phy_channel_calibrate(dev, false);
+	mt76x2_phy_tssi_compensate(dev);
+	mt76x2_phy_temp_compensate(dev);
+	mt76x2_phy_update_channel_gain(dev);
+	ieee80211_queue_delayed_work(mt76_hw(dev), &dev->cal_work,
+				     MT_CALIBRATE_INTERVAL);
+}
+
+int mt76x2_phy_start(struct mt76x2_dev *dev)
+{
+	int ret;
+
+	ret = mt76x2_mcu_set_radio_state(dev, true);
+	if (ret)
+		return ret;
+
+	mt76x2_mcu_load_cr(dev, MT_RF_BBP_CR, 0, 0);
+
+	return ret;
+}
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_regs.h b/drivers/net/wireless/mediatek/mt76/mt76x2_regs.h
new file mode 100644
index 0000000..ce3ab85
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2_regs.h
@@ -0,0 +1,587 @@
+/*
+ * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef __MT76x2_REGS_H
+#define __MT76x2_REGS_H
+
+#define MT_ASIC_VERSION			0x0000
+
+#define MT76XX_REV_E3		0x22
+#define MT76XX_REV_E4		0x33
+
+#define MT_CMB_CTRL			0x0020
+#define MT_CMB_CTRL_XTAL_RDY		BIT(22)
+#define MT_CMB_CTRL_PLL_LD		BIT(23)
+
+#define MT_EFUSE_CTRL			0x0024
+#define MT_EFUSE_CTRL_AOUT		GENMASK(5, 0)
+#define MT_EFUSE_CTRL_MODE		GENMASK(7, 6)
+#define MT_EFUSE_CTRL_LDO_OFF_TIME	GENMASK(13, 8)
+#define MT_EFUSE_CTRL_LDO_ON_TIME	GENMASK(15, 14)
+#define MT_EFUSE_CTRL_AIN		GENMASK(25, 16)
+#define MT_EFUSE_CTRL_KICK		BIT(30)
+#define MT_EFUSE_CTRL_SEL		BIT(31)
+
+#define MT_EFUSE_DATA_BASE		0x0028
+#define MT_EFUSE_DATA(_n)		(MT_EFUSE_DATA_BASE + ((_n) << 2))
+
+#define MT_COEXCFG0			0x0040
+#define MT_COEXCFG0_COEX_EN		BIT(0)
+
+#define MT_WLAN_FUN_CTRL		0x0080
+#define MT_WLAN_FUN_CTRL_WLAN_EN	BIT(0)
+#define MT_WLAN_FUN_CTRL_WLAN_CLK_EN	BIT(1)
+#define MT_WLAN_FUN_CTRL_WLAN_RESET_RF	BIT(2)
+
+#define MT_WLAN_FUN_CTRL_WLAN_RESET	BIT(3) /* MT76x0 */
+#define MT_WLAN_FUN_CTRL_CSR_F20M_CKEN	BIT(3) /* MT76x2 */
+
+#define MT_WLAN_FUN_CTRL_PCIE_CLK_REQ	BIT(4)
+#define MT_WLAN_FUN_CTRL_FRC_WL_ANT_SEL	BIT(5)
+#define MT_WLAN_FUN_CTRL_INV_ANT_SEL	BIT(6)
+#define MT_WLAN_FUN_CTRL_WAKE_HOST	BIT(7)
+
+#define MT_WLAN_FUN_CTRL_THERM_RST	BIT(8) /* MT76x2 */
+#define MT_WLAN_FUN_CTRL_THERM_CKEN	BIT(9) /* MT76x2 */
+
+#define MT_WLAN_FUN_CTRL_GPIO_IN	GENMASK(15, 8) /* MT76x0 */
+#define MT_WLAN_FUN_CTRL_GPIO_OUT	GENMASK(23, 16) /* MT76x0 */
+#define MT_WLAN_FUN_CTRL_GPIO_OUT_EN	GENMASK(31, 24) /* MT76x0 */
+
+#define MT_XO_CTRL0			0x0100
+#define MT_XO_CTRL1			0x0104
+#define MT_XO_CTRL2			0x0108
+#define MT_XO_CTRL3			0x010c
+#define MT_XO_CTRL4			0x0110
+
+#define MT_XO_CTRL5			0x0114
+#define MT_XO_CTRL5_C2_VAL		GENMASK(14, 8)
+
+#define MT_XO_CTRL6			0x0118
+#define MT_XO_CTRL6_C2_CTRL		GENMASK(14, 8)
+
+#define MT_XO_CTRL7			0x011c
+
+#define MT_WLAN_MTC_CTRL		0x10148
+#define MT_WLAN_MTC_CTRL_MTCMOS_PWR_UP	BIT(0)
+#define MT_WLAN_MTC_CTRL_PWR_ACK	BIT(12)
+#define MT_WLAN_MTC_CTRL_PWR_ACK_S	BIT(13)
+#define MT_WLAN_MTC_CTRL_BBP_MEM_PD	GENMASK(19, 16)
+#define MT_WLAN_MTC_CTRL_PBF_MEM_PD	BIT(20)
+#define MT_WLAN_MTC_CTRL_FCE_MEM_PD	BIT(21)
+#define MT_WLAN_MTC_CTRL_TSO_MEM_PD	BIT(22)
+#define MT_WLAN_MTC_CTRL_BBP_MEM_RB	BIT(24)
+#define MT_WLAN_MTC_CTRL_PBF_MEM_RB	BIT(25)
+#define MT_WLAN_MTC_CTRL_FCE_MEM_RB	BIT(26)
+#define MT_WLAN_MTC_CTRL_TSO_MEM_RB	BIT(27)
+#define MT_WLAN_MTC_CTRL_STATE_UP	BIT(28)
+
+#define MT_INT_SOURCE_CSR		0x0200
+#define MT_INT_MASK_CSR			0x0204
+
+#define MT_INT_RX_DONE(_n)		BIT(_n)
+#define MT_INT_RX_DONE_ALL		GENMASK(1, 0)
+#define MT_INT_TX_DONE_ALL		GENMASK(13, 4)
+#define MT_INT_TX_DONE(_n)		BIT((_n) + 4)
+#define MT_INT_RX_COHERENT		BIT(16)
+#define MT_INT_TX_COHERENT		BIT(17)
+#define MT_INT_ANY_COHERENT		BIT(18)
+#define MT_INT_MCU_CMD			BIT(19)
+#define MT_INT_TBTT			BIT(20)
+#define MT_INT_PRE_TBTT			BIT(21)
+#define MT_INT_TX_STAT			BIT(22)
+#define MT_INT_AUTO_WAKEUP		BIT(23)
+#define MT_INT_GPTIMER			BIT(24)
+#define MT_INT_RXDELAYINT		BIT(26)
+#define MT_INT_TXDELAYINT		BIT(27)
+
+#define MT_WPDMA_GLO_CFG		0x0208
+#define MT_WPDMA_GLO_CFG_TX_DMA_EN	BIT(0)
+#define MT_WPDMA_GLO_CFG_TX_DMA_BUSY	BIT(1)
+#define MT_WPDMA_GLO_CFG_RX_DMA_EN	BIT(2)
+#define MT_WPDMA_GLO_CFG_RX_DMA_BUSY	BIT(3)
+#define MT_WPDMA_GLO_CFG_DMA_BURST_SIZE	GENMASK(5, 4)
+#define MT_WPDMA_GLO_CFG_TX_WRITEBACK_DONE	BIT(6)
+#define MT_WPDMA_GLO_CFG_BIG_ENDIAN	BIT(7)
+#define MT_WPDMA_GLO_CFG_HDR_SEG_LEN	GENMASK(15, 8)
+#define MT_WPDMA_GLO_CFG_CLK_GATE_DIS	BIT(30)
+#define MT_WPDMA_GLO_CFG_RX_2B_OFFSET	BIT(31)
+
+#define MT_WPDMA_RST_IDX		0x020c
+
+#define MT_WPDMA_DELAY_INT_CFG		0x0210
+
+#define MT_WMM_AIFSN		0x0214
+#define MT_WMM_AIFSN_MASK		GENMASK(3, 0)
+#define MT_WMM_AIFSN_SHIFT(_n)		((_n) * 4)
+
+#define MT_WMM_CWMIN		0x0218
+#define MT_WMM_CWMIN_MASK		GENMASK(3, 0)
+#define MT_WMM_CWMIN_SHIFT(_n)		((_n) * 4)
+
+#define MT_WMM_CWMAX		0x021c
+#define MT_WMM_CWMAX_MASK		GENMASK(3, 0)
+#define MT_WMM_CWMAX_SHIFT(_n)		((_n) * 4)
+
+#define MT_WMM_TXOP_BASE		0x0220
+#define MT_WMM_TXOP(_n)			(MT_WMM_TXOP_BASE + (((_n) / 2) << 2))
+#define MT_WMM_TXOP_SHIFT(_n)		(((_n) & 1) * 16)
+#define MT_WMM_TXOP_MASK		GENMASK(15, 0)
+
+#define MT_TSO_CTRL			0x0250
+#define MT_HEADER_TRANS_CTRL_REG	0x0260
+
+#define MT_TX_RING_BASE			0x0300
+#define MT_RX_RING_BASE			0x03c0
+
+#define MT_TX_HW_QUEUE_MCU		8
+#define MT_TX_HW_QUEUE_MGMT		9
+
+#define MT_PBF_SYS_CTRL			0x0400
+#define MT_PBF_SYS_CTRL_MCU_RESET	BIT(0)
+#define MT_PBF_SYS_CTRL_DMA_RESET	BIT(1)
+#define MT_PBF_SYS_CTRL_MAC_RESET	BIT(2)
+#define MT_PBF_SYS_CTRL_PBF_RESET	BIT(3)
+#define MT_PBF_SYS_CTRL_ASY_RESET	BIT(4)
+
+#define MT_PBF_CFG			0x0404
+#define MT_PBF_CFG_TX0Q_EN		BIT(0)
+#define MT_PBF_CFG_TX1Q_EN		BIT(1)
+#define MT_PBF_CFG_TX2Q_EN		BIT(2)
+#define MT_PBF_CFG_TX3Q_EN		BIT(3)
+#define MT_PBF_CFG_RX0Q_EN		BIT(4)
+#define MT_PBF_CFG_RX_DROP_EN		BIT(8)
+
+#define MT_PBF_TX_MAX_PCNT		0x0408
+#define MT_PBF_RX_MAX_PCNT		0x040c
+
+#define MT_BCN_OFFSET_BASE		0x041c
+#define MT_BCN_OFFSET(_n)		(MT_BCN_OFFSET_BASE + ((_n) << 2))
+
+#define MT_RF_BYPASS_0			0x0504
+#define MT_RF_BYPASS_1			0x0508
+#define MT_RF_SETTING_0			0x050c
+
+#define MT_RF_DATA_WRITE		0x0524
+
+#define MT_RF_CTRL			0x0528
+#define MT_RF_CTRL_ADDR			GENMASK(11, 0)
+#define MT_RF_CTRL_WRITE		BIT(12)
+#define MT_RF_CTRL_BUSY			BIT(13)
+#define MT_RF_CTRL_IDX			BIT(16)
+
+#define MT_RF_DATA_READ			0x052c
+
+#define MT_FCE_PSE_CTRL			0x0800
+#define MT_FCE_PARAMETERS		0x0804
+#define MT_FCE_CSO			0x0808
+
+#define MT_FCE_L2_STUFF			0x080c
+#define MT_FCE_L2_STUFF_HT_L2_EN	BIT(0)
+#define MT_FCE_L2_STUFF_QOS_L2_EN	BIT(1)
+#define MT_FCE_L2_STUFF_RX_STUFF_EN	BIT(2)
+#define MT_FCE_L2_STUFF_TX_STUFF_EN	BIT(3)
+#define MT_FCE_L2_STUFF_WR_MPDU_LEN_EN	BIT(4)
+#define MT_FCE_L2_STUFF_MVINV_BSWAP	BIT(5)
+#define MT_FCE_L2_STUFF_TS_CMD_QSEL_EN	GENMASK(15, 8)
+#define MT_FCE_L2_STUFF_TS_LEN_EN	GENMASK(23, 16)
+#define MT_FCE_L2_STUFF_OTHER_PORT	GENMASK(25, 24)
+
+#define MT_FCE_WLAN_FLOW_CONTROL1	0x0824
+
+#define MT_PAUSE_ENABLE_CONTROL1	0x0a38
+
+#define MT_MAC_CSR0			0x1000
+
+#define MT_MAC_SYS_CTRL			0x1004
+#define MT_MAC_SYS_CTRL_RESET_CSR	BIT(0)
+#define MT_MAC_SYS_CTRL_RESET_BBP	BIT(1)
+#define MT_MAC_SYS_CTRL_ENABLE_TX	BIT(2)
+#define MT_MAC_SYS_CTRL_ENABLE_RX	BIT(3)
+
+#define MT_MAC_ADDR_DW0			0x1008
+#define MT_MAC_ADDR_DW1			0x100c
+
+#define MT_MAC_BSSID_DW0		0x1010
+#define MT_MAC_BSSID_DW1		0x1014
+#define MT_MAC_BSSID_DW1_ADDR		GENMASK(15, 0)
+#define MT_MAC_BSSID_DW1_MBSS_MODE	GENMASK(17, 16)
+#define MT_MAC_BSSID_DW1_MBEACON_N	GENMASK(20, 18)
+#define MT_MAC_BSSID_DW1_MBSS_LOCAL_BIT	BIT(21)
+#define MT_MAC_BSSID_DW1_MBSS_MODE_B2	BIT(22)
+#define MT_MAC_BSSID_DW1_MBEACON_N_B3	BIT(23)
+#define MT_MAC_BSSID_DW1_MBSS_IDX_BYTE	GENMASK(26, 24)
+
+#define MT_MAX_LEN_CFG			0x1018
+
+#define MT_AMPDU_MAX_LEN_20M1S		0x1030
+#define MT_AMPDU_MAX_LEN_20M2S		0x1034
+#define MT_AMPDU_MAX_LEN_40M1S		0x1038
+#define MT_AMPDU_MAX_LEN_40M2S		0x103c
+#define MT_AMPDU_MAX_LEN		0x1040
+
+#define MT_WCID_DROP_BASE		0x106c
+#define MT_WCID_DROP(_n)		(MT_WCID_DROP_BASE + ((_n) >> 5) * 4)
+#define MT_WCID_DROP_MASK(_n)		BIT((_n) % 32)
+
+#define MT_BCN_BYPASS_MASK		0x108c
+
+#define MT_MAC_APC_BSSID_BASE		0x1090
+#define MT_MAC_APC_BSSID_L(_n)		(MT_MAC_APC_BSSID_BASE + ((_n) * 8))
+#define MT_MAC_APC_BSSID_H(_n)		(MT_MAC_APC_BSSID_BASE + ((_n) * 8 + 4))
+#define MT_MAC_APC_BSSID_H_ADDR		GENMASK(15, 0)
+#define MT_MAC_APC_BSSID0_H_EN		BIT(16)
+
+#define MT_XIFS_TIME_CFG		0x1100
+#define MT_XIFS_TIME_CFG_CCK_SIFS	GENMASK(7, 0)
+#define MT_XIFS_TIME_CFG_OFDM_SIFS	GENMASK(15, 8)
+#define MT_XIFS_TIME_CFG_OFDM_XIFS	GENMASK(19, 16)
+#define MT_XIFS_TIME_CFG_EIFS		GENMASK(28, 20)
+#define MT_XIFS_TIME_CFG_BB_RXEND_EN	BIT(29)
+
+#define MT_BKOFF_SLOT_CFG		0x1104
+#define MT_BKOFF_SLOT_CFG_SLOTTIME	GENMASK(7, 0)
+#define MT_BKOFF_SLOT_CFG_CC_DELAY	GENMASK(11, 8)
+
+#define MT_CH_TIME_CFG			0x110c
+#define MT_CH_TIME_CFG_TIMER_EN		BIT(0)
+#define MT_CH_TIME_CFG_TX_AS_BUSY	BIT(1)
+#define MT_CH_TIME_CFG_RX_AS_BUSY	BIT(2)
+#define MT_CH_TIME_CFG_NAV_AS_BUSY	BIT(3)
+#define MT_CH_TIME_CFG_EIFS_AS_BUSY	BIT(4)
+#define MT_CH_TIME_CFG_MDRDY_CNT_EN	BIT(5)
+#define MT_CH_TIME_CFG_CH_TIMER_CLR	GENMASK(9, 8)
+#define MT_CH_TIME_CFG_MDRDY_CLR	GENMASK(11, 10)
+
+#define MT_PBF_LIFE_TIMER		0x1110
+
+#define MT_BEACON_TIME_CFG		0x1114
+#define MT_BEACON_TIME_CFG_INTVAL	GENMASK(15, 0)
+#define MT_BEACON_TIME_CFG_TIMER_EN	BIT(16)
+#define MT_BEACON_TIME_CFG_SYNC_MODE	GENMASK(18, 17)
+#define MT_BEACON_TIME_CFG_TBTT_EN	BIT(19)
+#define MT_BEACON_TIME_CFG_BEACON_TX	BIT(20)
+#define MT_BEACON_TIME_CFG_TSF_COMP	GENMASK(31, 24)
+
+#define MT_TBTT_SYNC_CFG		0x1118
+#define MT_TBTT_TIMER_CFG		0x1124
+
+#define MT_INT_TIMER_CFG		0x1128
+#define MT_INT_TIMER_CFG_PRE_TBTT	GENMASK(15, 0)
+#define MT_INT_TIMER_CFG_GP_TIMER	GENMASK(31, 16)
+
+#define MT_INT_TIMER_EN			0x112c
+#define MT_INT_TIMER_EN_PRE_TBTT_EN	BIT(0)
+#define MT_INT_TIMER_EN_GP_TIMER_EN	BIT(1)
+
+#define MT_CH_IDLE			0x1130
+#define MT_CH_BUSY			0x1134
+#define MT_EXT_CH_BUSY			0x1138
+#define MT_ED_CCA_TIMER			0x1140
+
+#define MT_MAC_STATUS			0x1200
+#define MT_MAC_STATUS_TX		BIT(0)
+#define MT_MAC_STATUS_RX		BIT(1)
+
+#define MT_PWR_PIN_CFG			0x1204
+#define MT_AUX_CLK_CFG			0x120c
+
+#define MT_BB_PA_MODE_CFG0		0x1214
+#define MT_BB_PA_MODE_CFG1		0x1218
+#define MT_RF_PA_MODE_CFG0		0x121c
+#define MT_RF_PA_MODE_CFG1		0x1220
+
+#define MT_RF_PA_MODE_ADJ0		0x1228
+#define MT_RF_PA_MODE_ADJ1		0x122c
+
+#define MT_DACCLK_EN_DLY_CFG		0x1264
+
+#define MT_EDCA_CFG_BASE		0x1300
+#define MT_EDCA_CFG_AC(_n)		(MT_EDCA_CFG_BASE + ((_n) << 2))
+#define MT_EDCA_CFG_TXOP		GENMASK(7, 0)
+#define MT_EDCA_CFG_AIFSN		GENMASK(11, 8)
+#define MT_EDCA_CFG_CWMIN		GENMASK(15, 12)
+#define MT_EDCA_CFG_CWMAX		GENMASK(19, 16)
+
+#define MT_TX_PWR_CFG_0			0x1314
+#define MT_TX_PWR_CFG_1			0x1318
+#define MT_TX_PWR_CFG_2			0x131c
+#define MT_TX_PWR_CFG_3			0x1320
+#define MT_TX_PWR_CFG_4			0x1324
+
+#define MT_TX_BAND_CFG			0x132c
+#define MT_TX_BAND_CFG_UPPER_40M	BIT(0)
+#define MT_TX_BAND_CFG_5G		BIT(1)
+#define MT_TX_BAND_CFG_2G		BIT(2)
+
+#define MT_HT_FBK_TO_LEGACY		0x1384
+#define MT_TX_MPDU_ADJ_INT		0x1388
+
+#define MT_TX_PWR_CFG_7			0x13d4
+#define MT_TX_PWR_CFG_8			0x13d8
+#define MT_TX_PWR_CFG_9			0x13dc
+
+#define MT_TX_SW_CFG0			0x1330
+#define MT_TX_SW_CFG1			0x1334
+#define MT_TX_SW_CFG2			0x1338
+
+#define MT_TXOP_CTRL_CFG		0x1340
+
+#define MT_TX_RTS_CFG			0x1344
+#define MT_TX_RTS_CFG_RETRY_LIMIT	GENMASK(7, 0)
+#define MT_TX_RTS_CFG_THRESH		GENMASK(23, 8)
+#define MT_TX_RTS_FALLBACK		BIT(24)
+
+#define MT_TX_TIMEOUT_CFG		0x1348
+#define MT_TX_TIMEOUT_CFG_ACKTO		GENMASK(15, 8)
+
+#define MT_TX_RETRY_CFG			0x134c
+#define MT_VHT_HT_FBK_CFG1		0x1358
+
+#define MT_PROT_CFG_RATE		GENMASK(15, 0)
+#define MT_PROT_CFG_CTRL		GENMASK(17, 16)
+#define MT_PROT_CFG_NAV			GENMASK(19, 18)
+#define MT_PROT_CFG_TXOP_ALLOW		GENMASK(25, 20)
+#define MT_PROT_CFG_RTS_THRESH		BIT(26)
+
+#define MT_CCK_PROT_CFG			0x1364
+#define MT_OFDM_PROT_CFG		0x1368
+#define MT_MM20_PROT_CFG		0x136c
+#define MT_MM40_PROT_CFG		0x1370
+#define MT_GF20_PROT_CFG		0x1374
+#define MT_GF40_PROT_CFG		0x1378
+
+#define MT_EXP_ACK_TIME			0x1380
+
+#define MT_TX_PWR_CFG_0_EXT		0x1390
+#define MT_TX_PWR_CFG_1_EXT		0x1394
+
+#define MT_TX_FBK_LIMIT			0x1398
+#define MT_TX_FBK_LIMIT_MPDU_FBK	GENMASK(7, 0)
+#define MT_TX_FBK_LIMIT_AMPDU_FBK	GENMASK(15, 8)
+#define MT_TX_FBK_LIMIT_MPDU_UP_CLEAR	BIT(16)
+#define MT_TX_FBK_LIMIT_AMPDU_UP_CLEAR	BIT(17)
+#define MT_TX_FBK_LIMIT_RATE_LUT	BIT(18)
+
+#define MT_TX0_RF_GAIN_CORR		0x13a0
+#define MT_TX1_RF_GAIN_CORR		0x13a4
+
+#define MT_TX_ALC_CFG_0			0x13b0
+#define MT_TX_ALC_CFG_0_CH_INIT_0	GENMASK(5, 0)
+#define MT_TX_ALC_CFG_0_CH_INIT_1	GENMASK(13, 8)
+#define MT_TX_ALC_CFG_0_LIMIT_0		GENMASK(21, 16)
+#define MT_TX_ALC_CFG_0_LIMIT_1		GENMASK(29, 24)
+
+#define MT_TX_ALC_CFG_1			0x13b4
+#define MT_TX_ALC_CFG_1_TEMP_COMP	GENMASK(5, 0)
+
+#define MT_TX_ALC_CFG_2			0x13a8
+#define MT_TX_ALC_CFG_2_TEMP_COMP	GENMASK(5, 0)
+
+#define MT_TX_ALC_CFG_3			0x13ac
+#define MT_TX_ALC_CFG_4			0x13c0
+#define MT_TX_ALC_CFG_4_LOWGAIN_CH_EN	BIT(31)
+
+#define MT_TX_ALC_VGA3			0x13c8
+
+#define MT_TX_PROT_CFG6			0x13e0
+#define MT_TX_PROT_CFG7			0x13e4
+#define MT_TX_PROT_CFG8			0x13e8
+
+#define MT_PIFS_TX_CFG			0x13ec
+
+#define MT_RX_FILTR_CFG			0x1400
+
+#define MT_RX_FILTR_CFG_CRC_ERR		BIT(0)
+#define MT_RX_FILTR_CFG_PHY_ERR		BIT(1)
+#define MT_RX_FILTR_CFG_PROMISC		BIT(2)
+#define MT_RX_FILTR_CFG_OTHER_BSS	BIT(3)
+#define MT_RX_FILTR_CFG_VER_ERR		BIT(4)
+#define MT_RX_FILTR_CFG_MCAST		BIT(5)
+#define MT_RX_FILTR_CFG_BCAST		BIT(6)
+#define MT_RX_FILTR_CFG_DUP		BIT(7)
+#define MT_RX_FILTR_CFG_CFACK		BIT(8)
+#define MT_RX_FILTR_CFG_CFEND		BIT(9)
+#define MT_RX_FILTR_CFG_ACK		BIT(10)
+#define MT_RX_FILTR_CFG_CTS		BIT(11)
+#define MT_RX_FILTR_CFG_RTS		BIT(12)
+#define MT_RX_FILTR_CFG_PSPOLL		BIT(13)
+#define MT_RX_FILTR_CFG_BA		BIT(14)
+#define MT_RX_FILTR_CFG_BAR		BIT(15)
+#define MT_RX_FILTR_CFG_CTRL_RSV	BIT(16)
+
+#define MT_LEGACY_BASIC_RATE		0x1408
+#define MT_HT_BASIC_RATE		0x140c
+
+#define MT_HT_CTRL_CFG			0x1410
+
+#define MT_EXT_CCA_CFG			0x141c
+#define MT_EXT_CCA_CFG_CCA0		GENMASK(1, 0)
+#define MT_EXT_CCA_CFG_CCA1		GENMASK(3, 2)
+#define MT_EXT_CCA_CFG_CCA2		GENMASK(5, 4)
+#define MT_EXT_CCA_CFG_CCA3		GENMASK(7, 6)
+#define MT_EXT_CCA_CFG_CCA_MASK		GENMASK(11, 8)
+#define MT_EXT_CCA_CFG_ED_CCA_MASK	GENMASK(15, 12)
+
+#define MT_TX_SW_CFG3			0x1478
+
+#define MT_PN_PAD_MODE			0x150c
+
+#define MT_TXOP_HLDR_ET			0x1608
+
+#define MT_PROT_AUTO_TX_CFG		0x1648
+#define MT_PROT_AUTO_TX_CFG_PROT_PADJ	GENMASK(11, 8)
+#define MT_PROT_AUTO_TX_CFG_AUTO_PADJ	GENMASK(27, 24)
+
+#define MT_RX_STAT_0			0x1700
+#define MT_RX_STAT_0_CRC_ERRORS		GENMASK(15, 0)
+#define MT_RX_STAT_0_PHY_ERRORS		GENMASK(31, 16)
+
+#define MT_RX_STAT_1			0x1704
+#define MT_RX_STAT_1_CCA_ERRORS		GENMASK(15, 0)
+#define MT_RX_STAT_1_PLCP_ERRORS	GENMASK(31, 16)
+
+#define MT_RX_STAT_2			0x1708
+#define MT_RX_STAT_2_DUP_ERRORS		GENMASK(15, 0)
+#define MT_RX_STAT_2_OVERFLOW_ERRORS	GENMASK(31, 16)
+
+#define MT_TX_STAT_FIFO			0x1718
+#define MT_TX_STAT_FIFO_VALID		BIT(0)
+#define MT_TX_STAT_FIFO_SUCCESS		BIT(5)
+#define MT_TX_STAT_FIFO_AGGR		BIT(6)
+#define MT_TX_STAT_FIFO_ACKREQ		BIT(7)
+#define MT_TX_STAT_FIFO_WCID		GENMASK(15, 8)
+#define MT_TX_STAT_FIFO_RATE		GENMASK(31, 16)
+
+#define MT_TX_AGG_CNT_BASE0		0x1720
+#define MT_TX_AGG_CNT_BASE1		0x174c
+
+#define MT_TX_AGG_CNT(_id)		((_id) < 8 ?			\
+					 MT_TX_AGG_CNT_BASE0 + ((_id) << 2) : \
+					 MT_TX_AGG_CNT_BASE1 + (((_id) - 8) << 2))
+
+#define MT_TX_STAT_FIFO_EXT		0x1798
+#define MT_TX_STAT_FIFO_EXT_RETRY	GENMASK(7, 0)
+#define MT_TX_STAT_FIFO_EXT_PKTID	GENMASK(15, 8)
+
+#define MT_WCID_TX_RATE_BASE		0x1c00
+#define MT_WCID_TX_RATE(_i)		(MT_WCID_TX_RATE_BASE + ((_i) << 3))
+
+#define MT_BBP_CORE_BASE		0x2000
+#define MT_BBP_IBI_BASE			0x2100
+#define MT_BBP_AGC_BASE			0x2300
+#define MT_BBP_TXC_BASE			0x2400
+#define MT_BBP_RXC_BASE			0x2500
+#define MT_BBP_TXO_BASE			0x2600
+#define MT_BBP_TXBE_BASE		0x2700
+#define MT_BBP_RXFE_BASE		0x2800
+#define MT_BBP_RXO_BASE			0x2900
+#define MT_BBP_DFS_BASE			0x2a00
+#define MT_BBP_TR_BASE			0x2b00
+#define MT_BBP_CAL_BASE			0x2c00
+#define MT_BBP_DSC_BASE			0x2e00
+#define MT_BBP_PFMU_BASE		0x2f00
+
+#define MT_BBP(_type, _n)		(MT_BBP_##_type##_BASE + ((_n) << 2))
+
+#define MT_BBP_CORE_R1_BW		GENMASK(4, 3)
+
+#define MT_BBP_AGC_R0_CTRL_CHAN		GENMASK(9, 8)
+#define MT_BBP_AGC_R0_BW		GENMASK(14, 12)
+
+/* AGC, R4/R5 */
+#define MT_BBP_AGC_LNA_HIGH_GAIN	GENMASK(21, 16)
+#define MT_BBP_AGC_LNA_MID_GAIN		GENMASK(13, 8)
+#define MT_BBP_AGC_LNA_LOW_GAIN		GENMASK(5, 0)
+
+/* AGC, R6/R7 */
+#define MT_BBP_AGC_LNA_ULOW_GAIN	GENMASK(5, 0)
+
+/* AGC, R8/R9 */
+#define MT_BBP_AGC_LNA_GAIN_MODE	GENMASK(7, 6)
+#define MT_BBP_AGC_GAIN			GENMASK(14, 8)
+
+#define MT_BBP_AGC20_RSSI0		GENMASK(7, 0)
+#define MT_BBP_AGC20_RSSI1		GENMASK(15, 8)
+
+#define MT_BBP_TXBE_R0_CTRL_CHAN	GENMASK(1, 0)
+
+#define MT_WCID_ADDR_BASE		0x1800
+#define MT_WCID_ADDR(_n)		(MT_WCID_ADDR_BASE + (_n) * 8)
+
+#define MT_SRAM_BASE			0x4000
+
+#define MT_WCID_KEY_BASE		0x8000
+#define MT_WCID_KEY(_n)			(MT_WCID_KEY_BASE + (_n) * 32)
+
+#define MT_WCID_IV_BASE			0xa000
+#define MT_WCID_IV(_n)			(MT_WCID_IV_BASE + (_n) * 8)
+
+#define MT_WCID_ATTR_BASE		0xa800
+#define MT_WCID_ATTR(_n)		(MT_WCID_ATTR_BASE + (_n) * 4)
+
+#define MT_WCID_ATTR_PAIRWISE		BIT(0)
+#define MT_WCID_ATTR_PKEY_MODE		GENMASK(3, 1)
+#define MT_WCID_ATTR_BSS_IDX		GENMASK(6, 4)
+#define MT_WCID_ATTR_RXWI_UDF		GENMASK(9, 7)
+#define MT_WCID_ATTR_PKEY_MODE_EXT	BIT(10)
+#define MT_WCID_ATTR_BSS_IDX_EXT	BIT(11)
+#define MT_WCID_ATTR_WAPI_MCBC		BIT(15)
+#define MT_WCID_ATTR_WAPI_KEYID		GENMASK(31, 24)
+
+#define MT_SKEY_BASE_0			0xac00
+#define MT_SKEY_BASE_1			0xb400
+#define MT_SKEY_0(_bss, _idx)		(MT_SKEY_BASE_0 + (4 * (_bss) + (_idx)) * 32)
+#define MT_SKEY_1(_bss, _idx)		(MT_SKEY_BASE_1 + (4 * ((_bss) & 7) + (_idx)) * 32)
+#define MT_SKEY(_bss, _idx)		(((_bss) & 8) ? MT_SKEY_1(_bss, _idx) : MT_SKEY_0(_bss, _idx))
+
+#define MT_SKEY_MODE_BASE_0		0xb000
+#define MT_SKEY_MODE_BASE_1		0xb3f0
+#define MT_SKEY_MODE_0(_bss)		(MT_SKEY_MODE_BASE_0 + (((_bss) / 2) << 2))
+#define MT_SKEY_MODE_1(_bss)		(MT_SKEY_MODE_BASE_1 + ((((_bss) & 7) / 2) << 2))
+#define MT_SKEY_MODE(_bss)		(((_bss) & 8) ? MT_SKEY_MODE_1(_bss) : MT_SKEY_MODE_0(_bss))
+#define MT_SKEY_MODE_MASK		GENMASK(3, 0)
+#define MT_SKEY_MODE_SHIFT(_bss, _idx)	(4 * ((_idx) + 4 * ((_bss) & 1)))
+
+#define MT_BEACON_BASE			0xc000
+
+#define MT_TEMP_SENSOR			0x1d000
+#define MT_TEMP_SENSOR_VAL		GENMASK(6, 0)
+
+struct mt76_wcid_addr {
+	u8 macaddr[6];
+	__le16 ba_mask;
+} __packed __aligned(4);
+
+struct mt76_wcid_key {
+	u8 key[16];
+	u8 tx_mic[8];
+	u8 rx_mic[8];
+} __packed __aligned(4);
+
+enum mt76x2_cipher_type {
+	MT_CIPHER_NONE,
+	MT_CIPHER_WEP40,
+	MT_CIPHER_WEP104,
+	MT_CIPHER_TKIP,
+	MT_CIPHER_AES_CCMP,
+	MT_CIPHER_CKIP40,
+	MT_CIPHER_CKIP104,
+	MT_CIPHER_CKIP128,
+	MT_CIPHER_WAPI,
+};
+
+#endif
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_trace.c b/drivers/net/wireless/mediatek/mt76/mt76x2_trace.c
new file mode 100644
index 0000000..a09f117
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2_trace.c
@@ -0,0 +1,23 @@
+/*
+ * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <linux/module.h>
+
+#ifndef __CHECKER__
+#define CREATE_TRACE_POINTS
+#include "mt76x2_trace.h"
+
+#endif
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_trace.h b/drivers/net/wireless/mediatek/mt76/mt76x2_trace.h
new file mode 100644
index 0000000..4cd42414
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2_trace.h
@@ -0,0 +1,144 @@
+/*
+ * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#if !defined(__MT76x2_TRACE_H) || defined(TRACE_HEADER_MULTI_READ)
+#define __MT76x2_TRACE_H
+
+#include <linux/tracepoint.h>
+#include "mt76x2.h"
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM mt76x2
+
+#define MAXNAME		32
+#define DEV_ENTRY	__array(char, wiphy_name, 32)
+#define DEV_ASSIGN	strlcpy(__entry->wiphy_name, wiphy_name(mt76_hw(dev)->wiphy), MAXNAME)
+#define DEV_PR_FMT	"%s"
+#define DEV_PR_ARG	__entry->wiphy_name
+
+#define TXID_ENTRY	__field(u8, wcid) __field(u8, pktid)
+#define TXID_ASSIGN	__entry->wcid = wcid; __entry->pktid = pktid
+#define TXID_PR_FMT	" [%d:%d]"
+#define TXID_PR_ARG	__entry->wcid, __entry->pktid
+
+DECLARE_EVENT_CLASS(dev_evt,
+	TP_PROTO(struct mt76x2_dev *dev),
+	TP_ARGS(dev),
+	TP_STRUCT__entry(
+		DEV_ENTRY
+	),
+	TP_fast_assign(
+		DEV_ASSIGN;
+	),
+	TP_printk(DEV_PR_FMT, DEV_PR_ARG)
+);
+
+DECLARE_EVENT_CLASS(dev_txid_evt,
+	TP_PROTO(struct mt76x2_dev *dev, u8 wcid, u8 pktid),
+	TP_ARGS(dev, wcid, pktid),
+	TP_STRUCT__entry(
+		DEV_ENTRY
+		TXID_ENTRY
+	),
+	TP_fast_assign(
+		DEV_ASSIGN;
+		TXID_ASSIGN;
+	),
+	TP_printk(
+		DEV_PR_FMT TXID_PR_FMT,
+		DEV_PR_ARG, TXID_PR_ARG
+	)
+);
+
+DEFINE_EVENT(dev_evt, mac_txstat_poll,
+	TP_PROTO(struct mt76x2_dev *dev),
+	TP_ARGS(dev)
+);
+
+DEFINE_EVENT(dev_txid_evt, mac_txdone_add,
+	TP_PROTO(struct mt76x2_dev *dev, u8 wcid, u8 pktid),
+	TP_ARGS(dev, wcid, pktid)
+);
+
+TRACE_EVENT(mac_txstat_fetch,
+	TP_PROTO(struct mt76x2_dev *dev,
+		 struct mt76x2_tx_status *stat),
+
+	TP_ARGS(dev, stat),
+
+	TP_STRUCT__entry(
+		DEV_ENTRY
+		TXID_ENTRY
+		__field(bool, success)
+		__field(bool, aggr)
+		__field(bool, ack_req)
+		__field(u16, rate)
+		__field(u8, retry)
+	),
+
+	TP_fast_assign(
+		DEV_ASSIGN;
+		__entry->success = stat->success;
+		__entry->aggr = stat->aggr;
+		__entry->ack_req = stat->ack_req;
+		__entry->wcid = stat->wcid;
+		__entry->pktid = stat->pktid;
+		__entry->rate = stat->rate;
+		__entry->retry = stat->retry;
+	),
+
+	TP_printk(
+		DEV_PR_FMT TXID_PR_FMT
+		" success:%d aggr:%d ack_req:%d"
+		" rate:%04x retry:%d",
+		DEV_PR_ARG, TXID_PR_ARG,
+		__entry->success, __entry->aggr, __entry->ack_req,
+		__entry->rate, __entry->retry
+	)
+);
+
+TRACE_EVENT(dev_irq,
+	TP_PROTO(struct mt76x2_dev *dev, u32 val, u32 mask),
+
+	TP_ARGS(dev, val, mask),
+
+	TP_STRUCT__entry(
+		DEV_ENTRY
+		__field(u32, val)
+		__field(u32, mask)
+	),
+
+	TP_fast_assign(
+		DEV_ASSIGN;
+		__entry->val = val;
+		__entry->mask = mask;
+	),
+
+	TP_printk(
+		DEV_PR_FMT " %08x & %08x",
+		DEV_PR_ARG, __entry->val, __entry->mask
+	)
+);
+
+#endif
+
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH .
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_FILE mt76x2_trace
+
+#include <trace/define_trace.h>
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_tx.c b/drivers/net/wireless/mediatek/mt76/mt76x2_tx.c
new file mode 100644
index 0000000..1a32e1f
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2_tx.c
@@ -0,0 +1,258 @@
+/*
+ * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include "mt76x2.h"
+#include "mt76x2_dma.h"
+
+struct beacon_bc_data {
+	struct mt76x2_dev *dev;
+	struct sk_buff_head q;
+	struct sk_buff *tail[8];
+};
+
+void mt76x2_tx(struct ieee80211_hw *hw, struct ieee80211_tx_control *control,
+	     struct sk_buff *skb)
+{
+	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+	struct mt76x2_dev *dev = hw->priv;
+	struct ieee80211_vif *vif = info->control.vif;
+	struct mt76_wcid *wcid = &dev->global_wcid;
+
+	if (control->sta) {
+		struct mt76x2_sta *msta;
+
+		msta = (struct mt76x2_sta *) control->sta->drv_priv;
+		wcid = &msta->wcid;
+	} else if (vif) {
+		struct mt76x2_vif *mvif;
+
+		mvif = (struct mt76x2_vif *) vif->drv_priv;
+		wcid = &mvif->group_wcid;
+	}
+
+	mt76_tx(&dev->mt76, control->sta, wcid, skb);
+}
+
+void mt76x2_tx_complete(struct mt76x2_dev *dev, struct sk_buff *skb)
+{
+	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+
+	if (info->flags & IEEE80211_TX_CTL_AMPDU) {
+		ieee80211_free_txskb(mt76_hw(dev), skb);
+	} else {
+		ieee80211_tx_info_clear_status(info);
+		info->status.rates[0].idx = -1;
+		info->flags |= IEEE80211_TX_STAT_ACK;
+		ieee80211_tx_status(mt76_hw(dev), skb);
+	}
+}
+
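+/*
+ * Look up the per-rate maximum TX power for an ieee80211_tx_rate: VHT
+ * MCS 8/9 have a dedicated entry, the remaining VHT rates appear to share
+ * the HT table entries, and legacy rates map onto the CCK/OFDM tables by
+ * hardware rate value.
+ */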
+s8 mt76x2_tx_get_max_txpwr_adj(struct mt76x2_dev *dev,
+			       const struct ieee80211_tx_rate *rate)
+{
+	s8 max_txpwr;
+
+	if (rate->flags & IEEE80211_TX_RC_VHT_MCS) {
+		u8 mcs = ieee80211_rate_get_vht_mcs(rate);
+
+		if (mcs == 8 || mcs == 9) {
+			max_txpwr = dev->rate_power.vht[8];
+		} else {
+			u8 nss, idx;
+
+			nss = ieee80211_rate_get_vht_nss(rate);
+			idx = ((nss - 1) << 3) + mcs;
+			max_txpwr = dev->rate_power.ht[idx & 0xf];
+		}
+	} else if (rate->flags & IEEE80211_TX_RC_MCS) {
+		max_txpwr = dev->rate_power.ht[rate->idx & 0xf];
+	} else {
+		enum nl80211_band band = dev->mt76.chandef.chan->band;
+
+		if (band == NL80211_BAND_2GHZ) {
+			const struct ieee80211_rate *r;
+			struct wiphy *wiphy = mt76_hw(dev)->wiphy;
+			struct mt76_rate_power *rp = &dev->rate_power;
+
+			r = &wiphy->bands[band]->bitrates[rate->idx];
+			if (r->flags & IEEE80211_RATE_SHORT_PREAMBLE)
+				max_txpwr = rp->cck[r->hw_value & 0x3];
+			else
+				max_txpwr = rp->ofdm[r->hw_value & 0x7];
+		} else {
+			max_txpwr = dev->rate_power.ofdm[rate->idx & 0x7];
+		}
+	}
+
+	return max_txpwr;
+}
+
+s8 mt76x2_tx_get_txpwr_adj(struct mt76x2_dev *dev, s8 txpwr, s8 max_txpwr_adj)
+{
+	txpwr = min_t(s8, txpwr, dev->txpower_conf);
+	txpwr -= (dev->target_power + dev->target_power_delta[0]);
+	txpwr = min_t(s8, txpwr, max_txpwr_adj);
+
+	if (!dev->enable_tpc)
+		return 0;
+	else if (txpwr >= 0)
+		return min_t(s8, txpwr, 7);
+	else
+		return (txpwr < -16) ? 8 : (txpwr + 32) / 2;
+}
+
+void mt76x2_tx_set_txpwr_auto(struct mt76x2_dev *dev, s8 txpwr)
+{
+	s8 txpwr_adj;
+
+	txpwr_adj = mt76x2_tx_get_txpwr_adj(dev, txpwr,
+					    dev->rate_power.ofdm[4]);
+	mt76_rmw_field(dev, MT_PROT_AUTO_TX_CFG,
+		       MT_PROT_AUTO_TX_CFG_PROT_PADJ, txpwr_adj);
+	mt76_rmw_field(dev, MT_PROT_AUTO_TX_CFG,
+		       MT_PROT_AUTO_TX_CFG_AUTO_PADJ, txpwr_adj);
+}
+
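+/*
+ * Pad the 802.11 header to a multiple of four bytes, as the TX DMA path
+ * appears to require: push two extra bytes, move the header to the front
+ * and zero the resulting pad behind it.
+ */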
+static int mt76x2_insert_hdr_pad(struct sk_buff *skb)
+{
+	int len = ieee80211_get_hdrlen_from_skb(skb);
+
+	if (len % 4 == 0)
+		return 0;
+
+	skb_push(skb, 2);
+	memmove(skb->data, skb->data + 2, len);
+
+	skb->data[len] = 0;
+	skb->data[len + 1] = 0;
+	return 2;
+}
+
+int mt76x2_tx_prepare_skb(struct mt76_dev *mdev, void *txwi,
+			  struct sk_buff *skb, struct mt76_queue *q,
+			  struct mt76_wcid *wcid, struct ieee80211_sta *sta,
+			  u32 *tx_info)
+{
+	struct mt76x2_dev *dev = container_of(mdev, struct mt76x2_dev, mt76);
+	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+	int qsel = MT_QSEL_EDCA;
+	int ret;
+
+	if (q == &dev->mt76.q_tx[MT_TXQ_PSD] && wcid && wcid->idx < 128)
+		mt76x2_mac_wcid_set_drop(dev, wcid->idx, false);
+
+	mt76x2_mac_write_txwi(dev, txwi, skb, wcid, sta);
+
+	ret = mt76x2_insert_hdr_pad(skb);
+	if (ret < 0)
+		return ret;
+
+	if (info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE)
+		qsel = MT_QSEL_MGMT;
+
+	*tx_info = FIELD_PREP(MT_TXD_INFO_QSEL, qsel) |
+		   MT_TXD_INFO_80211;
+
+	if (!wcid || wcid->hw_key_idx == 0xff)
+		*tx_info |= MT_TXD_INFO_WIV;
+
+	return 0;
+}
+
+static void
+mt76x2_update_beacon_iter(void *priv, u8 *mac, struct ieee80211_vif *vif)
+{
+	struct mt76x2_dev *dev = (struct mt76x2_dev *) priv;
+	struct mt76x2_vif *mvif = (struct mt76x2_vif *) vif->drv_priv;
+	struct sk_buff *skb = NULL;
+
+	if (!(dev->beacon_mask & BIT(mvif->idx)))
+		return;
+
+	skb = ieee80211_beacon_get(mt76_hw(dev), vif);
+	if (!skb)
+		return;
+
+	mt76x2_mac_set_beacon(dev, mvif->idx, skb);
+}
+
+static void
+mt76x2_add_buffered_bc(void *priv, u8 *mac, struct ieee80211_vif *vif)
+{
+	struct beacon_bc_data *data = priv;
+	struct mt76x2_dev *dev = data->dev;
+	struct mt76x2_vif *mvif = (struct mt76x2_vif *) vif->drv_priv;
+	struct ieee80211_tx_info *info;
+	struct sk_buff *skb;
+
+	if (!(dev->beacon_mask & BIT(mvif->idx)))
+		return;
+
+	skb = ieee80211_get_buffered_bc(mt76_hw(dev), vif);
+	if (!skb)
+		return;
+
+	info = IEEE80211_SKB_CB(skb);
+	info->control.vif = vif;
+	info->flags |= IEEE80211_TX_CTL_ASSIGN_SEQ;
+	mt76_skb_set_moredata(skb, true);
+	__skb_queue_tail(&data->q, skb);
+	data->tail[mvif->idx] = skb;
+}
+
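+/*
+ * Pre-TBTT tasklet: refresh the beacon of every active interface, then
+ * collect buffered broadcast/multicast frames (iterating until no new
+ * frames appear), clear the more-data bit on the last frame per interface
+ * and push everything onto the PSD hardware queue.
+ */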
+void mt76x2_pre_tbtt_tasklet(unsigned long arg)
+{
+	struct mt76x2_dev *dev = (struct mt76x2_dev *) arg;
+	struct mt76_queue *q = &dev->mt76.q_tx[MT_TXQ_PSD];
+	struct beacon_bc_data data = {};
+	struct sk_buff *skb;
+	int i, nframes;
+
+	data.dev = dev;
+	__skb_queue_head_init(&data.q);
+
+	ieee80211_iterate_active_interfaces_atomic(mt76_hw(dev),
+		IEEE80211_IFACE_ITER_RESUME_ALL,
+		mt76x2_update_beacon_iter, dev);
+
+	do {
+		nframes = skb_queue_len(&data.q);
+		ieee80211_iterate_active_interfaces_atomic(mt76_hw(dev),
+			IEEE80211_IFACE_ITER_RESUME_ALL,
+			mt76x2_add_buffered_bc, &data);
+	} while (nframes != skb_queue_len(&data.q));
+
+	if (!nframes)
+		return;
+
+	for (i = 0; i < ARRAY_SIZE(data.tail); i++) {
+		if (!data.tail[i])
+			continue;
+
+		mt76_skb_set_moredata(data.tail[i], false);
+	}
+
+	spin_lock_bh(&q->lock);
+	while ((skb = __skb_dequeue(&data.q)) != NULL) {
+		struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+		struct ieee80211_vif *vif = info->control.vif;
+		struct mt76x2_vif *mvif = (struct mt76x2_vif *) vif->drv_priv;
+
+		mt76_tx_queue_skb(&dev->mt76, q, skb, &mvif->group_wcid, NULL);
+	}
+	spin_unlock_bh(&q->lock);
+}
diff --git a/drivers/net/wireless/mediatek/mt76/trace.c b/drivers/net/wireless/mediatek/mt76/trace.c
new file mode 100644
index 0000000..ea4ab87
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/trace.c
@@ -0,0 +1,23 @@
+/*
+ * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <linux/module.h>
+
+#ifndef __CHECKER__
+#define CREATE_TRACE_POINTS
+#include "trace.h"
+
+#endif
diff --git a/drivers/net/wireless/mediatek/mt76/trace.h b/drivers/net/wireless/mediatek/mt76/trace.h
new file mode 100644
index 0000000..ea30895
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/trace.h
@@ -0,0 +1,71 @@
+/*
+ * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#if !defined(__MT76_TRACE_H) || defined(TRACE_HEADER_MULTI_READ)
+#define __MT76_TRACE_H
+
+#include <linux/tracepoint.h>
+#include "mt76.h"
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM mt76
+
+#define MAXNAME		32
+#define DEV_ENTRY	__array(char, wiphy_name, 32)
+#define DEV_ASSIGN	strlcpy(__entry->wiphy_name, wiphy_name(dev->hw->wiphy), MAXNAME)
+#define DEV_PR_FMT	"%s"
+#define DEV_PR_ARG	__entry->wiphy_name
+
+#define REG_ENTRY	__field(u32, reg) __field(u32, val)
+#define REG_ASSIGN	__entry->reg = reg; __entry->val = val
+#define REG_PR_FMT	" %04x=%08x"
+#define REG_PR_ARG	__entry->reg, __entry->val
+
+DECLARE_EVENT_CLASS(dev_reg_evt,
+	TP_PROTO(struct mt76_dev *dev, u32 reg, u32 val),
+	TP_ARGS(dev, reg, val),
+	TP_STRUCT__entry(
+		DEV_ENTRY
+		REG_ENTRY
+	),
+	TP_fast_assign(
+		DEV_ASSIGN;
+		REG_ASSIGN;
+	),
+	TP_printk(
+		DEV_PR_FMT REG_PR_FMT,
+		DEV_PR_ARG, REG_PR_ARG
+	)
+);
+
+DEFINE_EVENT(dev_reg_evt, reg_rr,
+	TP_PROTO(struct mt76_dev *dev, u32 reg, u32 val),
+	TP_ARGS(dev, reg, val)
+);
+
+DEFINE_EVENT(dev_reg_evt, reg_wr,
+	TP_PROTO(struct mt76_dev *dev, u32 reg, u32 val),
+	TP_ARGS(dev, reg, val)
+);
+
+#endif
+
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH .
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_FILE trace
+
+#include <trace/define_trace.h>
diff --git a/drivers/net/wireless/mediatek/mt76/tx.c b/drivers/net/wireless/mediatek/mt76/tx.c
new file mode 100644
index 0000000..4eef69b
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/tx.c
@@ -0,0 +1,511 @@
+/*
+ * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include "mt76.h"
+
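+/*
+ * TXWI descriptors are kept on a free list and reused across frames. They
+ * are devm-allocated, so they are released together with the device, and
+ * they stay DMA-mapped for their whole lifetime; mt76_tx_free() only
+ * unmaps them on teardown.
+ */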
+static struct mt76_txwi_cache *
+mt76_alloc_txwi(struct mt76_dev *dev)
+{
+	struct mt76_txwi_cache *t;
+	dma_addr_t addr;
+	int size;
+
+	size = (sizeof(*t) + L1_CACHE_BYTES - 1) & ~(L1_CACHE_BYTES - 1);
+	t = devm_kzalloc(dev->dev, size, GFP_ATOMIC);
+	if (!t)
+		return NULL;
+
+	addr = dma_map_single(dev->dev, &t->txwi, sizeof(t->txwi),
+			      DMA_TO_DEVICE);
+	t->dma_addr = addr;
+
+	return t;
+}
+
+static struct mt76_txwi_cache *
+__mt76_get_txwi(struct mt76_dev *dev)
+{
+	struct mt76_txwi_cache *t = NULL;
+
+	spin_lock_bh(&dev->lock);
+	if (!list_empty(&dev->txwi_cache)) {
+		t = list_first_entry(&dev->txwi_cache, struct mt76_txwi_cache,
+				     list);
+		list_del(&t->list);
+	}
+	spin_unlock_bh(&dev->lock);
+
+	return t;
+}
+
+static struct mt76_txwi_cache *
+mt76_get_txwi(struct mt76_dev *dev)
+{
+	struct mt76_txwi_cache *t = __mt76_get_txwi(dev);
+
+	if (t)
+		return t;
+
+	return mt76_alloc_txwi(dev);
+}
+
+void
+mt76_put_txwi(struct mt76_dev *dev, struct mt76_txwi_cache *t)
+{
+	if (!t)
+		return;
+
+	spin_lock_bh(&dev->lock);
+	list_add(&t->list, &dev->txwi_cache);
+	spin_unlock_bh(&dev->lock);
+}
+
+void mt76_tx_free(struct mt76_dev *dev)
+{
+	struct mt76_txwi_cache *t;
+
+	while ((t = __mt76_get_txwi(dev)) != NULL)
+		dma_unmap_single(dev->dev, t->dma_addr, sizeof(t->txwi),
+				 DMA_TO_DEVICE);
+}
+
+static int
+mt76_txq_get_qid(struct ieee80211_txq *txq)
+{
+	if (!txq->sta)
+		return MT_TXQ_BE;
+
+	return txq->ac;
+}
+
+int mt76_tx_queue_skb(struct mt76_dev *dev, struct mt76_queue *q,
+		      struct sk_buff *skb, struct mt76_wcid *wcid,
+		      struct ieee80211_sta *sta)
+{
+	struct mt76_queue_entry e;
+	struct mt76_txwi_cache *t;
+	struct mt76_queue_buf buf[32];
+	struct sk_buff *iter;
+	dma_addr_t addr;
+	int len;
+	u32 tx_info = 0;
+	int n, ret;
+
+	t = mt76_get_txwi(dev);
+	if (!t) {
+		ieee80211_free_txskb(dev->hw, skb);
+		return -ENOMEM;
+	}
+
+	dma_sync_single_for_cpu(dev->dev, t->dma_addr, sizeof(t->txwi),
+				DMA_TO_DEVICE);
+	ret = dev->drv->tx_prepare_skb(dev, &t->txwi, skb, q, wcid, sta,
+				       &tx_info);
+	dma_sync_single_for_device(dev->dev, t->dma_addr, sizeof(t->txwi),
+				   DMA_TO_DEVICE);
+	if (ret < 0)
+		goto free;
+
+	len = skb->len - skb->data_len;
+	addr = dma_map_single(dev->dev, skb->data, len, DMA_TO_DEVICE);
+	if (dma_mapping_error(dev->dev, addr)) {
+		ret = -ENOMEM;
+		goto free;
+	}
+
+	n = 0;
+	buf[n].addr = t->dma_addr;
+	buf[n++].len = dev->drv->txwi_size;
+	buf[n].addr = addr;
+	buf[n++].len = len;
+
+	skb_walk_frags(skb, iter) {
+		if (n == ARRAY_SIZE(buf))
+			goto unmap;
+
+		addr = dma_map_single(dev->dev, iter->data, iter->len,
+				      DMA_TO_DEVICE);
+		if (dma_mapping_error(dev->dev, addr))
+			goto unmap;
+
+		buf[n].addr = addr;
+		buf[n++].len = iter->len;
+	}
+
+	if (q->queued + (n + 1) / 2 >= q->ndesc - 1)
+		goto unmap;
+
+	return dev->queue_ops->add_buf(dev, q, buf, n, tx_info, skb, t);
+
+unmap:
+	ret = -ENOMEM;
+	for (n--; n > 0; n--)
+		dma_unmap_single(dev->dev, buf[n].addr, buf[n].len,
+				 DMA_TO_DEVICE);
+
+free:
+	e.skb = skb;
+	e.txwi = t;
+	dev->drv->tx_complete_skb(dev, q, &e, true);
+	mt76_put_txwi(dev, t);
+	return ret;
+}
+EXPORT_SYMBOL_GPL(mt76_tx_queue_skb);
+
+void
+mt76_tx(struct mt76_dev *dev, struct ieee80211_sta *sta,
+	struct mt76_wcid *wcid, struct sk_buff *skb)
+{
+	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+	struct mt76_queue *q;
+	int qid = skb_get_queue_mapping(skb);
+
+	if (WARN_ON(qid >= MT_TXQ_PSD)) {
+		qid = MT_TXQ_BE;
+		skb_set_queue_mapping(skb, qid);
+	}
+
+	if (!wcid->tx_rate_set)
+		ieee80211_get_tx_rates(info->control.vif, sta, skb,
+				       info->control.rates, 1);
+
+	q = &dev->q_tx[qid];
+
+	spin_lock_bh(&q->lock);
+	mt76_tx_queue_skb(dev, q, skb, wcid, sta);
+	dev->queue_ops->kick(dev, q);
+
+	if (q->queued > q->ndesc - 8)
+		ieee80211_stop_queue(dev->hw, skb_get_queue_mapping(skb));
+	spin_unlock_bh(&q->lock);
+}
+EXPORT_SYMBOL_GPL(mt76_tx);
+
+static struct sk_buff *
+mt76_txq_dequeue(struct mt76_dev *dev, struct mt76_txq *mtxq, bool ps)
+{
+	struct ieee80211_txq *txq = mtxq_to_txq(mtxq);
+	struct sk_buff *skb;
+
+	skb = skb_dequeue(&mtxq->retry_q);
+	if (skb) {
+		u8 tid = skb->priority & IEEE80211_QOS_CTL_TID_MASK;
+
+		if (ps && skb_queue_empty(&mtxq->retry_q))
+			ieee80211_sta_set_buffered(txq->sta, tid, false);
+
+		return skb;
+	}
+
+	return ieee80211_tx_dequeue(dev->hw, txq);
+}
+
+static void
+mt76_check_agg_ssn(struct mt76_txq *mtxq, struct sk_buff *skb)
+{
+	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
+
+	if (!ieee80211_is_data_qos(hdr->frame_control))
+		return;
+
+	mtxq->agg_ssn = le16_to_cpu(hdr->seq_ctrl) + 0x10;
+}
+
+static void
+mt76_queue_ps_skb(struct mt76_dev *dev, struct ieee80211_sta *sta,
+		  struct sk_buff *skb, bool last)
+{
+	struct mt76_wcid *wcid = (struct mt76_wcid *) sta->drv_priv;
+	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+	struct mt76_queue *hwq = &dev->q_tx[MT_TXQ_PSD];
+
+	info->control.flags |= IEEE80211_TX_CTRL_PS_RESPONSE;
+	if (last)
+		info->flags |= IEEE80211_TX_STATUS_EOSP;
+
+	mt76_skb_set_moredata(skb, !last);
+	mt76_tx_queue_skb(dev, hwq, skb, wcid, sta);
+}
+
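+/*
+ * Release PS-buffered frames for the requested TIDs. Every frame is sent
+ * with the more-data bit set except the last one, which also carries the
+ * EOSP flag; holding one skb back (last_skb) makes that possible.
+ */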
+void
+mt76_release_buffered_frames(struct ieee80211_hw *hw, struct ieee80211_sta *sta,
+			     u16 tids, int nframes,
+			     enum ieee80211_frame_release_type reason,
+			     bool more_data)
+{
+	struct mt76_dev *dev = hw->priv;
+	struct sk_buff *last_skb = NULL;
+	struct mt76_queue *hwq = &dev->q_tx[MT_TXQ_PSD];
+	int i;
+
+	spin_lock_bh(&hwq->lock);
+	for (i = 0; tids && nframes; i++, tids >>= 1) {
+		struct ieee80211_txq *txq = sta->txq[i];
+		struct mt76_txq *mtxq = (struct mt76_txq *) txq->drv_priv;
+		struct sk_buff *skb;
+
+		if (!(tids & 1))
+			continue;
+
+		do {
+			skb = mt76_txq_dequeue(dev, mtxq, true);
+			if (!skb)
+				break;
+
+			if (mtxq->aggr)
+				mt76_check_agg_ssn(mtxq, skb);
+
+			nframes--;
+			if (last_skb)
+				mt76_queue_ps_skb(dev, sta, last_skb, false);
+
+			last_skb = skb;
+		} while (nframes);
+	}
+
+	if (last_skb) {
+		mt76_queue_ps_skb(dev, sta, last_skb, true);
+		dev->queue_ops->kick(dev, hwq);
+	}
+	spin_unlock_bh(&hwq->lock);
+}
+EXPORT_SYMBOL_GPL(mt76_release_buffered_frames);
+
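+/*
+ * Send a burst of frames from one software queue: up to 16 frames when
+ * the first frame belongs to an A-MPDU session, otherwise up to 3. All
+ * frames in the burst reuse the first frame's rate; a rate-probing frame
+ * or a change in A-MPDU state ends the burst early.
+ */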
+static int
+mt76_txq_send_burst(struct mt76_dev *dev, struct mt76_queue *hwq,
+		    struct mt76_txq *mtxq, bool *empty)
+{
+	struct ieee80211_txq *txq = mtxq_to_txq(mtxq);
+	struct ieee80211_tx_info *info;
+	struct mt76_wcid *wcid = mtxq->wcid;
+	struct sk_buff *skb;
+	int n_frames = 1, limit;
+	struct ieee80211_tx_rate tx_rate;
+	bool ampdu;
+	bool probe;
+	int idx;
+
+	skb = mt76_txq_dequeue(dev, mtxq, false);
+	if (!skb) {
+		*empty = true;
+		return 0;
+	}
+
+	info = IEEE80211_SKB_CB(skb);
+	if (!wcid->tx_rate_set)
+		ieee80211_get_tx_rates(txq->vif, txq->sta, skb,
+				       info->control.rates, 1);
+	tx_rate = info->control.rates[0];
+
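+	/* rate-probing frames go out on their own; otherwise burst up to
+	 * 16 frames for A-MPDU traffic and 3 for everything else
+	 */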
+	probe = (info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE);
+	ampdu = IEEE80211_SKB_CB(skb)->flags & IEEE80211_TX_CTL_AMPDU;
+	limit = ampdu ? 16 : 3;
+
+	if (ampdu)
+		mt76_check_agg_ssn(mtxq, skb);
+
+	idx = mt76_tx_queue_skb(dev, hwq, skb, wcid, txq->sta);
+
+	if (idx < 0)
+		return idx;
+
+	do {
+		bool cur_ampdu;
+
+		if (probe)
+			break;
+
+		if (test_bit(MT76_SCANNING, &dev->state) ||
+		    test_bit(MT76_RESET, &dev->state))
+			return -EBUSY;
+
+		skb = mt76_txq_dequeue(dev, mtxq, false);
+		if (!skb) {
+			*empty = true;
+			break;
+		}
+
+		info = IEEE80211_SKB_CB(skb);
+		cur_ampdu = info->flags & IEEE80211_TX_CTL_AMPDU;
+
+		if (ampdu != cur_ampdu ||
+		    (info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE)) {
+			skb_queue_tail(&mtxq->retry_q, skb);
+			break;
+		}
+
+		info->control.rates[0] = tx_rate;
+
+		if (cur_ampdu)
+			mt76_check_agg_ssn(mtxq, skb);
+
+		idx = mt76_tx_queue_skb(dev, hwq, skb, wcid, txq->sta);
+		if (idx < 0)
+			return idx;
+
+		n_frames++;
+	} while (n_frames < limit);
+
+	if (!probe) {
+		hwq->swq_queued++;
+		hwq->entry[idx].schedule = true;
+	}
+
+	dev->queue_ops->kick(dev, hwq);
+
+	return n_frames;
+}
+
+static int
+mt76_txq_schedule_list(struct mt76_dev *dev, struct mt76_queue *hwq)
+{
+	struct mt76_txq *mtxq, *mtxq_last;
+	int len = 0;
+
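+	/* hwq->lock is dropped while a BAR frame is sent, so the schedule
+	 * list can change underneath us; restart when that happens
+	 */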
+restart:
+	mtxq_last = list_last_entry(&hwq->swq, struct mt76_txq, list);
+	while (!list_empty(&hwq->swq)) {
+		bool empty = false;
+		int cur;
+
+		mtxq = list_first_entry(&hwq->swq, struct mt76_txq, list);
+		if (mtxq->send_bar && mtxq->aggr) {
+			struct ieee80211_txq *txq = mtxq_to_txq(mtxq);
+			struct ieee80211_sta *sta = txq->sta;
+			struct ieee80211_vif *vif = txq->vif;
+			u16 agg_ssn = mtxq->agg_ssn;
+			u8 tid = txq->tid;
+
+			mtxq->send_bar = false;
+			spin_unlock_bh(&hwq->lock);
+			ieee80211_send_bar(vif, sta->addr, tid, agg_ssn);
+			spin_lock_bh(&hwq->lock);
+			goto restart;
+		}
+
+		list_del_init(&mtxq->list);
+
+		cur = mt76_txq_send_burst(dev, hwq, mtxq, &empty);
+		if (!empty)
+			list_add_tail(&mtxq->list, &hwq->swq);
+
+		if (cur < 0)
+			return cur;
+
+		len += cur;
+
+		if (mtxq == mtxq_last)
+			break;
+	}
+
+	return len;
+}
+
+void mt76_txq_schedule(struct mt76_dev *dev, struct mt76_queue *hwq)
+{
+	int len;
+
+	do {
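+		/* limit the number of pending software-queue bursts to 4 */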
+		if (hwq->swq_queued >= 4 || list_empty(&hwq->swq))
+			break;
+
+		len = mt76_txq_schedule_list(dev, hwq);
+	} while (len > 0);
+}
+EXPORT_SYMBOL_GPL(mt76_txq_schedule);
+
+void mt76_txq_schedule_all(struct mt76_dev *dev)
+{
+	int i;
+
+	for (i = 0; i <= MT_TXQ_BK; i++) {
+		struct mt76_queue *q = &dev->q_tx[i];
+
+		spin_lock_bh(&q->lock);
+		mt76_txq_schedule(dev, q);
+		spin_unlock_bh(&q->lock);
+	}
+}
+EXPORT_SYMBOL_GPL(mt76_txq_schedule_all);
+
+void mt76_stop_tx_queues(struct mt76_dev *dev, struct ieee80211_sta *sta,
+			 bool send_bar)
+{
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(sta->txq); i++) {
+		struct ieee80211_txq *txq = sta->txq[i];
+		struct mt76_txq *mtxq = (struct mt76_txq *) txq->drv_priv;
+
+		spin_lock_bh(&mtxq->hwq->lock);
+		mtxq->send_bar = mtxq->aggr && send_bar;
+		if (!list_empty(&mtxq->list))
+			list_del_init(&mtxq->list);
+		spin_unlock_bh(&mtxq->hwq->lock);
+	}
+}
+EXPORT_SYMBOL_GPL(mt76_stop_tx_queues);
+
+void mt76_wake_tx_queue(struct ieee80211_hw *hw, struct ieee80211_txq *txq)
+{
+	struct mt76_dev *dev = hw->priv;
+	struct mt76_txq *mtxq = (struct mt76_txq *) txq->drv_priv;
+	struct mt76_queue *hwq = mtxq->hwq;
+
+	spin_lock_bh(&hwq->lock);
+	if (list_empty(&mtxq->list))
+		list_add_tail(&mtxq->list, &hwq->swq);
+	mt76_txq_schedule(dev, hwq);
+	spin_unlock_bh(&hwq->lock);
+}
+EXPORT_SYMBOL_GPL(mt76_wake_tx_queue);
+
+void mt76_txq_remove(struct mt76_dev *dev, struct ieee80211_txq *txq)
+{
+	struct mt76_txq *mtxq;
+	struct mt76_queue *hwq;
+	struct sk_buff *skb;
+
+	if (!txq)
+		return;
+
+	mtxq = (struct mt76_txq *) txq->drv_priv;
+	hwq = mtxq->hwq;
+
+	spin_lock_bh(&hwq->lock);
+	if (!list_empty(&mtxq->list))
+		list_del(&mtxq->list);
+	spin_unlock_bh(&hwq->lock);
+
+	while ((skb = skb_dequeue(&mtxq->retry_q)) != NULL)
+		ieee80211_free_txskb(dev->hw, skb);
+}
+EXPORT_SYMBOL_GPL(mt76_txq_remove);
+
+void mt76_txq_init(struct mt76_dev *dev, struct ieee80211_txq *txq)
+{
+	struct mt76_txq *mtxq = (struct mt76_txq *) txq->drv_priv;
+
+	INIT_LIST_HEAD(&mtxq->list);
+	skb_queue_head_init(&mtxq->retry_q);
+
+	mtxq->hwq = &dev->q_tx[mt76_txq_get_qid(txq)];
+}
+EXPORT_SYMBOL_GPL(mt76_txq_init);
diff --git a/drivers/net/wireless/mediatek/mt76/util.c b/drivers/net/wireless/mediatek/mt76/util.c
new file mode 100644
index 0000000..0c35b8d
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/util.c
@@ -0,0 +1,78 @@
+/*
+ * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <linux/module.h>
+#include "mt76.h"
+
+bool __mt76_poll(struct mt76_dev *dev, u32 offset, u32 mask, u32 val,
+		 int timeout)
+{
+	u32 cur;
+
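+	/* busy-poll in 10 usec steps; timeout is given in usec */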
+	timeout /= 10;
+	do {
+		cur = dev->bus->rr(dev, offset) & mask;
+		if (cur == val)
+			return true;
+
+		udelay(10);
+	} while (timeout-- > 0);
+
+	return false;
+}
+EXPORT_SYMBOL_GPL(__mt76_poll);
+
+bool __mt76_poll_msec(struct mt76_dev *dev, u32 offset, u32 mask, u32 val,
+		      int timeout)
+{
+	u32 cur;
+
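+	/* sleep-poll in 10-20 msec steps; timeout is given in msec */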
+	timeout /= 10;
+	do {
+		cur = dev->bus->rr(dev, offset) & mask;
+		if (cur == val)
+			return true;
+
+		usleep_range(10000, 20000);
+	} while (timeout-- > 0);
+
+	return false;
+}
+EXPORT_SYMBOL_GPL(__mt76_poll_msec);
+
+int mt76_wcid_alloc(unsigned long *mask, int size)
+{
+	int i, idx = 0, cur;
+
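+	/* scan the bitmap one long at a time for the first clear bit */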
+	for (i = 0; i < size / BITS_PER_LONG; i++) {
+		idx = ffs(~mask[i]);
+		if (!idx)
+			continue;
+
+		idx--;
+		cur = i * BITS_PER_LONG + idx;
+		if (cur >= size)
+			break;
+
+		mask[i] |= BIT(idx);
+		return cur;
+	}
+
+	return -1;
+}
+EXPORT_SYMBOL_GPL(mt76_wcid_alloc);
+
+MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/net/wireless/mediatek/mt76/util.h b/drivers/net/wireless/mediatek/mt76/util.h
new file mode 100644
index 0000000..018d475
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/util.h
@@ -0,0 +1,44 @@
+/*
+ * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
+ * Copyright (C) 2004 - 2009 Ivo van Doorn <IvDoorn@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __MT76_UTIL_H
+#define __MT76_UTIL_H
+
+#include <linux/skbuff.h>
+#include <linux/bitops.h>
+#include <linux/bitfield.h>
+
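+/* advance a ring index by one, wrapping around at _size */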
+#define MT76_INCR(_var, _size) \
+	_var = (((_var) + 1) % _size)
+
+int mt76_wcid_alloc(unsigned long *mask, int size);
+
+static inline void
+mt76_wcid_free(unsigned long *mask, int idx)
+{
+	mask[idx / BITS_PER_LONG] &= ~BIT(idx % BITS_PER_LONG);
+}
+
+static inline void
+mt76_skb_set_moredata(struct sk_buff *skb, bool enable)
+{
+	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
+
+	if (enable)
+		hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_MOREDATA);
+	else
+		hdr->frame_control &= ~cpu_to_le16(IEEE80211_FCTL_MOREDATA);
+}
+
+#endif
diff --git a/drivers/net/wireless/realtek/rtl818x/rtl8187/rtl8225.c b/drivers/net/wireless/realtek/rtl818x/rtl8187/rtl8225.c
index e6668ff..ff0971f 100644
--- a/drivers/net/wireless/realtek/rtl818x/rtl8187/rtl8225.c
+++ b/drivers/net/wireless/realtek/rtl818x/rtl8187/rtl8225.c
@@ -688,10 +688,7 @@ static void rtl8225z2_b_rf_set_tx_power(struct ieee80211_hw *dev, int channel)
 	cck_power = priv->channels[channel - 1].hw_value & 0xF;
 	ofdm_power = priv->channels[channel - 1].hw_value >> 4;
 
-	if (cck_power > 15)
-		cck_power = (priv->hw_rev == RTL8187BvB) ? 15 : 22;
-	else
-		cck_power += (priv->hw_rev == RTL8187BvB) ? 0 : 7;
+	cck_power += (priv->hw_rev == RTL8187BvB) ? 0 : 7;
 	cck_power += priv->txpwr_base & 0xF;
 	cck_power = min(cck_power, (u8)35);
 
diff --git a/drivers/net/wireless/realtek/rtlwifi/base.c b/drivers/net/wireless/realtek/rtlwifi/base.c
index cad2272..704741d 100644
--- a/drivers/net/wireless/realtek/rtlwifi/base.c
+++ b/drivers/net/wireless/realtek/rtlwifi/base.c
@@ -1726,7 +1726,7 @@ int rtl_tx_agg_oper(struct ieee80211_hw *hw,
 void rtl_rx_ampdu_apply(struct rtl_priv *rtlpriv)
 {
 	struct rtl_btc_ops *btc_ops = rtlpriv->btcoexist.btc_ops;
-	u8 reject_agg, ctrl_agg_size = 0, agg_size;
+	u8 reject_agg = 0, ctrl_agg_size = 0, agg_size = 0;
 
 	if (rtlpriv->cfg->ops->get_btc_status())
 		btc_ops->btc_get_ampdu_cfg(rtlpriv, &reject_agg,
diff --git a/drivers/net/wireless/realtek/rtlwifi/pci.c b/drivers/net/wireless/realtek/rtlwifi/pci.c
index c2575b0..4013394 100644
--- a/drivers/net/wireless/realtek/rtlwifi/pci.c
+++ b/drivers/net/wireless/realtek/rtlwifi/pci.c
@@ -59,6 +59,7 @@ static u8 _rtl_mac_to_hwqueue(struct ieee80211_hw *hw, struct sk_buff *skb)
 	struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
 	__le16 fc = rtl_get_fc(skb);
 	u8 queue_index = skb_get_queue_mapping(skb);
+	struct ieee80211_hdr *hdr;
 
 	if (unlikely(ieee80211_is_beacon(fc)))
 		return BEACON_QUEUE;
@@ -67,6 +68,13 @@ static u8 _rtl_mac_to_hwqueue(struct ieee80211_hw *hw, struct sk_buff *skb)
 	if (rtlhal->hw_type == HARDWARE_TYPE_RTL8192SE)
 		if (ieee80211_is_nullfunc(fc))
 			return HIGH_QUEUE;
+	if (rtlhal->hw_type == HARDWARE_TYPE_RTL8822BE) {
+		hdr = rtl_get_hdr(skb);
+
+		if (is_multicast_ether_addr(hdr->addr1) ||
+		    is_broadcast_ether_addr(hdr->addr1))
+			return HIGH_QUEUE;
+	}
 
 	return ac_to_hwq[queue_index];
 }
@@ -557,13 +565,6 @@ static void _rtl_pci_tx_isr(struct ieee80211_hw *hw, int prio)
 		else
 			entry = (u8 *)(&ring->desc[ring->idx]);
 
-		if (rtlpriv->cfg->ops->get_available_desc &&
-		    rtlpriv->cfg->ops->get_available_desc(hw, prio) <= 1) {
-			RT_TRACE(rtlpriv, (COMP_INTR | COMP_SEND), DBG_DMESG,
-				 "no available desc!\n");
-			return;
-		}
-
 		if (!rtlpriv->cfg->ops->is_tx_desc_closed(hw, prio, ring->idx))
 			return;
 		ring->idx = (ring->idx + 1) % ring->entries;
@@ -747,7 +748,7 @@ static void _rtl_pci_rx_interrupt(struct ieee80211_hw *hw)
 	u8 tmp_one;
 	bool unicast = false;
 	u8 hw_queue = 0;
-	unsigned int rx_remained_cnt;
+	unsigned int rx_remained_cnt = 0;
 	struct rtl_stats stats = {
 		.signal = 0,
 		.rate = 0,
@@ -768,7 +769,8 @@ static void _rtl_pci_rx_interrupt(struct ieee80211_hw *hw)
 		struct sk_buff *new_skb;
 
 		if (rtlpriv->use_new_trx_flow) {
-			rx_remained_cnt =
+			if (rx_remained_cnt == 0)
+				rx_remained_cnt =
 				rtlpriv->cfg->ops->rx_desc_buff_remained_cnt(hw,
 								      hw_queue);
 			if (rx_remained_cnt == 0)
@@ -924,10 +926,8 @@ static irqreturn_t _rtl_pci_interrupt(int irq, void *dev_id)
 	struct rtl_priv *rtlpriv = rtl_priv(hw);
 	struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
 	unsigned long flags;
-	u32 inta = 0;
-	u32 intb = 0;
-	u32 intc = 0;
-	u32 intd = 0;
+	struct rtl_int intvec = {0};
+
 	irqreturn_t ret = IRQ_HANDLED;
 
 	if (rtlpci->irq_enabled == 0)
@@ -937,47 +937,47 @@ static irqreturn_t _rtl_pci_interrupt(int irq, void *dev_id)
 	rtlpriv->cfg->ops->disable_interrupt(hw);
 
 	/*read ISR: 4/8bytes */
-	rtlpriv->cfg->ops->interrupt_recognized(hw, &inta, &intb, &intc, &intd);
+	rtlpriv->cfg->ops->interrupt_recognized(hw, &intvec);
 
 	/*Shared IRQ or HW disappeared */
-	if (!inta || inta == 0xffff)
+	if (!intvec.inta || intvec.inta == 0xffff)
 		goto done;
 
 	/*<1> beacon related */
-	if (inta & rtlpriv->cfg->maps[RTL_IMR_TBDOK])
+	if (intvec.inta & rtlpriv->cfg->maps[RTL_IMR_TBDOK])
 		RT_TRACE(rtlpriv, COMP_INTR, DBG_TRACE,
 			 "beacon ok interrupt!\n");
 
-	if (unlikely(inta & rtlpriv->cfg->maps[RTL_IMR_TBDER]))
+	if (unlikely(intvec.inta & rtlpriv->cfg->maps[RTL_IMR_TBDER]))
 		RT_TRACE(rtlpriv, COMP_INTR, DBG_TRACE,
 			 "beacon err interrupt!\n");
 
-	if (inta & rtlpriv->cfg->maps[RTL_IMR_BDOK])
+	if (intvec.inta & rtlpriv->cfg->maps[RTL_IMR_BDOK])
 		RT_TRACE(rtlpriv, COMP_INTR, DBG_TRACE, "beacon interrupt!\n");
 
-	if (inta & rtlpriv->cfg->maps[RTL_IMR_BCNINT]) {
+	if (intvec.inta & rtlpriv->cfg->maps[RTL_IMR_BCNINT]) {
 		RT_TRACE(rtlpriv, COMP_INTR, DBG_TRACE,
 			 "prepare beacon for interrupt!\n");
 		tasklet_schedule(&rtlpriv->works.irq_prepare_bcn_tasklet);
 	}
 
 	/*<2> Tx related */
-	if (unlikely(intb & rtlpriv->cfg->maps[RTL_IMR_TXFOVW]))
+	if (unlikely(intvec.intb & rtlpriv->cfg->maps[RTL_IMR_TXFOVW]))
 		RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING, "IMR_TXFOVW!\n");
 
-	if (inta & rtlpriv->cfg->maps[RTL_IMR_MGNTDOK]) {
+	if (intvec.inta & rtlpriv->cfg->maps[RTL_IMR_MGNTDOK]) {
 		RT_TRACE(rtlpriv, COMP_INTR, DBG_TRACE,
 			 "Manage ok interrupt!\n");
 		_rtl_pci_tx_isr(hw, MGNT_QUEUE);
 	}
 
-	if (inta & rtlpriv->cfg->maps[RTL_IMR_HIGHDOK]) {
+	if (intvec.inta & rtlpriv->cfg->maps[RTL_IMR_HIGHDOK]) {
 		RT_TRACE(rtlpriv, COMP_INTR, DBG_TRACE,
 			 "HIGH_QUEUE ok interrupt!\n");
 		_rtl_pci_tx_isr(hw, HIGH_QUEUE);
 	}
 
-	if (inta & rtlpriv->cfg->maps[RTL_IMR_BKDOK]) {
+	if (intvec.inta & rtlpriv->cfg->maps[RTL_IMR_BKDOK]) {
 		rtlpriv->link_info.num_tx_inperiod++;
 
 		RT_TRACE(rtlpriv, COMP_INTR, DBG_TRACE,
@@ -985,7 +985,7 @@ static irqreturn_t _rtl_pci_interrupt(int irq, void *dev_id)
 		_rtl_pci_tx_isr(hw, BK_QUEUE);
 	}
 
-	if (inta & rtlpriv->cfg->maps[RTL_IMR_BEDOK]) {
+	if (intvec.inta & rtlpriv->cfg->maps[RTL_IMR_BEDOK]) {
 		rtlpriv->link_info.num_tx_inperiod++;
 
 		RT_TRACE(rtlpriv, COMP_INTR, DBG_TRACE,
@@ -993,7 +993,7 @@ static irqreturn_t _rtl_pci_interrupt(int irq, void *dev_id)
 		_rtl_pci_tx_isr(hw, BE_QUEUE);
 	}
 
-	if (inta & rtlpriv->cfg->maps[RTL_IMR_VIDOK]) {
+	if (intvec.inta & rtlpriv->cfg->maps[RTL_IMR_VIDOK]) {
 		rtlpriv->link_info.num_tx_inperiod++;
 
 		RT_TRACE(rtlpriv, COMP_INTR, DBG_TRACE,
@@ -1001,7 +1001,7 @@ static irqreturn_t _rtl_pci_interrupt(int irq, void *dev_id)
 		_rtl_pci_tx_isr(hw, VI_QUEUE);
 	}
 
-	if (inta & rtlpriv->cfg->maps[RTL_IMR_VODOK]) {
+	if (intvec.inta & rtlpriv->cfg->maps[RTL_IMR_VODOK]) {
 		rtlpriv->link_info.num_tx_inperiod++;
 
 		RT_TRACE(rtlpriv, COMP_INTR, DBG_TRACE,
@@ -1010,7 +1010,7 @@ static irqreturn_t _rtl_pci_interrupt(int irq, void *dev_id)
 	}
 
 	if (rtlhal->hw_type == HARDWARE_TYPE_RTL8822BE) {
-		if (intd & rtlpriv->cfg->maps[RTL_IMR_H2CDOK]) {
+		if (intvec.intd & rtlpriv->cfg->maps[RTL_IMR_H2CDOK]) {
 			rtlpriv->link_info.num_tx_inperiod++;
 
 			RT_TRACE(rtlpriv, COMP_INTR, DBG_TRACE,
@@ -1020,7 +1020,7 @@ static irqreturn_t _rtl_pci_interrupt(int irq, void *dev_id)
 	}
 
 	if (rtlhal->hw_type == HARDWARE_TYPE_RTL8192SE) {
-		if (inta & rtlpriv->cfg->maps[RTL_IMR_COMDOK]) {
+		if (intvec.inta & rtlpriv->cfg->maps[RTL_IMR_COMDOK]) {
 			rtlpriv->link_info.num_tx_inperiod++;
 
 			RT_TRACE(rtlpriv, COMP_INTR, DBG_TRACE,
@@ -1030,25 +1030,25 @@ static irqreturn_t _rtl_pci_interrupt(int irq, void *dev_id)
 	}
 
 	/*<3> Rx related */
-	if (inta & rtlpriv->cfg->maps[RTL_IMR_ROK]) {
+	if (intvec.inta & rtlpriv->cfg->maps[RTL_IMR_ROK]) {
 		RT_TRACE(rtlpriv, COMP_INTR, DBG_TRACE, "Rx ok interrupt!\n");
 		_rtl_pci_rx_interrupt(hw);
 	}
 
-	if (unlikely(inta & rtlpriv->cfg->maps[RTL_IMR_RDU])) {
+	if (unlikely(intvec.inta & rtlpriv->cfg->maps[RTL_IMR_RDU])) {
 		RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
 			 "rx descriptor unavailable!\n");
 		_rtl_pci_rx_interrupt(hw);
 	}
 
-	if (unlikely(intb & rtlpriv->cfg->maps[RTL_IMR_RXFOVW])) {
+	if (unlikely(intvec.intb & rtlpriv->cfg->maps[RTL_IMR_RXFOVW])) {
 		RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING, "rx overflow !\n");
 		_rtl_pci_rx_interrupt(hw);
 	}
 
 	/*<4> fw related*/
 	if (rtlhal->hw_type == HARDWARE_TYPE_RTL8723AE) {
-		if (inta & rtlpriv->cfg->maps[RTL_IMR_C2HCMD]) {
+		if (intvec.inta & rtlpriv->cfg->maps[RTL_IMR_C2HCMD]) {
 			RT_TRACE(rtlpriv, COMP_INTR, DBG_TRACE,
 				 "firmware interrupt!\n");
 			queue_delayed_work(rtlpriv->works.rtl_wq,
@@ -1064,7 +1064,8 @@ static irqreturn_t _rtl_pci_interrupt(int irq, void *dev_id)
 	 */
 	if (rtlhal->hw_type == HARDWARE_TYPE_RTL8188EE ||
 	    rtlhal->hw_type == HARDWARE_TYPE_RTL8723BE) {
-		if (unlikely(inta & rtlpriv->cfg->maps[RTL_IMR_HSISR_IND])) {
+		if (unlikely(intvec.inta &
+		    rtlpriv->cfg->maps[RTL_IMR_HSISR_IND])) {
 			RT_TRACE(rtlpriv, COMP_INTR, DBG_TRACE,
 				 "hsisr interrupt!\n");
 			_rtl_pci_hs_interrupt(hw);
@@ -1250,7 +1251,6 @@ static int _rtl_pci_init_tx_ring(struct ieee80211_hw *hw,
 
 		rtlpci->tx_ring[prio].cur_tx_rp = 0;
 		rtlpci->tx_ring[prio].cur_tx_wp = 0;
-		rtlpci->tx_ring[prio].avl_desc = entries;
 	}
 
 	/* alloc dma for this ring */
@@ -1555,7 +1555,14 @@ int rtl_pci_reset_trx_ring(struct ieee80211_hw *hw)
 				dev_kfree_skb_irq(skb);
 				ring->idx = (ring->idx + 1) % ring->entries;
 			}
+
+			if (rtlpriv->use_new_trx_flow) {
+				rtlpci->tx_ring[i].cur_tx_rp = 0;
+				rtlpci->tx_ring[i].cur_tx_wp = 0;
+			}
+
 			ring->idx = 0;
+			ring->entries = rtlpci->txringcount[i];
 		}
 	}
 	spin_unlock_irqrestore(&rtlpriv->locks.irq_th_lock, flags);
diff --git a/drivers/net/wireless/realtek/rtlwifi/pci.h b/drivers/net/wireless/realtek/rtlwifi/pci.h
index e7d070e..3fb56c8 100644
--- a/drivers/net/wireless/realtek/rtlwifi/pci.h
+++ b/drivers/net/wireless/realtek/rtlwifi/pci.h
@@ -173,7 +173,6 @@ struct rtl8192_tx_ring {
 	/*add for new trx flow*/
 	struct rtl_tx_buffer_desc *buffer_desc; /*tx buffer descriptor*/
 	dma_addr_t buffer_desc_dma; /*tx bufferd desc dma memory*/
-	u16 avl_desc; /* available_desc_to_write */
 	u16 cur_tx_wp; /* current_tx_write_point */
 	u16 cur_tx_rp; /* current_tx_read_point */
 };
@@ -320,10 +319,10 @@ static inline void pci_write32_async(struct rtl_priv *rtlpriv,
 	writel(val, (u8 __iomem *)rtlpriv->io.pci_mem_start + addr);
 }
 
-static inline u16 calc_fifo_space(u16 rp, u16 wp)
+static inline u16 calc_fifo_space(u16 rp, u16 wp, u16 size)
 {
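+	/* free entries in a ring of @size slots; one slot is kept empty
+	 * so a full ring can be told apart from an empty one
+	 */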
 	if (rp <= wp)
-		return RTL_PCI_MAX_RX_COUNT - 1 + rp - wp;
+		return size - 1 + rp - wp;
 	return rp - wp - 1;
 }
 
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/hw.c
index e30a18e..988d5ac 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/hw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/hw.c
@@ -1472,17 +1472,16 @@ void rtl88ee_card_disable(struct ieee80211_hw *hw)
 }
 
 void rtl88ee_interrupt_recognized(struct ieee80211_hw *hw,
-				  u32 *p_inta, u32 *p_intb,
-				  u32 *p_intc, u32 *p_intd)
+				  struct rtl_int *intvec)
 {
 	struct rtl_priv *rtlpriv = rtl_priv(hw);
 	struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
 
-	*p_inta = rtl_read_dword(rtlpriv, ISR) & rtlpci->irq_mask[0];
-	rtl_write_dword(rtlpriv, ISR, *p_inta);
+	intvec->inta = rtl_read_dword(rtlpriv, ISR) & rtlpci->irq_mask[0];
+	rtl_write_dword(rtlpriv, ISR, intvec->inta);
 
-	*p_intb = rtl_read_dword(rtlpriv, REG_HISRE) & rtlpci->irq_mask[1];
-	rtl_write_dword(rtlpriv, REG_HISRE, *p_intb);
+	intvec->intb = rtl_read_dword(rtlpriv, REG_HISRE) & rtlpci->irq_mask[1];
+	rtl_write_dword(rtlpriv, REG_HISRE, intvec->intb);
 
 }
 
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/hw.h b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/hw.h
index cdf49de..214cd2a 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/hw.h
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/hw.h
@@ -29,8 +29,7 @@
 void rtl88ee_get_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val);
 void rtl88ee_read_eeprom_info(struct ieee80211_hw *hw);
 void rtl88ee_interrupt_recognized(struct ieee80211_hw *hw,
-				  u32 *p_inta, u32 *p_intb,
-				  u32 *p_intc, u32 *p_intd);
+				  struct rtl_int *int_vec);
 int rtl88ee_hw_init(struct ieee80211_hw *hw);
 void rtl88ee_card_disable(struct ieee80211_hw *hw);
 void rtl88ee_enable_interrupt(struct ieee80211_hw *hw);
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/hw.c
index 0f4c86a..4a81e0e 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/hw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/hw.c
@@ -1375,19 +1375,13 @@ void rtl92ce_card_disable(struct ieee80211_hw *hw)
 }
 
 void rtl92ce_interrupt_recognized(struct ieee80211_hw *hw,
-				  u32 *p_inta, u32 *p_intb,
-				  u32 *p_intc, u32 *p_intd)
+				  struct rtl_int *intvec)
 {
 	struct rtl_priv *rtlpriv = rtl_priv(hw);
 	struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
 
-	*p_inta = rtl_read_dword(rtlpriv, ISR) & rtlpci->irq_mask[0];
-	rtl_write_dword(rtlpriv, ISR, *p_inta);
-
-	/*
-	 * *p_intb = rtl_read_dword(rtlpriv, REG_HISRE) & rtlpci->irq_mask[1];
-	 * rtl_write_dword(rtlpriv, ISR + 4, *p_intb);
-	 */
+	intvec->inta = rtl_read_dword(rtlpriv, ISR) & rtlpci->irq_mask[0];
+	rtl_write_dword(rtlpriv, ISR, intvec->inta);
 }
 
 void rtl92ce_set_beacon_related_registers(struct ieee80211_hw *hw)
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/hw.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/hw.h
index b5c8e2f..6711ea1 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/hw.h
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/hw.h
@@ -42,8 +42,7 @@ static inline u8 rtl92c_get_chnl_group(u8 chnl)
 void rtl92ce_get_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val);
 void rtl92ce_read_eeprom_info(struct ieee80211_hw *hw);
 void rtl92ce_interrupt_recognized(struct ieee80211_hw *hw,
-				  u32 *p_inta, u32 *p_intb,
-				  u32 *p_intc, u32 *p_intd);
+				  struct rtl_int *int_vec);
 int rtl92ce_hw_init(struct ieee80211_hw *hw);
 void rtl92ce_card_disable(struct ieee80211_hw *hw);
 void rtl92ce_enable_interrupt(struct ieee80211_hw *hw);
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/hw.c
index 0da6c01..80123fd 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/hw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/hw.c
@@ -1356,19 +1356,13 @@ void rtl92de_card_disable(struct ieee80211_hw *hw)
 }
 
 void rtl92de_interrupt_recognized(struct ieee80211_hw *hw,
-				  u32 *p_inta, u32 *p_intb,
-				  u32 *p_intc, u32 *p_intd)
+				  struct rtl_int *intvec)
 {
 	struct rtl_priv *rtlpriv = rtl_priv(hw);
 	struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
 
-	*p_inta = rtl_read_dword(rtlpriv, ISR) & rtlpci->irq_mask[0];
-	rtl_write_dword(rtlpriv, ISR, *p_inta);
-
-	/*
-	 * *p_intb = rtl_read_dword(rtlpriv, REG_HISRE) & rtlpci->irq_mask[1];
-	 * rtl_write_dword(rtlpriv, ISR + 4, *p_intb);
-	 */
+	intvec->inta = rtl_read_dword(rtlpriv, ISR) & rtlpci->irq_mask[0];
+	rtl_write_dword(rtlpriv, ISR, intvec->inta);
 }
 
 void rtl92de_set_beacon_related_registers(struct ieee80211_hw *hw)
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/hw.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/hw.h
index 9236aa9..e6c702e 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/hw.h
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/hw.h
@@ -29,8 +29,7 @@
 void rtl92de_get_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val);
 void rtl92de_read_eeprom_info(struct ieee80211_hw *hw);
 void rtl92de_interrupt_recognized(struct ieee80211_hw *hw,
-				  u32 *p_inta, u32 *p_intb,
-				  u32 *p_intc, u32 *p_intd);
+				  struct rtl_int *int_vec);
 int rtl92de_hw_init(struct ieee80211_hw *hw);
 void rtl92de_card_disable(struct ieee80211_hw *hw);
 void rtl92de_enable_interrupt(struct ieee80211_hw *hw);
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/hw.c
index fe5da63..fd7928f 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/hw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/hw.c
@@ -1694,17 +1694,16 @@ void rtl92ee_card_disable(struct ieee80211_hw *hw)
 }
 
 void rtl92ee_interrupt_recognized(struct ieee80211_hw *hw,
-				  u32 *p_inta, u32 *p_intb,
-				  u32 *p_intc, u32 *p_intd)
+				  struct rtl_int *intvec)
 {
 	struct rtl_priv *rtlpriv = rtl_priv(hw);
 	struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
 
-	*p_inta = rtl_read_dword(rtlpriv, ISR) & rtlpci->irq_mask[0];
-	rtl_write_dword(rtlpriv, ISR, *p_inta);
+	intvec->inta = rtl_read_dword(rtlpriv, ISR) & rtlpci->irq_mask[0];
+	rtl_write_dword(rtlpriv, ISR, intvec->inta);
 
-	*p_intb = rtl_read_dword(rtlpriv, REG_HISRE) & rtlpci->irq_mask[1];
-	rtl_write_dword(rtlpriv, REG_HISRE, *p_intb);
+	intvec->intb = rtl_read_dword(rtlpriv, REG_HISRE) & rtlpci->irq_mask[1];
+	rtl_write_dword(rtlpriv, REG_HISRE, intvec->intb);
 }
 
 void rtl92ee_set_beacon_related_registers(struct ieee80211_hw *hw)
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/hw.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/hw.h
index cd6d332..3a63bec 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/hw.h
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/hw.h
@@ -29,8 +29,7 @@
 void rtl92ee_get_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val);
 void rtl92ee_read_eeprom_info(struct ieee80211_hw *hw);
 void rtl92ee_interrupt_recognized(struct ieee80211_hw *hw,
-				  u32 *p_inta, u32 *p_intb,
-				  u32 *p_intc, u32 *p_intd);
+				  struct rtl_int *int_vec);
 int rtl92ee_hw_init(struct ieee80211_hw *hw);
 void rtl92ee_card_disable(struct ieee80211_hw *hw);
 void rtl92ee_enable_interrupt(struct ieee80211_hw *hw);
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/trx.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/trx.c
index 1225568..4f74443 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/trx.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/trx.c
@@ -498,7 +498,8 @@ u16 rtl92ee_rx_desc_buff_remained_cnt(struct ieee80211_hw *hw, u8 queue_index)
 	if (!start_rx)
 		return 0;
 
-	remind_cnt = calc_fifo_space(read_point, write_point);
+	remind_cnt = calc_fifo_space(read_point, write_point,
+				     RTL_PCI_MAX_RX_COUNT);
 
 	if (remind_cnt == 0)
 		return 0;
@@ -548,7 +549,6 @@ static u16 get_desc_addr_fr_q_idx(u16 queue_index)
 
 u16 rtl92ee_get_available_desc(struct ieee80211_hw *hw, u8 q_idx)
 {
-	struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
 	struct rtl_priv *rtlpriv = rtl_priv(hw);
 	u16 point_diff = 0;
 	u16 current_tx_read_point = 0, current_tx_write_point = 0;
@@ -560,9 +560,9 @@ u16 rtl92ee_get_available_desc(struct ieee80211_hw *hw, u8 q_idx)
 	current_tx_write_point = (u16)((tmp_4byte) & 0x0fff);
 
 	point_diff = calc_fifo_space(current_tx_read_point,
-				     current_tx_write_point);
+				     current_tx_write_point,
+				     TX_DESC_NUM_92E);
 
-	rtlpci->tx_ring[q_idx].avl_desc = point_diff;
 	return point_diff;
 }
 
@@ -907,10 +907,6 @@ void rtl92ee_set_desc(struct ieee80211_hw *hw, u8 *pdesc, bool istx,
 		      u8 desc_name, u8 *val)
 {
 	struct rtl_priv *rtlpriv = rtl_priv(hw);
-	u16 cur_tx_rp = 0;
-	u16 cur_tx_wp = 0;
-	static bool over_run;
-	u32 tmp = 0;
 	u8 q_idx = *val;
 	bool dma64 = rtlpriv->cfg->mod_params->dma64;
 
@@ -931,38 +927,12 @@ void rtl92ee_set_desc(struct ieee80211_hw *hw, u8 *pdesc, bool istx,
 				return;
 			}
 
+			/* the caller must ensure a tx desc is available */
 			ring->cur_tx_wp = ((ring->cur_tx_wp + 1) % max_tx_desc);
 
-			if (over_run) {
-				ring->cur_tx_wp = 0;
-				over_run = false;
-			}
-			if (ring->avl_desc > 1) {
-				ring->avl_desc--;
-
-				rtl_write_word(rtlpriv,
-					       get_desc_addr_fr_q_idx(q_idx),
-					       ring->cur_tx_wp);
-			}
-
-			if (ring->avl_desc < (max_tx_desc - 15)) {
-				u16 point_diff = 0;
-
-				tmp =
-				  rtl_read_dword(rtlpriv,
-						 get_desc_addr_fr_q_idx(q_idx));
-				cur_tx_rp = (u16)((tmp >> 16) & 0x0fff);
-				cur_tx_wp = (u16)(tmp & 0x0fff);
-
-				ring->cur_tx_wp = cur_tx_wp;
-				ring->cur_tx_rp = cur_tx_rp;
-				point_diff = ((cur_tx_rp > cur_tx_wp) ?
-					      (cur_tx_rp - cur_tx_wp) :
-					      (TX_DESC_NUM_92E - 1 -
-					       cur_tx_wp + cur_tx_rp));
-
-				ring->avl_desc = point_diff;
-			}
+			rtl_write_word(rtlpriv,
+				       get_desc_addr_fr_q_idx(q_idx),
+				       ring->cur_tx_wp);
 		}
 		break;
 		}
@@ -1044,13 +1014,12 @@ bool rtl92ee_is_tx_desc_closed(struct ieee80211_hw *hw, u8 hw_queue, u16 index)
 {
 	struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
 	struct rtl_priv *rtlpriv = rtl_priv(hw);
-	u16 read_point, write_point, available_desc_num;
+	u16 read_point, write_point;
 	bool ret = false;
 	static u8 stop_report_cnt;
 	struct rtl8192_tx_ring *ring = &rtlpci->tx_ring[hw_queue];
 
 	{
-		u16 point_diff = 0;
 		u16 cur_tx_rp, cur_tx_wp;
 		u32 tmpu32 = 0;
 
@@ -1060,18 +1029,12 @@ bool rtl92ee_is_tx_desc_closed(struct ieee80211_hw *hw, u8 hw_queue, u16 index)
 		cur_tx_rp = (u16)((tmpu32 >> 16) & 0x0fff);
 		cur_tx_wp = (u16)(tmpu32 & 0x0fff);
 
-		ring->cur_tx_wp = cur_tx_wp;
+		/* ring->cur_tx_wp does not need to be refreshed here */
 		ring->cur_tx_rp = cur_tx_rp;
-		point_diff = ((cur_tx_rp > cur_tx_wp) ?
-			      (cur_tx_rp - cur_tx_wp) :
-			      (TX_DESC_NUM_92E - cur_tx_wp + cur_tx_rp));
-
-		ring->avl_desc = point_diff;
 	}
 
 	read_point = ring->cur_tx_rp;
 	write_point = ring->cur_tx_wp;
-	available_desc_num = ring->avl_desc;
 
 	if (write_point > read_point) {
 		if (index < write_point && index >= read_point)
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/hw.c
index 76bf089..30dea7b 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/hw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/hw.c
@@ -1558,17 +1558,17 @@ void rtl92se_card_disable(struct ieee80211_hw *hw)
 	udelay(100);
 }
 
-void rtl92se_interrupt_recognized(struct ieee80211_hw *hw, u32 *p_inta,
-			     u32 *p_intb, u32 *p_intc, u32 *p_intd)
+void rtl92se_interrupt_recognized(struct ieee80211_hw *hw,
+				  struct rtl_int *intvec)
 {
 	struct rtl_priv *rtlpriv = rtl_priv(hw);
 	struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
 
-	*p_inta = rtl_read_dword(rtlpriv, ISR) & rtlpci->irq_mask[0];
-	rtl_write_dword(rtlpriv, ISR, *p_inta);
+	intvec->inta = rtl_read_dword(rtlpriv, ISR) & rtlpci->irq_mask[0];
+	rtl_write_dword(rtlpriv, ISR, intvec->inta);
 
-	*p_intb = rtl_read_dword(rtlpriv, ISR + 4) & rtlpci->irq_mask[1];
-	rtl_write_dword(rtlpriv, ISR + 4, *p_intb);
+	intvec->intb = rtl_read_dword(rtlpriv, ISR + 4) & rtlpci->irq_mask[1];
+	rtl_write_dword(rtlpriv, ISR + 4, intvec->intb);
 }
 
 void rtl92se_set_beacon_related_registers(struct ieee80211_hw *hw)
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/hw.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/hw.h
index 6070560..fa836ce 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/hw.h
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/hw.h
@@ -42,8 +42,7 @@ void rtl92se_get_hw_reg(struct ieee80211_hw *hw,
 			u8 variable, u8 *val);
 void rtl92se_read_eeprom_info(struct ieee80211_hw *hw);
 void rtl92se_interrupt_recognized(struct ieee80211_hw *hw,
-				  u32 *p_inta, u32 *p_intb,
-				  u32 *p_intc, u32 *p_intd);
+				  struct rtl_int *int_vec);
 int rtl92se_hw_init(struct ieee80211_hw *hw);
 void rtl92se_card_disable(struct ieee80211_hw *hw);
 void rtl92se_enable_interrupt(struct ieee80211_hw *hw);
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/hw.c
index c3f98d5..545115d 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/hw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/hw.c
@@ -1340,14 +1340,13 @@ void rtl8723e_card_disable(struct ieee80211_hw *hw)
 }
 
 void rtl8723e_interrupt_recognized(struct ieee80211_hw *hw,
-				   u32 *p_inta, u32 *p_intb,
-				   u32 *p_intc, u32 *p_intd)
+				   struct rtl_int *intvec)
 {
 	struct rtl_priv *rtlpriv = rtl_priv(hw);
 	struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
 
-	*p_inta = rtl_read_dword(rtlpriv, 0x3a0) & rtlpci->irq_mask[0];
-	rtl_write_dword(rtlpriv, 0x3a0, *p_inta);
+	intvec->inta = rtl_read_dword(rtlpriv, 0x3a0) & rtlpci->irq_mask[0];
+	rtl_write_dword(rtlpriv, 0x3a0, intvec->inta);
 }
 
 void rtl8723e_set_beacon_related_registers(struct ieee80211_hw *hw)
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/hw.h b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/hw.h
index 19e467a..c76e453 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/hw.h
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/hw.h
@@ -34,8 +34,7 @@ void rtl8723e_get_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val);
 void rtl8723e_read_eeprom_info(struct ieee80211_hw *hw);
 
 void rtl8723e_interrupt_recognized(struct ieee80211_hw *hw,
-				   u32 *p_inta, u32 *p_intb,
-				   u32 *p_intc, u32 *p_intd);
+				   struct rtl_int *int_vec);
 int rtl8723e_hw_init(struct ieee80211_hw *hw);
 void rtl8723e_card_disable(struct ieee80211_hw *hw);
 void rtl8723e_enable_interrupt(struct ieee80211_hw *hw);
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/hw.c
index 7cd1ffa..f9ccd13 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/hw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/hw.c
@@ -43,6 +43,7 @@
 #include "../pwrseqcmd.h"
 #include "pwrseq.h"
 #include "../btcoexist/rtl_btc.h"
+#include <linux/kernel.h>
 
 #define LLT_CONFIG	5
 
@@ -1682,18 +1683,17 @@ void rtl8723be_card_disable(struct ieee80211_hw *hw)
 }
 
 void rtl8723be_interrupt_recognized(struct ieee80211_hw *hw,
-				    u32 *p_inta, u32 *p_intb,
-				    u32 *p_intc, u32 *p_intd)
+				    struct rtl_int *intvec)
 {
 	struct rtl_priv *rtlpriv = rtl_priv(hw);
 	struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
 
-	*p_inta = rtl_read_dword(rtlpriv, ISR) & rtlpci->irq_mask[0];
-	rtl_write_dword(rtlpriv, ISR, *p_inta);
+	intvec->inta = rtl_read_dword(rtlpriv, ISR) & rtlpci->irq_mask[0];
+	rtl_write_dword(rtlpriv, ISR, intvec->inta);
 
-	*p_intb = rtl_read_dword(rtlpriv, REG_HISRE) &
-					rtlpci->irq_mask[1];
-	rtl_write_dword(rtlpriv, REG_HISRE, *p_intb);
+	intvec->intb = rtl_read_dword(rtlpriv, REG_HISRE) &
+				      rtlpci->irq_mask[1];
+	rtl_write_dword(rtlpriv, REG_HISRE, intvec->intb);
 }
 
 void rtl8723be_set_beacon_related_registers(struct ieee80211_hw *hw)
@@ -2127,28 +2127,28 @@ static void _rtl8723be_read_adapter_info(struct ieee80211_hw *hw,
 
 	if (rtlhal->oem_id == RT_CID_DEFAULT) {
 		/* Does this one have a Toshiba SMID from group 1? */
-		for (i = 0; i < sizeof(toshiba_smid1) / sizeof(u16); i++) {
+		for (i = 0; i < ARRAY_SIZE(toshiba_smid1); i++) {
 			if (rtlefuse->eeprom_smid == toshiba_smid1[i]) {
 				is_toshiba_smid1 = true;
 				break;
 			}
 		}
 		/* Does this one have a Toshiba SMID from group 2? */
-		for (i = 0; i < sizeof(toshiba_smid2) / sizeof(u16); i++) {
+		for (i = 0; i < ARRAY_SIZE(toshiba_smid2); i++) {
 			if (rtlefuse->eeprom_smid == toshiba_smid2[i]) {
 				is_toshiba_smid2 = true;
 				break;
 			}
 		}
 		/* Does this one have a Samsung SMID? */
-		for (i = 0; i < sizeof(samsung_smid) / sizeof(u16); i++) {
+		for (i = 0; i < ARRAY_SIZE(samsung_smid); i++) {
 			if (rtlefuse->eeprom_smid == samsung_smid[i]) {
 				is_samsung_smid = true;
 				break;
 			}
 		}
 		/* Does this one have a Lenovo SMID? */
-		for (i = 0; i < sizeof(lenovo_smid) / sizeof(u16); i++) {
+		for (i = 0; i < ARRAY_SIZE(lenovo_smid); i++) {
 			if (rtlefuse->eeprom_smid == lenovo_smid[i]) {
 				is_lenovo_smid = true;
 				break;
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/hw.h b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/hw.h
index 2215a79..ae856a1 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/hw.h
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/hw.h
@@ -30,8 +30,7 @@ void rtl8723be_get_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val);
 void rtl8723be_read_eeprom_info(struct ieee80211_hw *hw);
 
 void rtl8723be_interrupt_recognized(struct ieee80211_hw *hw,
-				    u32 *p_inta, u32 *p_intb,
-				    u32 *p_intc, u32 *p_intd);
+				    struct rtl_int *int_vec);
 int rtl8723be_hw_init(struct ieee80211_hw *hw);
 void rtl8723be_card_disable(struct ieee80211_hw *hw);
 void rtl8723be_enable_interrupt(struct ieee80211_hw *hw);
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/phy.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/phy.c
index 9606641..1263b12 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/phy.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/phy.c
@@ -35,6 +35,7 @@
 #include "../rtl8723com/dm_common.h"
 #include "table.h"
 #include "trx.h"
+#include <linux/kernel.h>
 
 static bool _rtl8723be_phy_bb8723b_config_parafile(struct ieee80211_hw *hw);
 static bool _rtl8723be_phy_config_mac_with_headerfile(struct ieee80211_hw *hw);
@@ -1143,14 +1144,13 @@ void rtl8723be_phy_set_txpower_level(struct ieee80211_hw *hw, u8 channel)
 			     DESC92C_RATEMCS2, DESC92C_RATEMCS3,
 			     DESC92C_RATEMCS4, DESC92C_RATEMCS5,
 			     DESC92C_RATEMCS6, DESC92C_RATEMCS7};
-	u8 i, size;
+	u8 i;
 	u8 power_index;
 
 	if (!rtlefuse->txpwr_fromeprom)
 		return;
 
-	size = sizeof(cck_rates) / sizeof(u8);
-	for (i = 0; i < size; i++) {
+	for (i = 0; i < ARRAY_SIZE(cck_rates); i++) {
 		power_index = _rtl8723be_get_txpower_index(hw, RF90_PATH_A,
 					cck_rates[i],
 					rtl_priv(hw)->phy.current_chan_bw,
@@ -1158,8 +1158,7 @@ void rtl8723be_phy_set_txpower_level(struct ieee80211_hw *hw, u8 channel)
 		_rtl8723be_phy_set_txpower_index(hw, power_index, RF90_PATH_A,
 						 cck_rates[i]);
 	}
-	size = sizeof(ofdm_rates) / sizeof(u8);
-	for (i = 0; i < size; i++) {
+	for (i = 0; i < ARRAY_SIZE(ofdm_rates); i++) {
 		power_index = _rtl8723be_get_txpower_index(hw, RF90_PATH_A,
 					ofdm_rates[i],
 					rtl_priv(hw)->phy.current_chan_bw,
@@ -1167,8 +1166,7 @@ void rtl8723be_phy_set_txpower_level(struct ieee80211_hw *hw, u8 channel)
 		_rtl8723be_phy_set_txpower_index(hw, power_index, RF90_PATH_A,
 						 ofdm_rates[i]);
 	}
-	size = sizeof(ht_rates_1t) / sizeof(u8);
-	for (i = 0; i < size; i++) {
+	for (i = 0; i < ARRAY_SIZE(ht_rates_1t); i++) {
 		power_index = _rtl8723be_get_txpower_index(hw, RF90_PATH_A,
 					ht_rates_1t[i],
 					rtl_priv(hw)->phy.current_chan_bw,
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/table.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/table.c
index 381c16b..160fee8 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/table.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/table.c
@@ -25,6 +25,7 @@
  *
  *****************************************************************************/
 
+#include <linux/kernel.h>
 #include "table.h"
 
 u32 RTL8723BEPHY_REG_1TARRAY[] = {
@@ -224,8 +225,7 @@ u32 RTL8723BEPHY_REG_1TARRAY[] = {
 
 };
 
-u32 RTL8723BEPHY_REG_1TARRAYLEN =
-	sizeof(RTL8723BEPHY_REG_1TARRAY) / sizeof(u32);
+u32 RTL8723BEPHY_REG_1TARRAYLEN = ARRAY_SIZE(RTL8723BEPHY_REG_1TARRAY);
 
 u32 RTL8723BEPHY_REG_ARRAY_PG[] = {
 	0, 0, 0, 0x00000e08, 0x0000ff00, 0x00003800,
@@ -236,8 +236,7 @@ u32 RTL8723BEPHY_REG_ARRAY_PG[] = {
 	0, 0, 0, 0x00000e14, 0xffffffff, 0x26303436
 };
 
-u32 RTL8723BEPHY_REG_ARRAY_PGLEN =
-		sizeof(RTL8723BEPHY_REG_ARRAY_PG) / sizeof(u32);
+u32 RTL8723BEPHY_REG_ARRAY_PGLEN = ARRAY_SIZE(RTL8723BEPHY_REG_ARRAY_PG);
 
 u32 RTL8723BE_RADIOA_1TARRAY[] = {
 		0x000, 0x00010000,
@@ -373,8 +372,7 @@ u32 RTL8723BE_RADIOA_1TARRAY[] = {
 
 };
 
-u32 RTL8723BE_RADIOA_1TARRAYLEN =
-	sizeof(RTL8723BE_RADIOA_1TARRAY) / sizeof(u32);
+u32 RTL8723BE_RADIOA_1TARRAYLEN = ARRAY_SIZE(RTL8723BE_RADIOA_1TARRAY);
 
 u32 RTL8723BEMAC_1T_ARRAY[] = {
 		0x02F, 0x00000030,
@@ -483,7 +481,7 @@ u32 RTL8723BEMAC_1T_ARRAY[] = {
 
 };
 
-u32 RTL8723BEMAC_1T_ARRAYLEN = sizeof(RTL8723BEMAC_1T_ARRAY) / sizeof(u32);
+u32 RTL8723BEMAC_1T_ARRAYLEN = ARRAY_SIZE(RTL8723BEMAC_1T_ARRAY);
 
 u32 RTL8723BEAGCTAB_1TARRAY[] = {
 		0xC78, 0xFD000001,
@@ -620,4 +618,4 @@ u32 RTL8723BEAGCTAB_1TARRAY[] = {
 
 };
 
-u32 RTL8723BEAGCTAB_1TARRAYLEN = sizeof(RTL8723BEAGCTAB_1TARRAY) / sizeof(u32);
+u32 RTL8723BEAGCTAB_1TARRAYLEN = ARRAY_SIZE(RTL8723BEAGCTAB_1TARRAY);
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c
index 43e18c4..f20e77b 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c
@@ -2483,17 +2483,16 @@ void rtl8821ae_card_disable(struct ieee80211_hw *hw)
 }
 
 void rtl8821ae_interrupt_recognized(struct ieee80211_hw *hw,
-				    u32 *p_inta, u32 *p_intb,
-				    u32 *p_intc, u32 *p_intd)
+				    struct rtl_int *intvec)
 {
 	struct rtl_priv *rtlpriv = rtl_priv(hw);
 	struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
 
-	*p_inta = rtl_read_dword(rtlpriv, ISR) & rtlpci->irq_mask[0];
-	rtl_write_dword(rtlpriv, ISR, *p_inta);
+	intvec->inta = rtl_read_dword(rtlpriv, ISR) & rtlpci->irq_mask[0];
+	rtl_write_dword(rtlpriv, ISR, intvec->inta);
 
-	*p_intb = rtl_read_dword(rtlpriv, REG_HISRE) & rtlpci->irq_mask[1];
-	rtl_write_dword(rtlpriv, REG_HISRE, *p_intb);
+	intvec->intb = rtl_read_dword(rtlpriv, REG_HISRE) & rtlpci->irq_mask[1];
+	rtl_write_dword(rtlpriv, REG_HISRE, intvec->intb);
 }
 
 void rtl8821ae_set_beacon_related_registers(struct ieee80211_hw *hw)
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.h b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.h
index 284d259..e2ab783 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.h
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.h
@@ -30,8 +30,7 @@ void rtl8821ae_get_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val);
 void rtl8821ae_read_eeprom_info(struct ieee80211_hw *hw);
 
 void rtl8821ae_interrupt_recognized(struct ieee80211_hw *hw,
-				    u32 *p_inta, u32 *p_intb,
-				    u32 *p_intc, u32 *p_intd);
+				    struct rtl_int *int_vec);
 int rtl8821ae_hw_init(struct ieee80211_hw *hw);
 void rtl8821ae_card_disable(struct ieee80211_hw *hw);
 void rtl8821ae_enable_interrupt(struct ieee80211_hw *hw);
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/table.c b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/table.c
index 408c461..f87f9d0 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/table.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/table.c
@@ -24,7 +24,7 @@
  * Larry Finger <Larry.Finger@lwfinger.net>
  *
  *****************************************************************************/
-
+#include <linux/kernel.h>
 #include "table.h"
 u32 RTL8812AE_PHY_REG_ARRAY[] = {
 		0x800, 0x8020D010,
@@ -258,8 +258,7 @@ u32 RTL8812AE_PHY_REG_ARRAY[] = {
 		0xEB8, 0x00508242,
 };
 
-u32 RTL8812AE_PHY_REG_1TARRAYLEN =
-	sizeof(RTL8812AE_PHY_REG_ARRAY) / sizeof(u32);
+u32 RTL8812AE_PHY_REG_1TARRAYLEN = ARRAY_SIZE(RTL8812AE_PHY_REG_ARRAY);
 
 u32 RTL8821AE_PHY_REG_ARRAY[] = {
 	0x800, 0x0020D090,
@@ -436,8 +435,7 @@ u32 RTL8821AE_PHY_REG_ARRAY[] = {
 	0xCB8, 0x00508240,
 };
 
-u32 RTL8821AE_PHY_REG_1TARRAYLEN =
-	sizeof(RTL8821AE_PHY_REG_ARRAY) / sizeof(u32);
+u32 RTL8821AE_PHY_REG_1TARRAYLEN = ARRAY_SIZE(RTL8821AE_PHY_REG_ARRAY);
 
 u32 RTL8812AE_PHY_REG_ARRAY_PG[] = {
 	0, 0, 0, 0x00000c20, 0xffffffff, 0x34363840,
@@ -488,8 +486,7 @@ u32 RTL8812AE_PHY_REG_ARRAY_PG[] = {
 	1, 1, 1, 0x00000e4c, 0xffffffff, 0x22242628
 };
 
-u32 RTL8812AE_PHY_REG_ARRAY_PGLEN =
-		sizeof(RTL8812AE_PHY_REG_ARRAY_PG) / sizeof(u32);
+u32 RTL8812AE_PHY_REG_ARRAY_PGLEN = ARRAY_SIZE(RTL8812AE_PHY_REG_ARRAY_PG);
 
 u32 RTL8821AE_PHY_REG_ARRAY_PG[] = {
 	0, 0, 0, 0x00000c20, 0xffffffff, 0x32343638,
@@ -509,8 +506,7 @@ u32 RTL8821AE_PHY_REG_ARRAY_PG[] = {
 	1, 0, 0, 0x00000c44, 0x0000ffff, 0x00002022
 };
 
-u32 RTL8821AE_PHY_REG_ARRAY_PGLEN =
-		sizeof(RTL8821AE_PHY_REG_ARRAY_PG) / sizeof(u32);
+u32 RTL8821AE_PHY_REG_ARRAY_PGLEN = ARRAY_SIZE(RTL8821AE_PHY_REG_ARRAY_PG);
 
 u32 RTL8812AE_RADIOA_ARRAY[] = {
 		0x000, 0x00010000,
@@ -927,7 +923,7 @@ u32 RTL8812AE_RADIOA_ARRAY[] = {
 		0x018, 0x0001712A,
 };
 
-u32 RTL8812AE_RADIOA_1TARRAYLEN = sizeof(RTL8812AE_RADIOA_ARRAY) / sizeof(u32);
+u32 RTL8812AE_RADIOA_1TARRAYLEN = ARRAY_SIZE(RTL8812AE_RADIOA_ARRAY);
 
 u32 RTL8812AE_RADIOB_ARRAY[] = {
 		0x056, 0x00051CF2,
@@ -1335,7 +1331,7 @@ u32 RTL8812AE_RADIOB_ARRAY[] = {
 		0x008, 0x00008400,
 };
 
-u32 RTL8812AE_RADIOB_1TARRAYLEN = sizeof(RTL8812AE_RADIOB_ARRAY) / sizeof(u32);
+u32 RTL8812AE_RADIOB_1TARRAYLEN = ARRAY_SIZE(RTL8812AE_RADIOB_ARRAY);
 
 u32 RTL8821AE_RADIOA_ARRAY[] = {
 		0x018, 0x0001712A,
@@ -1929,7 +1925,7 @@ u32 RTL8821AE_RADIOA_ARRAY[] = {
 
 };
 
-u32 RTL8821AE_RADIOA_1TARRAYLEN = sizeof(RTL8821AE_RADIOA_ARRAY) / sizeof(u32);
+u32 RTL8821AE_RADIOA_1TARRAYLEN = ARRAY_SIZE(RTL8821AE_RADIOA_ARRAY);
 
 u32 RTL8812AE_MAC_REG_ARRAY[] = {
 		0x010, 0x0000000C,
@@ -2041,7 +2037,7 @@ u32 RTL8812AE_MAC_REG_ARRAY[] = {
 		0x718, 0x00000040,
 };
 
-u32 RTL8812AE_MAC_1T_ARRAYLEN = sizeof(RTL8812AE_MAC_REG_ARRAY) / sizeof(u32);
+u32 RTL8812AE_MAC_1T_ARRAYLEN = ARRAY_SIZE(RTL8812AE_MAC_REG_ARRAY);
 
 u32 RTL8821AE_MAC_REG_ARRAY[] = {
 		0x428, 0x0000000A,
@@ -2143,7 +2139,7 @@ u32 RTL8821AE_MAC_REG_ARRAY[] = {
 		0x718, 0x00000040,
 };
 
-u32 RTL8821AE_MAC_1T_ARRAYLEN = sizeof(RTL8821AE_MAC_REG_ARRAY) / sizeof(u32);
+u32 RTL8821AE_MAC_1T_ARRAYLEN = ARRAY_SIZE(RTL8821AE_MAC_REG_ARRAY);
 
 u32 RTL8812AE_AGC_TAB_ARRAY[] = {
 	0x80000001, 0x00000000, 0x40000000, 0x00000000,
@@ -2479,8 +2475,7 @@ u32 RTL8812AE_AGC_TAB_ARRAY[] = {
 		0xE50, 0x00000020,
 };
 
-u32 RTL8812AE_AGC_TAB_1TARRAYLEN =
-	sizeof(RTL8812AE_AGC_TAB_ARRAY) / sizeof(u32);
+u32 RTL8812AE_AGC_TAB_1TARRAYLEN = ARRAY_SIZE(RTL8812AE_AGC_TAB_ARRAY);
 
 u32 RTL8821AE_AGC_TAB_ARRAY[] = {
 		0x81C, 0xBF000001,
@@ -2676,8 +2671,7 @@ u32 RTL8821AE_AGC_TAB_ARRAY[] = {
 		0xC50, 0x00000020,
 };
 
-u32 RTL8821AE_AGC_TAB_1TARRAYLEN =
-	sizeof(RTL8821AE_AGC_TAB_ARRAY) / sizeof(u32);
+u32 RTL8821AE_AGC_TAB_1TARRAYLEN = ARRAY_SIZE(RTL8821AE_AGC_TAB_ARRAY);
 
 /******************************************************************************
 *                           TXPWR_LMT.TXT
@@ -3250,7 +3244,7 @@ u8 *RTL8812AE_TXPWR_LMT[] = {
 	"MKK", "5G", "80M", "VHT", "2T", "155", "63"
 };
 
-u32 RTL8812AE_TXPWR_LMT_ARRAY_LEN = sizeof(RTL8812AE_TXPWR_LMT) / sizeof(u8 *);
+u32 RTL8812AE_TXPWR_LMT_ARRAY_LEN = ARRAY_SIZE(RTL8812AE_TXPWR_LMT);
 
 u8 *RTL8821AE_TXPWR_LMT[] = {
 	"FCC", "2.4G", "20M", "CCK", "1T", "01", "32",
@@ -3819,4 +3813,4 @@ u8 *RTL8821AE_TXPWR_LMT[] = {
 	"MKK", "5G", "80M", "VHT", "2T", "155", "63"
 };
 
-u32 RTL8821AE_TXPWR_LMT_ARRAY_LEN = sizeof(RTL8821AE_TXPWR_LMT) / sizeof(u8 *);
+u32 RTL8821AE_TXPWR_LMT_ARRAY_LEN = ARRAY_SIZE(RTL8821AE_TXPWR_LMT);
diff --git a/drivers/net/wireless/realtek/rtlwifi/wifi.h b/drivers/net/wireless/realtek/rtlwifi/wifi.h
index 92d4859..e2b1479 100644
--- a/drivers/net/wireless/realtek/rtlwifi/wifi.h
+++ b/drivers/net/wireless/realtek/rtlwifi/wifi.h
@@ -2093,14 +2093,21 @@ struct rtl_wow_pattern {
 	u32 mask[4];
 };
 
+/* struct to store contents of interrupt vectors */
+struct rtl_int {
+	u32 inta;
+	u32 intb;
+	u32 intc;
+	u32 intd;
+};
+
 struct rtl_hal_ops {
 	int (*init_sw_vars) (struct ieee80211_hw *hw);
 	void (*deinit_sw_vars) (struct ieee80211_hw *hw);
 	void (*read_chip_version)(struct ieee80211_hw *hw);
 	void (*read_eeprom_info) (struct ieee80211_hw *hw);
 	void (*interrupt_recognized) (struct ieee80211_hw *hw,
-				      u32 *p_inta, u32 *p_intb,
-				      u32 *p_intc, u32 *p_intd);
+				      struct rtl_int *intvec);
 	int (*hw_init) (struct ieee80211_hw *hw);
 	void (*hw_disable) (struct ieee80211_hw *hw);
 	void (*hw_suspend) (struct ieee80211_hw *hw);
diff --git a/drivers/net/wireless/ti/wl1251/init.c b/drivers/net/wireless/ti/wl1251/init.c
index 1d799bf..e7d77ac 100644
--- a/drivers/net/wireless/ti/wl1251/init.c
+++ b/drivers/net/wireless/ti/wl1251/init.c
@@ -310,10 +310,8 @@ static int wl1251_hw_init_data_path_config(struct wl1251 *wl)
 	/* asking for the data path parameters */
 	wl->data_path = kzalloc(sizeof(struct acx_data_path_params_resp),
 				GFP_KERNEL);
-	if (!wl->data_path) {
-		wl1251_error("Couldnt allocate data path parameters");
+	if (!wl->data_path)
 		return -ENOMEM;
-	}
 
 	ret = wl1251_acx_data_path_params(wl, wl->data_path);
 	if (ret < 0) {
diff --git a/drivers/net/wireless/ti/wlcore/acx.c b/drivers/net/wireless/ti/wlcore/acx.c
index a485999..3ca9167 100644
--- a/drivers/net/wireless/ti/wlcore/acx.c
+++ b/drivers/net/wireless/ti/wlcore/acx.c
@@ -146,7 +146,7 @@ int wl1271_acx_feature_cfg(struct wl1271 *wl, struct wl12xx_vif *wlvif)
 	ret = wl1271_cmd_configure(wl, ACX_FEATURE_CFG,
 				   feature, sizeof(*feature));
 	if (ret < 0) {
-		wl1271_error("Couldnt set HW encryption");
+		wl1271_error("Couldn't set HW encryption");
 		goto out;
 	}
 
diff --git a/drivers/net/wireless/ti/wlcore/acx.h b/drivers/net/wireless/ti/wlcore/acx.h
index f46d7fd..7011c5d959 100644
--- a/drivers/net/wireless/ti/wlcore/acx.h
+++ b/drivers/net/wireless/ti/wlcore/acx.h
@@ -1129,10 +1129,8 @@ int wl12xx_acx_config_hangover(struct wl1271 *wl);
 int wlcore_acx_average_rssi(struct wl1271 *wl, struct wl12xx_vif *wlvif,
 			    s8 *avg_rssi);
 
-#ifdef CONFIG_PM
 int wl1271_acx_default_rx_filter_enable(struct wl1271 *wl, bool enable,
 					enum rx_filter_action action);
 int wl1271_acx_set_rx_filter(struct wl1271 *wl, u8 index, bool enable,
 			     struct wl12xx_rx_filter *filter);
-#endif /* CONFIG_PM */
 #endif /* __WL1271_ACX_H__ */
diff --git a/drivers/net/wireless/ti/wlcore/main.c b/drivers/net/wireless/ti/wlcore/main.c
index d47921a..0971403 100644
--- a/drivers/net/wireless/ti/wlcore/main.c
+++ b/drivers/net/wireless/ti/wlcore/main.c
@@ -42,6 +42,7 @@
 #include "sysfs.h"
 
 #define WL1271_BOOT_RETRIES 3
+#define WL1271_SUSPEND_SLEEP 100
 
 static char *fwlog_param;
 static int fwlog_mem_blocks = -1;
@@ -388,7 +389,6 @@ static void wl12xx_irq_update_links_status(struct wl1271 *wl,
 static int wlcore_fw_status(struct wl1271 *wl, struct wl_fw_status *status)
 {
 	struct wl12xx_vif *wlvif;
-	struct timespec ts;
 	u32 old_tx_blk_count = wl->tx_blocks_available;
 	int avail, freed_blocks;
 	int i;
@@ -485,8 +485,7 @@ static int wlcore_fw_status(struct wl1271 *wl, struct wl_fw_status *status)
 	}
 
 	/* update the host-chipset time offset */
-	getnstimeofday(&ts);
-	wl->time_offset = (timespec_to_ns(&ts) >> 10) -
+	wl->time_offset = (ktime_get_boot_ns() >> 10) -
 		(s64)(status->fw_localtime);
 
 	wl->fw_fast_lnk_map = status->link_fast_bitmap;
@@ -979,6 +978,24 @@ static int wlcore_fw_wakeup(struct wl1271 *wl)
 	return wlcore_raw_write32(wl, HW_ACCESS_ELP_CTRL_REG, ELPCTRL_WAKE_UP);
 }
 
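+/*
+ * Ask the firmware to enter ELP (low power) as the final step of
+ * suspend, then give the write time to take effect.
+ */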
+static int wlcore_fw_sleep(struct wl1271 *wl)
+{
+	int ret;
+
+	mutex_lock(&wl->mutex);
+	ret = wlcore_raw_write32(wl, HW_ACCESS_ELP_CTRL_REG, ELPCTRL_SLEEP);
+	if (ret < 0) {
+		wl12xx_queue_recovery_work(wl);
+		goto out;
+	}
+	set_bit(WL1271_FLAG_IN_ELP, &wl->flags);
+out:
+	mutex_unlock(&wl->mutex);
+	mdelay(WL1271_SUSPEND_SLEEP);
+
+	return 0;
+}
+
 static int wl1271_setup(struct wl1271 *wl)
 {
 	wl->raw_fw_status = kzalloc(wl->fw_status_len, GFP_KERNEL);
@@ -1326,7 +1343,6 @@ static struct sk_buff *wl12xx_alloc_dummy_packet(struct wl1271 *wl)
 }
 
 
-#ifdef CONFIG_PM
 static int
 wl1271_validate_wowlan_pattern(struct cfg80211_pkt_pattern *p)
 {
@@ -1698,8 +1714,8 @@ static void wl1271_configure_resume(struct wl1271 *wl, struct wl12xx_vif *wlvif)
 	}
 }
 
-static int wl1271_op_suspend(struct ieee80211_hw *hw,
-			    struct cfg80211_wowlan *wow)
+static int __maybe_unused wl1271_op_suspend(struct ieee80211_hw *hw,
+					    struct cfg80211_wowlan *wow)
 {
 	struct wl1271 *wl = hw->priv;
 	struct wl12xx_vif *wlvif;
@@ -1749,7 +1765,6 @@ static int wl1271_op_suspend(struct ieee80211_hw *hw,
 		goto out_sleep;
 
 out_sleep:
-	wl1271_ps_elp_sleep(wl);
 	mutex_unlock(&wl->mutex);
 
 	if (ret < 0) {
@@ -1782,10 +1797,19 @@ static int wl1271_op_suspend(struct ieee80211_hw *hw,
 	 */
 	cancel_delayed_work(&wl->tx_watchdog_work);
 
+	/*
+	 * Use an immediate call so the firmware can enter power save
+	 * during suspend.
+	 * When this last write went through a workqueue, it only ran on
+	 * resume, leaving the firmware with power save disabled during
+	 * suspend and consuming full power throughout wowlan suspend.
+	 */
+	wlcore_fw_sleep(wl);
+
 	return 0;
 }
 
-static int wl1271_op_resume(struct ieee80211_hw *hw)
+static int __maybe_unused wl1271_op_resume(struct ieee80211_hw *hw)
 {
 	struct wl1271 *wl = hw->priv;
 	struct wl12xx_vif *wlvif;
@@ -1869,7 +1893,6 @@ static int wl1271_op_resume(struct ieee80211_hw *hw)
 
 	return 0;
 }
-#endif
 
 static int wl1271_op_start(struct ieee80211_hw *hw)
 {
diff --git a/drivers/net/wireless/ti/wlcore/tx.c b/drivers/net/wireless/ti/wlcore/tx.c
index a3f5e9c..00e9b46 100644
--- a/drivers/net/wireless/ti/wlcore/tx.c
+++ b/drivers/net/wireless/ti/wlcore/tx.c
@@ -264,7 +264,6 @@ static void wl1271_tx_fill_hdr(struct wl1271 *wl, struct wl12xx_vif *wlvif,
 			       struct sk_buff *skb, u32 extra,
 			       struct ieee80211_tx_info *control, u8 hlid)
 {
-	struct timespec ts;
 	struct wl1271_tx_hw_descr *desc;
 	int ac, rate_idx;
 	s64 hosttime;
@@ -287,8 +286,7 @@ static void wl1271_tx_fill_hdr(struct wl1271 *wl, struct wl12xx_vif *wlvif,
 	}
 
 	/* configure packet life time */
-	getnstimeofday(&ts);
-	hosttime = (timespec_to_ns(&ts) >> 10);
+	hosttime = (ktime_get_boot_ns() >> 10);
 	desc->start_time = cpu_to_le32(hosttime - wl->time_offset);
 
 	is_dummy = wl12xx_is_dummy_packet(wl, skb);
diff --git a/drivers/of/of_mdio.c b/drivers/of/of_mdio.c
index a327be1..8c0c927 100644
--- a/drivers/of/of_mdio.c
+++ b/drivers/of/of_mdio.c
@@ -77,6 +77,11 @@ static int of_mdiobus_register_phy(struct mii_bus *mdio,
 	if (of_property_read_bool(child, "broken-turn-around"))
 		mdio->phy_ignore_ta_mask |= 1 << addr;
 
+	of_property_read_u32(child, "reset-assert-us",
+			     &phy->mdio.reset_assert_delay);
+	of_property_read_u32(child, "reset-deassert-us",
+			     &phy->mdio.reset_deassert_delay);
+
 	/* Associate the OF node with the device structure so it
 	 * can be looked up later */
 	of_node_get(child);
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
index 4a7c686..764ca7b 100644
--- a/drivers/pci/pci.c
+++ b/drivers/pci/pci.c
@@ -1458,6 +1458,7 @@ struct pci_devres {
 	unsigned int pinned:1;
 	unsigned int orig_intx:1;
 	unsigned int restore_intx:1;
+	unsigned int mwi:1;
 	u32 region_mask;
 };
 
@@ -1476,6 +1477,9 @@ static void pcim_release(struct device *gendev, void *res)
 		if (this->region_mask & (1 << i))
 			pci_release_region(dev, i);
 
+	if (this->mwi)
+		pci_clear_mwi(dev);
+
 	if (this->restore_intx)
 		pci_intx(dev, this->orig_intx);
 
@@ -3761,6 +3765,27 @@ int pci_set_mwi(struct pci_dev *dev)
 EXPORT_SYMBOL(pci_set_mwi);
 
 /**
+ * pcim_set_mwi - a device-managed pci_set_mwi()
+ * @dev: the PCI device for which MWI is enabled
+ *
+ * Managed pci_set_mwi().
+ *
+ * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
+ */
+int pcim_set_mwi(struct pci_dev *dev)
+{
+	struct pci_devres *dr;
+
+	dr = find_pci_dr(dev);
+	if (!dr)
+		return -ENOMEM;
+
+	dr->mwi = 1;
+	return pci_set_mwi(dev);
+}
+EXPORT_SYMBOL(pcim_set_mwi);
+
+/**
  * pci_try_set_mwi - enables memory-write-invalidate PCI transaction
  * @dev: the PCI device for which MWI is enabled
  *
diff --git a/drivers/s390/net/Kconfig b/drivers/s390/net/Kconfig
index a782a20..c7e484f 100644
--- a/drivers/s390/net/Kconfig
+++ b/drivers/s390/net/Kconfig
@@ -91,9 +91,6 @@
 	  To compile as a module choose M. The module name is qeth_l3.
 	  If unsure, choose Y.
 
-config QETH_IPV6
-	def_bool y if (QETH_L3 = IPV6) || (QETH_L3 && IPV6 = 'y')
-
 config CCWGROUP
 	tristate
 	default (LCS || CTCM || QETH)
diff --git a/drivers/s390/net/lcs.c b/drivers/s390/net/lcs.c
index 92ae84a..0ee8f33 100644
--- a/drivers/s390/net/lcs.c
+++ b/drivers/s390/net/lcs.c
@@ -756,18 +756,14 @@ lcs_get_lancmd(struct lcs_card *card, int count)
 static void
 lcs_get_reply(struct lcs_reply *reply)
 {
-	WARN_ON(atomic_read(&reply->refcnt) <= 0);
-	atomic_inc(&reply->refcnt);
+	refcount_inc(&reply->refcnt);
 }
 
 static void
 lcs_put_reply(struct lcs_reply *reply)
 {
-        WARN_ON(atomic_read(&reply->refcnt) <= 0);
-        if (atomic_dec_and_test(&reply->refcnt)) {
+	if (refcount_dec_and_test(&reply->refcnt))
 		kfree(reply);
-	}
-
 }
 
 static struct lcs_reply *
@@ -780,7 +776,7 @@ lcs_alloc_reply(struct lcs_cmd *cmd)
 	reply = kzalloc(sizeof(struct lcs_reply), GFP_ATOMIC);
 	if (!reply)
 		return NULL;
-	atomic_set(&reply->refcnt,1);
+	refcount_set(&reply->refcnt, 1);
 	reply->sequence_no = cmd->sequence_no;
 	reply->received = 0;
 	reply->rc = 0;
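
The refcount_t conversion makes the open-coded WARN_ON(atomic_read(&reply->refcnt) <= 0) checks redundant: refcount_inc() already WARNs on an increment from zero, and the type saturates instead of wrapping on overflow. A sketch of the resulting lifetime pattern, using only the linux/refcount.h API:

	#include <linux/refcount.h>

	refcount_set(&reply->refcnt, 1);	/* allocator holds the first reference */

	refcount_inc(&reply->refcnt);		/* take a reference; WARNs if count was 0 */

	if (refcount_dec_and_test(&reply->refcnt))
		kfree(reply);			/* last reference dropped, safe to free */
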
diff --git a/drivers/s390/net/lcs.h b/drivers/s390/net/lcs.h
index fbc8b90..bd52caa 100644
--- a/drivers/s390/net/lcs.h
+++ b/drivers/s390/net/lcs.h
@@ -5,6 +5,7 @@
 #include <linux/netdevice.h>
 #include <linux/skbuff.h>
 #include <linux/workqueue.h>
+#include <linux/refcount.h>
 #include <asm/ccwdev.h>
 
 #define LCS_DBF_TEXT(level, name, text) \
@@ -271,7 +272,7 @@ struct lcs_buffer {
 struct lcs_reply {
 	struct list_head list;
 	__u16 sequence_no;
-	atomic_t refcnt;
+	refcount_t refcnt;
 	/* Callback for completion notification. */
 	void (*callback)(struct lcs_card *, struct lcs_cmd *);
 	wait_queue_head_t wait_q;
diff --git a/drivers/s390/net/qeth_core.h b/drivers/s390/net/qeth_core.h
index badf42a..db42107 100644
--- a/drivers/s390/net/qeth_core.h
+++ b/drivers/s390/net/qeth_core.h
@@ -21,6 +21,7 @@
 #include <linux/ethtool.h>
 #include <linux/hashtable.h>
 #include <linux/ip.h>
+#include <linux/refcount.h>
 
 #include <net/ipv6.h>
 #include <net/if_inet6.h>
@@ -296,8 +297,23 @@ struct qeth_hdr_layer3 {
 	__u8  ext_flags;
 	__u16 vlan_id;
 	__u16 frame_offset;
-	__u8  dest_addr[16];
-} __attribute__ ((packed));
+	union {
+		/* TX: */
+		u8 ipv6_addr[16];
+		struct ipv4 {
+			u8 res[12];
+			u32 addr;
+		} ipv4;
+		/* RX: */
+		struct rx {
+			u8 res1[2];
+			u8 src_mac[6];
+			u8 res2[4];
+			u16 vlan_id;
+			u8 res3[2];
+		} rx;
+	} next_hop;
+};
 
 struct qeth_hdr_layer2 {
 	__u8 id;
@@ -504,12 +520,6 @@ struct qeth_qdio_info {
 	int default_out_queue;
 };
 
-#define QETH_ETH_MAC_V4      0x0100 /* like v4 */
-#define QETH_ETH_MAC_V6      0x3333 /* like v6 */
-/* tr mc mac is longer, but that will be enough to detect mc frames */
-#define QETH_TR_MAC_NC       0xc000 /* non-canonical */
-#define QETH_TR_MAC_C        0x0300 /* canonical */
-
 /**
  * buffer stuff for read channel
  */
@@ -632,7 +642,7 @@ struct qeth_reply {
 	int rc;
 	void *param;
 	struct qeth_card *card;
-	atomic_t refcnt;
+	refcount_t refcnt;
 };
 
 struct qeth_card_blkt {
@@ -846,14 +856,16 @@ static inline int qeth_get_micros(void)
 
 static inline int qeth_get_ip_version(struct sk_buff *skb)
 {
-	__be16 *p = &((struct ethhdr *)skb->data)->h_proto;
+	struct vlan_ethhdr *veth = vlan_eth_hdr(skb);
+	__be16 prot = veth->h_vlan_proto;
 
-	if (be16_to_cpu(*p) == ETH_P_8021Q)
-		p += 2;
-	switch (be16_to_cpu(*p)) {
-	case ETH_P_IPV6:
+	if (prot == htons(ETH_P_8021Q))
+		prot = veth->h_vlan_encapsulated_proto;
+
+	switch (prot) {
+	case htons(ETH_P_IPV6):
 		return 6;
-	case ETH_P_IP:
+	case htons(ETH_P_IP):
 		return 4;
 	default:
 		return 0;
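
The rewritten qeth_get_ip_version() keeps the protocol field in network byte order throughout: htons() of a compile-time constant folds to a constant expression, so it is legal in a case label and the old per-packet be16_to_cpu() conversions disappear. Reduced to the bare pattern:

	__be16 prot = veth->h_vlan_proto;	/* stays big-endian */

	if (prot == htons(ETH_P_8021Q))		/* VLAN frame? look inside */
		prot = veth->h_vlan_encapsulated_proto;

	switch (prot) {
	case htons(ETH_P_IPV6):			/* constant-folded at build time */
		return 6;
	case htons(ETH_P_IP):
		return 4;
	default:
		return 0;
	}
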
diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c
index 3614df6..6abd3bc 100644
--- a/drivers/s390/net/qeth_core_main.c
+++ b/drivers/s390/net/qeth_core_main.c
@@ -36,6 +36,7 @@
 #include <asm/diag.h>
 #include <asm/cio.h>
 #include <asm/ccwdev.h>
+#include <asm/cpcmd.h>
 
 #include "qeth_core.h"
 
@@ -564,7 +565,7 @@ static struct qeth_reply *qeth_alloc_reply(struct qeth_card *card)
 
 	reply = kzalloc(sizeof(struct qeth_reply), GFP_ATOMIC);
 	if (reply) {
-		atomic_set(&reply->refcnt, 1);
+		refcount_set(&reply->refcnt, 1);
 		atomic_set(&reply->received, 0);
 		reply->card = card;
 	}
@@ -573,14 +574,12 @@ static struct qeth_reply *qeth_alloc_reply(struct qeth_card *card)
 
 static void qeth_get_reply(struct qeth_reply *reply)
 {
-	WARN_ON(atomic_read(&reply->refcnt) <= 0);
-	atomic_inc(&reply->refcnt);
+	refcount_inc(&reply->refcnt);
 }
 
 static void qeth_put_reply(struct qeth_reply *reply)
 {
-	WARN_ON(atomic_read(&reply->refcnt) <= 0);
-	if (atomic_dec_and_test(&reply->refcnt))
+	if (refcount_dec_and_test(&reply->refcnt))
 		kfree(reply);
 }
 
@@ -1717,23 +1716,87 @@ static void qeth_configure_unitaddr(struct qeth_card *card, char *prcd)
 			       (prcd[0x11] == _ascebc['M']));
 }
 
+static enum qeth_discipline_id qeth_vm_detect_layer(struct qeth_card *card)
+{
+	enum qeth_discipline_id disc = QETH_DISCIPLINE_UNDETERMINED;
+	struct diag26c_vnic_resp *response = NULL;
+	struct diag26c_vnic_req *request = NULL;
+	struct ccw_dev_id id;
+	char userid[80];
+	int rc = 0;
+
+	QETH_DBF_TEXT(SETUP, 2, "vmlayer");
+
+	cpcmd("QUERY USERID", userid, sizeof(userid), &rc);
+	if (rc)
+		goto out;
+
+	request = kzalloc(sizeof(*request), GFP_KERNEL | GFP_DMA);
+	response = kzalloc(sizeof(*response), GFP_KERNEL | GFP_DMA);
+	if (!request || !response) {
+		rc = -ENOMEM;
+		goto out;
+	}
+
+	ccw_device_get_id(CARD_RDEV(card), &id);
+	request->resp_buf_len = sizeof(*response);
+	request->resp_version = DIAG26C_VERSION6_VM65918;
+	request->req_format = DIAG26C_VNIC_INFO;
+	ASCEBC(userid, 8);
+	memcpy(&request->sys_name, userid, 8);
+	request->devno = id.devno;
+
+	QETH_DBF_HEX(CTRL, 2, request, sizeof(*request));
+	rc = diag26c(request, response, DIAG26C_PORT_VNIC);
+	QETH_DBF_HEX(CTRL, 2, request, sizeof(*request));
+	if (rc)
+		goto out;
+	QETH_DBF_HEX(CTRL, 2, response, sizeof(*response));
+
+	if (request->resp_buf_len < sizeof(*response) ||
+	    response->version != request->resp_version) {
+		rc = -EIO;
+		goto out;
+	}
+
+	if (response->protocol == VNIC_INFO_PROT_L2)
+		disc = QETH_DISCIPLINE_LAYER2;
+	else if (response->protocol == VNIC_INFO_PROT_L3)
+		disc = QETH_DISCIPLINE_LAYER3;
+
+out:
+	kfree(response);
+	kfree(request);
+	if (rc)
+		QETH_DBF_TEXT_(SETUP, 2, "err%x", rc);
+	return disc;
+}
+
 /* Determine whether the device requires a specific layer discipline */
 static enum qeth_discipline_id qeth_enforce_discipline(struct qeth_card *card)
 {
+	enum qeth_discipline_id disc = QETH_DISCIPLINE_UNDETERMINED;
+
 	if (card->info.type == QETH_CARD_TYPE_OSM ||
-	    card->info.type == QETH_CARD_TYPE_OSN) {
+	    card->info.type == QETH_CARD_TYPE_OSN)
+		disc = QETH_DISCIPLINE_LAYER2;
+	else if (card->info.guestlan)
+		disc = (card->info.type == QETH_CARD_TYPE_IQD) ?
+				QETH_DISCIPLINE_LAYER3 :
+				qeth_vm_detect_layer(card);
+
+	switch (disc) {
+	case QETH_DISCIPLINE_LAYER2:
 		QETH_DBF_TEXT(SETUP, 3, "force l2");
-		return QETH_DISCIPLINE_LAYER2;
-	}
-
-	/* virtual HiperSocket is L3 only: */
-	if (card->info.guestlan && card->info.type == QETH_CARD_TYPE_IQD) {
+		break;
+	case QETH_DISCIPLINE_LAYER3:
 		QETH_DBF_TEXT(SETUP, 3, "force l3");
-		return QETH_DISCIPLINE_LAYER3;
+		break;
+	default:
+		QETH_DBF_TEXT(SETUP, 3, "force no");
 	}
 
-	QETH_DBF_TEXT(SETUP, 3, "force no");
-	return QETH_DISCIPLINE_UNDETERMINED;
+	return disc;
 }
 
 static void qeth_configure_blkt_default(struct qeth_card *card, char *prcd)
@@ -4218,9 +4281,8 @@ static int qeth_setadpparms_change_macaddr_cb(struct qeth_card *card,
 	cmd = (struct qeth_ipa_cmd *) data;
 	if (!card->options.layer2 ||
 	    !(card->info.mac_bits & QETH_LAYER2_MAC_READ)) {
-		memcpy(card->dev->dev_addr,
-		       &cmd->data.setadapterparms.data.change_addr.addr,
-		       OSA_ADDR_LEN);
+		ether_addr_copy(card->dev->dev_addr,
+				cmd->data.setadapterparms.data.change_addr.addr);
 		card->info.mac_bits |= QETH_LAYER2_MAC_READ;
 	}
 	qeth_default_setadapterparms_cb(card, reply, (unsigned long) cmd);
@@ -4242,9 +4304,9 @@ int qeth_setadpparms_change_macaddr(struct qeth_card *card)
 		return -ENOMEM;
 	cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
 	cmd->data.setadapterparms.data.change_addr.cmd = CHANGE_ADDR_READ_MAC;
-	cmd->data.setadapterparms.data.change_addr.addr_size = OSA_ADDR_LEN;
-	memcpy(&cmd->data.setadapterparms.data.change_addr.addr,
-	       card->dev->dev_addr, OSA_ADDR_LEN);
+	cmd->data.setadapterparms.data.change_addr.addr_size = ETH_ALEN;
+	ether_addr_copy(cmd->data.setadapterparms.data.change_addr.addr,
+			card->dev->dev_addr);
 	rc = qeth_send_ipa_cmd(card, iob, qeth_setadpparms_change_macaddr_cb,
 			       NULL);
 	return rc;
@@ -4789,9 +4851,12 @@ int qeth_vm_request_mac(struct qeth_card *card)
 	request->op_code = DIAG26C_GET_MAC;
 	request->devno = id.devno;
 
+	QETH_DBF_HEX(CTRL, 2, request, sizeof(*request));
 	rc = diag26c(request, response, DIAG26C_MAC_SERVICES);
+	QETH_DBF_HEX(CTRL, 2, request, sizeof(*request));
 	if (rc)
 		goto out;
+	QETH_DBF_HEX(CTRL, 2, response, sizeof(*response));
 
 	if (request->resp_buf_len < sizeof(*response) ||
 	    response->version != request->resp_version) {
diff --git a/drivers/s390/net/qeth_core_mpc.h b/drivers/s390/net/qeth_core_mpc.h
index ff6877f..619f897 100644
--- a/drivers/s390/net/qeth_core_mpc.h
+++ b/drivers/s390/net/qeth_core_mpc.h
@@ -10,6 +10,7 @@
 #define __QETH_CORE_MPC_H__
 
 #include <asm/qeth.h>
+#include <uapi/linux/if_ether.h>
 
 #define IPA_PDU_HEADER_SIZE	0x40
 #define QETH_IPA_PDU_LEN_TOTAL(buffer) (buffer + 0x0e)
@@ -25,7 +26,6 @@ extern unsigned char IPA_PDU_HEADER[];
 #define QETH_SEQ_NO_LENGTH	4
 #define QETH_MPC_TOKEN_LENGTH	4
 #define QETH_MCL_LENGTH		4
-#define OSA_ADDR_LEN		6
 
 #define QETH_TIMEOUT		(10 * HZ)
 #define QETH_IPA_TIMEOUT	(45 * HZ)
@@ -416,12 +416,11 @@ struct qeth_query_cmds_supp {
 } __attribute__ ((packed));
 
 struct qeth_change_addr {
-	__u32 cmd;
-	__u32 addr_size;
-	__u32 no_macs;
-	__u8 addr[OSA_ADDR_LEN];
-} __attribute__ ((packed));
-
+	u32 cmd;
+	u32 addr_size;
+	u32 no_macs;
+	u8 addr[ETH_ALEN];
+};
 
 struct qeth_snmp_cmd {
 	__u8  token[16];
diff --git a/drivers/s390/net/qeth_l2.h b/drivers/s390/net/qeth_l2.h
index 09b1c4e..f213005 100644
--- a/drivers/s390/net/qeth_l2.h
+++ b/drivers/s390/net/qeth_l2.h
@@ -22,8 +22,7 @@ int qeth_l2_vnicc_get_timeout(struct qeth_card *card, u32 *timeout);
 bool qeth_l2_vnicc_is_in_use(struct qeth_card *card);
 
 struct qeth_mac {
-	u8 mac_addr[OSA_ADDR_LEN];
-	u8 is_uc:1;
+	u8 mac_addr[ETH_ALEN];
 	u8 disp_flag:2;
 	struct hlist_node hnode;
 };
diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c
index 5863ea1..7f23644 100644
--- a/drivers/s390/net/qeth_l2_main.c
+++ b/drivers/s390/net/qeth_l2_main.c
@@ -109,8 +109,8 @@ static int qeth_l2_send_setdelmac(struct qeth_card *card, __u8 *mac,
 	if (!iob)
 		return -ENOMEM;
 	cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
-	cmd->data.setdelmac.mac_length = OSA_ADDR_LEN;
-	memcpy(&cmd->data.setdelmac.mac, mac, OSA_ADDR_LEN);
+	cmd->data.setdelmac.mac_length = ETH_ALEN;
+	ether_addr_copy(cmd->data.setdelmac.mac, mac);
 	return qeth_setdelmac_makerc(card, qeth_send_ipa_cmd(card, iob,
 					   NULL, NULL));
 }
@@ -123,7 +123,7 @@ static int qeth_l2_send_setmac(struct qeth_card *card, __u8 *mac)
 	rc = qeth_l2_send_setdelmac(card, mac, IPA_CMD_SETVMAC);
 	if (rc == 0) {
 		card->info.mac_bits |= QETH_LAYER2_MAC_REGISTERED;
-		memcpy(card->dev->dev_addr, mac, OSA_ADDR_LEN);
+		ether_addr_copy(card->dev->dev_addr, mac);
 		dev_info(&card->gdev->dev,
 			"MAC address %pM successfully registered on device %s\n",
 			card->dev->dev_addr, card->dev->name);
@@ -156,54 +156,37 @@ static int qeth_l2_send_delmac(struct qeth_card *card, __u8 *mac)
 	return rc;
 }
 
-static int qeth_l2_send_setgroupmac(struct qeth_card *card, __u8 *mac)
+static int qeth_l2_write_mac(struct qeth_card *card, u8 *mac)
 {
+	enum qeth_ipa_cmds cmd = is_multicast_ether_addr_64bits(mac) ?
+					IPA_CMD_SETGMAC : IPA_CMD_SETVMAC;
 	int rc;
 
-	QETH_CARD_TEXT(card, 2, "L2Sgmac");
-	rc = qeth_l2_send_setdelmac(card, mac, IPA_CMD_SETGMAC);
+	QETH_CARD_TEXT(card, 2, "L2Wmac");
+	rc = qeth_l2_send_setdelmac(card, mac, cmd);
 	if (rc == -EEXIST)
-		QETH_DBF_MESSAGE(2, "Group MAC %pM already existing on %s\n",
-			mac, QETH_CARD_IFNAME(card));
+		QETH_DBF_MESSAGE(2, "MAC %pM already registered on %s\n",
+				 mac, QETH_CARD_IFNAME(card));
 	else if (rc)
-		QETH_DBF_MESSAGE(2, "Could not set group MAC %pM on %s: %d\n",
-			mac, QETH_CARD_IFNAME(card), rc);
+		QETH_DBF_MESSAGE(2, "Failed to register MAC %pM on %s: %d\n",
+				 mac, QETH_CARD_IFNAME(card), rc);
 	return rc;
 }
 
-static int qeth_l2_send_delgroupmac(struct qeth_card *card, __u8 *mac)
+static int qeth_l2_remove_mac(struct qeth_card *card, u8 *mac)
 {
+	enum qeth_ipa_cmds cmd = is_multicast_ether_addr_64bits(mac) ?
+					IPA_CMD_DELGMAC : IPA_CMD_DELVMAC;
 	int rc;
 
-	QETH_CARD_TEXT(card, 2, "L2Dgmac");
-	rc = qeth_l2_send_setdelmac(card, mac, IPA_CMD_DELGMAC);
+	QETH_CARD_TEXT(card, 2, "L2Rmac");
+	rc = qeth_l2_send_setdelmac(card, mac, cmd);
 	if (rc)
-		QETH_DBF_MESSAGE(2,
-			"Could not delete group MAC %pM on %s: %d\n",
-			mac, QETH_CARD_IFNAME(card), rc);
+		QETH_DBF_MESSAGE(2, "Failed to delete MAC %pM on %s: %d\n",
+				 mac, QETH_CARD_IFNAME(card), rc);
 	return rc;
 }
 
-static int qeth_l2_write_mac(struct qeth_card *card, struct qeth_mac *mac)
-{
-	if (mac->is_uc) {
-		return qeth_l2_send_setdelmac(card, mac->mac_addr,
-						IPA_CMD_SETVMAC);
-	} else {
-		return qeth_l2_send_setgroupmac(card, mac->mac_addr);
-	}
-}
-
-static int qeth_l2_remove_mac(struct qeth_card *card, struct qeth_mac *mac)
-{
-	if (mac->is_uc) {
-		return qeth_l2_send_setdelmac(card, mac->mac_addr,
-						IPA_CMD_DELVMAC);
-	} else {
-		return qeth_l2_send_delgroupmac(card, mac->mac_addr);
-	}
-}
-
 static void qeth_l2_del_all_macs(struct qeth_card *card)
 {
 	struct qeth_mac *mac;
@@ -549,7 +532,7 @@ static int qeth_l2_set_mac_address(struct net_device *dev, void *p)
 		QETH_CARD_TEXT(card, 3, "setmcTYP");
 		return -EOPNOTSUPP;
 	}
-	QETH_CARD_HEX(card, 3, addr->sa_data, OSA_ADDR_LEN);
+	QETH_CARD_HEX(card, 3, addr->sa_data, ETH_ALEN);
 	if (qeth_wait_for_threads(card, QETH_RECOVER_THREAD)) {
 		QETH_CARD_TEXT(card, 3, "setmcREC");
 		return -ERESTARTSYS;
@@ -597,27 +580,23 @@ static void qeth_promisc_to_bridge(struct qeth_card *card)
  * only if it is not already in the hash table
  *
 */
-static void qeth_l2_add_mac(struct qeth_card *card, struct netdev_hw_addr *ha,
-			    u8 is_uc)
+static void qeth_l2_add_mac(struct qeth_card *card, struct netdev_hw_addr *ha)
 {
 	u32 mac_hash = get_unaligned((u32 *)(&ha->addr[2]));
 	struct qeth_mac *mac;
 
 	hash_for_each_possible(card->mac_htable, mac, hnode, mac_hash) {
-		if (is_uc == mac->is_uc &&
-		    !memcmp(ha->addr, mac->mac_addr, OSA_ADDR_LEN)) {
+		if (ether_addr_equal_64bits(ha->addr, mac->mac_addr)) {
 			mac->disp_flag = QETH_DISP_ADDR_DO_NOTHING;
 			return;
 		}
 	}
 
 	mac = kzalloc(sizeof(struct qeth_mac), GFP_ATOMIC);
-
 	if (!mac)
 		return;
 
-	memcpy(mac->mac_addr, ha->addr, OSA_ADDR_LEN);
-	mac->is_uc = is_uc;
+	ether_addr_copy(mac->mac_addr, ha->addr);
 	mac->disp_flag = QETH_DISP_ADDR_ADD;
 
 	hash_add(card->mac_htable, &mac->hnode, mac_hash);
@@ -643,26 +622,29 @@ static void qeth_l2_set_rx_mode(struct net_device *dev)
 	spin_lock_bh(&card->mclock);
 
 	netdev_for_each_mc_addr(ha, dev)
-		qeth_l2_add_mac(card, ha, 0);
-
+		qeth_l2_add_mac(card, ha);
 	netdev_for_each_uc_addr(ha, dev)
-		qeth_l2_add_mac(card, ha, 1);
+		qeth_l2_add_mac(card, ha);
 
 	hash_for_each_safe(card->mac_htable, i, tmp, mac, hnode) {
-		if (mac->disp_flag == QETH_DISP_ADDR_DELETE) {
-			qeth_l2_remove_mac(card, mac);
+		switch (mac->disp_flag) {
+		case QETH_DISP_ADDR_DELETE:
+			qeth_l2_remove_mac(card, mac->mac_addr);
 			hash_del(&mac->hnode);
 			kfree(mac);
-
-		} else if (mac->disp_flag == QETH_DISP_ADDR_ADD) {
-			rc = qeth_l2_write_mac(card, mac);
+			break;
+		case QETH_DISP_ADDR_ADD:
+			rc = qeth_l2_write_mac(card, mac->mac_addr);
 			if (rc) {
 				hash_del(&mac->hnode);
 				kfree(mac);
-			} else
-				mac->disp_flag = QETH_DISP_ADDR_DELETE;
-		} else
+				break;
+			}
+			/* fall through */
+		default:
+			/* for next call to set_rx_mode(): */
 			mac->disp_flag = QETH_DISP_ADDR_DELETE;
+		}
 	}
 
 	spin_unlock_bh(&card->mclock);
diff --git a/drivers/s390/net/qeth_l3.h b/drivers/s390/net/qeth_l3.h
index e583383..bdd45f4 100644
--- a/drivers/s390/net/qeth_l3.h
+++ b/drivers/s390/net/qeth_l3.h
@@ -29,7 +29,7 @@ struct qeth_ipaddr {
 	 */
 	int  ref_counter;
 	enum qeth_prot_versions proto;
-	unsigned char mac[OSA_ADDR_LEN];
+	unsigned char mac[ETH_ALEN];
 	union {
 		struct {
 			unsigned int addr;
@@ -69,19 +69,20 @@ struct qeth_ipato_entry {
 extern const struct attribute_group *qeth_l3_attr_groups[];
 
 void qeth_l3_ipaddr_to_string(enum qeth_prot_versions, const __u8 *, char *);
-int qeth_l3_string_to_ipaddr(const char *, enum qeth_prot_versions, __u8 *);
 int qeth_l3_create_device_attributes(struct device *);
 void qeth_l3_remove_device_attributes(struct device *);
 int qeth_l3_setrouting_v4(struct qeth_card *);
 int qeth_l3_setrouting_v6(struct qeth_card *);
 int qeth_l3_add_ipato_entry(struct qeth_card *, struct qeth_ipato_entry *);
-void qeth_l3_del_ipato_entry(struct qeth_card *, enum qeth_prot_versions,
-			u8 *, int);
+int qeth_l3_del_ipato_entry(struct qeth_card *card,
+			    enum qeth_prot_versions proto, u8 *addr,
+			    int mask_bits);
 int qeth_l3_add_vipa(struct qeth_card *, enum qeth_prot_versions, const u8 *);
-void qeth_l3_del_vipa(struct qeth_card *, enum qeth_prot_versions, const u8 *);
+int qeth_l3_del_vipa(struct qeth_card *card, enum qeth_prot_versions proto,
+		     const u8 *addr);
 int qeth_l3_add_rxip(struct qeth_card *, enum qeth_prot_versions, const u8 *);
-void qeth_l3_del_rxip(struct qeth_card *card, enum qeth_prot_versions,
-			const u8 *);
+int qeth_l3_del_rxip(struct qeth_card *card, enum qeth_prot_versions proto,
+		     const u8 *addr);
 void qeth_l3_update_ipato(struct qeth_card *card);
 struct qeth_ipaddr *qeth_l3_get_addr_buffer(enum qeth_prot_versions);
 int qeth_l3_add_ip(struct qeth_card *, struct qeth_ipaddr *);
diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c
index ef0961e..b0c888e8 100644
--- a/drivers/s390/net/qeth_l3_main.c
+++ b/drivers/s390/net/qeth_l3_main.c
@@ -18,15 +18,20 @@
 #include <linux/kernel.h>
 #include <linux/etherdevice.h>
 #include <linux/ip.h>
+#include <linux/in.h>
 #include <linux/ipv6.h>
 #include <linux/inetdevice.h>
 #include <linux/igmp.h>
 #include <linux/slab.h>
+#include <linux/if_ether.h>
 #include <linux/if_vlan.h>
+#include <linux/skbuff.h>
 
 #include <net/ip.h>
 #include <net/arp.h>
 #include <net/route.h>
+#include <net/ipv6.h>
+#include <net/ip6_route.h>
 #include <net/ip6_fib.h>
 #include <net/ip6_checksum.h>
 #include <net/iucv/af_iucv.h>
@@ -37,99 +42,22 @@
 
 static int qeth_l3_set_offline(struct ccwgroup_device *);
 static int qeth_l3_stop(struct net_device *);
-static void qeth_l3_set_multicast_list(struct net_device *);
+static void qeth_l3_set_rx_mode(struct net_device *dev);
 static int qeth_l3_register_addr_entry(struct qeth_card *,
 		struct qeth_ipaddr *);
 static int qeth_l3_deregister_addr_entry(struct qeth_card *,
 		struct qeth_ipaddr *);
 
-static int qeth_l3_isxdigit(char *buf)
-{
-	while (*buf) {
-		if (!isxdigit(*buf++))
-			return 0;
-	}
-	return 1;
-}
-
 static void qeth_l3_ipaddr4_to_string(const __u8 *addr, char *buf)
 {
 	sprintf(buf, "%pI4", addr);
 }
 
-static int qeth_l3_string_to_ipaddr4(const char *buf, __u8 *addr)
-{
-	int count = 0, rc = 0;
-	unsigned int in[4];
-	char c;
-
-	rc = sscanf(buf, "%u.%u.%u.%u%c",
-		    &in[0], &in[1], &in[2], &in[3], &c);
-	if (rc != 4 && (rc != 5 || c != '\n'))
-		return -EINVAL;
-	for (count = 0; count < 4; count++) {
-		if (in[count] > 255)
-			return -EINVAL;
-		addr[count] = in[count];
-	}
-	return 0;
-}
-
 static void qeth_l3_ipaddr6_to_string(const __u8 *addr, char *buf)
 {
 	sprintf(buf, "%pI6", addr);
 }
 
-static int qeth_l3_string_to_ipaddr6(const char *buf, __u8 *addr)
-{
-	const char *end, *end_tmp, *start;
-	__u16 *in;
-	char num[5];
-	int num2, cnt, out, found, save_cnt;
-	unsigned short in_tmp[8] = {0, };
-
-	cnt = out = found = save_cnt = num2 = 0;
-	end = start = buf;
-	in = (__u16 *) addr;
-	memset(in, 0, 16);
-	while (*end) {
-		end = strchr(start, ':');
-		if (end == NULL) {
-			end = buf + strlen(buf);
-			end_tmp = strchr(start, '\n');
-			if (end_tmp != NULL)
-				end = end_tmp;
-			out = 1;
-		}
-		if ((end - start)) {
-			memset(num, 0, 5);
-			if ((end - start) > 4)
-				return -EINVAL;
-			memcpy(num, start, end - start);
-			if (!qeth_l3_isxdigit(num))
-				return -EINVAL;
-			sscanf(start, "%x", &num2);
-			if (found)
-				in_tmp[save_cnt++] = num2;
-			else
-				in[cnt++] = num2;
-			if (out)
-				break;
-		} else {
-			if (found)
-				return -EINVAL;
-			found = 1;
-		}
-		start = ++end;
-	}
-	if (cnt + save_cnt > 8)
-		return -EINVAL;
-	cnt = 7;
-	while (save_cnt)
-		in[cnt--] = in_tmp[--save_cnt];
-	return 0;
-}
-
 void qeth_l3_ipaddr_to_string(enum qeth_prot_versions proto, const __u8 *addr,
 				char *buf)
 {
@@ -139,17 +67,6 @@ void qeth_l3_ipaddr_to_string(enum qeth_prot_versions proto, const __u8 *addr,
 		qeth_l3_ipaddr6_to_string(addr, buf);
 }
 
-int qeth_l3_string_to_ipaddr(const char *buf, enum qeth_prot_versions proto,
-				__u8 *addr)
-{
-	if (proto == QETH_PROT_IPV4)
-		return qeth_l3_string_to_ipaddr4(buf, addr);
-	else if (proto == QETH_PROT_IPV6)
-		return qeth_l3_string_to_ipaddr6(buf, addr);
-	else
-		return -EINVAL;
-}
-
 static void qeth_l3_convert_addr_to_bits(u8 *addr, u8 *bits, int len)
 {
 	int i, j;
@@ -207,8 +124,8 @@ inline int
 qeth_l3_ipaddrs_is_equal(struct qeth_ipaddr *addr1, struct qeth_ipaddr *addr2)
 {
 	return addr1->proto == addr2->proto &&
-		!memcmp(&addr1->u, &addr2->u, sizeof(addr1->u))  &&
-		!memcmp(&addr1->mac, &addr2->mac, sizeof(addr1->mac));
+	       !memcmp(&addr1->u, &addr2->u, sizeof(addr1->u)) &&
+	       ether_addr_equal_64bits(addr1->mac, addr2->mac);
 }
 
 static struct qeth_ipaddr *
@@ -446,7 +363,7 @@ static int qeth_l3_send_setdelmc(struct qeth_card *card,
 	if (!iob)
 		return -ENOMEM;
 	cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
-	memcpy(&cmd->data.setdelipm.mac, addr->mac, OSA_ADDR_LEN);
+	ether_addr_copy(cmd->data.setdelipm.mac, addr->mac);
 	if (addr->proto == QETH_PROT_IPV6)
 		memcpy(cmd->data.setdelipm.ip6, &addr->u.a6.addr,
 		       sizeof(struct in6_addr));
@@ -582,7 +499,6 @@ int qeth_l3_setrouting_v6(struct qeth_card *card)
 	int rc = 0;
 
 	QETH_CARD_TEXT(card, 3, "setrtg6");
-#ifdef CONFIG_QETH_IPV6
 
 	if (!qeth_is_supported(card, IPA_IPV6))
 		return 0;
@@ -599,7 +515,6 @@ int qeth_l3_setrouting_v6(struct qeth_card *card)
 			" on %s. Type set to 'no router'.\n", rc,
 			QETH_CARD_IFNAME(card));
 	}
-#endif
 	return rc;
 }
 
@@ -673,10 +588,12 @@ int qeth_l3_add_ipato_entry(struct qeth_card *card,
 	return rc;
 }
 
-void qeth_l3_del_ipato_entry(struct qeth_card *card,
-		enum qeth_prot_versions proto, u8 *addr, int mask_bits)
+int qeth_l3_del_ipato_entry(struct qeth_card *card,
+			    enum qeth_prot_versions proto, u8 *addr,
+			    int mask_bits)
 {
 	struct qeth_ipato_entry *ipatoe, *tmp;
+	int rc = -ENOENT;
 
 	QETH_CARD_TEXT(card, 2, "delipato");
 
@@ -691,10 +608,12 @@ void qeth_l3_del_ipato_entry(struct qeth_card *card,
 			list_del(&ipatoe->entry);
 			qeth_l3_update_ipato(card);
 			kfree(ipatoe);
+			rc = 0;
 		}
 	}
 
 	spin_unlock_bh(&card->ip_lock);
+	return rc;
 }
 
 /*
@@ -704,7 +623,7 @@ int qeth_l3_add_vipa(struct qeth_card *card, enum qeth_prot_versions proto,
 	      const u8 *addr)
 {
 	struct qeth_ipaddr *ipaddr;
-	int rc = 0;
+	int rc;
 
 	ipaddr = qeth_l3_get_addr_buffer(proto);
 	if (ipaddr) {
@@ -728,7 +647,7 @@ int qeth_l3_add_vipa(struct qeth_card *card, enum qeth_prot_versions proto,
 	if (qeth_l3_ip_from_hash(card, ipaddr))
 		rc = -EEXIST;
 	else
-		qeth_l3_add_ip(card, ipaddr);
+		rc = qeth_l3_add_ip(card, ipaddr);
 
 	spin_unlock_bh(&card->ip_lock);
 
@@ -737,10 +656,11 @@ int qeth_l3_add_vipa(struct qeth_card *card, enum qeth_prot_versions proto,
 	return rc;
 }
 
-void qeth_l3_del_vipa(struct qeth_card *card, enum qeth_prot_versions proto,
-	      const u8 *addr)
+int qeth_l3_del_vipa(struct qeth_card *card, enum qeth_prot_versions proto,
+		     const u8 *addr)
 {
 	struct qeth_ipaddr *ipaddr;
+	int rc;
 
 	ipaddr = qeth_l3_get_addr_buffer(proto);
 	if (ipaddr) {
@@ -755,13 +675,14 @@ void qeth_l3_del_vipa(struct qeth_card *card, enum qeth_prot_versions proto,
 		}
 		ipaddr->type = QETH_IP_TYPE_VIPA;
 	} else
-		return;
+		return -ENOMEM;
 
 	spin_lock_bh(&card->ip_lock);
-	qeth_l3_delete_ip(card, ipaddr);
+	rc = qeth_l3_delete_ip(card, ipaddr);
 	spin_unlock_bh(&card->ip_lock);
 
 	kfree(ipaddr);
+	return rc;
 }
 
 /*
@@ -771,7 +692,7 @@ int qeth_l3_add_rxip(struct qeth_card *card, enum qeth_prot_versions proto,
 	      const u8 *addr)
 {
 	struct qeth_ipaddr *ipaddr;
-	int rc = 0;
+	int rc;
 
 	ipaddr = qeth_l3_get_addr_buffer(proto);
 	if (ipaddr) {
@@ -796,7 +717,7 @@ int qeth_l3_add_rxip(struct qeth_card *card, enum qeth_prot_versions proto,
 	if (qeth_l3_ip_from_hash(card, ipaddr))
 		rc = -EEXIST;
 	else
-		qeth_l3_add_ip(card, ipaddr);
+		rc = qeth_l3_add_ip(card, ipaddr);
 
 	spin_unlock_bh(&card->ip_lock);
 
@@ -805,10 +726,11 @@ int qeth_l3_add_rxip(struct qeth_card *card, enum qeth_prot_versions proto,
 	return rc;
 }
 
-void qeth_l3_del_rxip(struct qeth_card *card, enum qeth_prot_versions proto,
-			const u8 *addr)
+int qeth_l3_del_rxip(struct qeth_card *card, enum qeth_prot_versions proto,
+		     const u8 *addr)
 {
 	struct qeth_ipaddr *ipaddr;
+	int rc;
 
 	ipaddr = qeth_l3_get_addr_buffer(proto);
 	if (ipaddr) {
@@ -823,13 +745,14 @@ void qeth_l3_del_rxip(struct qeth_card *card, enum qeth_prot_versions proto,
 		}
 		ipaddr->type = QETH_IP_TYPE_RXIP;
 	} else
-		return;
+		return -ENOMEM;
 
 	spin_lock_bh(&card->ip_lock);
-	qeth_l3_delete_ip(card, ipaddr);
+	rc = qeth_l3_delete_ip(card, ipaddr);
 	spin_unlock_bh(&card->ip_lock);
 
 	kfree(ipaddr);
+	return rc;
 }
 
 static int qeth_l3_register_addr_entry(struct qeth_card *card,
@@ -896,27 +819,6 @@ static int qeth_l3_deregister_addr_entry(struct qeth_card *card,
 	return rc;
 }
 
-static u8 qeth_l3_get_qeth_hdr_flags4(int cast_type)
-{
-	if (cast_type == RTN_MULTICAST)
-		return QETH_CAST_MULTICAST;
-	if (cast_type == RTN_BROADCAST)
-		return QETH_CAST_BROADCAST;
-	return QETH_CAST_UNICAST;
-}
-
-static u8 qeth_l3_get_qeth_hdr_flags6(int cast_type)
-{
-	u8 ct = QETH_HDR_PASSTHRU | QETH_HDR_IPV6;
-	if (cast_type == RTN_MULTICAST)
-		return ct | QETH_CAST_MULTICAST;
-	if (cast_type == RTN_ANYCAST)
-		return ct | QETH_CAST_ANYCAST;
-	if (cast_type == RTN_BROADCAST)
-		return ct | QETH_CAST_BROADCAST;
-	return ct | QETH_CAST_UNICAST;
-}
-
 static int qeth_l3_setadapter_parms(struct qeth_card *card)
 {
 	int rc = 0;
@@ -933,7 +835,6 @@ static int qeth_l3_setadapter_parms(struct qeth_card *card)
 	return rc;
 }
 
-#ifdef CONFIG_QETH_IPV6
 static int qeth_l3_send_simple_setassparms_ipv6(struct qeth_card *card,
 		enum qeth_ipa_funcs ipa_func, __u16 cmd_code)
 {
@@ -949,7 +850,6 @@ static int qeth_l3_send_simple_setassparms_ipv6(struct qeth_card *card,
 				   qeth_setassparms_cb, NULL);
 	return rc;
 }
-#endif
 
 static int qeth_l3_start_ipa_arp_processing(struct qeth_card *card)
 {
@@ -1045,7 +945,6 @@ static int qeth_l3_start_ipa_multicast(struct qeth_card *card)
 	return rc;
 }
 
-#ifdef CONFIG_QETH_IPV6
 static int qeth_l3_softsetup_ipv6(struct qeth_card *card)
 {
 	int rc;
@@ -1091,12 +990,9 @@ static int qeth_l3_softsetup_ipv6(struct qeth_card *card)
 	dev_info(&card->gdev->dev, "IPV6 enabled\n");
 	return 0;
 }
-#endif
 
 static int qeth_l3_start_ipa_ipv6(struct qeth_card *card)
 {
-	int rc = 0;
-
 	QETH_CARD_TEXT(card, 3, "strtipv6");
 
 	if (!qeth_is_supported(card, IPA_IPV6)) {
@@ -1104,10 +1000,7 @@ static int qeth_l3_start_ipa_ipv6(struct qeth_card *card)
 			"IPv6 not supported on %s\n", QETH_CARD_IFNAME(card));
 		return 0;
 	}
-#ifdef CONFIG_QETH_IPV6
-	rc = qeth_l3_softsetup_ipv6(card);
-#endif
-	return rc ;
+	return qeth_l3_softsetup_ipv6(card);
 }
 
 static int qeth_l3_start_ipa_broadcast(struct qeth_card *card)
@@ -1179,8 +1072,8 @@ static int qeth_l3_iqd_read_initial_mac_cb(struct qeth_card *card,
 
 	cmd = (struct qeth_ipa_cmd *) data;
 	if (cmd->hdr.return_code == 0)
-		memcpy(card->dev->dev_addr,
-			cmd->data.create_destroy_addr.unique_id, ETH_ALEN);
+		ether_addr_copy(card->dev->dev_addr,
+				cmd->data.create_destroy_addr.unique_id);
 	else
 		eth_random_addr(card->dev->dev_addr);
 
@@ -1328,81 +1221,22 @@ qeth_diags_trace(struct qeth_card *card, enum qeth_diags_trace_cmds diags_cmd)
 	return qeth_send_ipa_cmd(card, iob, qeth_diags_trace_cb, NULL);
 }
 
-static void qeth_l3_get_mac_for_ipm(__be32 ipm, char *mac)
-{
-	ip_eth_mc_map(ipm, mac);
-}
-
-static void qeth_l3_mark_all_mc_to_be_deleted(struct qeth_card *card)
-{
-	struct qeth_ipaddr *addr;
-	int i;
-
-	hash_for_each(card->ip_mc_htable, i, addr, hnode)
-		addr->disp_flag = QETH_DISP_ADDR_DELETE;
-
-}
-
-static void qeth_l3_add_all_new_mc(struct qeth_card *card)
-{
-	struct qeth_ipaddr *addr;
-	struct hlist_node *tmp;
-	int i;
-	int rc;
-
-	hash_for_each_safe(card->ip_mc_htable, i, tmp, addr, hnode) {
-		if (addr->disp_flag == QETH_DISP_ADDR_ADD) {
-			rc = qeth_l3_register_addr_entry(card, addr);
-			if (!rc || (rc == IPA_RC_LAN_OFFLINE))
-				addr->ref_counter = 1;
-			else {
-				hash_del(&addr->hnode);
-				kfree(addr);
-			}
-		}
-	}
-
-}
-
-static void qeth_l3_delete_nonused_mc(struct qeth_card *card)
-{
-	struct qeth_ipaddr *addr;
-	struct hlist_node *tmp;
-	int i;
-	int rc;
-
-	hash_for_each_safe(card->ip_mc_htable, i, tmp, addr, hnode) {
-		if (addr->disp_flag == QETH_DISP_ADDR_DELETE) {
-			rc = qeth_l3_deregister_addr_entry(card, addr);
-			if (!rc || (rc == IPA_RC_MC_ADDR_NOT_FOUND)) {
-				hash_del(&addr->hnode);
-				kfree(addr);
-			}
-		}
-	}
-
-}
-
-
 static void
 qeth_l3_add_mc_to_hash(struct qeth_card *card, struct in_device *in4_dev)
 {
 	struct ip_mc_list *im4;
 	struct qeth_ipaddr *tmp, *ipm;
-	char buf[MAX_ADDR_LEN];
 
 	QETH_CARD_TEXT(card, 4, "addmc");
 
 	tmp = qeth_l3_get_addr_buffer(QETH_PROT_IPV4);
-		if (!tmp)
-			return;
+	if (!tmp)
+		return;
 
 	for (im4 = rcu_dereference(in4_dev->mc_list); im4 != NULL;
 	     im4 = rcu_dereference(im4->next_rcu)) {
-		qeth_l3_get_mac_for_ipm(im4->multiaddr, buf);
-
+		ip_eth_mc_map(im4->multiaddr, tmp->mac);
 		tmp->u.a4.addr = be32_to_cpu(im4->multiaddr);
-		memcpy(tmp->mac, buf, sizeof(tmp->mac));
 		tmp->is_multicast = 1;
 
 		ipm = qeth_l3_ip_from_hash(card, tmp);
@@ -1412,7 +1246,7 @@ qeth_l3_add_mc_to_hash(struct qeth_card *card, struct in_device *in4_dev)
 			ipm = qeth_l3_get_addr_buffer(QETH_PROT_IPV4);
 			if (!ipm)
 				continue;
-			memcpy(ipm->mac, buf, sizeof(tmp->mac));
+			ether_addr_copy(ipm->mac, tmp->mac);
 			ipm->u.a4.addr = be32_to_cpu(im4->multiaddr);
 			ipm->is_multicast = 1;
 			ipm->disp_flag = QETH_DISP_ADDR_ADD;
@@ -1466,25 +1300,21 @@ static void qeth_l3_add_multicast_ipv4(struct qeth_card *card)
 	rcu_read_unlock();
 }
 
-#ifdef CONFIG_QETH_IPV6
-static void
-qeth_l3_add_mc6_to_hash(struct qeth_card *card, struct inet6_dev *in6_dev)
+static void qeth_l3_add_mc6_to_hash(struct qeth_card *card,
+				    struct inet6_dev *in6_dev)
 {
 	struct qeth_ipaddr *ipm;
 	struct ifmcaddr6 *im6;
 	struct qeth_ipaddr *tmp;
-	char buf[MAX_ADDR_LEN];
 
 	QETH_CARD_TEXT(card, 4, "addmc6");
 
 	tmp = qeth_l3_get_addr_buffer(QETH_PROT_IPV6);
-		if (!tmp)
-			return;
+	if (!tmp)
+		return;
 
 	for (im6 = in6_dev->mc_list; im6 != NULL; im6 = im6->next) {
-		ndisc_mc_map(&im6->mca_addr, buf, in6_dev->dev, 0);
-
-		memcpy(tmp->mac, buf, sizeof(tmp->mac));
+		ipv6_eth_mc_map(&im6->mca_addr, tmp->mac);
 		memcpy(&tmp->u.a6.addr, &im6->mca_addr.s6_addr,
 		       sizeof(struct in6_addr));
 		tmp->is_multicast = 1;
@@ -1499,7 +1329,7 @@ qeth_l3_add_mc6_to_hash(struct qeth_card *card, struct inet6_dev *in6_dev)
 		if (!ipm)
 			continue;
 
-		memcpy(ipm->mac, buf, OSA_ADDR_LEN);
+		ether_addr_copy(ipm->mac, tmp->mac);
 		memcpy(&ipm->u.a6.addr, &im6->mca_addr.s6_addr,
 		       sizeof(struct in6_addr));
 		ipm->is_multicast = 1;
@@ -1560,7 +1390,6 @@ static void qeth_l3_add_multicast_ipv6(struct qeth_card *card)
 	rcu_read_unlock();
 	in6_dev_put(in6_dev);
 }
-#endif /* CONFIG_QETH_IPV6 */
 
 static void qeth_l3_free_vlan_addresses4(struct qeth_card *card,
 			unsigned short vid)
@@ -1600,9 +1429,8 @@ static void qeth_l3_free_vlan_addresses4(struct qeth_card *card,
 }
 
 static void qeth_l3_free_vlan_addresses6(struct qeth_card *card,
-			unsigned short vid)
+					 unsigned short vid)
 {
-#ifdef CONFIG_QETH_IPV6
 	struct inet6_dev *in6_dev;
 	struct inet6_ifaddr *ifa;
 	struct qeth_ipaddr *addr;
@@ -1637,7 +1465,6 @@ static void qeth_l3_free_vlan_addresses6(struct qeth_card *card,
 	kfree(addr);
 out:
 	in6_dev_put(in6_dev);
-#endif /* CONFIG_QETH_IPV6 */
 }
 
 static void qeth_l3_free_vlan_addresses(struct qeth_card *card,
@@ -1672,44 +1499,31 @@ static int qeth_l3_vlan_rx_kill_vid(struct net_device *dev,
 	/* unregister IP addresses of vlan device */
 	qeth_l3_free_vlan_addresses(card, vid);
 	clear_bit(vid, card->active_vlans);
-	qeth_l3_set_multicast_list(card->dev);
+	qeth_l3_set_rx_mode(dev);
 	return 0;
 }
 
 static void qeth_l3_rebuild_skb(struct qeth_card *card, struct sk_buff *skb,
 				struct qeth_hdr *hdr)
 {
-	__u16 prot;
-	struct iphdr *ip_hdr;
-	unsigned char tg_addr[MAX_ADDR_LEN];
-
 	if (!(hdr->hdr.l3.flags & QETH_HDR_PASSTHRU)) {
-		prot = (hdr->hdr.l3.flags & QETH_HDR_IPV6) ? ETH_P_IPV6 :
-			      ETH_P_IP;
+		u16 prot = (hdr->hdr.l3.flags & QETH_HDR_IPV6) ? ETH_P_IPV6 :
+								 ETH_P_IP;
+		unsigned char tg_addr[ETH_ALEN];
+
+		skb_reset_network_header(skb);
 		switch (hdr->hdr.l3.flags & QETH_HDR_CAST_MASK) {
 		case QETH_CAST_MULTICAST:
-			switch (prot) {
-#ifdef CONFIG_QETH_IPV6
-			case ETH_P_IPV6:
-				ndisc_mc_map((struct in6_addr *)
-				     skb->data + 24,
-				     tg_addr, card->dev, 0);
-				break;
-#endif
-			case ETH_P_IP:
-				ip_hdr = (struct iphdr *)skb->data;
-				ip_eth_mc_map(ip_hdr->daddr, tg_addr);
-				break;
-			default:
-				memcpy(tg_addr, card->dev->broadcast,
-					card->dev->addr_len);
-			}
+			if (prot == ETH_P_IP)
+				ip_eth_mc_map(ip_hdr(skb)->daddr, tg_addr);
+			else
+				ipv6_eth_mc_map(&ipv6_hdr(skb)->daddr, tg_addr);
+
 			card->stats.multicast++;
 			skb->pkt_type = PACKET_MULTICAST;
 			break;
 		case QETH_CAST_BROADCAST:
-			memcpy(tg_addr, card->dev->broadcast,
-				card->dev->addr_len);
+			ether_addr_copy(tg_addr, card->dev->broadcast);
 			card->stats.multicast++;
 			skb->pkt_type = PACKET_BROADCAST;
 			break;
@@ -1721,12 +1535,11 @@ static void qeth_l3_rebuild_skb(struct qeth_card *card, struct sk_buff *skb,
 				skb->pkt_type = PACKET_OTHERHOST;
 			else
 				skb->pkt_type = PACKET_HOST;
-			memcpy(tg_addr, card->dev->dev_addr,
-				card->dev->addr_len);
+			ether_addr_copy(tg_addr, card->dev->dev_addr);
 		}
 		if (hdr->hdr.l3.ext_flags & QETH_HDR_EXT_SRC_MAC_ADDR)
 			card->dev->header_ops->create(skb, card->dev, prot,
-				tg_addr, &hdr->hdr.l3.dest_addr[2],
+				tg_addr, &hdr->hdr.l3.next_hop.rx.src_mac,
 				card->dev->addr_len);
 		else
 			card->dev->header_ops->create(skb, card->dev, prot,
@@ -1741,7 +1554,7 @@ static void qeth_l3_rebuild_skb(struct qeth_card *card, struct sk_buff *skb,
 				      QETH_HDR_EXT_INCLUDE_VLAN_TAG))) {
 		u16 tag = (hdr->hdr.l3.ext_flags & QETH_HDR_EXT_VLAN_FRAME) ?
 				hdr->hdr.l3.vlan_id :
-				*((u16 *)&hdr->hdr.l3.dest_addr[12]);
+				hdr->hdr.l3.next_hop.rx.vlan_id;
 		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), tag);
 	}
 
@@ -1949,26 +1762,46 @@ qeth_l3_handle_promisc_mode(struct qeth_card *card)
 	}
 }
 
-static void qeth_l3_set_multicast_list(struct net_device *dev)
+static void qeth_l3_set_rx_mode(struct net_device *dev)
 {
 	struct qeth_card *card = dev->ml_priv;
+	struct qeth_ipaddr *addr;
+	struct hlist_node *tmp;
+	int i, rc;
 
 	QETH_CARD_TEXT(card, 3, "setmulti");
 	if (qeth_threads_running(card, QETH_RECOVER_THREAD) &&
 	    (card->state != CARD_STATE_UP))
 		return;
 	if (!card->options.sniffer) {
-
 		spin_lock_bh(&card->mclock);
 
-		qeth_l3_mark_all_mc_to_be_deleted(card);
-
 		qeth_l3_add_multicast_ipv4(card);
-#ifdef CONFIG_QETH_IPV6
 		qeth_l3_add_multicast_ipv6(card);
-#endif
-		qeth_l3_delete_nonused_mc(card);
-		qeth_l3_add_all_new_mc(card);
+
+		hash_for_each_safe(card->ip_mc_htable, i, tmp, addr, hnode) {
+			switch (addr->disp_flag) {
+			case QETH_DISP_ADDR_DELETE:
+				rc = qeth_l3_deregister_addr_entry(card, addr);
+				if (!rc || rc == IPA_RC_MC_ADDR_NOT_FOUND) {
+					hash_del(&addr->hnode);
+					kfree(addr);
+				}
+				break;
+			case QETH_DISP_ADDR_ADD:
+				rc = qeth_l3_register_addr_entry(card, addr);
+				if (rc && rc != IPA_RC_LAN_OFFLINE) {
+					hash_del(&addr->hnode);
+					kfree(addr);
+					break;
+				}
+				addr->ref_counter = 1;
+				/* fall through */
+			default:
+				/* for next call to set_rx_mode(): */
+				addr->disp_flag = QETH_DISP_ADDR_DELETE;
+			}
+		}
 
 		spin_unlock_bh(&card->mclock);
 
@@ -2237,12 +2070,10 @@ static int qeth_l3_arp_query(struct qeth_card *card, char __user *udata)
 			rc = -EFAULT;
 		goto free_and_out;
 	}
-#ifdef CONFIG_QETH_IPV6
 	if (qinfo.mask_bits & QETH_QARP_WITH_IPV6) {
 		/* fails in case of GuestLAN QDIO mode */
 		qeth_l3_query_arp_cache_info(card, QETH_PROT_IPV6, &qinfo);
 	}
-#endif
 	if (copy_to_user(udata, qinfo.udata, qinfo.udata_len)) {
 		QETH_CARD_TEXT(card, 4, "qactf");
 		rc = -EFAULT;
@@ -2422,9 +2253,8 @@ static int qeth_l3_do_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
 	return rc;
 }
 
-static int qeth_l3_get_cast_type(struct qeth_card *card, struct sk_buff *skb)
+static int qeth_l3_get_cast_type(struct sk_buff *skb)
 {
-	int cast_type = RTN_UNSPEC;
 	struct neighbour *n = NULL;
 	struct dst_entry *dst;
 
@@ -2433,48 +2263,34 @@ static int qeth_l3_get_cast_type(struct qeth_card *card, struct sk_buff *skb)
 	if (dst)
 		n = dst_neigh_lookup_skb(dst, skb);
 	if (n) {
-		cast_type = n->type;
+		int cast_type = n->type;
+
 		rcu_read_unlock();
 		neigh_release(n);
 		if ((cast_type == RTN_BROADCAST) ||
 		    (cast_type == RTN_MULTICAST) ||
 		    (cast_type == RTN_ANYCAST))
 			return cast_type;
-		else
-			return RTN_UNSPEC;
+		return RTN_UNSPEC;
 	}
 	rcu_read_unlock();
 
-	/* try something else */
+	/* no neighbour (eg AF_PACKET), fall back to target's IP address ... */
 	if (be16_to_cpu(skb->protocol) == ETH_P_IPV6)
-		return (skb_network_header(skb)[24] == 0xff) ?
-				RTN_MULTICAST : 0;
+		return ipv6_addr_is_multicast(&ipv6_hdr(skb)->daddr) ?
+				RTN_MULTICAST : RTN_UNSPEC;
 	else if (be16_to_cpu(skb->protocol) == ETH_P_IP)
-		return ((skb_network_header(skb)[16] & 0xf0) == 0xe0) ?
-				RTN_MULTICAST : 0;
-	/* ... */
-	if (!memcmp(skb->data, skb->dev->broadcast, 6))
-		return RTN_BROADCAST;
-	else {
-		u16 hdr_mac;
+		return ipv4_is_multicast(ip_hdr(skb)->daddr) ?
+				RTN_MULTICAST : RTN_UNSPEC;
 
-		hdr_mac = *((u16 *)skb->data);
-		/* tr multicast? */
-		switch (card->info.link_type) {
-		case QETH_LINK_TYPE_HSTR:
-		case QETH_LINK_TYPE_LANE_TR:
-			if ((hdr_mac == QETH_TR_MAC_NC) ||
-			    (hdr_mac == QETH_TR_MAC_C))
-				return RTN_MULTICAST;
-			break;
-		/* eth or so multicast? */
-		default:
-		if ((hdr_mac == QETH_ETH_MAC_V4) ||
-			    (hdr_mac == QETH_ETH_MAC_V6))
-				return RTN_MULTICAST;
-		}
-	}
-	return cast_type;
+	/* ... and MAC address */
+	if (ether_addr_equal_64bits(eth_hdr(skb)->h_dest, skb->dev->broadcast))
+		return RTN_BROADCAST;
+	if (is_multicast_ether_addr(eth_hdr(skb)->h_dest))
+		return RTN_MULTICAST;
+
+	/* default to unicast */
+	return RTN_UNSPEC;
 }
 
 static void qeth_l3_fill_af_iucv_hdr(struct qeth_card *card,
@@ -2494,17 +2310,27 @@ static void qeth_l3_fill_af_iucv_hdr(struct qeth_card *card,
 	daddr[0] = 0xfe;
 	daddr[1] = 0x80;
 	memcpy(&daddr[8], iucv_hdr->destUserID, 8);
-	memcpy(hdr->hdr.l3.dest_addr, daddr, 16);
+	memcpy(hdr->hdr.l3.next_hop.ipv6_addr, daddr, 16);
+}
+
+static u8 qeth_l3_cast_type_to_flag(int cast_type)
+{
+	if (cast_type == RTN_MULTICAST)
+		return QETH_CAST_MULTICAST;
+	if (cast_type == RTN_ANYCAST)
+		return QETH_CAST_ANYCAST;
+	if (cast_type == RTN_BROADCAST)
+		return QETH_CAST_BROADCAST;
+	return QETH_CAST_UNICAST;
 }
 
 static void qeth_l3_fill_header(struct qeth_card *card, struct qeth_hdr *hdr,
-		struct sk_buff *skb, int ipv, int cast_type)
+				struct sk_buff *skb, int ipv, int cast_type,
+				unsigned int data_len)
 {
-	struct dst_entry *dst;
-
 	memset(hdr, 0, sizeof(struct qeth_hdr));
 	hdr->hdr.l3.id = QETH_HEADER_TYPE_LAYER3;
-	hdr->hdr.l3.ext_flags = 0;
+	hdr->hdr.l3.length = data_len;
 
 	/*
 	 * before we're going to overwrite this location with next hop ip.
@@ -2518,44 +2344,40 @@ static void qeth_l3_fill_header(struct qeth_card *card, struct qeth_hdr *hdr,
 		hdr->hdr.l3.vlan_id = skb_vlan_tag_get(skb);
 	}
 
-	hdr->hdr.l3.length = skb->len - sizeof(struct qeth_hdr);
+	/* OSA only: */
+	if (!ipv) {
+		hdr->hdr.l3.flags = QETH_HDR_PASSTHRU;
+		if (ether_addr_equal_64bits(eth_hdr(skb)->h_dest,
+					    skb->dev->broadcast))
+			hdr->hdr.l3.flags |= QETH_CAST_BROADCAST;
+		else
+			hdr->hdr.l3.flags |= (cast_type == RTN_MULTICAST) ?
+				QETH_CAST_MULTICAST : QETH_CAST_UNICAST;
+		return;
+	}
 
+	hdr->hdr.l3.flags = qeth_l3_cast_type_to_flag(cast_type);
 	rcu_read_lock();
-	dst = skb_dst(skb);
 	if (ipv == 4) {
-		struct rtable *rt = (struct rtable *) dst;
-		__be32 *pkey = &ip_hdr(skb)->daddr;
+		struct rtable *rt = skb_rtable(skb);
 
-		if (rt && rt->rt_gateway)
-			pkey = &rt->rt_gateway;
-
-		/* IPv4 */
-		hdr->hdr.l3.flags = qeth_l3_get_qeth_hdr_flags4(cast_type);
-		memset(hdr->hdr.l3.dest_addr, 0, 12);
-		*((__be32 *) (&hdr->hdr.l3.dest_addr[12])) = *pkey;
-	} else if (ipv == 6) {
-		struct rt6_info *rt = (struct rt6_info *) dst;
-		struct in6_addr *pkey = &ipv6_hdr(skb)->daddr;
+		*((__be32 *) &hdr->hdr.l3.next_hop.ipv4.addr) = (rt) ?
+				rt_nexthop(rt, ip_hdr(skb)->daddr) :
+				ip_hdr(skb)->daddr;
+	} else {
+		/* IPv6 */
+		const struct rt6_info *rt = skb_rt6_info(skb);
+		const struct in6_addr *next_hop;
 
 		if (rt && !ipv6_addr_any(&rt->rt6i_gateway))
-			pkey = &rt->rt6i_gateway;
+			next_hop = &rt->rt6i_gateway;
+		else
+			next_hop = &ipv6_hdr(skb)->daddr;
+		memcpy(hdr->hdr.l3.next_hop.ipv6_addr, next_hop, 16);
 
-		/* IPv6 */
-		hdr->hdr.l3.flags = qeth_l3_get_qeth_hdr_flags6(cast_type);
-		if (card->info.type == QETH_CARD_TYPE_IQD)
-			hdr->hdr.l3.flags &= ~QETH_HDR_PASSTHRU;
-		memcpy(hdr->hdr.l3.dest_addr, pkey, 16);
-	} else {
-		if (!memcmp(skb->data + sizeof(struct qeth_hdr),
-			    skb->dev->broadcast, 6)) {
-			/* broadcast? */
-			hdr->hdr.l3.flags = QETH_CAST_BROADCAST |
-						QETH_HDR_PASSTHRU;
-		} else {
-			hdr->hdr.l3.flags = (cast_type == RTN_MULTICAST) ?
-				QETH_CAST_MULTICAST | QETH_HDR_PASSTHRU :
-				QETH_CAST_UNICAST | QETH_HDR_PASSTHRU;
-		}
+		hdr->hdr.l3.flags |= QETH_HDR_IPV6;
+		if (card->info.type != QETH_CARD_TYPE_IQD)
+			hdr->hdr.l3.flags |= QETH_HDR_PASSTHRU;
 	}
 	rcu_read_unlock();
 }
@@ -2587,7 +2409,6 @@ static void qeth_tso_fill_header(struct qeth_card *card,
 
 	/*fix header to TSO values ...*/
 	hdr->hdr.hdr.l3.id = QETH_HEADER_TYPE_TSO;
-	hdr->hdr.hdr.l3.length = skb->len - sizeof(struct qeth_hdr_tso);
 	/*set values which are fix for the first approach ...*/
 	hdr->ext.hdr_tot_len = (__u16) sizeof(struct qeth_hdr_ext_tso);
 	hdr->ext.imb_hdr_no  = 1;
@@ -2655,7 +2476,7 @@ static netdev_tx_t qeth_l3_hard_start_xmit(struct sk_buff *skb,
 	struct qeth_card *card = dev->ml_priv;
 	struct sk_buff *new_skb = NULL;
 	int ipv = qeth_get_ip_version(skb);
-	int cast_type = qeth_l3_get_cast_type(card, skb);
+	int cast_type = qeth_l3_get_cast_type(skb);
 	struct qeth_qdio_out_q *queue =
 		card->qdio.out_qs[card->qdio.do_prio_queueing
 			|| (cast_type && card->info.is_multicast_different) ?
@@ -2748,21 +2569,23 @@ static netdev_tx_t qeth_l3_hard_start_xmit(struct sk_buff *skb,
 	if (use_tso) {
 		hdr = skb_push(new_skb, sizeof(struct qeth_hdr_tso));
 		memset(hdr, 0, sizeof(struct qeth_hdr_tso));
-		qeth_l3_fill_header(card, hdr, new_skb, ipv, cast_type);
+		qeth_l3_fill_header(card, hdr, new_skb, ipv, cast_type,
+				    new_skb->len - sizeof(struct qeth_hdr_tso));
 		qeth_tso_fill_header(card, hdr, new_skb);
 		hdr_elements++;
 	} else {
 		if (data_offset < 0) {
 			hdr = skb_push(new_skb, sizeof(struct qeth_hdr));
-			qeth_l3_fill_header(card, hdr, new_skb, ipv,
-						cast_type);
+			qeth_l3_fill_header(card, hdr, new_skb, ipv, cast_type,
+					    new_skb->len -
+					    sizeof(struct qeth_hdr));
 		} else {
 			if (be16_to_cpu(new_skb->protocol) == ETH_P_AF_IUCV)
 				qeth_l3_fill_af_iucv_hdr(card, hdr, new_skb);
 			else {
 				qeth_l3_fill_header(card, hdr, new_skb, ipv,
-							cast_type);
-				hdr->hdr.l3.length = new_skb->len - data_offset;
+						    cast_type,
+						    new_skb->len - data_offset);
 			}
 		}
 
@@ -2930,7 +2753,7 @@ static const struct net_device_ops qeth_l3_netdev_ops = {
 	.ndo_get_stats		= qeth_get_stats,
 	.ndo_start_xmit		= qeth_l3_hard_start_xmit,
 	.ndo_validate_addr	= eth_validate_addr,
-	.ndo_set_rx_mode	= qeth_l3_set_multicast_list,
+	.ndo_set_rx_mode	= qeth_l3_set_rx_mode,
 	.ndo_do_ioctl		= qeth_do_ioctl,
 	.ndo_change_mtu		= qeth_change_mtu,
 	.ndo_fix_features	= qeth_fix_features,
@@ -2947,7 +2770,7 @@ static const struct net_device_ops qeth_l3_osa_netdev_ops = {
 	.ndo_start_xmit		= qeth_l3_hard_start_xmit,
 	.ndo_features_check	= qeth_features_check,
 	.ndo_validate_addr	= eth_validate_addr,
-	.ndo_set_rx_mode	= qeth_l3_set_multicast_list,
+	.ndo_set_rx_mode	= qeth_l3_set_rx_mode,
 	.ndo_do_ioctl		= qeth_do_ioctl,
 	.ndo_change_mtu		= qeth_change_mtu,
 	.ndo_fix_features	= qeth_fix_features,
@@ -3145,7 +2968,7 @@ static int __qeth_l3_set_online(struct ccwgroup_device *gdev, int recovery_mode)
 			__qeth_l3_open(card->dev);
 		else
 			dev_open(card->dev);
-		qeth_l3_set_multicast_list(card->dev);
+		qeth_l3_set_rx_mode(card->dev);
 		qeth_recover_features(card->dev);
 		rtnl_unlock();
 	}
@@ -3371,10 +3194,6 @@ static struct notifier_block qeth_l3_ip_notifier = {
 	NULL,
 };
 
-#ifdef CONFIG_QETH_IPV6
-/**
- * IPv6 event handler
- */
 static int qeth_l3_ip6_event(struct notifier_block *this,
 			     unsigned long event, void *ptr)
 {
@@ -3419,7 +3238,6 @@ static struct notifier_block qeth_l3_ip6_notifier = {
 	qeth_l3_ip6_event,
 	NULL,
 };
-#endif
 
 static int qeth_l3_register_notifiers(void)
 {
@@ -3429,35 +3247,25 @@ static int qeth_l3_register_notifiers(void)
 	rc = register_inetaddr_notifier(&qeth_l3_ip_notifier);
 	if (rc)
 		return rc;
-#ifdef CONFIG_QETH_IPV6
 	rc = register_inet6addr_notifier(&qeth_l3_ip6_notifier);
 	if (rc) {
 		unregister_inetaddr_notifier(&qeth_l3_ip_notifier);
 		return rc;
 	}
-#else
-	pr_warn("There is no IPv6 support for the layer 3 discipline\n");
-#endif
 	return 0;
 }
 
 static void qeth_l3_unregister_notifiers(void)
 {
-
 	QETH_DBF_TEXT(SETUP, 5, "unregnot");
 	WARN_ON(unregister_inetaddr_notifier(&qeth_l3_ip_notifier));
-#ifdef CONFIG_QETH_IPV6
 	WARN_ON(unregister_inet6addr_notifier(&qeth_l3_ip6_notifier));
-#endif /* QETH_IPV6 */
 }
 
 static int __init qeth_l3_init(void)
 {
-	int rc = 0;
-
 	pr_info("register layer 3 discipline\n");
-	rc = qeth_l3_register_notifiers();
-	return rc;
+	return qeth_l3_register_notifiers();
 }
 
 static void __exit qeth_l3_exit(void)
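
In qeth_l3_fill_header() above, the IPv4 branch now uses rt_nexthop() from net/route.h, which wraps exactly the logic the removed open-coded version had: prefer the route's gateway when one exists, otherwise fall back to the packet's destination address. Roughly, as a behavioural sketch (not a copy of the helper itself):

	static inline __be32 l3_next_hop_v4(struct rtable *rt, __be32 daddr)
	{
		if (rt && rt->rt_gateway)
			return rt->rt_gateway;	/* indirect route: next hop is the gateway */
		return daddr;			/* direct route: destination is the next hop */
	}
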
diff --git a/drivers/s390/net/qeth_l3_sys.c b/drivers/s390/net/qeth_l3_sys.c
index 6ea2b52..a645cfe 100644
--- a/drivers/s390/net/qeth_l3_sys.c
+++ b/drivers/s390/net/qeth_l3_sys.c
@@ -10,11 +10,23 @@
 #include <linux/slab.h>
 #include <asm/ebcdic.h>
 #include <linux/hashtable.h>
+#include <linux/inet.h>
 #include "qeth_l3.h"
 
 #define QETH_DEVICE_ATTR(_id, _name, _mode, _show, _store) \
 struct device_attribute dev_attr_##_id = __ATTR(_name, _mode, _show, _store)
 
+static int qeth_l3_string_to_ipaddr(const char *buf,
+				    enum qeth_prot_versions proto, u8 *addr)
+{
+	const char *end;
+
+	if ((proto == QETH_PROT_IPV4 && !in4_pton(buf, -1, addr, -1, &end)) ||
+	    (proto == QETH_PROT_IPV6 && !in6_pton(buf, -1, addr, -1, &end)))
+		return -EINVAL;
+	return 0;
+}
+
 static ssize_t qeth_l3_dev_route_show(struct qeth_card *card,
 			struct qeth_routing_info *route, char *buf)
 {
@@ -262,7 +274,7 @@ static ssize_t qeth_l3_dev_hsuid_store(struct device *dev,
 	struct qeth_card *card = dev_get_drvdata(dev);
 	struct qeth_ipaddr *addr;
 	char *tmp;
-	int i;
+	int rc, i;
 
 	if (!card)
 		return -EINVAL;
@@ -331,11 +343,11 @@ static ssize_t qeth_l3_dev_hsuid_store(struct device *dev,
 		return -ENOMEM;
 
 	spin_lock_bh(&card->ip_lock);
-	qeth_l3_add_ip(card, addr);
+	rc = qeth_l3_add_ip(card, addr);
 	spin_unlock_bh(&card->ip_lock);
 	kfree(addr);
 
-	return count;
+	return rc ? rc : count;
 }
 
 static DEVICE_ATTR(hsuid, 0644, qeth_l3_dev_hsuid_show,
@@ -573,7 +585,7 @@ static ssize_t qeth_l3_dev_ipato_del_store(const char *buf, size_t count,
 	mutex_lock(&card->conf_mutex);
 	rc = qeth_l3_parse_ipatoe(buf, proto, addr, &mask_bits);
 	if (!rc)
-		qeth_l3_del_ipato_entry(card, proto, addr, mask_bits);
+		rc = qeth_l3_del_ipato_entry(card, proto, addr, mask_bits);
 	mutex_unlock(&card->conf_mutex);
 	return rc ? rc : count;
 }
@@ -693,22 +705,25 @@ static const struct attribute_group qeth_device_ipato_group = {
 	.attrs = qeth_ipato_device_attrs,
 };
 
-static ssize_t qeth_l3_dev_vipa_add_show(char *buf, struct qeth_card *card,
-			enum qeth_prot_versions proto)
+static ssize_t qeth_l3_dev_ip_add_show(struct device *dev, char *buf,
+				       enum qeth_prot_versions proto,
+				       enum qeth_ip_types type)
 {
+	struct qeth_card *card = dev_get_drvdata(dev);
 	struct qeth_ipaddr *ipaddr;
 	char addr_str[40];
 	int str_len = 0;
 	int entry_len; /* length of 1 entry string, differs between v4 and v6 */
 	int i;
 
+	if (!card)
+		return -EINVAL;
+
 	entry_len = (proto == QETH_PROT_IPV4)? 12 : 40;
 	entry_len += 2; /* \n + terminator */
 	spin_lock_bh(&card->ip_lock);
 	hash_for_each(card->ip_htable, i, ipaddr, hnode) {
-		if (ipaddr->proto != proto)
-			continue;
-		if (ipaddr->type != QETH_IP_TYPE_VIPA)
+		if (ipaddr->proto != proto || ipaddr->type != type)
 			continue;
 		/* String must not be longer than PAGE_SIZE. So we check if
 		 * string length gets near PAGE_SIZE. Then we can safely display
@@ -727,14 +742,11 @@ static ssize_t qeth_l3_dev_vipa_add_show(char *buf, struct qeth_card *card,
 }
 
 static ssize_t qeth_l3_dev_vipa_add4_show(struct device *dev,
-			struct device_attribute *attr, char *buf)
+					  struct device_attribute *attr,
+					  char *buf)
 {
-	struct qeth_card *card = dev_get_drvdata(dev);
-
-	if (!card)
-		return -EINVAL;
-
-	return qeth_l3_dev_vipa_add_show(buf, card, QETH_PROT_IPV4);
+	return qeth_l3_dev_ip_add_show(dev, buf, QETH_PROT_IPV4,
+				       QETH_IP_TYPE_VIPA);
 }
 
 static int qeth_l3_parse_vipae(const char *buf, enum qeth_prot_versions proto,
@@ -784,7 +796,7 @@ static ssize_t qeth_l3_dev_vipa_del_store(const char *buf, size_t count,
 	mutex_lock(&card->conf_mutex);
 	rc = qeth_l3_parse_vipae(buf, proto, addr);
 	if (!rc)
-		qeth_l3_del_vipa(card, proto, addr);
+		rc = qeth_l3_del_vipa(card, proto, addr);
 	mutex_unlock(&card->conf_mutex);
 	return rc ? rc : count;
 }
@@ -804,14 +816,11 @@ static QETH_DEVICE_ATTR(vipa_del4, del4, 0200, NULL,
 			qeth_l3_dev_vipa_del4_store);
 
 static ssize_t qeth_l3_dev_vipa_add6_show(struct device *dev,
-				struct device_attribute *attr, char *buf)
+					  struct device_attribute *attr,
+					  char *buf)
 {
-	struct qeth_card *card = dev_get_drvdata(dev);
-
-	if (!card)
-		return -EINVAL;
-
-	return qeth_l3_dev_vipa_add_show(buf, card, QETH_PROT_IPV6);
+	return qeth_l3_dev_ip_add_show(dev, buf, QETH_PROT_IPV6,
+				       QETH_IP_TYPE_VIPA);
 }
 
 static ssize_t qeth_l3_dev_vipa_add6_store(struct device *dev,
@@ -856,48 +865,12 @@ static const struct attribute_group qeth_device_vipa_group = {
 	.attrs = qeth_vipa_device_attrs,
 };
 
-static ssize_t qeth_l3_dev_rxip_add_show(char *buf, struct qeth_card *card,
-		       enum qeth_prot_versions proto)
-{
-	struct qeth_ipaddr *ipaddr;
-	char addr_str[40];
-	int str_len = 0;
-	int entry_len; /* length of 1 entry string, differs between v4 and v6 */
-	int i;
-
-	entry_len = (proto == QETH_PROT_IPV4)? 12 : 40;
-	entry_len += 2; /* \n + terminator */
-	spin_lock_bh(&card->ip_lock);
-	hash_for_each(card->ip_htable, i, ipaddr, hnode) {
-		if (ipaddr->proto != proto)
-			continue;
-		if (ipaddr->type != QETH_IP_TYPE_RXIP)
-			continue;
-		/* String must not be longer than PAGE_SIZE. So we check if
-		 * string length gets near PAGE_SIZE. Then we can savely display
-		 * the next IPv6 address (worst case, compared to IPv4) */
-		if ((PAGE_SIZE - str_len) <= entry_len)
-			break;
-		qeth_l3_ipaddr_to_string(proto, (const u8 *)&ipaddr->u,
-			addr_str);
-		str_len += snprintf(buf + str_len, PAGE_SIZE - str_len, "%s\n",
-				    addr_str);
-	}
-	spin_unlock_bh(&card->ip_lock);
-	str_len += snprintf(buf + str_len, PAGE_SIZE - str_len, "\n");
-
-	return str_len;
-}
-
 static ssize_t qeth_l3_dev_rxip_add4_show(struct device *dev,
-			struct device_attribute *attr, char *buf)
+					  struct device_attribute *attr,
+					  char *buf)
 {
-	struct qeth_card *card = dev_get_drvdata(dev);
-
-	if (!card)
-		return -EINVAL;
-
-	return qeth_l3_dev_rxip_add_show(buf, card, QETH_PROT_IPV4);
+	return qeth_l3_dev_ip_add_show(dev, buf, QETH_PROT_IPV4,
+				       QETH_IP_TYPE_RXIP);
 }
 
 static int qeth_l3_parse_rxipe(const char *buf, enum qeth_prot_versions proto,
@@ -964,7 +937,7 @@ static ssize_t qeth_l3_dev_rxip_del_store(const char *buf, size_t count,
 	mutex_lock(&card->conf_mutex);
 	rc = qeth_l3_parse_rxipe(buf, proto, addr);
 	if (!rc)
-		qeth_l3_del_rxip(card, proto, addr);
+		rc = qeth_l3_del_rxip(card, proto, addr);
 	mutex_unlock(&card->conf_mutex);
 	return rc ? rc : count;
 }
@@ -984,14 +957,11 @@ static QETH_DEVICE_ATTR(rxip_del4, del4, 0200, NULL,
 			qeth_l3_dev_rxip_del4_store);
 
 static ssize_t qeth_l3_dev_rxip_add6_show(struct device *dev,
-		struct device_attribute *attr, char *buf)
+					  struct device_attribute *attr,
+					  char *buf)
 {
-	struct qeth_card *card = dev_get_drvdata(dev);
-
-	if (!card)
-		return -EINVAL;
-
-	return qeth_l3_dev_rxip_add_show(buf, card, QETH_PROT_IPV6);
+	return qeth_l3_dev_ip_add_show(dev, buf, QETH_PROT_IPV6,
+				       QETH_IP_TYPE_RXIP);
 }
 
 static ssize_t qeth_l3_dev_rxip_add6_store(struct device *dev,
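
The new qeth_l3_string_to_ipaddr() above delegates parsing to the shared helpers from linux/inet.h: in4_pton() and in6_pton() return 1 on success and 0 on a parse error, a srclen of -1 means "take strlen(src)", and a delim of -1 means the address is not followed by a delimiter -- which is why the negated calls map directly to -EINVAL. A quick usage sketch:

	#include <linux/inet.h>

	u8 addr[16];
	const char *end;

	if (!in4_pton("192.0.2.1", -1, addr, -1, &end))
		return -EINVAL;			/* not a valid dotted-quad string */
	if (!in6_pton("2001:db8::1", -1, addr, -1, &end))
		return -EINVAL;			/* not a valid IPv6 string */
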
diff --git a/drivers/scsi/qedf/drv_fcoe_fw_funcs.c b/drivers/scsi/qedf/drv_fcoe_fw_funcs.c
index 7d91e53..a980ef7 100644
--- a/drivers/scsi/qedf/drv_fcoe_fw_funcs.c
+++ b/drivers/scsi/qedf/drv_fcoe_fw_funcs.c
@@ -25,15 +25,17 @@ int init_initiator_rw_fcoe_task(struct fcoe_task_params *task_params,
 				u32 task_retry_id,
 				u8 fcp_cmd_payload[32])
 {
-	struct fcoe_task_context *ctx = task_params->context;
+	struct e4_fcoe_task_context *ctx = task_params->context;
+	const u8 val_byte = ctx->ystorm_ag_context.byte0;
+	struct e4_ustorm_fcoe_task_ag_ctx *u_ag_ctx;
 	struct ystorm_fcoe_task_st_ctx *y_st_ctx;
 	struct tstorm_fcoe_task_st_ctx *t_st_ctx;
-	struct ustorm_fcoe_task_ag_ctx *u_ag_ctx;
 	struct mstorm_fcoe_task_st_ctx *m_st_ctx;
 	u32 io_size, val;
 	bool slow_sgl;
 
 	memset(ctx, 0, sizeof(*(ctx)));
+	ctx->ystorm_ag_context.byte0 = val_byte;
 	slow_sgl = scsi_is_slow_sgl(sgl_task_params->num_sges,
 				    sgl_task_params->small_mid_sge);
 	io_size = (task_params->task_type == FCOE_TASK_TYPE_WRITE_INITIATOR ?
@@ -43,20 +45,20 @@ int init_initiator_rw_fcoe_task(struct fcoe_task_params *task_params,
 	y_st_ctx = &ctx->ystorm_st_context;
 	y_st_ctx->data_2_trns_rem = cpu_to_le32(io_size);
 	y_st_ctx->task_rety_identifier = cpu_to_le32(task_retry_id);
-	y_st_ctx->task_type = task_params->task_type;
+	y_st_ctx->task_type = (u8)task_params->task_type;
 	memcpy(&y_st_ctx->tx_info_union.fcp_cmd_payload,
 	       fcp_cmd_payload, sizeof(struct fcoe_fcp_cmd_payload));
 
 	/* Tstorm ctx */
 	t_st_ctx = &ctx->tstorm_st_context;
-	t_st_ctx->read_only.dev_type = (task_params->is_tape_device == 1 ?
-					FCOE_TASK_DEV_TYPE_TAPE :
-					FCOE_TASK_DEV_TYPE_DISK);
+	t_st_ctx->read_only.dev_type = (u8)(task_params->is_tape_device == 1 ?
+					    FCOE_TASK_DEV_TYPE_TAPE :
+					    FCOE_TASK_DEV_TYPE_DISK);
 	t_st_ctx->read_only.cid = cpu_to_le32(task_params->conn_cid);
 	val = cpu_to_le32(task_params->cq_rss_number);
 	t_st_ctx->read_only.glbl_q_num = val;
 	t_st_ctx->read_only.fcp_cmd_trns_size = cpu_to_le32(io_size);
-	t_st_ctx->read_only.task_type = task_params->task_type;
+	t_st_ctx->read_only.task_type = (u8)task_params->task_type;
 	SET_FIELD(t_st_ctx->read_write.flags,
 		  FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_EXP_FIRST_FRAME, 1);
 	t_st_ctx->read_write.rx_id = cpu_to_le16(FCOE_RX_ID);
@@ -88,6 +90,8 @@ int init_initiator_rw_fcoe_task(struct fcoe_task_params *task_params,
 		SET_FIELD(m_st_ctx->flags,
 			  MSTORM_FCOE_TASK_ST_CTX_TX_SGL_MODE,
 			  (slow_sgl ? SCSI_TX_SLOW_SGL : SCSI_FAST_SGL));
+		m_st_ctx->sgl_params.sgl_num_sges =
+			cpu_to_le16(sgl_task_params->num_sges);
 	} else {
 		/* Tstorm ctx */
 		SET_FIELD(t_st_ctx->read_write.flags,
@@ -101,7 +105,9 @@ int init_initiator_rw_fcoe_task(struct fcoe_task_params *task_params,
 				      sgl_task_params);
 	}
 
+	/* Init Sqe */
 	init_common_sqe(task_params, SEND_FCOE_CMD);
+
 	return 0;
 }
 
@@ -112,14 +118,16 @@ int init_initiator_midpath_unsolicited_fcoe_task(
 	struct scsi_sgl_task_params *rx_sgl_task_params,
 	u8 fw_to_place_fc_header)
 {
-	struct fcoe_task_context *ctx = task_params->context;
+	struct e4_fcoe_task_context *ctx = task_params->context;
+	const u8 val_byte = ctx->ystorm_ag_context.byte0;
+	struct e4_ustorm_fcoe_task_ag_ctx *u_ag_ctx;
 	struct ystorm_fcoe_task_st_ctx *y_st_ctx;
 	struct tstorm_fcoe_task_st_ctx *t_st_ctx;
-	struct ustorm_fcoe_task_ag_ctx *u_ag_ctx;
 	struct mstorm_fcoe_task_st_ctx *m_st_ctx;
 	u32 val;
 
 	memset(ctx, 0, sizeof(*(ctx)));
+	ctx->ystorm_ag_context.byte0 = val_byte;
 
 	/* Init Ystorm */
 	y_st_ctx = &ctx->ystorm_st_context;
@@ -129,7 +137,7 @@ int init_initiator_midpath_unsolicited_fcoe_task(
 	SET_FIELD(y_st_ctx->sgl_mode,
 		  YSTORM_FCOE_TASK_ST_CTX_TX_SGL_MODE, SCSI_FAST_SGL);
 	y_st_ctx->data_2_trns_rem = cpu_to_le32(task_params->tx_io_size);
-	y_st_ctx->task_type = task_params->task_type;
+	y_st_ctx->task_type = (u8)task_params->task_type;
 	memcpy(&y_st_ctx->tx_info_union.tx_params.mid_path,
 	       mid_path_fc_header, sizeof(struct fcoe_tx_mid_path_params));
 
@@ -148,7 +156,7 @@ int init_initiator_midpath_unsolicited_fcoe_task(
 	t_st_ctx->read_only.cid = cpu_to_le32(task_params->conn_cid);
 	val = cpu_to_le32(task_params->cq_rss_number);
 	t_st_ctx->read_only.glbl_q_num = val;
-	t_st_ctx->read_only.task_type = task_params->task_type;
+	t_st_ctx->read_only.task_type = (u8)task_params->task_type;
 	SET_FIELD(t_st_ctx->read_write.flags,
 		  FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_EXP_FIRST_FRAME, 1);
 	t_st_ctx->read_write.rx_id = cpu_to_le16(FCOE_RX_ID);
@@ -182,9 +190,10 @@ int init_initiator_cleanup_fcoe_task(struct fcoe_task_params *task_params)
 }
 
 int init_initiator_sequence_recovery_fcoe_task(
-	struct fcoe_task_params *task_params, u32 off)
+	struct fcoe_task_params *task_params, u32 desired_offset)
 {
 	init_common_sqe(task_params, FCOE_SEQUENCE_RECOVERY);
-	task_params->sqe->additional_info_union.seq_rec_updated_offset = off;
+	task_params->sqe->additional_info_union.seq_rec_updated_offset =
+								desired_offset;
 	return 0;
 }
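
The pattern to note in both task-init functions above is the save/restore
bracketing the memset(): on the E4 context layout, byte0 of the ystorm
aggregative context must survive the wholesale clear of the task context
(the matching qedi change calls its equivalent field cdu_validation, which
suggests CDU validation data, though this excerpt does not say so for
FCoE). Reduced to a minimal sketch, using only names visible in the hunks
(clear_task_ctx is an invented name):

	static void clear_task_ctx(struct e4_fcoe_task_context *ctx)
	{
		/* Save the byte the firmware expects to persist. */
		const u8 val_byte = ctx->ystorm_ag_context.byte0;

		memset(ctx, 0, sizeof(*ctx));	/* wipe everything else */
		ctx->ystorm_ag_context.byte0 = val_byte;	/* restore */
	}
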
diff --git a/drivers/scsi/qedf/drv_fcoe_fw_funcs.h b/drivers/scsi/qedf/drv_fcoe_fw_funcs.h
index f9c50fa..b5c236e 100644
--- a/drivers/scsi/qedf/drv_fcoe_fw_funcs.h
+++ b/drivers/scsi/qedf/drv_fcoe_fw_funcs.h
@@ -13,7 +13,7 @@
 
 struct fcoe_task_params {
 	/* Output parameter [set/filled by the HSI function] */
-	struct fcoe_task_context *context;
+	struct e4_fcoe_task_context *context;
 
 	/* Output parameter [set/filled by the HSI function] */
 	struct fcoe_wqe *sqe;
diff --git a/drivers/scsi/qedf/qedf.h b/drivers/scsi/qedf/qedf.h
index 9bf7b22..c105a2e 100644
--- a/drivers/scsi/qedf/qedf.h
+++ b/drivers/scsi/qedf/qedf.h
@@ -129,7 +129,7 @@ struct qedf_ioreq {
 	struct delayed_work timeout_work;
 	struct completion tm_done;
 	struct completion abts_done;
-	struct fcoe_task_context *task;
+	struct e4_fcoe_task_context *task;
 	struct fcoe_task_params *task_params;
 	struct scsi_sgl_task_params *sgl_task_params;
 	int idx;
@@ -465,7 +465,7 @@ extern void qedf_cmd_timer_set(struct qedf_ctx *qedf, struct qedf_ioreq *io_req,
 	unsigned int timer_msec);
 extern int qedf_init_mp_req(struct qedf_ioreq *io_req);
 extern void qedf_init_mp_task(struct qedf_ioreq *io_req,
-	struct fcoe_task_context *task_ctx, struct fcoe_wqe *wqe);
+	struct e4_fcoe_task_context *task_ctx, struct fcoe_wqe *sqe);
 extern u16 qedf_get_sqe_idx(struct qedf_rport *fcport);
 extern void qedf_ring_doorbell(struct qedf_rport *fcport);
 extern void qedf_process_els_compl(struct qedf_ctx *qedf, struct fcoe_cqe *cqe,
diff --git a/drivers/scsi/qedf/qedf_els.c b/drivers/scsi/qedf/qedf_els.c
index 59c18ca..aa22b11 100644
--- a/drivers/scsi/qedf/qedf_els.c
+++ b/drivers/scsi/qedf/qedf_els.c
@@ -19,7 +19,7 @@ static int qedf_initiate_els(struct qedf_rport *fcport, unsigned int op,
 	struct qedf_ioreq *els_req;
 	struct qedf_mp_req *mp_req;
 	struct fc_frame_header *fc_hdr;
-	struct fcoe_task_context *task;
+	struct e4_fcoe_task_context *task;
 	int rc = 0;
 	uint32_t did, sid;
 	uint16_t xid;
diff --git a/drivers/scsi/qedf/qedf_hsi.h b/drivers/scsi/qedf/qedf_hsi.h
index 7faef80..503c1ae 100644
--- a/drivers/scsi/qedf/qedf_hsi.h
+++ b/drivers/scsi/qedf/qedf_hsi.h
@@ -225,19 +225,6 @@ enum fcoe_cqe_type {
 	MAX_FCOE_CQE_TYPE
 };
 
-
-/*
- * FCoE device type
- */
-enum fcoe_device_type {
-	FCOE_TASK_DEV_TYPE_DISK,
-	FCOE_TASK_DEV_TYPE_TAPE,
-	MAX_FCOE_DEVICE_TYPE
-};
-
-
-
-
 /*
  * FCoE fast path error codes
  */
@@ -332,31 +319,6 @@ enum fcoe_sp_error_code {
 	MAX_FCOE_SP_ERROR_CODE
 };
 
-
-/*
- * FCoE SQE request type
- */
-enum fcoe_sqe_request_type {
-	SEND_FCOE_CMD,
-	SEND_FCOE_MIDPATH,
-	SEND_FCOE_ABTS_REQUEST,
-	FCOE_EXCHANGE_CLEANUP,
-	FCOE_SEQUENCE_RECOVERY,
-	SEND_FCOE_XFER_RDY,
-	SEND_FCOE_RSP,
-	SEND_FCOE_RSP_WITH_SENSE_DATA,
-	SEND_FCOE_TARGET_DATA,
-	SEND_FCOE_INITIATOR_DATA,
-	/*
-	 * Xfer Continuation (==1) ready to be sent. Previous XFERs data
-	 * received successfully.
-	 */
-	SEND_FCOE_XFER_CONTINUATION_RDY,
-	SEND_FCOE_TARGET_ABTS_RSP,
-	MAX_FCOE_SQE_REQUEST_TYPE
-};
-
-
 /*
  * FCoE task TX state
  */
@@ -389,34 +351,4 @@ enum fcoe_task_tx_state {
 	MAX_FCOE_TASK_TX_STATE
 };
 
-
-/*
- * FCoE task type
- */
-enum fcoe_task_type {
-	FCOE_TASK_TYPE_WRITE_INITIATOR,
-	FCOE_TASK_TYPE_READ_INITIATOR,
-	FCOE_TASK_TYPE_MIDPATH,
-	FCOE_TASK_TYPE_UNSOLICITED,
-	FCOE_TASK_TYPE_ABTS,
-	FCOE_TASK_TYPE_EXCHANGE_CLEANUP,
-	FCOE_TASK_TYPE_SEQUENCE_CLEANUP,
-	FCOE_TASK_TYPE_WRITE_TARGET,
-	FCOE_TASK_TYPE_READ_TARGET,
-	FCOE_TASK_TYPE_RSP,
-	FCOE_TASK_TYPE_RSP_SENSE_DATA,
-	FCOE_TASK_TYPE_ABTS_TARGET,
-	FCOE_TASK_TYPE_ENUM_SIZE,
-	MAX_FCOE_TASK_TYPE
-};
-
-struct scsi_glbl_queue_entry {
-	/* Start physical address for the RQ (receive queue) PBL. */
-	struct regpair rq_pbl_addr;
-	/* Start physical address for the CQ (completion queue) PBL. */
-	struct regpair cq_pbl_addr;
-	/* Start physical address for the CMDQ (command queue) PBL. */
-	struct regpair cmdq_pbl_addr;
-};
-
 #endif /* __QEDF_HSI__ */
diff --git a/drivers/scsi/qedf/qedf_io.c b/drivers/scsi/qedf/qedf_io.c
index ded3860..b15e695 100644
--- a/drivers/scsi/qedf/qedf_io.c
+++ b/drivers/scsi/qedf/qedf_io.c
@@ -579,7 +579,7 @@ static void qedf_build_fcp_cmnd(struct qedf_ioreq *io_req,
 }
 
 static void  qedf_init_task(struct qedf_rport *fcport, struct fc_lport *lport,
-	struct qedf_ioreq *io_req, struct fcoe_task_context *task_ctx,
+	struct qedf_ioreq *io_req, struct e4_fcoe_task_context *task_ctx,
 	struct fcoe_wqe *sqe)
 {
 	enum fcoe_task_type task_type;
@@ -597,7 +597,7 @@ static void  qedf_init_task(struct qedf_rport *fcport, struct fc_lport *lport,
 
 	/* Note init_initiator_rw_fcoe_task memsets the task context */
 	io_req->task = task_ctx;
-	memset(task_ctx, 0, sizeof(struct fcoe_task_context));
+	memset(task_ctx, 0, sizeof(struct e4_fcoe_task_context));
 	memset(io_req->task_params, 0, sizeof(struct fcoe_task_params));
 	memset(io_req->sgl_task_params, 0, sizeof(struct scsi_sgl_task_params));
 
@@ -673,7 +673,7 @@ static void  qedf_init_task(struct qedf_rport *fcport, struct fc_lport *lport,
 }
 
 void qedf_init_mp_task(struct qedf_ioreq *io_req,
-	struct fcoe_task_context *task_ctx, struct fcoe_wqe *sqe)
+	struct e4_fcoe_task_context *task_ctx, struct fcoe_wqe *sqe)
 {
 	struct qedf_mp_req *mp_req = &(io_req->mp_req);
 	struct qedf_rport *fcport = io_req->fcport;
@@ -691,7 +691,7 @@ void qedf_init_mp_task(struct qedf_ioreq *io_req,
 
 	memset(&tx_sgl_task_params, 0, sizeof(struct scsi_sgl_task_params));
 	memset(&rx_sgl_task_params, 0, sizeof(struct scsi_sgl_task_params));
-	memset(task_ctx, 0, sizeof(struct fcoe_task_context));
+	memset(task_ctx, 0, sizeof(struct e4_fcoe_task_context));
 	memset(&task_fc_hdr, 0, sizeof(struct fcoe_tx_mid_path_params));
 
 	/* Setup the task from io_req for easy reference */
@@ -844,7 +844,7 @@ int qedf_post_io_req(struct qedf_rport *fcport, struct qedf_ioreq *io_req)
 	struct Scsi_Host *host = sc_cmd->device->host;
 	struct fc_lport *lport = shost_priv(host);
 	struct qedf_ctx *qedf = lport_priv(lport);
-	struct fcoe_task_context *task_ctx;
+	struct e4_fcoe_task_context *task_ctx;
 	u16 xid;
 	enum fcoe_task_type req_type = 0;
 	struct fcoe_wqe *sqe;
@@ -1065,7 +1065,7 @@ void qedf_scsi_completion(struct qedf_ctx *qedf, struct fcoe_cqe *cqe,
 	struct qedf_ioreq *io_req)
 {
 	u16 xid, rval;
-	struct fcoe_task_context *task_ctx;
+	struct e4_fcoe_task_context *task_ctx;
 	struct scsi_cmnd *sc_cmd;
 	struct fcoe_cqe_rsp_info *fcp_rsp;
 	struct qedf_rport *fcport;
@@ -1722,7 +1722,7 @@ int qedf_initiate_cleanup(struct qedf_ioreq *io_req,
 	struct qedf_rport *fcport;
 	struct qedf_ctx *qedf;
 	uint16_t xid;
-	struct fcoe_task_context *task;
+	struct e4_fcoe_task_context *task;
 	int tmo = 0;
 	int rc = SUCCESS;
 	unsigned long flags;
@@ -1835,7 +1835,7 @@ static int qedf_execute_tmf(struct qedf_rport *fcport, struct scsi_cmnd *sc_cmd,
 	uint8_t tm_flags)
 {
 	struct qedf_ioreq *io_req;
-	struct fcoe_task_context *task;
+	struct e4_fcoe_task_context *task;
 	struct qedf_ctx *qedf = fcport->qedf;
 	struct fc_lport *lport = qedf->lport;
 	int rc = 0;
@@ -2005,17 +2005,18 @@ void qedf_process_unsol_compl(struct qedf_ctx *qedf, uint16_t que_idx,
 	struct qedf_io_work *io_work;
 	u32 bdq_idx;
 	void *bdq_addr;
+	struct scsi_bd *p_bd_info;
 
+	p_bd_info = &cqe->cqe_info.unsolic_info.bd_info;
 	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_UNSOL,
-	    "address.hi=%x address.lo=%x opaque_data.hi=%x "
-	    "opaque_data.lo=%x bdq_prod_idx=%u len=%u.\n",
-	    le32_to_cpu(cqe->cqe_info.unsolic_info.bd_info.address.hi),
-	    le32_to_cpu(cqe->cqe_info.unsolic_info.bd_info.address.lo),
-	    le32_to_cpu(cqe->cqe_info.unsolic_info.bd_info.opaque.hi),
-	    le32_to_cpu(cqe->cqe_info.unsolic_info.bd_info.opaque.lo),
-	    qedf->bdq_prod_idx, pktlen);
+		  "address.hi=%x, address.lo=%x, opaque_data.hi=%x, opaque_data.lo=%x, bdq_prod_idx=%u, len=%u\n",
+		  le32_to_cpu(p_bd_info->address.hi),
+		  le32_to_cpu(p_bd_info->address.lo),
+		  le32_to_cpu(p_bd_info->opaque.fcoe_opaque.hi),
+		  le32_to_cpu(p_bd_info->opaque.fcoe_opaque.lo),
+		  qedf->bdq_prod_idx, pktlen);
 
-	bdq_idx = le32_to_cpu(cqe->cqe_info.unsolic_info.bd_info.opaque.lo);
+	bdq_idx = le32_to_cpu(p_bd_info->opaque.fcoe_opaque.lo);
 	if (bdq_idx >= QEDF_BDQ_SIZE) {
 		QEDF_ERR(&(qedf->dbg_ctx), "bdq_idx is out of range %d.\n",
 		    bdq_idx);
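
The unsolicited-completion hunk is largely a readability change: the deeply
nested cqe->cqe_info.unsolic_info.bd_info is hoisted into a local scsi_bd
pointer, and the opaque cookie moves to the new fcoe_opaque union member.
The resulting access pattern, with every on-wire field converted from
little-endian before use (a condensed sketch of the code above):

	struct scsi_bd *p_bd_info = &cqe->cqe_info.unsolic_info.bd_info;
	u32 bdq_idx;

	/* Descriptor fields are little-endian on the wire. */
	bdq_idx = le32_to_cpu(p_bd_info->opaque.fcoe_opaque.lo);
	if (bdq_idx >= QEDF_BDQ_SIZE)
		return;		/* firmware handed back a bad index */
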
diff --git a/drivers/scsi/qedf/qedf_main.c b/drivers/scsi/qedf/qedf_main.c
index 7c00645..40800dd 100644
--- a/drivers/scsi/qedf/qedf_main.c
+++ b/drivers/scsi/qedf/qedf_main.c
@@ -1860,7 +1860,7 @@ static bool qedf_fp_has_work(struct qedf_fastpath *fp)
 	struct qedf_ctx *qedf = fp->qedf;
 	struct global_queue *que;
 	struct qed_sb_info *sb_info = fp->sb_info;
-	struct status_block *sb = sb_info->sb_virt;
+	struct status_block_e4 *sb = sb_info->sb_virt;
 	u16 prod_idx;
 
 	/* Get the pointer to the global CQ this completion is on */
@@ -1887,7 +1887,7 @@ static bool qedf_process_completions(struct qedf_fastpath *fp)
 {
 	struct qedf_ctx *qedf = fp->qedf;
 	struct qed_sb_info *sb_info = fp->sb_info;
-	struct status_block *sb = sb_info->sb_virt;
+	struct status_block_e4 *sb = sb_info->sb_virt;
 	struct global_queue *que;
 	u16 prod_idx;
 	struct fcoe_cqe *cqe;
@@ -2352,12 +2352,12 @@ void qedf_fp_io_handler(struct work_struct *work)
 static int qedf_alloc_and_init_sb(struct qedf_ctx *qedf,
 	struct qed_sb_info *sb_info, u16 sb_id)
 {
-	struct status_block *sb_virt;
+	struct status_block_e4 *sb_virt;
 	dma_addr_t sb_phys;
 	int ret;
 
 	sb_virt = dma_alloc_coherent(&qedf->pdev->dev,
-	    sizeof(struct status_block), &sb_phys, GFP_KERNEL);
+	    sizeof(struct status_block_e4), &sb_phys, GFP_KERNEL);
 
 	if (!sb_virt) {
 		QEDF_ERR(&(qedf->dbg_ctx), "Status block allocation failed "
@@ -2623,9 +2623,9 @@ static int qedf_alloc_bdq(struct qedf_ctx *qedf)
 	for (i = 0; i < QEDF_BDQ_SIZE; i++) {
 		pbl->address.hi = cpu_to_le32(U64_HI(qedf->bdq[i].buf_dma));
 		pbl->address.lo = cpu_to_le32(U64_LO(qedf->bdq[i].buf_dma));
-		pbl->opaque.hi = 0;
+		pbl->opaque.fcoe_opaque.hi = 0;
 		/* Opaque lo data is an index into the BDQ array */
-		pbl->opaque.lo = cpu_to_le32(i);
+		pbl->opaque.fcoe_opaque.lo = cpu_to_le32(i);
 		pbl++;
 	}
 
diff --git a/drivers/scsi/qedf/qedf_version.h b/drivers/scsi/qedf/qedf_version.h
index 397b3b8..c247805 100644
--- a/drivers/scsi/qedf/qedf_version.h
+++ b/drivers/scsi/qedf/qedf_version.h
@@ -7,9 +7,9 @@
  *  this source tree.
  */
 
-#define QEDF_VERSION		"8.20.5.0"
+#define QEDF_VERSION		"8.33.0.20"
 #define QEDF_DRIVER_MAJOR_VER		8
-#define QEDF_DRIVER_MINOR_VER		20
-#define QEDF_DRIVER_REV_VER		5
-#define QEDF_DRIVER_ENG_VER		0
+#define QEDF_DRIVER_MINOR_VER		33
+#define QEDF_DRIVER_REV_VER		0
+#define QEDF_DRIVER_ENG_VER		20
 
diff --git a/drivers/scsi/qedi/qedi_debugfs.c b/drivers/scsi/qedi/qedi_debugfs.c
index 39d7781..fd8a1ee 100644
--- a/drivers/scsi/qedi/qedi_debugfs.c
+++ b/drivers/scsi/qedi/qedi_debugfs.c
@@ -152,7 +152,7 @@ qedi_gbl_ctx_show(struct seq_file *s, void *unused)
 {
 	struct qedi_fastpath *fp = NULL;
 	struct qed_sb_info *sb_info = NULL;
-	struct status_block *sb = NULL;
+	struct status_block_e4 *sb = NULL;
 	struct global_queue *que = NULL;
 	int id;
 	u16 prod_idx;
@@ -168,7 +168,7 @@ qedi_gbl_ctx_show(struct seq_file *s, void *unused)
 		sb_info = fp->sb_info;
 		sb = sb_info->sb_virt;
 		prod_idx = (sb->pi_array[QEDI_PROTO_CQ_PROD_IDX] &
-			    STATUS_BLOCK_PROD_INDEX_MASK);
+			    STATUS_BLOCK_E4_PROD_INDEX_MASK);
 		seq_printf(s, "SB PROD IDX: %d\n", prod_idx);
 		que = qedi->global_queues[fp->sb_id];
 		seq_printf(s, "DRV CONS IDX: %d\n", que->cq_cons_idx);
diff --git a/drivers/scsi/qedi/qedi_fw.c b/drivers/scsi/qedi/qedi_fw.c
index bd302d3..092e8f9 100644
--- a/drivers/scsi/qedi/qedi_fw.c
+++ b/drivers/scsi/qedi/qedi_fw.c
@@ -87,7 +87,7 @@ static void qedi_process_text_resp(struct qedi_ctx *qedi,
 {
 	struct iscsi_conn *conn = qedi_conn->cls_conn->dd_data;
 	struct iscsi_session *session = conn->session;
-	struct iscsi_task_context *task_ctx;
+	struct e4_iscsi_task_context *task_ctx;
 	struct iscsi_text_rsp *resp_hdr_ptr;
 	struct iscsi_text_response_hdr *cqe_text_response;
 	struct qedi_cmd *cmd;
@@ -260,7 +260,7 @@ static void qedi_process_login_resp(struct qedi_ctx *qedi,
 {
 	struct iscsi_conn *conn = qedi_conn->cls_conn->dd_data;
 	struct iscsi_session *session = conn->session;
-	struct iscsi_task_context *task_ctx;
+	struct e4_iscsi_task_context *task_ctx;
 	struct iscsi_login_rsp *resp_hdr_ptr;
 	struct iscsi_login_response_hdr *cqe_login_response;
 	struct qedi_cmd *cmd;
@@ -326,7 +326,7 @@ static void qedi_get_rq_bdq_buf(struct qedi_ctx *qedi,
 		  (qedi->bdq_prod_idx % qedi->rq_num_entries));
 
 	/* Obtain buffer address from rqe_opaque */
-	idx = cqe->rqe_opaque.lo;
+	idx = cqe->rqe_opaque;
 	if (idx > (QEDI_BDQ_NUM - 1)) {
 		QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_CONN,
 			  "wrong idx %d returned by FW, dropping the unsolicited pkt\n",
@@ -335,8 +335,7 @@ static void qedi_get_rq_bdq_buf(struct qedi_ctx *qedi,
 	}
 
 	QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_CONN,
-		  "rqe_opaque.lo [0x%p], rqe_opaque.hi [0x%p], idx [%d]\n",
-		  cqe->rqe_opaque.lo, cqe->rqe_opaque.hi, idx);
+		  "rqe_opaque [0x%p], idx [%d]\n", cqe->rqe_opaque, idx);
 
 	QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_CONN,
 		  "unsol_cqe_type = %d\n", cqe->unsol_cqe_type);
@@ -363,7 +362,7 @@ static void qedi_put_rq_bdq_buf(struct qedi_ctx *qedi,
 	struct scsi_bd *pbl;
 
 	/* Obtain buffer address from rqe_opaque */
-	idx = cqe->rqe_opaque.lo;
+	idx = cqe->rqe_opaque;
 	if (idx > (QEDI_BDQ_NUM - 1)) {
 		QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_CONN,
 			  "wrong idx %d returned by FW, dropping the unsolicited pkt\n",
@@ -378,8 +377,10 @@ static void qedi_put_rq_bdq_buf(struct qedi_ctx *qedi,
 	QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_CONN,
 		  "pbl [0x%p] pbl->address hi [0x%llx] lo [0x%llx] idx [%d]\n",
 		  pbl, pbl->address.hi, pbl->address.lo, idx);
-	pbl->opaque.hi = 0;
-	pbl->opaque.lo = cpu_to_le32(QEDI_U64_LO(idx));
+	pbl->opaque.iscsi_opaque.reserved_zero[0] = 0;
+	pbl->opaque.iscsi_opaque.reserved_zero[1] = 0;
+	pbl->opaque.iscsi_opaque.reserved_zero[2] = 0;
+	pbl->opaque.iscsi_opaque.opaque = cpu_to_le32(idx);
 
 	/* Increment producer to let f/w know we've handled the frame */
 	qedi->bdq_prod_idx += count;
@@ -1017,7 +1018,7 @@ int qedi_send_iscsi_login(struct qedi_conn *qedi_conn,
 	struct scsi_sgl_task_params tx_sgl_task_params;
 	struct scsi_sgl_task_params rx_sgl_task_params;
 	struct iscsi_task_params task_params;
-	struct iscsi_task_context *fw_task_ctx;
+	struct e4_iscsi_task_context *fw_task_ctx;
 	struct qedi_ctx *qedi = qedi_conn->qedi;
 	struct iscsi_login_req *login_hdr;
 	struct scsi_sge *resp_sge = NULL;
@@ -1037,8 +1038,9 @@ int qedi_send_iscsi_login(struct qedi_conn *qedi_conn,
 		return -ENOMEM;
 
 	fw_task_ctx =
-	     (struct iscsi_task_context *)qedi_get_task_mem(&qedi->tasks, tid);
-	memset(fw_task_ctx, 0, sizeof(struct iscsi_task_context));
+	     (struct e4_iscsi_task_context *)qedi_get_task_mem(&qedi->tasks,
+							       tid);
+	memset(fw_task_ctx, 0, sizeof(struct e4_iscsi_task_context));
 
 	qedi_cmd->task_id = tid;
 
@@ -1119,7 +1121,7 @@ int qedi_send_iscsi_logout(struct qedi_conn *qedi_conn,
 	struct scsi_sgl_task_params tx_sgl_task_params;
 	struct scsi_sgl_task_params rx_sgl_task_params;
 	struct iscsi_task_params task_params;
-	struct iscsi_task_context *fw_task_ctx;
+	struct e4_iscsi_task_context *fw_task_ctx;
 	struct iscsi_logout *logout_hdr = NULL;
 	struct qedi_ctx *qedi = qedi_conn->qedi;
 	struct qedi_cmd *qedi_cmd;
@@ -1137,8 +1139,9 @@ int qedi_send_iscsi_logout(struct qedi_conn *qedi_conn,
 		return -ENOMEM;
 
 	fw_task_ctx =
-	     (struct iscsi_task_context *)qedi_get_task_mem(&qedi->tasks, tid);
-	memset(fw_task_ctx, 0, sizeof(struct iscsi_task_context));
+	     (struct e4_iscsi_task_context *)qedi_get_task_mem(&qedi->tasks,
+							       tid);
+	memset(fw_task_ctx, 0, sizeof(struct e4_iscsi_task_context));
 
 	qedi_cmd->task_id = tid;
 
@@ -1467,7 +1470,7 @@ static int qedi_send_iscsi_tmf(struct qedi_conn *qedi_conn,
 	struct iscsi_tmf_request_hdr tmf_pdu_header;
 	struct iscsi_task_params task_params;
 	struct qedi_ctx *qedi = qedi_conn->qedi;
-	struct iscsi_task_context *fw_task_ctx;
+	struct e4_iscsi_task_context *fw_task_ctx;
 	struct iscsi_conn *conn = qedi_conn->cls_conn->dd_data;
 	struct iscsi_task *ctask;
 	struct iscsi_tm *tmf_hdr;
@@ -1490,8 +1493,9 @@ static int qedi_send_iscsi_tmf(struct qedi_conn *qedi_conn,
 		return -ENOMEM;
 
 	fw_task_ctx =
-	     (struct iscsi_task_context *)qedi_get_task_mem(&qedi->tasks, tid);
-	memset(fw_task_ctx, 0, sizeof(struct iscsi_task_context));
+	     (struct e4_iscsi_task_context *)qedi_get_task_mem(&qedi->tasks,
+							       tid);
+	memset(fw_task_ctx, 0, sizeof(struct e4_iscsi_task_context));
 
 	qedi_cmd->task_id = tid;
 
@@ -1605,7 +1609,7 @@ int qedi_send_iscsi_text(struct qedi_conn *qedi_conn,
 	struct scsi_sgl_task_params tx_sgl_task_params;
 	struct scsi_sgl_task_params rx_sgl_task_params;
 	struct iscsi_task_params task_params;
-	struct iscsi_task_context *fw_task_ctx;
+	struct e4_iscsi_task_context *fw_task_ctx;
 	struct qedi_ctx *qedi = qedi_conn->qedi;
 	struct iscsi_text *text_hdr;
 	struct scsi_sge *req_sge = NULL;
@@ -1627,8 +1631,9 @@ int qedi_send_iscsi_text(struct qedi_conn *qedi_conn,
 		return -ENOMEM;
 
 	fw_task_ctx =
-	     (struct iscsi_task_context *)qedi_get_task_mem(&qedi->tasks, tid);
-	memset(fw_task_ctx, 0, sizeof(struct iscsi_task_context));
+	     (struct e4_iscsi_task_context *)qedi_get_task_mem(&qedi->tasks,
+							       tid);
+	memset(fw_task_ctx, 0, sizeof(struct e4_iscsi_task_context));
 
 	qedi_cmd->task_id = tid;
 
@@ -1705,7 +1710,7 @@ int qedi_send_iscsi_nopout(struct qedi_conn *qedi_conn,
 	struct scsi_sgl_task_params rx_sgl_task_params;
 	struct iscsi_task_params task_params;
 	struct qedi_ctx *qedi = qedi_conn->qedi;
-	struct iscsi_task_context *fw_task_ctx;
+	struct e4_iscsi_task_context *fw_task_ctx;
 	struct iscsi_nopout *nopout_hdr;
 	struct scsi_sge *resp_sge = NULL;
 	struct qedi_cmd *qedi_cmd;
@@ -1725,8 +1730,9 @@ int qedi_send_iscsi_nopout(struct qedi_conn *qedi_conn,
 		return -ENOMEM;
 
 	fw_task_ctx =
-	     (struct iscsi_task_context *)qedi_get_task_mem(&qedi->tasks, tid);
-	memset(fw_task_ctx, 0, sizeof(struct iscsi_task_context));
+	     (struct e4_iscsi_task_context *)qedi_get_task_mem(&qedi->tasks,
+							       tid);
+	memset(fw_task_ctx, 0, sizeof(struct e4_iscsi_task_context));
 
 	qedi_cmd->task_id = tid;
 
@@ -2046,7 +2052,7 @@ int qedi_iscsi_send_ioreq(struct iscsi_task *task)
 	struct iscsi_task_params task_params;
 	struct iscsi_conn_params conn_params;
 	struct scsi_initiator_cmd_params cmd_params;
-	struct iscsi_task_context *fw_task_ctx;
+	struct e4_iscsi_task_context *fw_task_ctx;
 	struct iscsi_cls_conn *cls_conn;
 	struct iscsi_scsi_req *hdr = (struct iscsi_scsi_req *)task->hdr;
 	enum iscsi_task_type task_type = MAX_ISCSI_TASK_TYPE;
@@ -2069,8 +2075,9 @@ int qedi_iscsi_send_ioreq(struct iscsi_task *task)
 		return -ENOMEM;
 
 	fw_task_ctx =
-	     (struct iscsi_task_context *)qedi_get_task_mem(&qedi->tasks, tid);
-	memset(fw_task_ctx, 0, sizeof(struct iscsi_task_context));
+	     (struct e4_iscsi_task_context *)qedi_get_task_mem(&qedi->tasks,
+							       tid);
+	memset(fw_task_ctx, 0, sizeof(struct e4_iscsi_task_context));
 
 	cmd->task_id = tid;
 
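
Each PDU send path in this file now repeats the same three steps: map the
tid to its task memory, cast to the E4 context type, and zero the context.
A hypothetical helper could factor that repetition (not part of this
patch; qedi_get_cleared_task_ctx is an invented name):

static struct e4_iscsi_task_context *
qedi_get_cleared_task_ctx(struct qedi_ctx *qedi, s16 tid)
{
	struct e4_iscsi_task_context *ctx;

	ctx = (struct e4_iscsi_task_context *)qedi_get_task_mem(&qedi->tasks,
								tid);
	memset(ctx, 0, sizeof(*ctx));
	return ctx;
}
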
diff --git a/drivers/scsi/qedi/qedi_fw_api.c b/drivers/scsi/qedi/qedi_fw_api.c
index 7df32a6..a269da1 100644
--- a/drivers/scsi/qedi/qedi_fw_api.c
+++ b/drivers/scsi/qedi/qedi_fw_api.c
@@ -203,12 +203,15 @@ static void init_default_iscsi_task(struct iscsi_task_params *task_params,
 				    struct data_hdr *pdu_header,
 				    enum iscsi_task_type task_type)
 {
-	struct iscsi_task_context *context;
-	u16 index;
+	struct e4_iscsi_task_context *context;
 	u32 val;
+	u16 index;
+	u8 val_byte;
 
 	context = task_params->context;
+	val_byte = context->mstorm_ag_context.cdu_validation;
 	memset(context, 0, sizeof(*context));
+	context->mstorm_ag_context.cdu_validation = val_byte;
 
 	for (index = 0; index <
 	     ARRAY_SIZE(context->ystorm_st_context.pdu_hdr.data.data);
@@ -222,7 +225,7 @@ static void init_default_iscsi_task(struct iscsi_task_params *task_params,
 					    cpu_to_le16(task_params->conn_icid);
 
 	SET_FIELD(context->ustorm_ag_context.flags1,
-		  USTORM_ISCSI_TASK_AG_CTX_R2T2RECV, 1);
+		  E4_USTORM_ISCSI_TASK_AG_CTX_R2T2RECV, 1);
 
 	context->ustorm_st_context.task_type = task_type;
 	context->ustorm_st_context.cq_rss_number = task_params->cq_rss_number;
@@ -252,10 +255,9 @@ void init_initiator_rw_cdb_ystorm_context(struct ystorm_iscsi_task_st_ctx *ystc,
 
 static
 void init_ustorm_task_contexts(struct ustorm_iscsi_task_st_ctx *ustorm_st_cxt,
-			       struct ustorm_iscsi_task_ag_ctx *ustorm_ag_cxt,
-			       u32 remaining_recv_len,
-			       u32 expected_data_transfer_len,
-			       u8 num_sges, bool tx_dif_conn_err_en)
+			struct e4_ustorm_iscsi_task_ag_ctx *ustorm_ag_cxt,
+			u32 remaining_recv_len, u32 expected_data_transfer_len,
+			u8 num_sges, bool tx_dif_conn_err_en)
 {
 	u32 val;
 
@@ -265,12 +267,12 @@ void init_ustorm_task_contexts(struct ustorm_iscsi_task_st_ctx *ustorm_st_cxt,
 	ustorm_st_cxt->exp_data_transfer_len = val;
 	SET_FIELD(ustorm_st_cxt->reg1.reg1_map, ISCSI_REG1_NUM_SGES, num_sges);
 	SET_FIELD(ustorm_ag_cxt->flags2,
-		  USTORM_ISCSI_TASK_AG_CTX_DIF_ERROR_CF_EN,
+		  E4_USTORM_ISCSI_TASK_AG_CTX_DIF_ERROR_CF_EN,
 		  tx_dif_conn_err_en ? 1 : 0);
 }
 
 static
-void set_rw_exp_data_acked_and_cont_len(struct iscsi_task_context *context,
+void set_rw_exp_data_acked_and_cont_len(struct e4_iscsi_task_context *context,
 					struct iscsi_conn_params  *conn_params,
 					enum iscsi_task_type task_type,
 					u32 task_size,
@@ -342,56 +344,57 @@ void init_rtdif_task_context(struct rdif_task_context *rdif_context,
 			     cpu_to_le16(dif_task_params->application_tag_mask);
 		SET_FIELD(rdif_context->flags0, RDIF_TASK_CONTEXT_CRC_SEED,
 			  dif_task_params->crc_seed ? 1 : 0);
-		SET_FIELD(rdif_context->flags0, RDIF_TASK_CONTEXT_HOSTGUARDTYPE,
+		SET_FIELD(rdif_context->flags0,
+			  RDIF_TASK_CONTEXT_HOST_GUARD_TYPE,
 			  dif_task_params->host_guard_type);
 		SET_FIELD(rdif_context->flags0,
-			  RDIF_TASK_CONTEXT_PROTECTIONTYPE,
+			  RDIF_TASK_CONTEXT_PROTECTION_TYPE,
 			  dif_task_params->protection_type);
 		SET_FIELD(rdif_context->flags0,
-			  RDIF_TASK_CONTEXT_INITIALREFTAGVALID, 1);
+			  RDIF_TASK_CONTEXT_INITIAL_REF_TAG_VALID, 1);
 		SET_FIELD(rdif_context->flags0,
-			  RDIF_TASK_CONTEXT_KEEPREFTAGCONST,
+			  RDIF_TASK_CONTEXT_KEEP_REF_TAG_CONST,
 			  dif_task_params->keep_ref_tag_const ? 1 : 0);
 		SET_FIELD(rdif_context->flags1,
-			  RDIF_TASK_CONTEXT_VALIDATEAPPTAG,
+			  RDIF_TASK_CONTEXT_VALIDATE_APP_TAG,
 			  (dif_task_params->validate_app_tag &&
 			  dif_task_params->dif_on_network) ? 1 : 0);
 		SET_FIELD(rdif_context->flags1,
-			  RDIF_TASK_CONTEXT_VALIDATEGUARD,
+			  RDIF_TASK_CONTEXT_VALIDATE_GUARD,
 			  (dif_task_params->validate_guard &&
 			  dif_task_params->dif_on_network) ? 1 : 0);
 		SET_FIELD(rdif_context->flags1,
-			  RDIF_TASK_CONTEXT_VALIDATEREFTAG,
+			  RDIF_TASK_CONTEXT_VALIDATE_REF_TAG,
 			  (dif_task_params->validate_ref_tag &&
 			  dif_task_params->dif_on_network) ? 1 : 0);
 		SET_FIELD(rdif_context->flags1,
-			  RDIF_TASK_CONTEXT_HOSTINTERFACE,
+			  RDIF_TASK_CONTEXT_HOST_INTERFACE,
 			  dif_task_params->dif_on_host ? 1 : 0);
 		SET_FIELD(rdif_context->flags1,
-			  RDIF_TASK_CONTEXT_NETWORKINTERFACE,
+			  RDIF_TASK_CONTEXT_NETWORK_INTERFACE,
 			  dif_task_params->dif_on_network ? 1 : 0);
 		SET_FIELD(rdif_context->flags1,
-			  RDIF_TASK_CONTEXT_FORWARDGUARD,
+			  RDIF_TASK_CONTEXT_FORWARD_GUARD,
 			  dif_task_params->forward_guard ? 1 : 0);
 		SET_FIELD(rdif_context->flags1,
-			  RDIF_TASK_CONTEXT_FORWARDAPPTAG,
+			  RDIF_TASK_CONTEXT_FORWARD_APP_TAG,
 			  dif_task_params->forward_app_tag ? 1 : 0);
 		SET_FIELD(rdif_context->flags1,
-			  RDIF_TASK_CONTEXT_FORWARDREFTAG,
+			  RDIF_TASK_CONTEXT_FORWARD_REF_TAG,
 			  dif_task_params->forward_ref_tag ? 1 : 0);
 		SET_FIELD(rdif_context->flags1,
-			  RDIF_TASK_CONTEXT_FORWARDAPPTAGWITHMASK,
+			  RDIF_TASK_CONTEXT_FORWARD_APP_TAG_WITH_MASK,
 			  dif_task_params->forward_app_tag_with_mask ? 1 : 0);
 		SET_FIELD(rdif_context->flags1,
-			  RDIF_TASK_CONTEXT_FORWARDREFTAGWITHMASK,
+			  RDIF_TASK_CONTEXT_FORWARD_REF_TAG_WITH_MASK,
 			  dif_task_params->forward_ref_tag_with_mask ? 1 : 0);
 		SET_FIELD(rdif_context->flags1,
-			  RDIF_TASK_CONTEXT_INTERVALSIZE,
+			  RDIF_TASK_CONTEXT_INTERVAL_SIZE,
 			  dif_task_params->dif_block_size_log - 9);
 		SET_FIELD(rdif_context->state,
-			  RDIF_TASK_CONTEXT_REFTAGMASK,
+			  RDIF_TASK_CONTEXT_REF_TAG_MASK,
 			  dif_task_params->ref_tag_mask);
-		SET_FIELD(rdif_context->state, RDIF_TASK_CONTEXT_IGNOREAPPTAG,
+		SET_FIELD(rdif_context->state, RDIF_TASK_CONTEXT_IGNORE_APP_TAG,
 			  dif_task_params->ignore_app_tag);
 	}
 
@@ -399,7 +402,7 @@ void init_rtdif_task_context(struct rdif_task_context *rdif_context,
 	    task_type == ISCSI_TASK_TYPE_INITIATOR_WRITE) {
 		tdif_context->app_tag_value =
 				  cpu_to_le16(dif_task_params->application_tag);
-		tdif_context->partial_crc_valueB =
+		tdif_context->partial_crc_value_b =
 		       cpu_to_le16(dif_task_params->crc_seed ? 0xffff : 0x0000);
 		tdif_context->partial_crc_value_a =
 		       cpu_to_le16(dif_task_params->crc_seed ? 0xffff : 0x0000);
@@ -407,64 +410,68 @@ void init_rtdif_task_context(struct rdif_task_context *rdif_context,
 			  dif_task_params->crc_seed ? 1 : 0);
 
 		SET_FIELD(tdif_context->flags0,
-			  TDIF_TASK_CONTEXT_SETERRORWITHEOP,
+			  TDIF_TASK_CONTEXT_SET_ERROR_WITH_EOP,
 			  dif_task_params->tx_dif_conn_err_en ? 1 : 0);
-		SET_FIELD(tdif_context->flags1, TDIF_TASK_CONTEXT_FORWARDGUARD,
+		SET_FIELD(tdif_context->flags1, TDIF_TASK_CONTEXT_FORWARD_GUARD,
 			  dif_task_params->forward_guard   ? 1 : 0);
-		SET_FIELD(tdif_context->flags1, TDIF_TASK_CONTEXT_FORWARDAPPTAG,
+		SET_FIELD(tdif_context->flags1,
+			  TDIF_TASK_CONTEXT_FORWARD_APP_TAG,
 			  dif_task_params->forward_app_tag ? 1 : 0);
-		SET_FIELD(tdif_context->flags1, TDIF_TASK_CONTEXT_FORWARDREFTAG,
+		SET_FIELD(tdif_context->flags1,
+			  TDIF_TASK_CONTEXT_FORWARD_REF_TAG,
 			  dif_task_params->forward_ref_tag ? 1 : 0);
-		SET_FIELD(tdif_context->flags1, TDIF_TASK_CONTEXT_INTERVALSIZE,
+		SET_FIELD(tdif_context->flags1, TDIF_TASK_CONTEXT_INTERVAL_SIZE,
 			  dif_task_params->dif_block_size_log - 9);
-		SET_FIELD(tdif_context->flags1, TDIF_TASK_CONTEXT_HOSTINTERFACE,
+		SET_FIELD(tdif_context->flags1,
+			  TDIF_TASK_CONTEXT_HOST_INTERFACE,
 			  dif_task_params->dif_on_host    ? 1 : 0);
 		SET_FIELD(tdif_context->flags1,
-			  TDIF_TASK_CONTEXT_NETWORKINTERFACE,
+			  TDIF_TASK_CONTEXT_NETWORK_INTERFACE,
 			  dif_task_params->dif_on_network ? 1 : 0);
 		val = cpu_to_le32(dif_task_params->initial_ref_tag);
 		tdif_context->initial_ref_tag = val;
 		tdif_context->app_tag_mask =
 			     cpu_to_le16(dif_task_params->application_tag_mask);
 		SET_FIELD(tdif_context->flags0,
-			  TDIF_TASK_CONTEXT_HOSTGUARDTYPE,
+			  TDIF_TASK_CONTEXT_HOST_GUARD_TYPE,
 			  dif_task_params->host_guard_type);
 		SET_FIELD(tdif_context->flags0,
-			  TDIF_TASK_CONTEXT_PROTECTIONTYPE,
+			  TDIF_TASK_CONTEXT_PROTECTION_TYPE,
 			  dif_task_params->protection_type);
 		SET_FIELD(tdif_context->flags0,
-			  TDIF_TASK_CONTEXT_INITIALREFTAGVALID,
+			  TDIF_TASK_CONTEXT_INITIAL_REF_TAG_VALID,
 			  dif_task_params->initial_ref_tag_is_valid ? 1 : 0);
 		SET_FIELD(tdif_context->flags0,
-			  TDIF_TASK_CONTEXT_KEEPREFTAGCONST,
+			  TDIF_TASK_CONTEXT_KEEP_REF_TAG_CONST,
 			  dif_task_params->keep_ref_tag_const ? 1 : 0);
-		SET_FIELD(tdif_context->flags1, TDIF_TASK_CONTEXT_VALIDATEGUARD,
+		SET_FIELD(tdif_context->flags1,
+			  TDIF_TASK_CONTEXT_VALIDATE_GUARD,
 			  (dif_task_params->validate_guard &&
 			   dif_task_params->dif_on_host) ? 1 : 0);
 		SET_FIELD(tdif_context->flags1,
-			  TDIF_TASK_CONTEXT_VALIDATEAPPTAG,
+			  TDIF_TASK_CONTEXT_VALIDATE_APP_TAG,
 			  (dif_task_params->validate_app_tag &&
 			  dif_task_params->dif_on_host) ? 1 : 0);
 		SET_FIELD(tdif_context->flags1,
-			  TDIF_TASK_CONTEXT_VALIDATEREFTAG,
+			  TDIF_TASK_CONTEXT_VALIDATE_REF_TAG,
 			  (dif_task_params->validate_ref_tag &&
 			   dif_task_params->dif_on_host) ? 1 : 0);
 		SET_FIELD(tdif_context->flags1,
-			  TDIF_TASK_CONTEXT_FORWARDAPPTAGWITHMASK,
+			  TDIF_TASK_CONTEXT_FORWARD_APP_TAG_WITH_MASK,
 			  dif_task_params->forward_app_tag_with_mask ? 1 : 0);
 		SET_FIELD(tdif_context->flags1,
-			  TDIF_TASK_CONTEXT_FORWARDREFTAGWITHMASK,
+			  TDIF_TASK_CONTEXT_FORWARD_REF_TAG_WITH_MASK,
 			  dif_task_params->forward_ref_tag_with_mask ? 1 : 0);
 		SET_FIELD(tdif_context->flags1,
-			  TDIF_TASK_CONTEXT_REFTAGMASK,
+			  TDIF_TASK_CONTEXT_REF_TAG_MASK,
 			  dif_task_params->ref_tag_mask);
 		SET_FIELD(tdif_context->flags0,
-			  TDIF_TASK_CONTEXT_IGNOREAPPTAG,
+			  TDIF_TASK_CONTEXT_IGNORE_APP_TAG,
 			  dif_task_params->ignore_app_tag ? 1 : 0);
 	}
 }
 
-static void set_local_completion_context(struct iscsi_task_context *context)
+static void set_local_completion_context(struct e4_iscsi_task_context *context)
 {
 	SET_FIELD(context->ystorm_st_context.state.flags,
 		  YSTORM_ISCSI_TASK_STATE_LOCAL_COMP, 1);
@@ -481,7 +488,7 @@ static int init_rw_iscsi_task(struct iscsi_task_params *task_params,
 			      struct scsi_dif_task_params *dif_task_params)
 {
 	u32 exp_data_transfer_len = conn_params->max_burst_length;
-	struct iscsi_task_context *cxt;
+	struct e4_iscsi_task_context *cxt;
 	bool slow_io = false;
 	u32 task_size, val;
 	u8 num_sges = 0;
@@ -494,19 +501,33 @@ static int init_rw_iscsi_task(struct iscsi_task_params *task_params,
 
 	cxt = task_params->context;
 
-	val = cpu_to_le32(task_size);
-	cxt->ystorm_st_context.pdu_hdr.cmd.expected_transfer_length = val;
-	init_initiator_rw_cdb_ystorm_context(&cxt->ystorm_st_context,
-					     cmd_params);
-	val = cpu_to_le32(cmd_params->sense_data_buffer_phys_addr.lo);
-	cxt->mstorm_st_context.sense_db.lo = val;
 
-	val = cpu_to_le32(cmd_params->sense_data_buffer_phys_addr.hi);
-	cxt->mstorm_st_context.sense_db.hi = val;
+	if (task_type == ISCSI_TASK_TYPE_TARGET_READ) {
+		set_local_completion_context(cxt);
+	} else if (task_type == ISCSI_TASK_TYPE_TARGET_WRITE) {
+		val = cpu_to_le32(task_size +
+			   ((struct iscsi_r2t_hdr *)pdu_header)->buffer_offset);
+		cxt->ystorm_st_context.pdu_hdr.r2t.desired_data_trns_len = val;
+		cxt->mstorm_st_context.expected_itt =
+						   cpu_to_le32(pdu_header->itt);
+	} else {
+		val = cpu_to_le32(task_size);
+		cxt->ystorm_st_context.pdu_hdr.cmd.expected_transfer_length =
+									    val;
+		init_initiator_rw_cdb_ystorm_context(&cxt->ystorm_st_context,
+						     cmd_params);
+		val = cpu_to_le32(cmd_params->sense_data_buffer_phys_addr.lo);
+		cxt->mstorm_st_context.sense_db.lo = val;
+
+		val = cpu_to_le32(cmd_params->sense_data_buffer_phys_addr.hi);
+		cxt->mstorm_st_context.sense_db.hi = val;
+	}
 
 	if (task_params->tx_io_size) {
 		init_dif_context_flags(&cxt->ystorm_st_context.state.dif_flags,
 				       dif_task_params);
+		init_dif_context_flags(&cxt->ustorm_st_context.dif_flags,
+				       dif_task_params);
 		init_scsi_sgl_context(&cxt->ystorm_st_context.state.sgl_params,
 				      &cxt->ystorm_st_context.state.data_desc,
 				      sgl_task_params);
@@ -595,7 +616,7 @@ int init_initiator_login_request_task(struct iscsi_task_params *task_params,
 				      struct scsi_sgl_task_params *tx_params,
 				      struct scsi_sgl_task_params *rx_params)
 {
-	struct iscsi_task_context *cxt;
+	struct e4_iscsi_task_context *cxt;
 
 	cxt = task_params->context;
 
@@ -637,7 +658,7 @@ int init_initiator_nop_out_task(struct iscsi_task_params *task_params,
 				struct scsi_sgl_task_params *tx_sgl_task_params,
 				struct scsi_sgl_task_params *rx_sgl_task_params)
 {
-	struct iscsi_task_context *cxt;
+	struct e4_iscsi_task_context *cxt;
 
 	cxt = task_params->context;
 
@@ -683,7 +704,7 @@ int init_initiator_logout_request_task(struct iscsi_task_params *task_params,
 				       struct scsi_sgl_task_params *tx_params,
 				       struct scsi_sgl_task_params *rx_params)
 {
-	struct iscsi_task_context *cxt;
+	struct e4_iscsi_task_context *cxt;
 
 	cxt = task_params->context;
 
@@ -738,7 +759,7 @@ int init_initiator_text_request_task(struct iscsi_task_params *task_params,
 				     struct scsi_sgl_task_params *tx_params,
 				     struct scsi_sgl_task_params *rx_params)
 {
-	struct iscsi_task_context *cxt;
+	struct e4_iscsi_task_context *cxt;
 
 	cxt = task_params->context;
 
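
The long runs of renames above (RDIF_TASK_CONTEXT_HOSTGUARDTYPE becoming
RDIF_TASK_CONTEXT_HOST_GUARD_TYPE and so on) track the regenerated HSI
headers; the SET_FIELD() machinery itself is untouched. In the qed headers
that macro conventionally pairs each <NAME> with <NAME>_MASK and
<NAME>_SHIFT constants and expands along these lines (a sketch of the
usual shape, not a quote of the header):

#define SET_FIELD(value, name, flag)					\
	do {								\
		(value) &= ~(name##_MASK << name##_SHIFT);		\
		(value) |= (((u64)(flag) & (u64)name##_MASK)		\
			    << name##_SHIFT);				\
	} while (0)

so a rename changes only which _MASK/_SHIFT pair the call site binds to,
never the generated stores.
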
diff --git a/drivers/scsi/qedi/qedi_fw_iscsi.h b/drivers/scsi/qedi/qedi_fw_iscsi.h
index b6f24f9..c3deb77 100644
--- a/drivers/scsi/qedi/qedi_fw_iscsi.h
+++ b/drivers/scsi/qedi/qedi_fw_iscsi.h
@@ -13,7 +13,7 @@
 #include "qedi_fw_scsi.h"
 
 struct iscsi_task_params {
-	struct iscsi_task_context *context;
+	struct e4_iscsi_task_context *context;
 	struct iscsi_wqe	  *sqe;
 	u32			  tx_io_size;
 	u32			  rx_io_size;
diff --git a/drivers/scsi/qedi/qedi_gbl.h b/drivers/scsi/qedi/qedi_gbl.h
index 63d793f..f5b5a31 100644
--- a/drivers/scsi/qedi/qedi_gbl.h
+++ b/drivers/scsi/qedi/qedi_gbl.h
@@ -52,11 +52,12 @@ void qedi_update_itt_map(struct qedi_ctx *qedi, u32 tid, u32 proto_itt,
 void qedi_get_proto_itt(struct qedi_ctx *qedi, u32 tid, u32 *proto_itt);
 void qedi_get_task_tid(struct qedi_ctx *qedi, u32 itt, int16_t *tid);
 void qedi_process_iscsi_error(struct qedi_endpoint *ep,
-			      struct async_data *data);
+			      struct iscsi_eqe_data *data);
 void qedi_start_conn_recovery(struct qedi_ctx *qedi,
 			      struct qedi_conn *qedi_conn);
 struct qedi_conn *qedi_get_conn_from_id(struct qedi_ctx *qedi, u32 iscsi_cid);
-void qedi_process_tcp_error(struct qedi_endpoint *ep, struct async_data *data);
+void qedi_process_tcp_error(struct qedi_endpoint *ep,
+			    struct iscsi_eqe_data *data);
 void qedi_mark_device_missing(struct iscsi_cls_session *cls_session);
 void qedi_mark_device_available(struct iscsi_cls_session *cls_session);
 void qedi_reset_host_mtu(struct qedi_ctx *qedi, u16 mtu);
diff --git a/drivers/scsi/qedi/qedi_iscsi.c b/drivers/scsi/qedi/qedi_iscsi.c
index a02b34e..7ec7f6e 100644
--- a/drivers/scsi/qedi/qedi_iscsi.c
+++ b/drivers/scsi/qedi/qedi_iscsi.c
@@ -539,7 +539,6 @@ static int qedi_iscsi_offload_conn(struct qedi_endpoint *qedi_ep)
 	conn_info->ka_max_probe_cnt = DEF_KA_MAX_PROBE_COUNT;
 	conn_info->dup_ack_theshold = 3;
 	conn_info->rcv_wnd = 65535;
-	conn_info->cwnd = DEF_MAX_CWND;
 
 	conn_info->ss_thresh = 65535;
 	conn_info->srtt = 300;
@@ -557,8 +556,8 @@ static int qedi_iscsi_offload_conn(struct qedi_endpoint *qedi_ep)
 				       (qedi_ep->ip_type == TCP_IPV6),
 				       1, (qedi_ep->vlan_id != 0));
 
+	conn_info->cwnd = DEF_MAX_CWND * conn_info->mss;
 	conn_info->rcv_wnd_scale = 4;
-	conn_info->ts_ticks_per_second = 1000;
 	conn_info->da_timeout_value = 200;
 	conn_info->ack_frequency = 2;
 
@@ -1557,7 +1556,8 @@ char *qedi_get_iscsi_error(enum iscsi_error_types err_code)
 	return msg;
 }
 
-void qedi_process_iscsi_error(struct qedi_endpoint *ep, struct async_data *data)
+void qedi_process_iscsi_error(struct qedi_endpoint *ep,
+			      struct iscsi_eqe_data *data)
 {
 	struct qedi_conn *qedi_conn;
 	struct qedi_ctx *qedi;
@@ -1603,7 +1603,8 @@ void qedi_process_iscsi_error(struct qedi_endpoint *ep, struct async_data *data)
 		qedi_start_conn_recovery(qedi_conn->qedi, qedi_conn);
 }
 
-void qedi_process_tcp_error(struct qedi_endpoint *ep, struct async_data *data)
+void qedi_process_tcp_error(struct qedi_endpoint *ep,
+			    struct iscsi_eqe_data *data)
 {
 	struct qedi_conn *qedi_conn;
 
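
The cwnd hunk is the one behavioral change in this block: the assignment
moves below the call that fills in conn_info->mss (presumably the
connection-initialization helper whose argument list is visible above),
because the congestion window is now expressed in bytes rather than
segments. Illustrative arithmetic only, since DEF_MAX_CWND's value is not
shown in this excerpt: were DEF_MAX_CWND 4 and the MSS 1460, the offloaded
connection would start with cwnd = 4 * 1460 = 5840 bytes instead of a bare
count of 4 segments.
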
diff --git a/drivers/scsi/qedi/qedi_iscsi.h b/drivers/scsi/qedi/qedi_iscsi.h
index 3247287..ea13151 100644
--- a/drivers/scsi/qedi/qedi_iscsi.h
+++ b/drivers/scsi/qedi/qedi_iscsi.h
@@ -182,7 +182,7 @@ struct qedi_cmd {
 	struct scsi_cmnd *scsi_cmd;
 	struct scatterlist *sg;
 	struct qedi_io_bdt io_tbl;
-	struct iscsi_task_context request;
+	struct e4_iscsi_task_context request;
 	unsigned char *sense_buffer;
 	dma_addr_t sense_buffer_dma;
 	u16 task_id;
diff --git a/drivers/scsi/qedi/qedi_main.c b/drivers/scsi/qedi/qedi_main.c
index cccc34a..a000223 100644
--- a/drivers/scsi/qedi/qedi_main.c
+++ b/drivers/scsi/qedi/qedi_main.c
@@ -60,7 +60,7 @@ static int qedi_iscsi_event_cb(void *context, u8 fw_event_code, void *fw_handle)
 {
 	struct qedi_ctx *qedi;
 	struct qedi_endpoint *qedi_ep;
-	struct async_data *data;
+	struct iscsi_eqe_data *data;
 	int rval = 0;
 
 	if (!context || !fw_handle) {
@@ -72,18 +72,18 @@ static int qedi_iscsi_event_cb(void *context, u8 fw_event_code, void *fw_handle)
 	QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_INFO,
 		  "Recv Event %d fw_handle %p\n", fw_event_code, fw_handle);
 
-	data = (struct async_data *)fw_handle;
+	data = (struct iscsi_eqe_data *)fw_handle;
 	QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_INFO,
-		  "cid=0x%x tid=0x%x err-code=0x%x fw-dbg-param=0x%x\n",
-		   data->cid, data->itid, data->error_code,
-		   data->fw_debug_param);
+		  "icid=0x%x conn_id=0x%x err-code=0x%x error-pdu-opcode-reserved=0x%x\n",
+		   data->icid, data->conn_id, data->error_code,
+		   data->error_pdu_opcode_reserved);
 
-	qedi_ep = qedi->ep_tbl[data->cid];
+	qedi_ep = qedi->ep_tbl[data->icid];
 
 	if (!qedi_ep) {
 		QEDI_WARN(&qedi->dbg_ctx,
 			  "Cannot process event, ep already disconnected, cid=0x%x\n",
-			   data->cid);
+			   data->icid);
 		WARN_ON(1);
 		return -ENODEV;
 	}
@@ -339,12 +339,12 @@ static int qedi_init_uio(struct qedi_ctx *qedi)
 static int qedi_alloc_and_init_sb(struct qedi_ctx *qedi,
 				  struct qed_sb_info *sb_info, u16 sb_id)
 {
-	struct status_block *sb_virt;
+	struct status_block_e4 *sb_virt;
 	dma_addr_t sb_phys;
 	int ret;
 
 	sb_virt = dma_alloc_coherent(&qedi->pdev->dev,
-				     sizeof(struct status_block), &sb_phys,
+				     sizeof(struct status_block_e4), &sb_phys,
 				     GFP_KERNEL);
 	if (!sb_virt) {
 		QEDI_ERR(&qedi->dbg_ctx,
@@ -858,7 +858,6 @@ static int qedi_set_iscsi_pf_param(struct qedi_ctx *qedi)
 
 	qedi->pf_params.iscsi_pf_params.gl_rq_pi = QEDI_PROTO_CQ_PROD_IDX;
 	qedi->pf_params.iscsi_pf_params.gl_cmd_pi = 1;
-	qedi->pf_params.iscsi_pf_params.ooo_enable = 1;
 
 err_alloc_mem:
 	return rval;
@@ -961,7 +960,7 @@ static bool qedi_process_completions(struct qedi_fastpath *fp)
 {
 	struct qedi_ctx *qedi = fp->qedi;
 	struct qed_sb_info *sb_info = fp->sb_info;
-	struct status_block *sb = sb_info->sb_virt;
+	struct status_block_e4 *sb = sb_info->sb_virt;
 	struct qedi_percpu_s *p = NULL;
 	struct global_queue *que;
 	u16 prod_idx;
@@ -1015,7 +1014,7 @@ static bool qedi_fp_has_work(struct qedi_fastpath *fp)
 	struct qedi_ctx *qedi = fp->qedi;
 	struct global_queue *que;
 	struct qed_sb_info *sb_info = fp->sb_info;
-	struct status_block *sb = sb_info->sb_virt;
+	struct status_block_e4 *sb = sb_info->sb_virt;
 	u16 prod_idx;
 
 	barrier();
@@ -1262,8 +1261,10 @@ static int qedi_alloc_bdq(struct qedi_ctx *qedi)
 		QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_CONN,
 			  "pbl [0x%p] pbl->address hi [0x%llx] lo [0x%llx], idx [%d]\n",
 			  pbl, pbl->address.hi, pbl->address.lo, i);
-		pbl->opaque.hi = 0;
-		pbl->opaque.lo = cpu_to_le32(QEDI_U64_LO(i));
+		pbl->opaque.iscsi_opaque.reserved_zero[0] = 0;
+		pbl->opaque.iscsi_opaque.reserved_zero[1] = 0;
+		pbl->opaque.iscsi_opaque.reserved_zero[2] = 0;
+		pbl->opaque.iscsi_opaque.opaque = cpu_to_le16(i);
 		pbl++;
 	}
 
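
The BDQ producer loop above and the consumers in qedi_fw.c form one
contract: the opaque cookie written into each PBL entry here comes back
verbatim in the completion's rqe_opaque field, where it is used directly
as the buffer index. Both halves, condensed (the field widths are whatever
the shared scsi_bd definition declares, which is not part of this excerpt;
note that the put path in qedi_fw.c writes the same field via
cpu_to_le32(), so the precise width cannot be read off this diff alone):

	/* producer (qedi_main.c): stash the BDQ index in the descriptor */
	pbl->opaque.iscsi_opaque.opaque = cpu_to_le16(i);

	/* consumer (qedi_fw.c): the same cookie returns in the CQE */
	idx = cqe->rqe_opaque;
	if (idx > (QEDI_BDQ_NUM - 1))
		return;		/* firmware handed back a bad cookie */
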
diff --git a/drivers/scsi/qedi/qedi_version.h b/drivers/scsi/qedi/qedi_version.h
index d61e3ac..8a0e523 100644
--- a/drivers/scsi/qedi/qedi_version.h
+++ b/drivers/scsi/qedi/qedi_version.h
@@ -7,8 +7,8 @@
  * this source tree.
  */
 
-#define QEDI_MODULE_VERSION	"8.10.4.0"
+#define QEDI_MODULE_VERSION	"8.33.0.20"
 #define QEDI_DRIVER_MAJOR_VER		8
-#define QEDI_DRIVER_MINOR_VER		10
-#define QEDI_DRIVER_REV_VER		4
-#define QEDI_DRIVER_ENG_VER		0
+#define QEDI_DRIVER_MINOR_VER		33
+#define QEDI_DRIVER_REV_VER		0
+#define QEDI_DRIVER_ENG_VER		20
diff --git a/drivers/ssb/Kconfig b/drivers/ssb/Kconfig
index d8e4219..f48a2ee 100644
--- a/drivers/ssb/Kconfig
+++ b/drivers/ssb/Kconfig
@@ -3,10 +3,7 @@
 	depends on HAS_IOMEM && HAS_DMA
 	default y
 
-menu "Sonics Silicon Backplane"
-	depends on SSB_POSSIBLE
-
-config SSB
+menuconfig SSB
 	tristate "Sonics Silicon Backplane support"
 	depends on SSB_POSSIBLE
 	help
@@ -21,6 +18,8 @@
 
 	  If unsure, say N.
 
+if SSB
+
 # Common SPROM support routines
 config SSB_SPROM
 	bool
@@ -185,4 +184,4 @@
 
 	  If unsure, say N
 
-endmenu
+endif # SSB
diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c
index c7bdeb6..7baa90a 100644
--- a/drivers/vhost/net.c
+++ b/drivers/vhost/net.c
@@ -89,7 +89,7 @@ struct vhost_net_ubuf_ref {
 
 #define VHOST_RX_BATCH 64
 struct vhost_net_buf {
-	struct sk_buff **queue;
+	void **queue;
 	int tail;
 	int head;
 };
@@ -108,7 +108,7 @@ struct vhost_net_virtqueue {
 	/* Reference counting for outstanding ubufs.
 	 * Protected by vq mutex. Writers must also take device mutex. */
 	struct vhost_net_ubuf_ref *ubufs;
-	struct skb_array *rx_array;
+	struct ptr_ring *rx_ring;
 	struct vhost_net_buf rxq;
 };
 
@@ -158,7 +158,7 @@ static int vhost_net_buf_produce(struct vhost_net_virtqueue *nvq)
 	struct vhost_net_buf *rxq = &nvq->rxq;
 
 	rxq->head = 0;
-	rxq->tail = skb_array_consume_batched(nvq->rx_array, rxq->queue,
+	rxq->tail = ptr_ring_consume_batched(nvq->rx_ring, rxq->queue,
 					      VHOST_RX_BATCH);
 	return rxq->tail;
 }
@@ -167,13 +167,25 @@ static void vhost_net_buf_unproduce(struct vhost_net_virtqueue *nvq)
 {
 	struct vhost_net_buf *rxq = &nvq->rxq;
 
-	if (nvq->rx_array && !vhost_net_buf_is_empty(rxq)) {
-		skb_array_unconsume(nvq->rx_array, rxq->queue + rxq->head,
-				    vhost_net_buf_get_size(rxq));
+	if (nvq->rx_ring && !vhost_net_buf_is_empty(rxq)) {
+		ptr_ring_unconsume(nvq->rx_ring, rxq->queue + rxq->head,
+				   vhost_net_buf_get_size(rxq),
+				   __skb_array_destroy_skb);
 		rxq->head = rxq->tail = 0;
 	}
 }
 
+static int vhost_net_buf_peek_len(void *ptr)
+{
+	if (tun_is_xdp_buff(ptr)) {
+		struct xdp_buff *xdp = tun_ptr_to_xdp(ptr);
+
+		return xdp->data_end - xdp->data;
+	}
+
+	return __skb_array_len_with_tag(ptr);
+}
+
 static int vhost_net_buf_peek(struct vhost_net_virtqueue *nvq)
 {
 	struct vhost_net_buf *rxq = &nvq->rxq;
@@ -185,7 +197,7 @@ static int vhost_net_buf_peek(struct vhost_net_virtqueue *nvq)
 		return 0;
 
 out:
-	return __skb_array_len_with_tag(vhost_net_buf_get_ptr(rxq));
+	return vhost_net_buf_peek_len(vhost_net_buf_get_ptr(rxq));
 }
 
 static void vhost_net_buf_init(struct vhost_net_buf *rxq)
@@ -583,7 +595,7 @@ static int peek_head_len(struct vhost_net_virtqueue *rvq, struct sock *sk)
 	int len = 0;
 	unsigned long flags;
 
-	if (rvq->rx_array)
+	if (rvq->rx_ring)
 		return vhost_net_buf_peek(rvq);
 
 	spin_lock_irqsave(&sk->sk_receive_queue.lock, flags);
@@ -744,7 +756,7 @@ static void handle_rx(struct vhost_net *net)
 	};
 	size_t total_len = 0;
 	int err, mergeable;
-	s16 headcount;
+	s16 headcount, nheads = 0;
 	size_t vhost_hlen, sock_hlen;
 	size_t vhost_len, sock_len;
 	struct socket *sock;
@@ -772,7 +784,7 @@ static void handle_rx(struct vhost_net *net)
 	while ((sock_len = vhost_net_rx_peek_head_len(net, sock->sk))) {
 		sock_len += sock_hlen;
 		vhost_len = sock_len + vhost_hlen;
-		headcount = get_rx_bufs(vq, vq->heads, vhost_len,
+		headcount = get_rx_bufs(vq, vq->heads + nheads, vhost_len,
 					&in, vq_log, &log,
 					likely(mergeable) ? UIO_MAXIOV : 1);
 		/* On error, stop handling until the next kick. */
@@ -790,7 +802,7 @@ static void handle_rx(struct vhost_net *net)
 			 * they refilled. */
 			goto out;
 		}
-		if (nvq->rx_array)
+		if (nvq->rx_ring)
 			msg.msg_control = vhost_net_buf_consume(&nvq->rxq);
 		/* On overrun, truncate and discard */
 		if (unlikely(headcount > UIO_MAXIOV)) {
@@ -844,8 +856,12 @@ static void handle_rx(struct vhost_net *net)
 			vhost_discard_vq_desc(vq, headcount);
 			goto out;
 		}
-		vhost_add_used_and_signal_n(&net->dev, vq, vq->heads,
-					    headcount);
+		nheads += headcount;
+		if (nheads > VHOST_RX_BATCH) {
+			vhost_add_used_and_signal_n(&net->dev, vq, vq->heads,
+						    nheads);
+			nheads = 0;
+		}
 		if (unlikely(vq_log))
 			vhost_log_write(vq, vq_log, log, vhost_len);
 		total_len += vhost_len;
@@ -856,6 +872,9 @@ static void handle_rx(struct vhost_net *net)
 	}
 	vhost_net_enable_vq(net, vq);
 out:
+	if (nheads)
+		vhost_add_used_and_signal_n(&net->dev, vq, vq->heads,
+					    nheads);
 	mutex_unlock(&vq->mutex);
 }
 
@@ -896,7 +915,7 @@ static int vhost_net_open(struct inode *inode, struct file *f)
 	struct vhost_net *n;
 	struct vhost_dev *dev;
 	struct vhost_virtqueue **vqs;
-	struct sk_buff **queue;
+	void **queue;
 	int i;
 
 	n = kvmalloc(sizeof *n, GFP_KERNEL | __GFP_RETRY_MAYFAIL);
@@ -908,7 +927,7 @@ static int vhost_net_open(struct inode *inode, struct file *f)
 		return -ENOMEM;
 	}
 
-	queue = kmalloc_array(VHOST_RX_BATCH, sizeof(struct sk_buff *),
+	queue = kmalloc_array(VHOST_RX_BATCH, sizeof(void *),
 			      GFP_KERNEL);
 	if (!queue) {
 		kfree(vqs);
@@ -1046,23 +1065,23 @@ static struct socket *get_raw_socket(int fd)
 	return ERR_PTR(r);
 }
 
-static struct skb_array *get_tap_skb_array(int fd)
+static struct ptr_ring *get_tap_ptr_ring(int fd)
 {
-	struct skb_array *array;
+	struct ptr_ring *ring;
 	struct file *file = fget(fd);
 
 	if (!file)
 		return NULL;
-	array = tun_get_skb_array(file);
-	if (!IS_ERR(array))
+	ring = tun_get_tx_ring(file);
+	if (!IS_ERR(ring))
 		goto out;
-	array = tap_get_skb_array(file);
-	if (!IS_ERR(array))
+	ring = tap_get_ptr_ring(file);
+	if (!IS_ERR(ring))
 		goto out;
-	array = NULL;
+	ring = NULL;
 out:
 	fput(file);
-	return array;
+	return ring;
 }
 
 static struct socket *get_tap_socket(int fd)
@@ -1143,7 +1162,7 @@ static long vhost_net_set_backend(struct vhost_net *n, unsigned index, int fd)
 		vq->private_data = sock;
 		vhost_net_buf_unproduce(nvq);
 		if (index == VHOST_NET_VQ_RX)
-			nvq->rx_array = get_tap_skb_array(fd);
+			nvq->rx_ring = get_tap_ptr_ring(fd);
 		r = vhost_vq_init_access(vq);
 		if (r)
 			goto err_used;
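
The used-ring batching in handle_rx() is spread across three hunks;
assembled, the flow is: stack each iteration's head descriptors into
vq->heads at offset nheads, flush them with a single
vhost_add_used_and_signal_n() once the batch grows past VHOST_RX_BATCH,
and flush any remainder at the out: label. A condensed view, eliding the
error paths shown above:

	s16 headcount, nheads = 0;
	...
	headcount = get_rx_bufs(vq, vq->heads + nheads, vhost_len, ...);
	nheads += headcount;
	if (nheads > VHOST_RX_BATCH) {
		vhost_add_used_and_signal_n(&net->dev, vq, vq->heads, nheads);
		nheads = 0;
	}
	...
out:
	if (nheads)
		vhost_add_used_and_signal_n(&net->dev, vq, vq->heads, nheads);

The ptr_ring conversion in the same file is what lets the ring carry
either sk_buffs or XDP buffers; vhost_net_buf_peek_len() is the per-entry
dispatch on the tun pointer tag.
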
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
index a8ecccf..5da18eb 100644
--- a/fs/btrfs/disk-io.c
+++ b/fs/btrfs/disk-io.c
@@ -30,6 +30,7 @@
 #include <linux/ratelimit.h>
 #include <linux/uuid.h>
 #include <linux/semaphore.h>
+#include <linux/bpf.h>
 #include <asm/unaligned.h>
 #include "ctree.h"
 #include "disk-io.h"
@@ -3123,6 +3124,7 @@ int open_ctree(struct super_block *sb,
 		goto fail_block_groups;
 	goto retry_root_backup;
 }
+BPF_ALLOW_ERROR_INJECTION(open_ctree);
 
 static void btrfs_end_buffer_write_sync(struct buffer_head *bh, int uptodate)
 {
diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c
index 4426d1c..fb13828 100644
--- a/fs/btrfs/free-space-cache.c
+++ b/fs/btrfs/free-space-cache.c
@@ -22,6 +22,7 @@
 #include <linux/slab.h>
 #include <linux/math64.h>
 #include <linux/ratelimit.h>
+#include <linux/bpf.h>
 #include "ctree.h"
 #include "free-space-cache.h"
 #include "transaction.h"
@@ -332,6 +333,7 @@ static int io_ctl_init(struct btrfs_io_ctl *io_ctl, struct inode *inode,
 
 	return 0;
 }
+BPF_ALLOW_ERROR_INJECTION(io_ctl_init);
 
 static void io_ctl_free(struct btrfs_io_ctl *io_ctl)
 {
diff --git a/fs/gfs2/glock.c b/fs/gfs2/glock.c
index 11066d8..90af87f 100644
--- a/fs/gfs2/glock.c
+++ b/fs/gfs2/glock.c
@@ -1549,16 +1549,13 @@ static void glock_hash_walk(glock_examiner examiner, const struct gfs2_sbd *sdp)
 	rhashtable_walk_enter(&gl_hash_table, &iter);
 
 	do {
-		gl = ERR_PTR(rhashtable_walk_start(&iter));
-		if (IS_ERR(gl))
-			goto walk_stop;
+		rhashtable_walk_start(&iter);
 
 		while ((gl = rhashtable_walk_next(&iter)) && !IS_ERR(gl))
 			if (gl->gl_name.ln_sbd == sdp &&
 			    lockref_get_not_dead(&gl->gl_lockref))
 				examiner(gl);
 
-walk_stop:
 		rhashtable_walk_stop(&iter);
 	} while (cond_resched(), gl == ERR_PTR(-EAGAIN));
 
@@ -1947,7 +1944,7 @@ static void *gfs2_glock_seq_start(struct seq_file *seq, loff_t *pos)
 	loff_t n = *pos;
 
 	rhashtable_walk_enter(&gl_hash_table, &gi->hti);
-	if (rhashtable_walk_start(&gi->hti) != 0)
+	if (rhashtable_walk_start_check(&gi->hti) != 0)
 		return NULL;
 
 	do {
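
The gfs2 change tracks an rhashtable API update: rhashtable_walk_start()
no longer returns an error, and callers that still want the -EAGAIN
indication use rhashtable_walk_start_check() instead (as the seq_file
start routine above does). The resulting walker idiom, restated as a
sketch (visit() is an invented stand-in for the per-object work):

	rhashtable_walk_enter(&gl_hash_table, &iter);
	do {
		rhashtable_walk_start(&iter);	/* void in the new API */

		while ((gl = rhashtable_walk_next(&iter)) && !IS_ERR(gl))
			visit(gl);

		rhashtable_walk_stop(&iter);
	} while (cond_resched(), gl == ERR_PTR(-EAGAIN));
	rhashtable_walk_exit(&iter);	/* outside the hunk shown */
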
diff --git a/fs/nsfs.c b/fs/nsfs.c
index 7c6f76d..36b0772 100644
--- a/fs/nsfs.c
+++ b/fs/nsfs.c
@@ -103,14 +103,14 @@ static void *__ns_get_path(struct path *path, struct ns_common *ns)
 	goto got_it;
 }
 
-void *ns_get_path(struct path *path, struct task_struct *task,
-			const struct proc_ns_operations *ns_ops)
+void *ns_get_path_cb(struct path *path, ns_get_path_helper_t *ns_get_cb,
+		     void *private_data)
 {
 	struct ns_common *ns;
 	void *ret;
 
 again:
-	ns = ns_ops->get(task);
+	ns = ns_get_cb(private_data);
 	if (!ns)
 		return ERR_PTR(-ENOENT);
 
@@ -120,6 +120,29 @@ void *ns_get_path(struct path *path, struct task_struct *task,
 	return ret;
 }
 
+struct ns_get_path_task_args {
+	const struct proc_ns_operations *ns_ops;
+	struct task_struct *task;
+};
+
+static struct ns_common *ns_get_path_task(void *private_data)
+{
+	struct ns_get_path_task_args *args = private_data;
+
+	return args->ns_ops->get(args->task);
+}
+
+void *ns_get_path(struct path *path, struct task_struct *task,
+		  const struct proc_ns_operations *ns_ops)
+{
+	struct ns_get_path_task_args args = {
+		.ns_ops	= ns_ops,
+		.task	= task,
+	};
+
+	return ns_get_path_cb(path, ns_get_path_task, &args);
+}
+
 int open_related_ns(struct ns_common *ns,
 		   struct ns_common *(*get_ns)(struct ns_common *ns))
 {
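
ns_get_path() is now a thin wrapper over ns_get_path_cb(), so a caller can
resolve a namespace from something other than a task. A hypothetical extra
user only needs a callback that hands back a referenced struct ns_common
(the referencing rule is inferred from the task-based path, whose
ns_ops->get() takes a reference; all names below are invented for
illustration):

	struct hypothetical_args {
		struct net *net;
	};

	static struct ns_common *hypothetical_get_ns(void *private_data)
	{
		struct hypothetical_args *args = private_data;

		get_net(args->net);	/* return a referenced namespace */
		return &args->net->ns;
	}

	/* at the call site: */
	struct hypothetical_args args = { .net = net };
	void *ret = ns_get_path_cb(&path, hypothetical_get_ns, &args);
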
diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
index ee8b707..a2e8582 100644
--- a/include/asm-generic/vmlinux.lds.h
+++ b/include/asm-generic/vmlinux.lds.h
@@ -136,6 +136,15 @@
 #define KPROBE_BLACKLIST()
 #endif
 
+#ifdef CONFIG_BPF_KPROBE_OVERRIDE
+#define ERROR_INJECT_LIST()	. = ALIGN(8);						\
+				VMLINUX_SYMBOL(__start_kprobe_error_inject_list) = .;	\
+				KEEP(*(_kprobe_error_inject_list))			\
+				VMLINUX_SYMBOL(__stop_kprobe_error_inject_list) = .;
+#else
+#define ERROR_INJECT_LIST()
+#endif
+
 #ifdef CONFIG_EVENT_TRACING
 #define FTRACE_EVENTS()	. = ALIGN(8);					\
 			VMLINUX_SYMBOL(__start_ftrace_events) = .;	\
@@ -564,6 +573,7 @@
 	FTRACE_EVENTS()							\
 	TRACE_SYSCALLS()						\
 	KPROBE_BLACKLIST()						\
+	ERROR_INJECT_LIST()						\
 	MEM_DISCARD(init.rodata)					\
 	CLK_OF_TABLES()							\
 	RESERVEDMEM_OF_TABLES()						\
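
The new linker stanza collects every address dropped into the
_kprobe_error_inject_list section (see the BPF_ALLOW_ERROR_INJECTION()
definition further down) between a __start/__stop symbol pair. The usual
way such a table is consumed is a walk between those two symbols; a
minimal sketch, assuming a plain linear scan (the real consumer may sort
or hash the list instead):

	extern unsigned long __start_kprobe_error_inject_list[];
	extern unsigned long __stop_kprobe_error_inject_list[];

	static bool addr_in_error_inject_list(unsigned long addr)
	{
		unsigned long *p;

		for (p = __start_kprobe_error_inject_list;
		     p < __stop_kprobe_error_inject_list; p++)
			if (*p == addr)
				return true;
		return false;
	}
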
diff --git a/include/linux/bpf.h b/include/linux/bpf.h
index 0b25cf8..44f26f6 100644
--- a/include/linux/bpf.h
+++ b/include/linux/bpf.h
@@ -17,6 +17,7 @@
 #include <linux/numa.h>
 #include <linux/wait.h>
 
+struct bpf_verifier_env;
 struct perf_event;
 struct bpf_prog;
 struct bpf_map;
@@ -193,14 +194,18 @@ struct bpf_verifier_ops {
 				  struct bpf_prog *prog, u32 *target_size);
 };
 
+struct bpf_prog_offload_ops {
+	int (*insn_hook)(struct bpf_verifier_env *env,
+			 int insn_idx, int prev_insn_idx);
+};
+
 struct bpf_dev_offload {
 	struct bpf_prog		*prog;
 	struct net_device	*netdev;
 	void			*dev_priv;
 	struct list_head	offloads;
 	bool			dev_state;
-	bool			verifier_running;
-	wait_queue_head_t	verifier_done;
+	const struct bpf_prog_offload_ops *dev_ops;
 };
 
 struct bpf_prog_aux {
@@ -209,6 +214,10 @@ struct bpf_prog_aux {
 	u32 max_ctx_offset;
 	u32 stack_depth;
 	u32 id;
+	u32 func_cnt;
+	bool offload_requested;
+	struct bpf_prog **func;
+	void *jit_data; /* JIT specific data. arch dependent */
 	struct latch_tree_node ksym_tnode;
 	struct list_head ksym_lnode;
 	const struct bpf_prog_ops *ops;
@@ -295,6 +304,9 @@ int bpf_prog_array_copy_to_user(struct bpf_prog_array __rcu *progs,
 
 void bpf_prog_array_delete_safe(struct bpf_prog_array __rcu *progs,
 				struct bpf_prog *old_prog);
+int bpf_prog_array_copy_info(struct bpf_prog_array __rcu *array,
+			     __u32 __user *prog_ids, u32 request_cnt,
+			     __u32 __user *prog_cnt);
 int bpf_prog_array_copy(struct bpf_prog_array __rcu *old_array,
 			struct bpf_prog *exclude_prog,
 			struct bpf_prog *include_prog,
@@ -355,6 +367,8 @@ void bpf_prog_put(struct bpf_prog *prog);
 int __bpf_prog_charge(struct user_struct *user, u32 pages);
 void __bpf_prog_uncharge(struct user_struct *user, u32 pages);
 
+void bpf_prog_free_id(struct bpf_prog *prog, bool do_idr_lock);
+
 struct bpf_map *bpf_map_get_with_uref(u32 ufd);
 struct bpf_map *__bpf_map_get(struct fd f);
 struct bpf_map * __must_check bpf_map_inc(struct bpf_map *map, bool uref);
@@ -409,6 +423,7 @@ static inline void bpf_long_memcpy(void *dst, const void *src, u32 size)
 
 /* verify correctness of eBPF program */
 int bpf_check(struct bpf_prog **fp, union bpf_attr *attr);
+void bpf_patch_call_args(struct bpf_insn *insn, u32 stack_depth);
 
 /* Map specifics */
 struct net_device  *__dev_map_lookup_elem(struct bpf_map *map, u32 key);
@@ -536,13 +551,15 @@ bool bpf_prog_get_ok(struct bpf_prog *, enum bpf_prog_type *, bool);
 
 int bpf_prog_offload_compile(struct bpf_prog *prog);
 void bpf_prog_offload_destroy(struct bpf_prog *prog);
+int bpf_prog_offload_info_fill(struct bpf_prog_info *info,
+			       struct bpf_prog *prog);
 
 #if defined(CONFIG_NET) && defined(CONFIG_BPF_SYSCALL)
 int bpf_prog_offload_init(struct bpf_prog *prog, union bpf_attr *attr);
 
 static inline bool bpf_prog_is_dev_bound(struct bpf_prog_aux *aux)
 {
-	return aux->offload;
+	return aux->offload_requested;
 }
 #else
 static inline int bpf_prog_offload_init(struct bpf_prog *prog,
@@ -557,7 +574,7 @@ static inline bool bpf_prog_is_dev_bound(struct bpf_prog_aux *aux)
 }
 #endif /* CONFIG_NET && CONFIG_BPF_SYSCALL */
 
-#if defined(CONFIG_STREAM_PARSER) && defined(CONFIG_BPF_SYSCALL)
+#if defined(CONFIG_STREAM_PARSER) && defined(CONFIG_BPF_SYSCALL) && defined(CONFIG_INET)
 struct sock  *__sock_map_lookup_elem(struct bpf_map *map, u32 key);
 int sock_map_prog(struct bpf_map *map, struct bpf_prog *prog, u32 type);
 #else
@@ -596,4 +613,15 @@ extern const struct bpf_func_proto bpf_sock_map_update_proto;
 void bpf_user_rnd_init_once(void);
 u64 bpf_user_rnd_u32(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);
 
+#if defined(__KERNEL__) && !defined(__ASSEMBLY__)
+#ifdef CONFIG_BPF_KPROBE_OVERRIDE
+#define BPF_ALLOW_ERROR_INJECTION(fname)				\
+static unsigned long __used						\
+	__attribute__((__section__("_kprobe_error_inject_list")))	\
+	_eil_addr_##fname = (unsigned long)fname;
+#else
+#define BPF_ALLOW_ERROR_INJECTION(fname)
+#endif
+#endif
+
 #endif /* _LINUX_BPF_H */
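For illustration, a function opts into error injection by emitting its
address into the _kprobe_error_inject_list section via the macro above.
The function below is hypothetical; only the annotation pattern matters:

  /* Hypothetical function: any error-returning kernel function whose
   * failure path is safe to exercise could be annotated this way.
   */
  static int example_do_setup(int arg)
  {
  	return arg < 0 ? -EINVAL : 0;
  }
  BPF_ALLOW_ERROR_INJECTION(example_do_setup);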
diff --git a/include/linux/bpf_types.h b/include/linux/bpf_types.h
index 978c1d9..19b8349 100644
--- a/include/linux/bpf_types.h
+++ b/include/linux/bpf_types.h
@@ -42,7 +42,7 @@ BPF_MAP_TYPE(BPF_MAP_TYPE_ARRAY_OF_MAPS, array_of_maps_map_ops)
 BPF_MAP_TYPE(BPF_MAP_TYPE_HASH_OF_MAPS, htab_of_maps_map_ops)
 #ifdef CONFIG_NET
 BPF_MAP_TYPE(BPF_MAP_TYPE_DEVMAP, dev_map_ops)
-#ifdef CONFIG_STREAM_PARSER
+#if defined(CONFIG_STREAM_PARSER) && defined(CONFIG_INET)
 BPF_MAP_TYPE(BPF_MAP_TYPE_SOCKMAP, sock_map_ops)
 #endif
 BPF_MAP_TYPE(BPF_MAP_TYPE_CPUMAP, cpu_map_ops)
diff --git a/include/linux/bpf_verifier.h b/include/linux/bpf_verifier.h
index 1632bb1..6b66cd1 100644
--- a/include/linux/bpf_verifier.h
+++ b/include/linux/bpf_verifier.h
@@ -76,6 +76,14 @@ struct bpf_reg_state {
 	s64 smax_value; /* maximum possible (s64)value */
 	u64 umin_value; /* minimum possible (u64)value */
 	u64 umax_value; /* maximum possible (u64)value */
+	/* Inside the callee two registers can be both PTR_TO_STACK like
+	 * R1=fp-8 and R2=fp-8, but one of them points to this function's stack
+	 * while the other points to the caller's stack. To differentiate them,
+	 * 'frameno' is used: an index into the bpf_verifier_state->frame[]
+	 * array pointing to the owning bpf_func_state.
+	 * This field must be second to last, for states_equal() reasons.
+	 */
+	u32 frameno;
 	/* This field must be last, for states_equal() reasons. */
 	enum bpf_reg_liveness live;
 };
@@ -83,7 +91,8 @@ struct bpf_reg_state {
 enum bpf_stack_slot_type {
 	STACK_INVALID,    /* nothing was stored in this stack slot */
 	STACK_SPILL,      /* register spilled into stack */
-	STACK_MISC	  /* BPF program wrote some data into this slot */
+	STACK_MISC,	  /* BPF program wrote some data into this slot */
+	STACK_ZERO,	  /* BPF program wrote constant zero */
 };
 
 #define BPF_REG_SIZE 8	/* size of eBPF register in bytes */
@@ -96,13 +105,34 @@ struct bpf_stack_state {
 /* state of the program:
  * type of all registers and stack info
  */
-struct bpf_verifier_state {
+struct bpf_func_state {
 	struct bpf_reg_state regs[MAX_BPF_REG];
 	struct bpf_verifier_state *parent;
+	/* index of call instruction that called into this func */
+	int callsite;
+	/* stack frame number of this function state, from the point of
+	 * view of the enclosing bpf_verifier_state.
+	 * 0 = main function, 1 = first callee.
+	 */
+	u32 frameno;
+	/* subprog number == index within subprog_stack_depth
+	 * zero == main subprog
+	 */
+	u32 subprogno;
+
+	/* should be second to last. See copy_func_state() */
 	int allocated_stack;
 	struct bpf_stack_state *stack;
 };
 
+#define MAX_CALL_FRAMES 8
+struct bpf_verifier_state {
+	/* call stack tracking */
+	struct bpf_func_state *frame[MAX_CALL_FRAMES];
+	struct bpf_verifier_state *parent;
+	u32 curframe;
+};
+
 /* linked list of verifier states used to prune search */
 struct bpf_verifier_state_list {
 	struct bpf_verifier_state state;
@@ -113,6 +143,7 @@ struct bpf_insn_aux_data {
 	union {
 		enum bpf_reg_type ptr_type;	/* pointer type for load/store insns */
 		struct bpf_map *map_ptr;	/* pointer for call insn into lookup_elem */
+		s32 call_imm;			/* saved imm field of call insn */
 	};
 	int ctx_field_size; /* the ctx field size for load insn, maybe 0 */
 	bool seen; /* this insn was processed by the verifier */
@@ -135,11 +166,7 @@ static inline bool bpf_verifier_log_full(const struct bpf_verifer_log *log)
 	return log->len_used >= log->len_total - 1;
 }
 
-struct bpf_verifier_env;
-struct bpf_ext_analyzer_ops {
-	int (*insn_hook)(struct bpf_verifier_env *env,
-			 int insn_idx, int prev_insn_idx);
-};
+#define BPF_MAX_SUBPROGS 256
 
 /* single container for all structs
  * one verifier_env per bpf_check() call
@@ -152,29 +179,31 @@ struct bpf_verifier_env {
 	bool strict_alignment;		/* perform strict pointer alignment checks */
 	struct bpf_verifier_state *cur_state; /* current verifier state */
 	struct bpf_verifier_state_list **explored_states; /* search pruning optimization */
-	const struct bpf_ext_analyzer_ops *dev_ops; /* device analyzer ops */
 	struct bpf_map *used_maps[MAX_USED_MAPS]; /* array of map's used by eBPF program */
 	u32 used_map_cnt;		/* number of used maps */
 	u32 id_gen;			/* used to generate unique reg IDs */
 	bool allow_ptr_leaks;
 	bool seen_direct_write;
 	struct bpf_insn_aux_data *insn_aux_data; /* array of per-insn state */
-
 	struct bpf_verifer_log log;
+	u32 subprog_starts[BPF_MAX_SUBPROGS];
+	/* stack depth of each bpf function, computed by the verifier */
+	u16 subprog_stack_depth[BPF_MAX_SUBPROGS + 1];
+	u32 subprog_cnt;
 };
 
+__printf(2, 3) void bpf_verifier_log_write(struct bpf_verifier_env *env,
+					   const char *fmt, ...);
+
 static inline struct bpf_reg_state *cur_regs(struct bpf_verifier_env *env)
 {
-	return env->cur_state->regs;
+	struct bpf_verifier_state *cur = env->cur_state;
+
+	return cur->frame[cur->curframe]->regs;
 }
 
-#if defined(CONFIG_NET) && defined(CONFIG_BPF_SYSCALL)
 int bpf_prog_offload_verifier_prep(struct bpf_verifier_env *env);
-#else
-static inline int bpf_prog_offload_verifier_prep(struct bpf_verifier_env *env)
-{
-	return -EOPNOTSUPP;
-}
-#endif
+int bpf_prog_offload_verify_insn(struct bpf_verifier_env *env,
+				 int insn_idx, int prev_insn_idx);
 
 #endif /* _LINUX_BPF_VERIFIER_H */
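To show how the new frame array is indexed, here is a small sketch (an
illustration, not code from this patch): cur_regs() above resolves the
callee's registers, and the caller's frame sits one slot below curframe.

  /* Illustrative only: walk from the current (callee) frame to the
   * caller's register file. frame[0] is the main function.
   */
  static inline struct bpf_reg_state *
  example_caller_regs(struct bpf_verifier_env *env)
  {
  	struct bpf_verifier_state *cur = env->cur_state;

  	if (cur->curframe == 0)
  		return NULL;	/* the main function has no caller */
  	return cur->frame[cur->curframe - 1]->regs;
  }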
diff --git a/include/linux/brcmphy.h b/include/linux/brcmphy.h
index 8ff86b4..d3339dd 100644
--- a/include/linux/brcmphy.h
+++ b/include/linux/brcmphy.h
@@ -14,6 +14,7 @@
 #define PHY_ID_BCM5241			0x0143bc30
 #define PHY_ID_BCMAC131			0x0143bc70
 #define PHY_ID_BCM5481			0x0143bca0
+#define PHY_ID_BCM5395			0x0143bcf0
 #define PHY_ID_BCM54810			0x03625d00
 #define PHY_ID_BCM5482			0x0143bcb0
 #define PHY_ID_BCM5411			0x00206070
diff --git a/include/linux/dsa/lan9303.h b/include/linux/dsa/lan9303.h
index f48a85c..b4f2211 100644
--- a/include/linux/dsa/lan9303.h
+++ b/include/linux/dsa/lan9303.h
@@ -23,9 +23,10 @@ struct lan9303 {
 	struct regmap_irq_chip_data *irq_data;
 	struct gpio_desc *reset_gpio;
 	u32 reset_duration; /* in [ms] */
-	bool phy_addr_sel_strap;
+	int phy_addr_base;
 	struct dsa_switch *ds;
 	struct mutex indirect_mutex; /* protect indexed register access */
+	struct mutex alr_mutex; /* protect ALR access */
 	const struct lan9303_phy_ops *ops;
 	bool is_bridged; /* true if port 1 and 2 are bridged */
 
diff --git a/include/linux/filter.h b/include/linux/filter.h
index 80b5b48..425056c 100644
--- a/include/linux/filter.h
+++ b/include/linux/filter.h
@@ -18,7 +18,9 @@
 #include <linux/capability.h>
 #include <linux/cryptohash.h>
 #include <linux/set_memory.h>
+#include <linux/kallsyms.h>
 
+#include <net/xdp.h>
 #include <net/sch_generic.h>
 
 #include <uapi/linux/filter.h>
@@ -58,6 +60,9 @@ struct bpf_prog_aux;
 /* unused opcode to mark special call to bpf_tail_call() helper */
 #define BPF_TAIL_CALL	0xf0
 
+/* unused opcode to mark call to interpreter with arguments */
+#define BPF_CALL_ARGS	0xe0
+
 /* As per nm, we expose JITed images as text (code) section for
  * kallsyms. That way, tools like perf can find it to match
  * addresses.
@@ -455,10 +460,14 @@ struct bpf_binary_header {
 struct bpf_prog {
 	u16			pages;		/* Number of allocated pages */
 	u16			jited:1,	/* Is our filter JIT'ed? */
+				jit_requested:1,/* archs need to JIT the prog */
 				locked:1,	/* Program image locked? */
 				gpl_compatible:1, /* Is filter GPL compatible? */
 				cb_access:1,	/* Is control block accessed? */
-				dst_needed:1;	/* Do we need dst entry? */
+				dst_needed:1,	/* Do we need dst entry? */
+				blinded:1,	/* Was blinded */
+				is_func:1,	/* program is a bpf function */
+				kprobe_override:1; /* Do we override a kprobe? */
 	enum bpf_prog_type	type;		/* Type of BPF program */
 	u32			len;		/* Number of filter blocks */
 	u32			jited_len;	/* Size of jited insns in bytes */
@@ -495,6 +504,7 @@ struct xdp_buff {
 	void *data_end;
 	void *data_meta;
 	void *data_hard_start;
+	struct xdp_rxq_info *rxq;
 };
 
 /* Compute the linear packet data range [data, data_end) which
@@ -709,11 +719,22 @@ bool sk_filter_charge(struct sock *sk, struct sk_filter *fp);
 void sk_filter_uncharge(struct sock *sk, struct sk_filter *fp);
 
 u64 __bpf_call_base(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);
+#define __bpf_call_base_args \
+	((u64 (*)(u64, u64, u64, u64, u64, const struct bpf_insn *)) \
+	 __bpf_call_base)
 
 struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog);
 void bpf_jit_compile(struct bpf_prog *prog);
 bool bpf_helper_changes_pkt_data(void *func);
 
+static inline bool bpf_dump_raw_ok(void)
+{
+	/* Reconstruction of call-sites is dependent on kallsyms,
+	 * thus subject the dump to the same restriction.
+	 */
+	return kallsyms_show_value() == 1;
+}
+
 struct bpf_prog *bpf_patch_insn_single(struct bpf_prog *prog, u32 off,
 				       const struct bpf_insn *patch, u32 len);
 
@@ -797,7 +818,7 @@ static inline bool bpf_prog_ebpf_jited(const struct bpf_prog *fp)
 	return fp->jited && bpf_jit_is_ebpf();
 }
 
-static inline bool bpf_jit_blinding_enabled(void)
+static inline bool bpf_jit_blinding_enabled(struct bpf_prog *prog)
 {
 	/* These are the prerequisites, should someone ever have the
 	 * idea to call blinding outside of them, we make sure to
@@ -805,7 +826,7 @@ static inline bool bpf_jit_blinding_enabled(void)
 	 */
 	if (!bpf_jit_is_ebpf())
 		return false;
-	if (!bpf_jit_enable)
+	if (!prog->jit_requested)
 		return false;
 	if (!bpf_jit_harden)
 		return false;
@@ -985,6 +1006,7 @@ struct bpf_sock_ops_kern {
 		u32 reply;
 		u32 replylong[4];
 	};
+	u32	is_fullsock;
 };
 
 #endif /* __LINUX_FILTER_H__ */
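A hedged sketch of how a dump path could consume bpf_dump_raw_ok(); the
helper below is hypothetical, and only the gating on
kallsyms_show_value() comes from this patch:

  /* Hypothetical: hide JITed image details from callers that are not
   * allowed to see kernel addresses.
   */
  static void example_fill_jited_len(const struct bpf_prog *prog,
  				   struct bpf_prog_info *info)
  {
  	info->jited_prog_len = bpf_dump_raw_ok() ? prog->jited_len : 0;
  }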
diff --git a/include/linux/hyperv.h b/include/linux/hyperv.h
index 6c93366..93bd6fc 100644
--- a/include/linux/hyperv.h
+++ b/include/linux/hyperv.h
@@ -127,28 +127,6 @@ struct hv_ring_buffer_info {
 	u32 priv_read_index;
 };
 
-/*
- *
- * hv_get_ringbuffer_availbytes()
- *
- * Get number of bytes available to read and to write to
- * for the specified ring buffer
- */
-static inline void
-hv_get_ringbuffer_availbytes(const struct hv_ring_buffer_info *rbi,
-			     u32 *read, u32 *write)
-{
-	u32 read_loc, write_loc, dsize;
-
-	/* Capture the read/write indices before they changed */
-	read_loc = rbi->ring_buffer->read_index;
-	write_loc = rbi->ring_buffer->write_index;
-	dsize = rbi->ring_datasize;
-
-	*write = write_loc >= read_loc ? dsize - (write_loc - read_loc) :
-		read_loc - write_loc;
-	*read = dsize - *write;
-}
 
 static inline u32 hv_get_bytes_to_read(const struct hv_ring_buffer_info *rbi)
 {
diff --git a/include/linux/if_link.h b/include/linux/if_link.h
index 4c54611..622658d 100644
--- a/include/linux/if_link.h
+++ b/include/linux/if_link.h
@@ -13,6 +13,8 @@ struct ifla_vf_stats {
 	__u64 tx_bytes;
 	__u64 broadcast;
 	__u64 multicast;
+	__u64 rx_dropped;
+	__u64 tx_dropped;
 };
 
 struct ifla_vf_info {
diff --git a/include/linux/if_macvlan.h b/include/linux/if_macvlan.h
index bedf54b..4cb7aee 100644
--- a/include/linux/if_macvlan.h
+++ b/include/linux/if_macvlan.h
@@ -30,10 +30,10 @@ struct macvlan_dev {
 	enum macvlan_mode	mode;
 	u16			flags;
 	int			nest_level;
+	unsigned int		macaddr_count;
 #ifdef CONFIG_NET_POLL_CONTROLLER
 	struct netpoll		*netpoll;
 #endif
-	unsigned int		macaddr_count;
 };
 
 static inline void macvlan_count_rx(const struct macvlan_dev *vlan,
diff --git a/include/linux/if_tap.h b/include/linux/if_tap.h
index 3ecef57..8e66866 100644
--- a/include/linux/if_tap.h
+++ b/include/linux/if_tap.h
@@ -4,7 +4,7 @@
 
 #if IS_ENABLED(CONFIG_TAP)
 struct socket *tap_get_socket(struct file *);
-struct skb_array *tap_get_skb_array(struct file *file);
+struct ptr_ring *tap_get_ptr_ring(struct file *file);
 #else
 #include <linux/err.h>
 #include <linux/errno.h>
@@ -14,7 +14,7 @@ static inline struct socket *tap_get_socket(struct file *f)
 {
 	return ERR_PTR(-EINVAL);
 }
-static inline struct skb_array *tap_get_skb_array(struct file *f)
+static inline struct ptr_ring *tap_get_ptr_ring(struct file *f)
 {
 	return ERR_PTR(-EINVAL);
 }
@@ -70,7 +70,7 @@ struct tap_queue {
 	u16 queue_index;
 	bool enabled;
 	struct list_head next;
-	struct skb_array skb_array;
+	struct ptr_ring ring;
 };
 
 rx_handler_result_t tap_handle_frame(struct sk_buff **pskb);
diff --git a/include/linux/if_tun.h b/include/linux/if_tun.h
index bf9bdf4..c5b0a75 100644
--- a/include/linux/if_tun.h
+++ b/include/linux/if_tun.h
@@ -17,9 +17,14 @@
 
 #include <uapi/linux/if_tun.h>
 
+#define TUN_XDP_FLAG 0x1UL
+
 #if defined(CONFIG_TUN) || defined(CONFIG_TUN_MODULE)
 struct socket *tun_get_socket(struct file *);
-struct skb_array *tun_get_skb_array(struct file *file);
+struct ptr_ring *tun_get_tx_ring(struct file *file);
+bool tun_is_xdp_buff(void *ptr);
+void *tun_xdp_to_ptr(void *ptr);
+void *tun_ptr_to_xdp(void *ptr);
 #else
 #include <linux/err.h>
 #include <linux/errno.h>
@@ -29,9 +34,21 @@ static inline struct socket *tun_get_socket(struct file *f)
 {
 	return ERR_PTR(-EINVAL);
 }
-static inline struct skb_array *tun_get_skb_array(struct file *f)
+static inline struct ptr_ring *tun_get_tx_ring(struct file *f)
 {
 	return ERR_PTR(-EINVAL);
 }
+static inline bool tun_is_xdp_buff(void *ptr)
+{
+	return false;
+}
+static inline void *tun_xdp_to_ptr(void *ptr)
+{
+	return NULL;
+}
+static inline void *tun_ptr_to_xdp(void *ptr)
+{
+	return NULL;
+}
 #endif /* CONFIG_TUN */
 #endif /* __IF_TUN_H */
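The diff only declares the tagging helpers; below is a minimal sketch of
how such bit-0 tagging typically works (an assumption, not the actual
tun implementation). It relies on xdp_buff allocations being at least
2-byte aligned, leaving bit 0 free to distinguish entry types in the
shared ptr_ring:

  /* Illustrative tagging scheme, assuming aligned pointers. */
  static inline void *example_xdp_to_ptr(void *xdp)
  {
  	return (void *)((unsigned long)xdp | TUN_XDP_FLAG);
  }

  static inline bool example_is_xdp_buff(void *ptr)
  {
  	return (unsigned long)ptr & TUN_XDP_FLAG;
  }

  static inline void *example_ptr_to_xdp(void *ptr)
  {
  	return (void *)((unsigned long)ptr & ~TUN_XDP_FLAG);
  }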
diff --git a/include/linux/kprobes.h b/include/linux/kprobes.h
index 9440a2f..963fd36 100644
--- a/include/linux/kprobes.h
+++ b/include/linux/kprobes.h
@@ -271,6 +271,7 @@ extern bool arch_kprobe_on_func_entry(unsigned long offset);
 extern bool kprobe_on_func_entry(kprobe_opcode_t *addr, const char *sym, unsigned long offset);
 
 extern bool within_kprobe_blacklist(unsigned long addr);
+extern bool within_kprobe_error_injection_list(unsigned long addr);
 
 struct kprobe_insn_cache {
 	struct mutex mutex;
diff --git a/include/linux/mdio.h b/include/linux/mdio.h
index ca08ab1..2cfffe5 100644
--- a/include/linux/mdio.h
+++ b/include/linux/mdio.h
@@ -12,6 +12,7 @@
 #include <uapi/linux/mdio.h>
 #include <linux/mod_devicetable.h>
 
+struct gpio_desc;
 struct mii_bus;
 
 /* Multiple levels of nesting are possible. However typically this is
@@ -39,6 +40,9 @@ struct mdio_device {
 	/* Bus address of the MDIO device (0-31) */
 	int addr;
 	int flags;
+	struct gpio_desc *reset;
+	unsigned int reset_assert_delay;
+	unsigned int reset_deassert_delay;
 };
 #define to_mdio_device(d) container_of(d, struct mdio_device, dev)
 
@@ -71,6 +75,7 @@ void mdio_device_free(struct mdio_device *mdiodev);
 struct mdio_device *mdio_device_create(struct mii_bus *bus, int addr);
 int mdio_device_register(struct mdio_device *mdiodev);
 void mdio_device_remove(struct mdio_device *mdiodev);
+void mdio_device_reset(struct mdio_device *mdiodev, int value);
 int mdio_driver_register(struct mdio_driver *drv);
 void mdio_driver_unregister(struct mdio_driver *drv);
 int mdio_device_bus_match(struct device *dev, struct device_driver *drv);
@@ -257,6 +262,9 @@ static inline u16 ethtool_adv_to_mmd_eee_adv_t(u32 adv)
 	return reg;
 }
 
+int __mdiobus_read(struct mii_bus *bus, int addr, u32 regnum);
+int __mdiobus_write(struct mii_bus *bus, int addr, u32 regnum, u16 val);
+
 int mdiobus_read(struct mii_bus *bus, int addr, u32 regnum);
 int mdiobus_read_nested(struct mii_bus *bus, int addr, u32 regnum);
 int mdiobus_write(struct mii_bus *bus, int addr, u32 regnum, u16 val);
diff --git a/include/linux/mlx5/fs.h b/include/linux/mlx5/fs.h
index b25e7ba..a0b48af 100644
--- a/include/linux/mlx5/fs.h
+++ b/include/linux/mlx5/fs.h
@@ -95,6 +95,10 @@ struct mlx5_flow_destination {
 struct mlx5_flow_namespace *
 mlx5_get_flow_namespace(struct mlx5_core_dev *dev,
 			enum mlx5_flow_namespace_type type);
+struct mlx5_flow_namespace *
+mlx5_get_flow_vport_acl_namespace(struct mlx5_core_dev *dev,
+				  enum mlx5_flow_namespace_type type,
+				  int vport);
 
 struct mlx5_flow_table *
 mlx5_create_auto_grouped_flow_table(struct mlx5_flow_namespace *ns,
diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h
index d44ec5f..78e36fc 100644
--- a/include/linux/mlx5/mlx5_ifc.h
+++ b/include/linux/mlx5/mlx5_ifc.h
@@ -794,7 +794,10 @@ enum {
 };
 
 struct mlx5_ifc_cmd_hca_cap_bits {
-	u8         reserved_at_0[0x80];
+	u8         reserved_at_0[0x30];
+	u8         vhca_id[0x10];
+
+	u8         reserved_at_40[0x40];
 
 	u8         log_max_srq_sz[0x8];
 	u8         log_max_qp_sz[0x8];
@@ -1023,12 +1026,19 @@ struct mlx5_ifc_cmd_hca_cap_bits {
 	u8         reserved_at_3b8[0x3];
 	u8         log_min_stride_sz_sq[0x5];
 
-	u8         reserved_at_3c0[0x1b];
+	u8         hairpin[0x1];
+	u8         reserved_at_3c1[0x2];
+	u8         log_max_hairpin_queues[0x5];
+	u8         reserved_at_3c8[0x3];
+	u8         log_max_hairpin_wq_data_sz[0x5];
+	u8         reserved_at_3d0[0xb];
 	u8         log_max_wq_sz[0x5];
 
 	u8         nic_vport_change_event[0x1];
 	u8         disable_local_lb[0x1];
-	u8         reserved_at_3e2[0x9];
+	u8         reserved_at_3e2[0x1];
+	u8         log_min_hairpin_wq_data_sz[0x5];
+	u8         reserved_at_3e8[0x3];
 	u8         log_max_vlan_list[0x5];
 	u8         reserved_at_3f0[0x3];
 	u8         log_max_current_mc_list[0x5];
@@ -1162,7 +1172,10 @@ struct mlx5_ifc_wq_bits {
 	u8         reserved_at_118[0x3];
 	u8         log_wq_sz[0x5];
 
-	u8         reserved_at_120[0x15];
+	u8         reserved_at_120[0xb];
+	u8         log_hairpin_data_sz[0x5];
+	u8         reserved_at_130[0x5];
+
 	u8         log_wqe_num_of_strides[0x3];
 	u8         two_byte_shift_en[0x1];
 	u8         reserved_at_139[0x4];
@@ -2482,7 +2495,8 @@ struct mlx5_ifc_sqc_bits {
 	u8         state[0x4];
 	u8         reg_umr[0x1];
 	u8         allow_swp[0x1];
-	u8         reserved_at_e[0x12];
+	u8         hairpin[0x1];
+	u8         reserved_at_f[0x11];
 
 	u8         reserved_at_20[0x8];
 	u8         user_index[0x18];
@@ -2490,7 +2504,13 @@ struct mlx5_ifc_sqc_bits {
 	u8         reserved_at_40[0x8];
 	u8         cqn[0x18];
 
-	u8         reserved_at_60[0x90];
+	u8         reserved_at_60[0x8];
+	u8         hairpin_peer_rq[0x18];
+
+	u8         reserved_at_80[0x10];
+	u8         hairpin_peer_vhca[0x10];
+
+	u8         reserved_at_a0[0x50];
 
 	u8         packet_pacing_rate_limit_index[0x10];
 	u8         tis_lst_sz[0x10];
@@ -2562,7 +2582,8 @@ struct mlx5_ifc_rqc_bits {
 	u8         state[0x4];
 	u8         reserved_at_c[0x1];
 	u8         flush_in_error_en[0x1];
-	u8         reserved_at_e[0x12];
+	u8         hairpin[0x1];
+	u8         reserved_at_f[0x11];
 
 	u8         reserved_at_20[0x8];
 	u8         user_index[0x18];
@@ -2576,7 +2597,13 @@ struct mlx5_ifc_rqc_bits {
 	u8         reserved_at_80[0x8];
 	u8         rmpn[0x18];
 
-	u8         reserved_at_a0[0xe0];
+	u8         reserved_at_a0[0x8];
+	u8         hairpin_peer_sq[0x18];
+
+	u8         reserved_at_c0[0x10];
+	u8         hairpin_peer_vhca[0x10];
+
+	u8         reserved_at_e0[0xa0];
 
 	struct mlx5_ifc_wq_bits wq;
 };
diff --git a/include/linux/mlx5/transobj.h b/include/linux/mlx5/transobj.h
index 88441f5..a228310 100644
--- a/include/linux/mlx5/transobj.h
+++ b/include/linux/mlx5/transobj.h
@@ -75,4 +75,23 @@ int mlx5_core_modify_rqt(struct mlx5_core_dev *dev, u32 rqtn, u32 *in,
 			 int inlen);
 void mlx5_core_destroy_rqt(struct mlx5_core_dev *dev, u32 rqtn);
 
+struct mlx5_hairpin_params {
+	u8  log_data_size;
+	u16 q_counter;
+};
+
+struct mlx5_hairpin {
+	struct mlx5_core_dev *func_mdev;
+	struct mlx5_core_dev *peer_mdev;
+
+	u32 rqn;
+	u32 sqn;
+};
+
+struct mlx5_hairpin *
+mlx5_core_hairpin_create(struct mlx5_core_dev *func_mdev,
+			 struct mlx5_core_dev *peer_mdev,
+			 struct mlx5_hairpin_params *params);
+
+void mlx5_core_hairpin_destroy(struct mlx5_hairpin *pair);
 #endif /* __TRANSOBJ_H__ */
diff --git a/include/linux/module.h b/include/linux/module.h
index c69b49a..548fa09 100644
--- a/include/linux/module.h
+++ b/include/linux/module.h
@@ -475,6 +475,11 @@ struct module {
 	ctor_fn_t *ctors;
 	unsigned int num_ctors;
 #endif
+
+#ifdef CONFIG_BPF_KPROBE_OVERRIDE
+	unsigned int num_kprobe_ei_funcs;
+	unsigned long *kprobe_ei_funcs;
+#endif
 } ____cacheline_aligned __randomize_layout;
 #ifndef MODULE_ARCH_INIT
 #define MODULE_ARCH_INIT {}
diff --git a/include/linux/net_dim.h b/include/linux/net_dim.h
new file mode 100644
index 0000000..1c7e450
--- /dev/null
+++ b/include/linux/net_dim.h
@@ -0,0 +1,373 @@
+/*
+ * Copyright (c) 2016, Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2017-2018, Broadcom Limited. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef NET_DIM_H
+#define NET_DIM_H
+
+#include <linux/module.h>
+
+struct net_dim_cq_moder {
+	u16 usec;
+	u16 pkts;
+	u8 cq_period_mode;
+};
+
+struct net_dim_sample {
+	ktime_t time;
+	u32     pkt_ctr;
+	u32     byte_ctr;
+	u16     event_ctr;
+};
+
+struct net_dim_stats {
+	int ppms; /* packets per msec */
+	int bpms; /* bytes per msec */
+	int epms; /* events per msec */
+};
+
+struct net_dim { /* Adaptive Moderation */
+	u8                                      state;
+	struct net_dim_stats                    prev_stats;
+	struct net_dim_sample                   start_sample;
+	struct work_struct                      work;
+	u8                                      profile_ix;
+	u8                                      mode;
+	u8                                      tune_state;
+	u8                                      steps_right;
+	u8                                      steps_left;
+	u8                                      tired;
+};
+
+enum {
+	NET_DIM_CQ_PERIOD_MODE_START_FROM_EQE = 0x0,
+	NET_DIM_CQ_PERIOD_MODE_START_FROM_CQE = 0x1,
+	NET_DIM_CQ_PERIOD_NUM_MODES
+};
+
+/* Adaptive moderation logic */
+enum {
+	NET_DIM_START_MEASURE,
+	NET_DIM_MEASURE_IN_PROGRESS,
+	NET_DIM_APPLY_NEW_PROFILE,
+};
+
+enum {
+	NET_DIM_PARKING_ON_TOP,
+	NET_DIM_PARKING_TIRED,
+	NET_DIM_GOING_RIGHT,
+	NET_DIM_GOING_LEFT,
+};
+
+enum {
+	NET_DIM_STATS_WORSE,
+	NET_DIM_STATS_SAME,
+	NET_DIM_STATS_BETTER,
+};
+
+enum {
+	NET_DIM_STEPPED,
+	NET_DIM_TOO_TIRED,
+	NET_DIM_ON_EDGE,
+};
+
+#define NET_DIM_PARAMS_NUM_PROFILES 5
+/* Adaptive moderation profiles */
+#define NET_DIM_DEFAULT_RX_CQ_MODERATION_PKTS_FROM_EQE 256
+#define NET_DIM_DEF_PROFILE_CQE 1
+#define NET_DIM_DEF_PROFILE_EQE 1
+
+/* All profile sizes must be NET_DIM_PARAMS_NUM_PROFILES */
+#define NET_DIM_EQE_PROFILES { \
+	{1,   NET_DIM_DEFAULT_RX_CQ_MODERATION_PKTS_FROM_EQE}, \
+	{8,   NET_DIM_DEFAULT_RX_CQ_MODERATION_PKTS_FROM_EQE}, \
+	{64,  NET_DIM_DEFAULT_RX_CQ_MODERATION_PKTS_FROM_EQE}, \
+	{128, NET_DIM_DEFAULT_RX_CQ_MODERATION_PKTS_FROM_EQE}, \
+	{256, NET_DIM_DEFAULT_RX_CQ_MODERATION_PKTS_FROM_EQE}, \
+}
+
+#define NET_DIM_CQE_PROFILES { \
+	{2,  256},             \
+	{8,  128},             \
+	{16, 64},              \
+	{32, 64},              \
+	{64, 64}               \
+}
+
+static const struct net_dim_cq_moder
+profile[NET_DIM_CQ_PERIOD_NUM_MODES][NET_DIM_PARAMS_NUM_PROFILES] = {
+	NET_DIM_EQE_PROFILES,
+	NET_DIM_CQE_PROFILES,
+};
+
+static inline struct net_dim_cq_moder net_dim_get_profile(u8 cq_period_mode,
+							  int ix)
+{
+	struct net_dim_cq_moder cq_moder;
+
+	cq_moder = profile[cq_period_mode][ix];
+	cq_moder.cq_period_mode = cq_period_mode;
+	return cq_moder;
+}
+
+static inline struct net_dim_cq_moder net_dim_get_def_profile(u8 rx_cq_period_mode)
+{
+	int default_profile_ix;
+
+	if (rx_cq_period_mode == NET_DIM_CQ_PERIOD_MODE_START_FROM_CQE)
+		default_profile_ix = NET_DIM_DEF_PROFILE_CQE;
+	else /* NET_DIM_CQ_PERIOD_MODE_START_FROM_EQE */
+		default_profile_ix = NET_DIM_DEF_PROFILE_EQE;
+
+	return net_dim_get_profile(rx_cq_period_mode, default_profile_ix);
+}
+
+static inline bool net_dim_on_top(struct net_dim *dim)
+{
+	switch (dim->tune_state) {
+	case NET_DIM_PARKING_ON_TOP:
+	case NET_DIM_PARKING_TIRED:
+		return true;
+	case NET_DIM_GOING_RIGHT:
+		return (dim->steps_left > 1) && (dim->steps_right == 1);
+	default: /* NET_DIM_GOING_LEFT */
+		return (dim->steps_right > 1) && (dim->steps_left == 1);
+	}
+}
+
+static inline void net_dim_turn(struct net_dim *dim)
+{
+	switch (dim->tune_state) {
+	case NET_DIM_PARKING_ON_TOP:
+	case NET_DIM_PARKING_TIRED:
+		break;
+	case NET_DIM_GOING_RIGHT:
+		dim->tune_state = NET_DIM_GOING_LEFT;
+		dim->steps_left = 0;
+		break;
+	case NET_DIM_GOING_LEFT:
+		dim->tune_state = NET_DIM_GOING_RIGHT;
+		dim->steps_right = 0;
+		break;
+	}
+}
+
+static inline int net_dim_step(struct net_dim *dim)
+{
+	if (dim->tired == (NET_DIM_PARAMS_NUM_PROFILES * 2))
+		return NET_DIM_TOO_TIRED;
+
+	switch (dim->tune_state) {
+	case NET_DIM_PARKING_ON_TOP:
+	case NET_DIM_PARKING_TIRED:
+		break;
+	case NET_DIM_GOING_RIGHT:
+		if (dim->profile_ix == (NET_DIM_PARAMS_NUM_PROFILES - 1))
+			return NET_DIM_ON_EDGE;
+		dim->profile_ix++;
+		dim->steps_right++;
+		break;
+	case NET_DIM_GOING_LEFT:
+		if (dim->profile_ix == 0)
+			return NET_DIM_ON_EDGE;
+		dim->profile_ix--;
+		dim->steps_left++;
+		break;
+	}
+
+	dim->tired++;
+	return NET_DIM_STEPPED;
+}
+
+static inline void net_dim_park_on_top(struct net_dim *dim)
+{
+	dim->steps_right  = 0;
+	dim->steps_left   = 0;
+	dim->tired        = 0;
+	dim->tune_state   = NET_DIM_PARKING_ON_TOP;
+}
+
+static inline void net_dim_park_tired(struct net_dim *dim)
+{
+	dim->steps_right  = 0;
+	dim->steps_left   = 0;
+	dim->tune_state   = NET_DIM_PARKING_TIRED;
+}
+
+static inline void net_dim_exit_parking(struct net_dim *dim)
+{
+	dim->tune_state = dim->profile_ix ? NET_DIM_GOING_LEFT :
+					  NET_DIM_GOING_RIGHT;
+	net_dim_step(dim);
+}
+
+#define IS_SIGNIFICANT_DIFF(val, ref) \
+	(((100 * abs((val) - (ref))) / (ref)) > 10) /* more than 10% difference */
+
+static inline int net_dim_stats_compare(struct net_dim_stats *curr,
+					struct net_dim_stats *prev)
+{
+	if (!prev->bpms)
+		return curr->bpms ? NET_DIM_STATS_BETTER :
+				    NET_DIM_STATS_SAME;
+
+	if (IS_SIGNIFICANT_DIFF(curr->bpms, prev->bpms))
+		return (curr->bpms > prev->bpms) ? NET_DIM_STATS_BETTER :
+						   NET_DIM_STATS_WORSE;
+
+	if (IS_SIGNIFICANT_DIFF(curr->ppms, prev->ppms))
+		return (curr->ppms > prev->ppms) ? NET_DIM_STATS_BETTER :
+						   NET_DIM_STATS_WORSE;
+
+	if (IS_SIGNIFICANT_DIFF(curr->epms, prev->epms))
+		return (curr->epms < prev->epms) ? NET_DIM_STATS_BETTER :
+						   NET_DIM_STATS_WORSE;
+
+	return NET_DIM_STATS_SAME;
+}
+
+static inline bool net_dim_decision(struct net_dim_stats *curr_stats,
+				    struct net_dim *dim)
+{
+	int prev_state = dim->tune_state;
+	int prev_ix = dim->profile_ix;
+	int stats_res;
+	int step_res;
+
+	switch (dim->tune_state) {
+	case NET_DIM_PARKING_ON_TOP:
+		stats_res = net_dim_stats_compare(curr_stats, &dim->prev_stats);
+		if (stats_res != NET_DIM_STATS_SAME)
+			net_dim_exit_parking(dim);
+		break;
+
+	case NET_DIM_PARKING_TIRED:
+		dim->tired--;
+		if (!dim->tired)
+			net_dim_exit_parking(dim);
+		break;
+
+	case NET_DIM_GOING_RIGHT:
+	case NET_DIM_GOING_LEFT:
+		stats_res = net_dim_stats_compare(curr_stats, &dim->prev_stats);
+		if (stats_res != NET_DIM_STATS_BETTER)
+			net_dim_turn(dim);
+
+		if (net_dim_on_top(dim)) {
+			net_dim_park_on_top(dim);
+			break;
+		}
+
+		step_res = net_dim_step(dim);
+		switch (step_res) {
+		case NET_DIM_ON_EDGE:
+			net_dim_park_on_top(dim);
+			break;
+		case NET_DIM_TOO_TIRED:
+			net_dim_park_tired(dim);
+			break;
+		}
+
+		break;
+	}
+
+	if ((prev_state      != NET_DIM_PARKING_ON_TOP) ||
+	    (dim->tune_state != NET_DIM_PARKING_ON_TOP))
+		dim->prev_stats = *curr_stats;
+
+	return dim->profile_ix != prev_ix;
+}
+
+static inline void net_dim_sample(u16 event_ctr,
+				  u64 packets,
+				  u64 bytes,
+				  struct net_dim_sample *s)
+{
+	s->time	     = ktime_get();
+	s->pkt_ctr   = packets;
+	s->byte_ctr  = bytes;
+	s->event_ctr = event_ctr;
+}
+
+#define NET_DIM_NEVENTS 64
+#define BITS_PER_TYPE(type) (sizeof(type) * BITS_PER_BYTE)
+#define BIT_GAP(bits, end, start) ((((end) - (start)) + BIT_ULL(bits)) & (BIT_ULL(bits) - 1))
+
+static inline void net_dim_calc_stats(struct net_dim_sample *start,
+				      struct net_dim_sample *end,
+				      struct net_dim_stats *curr_stats)
+{
+	/* u32 holds up to 71 minutes, should be enough */
+	u32 delta_us = ktime_us_delta(end->time, start->time);
+	u32 npkts = BIT_GAP(BITS_PER_TYPE(u32), end->pkt_ctr, start->pkt_ctr);
+	u32 nbytes = BIT_GAP(BITS_PER_TYPE(u32), end->byte_ctr,
+			     start->byte_ctr);
+
+	if (!delta_us)
+		return;
+
+	curr_stats->ppms = DIV_ROUND_UP(npkts * USEC_PER_MSEC, delta_us);
+	curr_stats->bpms = DIV_ROUND_UP(nbytes * USEC_PER_MSEC, delta_us);
+	curr_stats->epms = DIV_ROUND_UP(NET_DIM_NEVENTS * USEC_PER_MSEC,
+					delta_us);
+}
+
+static inline void net_dim(struct net_dim *dim,
+			   struct net_dim_sample end_sample)
+{
+	struct net_dim_stats curr_stats;
+	u16 nevents;
+
+	switch (dim->state) {
+	case NET_DIM_MEASURE_IN_PROGRESS:
+		nevents = BIT_GAP(BITS_PER_TYPE(u16),
+				  end_sample.event_ctr,
+				  dim->start_sample.event_ctr);
+		if (nevents < NET_DIM_NEVENTS)
+			break;
+		net_dim_calc_stats(&dim->start_sample, &end_sample,
+				   &curr_stats);
+		if (net_dim_decision(&curr_stats, dim)) {
+			dim->state = NET_DIM_APPLY_NEW_PROFILE;
+			schedule_work(&dim->work);
+			break;
+		}
+		/* fall through */
+	case NET_DIM_START_MEASURE:
+		dim->state = NET_DIM_MEASURE_IN_PROGRESS;
+		break;
+	case NET_DIM_APPLY_NEW_PROFILE:
+		break;
+	}
+}
+
+#endif /* NET_DIM_H */
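A short driver-side sketch of the intended entry point (the ring
structure and counter names are invented; only net_dim_sample() and
net_dim() come from the header): the driver snapshots its counters at
the end of a poll cycle and lets net_dim() decide whether to schedule
the profile-change work.

  struct example_ring {
  	struct net_dim	dim;
  	u16		events;
  	u64		packets;
  	u64		bytes;
  };

  /* Hypothetical driver hook, called at the end of a NAPI poll cycle. */
  static void example_ring_dim(struct example_ring *ring)
  {
  	struct net_dim_sample sample;

  	net_dim_sample(ring->events, ring->packets, ring->bytes, &sample);
  	net_dim(&ring->dim, sample);
  }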
diff --git a/include/linux/netdev_features.h b/include/linux/netdev_features.h
index b1b0ca7..db84c51 100644
--- a/include/linux/netdev_features.h
+++ b/include/linux/netdev_features.h
@@ -78,6 +78,8 @@ enum {
 	NETIF_F_HW_ESP_TX_CSUM_BIT,	/* ESP with TX checksum offload */
 	NETIF_F_RX_UDP_TUNNEL_PORT_BIT, /* Offload of RX port for UDP tunnels */
 
+	NETIF_F_GRO_HW_BIT,		/* Hardware Generic receive offload */
+
 	/*
 	 * Add your fresh new feature above and remember to update
 	 * netdev_features_strings[] in net/core/ethtool.c and maybe
@@ -97,6 +99,7 @@ enum {
 #define NETIF_F_FRAGLIST	__NETIF_F(FRAGLIST)
 #define NETIF_F_FSO		__NETIF_F(FSO)
 #define NETIF_F_GRO		__NETIF_F(GRO)
+#define NETIF_F_GRO_HW		__NETIF_F(GRO_HW)
 #define NETIF_F_GSO		__NETIF_F(GSO)
 #define NETIF_F_GSO_ROBUST	__NETIF_F(GSO_ROBUST)
 #define NETIF_F_HIGHDMA		__NETIF_F(HIGHDMA)
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index ef789e1..ef7b348 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -44,6 +44,7 @@
 #include <net/dcbnl.h>
 #endif
 #include <net/netprio_cgroup.h>
+#include <net/xdp.h>
 
 #include <linux/netdev_features.h>
 #include <linux/neighbour.h>
@@ -686,6 +687,7 @@ struct netdev_rx_queue {
 #endif
 	struct kobject			kobj;
 	struct net_device		*dev;
+	struct xdp_rxq_info		xdp_rxq;
 } ____cacheline_aligned_in_smp;
 
 /*
@@ -804,7 +806,7 @@ enum bpf_netdev_command {
 	BPF_OFFLOAD_DESTROY,
 };
 
-struct bpf_ext_analyzer_ops;
+struct bpf_prog_offload_ops;
 struct netlink_ext_ack;
 
 struct netdev_bpf {
@@ -820,11 +822,13 @@ struct netdev_bpf {
 		struct {
 			u8 prog_attached;
 			u32 prog_id;
+			/* flags with which program was installed */
+			u32 prog_flags;
 		};
 		/* BPF_OFFLOAD_VERIFIER_PREP */
 		struct {
 			struct bpf_prog *prog;
-			const struct bpf_ext_analyzer_ops *ops; /* callee set */
+			const struct bpf_prog_offload_ops *ops; /* callee set */
 		} verifier;
 		/* BPF_OFFLOAD_TRANSLATE, BPF_OFFLOAD_DESTROY */
 		struct {
@@ -1724,7 +1728,7 @@ struct net_device {
 	const struct ndisc_ops *ndisc_ops;
 #endif
 
-#ifdef CONFIG_XFRM
+#ifdef CONFIG_XFRM_OFFLOAD
 	const struct xfrmdev_ops *xfrmdev_ops;
 #endif
 
@@ -1801,12 +1805,9 @@ struct net_device {
 	/* Interface address info used in eth_type_trans() */
 	unsigned char		*dev_addr;
 
-#ifdef CONFIG_SYSFS
 	struct netdev_rx_queue	*_rx;
-
 	unsigned int		num_rx_queues;
 	unsigned int		real_num_rx_queues;
-#endif
 
 	struct bpf_prog __rcu	*xdp_prog;
 	unsigned long		gro_flush_timeout;
@@ -2791,7 +2792,9 @@ struct softnet_data {
 	struct Qdisc		*output_queue;
 	struct Qdisc		**output_queue_tailp;
 	struct sk_buff		*completion_queue;
-
+#ifdef CONFIG_XFRM_OFFLOAD
+	struct sk_buff_head	xfrm_backlog;
+#endif
 #ifdef CONFIG_RPS
 	/* input_queue_head should be written by cpu owning this struct,
 	 * and only read by other cpus. Worth using a cache line.
@@ -3323,14 +3326,15 @@ int dev_get_phys_port_id(struct net_device *dev,
 int dev_get_phys_port_name(struct net_device *dev,
 			   char *name, size_t len);
 int dev_change_proto_down(struct net_device *dev, bool proto_down);
-struct sk_buff *validate_xmit_skb_list(struct sk_buff *skb, struct net_device *dev);
+struct sk_buff *validate_xmit_skb_list(struct sk_buff *skb, struct net_device *dev, bool *again);
 struct sk_buff *dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
 				    struct netdev_queue *txq, int *ret);
 
 typedef int (*bpf_op_t)(struct net_device *dev, struct netdev_bpf *bpf);
 int dev_change_xdp_fd(struct net_device *dev, struct netlink_ext_ack *extack,
 		      int fd, u32 flags);
-u8 __dev_xdp_attached(struct net_device *dev, bpf_op_t xdp_op, u32 *prog_id);
+void __dev_xdp_query(struct net_device *dev, bpf_op_t xdp_op,
+		     struct netdev_bpf *xdp);
 
 int __dev_forward_skb(struct net_device *dev, struct sk_buff *skb);
 int dev_forward_skb(struct net_device *dev, struct sk_buff *skb);
@@ -4399,11 +4403,11 @@ do {								\
  * file/line information and a backtrace.
  */
 #define netdev_WARN(dev, format, args...)			\
-	WARN(1, "netdevice: %s%s\n" format, netdev_name(dev),	\
+	WARN(1, "netdevice: %s%s: " format, netdev_name(dev),	\
 	     netdev_reg_state(dev), ##args)
 
-#define netdev_WARN_ONCE(dev, condition, format, arg...)		\
-	WARN_ONCE(1, "netdevice: %s%s\n" format, netdev_name(dev)	\
+#define netdev_WARN_ONCE(dev, format, args...)				\
+	WARN_ONCE(1, "netdevice: %s%s: " format, netdev_name(dev),	\
 		  netdev_reg_state(dev), ##args)
 
 /* netif printk helpers, similar to netdev_printk */
diff --git a/include/linux/netfilter.h b/include/linux/netfilter.h
index b24e9b1..85a1a0b 100644
--- a/include/linux/netfilter.h
+++ b/include/linux/netfilter.h
@@ -67,6 +67,7 @@ struct nf_hook_ops {
 	struct net_device	*dev;
 	void			*priv;
 	u_int8_t		pf;
+	bool			nat_hook;
 	unsigned int		hooknum;
 	/* Hooks are ordered in ascending priority. */
 	int			priority;
@@ -77,17 +78,28 @@ struct nf_hook_entry {
 	void				*priv;
 };
 
+struct nf_hook_entries_rcu_head {
+	struct rcu_head head;
+	void	*allocation;
+};
+
 struct nf_hook_entries {
 	u16				num_hook_entries;
 	/* padding */
 	struct nf_hook_entry		hooks[];
 
-	/* trailer: pointers to original orig_ops of each hook.
+	/* trailer: pointers to original orig_ops of each hook,
+	 * followed by rcu_head and scratch space used for freeing
+	 * the structure via call_rcu.
 	 *
-	 * This is not part of struct nf_hook_entry since its only
-	 * needed in slow path (hook register/unregister).
-	 *
+	 *   This is not part of struct nf_hook_entry since it's only
+	 *   needed in the slow path (hook register/unregister):
 	 * const struct nf_hook_ops     *orig_ops[]
+	 *
+	 *   For the same reason, we store this at the end -- it's
+	 *   only needed when a hook is deleted, not during
+	 *   packet path processing:
+	 * struct nf_hook_entries_rcu_head     head
 	 */
 };
 
@@ -184,7 +196,7 @@ static inline int nf_hook(u_int8_t pf, unsigned int hook, struct net *net,
 			  struct net_device *indev, struct net_device *outdev,
 			  int (*okfn)(struct net *, struct sock *, struct sk_buff *))
 {
-	struct nf_hook_entries *hook_head;
+	struct nf_hook_entries *hook_head = NULL;
 	int ret = 1;
 
 #ifdef HAVE_JUMP_LABEL
@@ -195,7 +207,33 @@ static inline int nf_hook(u_int8_t pf, unsigned int hook, struct net *net,
 #endif
 
 	rcu_read_lock();
-	hook_head = rcu_dereference(net->nf.hooks[pf][hook]);
+	switch (pf) {
+	case NFPROTO_IPV4:
+		hook_head = rcu_dereference(net->nf.hooks_ipv4[hook]);
+		break;
+	case NFPROTO_IPV6:
+		hook_head = rcu_dereference(net->nf.hooks_ipv6[hook]);
+		break;
+	case NFPROTO_ARP:
+#ifdef CONFIG_NETFILTER_FAMILY_ARP
+		hook_head = rcu_dereference(net->nf.hooks_arp[hook]);
+#endif
+		break;
+	case NFPROTO_BRIDGE:
+#ifdef CONFIG_NETFILTER_FAMILY_BRIDGE
+		hook_head = rcu_dereference(net->nf.hooks_bridge[hook]);
+#endif
+		break;
+#if IS_ENABLED(CONFIG_DECNET)
+	case NFPROTO_DECNET:
+		hook_head = rcu_dereference(net->nf.hooks_decnet[hook]);
+		break;
+#endif
+	default:
+		WARN_ON_ONCE(1);
+		break;
+	}
+
 	if (hook_head) {
 		struct nf_hook_state state;
 
@@ -271,64 +309,16 @@ int skb_make_writable(struct sk_buff *skb, unsigned int writable_len);
 struct flowi;
 struct nf_queue_entry;
 
-struct nf_afinfo {
-	unsigned short	family;
-	__sum16		(*checksum)(struct sk_buff *skb, unsigned int hook,
-				    unsigned int dataoff, u_int8_t protocol);
-	__sum16		(*checksum_partial)(struct sk_buff *skb,
-					    unsigned int hook,
-					    unsigned int dataoff,
-					    unsigned int len,
-					    u_int8_t protocol);
-	int		(*route)(struct net *net, struct dst_entry **dst,
-				 struct flowi *fl, bool strict);
-	void		(*saveroute)(const struct sk_buff *skb,
-				     struct nf_queue_entry *entry);
-	int		(*reroute)(struct net *net, struct sk_buff *skb,
-				   const struct nf_queue_entry *entry);
-	int		route_key_size;
-};
+__sum16 nf_checksum(struct sk_buff *skb, unsigned int hook,
+		    unsigned int dataoff, u_int8_t protocol,
+		    unsigned short family);
 
-extern const struct nf_afinfo __rcu *nf_afinfo[NFPROTO_NUMPROTO];
-static inline const struct nf_afinfo *nf_get_afinfo(unsigned short family)
-{
-	return rcu_dereference(nf_afinfo[family]);
-}
-
-static inline __sum16
-nf_checksum(struct sk_buff *skb, unsigned int hook, unsigned int dataoff,
-	    u_int8_t protocol, unsigned short family)
-{
-	const struct nf_afinfo *afinfo;
-	__sum16 csum = 0;
-
-	rcu_read_lock();
-	afinfo = nf_get_afinfo(family);
-	if (afinfo)
-		csum = afinfo->checksum(skb, hook, dataoff, protocol);
-	rcu_read_unlock();
-	return csum;
-}
-
-static inline __sum16
-nf_checksum_partial(struct sk_buff *skb, unsigned int hook,
-		    unsigned int dataoff, unsigned int len,
-		    u_int8_t protocol, unsigned short family)
-{
-	const struct nf_afinfo *afinfo;
-	__sum16 csum = 0;
-
-	rcu_read_lock();
-	afinfo = nf_get_afinfo(family);
-	if (afinfo)
-		csum = afinfo->checksum_partial(skb, hook, dataoff, len,
-						protocol);
-	rcu_read_unlock();
-	return csum;
-}
-
-int nf_register_afinfo(const struct nf_afinfo *afinfo);
-void nf_unregister_afinfo(const struct nf_afinfo *afinfo);
+__sum16 nf_checksum_partial(struct sk_buff *skb, unsigned int hook,
+			    unsigned int dataoff, unsigned int len,
+			    u_int8_t protocol, unsigned short family);
+int nf_route(struct net *net, struct dst_entry **dst, struct flowi *fl,
+	     bool strict, unsigned short family);
+int nf_reroute(struct sk_buff *skb, struct nf_queue_entry *entry);
 
 #include <net/flow.h>
 extern void (*nf_nat_decode_session_hook)(struct sk_buff *, struct flowi *);
diff --git a/include/linux/netfilter/ipset/ip_set.h b/include/linux/netfilter/ipset/ip_set.h
index 8e42253..34fc80f 100644
--- a/include/linux/netfilter/ipset/ip_set.h
+++ b/include/linux/netfilter/ipset/ip_set.h
@@ -122,6 +122,8 @@ struct ip_set_ext {
 	u64 bytes;
 	char *comment;
 	u32 timeout;
+	u8 packets_op;
+	u8 bytes_op;
 };
 
 struct ip_set;
@@ -339,6 +341,10 @@ extern int ip_set_get_extensions(struct ip_set *set, struct nlattr *tb[],
 				 struct ip_set_ext *ext);
 extern int ip_set_put_extensions(struct sk_buff *skb, const struct ip_set *set,
 				 const void *e, bool active);
+extern bool ip_set_match_extensions(struct ip_set *set,
+				    const struct ip_set_ext *ext,
+				    struct ip_set_ext *mext,
+				    u32 flags, void *data);
 
 static inline int
 ip_set_get_hostipaddr4(struct nlattr *nla, u32 *ipaddr)
diff --git a/include/linux/netfilter/ipset/ip_set_counter.h b/include/linux/netfilter/ipset/ip_set_counter.h
index bb6fba4..3d33a2c 100644
--- a/include/linux/netfilter/ipset/ip_set_counter.h
+++ b/include/linux/netfilter/ipset/ip_set_counter.h
@@ -34,20 +34,33 @@ ip_set_get_packets(const struct ip_set_counter *counter)
 	return (u64)atomic64_read(&(counter)->packets);
 }
 
+static inline bool
+ip_set_match_counter(u64 counter, u64 match, u8 op)
+{
+	switch (op) {
+	case IPSET_COUNTER_NONE:
+		return true;
+	case IPSET_COUNTER_EQ:
+		return counter == match;
+	case IPSET_COUNTER_NE:
+		return counter != match;
+	case IPSET_COUNTER_LT:
+		return counter < match;
+	case IPSET_COUNTER_GT:
+		return counter > match;
+	}
+	return false;
+}
+
 static inline void
 ip_set_update_counter(struct ip_set_counter *counter,
-		      const struct ip_set_ext *ext,
-		      struct ip_set_ext *mext, u32 flags)
+		      const struct ip_set_ext *ext, u32 flags)
 {
 	if (ext->packets != ULLONG_MAX &&
 	    !(flags & IPSET_FLAG_SKIP_COUNTER_UPDATE)) {
 		ip_set_add_bytes(ext->bytes, counter);
 		ip_set_add_packets(ext->packets, counter);
 	}
-	if (flags & IPSET_FLAG_MATCH_COUNTERS) {
-		mext->packets = ip_set_get_packets(counter);
-		mext->bytes = ip_set_get_bytes(counter);
-	}
 }
 
 static inline bool
diff --git a/include/linux/netfilter/x_tables.h b/include/linux/netfilter/x_tables.h
index 33f7530..1313b35 100644
--- a/include/linux/netfilter/x_tables.h
+++ b/include/linux/netfilter/x_tables.h
@@ -320,6 +320,8 @@ int xt_find_revision(u8 af, const char *name, u8 revision, int target,
 
 struct xt_table *xt_find_table_lock(struct net *net, u_int8_t af,
 				    const char *name);
+struct xt_table *xt_request_find_table_lock(struct net *net, u_int8_t af,
+					    const char *name);
 void xt_table_unlock(struct xt_table *t);
 
 int xt_proto_init(struct net *net, u_int8_t af);
diff --git a/include/linux/netfilter_defs.h b/include/linux/netfilter_defs.h
index dc6111a..8dddfb1 100644
--- a/include/linux/netfilter_defs.h
+++ b/include/linux/netfilter_defs.h
@@ -4,7 +4,17 @@
 
 #include <uapi/linux/netfilter.h>
 
+/* in/out/forward only */
+#define NF_ARP_NUMHOOKS 3
+
+/* max hook is NF_DN_ROUTE (6), also see uapi/linux/netfilter_decnet.h */
+#define NF_DN_NUMHOOKS 7
+
+#if IS_ENABLED(CONFIG_DECNET)
 /* Largest hook number + 1, see uapi/linux/netfilter_decnet.h */
-#define NF_MAX_HOOKS 8
+#define NF_MAX_HOOKS	NF_DN_NUMHOOKS
+#else
+#define NF_MAX_HOOKS	NF_INET_NUMHOOKS
+#endif
 
 #endif
diff --git a/include/linux/netfilter_ipv4.h b/include/linux/netfilter_ipv4.h
index 98c03b2..b31dabf 100644
--- a/include/linux/netfilter_ipv4.h
+++ b/include/linux/netfilter_ipv4.h
@@ -6,7 +6,53 @@
 
 #include <uapi/linux/netfilter_ipv4.h>
 
+/* Extra routing may be needed on local out, as the QUEUE target never returns
+ * control to the table.
+ */
+struct ip_rt_info {
+	__be32 daddr;
+	__be32 saddr;
+	u_int8_t tos;
+	u_int32_t mark;
+};
+
 int ip_route_me_harder(struct net *net, struct sk_buff *skb, unsigned addr_type);
+
+struct nf_queue_entry;
+
+#ifdef CONFIG_INET
 __sum16 nf_ip_checksum(struct sk_buff *skb, unsigned int hook,
 		       unsigned int dataoff, u_int8_t protocol);
+__sum16 nf_ip_checksum_partial(struct sk_buff *skb, unsigned int hook,
+			       unsigned int dataoff, unsigned int len,
+			       u_int8_t protocol);
+int nf_ip_route(struct net *net, struct dst_entry **dst, struct flowi *fl,
+		bool strict);
+int nf_ip_reroute(struct sk_buff *skb, const struct nf_queue_entry *entry);
+#else
+static inline __sum16 nf_ip_checksum(struct sk_buff *skb, unsigned int hook,
+				     unsigned int dataoff, u_int8_t protocol)
+{
+	return 0;
+}
+static inline __sum16 nf_ip_checksum_partial(struct sk_buff *skb,
+					     unsigned int hook,
+					     unsigned int dataoff,
+					     unsigned int len,
+					     u_int8_t protocol)
+{
+	return 0;
+}
+static inline int nf_ip_route(struct net *net, struct dst_entry **dst,
+			      struct flowi *fl, bool strict)
+{
+	return -EOPNOTSUPP;
+}
+static inline int nf_ip_reroute(struct sk_buff *skb,
+				const struct nf_queue_entry *entry)
+{
+	return -EOPNOTSUPP;
+}
+#endif /* CONFIG_INET */
+
 #endif /*__LINUX_IP_NETFILTER_H*/
diff --git a/include/linux/netfilter_ipv6.h b/include/linux/netfilter_ipv6.h
index 47c6b04..288c597 100644
--- a/include/linux/netfilter_ipv6.h
+++ b/include/linux/netfilter_ipv6.h
@@ -9,6 +9,17 @@
 
 #include <uapi/linux/netfilter_ipv6.h>
 
+/* Extra routing may be needed on local out, as the QUEUE target never returns
+ * control to the table.
+ */
+struct ip6_rt_info {
+	struct in6_addr daddr;
+	struct in6_addr saddr;
+	u_int32_t mark;
+};
+
+struct nf_queue_entry;
+
 /*
  * Hook functions for ipv6 to allow xt_* modules to be built-in even
  * if IPv6 is a module.
@@ -19,6 +30,14 @@ struct nf_ipv6_ops {
 	void (*route_input)(struct sk_buff *skb);
 	int (*fragment)(struct net *net, struct sock *sk, struct sk_buff *skb,
 			int (*output)(struct net *, struct sock *, struct sk_buff *));
+	__sum16 (*checksum)(struct sk_buff *skb, unsigned int hook,
+			    unsigned int dataoff, u_int8_t protocol);
+	__sum16 (*checksum_partial)(struct sk_buff *skb, unsigned int hook,
+				    unsigned int dataoff, unsigned int len,
+				    u_int8_t protocol);
+	int (*route)(struct net *net, struct dst_entry **dst, struct flowi *fl,
+		     bool strict);
+	int (*reroute)(struct sk_buff *skb, const struct nf_queue_entry *entry);
 };
 
 #ifdef CONFIG_NETFILTER
diff --git a/include/linux/pci.h b/include/linux/pci.h
index c170c92..0314e07 100644
--- a/include/linux/pci.h
+++ b/include/linux/pci.h
@@ -1072,6 +1072,7 @@ int pci_set_pcie_reset_state(struct pci_dev *dev, enum pcie_reset_state state);
 int pci_set_cacheline_size(struct pci_dev *dev);
 #define HAVE_PCI_SET_MWI
 int __must_check pci_set_mwi(struct pci_dev *dev);
+int __must_check pcim_set_mwi(struct pci_dev *dev);
 int pci_try_set_mwi(struct pci_dev *dev);
 void pci_clear_mwi(struct pci_dev *dev);
 void pci_intx(struct pci_dev *dev, int enable);
diff --git a/include/linux/phy.h b/include/linux/phy.h
index dc82a07..135aba5 100644
--- a/include/linux/phy.h
+++ b/include/linux/phy.h
@@ -59,6 +59,7 @@
 
 #define PHY_HAS_INTERRUPT	0x00000001
 #define PHY_IS_INTERNAL		0x00000002
+#define PHY_RST_AFTER_CLK_EN	0x00000004
 #define MDIO_DEVICE_IS_PHY	0x80000000
 
 /* Interface Mode definitions */
@@ -468,7 +469,6 @@ struct phy_device {
 	/* Interrupt and Polling infrastructure */
 	struct work_struct phy_queue;
 	struct delayed_work state_queue;
-	atomic_t irq_disable;
 
 	struct mutex lock;
 
@@ -497,19 +497,19 @@ struct phy_device {
  * flags: A bitfield defining certain other features this PHY
  *   supports (like interrupts)
  *
- * The drivers must implement config_aneg and read_status.  All
- * other functions are optional. Note that none of these
- * functions should be called from interrupt time.  The goal is
- * for the bus read/write functions to be able to block when the
- * bus transaction is happening, and be freed up by an interrupt
- * (The MPC85xx has this ability, though it is not currently
- * supported in the driver).
+ * All functions are optional. If config_aneg or read_status
+ * are not implemented, the phy core uses the genphy versions.
+ * Note that none of these functions should be called from
+ * interrupt time. The goal is for the bus read/write functions
+ * to be able to block when the bus transaction is happening,
+ * and be freed up by an interrupt (The MPC85xx has this ability,
+ * though it is not currently supported in the driver).
  */
 struct phy_driver {
 	struct mdio_driver_common mdiodrv;
 	u32 phy_id;
 	char *name;
-	unsigned int phy_id_mask;
+	u32 phy_id_mask;
 	u32 features;
 	u32 flags;
 	const void *driver_data;
@@ -634,6 +634,9 @@ struct phy_driver {
 	int (*write_mmd)(struct phy_device *dev, int devnum, u16 regnum,
 			 u16 val);
 
+	int (*read_page)(struct phy_device *dev);
+	int (*write_page)(struct phy_device *dev, int page);
+
 	/* Get the size and type of the eeprom contained within a plug-in
 	 * module */
 	int (*module_info)(struct phy_device *dev,
@@ -690,6 +693,8 @@ phy_lookup_setting(int speed, int duplex, const unsigned long *mask,
 size_t phy_speeds(unsigned int *speeds, size_t size,
 		  unsigned long *mask, size_t maxbit);
 
+void phy_resolve_aneg_linkmode(struct phy_device *phydev);
+
 /**
  * phy_read_mmd - Convenience function for reading a register
  * from an MMD on a given PHY.
@@ -716,6 +721,18 @@ static inline int phy_read(struct phy_device *phydev, u32 regnum)
 }
 
 /**
+ * __phy_read - convenience function for reading a given PHY register
+ * @phydev: the phy_device struct
+ * @regnum: register number to read
+ *
+ * The caller must have taken the MDIO bus lock.
+ */
+static inline int __phy_read(struct phy_device *phydev, u32 regnum)
+{
+	return __mdiobus_read(phydev->mdio.bus, phydev->mdio.addr, regnum);
+}
+
+/**
  * phy_write - Convenience function for writing a given PHY register
  * @phydev: the phy_device struct
  * @regnum: register number to write
@@ -731,6 +748,23 @@ static inline int phy_write(struct phy_device *phydev, u32 regnum, u16 val)
 }
 
 /**
+ * __phy_write - Convenience function for writing a given PHY register
+ * @phydev: the phy_device struct
+ * @regnum: register number to write
+ * @val: value to write to @regnum
+ *
+ * The caller must have taken the MDIO bus lock.
+ */
+static inline int __phy_write(struct phy_device *phydev, u32 regnum, u16 val)
+{
+	return __mdiobus_write(phydev->mdio.bus, phydev->mdio.addr, regnum,
+			       val);
+}
+
+int __phy_modify(struct phy_device *phydev, u32 regnum, u16 mask, u16 set);
+int phy_modify(struct phy_device *phydev, u32 regnum, u16 mask, u16 set);
+
+/**
  * phy_interrupt_is_valid - Convenience function for testing a given PHY irq
  * @phydev: the phy_device struct
  *
@@ -763,6 +797,20 @@ static inline bool phy_interface_mode_is_rgmii(phy_interface_t mode)
 };
 
 /**
+ * phy_interface_mode_is_8023z() - does the phy interface mode use 802.3z
+ *   negotiation
+ * @mode: one of &enum phy_interface_t
+ *
+ * Returns true if the phy interface mode uses the 16-bit negotiation
+ * word as defined in 802.3z. (See 802.3-2015 37.2.1 Config_Reg encoding)
+ */
+static inline bool phy_interface_mode_is_8023z(phy_interface_t mode)
+{
+	return mode == PHY_INTERFACE_MODE_1000BASEX ||
+	       mode == PHY_INTERFACE_MODE_2500BASEX;
+}
+
+/**
  * phy_interface_is_rgmii - Convenience function for testing if a PHY interface
  * is RGMII (all variants)
  * @phydev: the phy_device struct
@@ -794,6 +842,14 @@ static inline bool phy_is_pseudo_fixed_link(struct phy_device *phydev)
  */
 int phy_write_mmd(struct phy_device *phydev, int devad, u32 regnum, u16 val);
 
+int phy_save_page(struct phy_device *phydev);
+int phy_select_page(struct phy_device *phydev, int page);
+int phy_restore_page(struct phy_device *phydev, int oldpage, int ret);
+int phy_read_paged(struct phy_device *phydev, int page, u32 regnum);
+int phy_write_paged(struct phy_device *phydev, int page, u32 regnum, u16 val);
+int phy_modify_paged(struct phy_device *phydev, int page, u32 regnum,
+		     u16 mask, u16 set);
+
 struct phy_device *phy_device_create(struct mii_bus *bus, int addr, int phy_id,
 				     bool is_c45,
 				     struct phy_c45_device_ids *c45_ids);
@@ -840,13 +896,11 @@ int phy_aneg_done(struct phy_device *phydev);
 
 int phy_stop_interrupts(struct phy_device *phydev);
 int phy_restart_aneg(struct phy_device *phydev);
+int phy_reset_after_clk_enable(struct phy_device *phydev);
 
-static inline int phy_read_status(struct phy_device *phydev)
+static inline void phy_device_reset(struct phy_device *phydev, int value)
 {
-	if (!phydev->drv)
-		return -EIO;
-
-	return phydev->drv->read_status(phydev);
+	mdio_device_reset(&phydev->mdio, value);
 }
 
 #define phydev_err(_phydev, format, args...)	\
@@ -889,6 +943,18 @@ int genphy_c45_read_lpa(struct phy_device *phydev);
 int genphy_c45_read_pma(struct phy_device *phydev);
 int genphy_c45_pma_setup_forced(struct phy_device *phydev);
 int genphy_c45_an_disable_aneg(struct phy_device *phydev);
+int genphy_c45_read_mdix(struct phy_device *phydev);
+
+static inline int phy_read_status(struct phy_device *phydev)
+{
+	if (!phydev->drv)
+		return -EIO;
+
+	if (phydev->drv->read_status)
+		return phydev->drv->read_status(phydev);
+	else
+		return genphy_read_status(phydev);
+}
 
 void phy_driver_unregister(struct phy_driver *drv);
 void phy_drivers_unregister(struct phy_driver *drv, int n);
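Based on the declarations above, the expected calling pattern for the
paged accessors looks like the sketch below; the page and register
numbers are invented, and the error-propagation convention (select
takes the MDIO bus lock and returns the previous page, restore undoes
both while preserving the first error) is assumed:

  /* Sketch: read one register from an assumed vendor page 2. */
  static int example_read_paged_reg(struct phy_device *phydev)
  {
  	int ret, oldpage;

  	oldpage = ret = phy_select_page(phydev, 2);
  	if (oldpage >= 0)
  		ret = __phy_read(phydev, 0x15);	/* invented register */

  	return phy_restore_page(phydev, oldpage, ret);
  }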
diff --git a/include/linux/phy_fixed.h b/include/linux/phy_fixed.h
index cf6392d..ee54453 100644
--- a/include/linux/phy_fixed.h
+++ b/include/linux/phy_fixed.h
@@ -24,9 +24,6 @@ extern void fixed_phy_unregister(struct phy_device *phydev);
 extern int fixed_phy_set_link_update(struct phy_device *phydev,
 			int (*link_update)(struct net_device *,
 					   struct fixed_phy_status *));
-extern int fixed_phy_update_state(struct phy_device *phydev,
-			   const struct fixed_phy_status *status,
-			   const struct fixed_phy_status *changed);
 #else
 static inline int fixed_phy_add(unsigned int irq, int phy_id,
 				struct fixed_phy_status *status,
@@ -50,12 +47,6 @@ static inline int fixed_phy_set_link_update(struct phy_device *phydev,
 {
 	return -ENODEV;
 }
-static inline int fixed_phy_update_state(struct phy_device *phydev,
-			   const struct fixed_phy_status *status,
-			   const struct fixed_phy_status *changed)
-{
-	return -ENODEV;
-}
 #endif /* CONFIG_FIXED_PHY */
 
 #endif /* __PHY_FIXED_H */
diff --git a/include/linux/phylink.h b/include/linux/phylink.h
index af67edd..bd137c2 100644
--- a/include/linux/phylink.h
+++ b/include/linux/phylink.h
@@ -7,6 +7,7 @@
 
 struct device_node;
 struct ethtool_cmd;
+struct fwnode_handle;
 struct net_device;
 
 enum {
@@ -20,19 +21,31 @@ enum {
 
 	MLO_AN_PHY = 0,	/* Conventional PHY */
 	MLO_AN_FIXED,	/* Fixed-link mode */
-	MLO_AN_SGMII,	/* Cisco SGMII protocol */
-	MLO_AN_8023Z,	/* 1000base-X protocol */
+	MLO_AN_INBAND,	/* In-band protocol */
 };
 
 static inline bool phylink_autoneg_inband(unsigned int mode)
 {
-	return mode == MLO_AN_SGMII || mode == MLO_AN_8023Z;
+	return mode == MLO_AN_INBAND;
 }
 
+/**
+ * struct phylink_link_state - link state structure
+ * @advertising: ethtool bitmask containing advertised link modes
+ * @lp_advertising: ethtool bitmask containing link partner advertised link
+ *   modes
+ * @interface: link &typedef phy_interface_t mode
+ * @speed: link speed, one of the SPEED_* constants.
+ * @duplex: link duplex mode, one of DUPLEX_* constants.
+ * @pause: link pause state, described by MLO_PAUSE_* constants.
+ * @link: true if the link is up.
+ * @an_enabled: true if autonegotiation is enabled/desired.
+ * @an_complete: true if autonegotiation has completed.
+ */
 struct phylink_link_state {
 	__ETHTOOL_DECLARE_LINK_MODE_MASK(advertising);
 	__ETHTOOL_DECLARE_LINK_MODE_MASK(lp_advertising);
-	phy_interface_t interface;	/* PHY_INTERFACE_xxx */
+	phy_interface_t interface;
 	int speed;
 	int duplex;
 	int pause;
@@ -41,72 +54,145 @@ struct phylink_link_state {
 	unsigned int an_complete:1;
 };
 
+/**
+ * struct phylink_mac_ops - MAC operations structure.
+ * @validate: Validate and update the link configuration.
+ * @mac_link_state: Read the current link state from the hardware.
+ * @mac_config: configure the MAC for the selected mode and state.
+ * @mac_an_restart: restart 802.3z BaseX autonegotiation.
+ * @mac_link_down: take the link down.
+ * @mac_link_up: allow the link to come up.
+ *
+ * The individual methods are described more fully below.
+ */
 struct phylink_mac_ops {
-	/**
-	 * validate: validate and update the link configuration
-	 * @ndev: net_device structure associated with MAC
-	 * @config: configuration to validate
-	 *
-	 * Update the %config->supported and %config->advertised masks
-	 * clearing bits that can not be supported.
-	 *
-	 * Note: the PHY may be able to transform from one connection
-	 * technology to another, so, eg, don't clear 1000BaseX just
-	 * because the MAC is unable to support it.  This is more about
-	 * clearing unsupported speeds and duplex settings.
-	 *
-	 * If the %config->interface mode is %PHY_INTERFACE_MODE_1000BASEX
-	 * or %PHY_INTERFACE_MODE_2500BASEX, select the appropriate mode
-	 * based on %config->advertised and/or %config->speed.
-	 */
 	void (*validate)(struct net_device *ndev, unsigned long *supported,
 			 struct phylink_link_state *state);
-
-	/* Read the current link state from the hardware */
-	int (*mac_link_state)(struct net_device *, struct phylink_link_state *);
-
-	/* Configure the MAC */
-	/**
-	 * mac_config: configure the MAC for the selected mode and state
-	 * @ndev: net_device structure for the MAC
-	 * @mode: one of MLO_AN_FIXED, MLO_AN_PHY, MLO_AN_8023Z, MLO_AN_SGMII
-	 * @state: state structure
-	 *
-	 * The action performed depends on the currently selected mode:
-	 *
-	 * %MLO_AN_FIXED, %MLO_AN_PHY:
-	 *   set the specified speed, duplex, pause mode, and phy interface
-	 *   mode in the provided @state.
-	 * %MLO_AN_8023Z:
-	 *   place the link in 1000base-X mode, advertising the parameters
-	 *   given in advertising in @state.
-	 * %MLO_AN_SGMII:
-	 *   place the link in Cisco SGMII mode - there is no advertisment
-	 *   to make as the PHY communicates the speed and duplex to the
-	 *   MAC over the in-band control word.  Configuration of the pause
-	 *   mode is as per MLO_AN_PHY since this is not included.
-	 */
+	int (*mac_link_state)(struct net_device *ndev,
+			      struct phylink_link_state *state);
 	void (*mac_config)(struct net_device *ndev, unsigned int mode,
 			   const struct phylink_link_state *state);
-
-	/**
-	 * mac_an_restart: restart 802.3z BaseX autonegotiation
-	 * @ndev: net_device structure for the MAC
-	 */
 	void (*mac_an_restart)(struct net_device *ndev);
-
-	void (*mac_link_down)(struct net_device *, unsigned int mode);
-	void (*mac_link_up)(struct net_device *, unsigned int mode,
-			    struct phy_device *);
+	void (*mac_link_down)(struct net_device *ndev, unsigned int mode);
+	void (*mac_link_up)(struct net_device *ndev, unsigned int mode,
+			    struct phy_device *phy);
 };
 
-struct phylink *phylink_create(struct net_device *, struct device_node *,
+#if 0 /* For kernel-doc purposes only. */
+/**
+ * validate - Validate and update the link configuration
+ * @ndev: a pointer to a &struct net_device for the MAC.
+ * @supported: ethtool bitmask for supported link modes.
+ * @state: a pointer to a &struct phylink_link_state.
+ *
+ * Clear bits in the @supported and @state->advertising masks that
+ * are not supportable by the MAC.
+ *
+ * Note that the PHY may be able to transform from one connection
+ * technology to another, so, e.g., don't clear 1000BaseX just
+ * because the MAC is unable to support BaseX mode. This is more about
+ * clearing unsupported speeds and duplex settings.
+ *
+ * If the @state->interface mode is %PHY_INTERFACE_MODE_1000BASEX
+ * or %PHY_INTERFACE_MODE_2500BASEX, select the appropriate mode
+ * based on @state->advertising and/or @state->speed and update
+ * @state->interface accordingly.
+ */
+void validate(struct net_device *ndev, unsigned long *supported,
+	      struct phylink_link_state *state);
+
+/**
+ * mac_link_state() - Read the current link state from the hardware
+ * @ndev: a pointer to a &struct net_device for the MAC.
+ * @state: a pointer to a &struct phylink_link_state.
+ *
+ * Read the current link state from the MAC, reporting the current
+ * speed in @state->speed, duplex mode in @state->duplex, pause mode
+ * in @state->pause using the %MLO_PAUSE_RX and %MLO_PAUSE_TX bits,
+ * negotiation completion state in @state->an_complete, and link
+ * up state in @state->link.
+ */
+int mac_link_state(struct net_device *ndev,
+		   struct phylink_link_state *state);
+
+/**
+ * mac_config() - configure the MAC for the selected mode and state
+ * @ndev: a pointer to a &struct net_device for the MAC.
+ * @mode: one of %MLO_AN_FIXED, %MLO_AN_PHY, %MLO_AN_INBAND.
+ * @state: a pointer to a &struct phylink_link_state.
+ *
+ * The action performed depends on the currently selected mode:
+ *
+ * %MLO_AN_FIXED, %MLO_AN_PHY:
+ *   Configure the specified @state->speed, @state->duplex and
+ *   @state->pause (%MLO_PAUSE_TX / %MLO_PAUSE_RX) mode.
+ *
+ * %MLO_AN_INBAND:
+ *   Place the link in an in-band negotiation mode (such as 802.3z
+ *   1000base-X or Cisco SGMII mode depending on the @state->interface
+ *   mode). In both cases, link state management (whether the link
+ *   is up or not) is performed by the MAC, and reported via the
+ *   mac_link_state() callback. Changes in link state must be made
+ *   by calling phylink_mac_change().
+ *
+ *   If in 802.3z mode, the link speed is fixed, dependent on the
+ *   @state->interface. Duplex is negotiated, and pause is advertised
+ *   according to @state->an_enabled, @state->pause and
+ *   @state->advertising flags. Beware of MACs which only support full
+ *   duplex at gigabit and higher speeds.
+ *
+ *   If in Cisco SGMII mode, the link speed and duplex mode are passed
+ *   in the serial bitstream 16-bit configuration word; the MAC must be
+ *   configured to read these bits, acknowledge the configuration word,
+ *   and configure itself accordingly. Nothing is advertised by the MAC.
+ */
+void mac_config(struct net_device *ndev, unsigned int mode,
+		const struct phylink_link_state *state);
+
+/**
+ * mac_an_restart() - restart 802.3z BaseX autonegotiation
+ * @ndev: a pointer to a &struct net_device for the MAC.
+ */
+void mac_an_restart(struct net_device *ndev);
+
+/**
+ * mac_link_down() - take the link down
+ * @ndev: a pointer to a &struct net_device for the MAC.
+ * @mode: link autonegotiation mode
+ *
+ * If @mode is not an in-band negotiation mode (as defined by
+ * phylink_autoneg_inband()), force the link down and disable any
+ * Energy Efficient Ethernet MAC configuration.
+ */
+void mac_link_down(struct net_device *ndev, unsigned int mode);
+
+/**
+ * mac_link_up() - allow the link to come up
+ * @ndev: a pointer to a &struct net_device for the MAC.
+ * @mode: link autonegotiation mode
+ * @phy: any attached phy
+ *
+ * If @mode is not an in-band negotiation mode (as defined by
+ * phylink_autoneg_inband()), allow the link to come up. If @phy
+ * is non-%NULL, configure Energy Efficient Ethernet by calling
+ * phy_init_eee() and perform appropriate MAC configuration for EEE.
+ */
+void mac_link_up(struct net_device *ndev, unsigned int mode,
+		 struct phy_device *phy);
+#endif
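/* Editor's sketch (not part of the patch): a minimal phylink_mac_ops wiring
 * following the kernel-doc above. Only validate() is spelled out; the foo_*
 * names are hypothetical. As documented, validate() clears unsupportable
 * modes from both the supported mask and state->advertising.
 */
static void foo_validate(struct net_device *ndev, unsigned long *supported,
			 struct phylink_link_state *state)
{
	__ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };

	/* Advertise only what this hypothetical MAC can do. */
	__set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, mask);
	__set_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT, mask);

	bitmap_and(supported, supported, mask,
		   __ETHTOOL_LINK_MODE_MASK_NBITS);
	bitmap_and(state->advertising, state->advertising, mask,
		   __ETHTOOL_LINK_MODE_MASK_NBITS);
}

static const struct phylink_mac_ops foo_phylink_ops = {
	.validate	= foo_validate,
	.mac_link_state	= foo_mac_link_state,	/* hypothetical */
	.mac_config	= foo_mac_config,	/* hypothetical */
	.mac_an_restart	= foo_mac_an_restart,	/* hypothetical */
	.mac_link_down	= foo_mac_link_down,	/* hypothetical */
	.mac_link_up	= foo_mac_link_up,	/* hypothetical */
};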
+
+struct phylink *phylink_create(struct net_device *, struct fwnode_handle *,
 	phy_interface_t iface, const struct phylink_mac_ops *ops);
 void phylink_destroy(struct phylink *);
 
 int phylink_connect_phy(struct phylink *, struct phy_device *);
-int phylink_of_phy_connect(struct phylink *, struct device_node *);
+int phylink_of_phy_connect(struct phylink *, struct device_node *, u32 flags);
 void phylink_disconnect_phy(struct phylink *);
+int phylink_fixed_state_cb(struct phylink *,
+			   void (*cb)(struct net_device *dev,
+				      struct phylink_link_state *));
 
 void phylink_mac_change(struct phylink *, bool up);
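/* Editor's sketch (not part of the patch): the updated creation/attach
 * flow. phylink_create() now takes a fwnode_handle instead of a
 * device_node, and phylink_of_phy_connect() gains a phy-attach flags
 * argument (0 when no PHY flags are needed). foo_phylink_ops is the
 * hypothetical ops table sketched above.
 */
static int foo_attach_phylink(struct net_device *ndev, struct device_node *np)
{
	struct phylink *pl;

	pl = phylink_create(ndev, of_fwnode_handle(np),
			    PHY_INTERFACE_MODE_SGMII, &foo_phylink_ops);
	if (IS_ERR(pl))
		return PTR_ERR(pl);

	return phylink_of_phy_connect(pl, np, 0);
}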
 
@@ -128,7 +214,6 @@ int phylink_ethtool_set_pauseparam(struct phylink *,
 int phylink_ethtool_get_module_info(struct phylink *, struct ethtool_modinfo *);
 int phylink_ethtool_get_module_eeprom(struct phylink *,
 				      struct ethtool_eeprom *, u8 *);
-int phylink_init_eee(struct phylink *, bool);
 int phylink_get_eee_err(struct phylink *);
 int phylink_ethtool_get_eee(struct phylink *, struct ethtool_eee *);
 int phylink_ethtool_set_eee(struct phylink *, struct ethtool_eee *);
diff --git a/include/linux/proc_ns.h b/include/linux/proc_ns.h
index 2ff18c9..d31cb62 100644
--- a/include/linux/proc_ns.h
+++ b/include/linux/proc_ns.h
@@ -78,6 +78,9 @@ extern struct file *proc_ns_fget(int fd);
 #define get_proc_ns(inode) ((struct ns_common *)(inode)->i_private)
 extern void *ns_get_path(struct path *path, struct task_struct *task,
 			const struct proc_ns_operations *ns_ops);
+typedef struct ns_common *ns_get_path_helper_t(void *);
+extern void *ns_get_path_cb(struct path *path, ns_get_path_helper_t ns_get_cb,
+			    void *private_data);
 
 extern int ns_get_name(char *buf, size_t size, struct task_struct *task,
 			const struct proc_ns_operations *ns_ops);
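/* Editor's sketch (not part of the patch): ns_get_path_cb() resolves an
 * nsfs path from an arbitrary ns_common lookup rather than from a task.
 * Assuming a struct net * is at hand, a callback could simply return the
 * embedded ns_common:
 */
static struct ns_common *example_netns_cb(void *private_data)
{
	struct net *net = private_data;

	return &net->ns;
}
/* Usage: void *ret = ns_get_path_cb(&path, example_netns_cb, net); */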
diff --git a/include/linux/ptr_ring.h b/include/linux/ptr_ring.h
index 6866df4..13fb06a 100644
--- a/include/linux/ptr_ring.h
+++ b/include/linux/ptr_ring.h
@@ -447,7 +447,12 @@ static inline int ptr_ring_consume_batched_bh(struct ptr_ring *r,
 
 static inline void **__ptr_ring_init_queue_alloc(unsigned int size, gfp_t gfp)
 {
-	return kcalloc(size, sizeof(void *), gfp);
+	/* Allocate an extra dummy element at the end of the ring to avoid
+	 * consumer head or producer head access past the end of the array.
+	 * Possible when producer/consumer operations and __ptr_ring_peek
+	 * operations run in parallel.
+	 */
+	return kcalloc(size + 1, sizeof(void *), gfp);
 }
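/* Editor's note (not part of the patch): because kcalloc() zeroes the
 * allocation, the extra slot is a NULL entry; a racing reader that
 * momentarily observes an unwrapped head index equal to the ring size
 * reads this valid NULL entry instead of walking off the allocation.
 */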
 
 static inline void __ptr_ring_set_size(struct ptr_ring *r, int size)
diff --git a/include/linux/qed/common_hsi.h b/include/linux/qed/common_hsi.h
index 39e2a2a..2b3b350 100644
--- a/include/linux/qed/common_hsi.h
+++ b/include/linux/qed/common_hsi.h
@@ -32,14 +32,15 @@
 
 #ifndef _COMMON_HSI_H
 #define _COMMON_HSI_H
+
 #include <linux/types.h>
 #include <asm/byteorder.h>
 #include <linux/bitops.h>
 #include <linux/slab.h>
 
 /* dma_addr_t manip */
-#define PTR_LO(x)               ((u32)(((uintptr_t)(x)) & 0xffffffff))
-#define PTR_HI(x)               ((u32)((((uintptr_t)(x)) >> 16) >> 16))
+#define PTR_LO(x)		((u32)(((uintptr_t)(x)) & 0xffffffff))
+#define PTR_HI(x)		((u32)((((uintptr_t)(x)) >> 16) >> 16))
 #define DMA_LO_LE(x)		cpu_to_le32(lower_32_bits(x))
 #define DMA_HI_LE(x)		cpu_to_le32(upper_32_bits(x))
 #define DMA_REGPAIR_LE(x, val)	do { \
@@ -47,39 +48,45 @@
 					(x).lo = DMA_LO_LE((val)); \
 				} while (0)
 
-#define HILO_GEN(hi, lo, type)  ((((type)(hi)) << 32) + (lo))
-#define HILO_64(hi, lo) HILO_GEN((le32_to_cpu(hi)), (le32_to_cpu(lo)), u64)
-#define HILO_64_REGPAIR(regpair)        (HILO_64(regpair.hi, regpair.lo))
+#define HILO_GEN(hi, lo, type)		((((type)(hi)) << 32) + (lo))
+#define HILO_64(hi, lo) \
+	HILO_GEN(le32_to_cpu(hi), le32_to_cpu(lo), u64)
+#define HILO_64_REGPAIR(regpair) ({ \
+	typeof(regpair) __regpair = (regpair); \
+	HILO_64(__regpair.hi, __regpair.lo); })
 #define HILO_DMA_REGPAIR(regpair)	((dma_addr_t)HILO_64_REGPAIR(regpair))
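/* Editor's note (not part of the patch): the ({ ... }) statement expression
 * above evaluates @regpair exactly once, so an argument with side effects,
 * e.g. HILO_64_REGPAIR(*pair_ptr++), is no longer expanded (and executed)
 * twice as it was with the old single-line definition.
 */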
 
 #ifndef __COMMON_HSI__
 #define __COMMON_HSI__
 
+/********************************/
+/* PROTOCOL COMMON FW CONSTANTS */
+/********************************/
 
-#define X_FINAL_CLEANUP_AGG_INT 1
+#define X_FINAL_CLEANUP_AGG_INT			1
 
-#define EVENT_RING_PAGE_SIZE_BYTES          4096
+#define EVENT_RING_PAGE_SIZE_BYTES		4096
 
-#define NUM_OF_GLOBAL_QUEUES                            128
-#define COMMON_QUEUE_ENTRY_MAX_BYTE_SIZE        64
+#define NUM_OF_GLOBAL_QUEUES			128
+#define COMMON_QUEUE_ENTRY_MAX_BYTE_SIZE	64
 
-#define ISCSI_CDU_TASK_SEG_TYPE       0
-#define FCOE_CDU_TASK_SEG_TYPE        0
-#define RDMA_CDU_TASK_SEG_TYPE        1
+#define ISCSI_CDU_TASK_SEG_TYPE			0
+#define FCOE_CDU_TASK_SEG_TYPE			0
+#define RDMA_CDU_TASK_SEG_TYPE			1
 
-#define FW_ASSERT_GENERAL_ATTN_IDX    32
+#define FW_ASSERT_GENERAL_ATTN_IDX		32
 
-#define MAX_PINNED_CCFC                 32
+#define MAX_PINNED_CCFC				32
 
 /* Queue Zone sizes in bytes */
-#define TSTORM_QZONE_SIZE 8
-#define MSTORM_QZONE_SIZE 16
-#define USTORM_QZONE_SIZE 8
-#define XSTORM_QZONE_SIZE 8
-#define YSTORM_QZONE_SIZE 0
-#define PSTORM_QZONE_SIZE 0
+#define TSTORM_QZONE_SIZE	8
+#define MSTORM_QZONE_SIZE	16
+#define USTORM_QZONE_SIZE	8
+#define XSTORM_QZONE_SIZE	8
+#define YSTORM_QZONE_SIZE	0
+#define PSTORM_QZONE_SIZE	0
 
-#define MSTORM_VF_ZONE_DEFAULT_SIZE_LOG	7
+#define MSTORM_VF_ZONE_DEFAULT_SIZE_LOG		7
 #define ETH_MAX_NUM_RX_QUEUES_PER_VF_DEFAULT	16
 #define ETH_MAX_NUM_RX_QUEUES_PER_VF_DOUBLE	48
 #define ETH_MAX_NUM_RX_QUEUES_PER_VF_QUAD	112
@@ -102,8 +109,8 @@
 #define MAX_NUM_LL2_TX_STATS_COUNTERS	48
 
 #define FW_MAJOR_VERSION	8
-#define FW_MINOR_VERSION	20
-#define FW_REVISION_VERSION	0
+#define FW_MINOR_VERSION	33
+#define FW_REVISION_VERSION	1
 #define FW_ENGINEERING_VERSION	0
 
 /***********************/
@@ -115,10 +122,10 @@
 #define MAX_NUM_PORTS_BB	(2)
 #define MAX_NUM_PORTS		(MAX_NUM_PORTS_K2)
 
-#define MAX_NUM_PFS_K2	(16)
-#define MAX_NUM_PFS_BB	(8)
-#define MAX_NUM_PFS	(MAX_NUM_PFS_K2)
-#define MAX_NUM_OF_PFS_IN_CHIP (16) /* On both engines */
+#define MAX_NUM_PFS_K2		(16)
+#define MAX_NUM_PFS_BB		(8)
+#define MAX_NUM_PFS		(MAX_NUM_PFS_K2)
+#define MAX_NUM_OF_PFS_IN_CHIP	(16) /* On both engines */
 
 #define MAX_NUM_VFS_K2	(192)
 #define MAX_NUM_VFS_BB	(120)
@@ -141,29 +148,14 @@
 /* Traffic classes in network-facing blocks (PBF, BTB, NIG, BRB, PRS and QM) */
 #define NUM_PHYS_TCS_4PORT_K2	(4)
 #define NUM_OF_PHYS_TCS		(8)
-
+#define PURE_LB_TC		NUM_OF_PHYS_TCS
 #define NUM_TCS_4PORT_K2	(NUM_PHYS_TCS_4PORT_K2 + 1)
 #define NUM_OF_TCS		(NUM_OF_PHYS_TCS + 1)
 
-#define LB_TC			(NUM_OF_PHYS_TCS)
-
-/* Num of possible traffic priority values */
-#define NUM_OF_PRIO		(8)
-
-#define MAX_NUM_VOQS_K2		(NUM_TCS_4PORT_K2 * MAX_NUM_PORTS_K2)
-#define MAX_NUM_VOQS_BB		(NUM_OF_TCS * MAX_NUM_PORTS_BB)
-#define MAX_NUM_VOQS		(MAX_NUM_VOQS_K2)
-#define MAX_PHYS_VOQS		(NUM_OF_PHYS_TCS * MAX_NUM_PORTS_BB)
-
 /* CIDs */
-#define NUM_OF_CONNECTION_TYPES	(8)
-#define NUM_OF_LCIDS		(320)
-#define NUM_OF_LTIDS		(320)
-
-/* Clock values */
-#define MASTER_CLK_FREQ_E4	(375e6)
-#define STORM_CLK_FREQ_E4	(1000e6)
-#define CLK25M_CLK_FREQ_E4	(25e6)
+#define NUM_OF_CONNECTION_TYPES_E4	(8)
+#define NUM_OF_LCIDS			(320)
+#define NUM_OF_LTIDS			(320)
 
 /* Global PXP windows (GTT) */
 #define NUM_OF_GTT		19
@@ -172,17 +164,17 @@
 #define GTT_DWORD_SIZE		BIT(GTT_DWORD_SIZE_BITS)
 
 /* Tools Version */
-#define TOOLS_VERSION 10
+#define TOOLS_VERSION	10
 
 /*****************/
 /* CDU CONSTANTS */
 /*****************/
 
-#define CDU_SEG_TYPE_OFFSET_REG_TYPE_SHIFT              (17)
-#define CDU_SEG_TYPE_OFFSET_REG_OFFSET_MASK             (0x1ffff)
+#define CDU_SEG_TYPE_OFFSET_REG_TYPE_SHIFT			(17)
+#define CDU_SEG_TYPE_OFFSET_REG_OFFSET_MASK			(0x1ffff)
 
-#define CDU_VF_FL_SEG_TYPE_OFFSET_REG_TYPE_SHIFT	(12)
-#define CDU_VF_FL_SEG_TYPE_OFFSET_REG_OFFSET_MASK	(0xfff)
+#define CDU_VF_FL_SEG_TYPE_OFFSET_REG_TYPE_SHIFT		(12)
+#define CDU_VF_FL_SEG_TYPE_OFFSET_REG_OFFSET_MASK		(0xfff)
 
 #define CDU_CONTEXT_VALIDATION_CFG_ENABLE_SHIFT			(0)
 #define CDU_CONTEXT_VALIDATION_CFG_VALIDATION_TYPE_SHIFT	(1)
@@ -201,45 +193,45 @@
 #define DQ_DEMS_TOE_LOCAL_ADV_WND	4
 #define DQ_DEMS_ROCE_CQ_CONS		7
 
-/* XCM agg val selection */
-#define DQ_XCM_AGG_VAL_SEL_WORD2  0
-#define DQ_XCM_AGG_VAL_SEL_WORD3  1
-#define DQ_XCM_AGG_VAL_SEL_WORD4  2
-#define DQ_XCM_AGG_VAL_SEL_WORD5  3
-#define DQ_XCM_AGG_VAL_SEL_REG3   4
-#define DQ_XCM_AGG_VAL_SEL_REG4   5
-#define DQ_XCM_AGG_VAL_SEL_REG5   6
-#define DQ_XCM_AGG_VAL_SEL_REG6   7
+/* XCM agg val selection (HW) */
+#define DQ_XCM_AGG_VAL_SEL_WORD2	0
+#define DQ_XCM_AGG_VAL_SEL_WORD3	1
+#define DQ_XCM_AGG_VAL_SEL_WORD4	2
+#define DQ_XCM_AGG_VAL_SEL_WORD5	3
+#define DQ_XCM_AGG_VAL_SEL_REG3		4
+#define DQ_XCM_AGG_VAL_SEL_REG4		5
+#define DQ_XCM_AGG_VAL_SEL_REG5		6
+#define DQ_XCM_AGG_VAL_SEL_REG6		7
 
-/* XCM agg val selection */
-#define	DQ_XCM_CORE_TX_BD_CONS_CMD	DQ_XCM_AGG_VAL_SEL_WORD3
-#define	DQ_XCM_CORE_TX_BD_PROD_CMD	DQ_XCM_AGG_VAL_SEL_WORD4
-#define	DQ_XCM_CORE_SPQ_PROD_CMD	DQ_XCM_AGG_VAL_SEL_WORD4
-#define	DQ_XCM_ETH_EDPM_NUM_BDS_CMD	DQ_XCM_AGG_VAL_SEL_WORD2
-#define	DQ_XCM_ETH_TX_BD_CONS_CMD	DQ_XCM_AGG_VAL_SEL_WORD3
-#define	DQ_XCM_ETH_TX_BD_PROD_CMD	DQ_XCM_AGG_VAL_SEL_WORD4
-#define	DQ_XCM_ETH_GO_TO_BD_CONS_CMD	DQ_XCM_AGG_VAL_SEL_WORD5
-#define DQ_XCM_FCOE_SQ_CONS_CMD             DQ_XCM_AGG_VAL_SEL_WORD3
-#define DQ_XCM_FCOE_SQ_PROD_CMD             DQ_XCM_AGG_VAL_SEL_WORD4
-#define DQ_XCM_FCOE_X_FERQ_PROD_CMD         DQ_XCM_AGG_VAL_SEL_WORD5
-#define DQ_XCM_ISCSI_SQ_CONS_CMD	DQ_XCM_AGG_VAL_SEL_WORD3
-#define DQ_XCM_ISCSI_SQ_PROD_CMD	DQ_XCM_AGG_VAL_SEL_WORD4
-#define DQ_XCM_ISCSI_MORE_TO_SEND_SEQ_CMD DQ_XCM_AGG_VAL_SEL_REG3
-#define DQ_XCM_ISCSI_EXP_STAT_SN_CMD	DQ_XCM_AGG_VAL_SEL_REG6
-#define DQ_XCM_ROCE_SQ_PROD_CMD	DQ_XCM_AGG_VAL_SEL_WORD4
-#define DQ_XCM_TOE_TX_BD_PROD_CMD	DQ_XCM_AGG_VAL_SEL_WORD4
-#define DQ_XCM_TOE_MORE_TO_SEND_SEQ_CMD	DQ_XCM_AGG_VAL_SEL_REG3
-#define DQ_XCM_TOE_LOCAL_ADV_WND_SEQ_CMD DQ_XCM_AGG_VAL_SEL_REG4
+/* XCM agg val selection (FW) */
+#define	DQ_XCM_CORE_TX_BD_CONS_CMD		DQ_XCM_AGG_VAL_SEL_WORD3
+#define	DQ_XCM_CORE_TX_BD_PROD_CMD		DQ_XCM_AGG_VAL_SEL_WORD4
+#define	DQ_XCM_CORE_SPQ_PROD_CMD		DQ_XCM_AGG_VAL_SEL_WORD4
+#define	DQ_XCM_ETH_EDPM_NUM_BDS_CMD		DQ_XCM_AGG_VAL_SEL_WORD2
+#define	DQ_XCM_ETH_TX_BD_CONS_CMD		DQ_XCM_AGG_VAL_SEL_WORD3
+#define	DQ_XCM_ETH_TX_BD_PROD_CMD		DQ_XCM_AGG_VAL_SEL_WORD4
+#define	DQ_XCM_ETH_GO_TO_BD_CONS_CMD		DQ_XCM_AGG_VAL_SEL_WORD5
+#define DQ_XCM_FCOE_SQ_CONS_CMD			DQ_XCM_AGG_VAL_SEL_WORD3
+#define DQ_XCM_FCOE_SQ_PROD_CMD			DQ_XCM_AGG_VAL_SEL_WORD4
+#define DQ_XCM_FCOE_X_FERQ_PROD_CMD		DQ_XCM_AGG_VAL_SEL_WORD5
+#define DQ_XCM_ISCSI_SQ_CONS_CMD		DQ_XCM_AGG_VAL_SEL_WORD3
+#define DQ_XCM_ISCSI_SQ_PROD_CMD		DQ_XCM_AGG_VAL_SEL_WORD4
+#define DQ_XCM_ISCSI_MORE_TO_SEND_SEQ_CMD	DQ_XCM_AGG_VAL_SEL_REG3
+#define DQ_XCM_ISCSI_EXP_STAT_SN_CMD		DQ_XCM_AGG_VAL_SEL_REG6
+#define DQ_XCM_ROCE_SQ_PROD_CMD			DQ_XCM_AGG_VAL_SEL_WORD4
+#define DQ_XCM_TOE_TX_BD_PROD_CMD		DQ_XCM_AGG_VAL_SEL_WORD4
+#define DQ_XCM_TOE_MORE_TO_SEND_SEQ_CMD		DQ_XCM_AGG_VAL_SEL_REG3
+#define DQ_XCM_TOE_LOCAL_ADV_WND_SEQ_CMD	DQ_XCM_AGG_VAL_SEL_REG4
 
 /* UCM agg val selection (HW) */
 #define	DQ_UCM_AGG_VAL_SEL_WORD0	0
 #define	DQ_UCM_AGG_VAL_SEL_WORD1	1
 #define	DQ_UCM_AGG_VAL_SEL_WORD2	2
 #define	DQ_UCM_AGG_VAL_SEL_WORD3	3
-#define	DQ_UCM_AGG_VAL_SEL_REG0	4
-#define	DQ_UCM_AGG_VAL_SEL_REG1	5
-#define	DQ_UCM_AGG_VAL_SEL_REG2	6
-#define	DQ_UCM_AGG_VAL_SEL_REG3	7
+#define	DQ_UCM_AGG_VAL_SEL_REG0		4
+#define	DQ_UCM_AGG_VAL_SEL_REG1		5
+#define	DQ_UCM_AGG_VAL_SEL_REG2		6
+#define	DQ_UCM_AGG_VAL_SEL_REG3		7
 
 /* UCM agg val selection (FW) */
 #define DQ_UCM_ETH_PMD_TX_CONS_CMD	DQ_UCM_AGG_VAL_SEL_WORD2
@@ -263,7 +255,7 @@
 #define DQ_TCM_ROCE_RQ_PROD_CMD	\
 	DQ_TCM_AGG_VAL_SEL_WORD0
 
-/* XCM agg counter flag selection */
+/* XCM agg counter flag selection (HW) */
 #define	DQ_XCM_AGG_FLG_SHIFT_BIT14	0
 #define	DQ_XCM_AGG_FLG_SHIFT_BIT15	1
 #define	DQ_XCM_AGG_FLG_SHIFT_CF12	2
@@ -273,20 +265,20 @@
 #define	DQ_XCM_AGG_FLG_SHIFT_CF22	6
 #define	DQ_XCM_AGG_FLG_SHIFT_CF23	7
 
-/* XCM agg counter flag selection */
-#define DQ_XCM_CORE_DQ_CF_CMD		BIT(DQ_XCM_AGG_FLG_SHIFT_CF18)
-#define DQ_XCM_CORE_TERMINATE_CMD	BIT(DQ_XCM_AGG_FLG_SHIFT_CF19)
-#define DQ_XCM_CORE_SLOW_PATH_CMD	BIT(DQ_XCM_AGG_FLG_SHIFT_CF22)
-#define DQ_XCM_ETH_DQ_CF_CMD		BIT(DQ_XCM_AGG_FLG_SHIFT_CF18)
-#define DQ_XCM_ETH_TERMINATE_CMD	BIT(DQ_XCM_AGG_FLG_SHIFT_CF19)
-#define DQ_XCM_ETH_SLOW_PATH_CMD	BIT(DQ_XCM_AGG_FLG_SHIFT_CF22)
-#define DQ_XCM_ETH_TPH_EN_CMD		BIT(DQ_XCM_AGG_FLG_SHIFT_CF23)
-#define DQ_XCM_FCOE_SLOW_PATH_CMD           BIT(DQ_XCM_AGG_FLG_SHIFT_CF22)
-#define DQ_XCM_ISCSI_DQ_FLUSH_CMD	BIT(DQ_XCM_AGG_FLG_SHIFT_CF19)
-#define DQ_XCM_ISCSI_SLOW_PATH_CMD	BIT(DQ_XCM_AGG_FLG_SHIFT_CF22)
-#define DQ_XCM_ISCSI_PROC_ONLY_CLEANUP_CMD BIT(DQ_XCM_AGG_FLG_SHIFT_CF23)
-#define DQ_XCM_TOE_DQ_FLUSH_CMD		BIT(DQ_XCM_AGG_FLG_SHIFT_CF19)
-#define DQ_XCM_TOE_SLOW_PATH_CMD	BIT(DQ_XCM_AGG_FLG_SHIFT_CF22)
+/* XCM agg counter flag selection (FW) */
+#define DQ_XCM_CORE_DQ_CF_CMD			BIT(DQ_XCM_AGG_FLG_SHIFT_CF18)
+#define DQ_XCM_CORE_TERMINATE_CMD		BIT(DQ_XCM_AGG_FLG_SHIFT_CF19)
+#define DQ_XCM_CORE_SLOW_PATH_CMD		BIT(DQ_XCM_AGG_FLG_SHIFT_CF22)
+#define DQ_XCM_ETH_DQ_CF_CMD			BIT(DQ_XCM_AGG_FLG_SHIFT_CF18)
+#define DQ_XCM_ETH_TERMINATE_CMD		BIT(DQ_XCM_AGG_FLG_SHIFT_CF19)
+#define DQ_XCM_ETH_SLOW_PATH_CMD		BIT(DQ_XCM_AGG_FLG_SHIFT_CF22)
+#define DQ_XCM_ETH_TPH_EN_CMD			BIT(DQ_XCM_AGG_FLG_SHIFT_CF23)
+#define DQ_XCM_FCOE_SLOW_PATH_CMD		BIT(DQ_XCM_AGG_FLG_SHIFT_CF22)
+#define DQ_XCM_ISCSI_DQ_FLUSH_CMD		BIT(DQ_XCM_AGG_FLG_SHIFT_CF19)
+#define DQ_XCM_ISCSI_SLOW_PATH_CMD		BIT(DQ_XCM_AGG_FLG_SHIFT_CF22)
+#define DQ_XCM_ISCSI_PROC_ONLY_CLEANUP_CMD	BIT(DQ_XCM_AGG_FLG_SHIFT_CF23)
+#define DQ_XCM_TOE_DQ_FLUSH_CMD			BIT(DQ_XCM_AGG_FLG_SHIFT_CF19)
+#define DQ_XCM_TOE_SLOW_PATH_CMD		BIT(DQ_XCM_AGG_FLG_SHIFT_CF22)
 
 /* UCM agg counter flag selection (HW) */
 #define	DQ_UCM_AGG_FLG_SHIFT_CF0	0
@@ -317,9 +309,9 @@
 #define DQ_TCM_AGG_FLG_SHIFT_CF6	6
 #define DQ_TCM_AGG_FLG_SHIFT_CF7	7
 /* TCM agg counter flag selection (FW) */
-#define DQ_TCM_FCOE_FLUSH_Q0_CMD            BIT(DQ_TCM_AGG_FLG_SHIFT_CF1)
-#define DQ_TCM_FCOE_DUMMY_TIMER_CMD         BIT(DQ_TCM_AGG_FLG_SHIFT_CF2)
-#define DQ_TCM_FCOE_TIMER_STOP_ALL_CMD      BIT(DQ_TCM_AGG_FLG_SHIFT_CF3)
+#define DQ_TCM_FCOE_FLUSH_Q0_CMD	BIT(DQ_TCM_AGG_FLG_SHIFT_CF1)
+#define DQ_TCM_FCOE_DUMMY_TIMER_CMD	BIT(DQ_TCM_AGG_FLG_SHIFT_CF2)
+#define DQ_TCM_FCOE_TIMER_STOP_ALL_CMD	BIT(DQ_TCM_AGG_FLG_SHIFT_CF3)
 #define DQ_TCM_ISCSI_FLUSH_Q0_CMD	BIT(DQ_TCM_AGG_FLG_SHIFT_CF1)
 #define DQ_TCM_ISCSI_TIMER_STOP_ALL_CMD	BIT(DQ_TCM_AGG_FLG_SHIFT_CF3)
 #define DQ_TCM_TOE_FLUSH_Q0_CMD		BIT(DQ_TCM_AGG_FLG_SHIFT_CF1)
@@ -327,18 +319,18 @@
 #define DQ_TCM_IWARP_POST_RQ_CF_CMD	BIT(DQ_TCM_AGG_FLG_SHIFT_CF1)
 
 /* PWM address mapping */
-#define DQ_PWM_OFFSET_DPM_BASE	0x0
-#define DQ_PWM_OFFSET_DPM_END	0x27
+#define DQ_PWM_OFFSET_DPM_BASE		0x0
+#define DQ_PWM_OFFSET_DPM_END		0x27
 #define DQ_PWM_OFFSET_XCM16_BASE	0x40
 #define DQ_PWM_OFFSET_XCM32_BASE	0x44
 #define DQ_PWM_OFFSET_UCM16_BASE	0x48
 #define DQ_PWM_OFFSET_UCM32_BASE	0x4C
-#define DQ_PWM_OFFSET_UCM16_4	0x50
+#define DQ_PWM_OFFSET_UCM16_4		0x50
 #define DQ_PWM_OFFSET_TCM16_BASE	0x58
 #define DQ_PWM_OFFSET_TCM32_BASE	0x5C
-#define DQ_PWM_OFFSET_XCM_FLAGS	0x68
-#define DQ_PWM_OFFSET_UCM_FLAGS	0x69
-#define DQ_PWM_OFFSET_TCM_FLAGS	0x6B
+#define DQ_PWM_OFFSET_XCM_FLAGS		0x68
+#define DQ_PWM_OFFSET_UCM_FLAGS		0x69
+#define DQ_PWM_OFFSET_TCM_FLAGS		0x6B
 
 #define DQ_PWM_OFFSET_XCM_RDMA_SQ_PROD		(DQ_PWM_OFFSET_XCM16_BASE + 2)
 #define DQ_PWM_OFFSET_UCM_RDMA_CQ_CONS_32BIT	(DQ_PWM_OFFSET_UCM32_BASE)
@@ -347,10 +339,11 @@
 #define DQ_PWM_OFFSET_UCM_RDMA_ARM_FLAGS	(DQ_PWM_OFFSET_UCM_FLAGS)
 #define DQ_PWM_OFFSET_TCM_ROCE_RQ_PROD		(DQ_PWM_OFFSET_TCM16_BASE + 1)
 #define DQ_PWM_OFFSET_TCM_IWARP_RQ_PROD		(DQ_PWM_OFFSET_TCM16_BASE + 3)
-#define	DQ_REGION_SHIFT	(12)
+
+#define	DQ_REGION_SHIFT			(12)
 
 /* DPM */
-#define	DQ_DPM_WQE_BUFF_SIZE	(320)
+#define	DQ_DPM_WQE_BUFF_SIZE		(320)
 
 /* Conn type ranges */
 #define	DQ_CONN_TYPE_RANGE_SHIFT	(4)
@@ -359,29 +352,30 @@
 /* QM CONSTANTS  */
 /*****************/
 
-/* number of TX queues in the QM */
+/* Number of TX queues in the QM */
 #define MAX_QM_TX_QUEUES_K2	512
 #define MAX_QM_TX_QUEUES_BB	448
 #define MAX_QM_TX_QUEUES	MAX_QM_TX_QUEUES_K2
 
-/* number of Other queues in the QM */
+/* Number of Other queues in the QM */
 #define MAX_QM_OTHER_QUEUES_BB	64
 #define MAX_QM_OTHER_QUEUES_K2	128
 #define MAX_QM_OTHER_QUEUES	MAX_QM_OTHER_QUEUES_K2
 
-/* number of queues in a PF queue group */
+/* Number of queues in a PF queue group */
 #define QM_PF_QUEUE_GROUP_SIZE	8
 
-/* the size of a single queue element in bytes */
-#define QM_PQ_ELEMENT_SIZE                      4
+/* The size of a single queue element in bytes */
+#define QM_PQ_ELEMENT_SIZE	4
 
-/* base number of Tx PQs in the CM PQ representation.
- * should be used when storing PQ IDs in CM PQ registers and context
+/* Base number of Tx PQs in the CM PQ representation.
+ * Should be used when storing PQ IDs in CM PQ registers and context.
  */
-#define CM_TX_PQ_BASE	0x200
+#define CM_TX_PQ_BASE		0x200
 
-/* number of global Vport/QCN rate limiters */
+/* Number of global Vport/QCN rate limiters */
 #define MAX_QM_GLOBAL_RLS	256
+
 /* QM registers data */
 #define QM_LINE_CRD_REG_WIDTH		16
 #define QM_LINE_CRD_REG_SIGN_BIT	BIT((QM_LINE_CRD_REG_WIDTH - 1))
@@ -400,7 +394,7 @@
 #define CAU_FSM_ETH_TX  1
 
 /* Number of Protocol Indices per Status Block */
-#define PIS_PER_SB    12
+#define PIS_PER_SB_E4	12
 
 #define CAU_HC_STOPPED_STATE	3
 #define CAU_HC_DISABLE_STATE	4
@@ -432,8 +426,7 @@
 
 #define IGU_CMD_INT_ACK_BASE		0x0400
 #define IGU_CMD_INT_ACK_UPPER		(IGU_CMD_INT_ACK_BASE +	\
-					 MAX_TOT_SB_PER_PATH -	\
-					 1)
+					 MAX_TOT_SB_PER_PATH - 1)
 #define IGU_CMD_INT_ACK_RESERVED_UPPER	0x05ff
 
 #define IGU_CMD_ATTN_BIT_UPD_UPPER	0x05f0
@@ -447,8 +440,7 @@
 
 #define IGU_CMD_PROD_UPD_BASE			0x0600
 #define IGU_CMD_PROD_UPD_UPPER			(IGU_CMD_PROD_UPD_BASE +\
-						 MAX_TOT_SB_PER_PATH - \
-						 1)
+						 MAX_TOT_SB_PER_PATH - 1)
 #define IGU_CMD_PROD_UPD_RESERVED_UPPER		0x07ff
 
 /*****************/
@@ -514,129 +506,126 @@
 	 PXP_EXTERNAL_BAR_GLOBAL_WINDOW_LENGTH - 1)
 
 /* PF BAR */
-#define PXP_BAR0_START_GRC	0x0000
-#define PXP_BAR0_GRC_LENGTH	0x1C00000
-#define PXP_BAR0_END_GRC	(PXP_BAR0_START_GRC + \
-				 PXP_BAR0_GRC_LENGTH - 1)
+#define PXP_BAR0_START_GRC		0x0000
+#define PXP_BAR0_GRC_LENGTH		0x1C00000
+#define PXP_BAR0_END_GRC		(PXP_BAR0_START_GRC + \
+					 PXP_BAR0_GRC_LENGTH - 1)
 
-#define PXP_BAR0_START_IGU	0x1C00000
-#define PXP_BAR0_IGU_LENGTH	0x10000
-#define PXP_BAR0_END_IGU	(PXP_BAR0_START_IGU + \
-				 PXP_BAR0_IGU_LENGTH - 1)
+#define PXP_BAR0_START_IGU		0x1C00000
+#define PXP_BAR0_IGU_LENGTH		0x10000
+#define PXP_BAR0_END_IGU		(PXP_BAR0_START_IGU + \
+					 PXP_BAR0_IGU_LENGTH - 1)
 
-#define PXP_BAR0_START_TSDM	0x1C80000
-#define PXP_BAR0_SDM_LENGTH	0x40000
+#define PXP_BAR0_START_TSDM		0x1C80000
+#define PXP_BAR0_SDM_LENGTH		0x40000
 #define PXP_BAR0_SDM_RESERVED_LENGTH	0x40000
-#define PXP_BAR0_END_TSDM	(PXP_BAR0_START_TSDM + \
-				 PXP_BAR0_SDM_LENGTH - 1)
+#define PXP_BAR0_END_TSDM		(PXP_BAR0_START_TSDM + \
+					 PXP_BAR0_SDM_LENGTH - 1)
 
-#define PXP_BAR0_START_MSDM	0x1D00000
-#define PXP_BAR0_END_MSDM	(PXP_BAR0_START_MSDM + \
-				 PXP_BAR0_SDM_LENGTH - 1)
+#define PXP_BAR0_START_MSDM		0x1D00000
+#define PXP_BAR0_END_MSDM		(PXP_BAR0_START_MSDM + \
+					 PXP_BAR0_SDM_LENGTH - 1)
 
-#define PXP_BAR0_START_USDM	0x1D80000
-#define PXP_BAR0_END_USDM	(PXP_BAR0_START_USDM + \
-				 PXP_BAR0_SDM_LENGTH - 1)
+#define PXP_BAR0_START_USDM		0x1D80000
+#define PXP_BAR0_END_USDM		(PXP_BAR0_START_USDM + \
+					 PXP_BAR0_SDM_LENGTH - 1)
 
-#define PXP_BAR0_START_XSDM	0x1E00000
-#define PXP_BAR0_END_XSDM	(PXP_BAR0_START_XSDM + \
-				 PXP_BAR0_SDM_LENGTH - 1)
+#define PXP_BAR0_START_XSDM		0x1E00000
+#define PXP_BAR0_END_XSDM		(PXP_BAR0_START_XSDM + \
+					 PXP_BAR0_SDM_LENGTH - 1)
 
-#define PXP_BAR0_START_YSDM	0x1E80000
-#define PXP_BAR0_END_YSDM	(PXP_BAR0_START_YSDM + \
-				 PXP_BAR0_SDM_LENGTH - 1)
+#define PXP_BAR0_START_YSDM		0x1E80000
+#define PXP_BAR0_END_YSDM		(PXP_BAR0_START_YSDM + \
+					 PXP_BAR0_SDM_LENGTH - 1)
 
-#define PXP_BAR0_START_PSDM	0x1F00000
-#define PXP_BAR0_END_PSDM	(PXP_BAR0_START_PSDM + \
-				 PXP_BAR0_SDM_LENGTH - 1)
+#define PXP_BAR0_START_PSDM		0x1F00000
+#define PXP_BAR0_END_PSDM		(PXP_BAR0_START_PSDM + \
+					 PXP_BAR0_SDM_LENGTH - 1)
 
 #define PXP_BAR0_FIRST_INVALID_ADDRESS	(PXP_BAR0_END_PSDM + 1)
 
 /* VF BAR */
-#define PXP_VF_BAR0	0
+#define PXP_VF_BAR0			0
 
-#define PXP_VF_BAR0_START_GRC	0x3E00
-#define PXP_VF_BAR0_GRC_LENGTH	0x200
-#define PXP_VF_BAR0_END_GRC	(PXP_VF_BAR0_START_GRC + \
-				 PXP_VF_BAR0_GRC_LENGTH - 1)
+#define PXP_VF_BAR0_START_IGU		0
+#define PXP_VF_BAR0_IGU_LENGTH		0x3000
+#define PXP_VF_BAR0_END_IGU		(PXP_VF_BAR0_START_IGU + \
+					 PXP_VF_BAR0_IGU_LENGTH - 1)
 
-#define PXP_VF_BAR0_START_IGU                   0
-#define PXP_VF_BAR0_IGU_LENGTH                  0x3000
-#define PXP_VF_BAR0_END_IGU                     (PXP_VF_BAR0_START_IGU + \
-						 PXP_VF_BAR0_IGU_LENGTH - 1)
+#define PXP_VF_BAR0_START_DQ		0x3000
+#define PXP_VF_BAR0_DQ_LENGTH		0x200
+#define PXP_VF_BAR0_DQ_OPAQUE_OFFSET	0
+#define PXP_VF_BAR0_ME_OPAQUE_ADDRESS	(PXP_VF_BAR0_START_DQ +	\
+					 PXP_VF_BAR0_DQ_OPAQUE_OFFSET)
+#define PXP_VF_BAR0_ME_CONCRETE_ADDRESS	(PXP_VF_BAR0_ME_OPAQUE_ADDRESS \
+					 + 4)
+#define PXP_VF_BAR0_END_DQ		(PXP_VF_BAR0_START_DQ +	\
+					 PXP_VF_BAR0_DQ_LENGTH - 1)
 
-#define PXP_VF_BAR0_START_DQ                    0x3000
-#define PXP_VF_BAR0_DQ_LENGTH                   0x200
-#define PXP_VF_BAR0_DQ_OPAQUE_OFFSET            0
-#define PXP_VF_BAR0_ME_OPAQUE_ADDRESS           (PXP_VF_BAR0_START_DQ +	\
-						 PXP_VF_BAR0_DQ_OPAQUE_OFFSET)
-#define PXP_VF_BAR0_ME_CONCRETE_ADDRESS         (PXP_VF_BAR0_ME_OPAQUE_ADDRESS \
-						 + 4)
-#define PXP_VF_BAR0_END_DQ                      (PXP_VF_BAR0_START_DQ +	\
-						 PXP_VF_BAR0_DQ_LENGTH - 1)
+#define PXP_VF_BAR0_START_TSDM_ZONE_B	0x3200
+#define PXP_VF_BAR0_SDM_LENGTH_ZONE_B	0x200
+#define PXP_VF_BAR0_END_TSDM_ZONE_B	(PXP_VF_BAR0_START_TSDM_ZONE_B + \
+					 PXP_VF_BAR0_SDM_LENGTH_ZONE_B - 1)
 
-#define PXP_VF_BAR0_START_TSDM_ZONE_B           0x3200
-#define PXP_VF_BAR0_SDM_LENGTH_ZONE_B           0x200
-#define PXP_VF_BAR0_END_TSDM_ZONE_B             (PXP_VF_BAR0_START_TSDM_ZONE_B \
-						 +			       \
-						 PXP_VF_BAR0_SDM_LENGTH_ZONE_B \
-						 - 1)
+#define PXP_VF_BAR0_START_MSDM_ZONE_B	0x3400
+#define PXP_VF_BAR0_END_MSDM_ZONE_B	(PXP_VF_BAR0_START_MSDM_ZONE_B + \
+					 PXP_VF_BAR0_SDM_LENGTH_ZONE_B - 1)
 
-#define PXP_VF_BAR0_START_MSDM_ZONE_B           0x3400
-#define PXP_VF_BAR0_END_MSDM_ZONE_B             (PXP_VF_BAR0_START_MSDM_ZONE_B \
-						 +			       \
-						 PXP_VF_BAR0_SDM_LENGTH_ZONE_B \
-						 - 1)
+#define PXP_VF_BAR0_START_USDM_ZONE_B	0x3600
+#define PXP_VF_BAR0_END_USDM_ZONE_B	(PXP_VF_BAR0_START_USDM_ZONE_B + \
+					 PXP_VF_BAR0_SDM_LENGTH_ZONE_B - 1)
 
-#define PXP_VF_BAR0_START_USDM_ZONE_B           0x3600
-#define PXP_VF_BAR0_END_USDM_ZONE_B             (PXP_VF_BAR0_START_USDM_ZONE_B \
-						 +			       \
-						 PXP_VF_BAR0_SDM_LENGTH_ZONE_B \
-						 - 1)
+#define PXP_VF_BAR0_START_XSDM_ZONE_B	0x3800
+#define PXP_VF_BAR0_END_XSDM_ZONE_B	(PXP_VF_BAR0_START_XSDM_ZONE_B + \
+					 PXP_VF_BAR0_SDM_LENGTH_ZONE_B - 1)
 
-#define PXP_VF_BAR0_START_XSDM_ZONE_B           0x3800
-#define PXP_VF_BAR0_END_XSDM_ZONE_B             (PXP_VF_BAR0_START_XSDM_ZONE_B \
-						 +			       \
-						 PXP_VF_BAR0_SDM_LENGTH_ZONE_B \
-						 - 1)
+#define PXP_VF_BAR0_START_YSDM_ZONE_B	0x3a00
+#define PXP_VF_BAR0_END_YSDM_ZONE_B	(PXP_VF_BAR0_START_YSDM_ZONE_B + \
+					 PXP_VF_BAR0_SDM_LENGTH_ZONE_B - 1)
 
-#define PXP_VF_BAR0_START_YSDM_ZONE_B           0x3a00
-#define PXP_VF_BAR0_END_YSDM_ZONE_B             (PXP_VF_BAR0_START_YSDM_ZONE_B \
-						 +			       \
-						 PXP_VF_BAR0_SDM_LENGTH_ZONE_B \
-						 - 1)
+#define PXP_VF_BAR0_START_PSDM_ZONE_B	0x3c00
+#define PXP_VF_BAR0_END_PSDM_ZONE_B	(PXP_VF_BAR0_START_PSDM_ZONE_B + \
+					 PXP_VF_BAR0_SDM_LENGTH_ZONE_B - 1)
 
-#define PXP_VF_BAR0_START_PSDM_ZONE_B           0x3c00
-#define PXP_VF_BAR0_END_PSDM_ZONE_B             (PXP_VF_BAR0_START_PSDM_ZONE_B \
-						 +			       \
-						 PXP_VF_BAR0_SDM_LENGTH_ZONE_B \
-						 - 1)
+#define PXP_VF_BAR0_START_GRC		0x3E00
+#define PXP_VF_BAR0_GRC_LENGTH		0x200
+#define PXP_VF_BAR0_END_GRC		(PXP_VF_BAR0_START_GRC + \
+					 PXP_VF_BAR0_GRC_LENGTH - 1)
 
-#define PXP_VF_BAR0_START_SDM_ZONE_A            0x4000
-#define PXP_VF_BAR0_END_SDM_ZONE_A              0x10000
+#define PXP_VF_BAR0_START_SDM_ZONE_A	0x4000
+#define PXP_VF_BAR0_END_SDM_ZONE_A	0x10000
 
-#define PXP_VF_BAR0_GRC_WINDOW_LENGTH           32
+#define PXP_VF_BAR0_START_IGU2		0x10000
+#define PXP_VF_BAR0_IGU2_LENGTH		0xD000
+#define PXP_VF_BAR0_END_IGU2		(PXP_VF_BAR0_START_IGU2 + \
+					 PXP_VF_BAR0_IGU2_LENGTH - 1)
 
-#define PXP_ILT_PAGE_SIZE_NUM_BITS_MIN		12
-#define PXP_ILT_BLOCK_FACTOR_MULTIPLIER		1024
+#define PXP_VF_BAR0_GRC_WINDOW_LENGTH	32
+
+#define PXP_ILT_PAGE_SIZE_NUM_BITS_MIN	12
+#define PXP_ILT_BLOCK_FACTOR_MULTIPLIER	1024
 
 /* ILT Records */
 #define PXP_NUM_ILT_RECORDS_BB 7600
 #define PXP_NUM_ILT_RECORDS_K2 11000
 #define MAX_NUM_ILT_RECORDS MAX(PXP_NUM_ILT_RECORDS_BB, PXP_NUM_ILT_RECORDS_K2)
-#define PXP_QUEUES_ZONE_MAX_NUM 320
+
+/* Host Interface */
+#define PXP_QUEUES_ZONE_MAX_NUM	320
+
 /*****************/
 /* PRM CONSTANTS */
 /*****************/
-#define PRM_DMA_PAD_BYTES_NUM  2
+#define PRM_DMA_PAD_BYTES_NUM	2
+
 /*****************/
 /* SDMs CONSTANTS  */
 /*****************/
 
-#define SDM_OP_GEN_TRIG_NONE                    0
-#define SDM_OP_GEN_TRIG_WAKE_THREAD             1
-#define SDM_OP_GEN_TRIG_AGG_INT                 2
-#define SDM_OP_GEN_TRIG_LOADER                  4
+#define SDM_OP_GEN_TRIG_NONE		0
+#define SDM_OP_GEN_TRIG_WAKE_THREAD	1
+#define SDM_OP_GEN_TRIG_AGG_INT		2
+#define SDM_OP_GEN_TRIG_LOADER		4
 #define SDM_OP_GEN_TRIG_INDICATE_ERROR  6
 #define SDM_OP_GEN_TRIG_INC_ORDER_CNT   9
 
@@ -644,26 +633,26 @@
 /* Completion types */
 /********************/
 
-#define SDM_COMP_TYPE_NONE              0
-#define SDM_COMP_TYPE_WAKE_THREAD       1
-#define SDM_COMP_TYPE_AGG_INT           2
-#define SDM_COMP_TYPE_CM                3
-#define SDM_COMP_TYPE_LOADER            4
-#define SDM_COMP_TYPE_PXP               5
-#define SDM_COMP_TYPE_INDICATE_ERROR    6
-#define SDM_COMP_TYPE_RELEASE_THREAD    7
-#define SDM_COMP_TYPE_RAM               8
-#define SDM_COMP_TYPE_INC_ORDER_CNT     9
+#define SDM_COMP_TYPE_NONE		0
+#define SDM_COMP_TYPE_WAKE_THREAD	1
+#define SDM_COMP_TYPE_AGG_INT		2
+#define SDM_COMP_TYPE_CM		3
+#define SDM_COMP_TYPE_LOADER		4
+#define SDM_COMP_TYPE_PXP		5
+#define SDM_COMP_TYPE_INDICATE_ERROR	6
+#define SDM_COMP_TYPE_RELEASE_THREAD	7
+#define SDM_COMP_TYPE_RAM		8
+#define SDM_COMP_TYPE_INC_ORDER_CNT	9
 
 /*****************/
-/* PBF Constants */
+/* PBF CONSTANTS */
 /*****************/
 
 /* Number of PBF command queue lines. Each line is 32B. */
-#define PBF_MAX_CMD_LINES 3328
+#define PBF_MAX_CMD_LINES	3328
 
 /* Number of BTB blocks. Each block is 256B. */
-#define BTB_MAX_BLOCKS 1440
+#define BTB_MAX_BLOCKS		1440
 
 /*****************/
 /* PRS CONSTANTS */
@@ -671,14 +660,7 @@
 
 #define PRS_GFT_CAM_LINES_NO_MATCH	31
 
-/* Async data KCQ CQE */
-struct async_data {
-	__le32	cid;
-	__le16	itid;
-	u8	error_code;
-	u8	fw_debug_param;
-};
-
+/* Interrupt coalescing TimeSet */
 struct coalescing_timeset {
 	u8 value;
 #define	COALESCING_TIMESET_TIMESET_MASK		0x7F
@@ -692,23 +674,32 @@ struct common_queue_zone {
 	__le16 reserved;
 };
 
+/* ETH Rx producers data */
 struct eth_rx_prod_data {
 	__le16 bd_prod;
 	__le16 cqe_prod;
 };
 
-struct regpair {
-	__le32	lo;
-	__le32	hi;
+struct tcp_ulp_connect_done_params {
+	__le16 mss;
+	u8 snd_wnd_scale;
+	u8 flags;
+#define TCP_ULP_CONNECT_DONE_PARAMS_TS_EN_MASK		0x1
+#define TCP_ULP_CONNECT_DONE_PARAMS_TS_EN_SHIFT		0
+#define TCP_ULP_CONNECT_DONE_PARAMS_RESERVED_MASK	0x7F
+#define TCP_ULP_CONNECT_DONE_PARAMS_RESERVED_SHIFT	1
 };
 
-struct vf_pf_channel_eqe_data {
-	struct regpair msg_addr;
+struct iscsi_connect_done_results {
+	__le16 icid;
+	__le16 conn_id;
+	struct tcp_ulp_connect_done_params params;
 };
 
 struct iscsi_eqe_data {
-	__le32 cid;
+	__le16 icid;
 	__le16 conn_id;
+	__le16 reserved;
 	u8 error_code;
 	u8 error_pdu_opcode_reserved;
 #define ISCSI_EQE_DATA_ERROR_PDU_OPCODE_MASK		0x3F
@@ -719,52 +710,6 @@ struct iscsi_eqe_data {
 #define ISCSI_EQE_DATA_RESERVED0_SHIFT			7
 };
 
-struct rdma_eqe_destroy_qp {
-	__le32 cid;
-	u8 reserved[4];
-};
-
-union rdma_eqe_data {
-	struct regpair async_handle;
-	struct rdma_eqe_destroy_qp rdma_destroy_qp_data;
-};
-
-struct malicious_vf_eqe_data {
-	u8 vf_id;
-	u8 err_id;
-	__le16 reserved[3];
-};
-
-struct initial_cleanup_eqe_data {
-	u8 vf_id;
-	u8 reserved[7];
-};
-
-/* Event Data Union */
-union event_ring_data {
-	u8 bytes[8];
-	struct vf_pf_channel_eqe_data vf_pf_channel;
-	struct iscsi_eqe_data iscsi_info;
-	union rdma_eqe_data rdma_data;
-	struct malicious_vf_eqe_data malicious_vf;
-	struct initial_cleanup_eqe_data vf_init_cleanup;
-};
-
-/* Event Ring Entry */
-struct event_ring_entry {
-	u8			protocol_id;
-	u8			opcode;
-	__le16			reserved0;
-	__le16			echo;
-	u8			fw_return_code;
-	u8			flags;
-#define EVENT_RING_ENTRY_ASYNC_MASK      0x1
-#define EVENT_RING_ENTRY_ASYNC_SHIFT     0
-#define EVENT_RING_ENTRY_RESERVED1_MASK  0x7F
-#define EVENT_RING_ENTRY_RESERVED1_SHIFT 1
-	union event_ring_data	data;
-};
-
 /* Multi function mode */
 enum mf_mode {
 	ERROR_MODE /* Unsupported mode */,
@@ -781,13 +726,31 @@ enum protocol_type {
 	PROTOCOLID_CORE,
 	PROTOCOLID_ETH,
 	PROTOCOLID_IWARP,
-	PROTOCOLID_RESERVED5,
+	PROTOCOLID_RESERVED0,
 	PROTOCOLID_PREROCE,
 	PROTOCOLID_COMMON,
-	PROTOCOLID_RESERVED6,
+	PROTOCOLID_RESERVED1,
 	MAX_PROTOCOL_TYPE
 };
 
+struct regpair {
+	__le32 lo;
+	__le32 hi;
+};
+
+/* RoCE Destroy Event Data */
+struct rdma_eqe_destroy_qp {
+	__le32 cid;
+	u8 reserved[4];
+};
+
+/* RDMA Event Data Union */
+union rdma_eqe_data {
+	struct regpair async_handle;
+	struct rdma_eqe_destroy_qp rdma_destroy_qp_data;
+};
+
+/* Ustorm Queue Zone */
 struct ustorm_eth_queue_zone {
 	struct coalescing_timeset int_coalescing_timeset;
 	u8 reserved[3];
@@ -798,62 +761,71 @@ struct ustorm_queue_zone {
 	struct common_queue_zone common;
 };
 
-/* status block structure */
+/* Status block structure */
 struct cau_pi_entry {
-	u32 prod;
-#define CAU_PI_ENTRY_PROD_VAL_MASK    0xFFFF
-#define CAU_PI_ENTRY_PROD_VAL_SHIFT   0
-#define CAU_PI_ENTRY_PI_TIMESET_MASK  0x7F
-#define CAU_PI_ENTRY_PI_TIMESET_SHIFT 16
-#define CAU_PI_ENTRY_FSM_SEL_MASK     0x1
-#define CAU_PI_ENTRY_FSM_SEL_SHIFT    23
-#define CAU_PI_ENTRY_RESERVED_MASK    0xFF
-#define CAU_PI_ENTRY_RESERVED_SHIFT   24
+	__le32 prod;
+#define CAU_PI_ENTRY_PROD_VAL_MASK	0xFFFF
+#define CAU_PI_ENTRY_PROD_VAL_SHIFT	0
+#define CAU_PI_ENTRY_PI_TIMESET_MASK	0x7F
+#define CAU_PI_ENTRY_PI_TIMESET_SHIFT	16
+#define CAU_PI_ENTRY_FSM_SEL_MASK	0x1
+#define CAU_PI_ENTRY_FSM_SEL_SHIFT	23
+#define CAU_PI_ENTRY_RESERVED_MASK	0xFF
+#define CAU_PI_ENTRY_RESERVED_SHIFT	24
 };
 
-/* status block structure */
+/* Status block structure */
 struct cau_sb_entry {
-	u32 data;
-#define CAU_SB_ENTRY_SB_PROD_MASK      0xFFFFFF
-#define CAU_SB_ENTRY_SB_PROD_SHIFT     0
-#define CAU_SB_ENTRY_STATE0_MASK       0xF
-#define CAU_SB_ENTRY_STATE0_SHIFT      24
-#define CAU_SB_ENTRY_STATE1_MASK       0xF
-#define CAU_SB_ENTRY_STATE1_SHIFT      28
-	u32 params;
-#define CAU_SB_ENTRY_SB_TIMESET0_MASK  0x7F
-#define CAU_SB_ENTRY_SB_TIMESET0_SHIFT 0
-#define CAU_SB_ENTRY_SB_TIMESET1_MASK  0x7F
-#define CAU_SB_ENTRY_SB_TIMESET1_SHIFT 7
-#define CAU_SB_ENTRY_TIMER_RES0_MASK   0x3
-#define CAU_SB_ENTRY_TIMER_RES0_SHIFT  14
-#define CAU_SB_ENTRY_TIMER_RES1_MASK   0x3
-#define CAU_SB_ENTRY_TIMER_RES1_SHIFT  16
-#define CAU_SB_ENTRY_VF_NUMBER_MASK    0xFF
-#define CAU_SB_ENTRY_VF_NUMBER_SHIFT   18
-#define CAU_SB_ENTRY_VF_VALID_MASK     0x1
-#define CAU_SB_ENTRY_VF_VALID_SHIFT    26
-#define CAU_SB_ENTRY_PF_NUMBER_MASK    0xF
-#define CAU_SB_ENTRY_PF_NUMBER_SHIFT   27
-#define CAU_SB_ENTRY_TPH_MASK          0x1
-#define CAU_SB_ENTRY_TPH_SHIFT         31
+	__le32 data;
+#define CAU_SB_ENTRY_SB_PROD_MASK	0xFFFFFF
+#define CAU_SB_ENTRY_SB_PROD_SHIFT	0
+#define CAU_SB_ENTRY_STATE0_MASK	0xF
+#define CAU_SB_ENTRY_STATE0_SHIFT	24
+#define CAU_SB_ENTRY_STATE1_MASK	0xF
+#define CAU_SB_ENTRY_STATE1_SHIFT	28
+	__le32 params;
+#define CAU_SB_ENTRY_SB_TIMESET0_MASK	0x7F
+#define CAU_SB_ENTRY_SB_TIMESET0_SHIFT	0
+#define CAU_SB_ENTRY_SB_TIMESET1_MASK	0x7F
+#define CAU_SB_ENTRY_SB_TIMESET1_SHIFT	7
+#define CAU_SB_ENTRY_TIMER_RES0_MASK	0x3
+#define CAU_SB_ENTRY_TIMER_RES0_SHIFT	14
+#define CAU_SB_ENTRY_TIMER_RES1_MASK	0x3
+#define CAU_SB_ENTRY_TIMER_RES1_SHIFT	16
+#define CAU_SB_ENTRY_VF_NUMBER_MASK	0xFF
+#define CAU_SB_ENTRY_VF_NUMBER_SHIFT	18
+#define CAU_SB_ENTRY_VF_VALID_MASK	0x1
+#define CAU_SB_ENTRY_VF_VALID_SHIFT	26
+#define CAU_SB_ENTRY_PF_NUMBER_MASK	0xF
+#define CAU_SB_ENTRY_PF_NUMBER_SHIFT	27
+#define CAU_SB_ENTRY_TPH_MASK		0x1
+#define CAU_SB_ENTRY_TPH_SHIFT		31
 };
 
-/* core doorbell data */
+/* IGU cleanup bit values to distinguish between a cleanup command and a
+ * producer/consumer update.
+ */
+enum command_type_bit {
+	IGU_COMMAND_TYPE_NOP = 0,
+	IGU_COMMAND_TYPE_SET = 1,
+	MAX_COMMAND_TYPE_BIT
+};
+
+/* Core doorbell data */
 struct core_db_data {
 	u8 params;
-#define CORE_DB_DATA_DEST_MASK         0x3
-#define CORE_DB_DATA_DEST_SHIFT        0
-#define CORE_DB_DATA_AGG_CMD_MASK      0x3
-#define CORE_DB_DATA_AGG_CMD_SHIFT     2
-#define CORE_DB_DATA_BYPASS_EN_MASK    0x1
-#define CORE_DB_DATA_BYPASS_EN_SHIFT   4
-#define CORE_DB_DATA_RESERVED_MASK     0x1
-#define CORE_DB_DATA_RESERVED_SHIFT    5
-#define CORE_DB_DATA_AGG_VAL_SEL_MASK  0x3
-#define CORE_DB_DATA_AGG_VAL_SEL_SHIFT 6
-	u8	agg_flags;
-	__le16	spq_prod;
+#define CORE_DB_DATA_DEST_MASK		0x3
+#define CORE_DB_DATA_DEST_SHIFT		0
+#define CORE_DB_DATA_AGG_CMD_MASK	0x3
+#define CORE_DB_DATA_AGG_CMD_SHIFT	2
+#define CORE_DB_DATA_BYPASS_EN_MASK	0x1
+#define CORE_DB_DATA_BYPASS_EN_SHIFT	4
+#define CORE_DB_DATA_RESERVED_MASK	0x1
+#define CORE_DB_DATA_RESERVED_SHIFT	5
+#define CORE_DB_DATA_AGG_VAL_SEL_MASK	0x3
+#define CORE_DB_DATA_AGG_VAL_SEL_SHIFT	6
+	u8 agg_flags;
+	__le16 spq_prod;
 };
 
 /* Enum of doorbell aggregative command selection */
@@ -909,67 +881,69 @@ struct db_l2_dpm_sge {
 	struct regpair addr;
 	__le16 nbytes;
 	__le16 bitfields;
-#define DB_L2_DPM_SGE_TPH_ST_INDEX_MASK	0x1FF
-#define DB_L2_DPM_SGE_TPH_ST_INDEX_SHIFT 0
-#define DB_L2_DPM_SGE_RESERVED0_MASK	0x3
-#define DB_L2_DPM_SGE_RESERVED0_SHIFT	9
-#define DB_L2_DPM_SGE_ST_VALID_MASK	0x1
-#define DB_L2_DPM_SGE_ST_VALID_SHIFT	11
-#define DB_L2_DPM_SGE_RESERVED1_MASK	0xF
-#define DB_L2_DPM_SGE_RESERVED1_SHIFT	12
+#define DB_L2_DPM_SGE_TPH_ST_INDEX_MASK		0x1FF
+#define DB_L2_DPM_SGE_TPH_ST_INDEX_SHIFT	0
+#define DB_L2_DPM_SGE_RESERVED0_MASK		0x3
+#define DB_L2_DPM_SGE_RESERVED0_SHIFT		9
+#define DB_L2_DPM_SGE_ST_VALID_MASK		0x1
+#define DB_L2_DPM_SGE_ST_VALID_SHIFT		11
+#define DB_L2_DPM_SGE_RESERVED1_MASK		0xF
+#define DB_L2_DPM_SGE_RESERVED1_SHIFT		12
 	__le32 reserved2;
 };
 
 /* Structure for doorbell address, in legacy mode */
 struct db_legacy_addr {
 	__le32 addr;
-#define DB_LEGACY_ADDR_RESERVED0_MASK  0x3
-#define DB_LEGACY_ADDR_RESERVED0_SHIFT 0
-#define DB_LEGACY_ADDR_DEMS_MASK       0x7
-#define DB_LEGACY_ADDR_DEMS_SHIFT      2
-#define DB_LEGACY_ADDR_ICID_MASK       0x7FFFFFF
-#define DB_LEGACY_ADDR_ICID_SHIFT      5
+#define DB_LEGACY_ADDR_RESERVED0_MASK	0x3
+#define DB_LEGACY_ADDR_RESERVED0_SHIFT	0
+#define DB_LEGACY_ADDR_DEMS_MASK	0x7
+#define DB_LEGACY_ADDR_DEMS_SHIFT	2
+#define DB_LEGACY_ADDR_ICID_MASK	0x7FFFFFF
+#define DB_LEGACY_ADDR_ICID_SHIFT	5
 };
 
 /* Structure for doorbell address, in PWM mode */
 struct db_pwm_addr {
 	__le32 addr;
 #define DB_PWM_ADDR_RESERVED0_MASK	0x7
-#define DB_PWM_ADDR_RESERVED0_SHIFT 0
-#define DB_PWM_ADDR_OFFSET_MASK	0x7F
+#define DB_PWM_ADDR_RESERVED0_SHIFT	0
+#define DB_PWM_ADDR_OFFSET_MASK		0x7F
 #define DB_PWM_ADDR_OFFSET_SHIFT	3
-#define DB_PWM_ADDR_WID_MASK	0x3
-#define DB_PWM_ADDR_WID_SHIFT	10
-#define DB_PWM_ADDR_DPI_MASK	0xFFFF
-#define DB_PWM_ADDR_DPI_SHIFT	12
+#define DB_PWM_ADDR_WID_MASK		0x3
+#define DB_PWM_ADDR_WID_SHIFT		10
+#define DB_PWM_ADDR_DPI_MASK		0xFFFF
+#define DB_PWM_ADDR_DPI_SHIFT		12
 #define DB_PWM_ADDR_RESERVED1_MASK	0xF
-#define DB_PWM_ADDR_RESERVED1_SHIFT 28
+#define DB_PWM_ADDR_RESERVED1_SHIFT	28
 };
 
-/* Parameters to RoCE firmware, passed in EDPM doorbell */
+/* Parameters to RDMA firmware, passed in EDPM doorbell */
 struct db_rdma_dpm_params {
 	__le32 params;
-#define DB_RDMA_DPM_PARAMS_SIZE_MASK		0x3F
-#define DB_RDMA_DPM_PARAMS_SIZE_SHIFT		0
-#define DB_RDMA_DPM_PARAMS_DPM_TYPE_MASK	0x3
-#define DB_RDMA_DPM_PARAMS_DPM_TYPE_SHIFT	6
-#define DB_RDMA_DPM_PARAMS_OPCODE_MASK		0xFF
-#define DB_RDMA_DPM_PARAMS_OPCODE_SHIFT		8
-#define DB_RDMA_DPM_PARAMS_WQE_SIZE_MASK	0x7FF
-#define DB_RDMA_DPM_PARAMS_WQE_SIZE_SHIFT	16
-#define DB_RDMA_DPM_PARAMS_RESERVED0_MASK	0x1
-#define DB_RDMA_DPM_PARAMS_RESERVED0_SHIFT	27
-#define DB_RDMA_DPM_PARAMS_COMPLETION_FLG_MASK	0x1
-#define DB_RDMA_DPM_PARAMS_COMPLETION_FLG_SHIFT 28
-#define DB_RDMA_DPM_PARAMS_S_FLG_MASK		0x1
-#define DB_RDMA_DPM_PARAMS_S_FLG_SHIFT		29
-#define DB_RDMA_DPM_PARAMS_RESERVED1_MASK	0x1
-#define DB_RDMA_DPM_PARAMS_RESERVED1_SHIFT	30
+#define DB_RDMA_DPM_PARAMS_SIZE_MASK			0x3F
+#define DB_RDMA_DPM_PARAMS_SIZE_SHIFT			0
+#define DB_RDMA_DPM_PARAMS_DPM_TYPE_MASK		0x3
+#define DB_RDMA_DPM_PARAMS_DPM_TYPE_SHIFT		6
+#define DB_RDMA_DPM_PARAMS_OPCODE_MASK			0xFF
+#define DB_RDMA_DPM_PARAMS_OPCODE_SHIFT			8
+#define DB_RDMA_DPM_PARAMS_WQE_SIZE_MASK		0x7FF
+#define DB_RDMA_DPM_PARAMS_WQE_SIZE_SHIFT		16
+#define DB_RDMA_DPM_PARAMS_RESERVED0_MASK		0x1
+#define DB_RDMA_DPM_PARAMS_RESERVED0_SHIFT		27
+#define DB_RDMA_DPM_PARAMS_COMPLETION_FLG_MASK		0x1
+#define DB_RDMA_DPM_PARAMS_COMPLETION_FLG_SHIFT		28
+#define DB_RDMA_DPM_PARAMS_S_FLG_MASK			0x1
+#define DB_RDMA_DPM_PARAMS_S_FLG_SHIFT			29
+#define DB_RDMA_DPM_PARAMS_RESERVED1_MASK		0x1
+#define DB_RDMA_DPM_PARAMS_RESERVED1_SHIFT		30
 #define DB_RDMA_DPM_PARAMS_CONN_TYPE_IS_IWARP_MASK	0x1
 #define DB_RDMA_DPM_PARAMS_CONN_TYPE_IS_IWARP_SHIFT	31
 };
 
-/* Structure for doorbell data, in ROCE DPM mode, for 1st db in a DPM burst */
+/* Structure for doorbell data, in RDMA DPM mode, for the first doorbell in a
+ * DPM burst.
+ */
 struct db_rdma_dpm_data {
 	__le16 icid;
 	__le16 prod_val;
@@ -987,22 +961,22 @@ enum igu_int_cmd {
 
 /* IGU producer or consumer update command */
 struct igu_prod_cons_update {
-	u32 sb_id_and_flags;
-#define IGU_PROD_CONS_UPDATE_SB_INDEX_MASK        0xFFFFFF
-#define IGU_PROD_CONS_UPDATE_SB_INDEX_SHIFT       0
-#define IGU_PROD_CONS_UPDATE_UPDATE_FLAG_MASK     0x1
-#define IGU_PROD_CONS_UPDATE_UPDATE_FLAG_SHIFT    24
-#define IGU_PROD_CONS_UPDATE_ENABLE_INT_MASK      0x3
-#define IGU_PROD_CONS_UPDATE_ENABLE_INT_SHIFT     25
-#define IGU_PROD_CONS_UPDATE_SEGMENT_ACCESS_MASK  0x1
-#define IGU_PROD_CONS_UPDATE_SEGMENT_ACCESS_SHIFT 27
-#define IGU_PROD_CONS_UPDATE_TIMER_MASK_MASK      0x1
-#define IGU_PROD_CONS_UPDATE_TIMER_MASK_SHIFT     28
-#define IGU_PROD_CONS_UPDATE_RESERVED0_MASK       0x3
-#define IGU_PROD_CONS_UPDATE_RESERVED0_SHIFT      29
-#define IGU_PROD_CONS_UPDATE_COMMAND_TYPE_MASK    0x1
-#define IGU_PROD_CONS_UPDATE_COMMAND_TYPE_SHIFT   31
-	u32 reserved1;
+	__le32 sb_id_and_flags;
+#define IGU_PROD_CONS_UPDATE_SB_INDEX_MASK		0xFFFFFF
+#define IGU_PROD_CONS_UPDATE_SB_INDEX_SHIFT		0
+#define IGU_PROD_CONS_UPDATE_UPDATE_FLAG_MASK		0x1
+#define IGU_PROD_CONS_UPDATE_UPDATE_FLAG_SHIFT		24
+#define IGU_PROD_CONS_UPDATE_ENABLE_INT_MASK		0x3
+#define IGU_PROD_CONS_UPDATE_ENABLE_INT_SHIFT		25
+#define IGU_PROD_CONS_UPDATE_SEGMENT_ACCESS_MASK	0x1
+#define IGU_PROD_CONS_UPDATE_SEGMENT_ACCESS_SHIFT	27
+#define IGU_PROD_CONS_UPDATE_TIMER_MASK_MASK		0x1
+#define IGU_PROD_CONS_UPDATE_TIMER_MASK_SHIFT		28
+#define IGU_PROD_CONS_UPDATE_RESERVED0_MASK		0x3
+#define IGU_PROD_CONS_UPDATE_RESERVED0_SHIFT		29
+#define IGU_PROD_CONS_UPDATE_COMMAND_TYPE_MASK		0x1
+#define IGU_PROD_CONS_UPDATE_COMMAND_TYPE_SHIFT		31
+	__le32 reserved1;
 };
 
 /* Igu segments access for default status block only */
@@ -1012,38 +986,63 @@ enum igu_seg_access {
 	MAX_IGU_SEG_ACCESS
 };
 
-struct parsing_and_err_flags {
-	__le16 flags;
-#define PARSING_AND_ERR_FLAGS_L3TYPE_MASK                      0x3
-#define PARSING_AND_ERR_FLAGS_L3TYPE_SHIFT                     0
-#define PARSING_AND_ERR_FLAGS_L4PROTOCOL_MASK                  0x3
-#define PARSING_AND_ERR_FLAGS_L4PROTOCOL_SHIFT                 2
-#define PARSING_AND_ERR_FLAGS_IPV4FRAG_MASK                    0x1
-#define PARSING_AND_ERR_FLAGS_IPV4FRAG_SHIFT                   4
-#define PARSING_AND_ERR_FLAGS_TAG8021QEXIST_MASK               0x1
-#define PARSING_AND_ERR_FLAGS_TAG8021QEXIST_SHIFT              5
-#define PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED_MASK        0x1
-#define PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED_SHIFT       6
-#define PARSING_AND_ERR_FLAGS_TIMESYNCPKT_MASK                 0x1
-#define PARSING_AND_ERR_FLAGS_TIMESYNCPKT_SHIFT                7
-#define PARSING_AND_ERR_FLAGS_TIMESTAMPRECORDED_MASK           0x1
-#define PARSING_AND_ERR_FLAGS_TIMESTAMPRECORDED_SHIFT          8
-#define PARSING_AND_ERR_FLAGS_IPHDRERROR_MASK                  0x1
-#define PARSING_AND_ERR_FLAGS_IPHDRERROR_SHIFT                 9
-#define PARSING_AND_ERR_FLAGS_L4CHKSMERROR_MASK                0x1
-#define PARSING_AND_ERR_FLAGS_L4CHKSMERROR_SHIFT               10
-#define PARSING_AND_ERR_FLAGS_TUNNELEXIST_MASK                 0x1
-#define PARSING_AND_ERR_FLAGS_TUNNELEXIST_SHIFT                11
-#define PARSING_AND_ERR_FLAGS_TUNNEL8021QTAGEXIST_MASK         0x1
-#define PARSING_AND_ERR_FLAGS_TUNNEL8021QTAGEXIST_SHIFT        12
-#define PARSING_AND_ERR_FLAGS_TUNNELIPHDRERROR_MASK            0x1
-#define PARSING_AND_ERR_FLAGS_TUNNELIPHDRERROR_SHIFT           13
-#define PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMWASCALCULATED_MASK  0x1
-#define PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMWASCALCULATED_SHIFT 14
-#define PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMERROR_MASK          0x1
-#define PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMERROR_SHIFT         15
+/* Enumeration for the L3 type field of parsing_and_err_flags.
+ * L3 type: 0 - unknown (not IP), 1 - IPv4, 2 - IPv6.
+ * (This field can be filled according to the last ethertype.)
+ */
+enum l3_type {
+	e_l3_type_unknown,
+	e_l3_type_ipv4,
+	e_l3_type_ipv6,
+	MAX_L3_TYPE
 };
 
+/* Enumeration for the l4Protocol field of parsing_and_err_flags.
+ * L4 protocol: 0 - none, 1 - TCP, 2 - UDP.
+ * If the packet is an IPv4 fragment and it is not the first fragment, the
+ * protocol type should be set to none.
+ */
+enum l4_protocol {
+	e_l4_protocol_none,
+	e_l4_protocol_tcp,
+	e_l4_protocol_udp,
+	MAX_L4_PROTOCOL
+};
+
+/* Parsing and error flags field */
+struct parsing_and_err_flags {
+	__le16 flags;
+#define PARSING_AND_ERR_FLAGS_L3TYPE_MASK			0x3
+#define PARSING_AND_ERR_FLAGS_L3TYPE_SHIFT			0
+#define PARSING_AND_ERR_FLAGS_L4PROTOCOL_MASK			0x3
+#define PARSING_AND_ERR_FLAGS_L4PROTOCOL_SHIFT			2
+#define PARSING_AND_ERR_FLAGS_IPV4FRAG_MASK			0x1
+#define PARSING_AND_ERR_FLAGS_IPV4FRAG_SHIFT			4
+#define PARSING_AND_ERR_FLAGS_TAG8021QEXIST_MASK		0x1
+#define PARSING_AND_ERR_FLAGS_TAG8021QEXIST_SHIFT		5
+#define PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED_MASK		0x1
+#define PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED_SHIFT	6
+#define PARSING_AND_ERR_FLAGS_TIMESYNCPKT_MASK			0x1
+#define PARSING_AND_ERR_FLAGS_TIMESYNCPKT_SHIFT			7
+#define PARSING_AND_ERR_FLAGS_TIMESTAMPRECORDED_MASK		0x1
+#define PARSING_AND_ERR_FLAGS_TIMESTAMPRECORDED_SHIFT		8
+#define PARSING_AND_ERR_FLAGS_IPHDRERROR_MASK			0x1
+#define PARSING_AND_ERR_FLAGS_IPHDRERROR_SHIFT			9
+#define PARSING_AND_ERR_FLAGS_L4CHKSMERROR_MASK			0x1
+#define PARSING_AND_ERR_FLAGS_L4CHKSMERROR_SHIFT		10
+#define PARSING_AND_ERR_FLAGS_TUNNELEXIST_MASK			0x1
+#define PARSING_AND_ERR_FLAGS_TUNNELEXIST_SHIFT			11
+#define PARSING_AND_ERR_FLAGS_TUNNEL8021QTAGEXIST_MASK		0x1
+#define PARSING_AND_ERR_FLAGS_TUNNEL8021QTAGEXIST_SHIFT		12
+#define PARSING_AND_ERR_FLAGS_TUNNELIPHDRERROR_MASK		0x1
+#define PARSING_AND_ERR_FLAGS_TUNNELIPHDRERROR_SHIFT		13
+#define PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMWASCALCULATED_MASK	0x1
+#define PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMWASCALCULATED_SHIFT	14
+#define PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMERROR_MASK		0x1
+#define PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMERROR_SHIFT		15
+};
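/* Editor's sketch (not part of the patch): the MASK/SHIFT pairs above are
 * consumed by shifting first and masking second (the qed driver wraps this
 * in a GET_FIELD() helper). E.g., classifying the L3 type of a completed
 * Rx packet; example_l3_type() is a hypothetical helper.
 */
static u8 example_l3_type(const struct parsing_and_err_flags *pf)
{
	u16 flags = le16_to_cpu(pf->flags);

	/* Yields an enum l3_type value: unknown/IPv4/IPv6. */
	return (flags >> PARSING_AND_ERR_FLAGS_L3TYPE_SHIFT) &
	       PARSING_AND_ERR_FLAGS_L3TYPE_MASK;
}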
+
+/* Parsing error flags bitmap */
 struct parsing_err_flags {
 	__le16 flags;
 #define PARSING_ERR_FLAGS_MAC_ERROR_MASK				0x1
@@ -1080,266 +1079,260 @@ struct parsing_err_flags {
 #define PARSING_ERR_FLAGS_TUNNEL_L4_CHKSM_ERROR_SHIFT			15
 };
 
+/* Pb context */
 struct pb_context {
 	__le32 crc[4];
 };
 
+/* Concrete Function ID */
 struct pxp_concrete_fid {
 	__le16 fid;
-#define PXP_CONCRETE_FID_PFID_MASK     0xF
-#define PXP_CONCRETE_FID_PFID_SHIFT    0
-#define PXP_CONCRETE_FID_PORT_MASK     0x3
-#define PXP_CONCRETE_FID_PORT_SHIFT    4
-#define PXP_CONCRETE_FID_PATH_MASK     0x1
-#define PXP_CONCRETE_FID_PATH_SHIFT    6
-#define PXP_CONCRETE_FID_VFVALID_MASK  0x1
-#define PXP_CONCRETE_FID_VFVALID_SHIFT 7
-#define PXP_CONCRETE_FID_VFID_MASK     0xFF
-#define PXP_CONCRETE_FID_VFID_SHIFT    8
+#define PXP_CONCRETE_FID_PFID_MASK	0xF
+#define PXP_CONCRETE_FID_PFID_SHIFT	0
+#define PXP_CONCRETE_FID_PORT_MASK	0x3
+#define PXP_CONCRETE_FID_PORT_SHIFT	4
+#define PXP_CONCRETE_FID_PATH_MASK	0x1
+#define PXP_CONCRETE_FID_PATH_SHIFT	6
+#define PXP_CONCRETE_FID_VFVALID_MASK	0x1
+#define PXP_CONCRETE_FID_VFVALID_SHIFT	7
+#define PXP_CONCRETE_FID_VFID_MASK	0xFF
+#define PXP_CONCRETE_FID_VFID_SHIFT	8
 };
 
+/* Concrete Function ID */
 struct pxp_pretend_concrete_fid {
 	__le16 fid;
-#define PXP_PRETEND_CONCRETE_FID_PFID_MASK      0xF
-#define PXP_PRETEND_CONCRETE_FID_PFID_SHIFT     0
-#define PXP_PRETEND_CONCRETE_FID_RESERVED_MASK  0x7
-#define PXP_PRETEND_CONCRETE_FID_RESERVED_SHIFT 4
-#define PXP_PRETEND_CONCRETE_FID_VFVALID_MASK   0x1
-#define PXP_PRETEND_CONCRETE_FID_VFVALID_SHIFT  7
-#define PXP_PRETEND_CONCRETE_FID_VFID_MASK      0xFF
-#define PXP_PRETEND_CONCRETE_FID_VFID_SHIFT     8
+#define PXP_PRETEND_CONCRETE_FID_PFID_MASK	0xF
+#define PXP_PRETEND_CONCRETE_FID_PFID_SHIFT	0
+#define PXP_PRETEND_CONCRETE_FID_RESERVED_MASK	0x7
+#define PXP_PRETEND_CONCRETE_FID_RESERVED_SHIFT	4
+#define PXP_PRETEND_CONCRETE_FID_VFVALID_MASK	0x1
+#define PXP_PRETEND_CONCRETE_FID_VFVALID_SHIFT	7
+#define PXP_PRETEND_CONCRETE_FID_VFID_MASK	0xFF
+#define PXP_PRETEND_CONCRETE_FID_VFID_SHIFT	8
 };
 
+/* Function ID */
 union pxp_pretend_fid {
 	struct pxp_pretend_concrete_fid concrete_fid;
-	__le16				opaque_fid;
+	__le16 opaque_fid;
 };
 
-/* Pxp Pretend Command Register. */
+/* Pxp Pretend Command Register */
 struct pxp_pretend_cmd {
-	union pxp_pretend_fid	fid;
-	__le16			control;
-#define PXP_PRETEND_CMD_PATH_MASK              0x1
-#define PXP_PRETEND_CMD_PATH_SHIFT             0
-#define PXP_PRETEND_CMD_USE_PORT_MASK          0x1
-#define PXP_PRETEND_CMD_USE_PORT_SHIFT         1
-#define PXP_PRETEND_CMD_PORT_MASK              0x3
-#define PXP_PRETEND_CMD_PORT_SHIFT             2
-#define PXP_PRETEND_CMD_RESERVED0_MASK         0xF
-#define PXP_PRETEND_CMD_RESERVED0_SHIFT        4
-#define PXP_PRETEND_CMD_RESERVED1_MASK         0xF
-#define PXP_PRETEND_CMD_RESERVED1_SHIFT        8
-#define PXP_PRETEND_CMD_PRETEND_PATH_MASK      0x1
-#define PXP_PRETEND_CMD_PRETEND_PATH_SHIFT     12
-#define PXP_PRETEND_CMD_PRETEND_PORT_MASK      0x1
-#define PXP_PRETEND_CMD_PRETEND_PORT_SHIFT     13
-#define PXP_PRETEND_CMD_PRETEND_FUNCTION_MASK  0x1
-#define PXP_PRETEND_CMD_PRETEND_FUNCTION_SHIFT 14
-#define PXP_PRETEND_CMD_IS_CONCRETE_MASK       0x1
-#define PXP_PRETEND_CMD_IS_CONCRETE_SHIFT      15
+	union pxp_pretend_fid fid;
+	__le16 control;
+#define PXP_PRETEND_CMD_PATH_MASK		0x1
+#define PXP_PRETEND_CMD_PATH_SHIFT		0
+#define PXP_PRETEND_CMD_USE_PORT_MASK		0x1
+#define PXP_PRETEND_CMD_USE_PORT_SHIFT		1
+#define PXP_PRETEND_CMD_PORT_MASK		0x3
+#define PXP_PRETEND_CMD_PORT_SHIFT		2
+#define PXP_PRETEND_CMD_RESERVED0_MASK		0xF
+#define PXP_PRETEND_CMD_RESERVED0_SHIFT		4
+#define PXP_PRETEND_CMD_RESERVED1_MASK		0xF
+#define PXP_PRETEND_CMD_RESERVED1_SHIFT		8
+#define PXP_PRETEND_CMD_PRETEND_PATH_MASK	0x1
+#define PXP_PRETEND_CMD_PRETEND_PATH_SHIFT	12
+#define PXP_PRETEND_CMD_PRETEND_PORT_MASK	0x1
+#define PXP_PRETEND_CMD_PRETEND_PORT_SHIFT	13
+#define PXP_PRETEND_CMD_PRETEND_FUNCTION_MASK	0x1
+#define PXP_PRETEND_CMD_PRETEND_FUNCTION_SHIFT	14
+#define PXP_PRETEND_CMD_IS_CONCRETE_MASK	0x1
+#define PXP_PRETEND_CMD_IS_CONCRETE_SHIFT	15
 };
 
-/* PTT Record in PXP Admin Window. */
+/* PTT Record in PXP Admin Window */
 struct pxp_ptt_entry {
-	__le32			offset;
-#define PXP_PTT_ENTRY_OFFSET_MASK     0x7FFFFF
-#define PXP_PTT_ENTRY_OFFSET_SHIFT    0
-#define PXP_PTT_ENTRY_RESERVED0_MASK  0x1FF
-#define PXP_PTT_ENTRY_RESERVED0_SHIFT 23
-	struct pxp_pretend_cmd	pretend;
+	__le32 offset;
+#define PXP_PTT_ENTRY_OFFSET_MASK	0x7FFFFF
+#define PXP_PTT_ENTRY_OFFSET_SHIFT	0
+#define PXP_PTT_ENTRY_RESERVED0_MASK	0x1FF
+#define PXP_PTT_ENTRY_RESERVED0_SHIFT	23
+	struct pxp_pretend_cmd pretend;
 };
 
-/* VF Zone A Permission Register. */
+/* VF Zone A Permission Register */
 struct pxp_vf_zone_a_permission {
 	__le32 control;
-#define PXP_VF_ZONE_A_PERMISSION_VFID_MASK	0xFF
-#define PXP_VF_ZONE_A_PERMISSION_VFID_SHIFT	0
-#define PXP_VF_ZONE_A_PERMISSION_VALID_MASK	0x1
-#define PXP_VF_ZONE_A_PERMISSION_VALID_SHIFT	8
-#define PXP_VF_ZONE_A_PERMISSION_RESERVED0_MASK	0x7F
-#define PXP_VF_ZONE_A_PERMISSION_RESERVED0_SHIFT 9
-#define PXP_VF_ZONE_A_PERMISSION_RESERVED1_MASK	0xFFFF
-#define PXP_VF_ZONE_A_PERMISSION_RESERVED1_SHIFT 16
+#define PXP_VF_ZONE_A_PERMISSION_VFID_MASK		0xFF
+#define PXP_VF_ZONE_A_PERMISSION_VFID_SHIFT		0
+#define PXP_VF_ZONE_A_PERMISSION_VALID_MASK		0x1
+#define PXP_VF_ZONE_A_PERMISSION_VALID_SHIFT		8
+#define PXP_VF_ZONE_A_PERMISSION_RESERVED0_MASK		0x7F
+#define PXP_VF_ZONE_A_PERMISSION_RESERVED0_SHIFT	9
+#define PXP_VF_ZONE_A_PERMISSION_RESERVED1_MASK		0xFFFF
+#define PXP_VF_ZONE_A_PERMISSION_RESERVED1_SHIFT	16
 };
 
-/* RSS hash type */
+/* Rdif context */
 struct rdif_task_context {
 	__le32 initial_ref_tag;
 	__le16 app_tag_value;
 	__le16 app_tag_mask;
 	u8 flags0;
-#define RDIF_TASK_CONTEXT_IGNOREAPPTAG_MASK            0x1
-#define RDIF_TASK_CONTEXT_IGNOREAPPTAG_SHIFT           0
-#define RDIF_TASK_CONTEXT_INITIALREFTAGVALID_MASK      0x1
-#define RDIF_TASK_CONTEXT_INITIALREFTAGVALID_SHIFT     1
-#define RDIF_TASK_CONTEXT_HOSTGUARDTYPE_MASK           0x1
-#define RDIF_TASK_CONTEXT_HOSTGUARDTYPE_SHIFT          2
-#define RDIF_TASK_CONTEXT_SETERRORWITHEOP_MASK         0x1
-#define RDIF_TASK_CONTEXT_SETERRORWITHEOP_SHIFT        3
-#define RDIF_TASK_CONTEXT_PROTECTIONTYPE_MASK          0x3
-#define RDIF_TASK_CONTEXT_PROTECTIONTYPE_SHIFT         4
-#define RDIF_TASK_CONTEXT_CRC_SEED_MASK                0x1
-#define RDIF_TASK_CONTEXT_CRC_SEED_SHIFT               6
-#define RDIF_TASK_CONTEXT_KEEPREFTAGCONST_MASK         0x1
-#define RDIF_TASK_CONTEXT_KEEPREFTAGCONST_SHIFT        7
+#define RDIF_TASK_CONTEXT_IGNORE_APP_TAG_MASK		0x1
+#define RDIF_TASK_CONTEXT_IGNORE_APP_TAG_SHIFT		0
+#define RDIF_TASK_CONTEXT_INITIAL_REF_TAG_VALID_MASK	0x1
+#define RDIF_TASK_CONTEXT_INITIAL_REF_TAG_VALID_SHIFT	1
+#define RDIF_TASK_CONTEXT_HOST_GUARD_TYPE_MASK		0x1
+#define RDIF_TASK_CONTEXT_HOST_GUARD_TYPE_SHIFT		2
+#define RDIF_TASK_CONTEXT_SET_ERROR_WITH_EOP_MASK	0x1
+#define RDIF_TASK_CONTEXT_SET_ERROR_WITH_EOP_SHIFT	3
+#define RDIF_TASK_CONTEXT_PROTECTION_TYPE_MASK		0x3
+#define RDIF_TASK_CONTEXT_PROTECTION_TYPE_SHIFT		4
+#define RDIF_TASK_CONTEXT_CRC_SEED_MASK			0x1
+#define RDIF_TASK_CONTEXT_CRC_SEED_SHIFT		6
+#define RDIF_TASK_CONTEXT_KEEP_REF_TAG_CONST_MASK	0x1
+#define RDIF_TASK_CONTEXT_KEEP_REF_TAG_CONST_SHIFT	7
 	u8 partial_dif_data[7];
 	__le16 partial_crc_value;
 	__le16 partial_checksum_value;
 	__le32 offset_in_io;
 	__le16 flags1;
-#define RDIF_TASK_CONTEXT_VALIDATEGUARD_MASK           0x1
-#define RDIF_TASK_CONTEXT_VALIDATEGUARD_SHIFT          0
-#define RDIF_TASK_CONTEXT_VALIDATEAPPTAG_MASK          0x1
-#define RDIF_TASK_CONTEXT_VALIDATEAPPTAG_SHIFT         1
-#define RDIF_TASK_CONTEXT_VALIDATEREFTAG_MASK          0x1
-#define RDIF_TASK_CONTEXT_VALIDATEREFTAG_SHIFT         2
-#define RDIF_TASK_CONTEXT_FORWARDGUARD_MASK            0x1
-#define RDIF_TASK_CONTEXT_FORWARDGUARD_SHIFT           3
-#define RDIF_TASK_CONTEXT_FORWARDAPPTAG_MASK           0x1
-#define RDIF_TASK_CONTEXT_FORWARDAPPTAG_SHIFT          4
-#define RDIF_TASK_CONTEXT_FORWARDREFTAG_MASK           0x1
-#define RDIF_TASK_CONTEXT_FORWARDREFTAG_SHIFT          5
-#define RDIF_TASK_CONTEXT_INTERVALSIZE_MASK            0x7
-#define RDIF_TASK_CONTEXT_INTERVALSIZE_SHIFT           6
-#define RDIF_TASK_CONTEXT_HOSTINTERFACE_MASK           0x3
-#define RDIF_TASK_CONTEXT_HOSTINTERFACE_SHIFT          9
-#define RDIF_TASK_CONTEXT_DIFBEFOREDATA_MASK           0x1
-#define RDIF_TASK_CONTEXT_DIFBEFOREDATA_SHIFT          11
-#define RDIF_TASK_CONTEXT_RESERVED0_MASK               0x1
-#define RDIF_TASK_CONTEXT_RESERVED0_SHIFT              12
-#define RDIF_TASK_CONTEXT_NETWORKINTERFACE_MASK        0x1
-#define RDIF_TASK_CONTEXT_NETWORKINTERFACE_SHIFT       13
-#define RDIF_TASK_CONTEXT_FORWARDAPPTAGWITHMASK_MASK   0x1
-#define RDIF_TASK_CONTEXT_FORWARDAPPTAGWITHMASK_SHIFT  14
-#define RDIF_TASK_CONTEXT_FORWARDREFTAGWITHMASK_MASK   0x1
-#define RDIF_TASK_CONTEXT_FORWARDREFTAGWITHMASK_SHIFT  15
+#define RDIF_TASK_CONTEXT_VALIDATE_GUARD_MASK			0x1
+#define RDIF_TASK_CONTEXT_VALIDATE_GUARD_SHIFT			0
+#define RDIF_TASK_CONTEXT_VALIDATE_APP_TAG_MASK			0x1
+#define RDIF_TASK_CONTEXT_VALIDATE_APP_TAG_SHIFT		1
+#define RDIF_TASK_CONTEXT_VALIDATE_REF_TAG_MASK			0x1
+#define RDIF_TASK_CONTEXT_VALIDATE_REF_TAG_SHIFT		2
+#define RDIF_TASK_CONTEXT_FORWARD_GUARD_MASK			0x1
+#define RDIF_TASK_CONTEXT_FORWARD_GUARD_SHIFT			3
+#define RDIF_TASK_CONTEXT_FORWARD_APP_TAG_MASK			0x1
+#define RDIF_TASK_CONTEXT_FORWARD_APP_TAG_SHIFT			4
+#define RDIF_TASK_CONTEXT_FORWARD_REF_TAG_MASK			0x1
+#define RDIF_TASK_CONTEXT_FORWARD_REF_TAG_SHIFT			5
+#define RDIF_TASK_CONTEXT_INTERVAL_SIZE_MASK			0x7
+#define RDIF_TASK_CONTEXT_INTERVAL_SIZE_SHIFT			6
+#define RDIF_TASK_CONTEXT_HOST_INTERFACE_MASK			0x3
+#define RDIF_TASK_CONTEXT_HOST_INTERFACE_SHIFT			9
+#define RDIF_TASK_CONTEXT_DIF_BEFORE_DATA_MASK			0x1
+#define RDIF_TASK_CONTEXT_DIF_BEFORE_DATA_SHIFT			11
+#define RDIF_TASK_CONTEXT_RESERVED0_MASK			0x1
+#define RDIF_TASK_CONTEXT_RESERVED0_SHIFT			12
+#define RDIF_TASK_CONTEXT_NETWORK_INTERFACE_MASK		0x1
+#define RDIF_TASK_CONTEXT_NETWORK_INTERFACE_SHIFT		13
+#define RDIF_TASK_CONTEXT_FORWARD_APP_TAG_WITH_MASK_MASK	0x1
+#define RDIF_TASK_CONTEXT_FORWARD_APP_TAG_WITH_MASK_SHIFT	14
+#define RDIF_TASK_CONTEXT_FORWARD_REF_TAG_WITH_MASK_MASK	0x1
+#define RDIF_TASK_CONTEXT_FORWARD_REF_TAG_WITH_MASK_SHIFT	15
 	__le16 state;
-#define RDIF_TASK_CONTEXT_RECEIVEDDIFBYTESLEFT_MASK    0xF
-#define RDIF_TASK_CONTEXT_RECEIVEDDIFBYTESLEFT_SHIFT   0
-#define RDIF_TASK_CONTEXT_TRANSMITEDDIFBYTESLEFT_MASK  0xF
-#define RDIF_TASK_CONTEXT_TRANSMITEDDIFBYTESLEFT_SHIFT 4
-#define RDIF_TASK_CONTEXT_ERRORINIO_MASK               0x1
-#define RDIF_TASK_CONTEXT_ERRORINIO_SHIFT              8
-#define RDIF_TASK_CONTEXT_CHECKSUMOVERFLOW_MASK        0x1
-#define RDIF_TASK_CONTEXT_CHECKSUMOVERFLOW_SHIFT       9
-#define RDIF_TASK_CONTEXT_REFTAGMASK_MASK              0xF
-#define RDIF_TASK_CONTEXT_REFTAGMASK_SHIFT             10
-#define RDIF_TASK_CONTEXT_RESERVED1_MASK               0x3
-#define RDIF_TASK_CONTEXT_RESERVED1_SHIFT              14
+#define RDIF_TASK_CONTEXT_RECEIVED_DIF_BYTES_LEFT_MASK		0xF
+#define RDIF_TASK_CONTEXT_RECEIVED_DIF_BYTES_LEFT_SHIFT		0
+#define RDIF_TASK_CONTEXT_TRANSMITED_DIF_BYTES_LEFT_MASK	0xF
+#define RDIF_TASK_CONTEXT_TRANSMITED_DIF_BYTES_LEFT_SHIFT	4
+#define RDIF_TASK_CONTEXT_ERROR_IN_IO_MASK			0x1
+#define RDIF_TASK_CONTEXT_ERROR_IN_IO_SHIFT			8
+#define RDIF_TASK_CONTEXT_CHECKSUM_OVERFLOW_MASK		0x1
+#define RDIF_TASK_CONTEXT_CHECKSUM_OVERFLOW_SHIFT		9
+#define RDIF_TASK_CONTEXT_REF_TAG_MASK_MASK			0xF
+#define RDIF_TASK_CONTEXT_REF_TAG_MASK_SHIFT			10
+#define RDIF_TASK_CONTEXT_RESERVED1_MASK			0x3
+#define RDIF_TASK_CONTEXT_RESERVED1_SHIFT			14
 	__le32 reserved2;
 };
 
-/* RSS hash type */
-enum rss_hash_type {
-	RSS_HASH_TYPE_DEFAULT	= 0,
-	RSS_HASH_TYPE_IPV4	= 1,
-	RSS_HASH_TYPE_TCP_IPV4	= 2,
-	RSS_HASH_TYPE_IPV6	= 3,
-	RSS_HASH_TYPE_TCP_IPV6	= 4,
-	RSS_HASH_TYPE_UDP_IPV4	= 5,
-	RSS_HASH_TYPE_UDP_IPV6	= 6,
-	MAX_RSS_HASH_TYPE
-};
-
-/* status block structure */
-struct status_block {
-	__le16	pi_array[PIS_PER_SB];
+/* Status block structure */
+struct status_block_e4 {
+	__le16	pi_array[PIS_PER_SB_E4];
 	__le32	sb_num;
-#define STATUS_BLOCK_SB_NUM_MASK      0x1FF
-#define STATUS_BLOCK_SB_NUM_SHIFT     0
-#define STATUS_BLOCK_ZERO_PAD_MASK    0x7F
-#define STATUS_BLOCK_ZERO_PAD_SHIFT   9
-#define STATUS_BLOCK_ZERO_PAD2_MASK   0xFFFF
-#define STATUS_BLOCK_ZERO_PAD2_SHIFT  16
+#define STATUS_BLOCK_E4_SB_NUM_MASK	0x1FF
+#define STATUS_BLOCK_E4_SB_NUM_SHIFT	0
+#define STATUS_BLOCK_E4_ZERO_PAD_MASK	0x7F
+#define STATUS_BLOCK_E4_ZERO_PAD_SHIFT	9
+#define STATUS_BLOCK_E4_ZERO_PAD2_MASK	0xFFFF
+#define STATUS_BLOCK_E4_ZERO_PAD2_SHIFT	16
 	__le32 prod_index;
-#define STATUS_BLOCK_PROD_INDEX_MASK  0xFFFFFF
-#define STATUS_BLOCK_PROD_INDEX_SHIFT 0
-#define STATUS_BLOCK_ZERO_PAD3_MASK   0xFF
-#define STATUS_BLOCK_ZERO_PAD3_SHIFT  24
+#define STATUS_BLOCK_E4_PROD_INDEX_MASK		0xFFFFFF
+#define STATUS_BLOCK_E4_PROD_INDEX_SHIFT	0
+#define STATUS_BLOCK_E4_ZERO_PAD3_MASK		0xFF
+#define STATUS_BLOCK_E4_ZERO_PAD3_SHIFT		24
 };
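
Consumers read the producer index back out with the matching accessor, masking
after the little-endian conversion. A sketch, assuming the GET_FIELD() helper
from common_hsi.h:

/* Sketch: extract the 24-bit producer index from an E4 status block. */
static u32 example_sb_prod_index(struct status_block_e4 *sb)
{
	return GET_FIELD(le32_to_cpu(sb->prod_index),
			 STATUS_BLOCK_E4_PROD_INDEX);
}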
 
+/* Tdif context */
 struct tdif_task_context {
 	__le32 initial_ref_tag;
 	__le16 app_tag_value;
 	__le16 app_tag_mask;
-	__le16 partial_crc_valueB;
-	__le16 partial_checksum_valueB;
+	__le16 partial_crc_value_b;
+	__le16 partial_checksum_value_b;
 	__le16 stateB;
-#define TDIF_TASK_CONTEXT_RECEIVEDDIFBYTESLEFTB_MASK    0xF
-#define TDIF_TASK_CONTEXT_RECEIVEDDIFBYTESLEFTB_SHIFT   0
-#define TDIF_TASK_CONTEXT_TRANSMITEDDIFBYTESLEFTB_MASK  0xF
-#define TDIF_TASK_CONTEXT_TRANSMITEDDIFBYTESLEFTB_SHIFT 4
-#define TDIF_TASK_CONTEXT_ERRORINIOB_MASK               0x1
-#define TDIF_TASK_CONTEXT_ERRORINIOB_SHIFT              8
-#define TDIF_TASK_CONTEXT_CHECKSUMOVERFLOW_MASK         0x1
-#define TDIF_TASK_CONTEXT_CHECKSUMOVERFLOW_SHIFT        9
-#define TDIF_TASK_CONTEXT_RESERVED0_MASK                0x3F
-#define TDIF_TASK_CONTEXT_RESERVED0_SHIFT               10
+#define TDIF_TASK_CONTEXT_RECEIVED_DIF_BYTES_LEFT_B_MASK	0xF
+#define TDIF_TASK_CONTEXT_RECEIVED_DIF_BYTES_LEFT_B_SHIFT	0
+#define TDIF_TASK_CONTEXT_TRANSMITED_DIF_BYTES_LEFT_B_MASK	0xF
+#define TDIF_TASK_CONTEXT_TRANSMITED_DIF_BYTES_LEFT_B_SHIFT	4
+#define TDIF_TASK_CONTEXT_ERROR_IN_IO_B_MASK			0x1
+#define TDIF_TASK_CONTEXT_ERROR_IN_IO_B_SHIFT			8
+#define TDIF_TASK_CONTEXT_CHECKSUM_OVERFLOW_MASK		0x1
+#define TDIF_TASK_CONTEXT_CHECKSUM_OVERFLOW_SHIFT		9
+#define TDIF_TASK_CONTEXT_RESERVED0_MASK			0x3F
+#define TDIF_TASK_CONTEXT_RESERVED0_SHIFT			10
 	u8 reserved1;
 	u8 flags0;
-#define TDIF_TASK_CONTEXT_IGNOREAPPTAG_MASK             0x1
-#define TDIF_TASK_CONTEXT_IGNOREAPPTAG_SHIFT            0
-#define TDIF_TASK_CONTEXT_INITIALREFTAGVALID_MASK       0x1
-#define TDIF_TASK_CONTEXT_INITIALREFTAGVALID_SHIFT      1
-#define TDIF_TASK_CONTEXT_HOSTGUARDTYPE_MASK            0x1
-#define TDIF_TASK_CONTEXT_HOSTGUARDTYPE_SHIFT           2
-#define TDIF_TASK_CONTEXT_SETERRORWITHEOP_MASK          0x1
-#define TDIF_TASK_CONTEXT_SETERRORWITHEOP_SHIFT         3
-#define TDIF_TASK_CONTEXT_PROTECTIONTYPE_MASK           0x3
-#define TDIF_TASK_CONTEXT_PROTECTIONTYPE_SHIFT          4
-#define TDIF_TASK_CONTEXT_CRC_SEED_MASK                 0x1
-#define TDIF_TASK_CONTEXT_CRC_SEED_SHIFT                6
-#define TDIF_TASK_CONTEXT_RESERVED2_MASK                0x1
-#define TDIF_TASK_CONTEXT_RESERVED2_SHIFT               7
+#define TDIF_TASK_CONTEXT_IGNORE_APP_TAG_MASK			0x1
+#define TDIF_TASK_CONTEXT_IGNORE_APP_TAG_SHIFT			0
+#define TDIF_TASK_CONTEXT_INITIAL_REF_TAG_VALID_MASK		0x1
+#define TDIF_TASK_CONTEXT_INITIAL_REF_TAG_VALID_SHIFT		1
+#define TDIF_TASK_CONTEXT_HOST_GUARD_TYPE_MASK			0x1
+#define TDIF_TASK_CONTEXT_HOST_GUARD_TYPE_SHIFT			2
+#define TDIF_TASK_CONTEXT_SET_ERROR_WITH_EOP_MASK		0x1
+#define TDIF_TASK_CONTEXT_SET_ERROR_WITH_EOP_SHIFT		3
+#define TDIF_TASK_CONTEXT_PROTECTION_TYPE_MASK			0x3
+#define TDIF_TASK_CONTEXT_PROTECTION_TYPE_SHIFT			4
+#define TDIF_TASK_CONTEXT_CRC_SEED_MASK				0x1
+#define TDIF_TASK_CONTEXT_CRC_SEED_SHIFT			6
+#define TDIF_TASK_CONTEXT_RESERVED2_MASK			0x1
+#define TDIF_TASK_CONTEXT_RESERVED2_SHIFT			7
 	__le32 flags1;
-#define TDIF_TASK_CONTEXT_VALIDATEGUARD_MASK            0x1
-#define TDIF_TASK_CONTEXT_VALIDATEGUARD_SHIFT           0
-#define TDIF_TASK_CONTEXT_VALIDATEAPPTAG_MASK           0x1
-#define TDIF_TASK_CONTEXT_VALIDATEAPPTAG_SHIFT          1
-#define TDIF_TASK_CONTEXT_VALIDATEREFTAG_MASK           0x1
-#define TDIF_TASK_CONTEXT_VALIDATEREFTAG_SHIFT          2
-#define TDIF_TASK_CONTEXT_FORWARDGUARD_MASK             0x1
-#define TDIF_TASK_CONTEXT_FORWARDGUARD_SHIFT            3
-#define TDIF_TASK_CONTEXT_FORWARDAPPTAG_MASK            0x1
-#define TDIF_TASK_CONTEXT_FORWARDAPPTAG_SHIFT           4
-#define TDIF_TASK_CONTEXT_FORWARDREFTAG_MASK            0x1
-#define TDIF_TASK_CONTEXT_FORWARDREFTAG_SHIFT           5
-#define TDIF_TASK_CONTEXT_INTERVALSIZE_MASK             0x7
-#define TDIF_TASK_CONTEXT_INTERVALSIZE_SHIFT            6
-#define TDIF_TASK_CONTEXT_HOSTINTERFACE_MASK            0x3
-#define TDIF_TASK_CONTEXT_HOSTINTERFACE_SHIFT           9
-#define TDIF_TASK_CONTEXT_DIFBEFOREDATA_MASK            0x1
-#define TDIF_TASK_CONTEXT_DIFBEFOREDATA_SHIFT           11
-#define TDIF_TASK_CONTEXT_RESERVED3_MASK                0x1
-#define TDIF_TASK_CONTEXT_RESERVED3_SHIFT               12
-#define TDIF_TASK_CONTEXT_NETWORKINTERFACE_MASK         0x1
-#define TDIF_TASK_CONTEXT_NETWORKINTERFACE_SHIFT        13
-#define TDIF_TASK_CONTEXT_RECEIVEDDIFBYTESLEFTA_MASK    0xF
-#define TDIF_TASK_CONTEXT_RECEIVEDDIFBYTESLEFTA_SHIFT   14
-#define TDIF_TASK_CONTEXT_TRANSMITEDDIFBYTESLEFTA_MASK  0xF
-#define TDIF_TASK_CONTEXT_TRANSMITEDDIFBYTESLEFTA_SHIFT 18
-#define TDIF_TASK_CONTEXT_ERRORINIOA_MASK               0x1
-#define TDIF_TASK_CONTEXT_ERRORINIOA_SHIFT              22
-#define TDIF_TASK_CONTEXT_CHECKSUMOVERFLOWA_MASK        0x1
-#define TDIF_TASK_CONTEXT_CHECKSUMOVERFLOWA_SHIFT       23
-#define TDIF_TASK_CONTEXT_REFTAGMASK_MASK               0xF
-#define TDIF_TASK_CONTEXT_REFTAGMASK_SHIFT              24
-#define TDIF_TASK_CONTEXT_FORWARDAPPTAGWITHMASK_MASK    0x1
-#define TDIF_TASK_CONTEXT_FORWARDAPPTAGWITHMASK_SHIFT   28
-#define TDIF_TASK_CONTEXT_FORWARDREFTAGWITHMASK_MASK    0x1
-#define TDIF_TASK_CONTEXT_FORWARDREFTAGWITHMASK_SHIFT   29
-#define TDIF_TASK_CONTEXT_KEEPREFTAGCONST_MASK          0x1
-#define TDIF_TASK_CONTEXT_KEEPREFTAGCONST_SHIFT         30
-#define TDIF_TASK_CONTEXT_RESERVED4_MASK                0x1
-#define TDIF_TASK_CONTEXT_RESERVED4_SHIFT               31
-	__le32 offset_in_iob;
+#define TDIF_TASK_CONTEXT_VALIDATE_GUARD_MASK			0x1
+#define TDIF_TASK_CONTEXT_VALIDATE_GUARD_SHIFT			0
+#define TDIF_TASK_CONTEXT_VALIDATE_APP_TAG_MASK			0x1
+#define TDIF_TASK_CONTEXT_VALIDATE_APP_TAG_SHIFT		1
+#define TDIF_TASK_CONTEXT_VALIDATE_REF_TAG_MASK			0x1
+#define TDIF_TASK_CONTEXT_VALIDATE_REF_TAG_SHIFT		2
+#define TDIF_TASK_CONTEXT_FORWARD_GUARD_MASK			0x1
+#define TDIF_TASK_CONTEXT_FORWARD_GUARD_SHIFT			3
+#define TDIF_TASK_CONTEXT_FORWARD_APP_TAG_MASK			0x1
+#define TDIF_TASK_CONTEXT_FORWARD_APP_TAG_SHIFT			4
+#define TDIF_TASK_CONTEXT_FORWARD_REF_TAG_MASK			0x1
+#define TDIF_TASK_CONTEXT_FORWARD_REF_TAG_SHIFT			5
+#define TDIF_TASK_CONTEXT_INTERVAL_SIZE_MASK			0x7
+#define TDIF_TASK_CONTEXT_INTERVAL_SIZE_SHIFT			6
+#define TDIF_TASK_CONTEXT_HOST_INTERFACE_MASK			0x3
+#define TDIF_TASK_CONTEXT_HOST_INTERFACE_SHIFT			9
+#define TDIF_TASK_CONTEXT_DIF_BEFORE_DATA_MASK			0x1
+#define TDIF_TASK_CONTEXT_DIF_BEFORE_DATA_SHIFT			11
+#define TDIF_TASK_CONTEXT_RESERVED3_MASK			0x1
+#define TDIF_TASK_CONTEXT_RESERVED3_SHIFT			12
+#define TDIF_TASK_CONTEXT_NETWORK_INTERFACE_MASK		0x1
+#define TDIF_TASK_CONTEXT_NETWORK_INTERFACE_SHIFT		13
+#define TDIF_TASK_CONTEXT_RECEIVED_DIF_BYTES_LEFT_A_MASK	0xF
+#define TDIF_TASK_CONTEXT_RECEIVED_DIF_BYTES_LEFT_A_SHIFT	14
+#define TDIF_TASK_CONTEXT_TRANSMITED_DIF_BYTES_LEFT_A_MASK	0xF
+#define TDIF_TASK_CONTEXT_TRANSMITED_DIF_BYTES_LEFT_A_SHIFT	18
+#define TDIF_TASK_CONTEXT_ERROR_IN_IO_A_MASK			0x1
+#define TDIF_TASK_CONTEXT_ERROR_IN_IO_A_SHIFT			22
+#define TDIF_TASK_CONTEXT_CHECKSUM_OVERFLOW_A_MASK		0x1
+#define TDIF_TASK_CONTEXT_CHECKSUM_OVERFLOW_A_SHIFT		23
+#define TDIF_TASK_CONTEXT_REF_TAG_MASK_MASK			0xF
+#define TDIF_TASK_CONTEXT_REF_TAG_MASK_SHIFT			24
+#define TDIF_TASK_CONTEXT_FORWARD_APP_TAG_WITH_MASK_MASK	0x1
+#define TDIF_TASK_CONTEXT_FORWARD_APP_TAG_WITH_MASK_SHIFT	28
+#define TDIF_TASK_CONTEXT_FORWARD_REF_TAG_WITH_MASK_MASK	0x1
+#define TDIF_TASK_CONTEXT_FORWARD_REF_TAG_WITH_MASK_SHIFT	29
+#define TDIF_TASK_CONTEXT_KEEP_REF_TAG_CONST_MASK		0x1
+#define TDIF_TASK_CONTEXT_KEEP_REF_TAG_CONST_SHIFT		30
+#define TDIF_TASK_CONTEXT_RESERVED4_MASK			0x1
+#define TDIF_TASK_CONTEXT_RESERVED4_SHIFT			31
+	__le32 offset_in_io_b;
 	__le16 partial_crc_value_a;
-	__le16 partial_checksum_valuea_;
-	__le32 offset_in_ioa;
+	__le16 partial_checksum_value_a;
+	__le32 offset_in_io_a;
 	u8 partial_dif_data_a[8];
 	u8 partial_dif_data_b[8];
 };
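
The RDIF/TDIF contexts above carry the per-task T10 DIF protection state. A
sketch of turning on the common validation bits in an RDIF context, again
assuming SET_FIELD(); which bits a real task needs depends on the protection
type negotiated, so the values here are illustrative only.

/* Sketch: request guard, app-tag and ref-tag validation for a task. */
static void example_rdif_validate_all(struct rdif_task_context *ctx)
{
	u16 flags1 = 0;

	SET_FIELD(flags1, RDIF_TASK_CONTEXT_VALIDATE_GUARD, 1);
	SET_FIELD(flags1, RDIF_TASK_CONTEXT_VALIDATE_APP_TAG, 1);
	SET_FIELD(flags1, RDIF_TASK_CONTEXT_VALIDATE_REF_TAG, 1);
	ctx->flags1 = cpu_to_le16(flags1);
}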
 
+/* Timers context */
 struct timers_context {
 	__le32 logical_client_0;
 #define TIMERS_CONTEXT_EXPIRATIONTIMELC0_MASK	0x7FFFFFF
@@ -1385,6 +1378,7 @@ struct timers_context {
 #define TIMERS_CONTEXT_RESERVED7_SHIFT			29
 };
 
+/* Enum for next_protocol field of tunnel_parsing_flags / tunnelTypeDesc */
 enum tunnel_next_protocol {
 	e_unknown = 0,
 	e_l2 = 1,
diff --git a/include/linux/qed/eth_common.h b/include/linux/qed/eth_common.h
index cb06e6e..9db0285 100644
--- a/include/linux/qed/eth_common.h
+++ b/include/linux/qed/eth_common.h
@@ -36,150 +36,168 @@
 /********************/
 /* ETH FW CONSTANTS */
 /********************/
-#define ETH_HSI_VER_MAJOR                   3
-#define ETH_HSI_VER_MINOR	10
+
+#define ETH_HSI_VER_MAJOR		3
+#define ETH_HSI_VER_MINOR		10
 
 #define ETH_HSI_VER_NO_PKT_LEN_TUNN	5
 
-#define ETH_CACHE_LINE_SIZE                 64
-#define ETH_RX_CQE_GAP	32
-#define ETH_MAX_RAMROD_PER_CON                          8
-#define ETH_TX_BD_PAGE_SIZE_BYTES                       4096
-#define ETH_RX_BD_PAGE_SIZE_BYTES                       4096
-#define ETH_RX_CQE_PAGE_SIZE_BYTES                      4096
-#define ETH_RX_NUM_NEXT_PAGE_BDS                        2
+#define ETH_CACHE_LINE_SIZE		64
+#define ETH_RX_CQE_GAP			32
+#define ETH_MAX_RAMROD_PER_CON		8
+#define ETH_TX_BD_PAGE_SIZE_BYTES	4096
+#define ETH_RX_BD_PAGE_SIZE_BYTES	4096
+#define ETH_RX_CQE_PAGE_SIZE_BYTES	4096
+#define ETH_RX_NUM_NEXT_PAGE_BDS	2
 
-#define ETH_MAX_TUNN_LSO_INNER_IPV4_OFFSET          253
-#define ETH_MAX_TUNN_LSO_INNER_IPV6_OFFSET          251
+#define ETH_MAX_TUNN_LSO_INNER_IPV4_OFFSET	253
+#define ETH_MAX_TUNN_LSO_INNER_IPV6_OFFSET	251
 
-#define ETH_TX_MIN_BDS_PER_NON_LSO_PKT                          1
-#define ETH_TX_MAX_BDS_PER_NON_LSO_PACKET                       18
-#define ETH_TX_MAX_BDS_PER_LSO_PACKET	255
-#define ETH_TX_MAX_LSO_HDR_NBD                                          4
-#define ETH_TX_MIN_BDS_PER_LSO_PKT                                      3
-#define ETH_TX_MIN_BDS_PER_TUNN_IPV6_WITH_EXT_PKT       3
-#define ETH_TX_MIN_BDS_PER_IPV6_WITH_EXT_PKT            2
-#define ETH_TX_MIN_BDS_PER_PKT_W_LOOPBACK_MODE          2
-#define ETH_TX_MAX_NON_LSO_PKT_LEN	(9700 - (4 + 4 + 12 + 8))
-#define ETH_TX_MAX_LSO_HDR_BYTES                    510
-#define ETH_TX_LSO_WINDOW_BDS_NUM	(18 - 1)
-#define ETH_TX_LSO_WINDOW_MIN_LEN	9700
-#define ETH_TX_MAX_LSO_PAYLOAD_LEN	0xFE000
-#define ETH_TX_NUM_SAME_AS_LAST_ENTRIES	320
-#define ETH_TX_INACTIVE_SAME_AS_LAST	0xFFFF
+#define ETH_TX_MIN_BDS_PER_NON_LSO_PKT			1
+#define ETH_TX_MAX_BDS_PER_NON_LSO_PACKET		18
+#define ETH_TX_MAX_BDS_PER_LSO_PACKET			255
+#define ETH_TX_MAX_LSO_HDR_NBD				4
+#define ETH_TX_MIN_BDS_PER_LSO_PKT			3
+#define ETH_TX_MIN_BDS_PER_TUNN_IPV6_WITH_EXT_PKT	3
+#define ETH_TX_MIN_BDS_PER_IPV6_WITH_EXT_PKT		2
+#define ETH_TX_MIN_BDS_PER_PKT_W_LOOPBACK_MODE		2
+#define ETH_TX_MAX_NON_LSO_PKT_LEN		(9700 - (4 + 4 + 12 + 8))
+#define ETH_TX_MAX_LSO_HDR_BYTES			510
+#define ETH_TX_LSO_WINDOW_BDS_NUM			(18 - 1)
+#define ETH_TX_LSO_WINDOW_MIN_LEN			9700
+#define ETH_TX_MAX_LSO_PAYLOAD_LEN			0xFE000
+#define ETH_TX_NUM_SAME_AS_LAST_ENTRIES			320
+#define ETH_TX_INACTIVE_SAME_AS_LAST			0xFFFF
 
-#define ETH_NUM_STATISTIC_COUNTERS                      MAX_NUM_VPORTS
+#define ETH_NUM_STATISTIC_COUNTERS			MAX_NUM_VPORTS
 #define ETH_NUM_STATISTIC_COUNTERS_DOUBLE_VF_ZONE \
 	(ETH_NUM_STATISTIC_COUNTERS - MAX_NUM_VFS / 2)
 #define ETH_NUM_STATISTIC_COUNTERS_QUAD_VF_ZONE \
 	(ETH_NUM_STATISTIC_COUNTERS - 3 * MAX_NUM_VFS / 4)
 
 /* Maximum number of buffers, used for RX packet placement */
-#define ETH_RX_MAX_BUFF_PER_PKT	5
-#define ETH_RX_BD_THRESHOLD	12
+#define ETH_RX_MAX_BUFF_PER_PKT		5
+#define ETH_RX_BD_THRESHOLD		12
 
-/* num of MAC/VLAN filters */
-#define ETH_NUM_MAC_FILTERS                                     512
-#define ETH_NUM_VLAN_FILTERS                            512
+/* Num of MAC/VLAN filters */
+#define ETH_NUM_MAC_FILTERS		512
+#define ETH_NUM_VLAN_FILTERS		512
 
-/* approx. multicast constants */
-#define ETH_MULTICAST_BIN_FROM_MAC_SEED     0
-#define ETH_MULTICAST_MAC_BINS                          256
-#define ETH_MULTICAST_MAC_BINS_IN_REGS          (ETH_MULTICAST_MAC_BINS / 32)
+/* Approx. multicast constants */
+#define ETH_MULTICAST_BIN_FROM_MAC_SEED	0
+#define ETH_MULTICAST_MAC_BINS		256
+#define ETH_MULTICAST_MAC_BINS_IN_REGS	(ETH_MULTICAST_MAC_BINS / 32)
 
-/*  ethernet vport update constants */
-#define ETH_FILTER_RULES_COUNT                          10
-#define ETH_RSS_IND_TABLE_ENTRIES_NUM           128
-#define ETH_RSS_KEY_SIZE_REGS                       10
-#define ETH_RSS_ENGINE_NUM_K2               207
-#define ETH_RSS_ENGINE_NUM_BB               127
+/* Ethernet vport update constants */
+#define ETH_FILTER_RULES_COUNT		10
+#define ETH_RSS_IND_TABLE_ENTRIES_NUM	128
+#define ETH_RSS_KEY_SIZE_REGS		10
+#define ETH_RSS_ENGINE_NUM_K2		207
+#define ETH_RSS_ENGINE_NUM_BB		127
 
 /* TPA constants */
-#define ETH_TPA_MAX_AGGS_NUM              64
-#define ETH_TPA_CQE_START_LEN_LIST_SIZE   ETH_RX_MAX_BUFF_PER_PKT
-#define ETH_TPA_CQE_CONT_LEN_LIST_SIZE    6
-#define ETH_TPA_CQE_END_LEN_LIST_SIZE     4
+#define ETH_TPA_MAX_AGGS_NUM		64
+#define ETH_TPA_CQE_START_LEN_LIST_SIZE	ETH_RX_MAX_BUFF_PER_PKT
+#define ETH_TPA_CQE_CONT_LEN_LIST_SIZE	6
+#define ETH_TPA_CQE_END_LEN_LIST_SIZE	4
 
 /* Control frame check constants */
 #define ETH_CTL_FRAME_ETH_TYPE_NUM	4
 
-struct eth_tx_1st_bd_flags {
-	u8 bitfields;
-#define ETH_TX_1ST_BD_FLAGS_START_BD_MASK         0x1
-#define ETH_TX_1ST_BD_FLAGS_START_BD_SHIFT        0
-#define ETH_TX_1ST_BD_FLAGS_FORCE_VLAN_MODE_MASK  0x1
-#define ETH_TX_1ST_BD_FLAGS_FORCE_VLAN_MODE_SHIFT 1
-#define ETH_TX_1ST_BD_FLAGS_IP_CSUM_MASK          0x1
-#define ETH_TX_1ST_BD_FLAGS_IP_CSUM_SHIFT         2
-#define ETH_TX_1ST_BD_FLAGS_L4_CSUM_MASK          0x1
-#define ETH_TX_1ST_BD_FLAGS_L4_CSUM_SHIFT         3
-#define ETH_TX_1ST_BD_FLAGS_VLAN_INSERTION_MASK   0x1
-#define ETH_TX_1ST_BD_FLAGS_VLAN_INSERTION_SHIFT  4
-#define ETH_TX_1ST_BD_FLAGS_LSO_MASK              0x1
-#define ETH_TX_1ST_BD_FLAGS_LSO_SHIFT             5
-#define ETH_TX_1ST_BD_FLAGS_TUNN_IP_CSUM_MASK     0x1
-#define ETH_TX_1ST_BD_FLAGS_TUNN_IP_CSUM_SHIFT    6
-#define ETH_TX_1ST_BD_FLAGS_TUNN_L4_CSUM_MASK     0x1
-#define ETH_TX_1ST_BD_FLAGS_TUNN_L4_CSUM_SHIFT    7
+/* GFS constants */
+#define ETH_GFT_TRASH_CAN_VPORT		0x1FF
+
+/* Destination port mode */
+enum dest_port_mode {
+	DEST_PORT_PHY,
+	DEST_PORT_LOOPBACK,
+	DEST_PORT_PHY_LOOPBACK,
+	DEST_PORT_DROP,
+	MAX_DEST_PORT_MODE
 };
 
-/* The parsing information data fo rthe first tx bd of a given packet. */
+/* Ethernet address type */
+enum eth_addr_type {
+	BROADCAST_ADDRESS,
+	MULTICAST_ADDRESS,
+	UNICAST_ADDRESS,
+	UNKNOWN_ADDRESS,
+	MAX_ETH_ADDR_TYPE
+};
+
+struct eth_tx_1st_bd_flags {
+	u8 bitfields;
+#define ETH_TX_1ST_BD_FLAGS_START_BD_MASK		0x1
+#define ETH_TX_1ST_BD_FLAGS_START_BD_SHIFT		0
+#define ETH_TX_1ST_BD_FLAGS_FORCE_VLAN_MODE_MASK	0x1
+#define ETH_TX_1ST_BD_FLAGS_FORCE_VLAN_MODE_SHIFT	1
+#define ETH_TX_1ST_BD_FLAGS_IP_CSUM_MASK		0x1
+#define ETH_TX_1ST_BD_FLAGS_IP_CSUM_SHIFT		2
+#define ETH_TX_1ST_BD_FLAGS_L4_CSUM_MASK		0x1
+#define ETH_TX_1ST_BD_FLAGS_L4_CSUM_SHIFT		3
+#define ETH_TX_1ST_BD_FLAGS_VLAN_INSERTION_MASK		0x1
+#define ETH_TX_1ST_BD_FLAGS_VLAN_INSERTION_SHIFT	4
+#define ETH_TX_1ST_BD_FLAGS_LSO_MASK			0x1
+#define ETH_TX_1ST_BD_FLAGS_LSO_SHIFT			5
+#define ETH_TX_1ST_BD_FLAGS_TUNN_IP_CSUM_MASK		0x1
+#define ETH_TX_1ST_BD_FLAGS_TUNN_IP_CSUM_SHIFT		6
+#define ETH_TX_1ST_BD_FLAGS_TUNN_L4_CSUM_MASK		0x1
+#define ETH_TX_1ST_BD_FLAGS_TUNN_L4_CSUM_SHIFT		7
+};
+
+/* The parsing information data for the first tx bd of a given packet */
 struct eth_tx_data_1st_bd {
 	__le16 vlan;
 	u8 nbds;
 	struct eth_tx_1st_bd_flags bd_flags;
 	__le16 bitfields;
-#define ETH_TX_DATA_1ST_BD_TUNN_FLAG_MASK  0x1
-#define ETH_TX_DATA_1ST_BD_TUNN_FLAG_SHIFT 0
-#define ETH_TX_DATA_1ST_BD_RESERVED0_MASK          0x1
-#define ETH_TX_DATA_1ST_BD_RESERVED0_SHIFT         1
-#define ETH_TX_DATA_1ST_BD_PKT_LEN_MASK    0x3FFF
-#define ETH_TX_DATA_1ST_BD_PKT_LEN_SHIFT   2
+#define ETH_TX_DATA_1ST_BD_TUNN_FLAG_MASK	0x1
+#define ETH_TX_DATA_1ST_BD_TUNN_FLAG_SHIFT	0
+#define ETH_TX_DATA_1ST_BD_RESERVED0_MASK	0x1
+#define ETH_TX_DATA_1ST_BD_RESERVED0_SHIFT	1
+#define ETH_TX_DATA_1ST_BD_PKT_LEN_MASK		0x3FFF
+#define ETH_TX_DATA_1ST_BD_PKT_LEN_SHIFT	2
 };
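
The first BD carries both the per-packet flags and the packet length. A sketch
of filling it for a single-BD packet with L4 checksum offload, assuming
SET_FIELD() and illustrative values:

/* Sketch: describe a one-BD packet with L4 checksum offload. */
static void example_fill_1st_bd(struct eth_tx_data_1st_bd *d, u16 pkt_len)
{
	u16 bitfields = 0;

	d->nbds = 1;
	d->bd_flags.bitfields = 0;
	SET_FIELD(d->bd_flags.bitfields, ETH_TX_1ST_BD_FLAGS_START_BD, 1);
	SET_FIELD(d->bd_flags.bitfields, ETH_TX_1ST_BD_FLAGS_L4_CSUM, 1);
	SET_FIELD(bitfields, ETH_TX_DATA_1ST_BD_PKT_LEN, pkt_len);
	d->bitfields = cpu_to_le16(bitfields);
}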
 
-/* The parsing information data for the second tx bd of a given packet. */
+/* The parsing information data for the second tx bd of a given packet */
 struct eth_tx_data_2nd_bd {
 	__le16 tunn_ip_size;
 	__le16	bitfields1;
-#define ETH_TX_DATA_2ND_BD_TUNN_INNER_L2_HDR_SIZE_W_MASK  0xF
-#define ETH_TX_DATA_2ND_BD_TUNN_INNER_L2_HDR_SIZE_W_SHIFT 0
-#define ETH_TX_DATA_2ND_BD_TUNN_INNER_ETH_TYPE_MASK       0x3
-#define ETH_TX_DATA_2ND_BD_TUNN_INNER_ETH_TYPE_SHIFT      4
-#define ETH_TX_DATA_2ND_BD_DEST_PORT_MODE_MASK            0x3
-#define ETH_TX_DATA_2ND_BD_DEST_PORT_MODE_SHIFT           6
-#define ETH_TX_DATA_2ND_BD_START_BD_MASK                  0x1
-#define ETH_TX_DATA_2ND_BD_START_BD_SHIFT                 8
-#define ETH_TX_DATA_2ND_BD_TUNN_TYPE_MASK                 0x3
-#define ETH_TX_DATA_2ND_BD_TUNN_TYPE_SHIFT                9
-#define ETH_TX_DATA_2ND_BD_TUNN_INNER_IPV6_MASK           0x1
-#define ETH_TX_DATA_2ND_BD_TUNN_INNER_IPV6_SHIFT          11
-#define ETH_TX_DATA_2ND_BD_IPV6_EXT_MASK                  0x1
-#define ETH_TX_DATA_2ND_BD_IPV6_EXT_SHIFT                 12
-#define ETH_TX_DATA_2ND_BD_TUNN_IPV6_EXT_MASK             0x1
-#define ETH_TX_DATA_2ND_BD_TUNN_IPV6_EXT_SHIFT            13
-#define ETH_TX_DATA_2ND_BD_L4_UDP_MASK                    0x1
-#define ETH_TX_DATA_2ND_BD_L4_UDP_SHIFT                   14
-#define ETH_TX_DATA_2ND_BD_L4_PSEUDO_CSUM_MODE_MASK       0x1
-#define ETH_TX_DATA_2ND_BD_L4_PSEUDO_CSUM_MODE_SHIFT      15
+#define ETH_TX_DATA_2ND_BD_TUNN_INNER_L2_HDR_SIZE_W_MASK	0xF
+#define ETH_TX_DATA_2ND_BD_TUNN_INNER_L2_HDR_SIZE_W_SHIFT	0
+#define ETH_TX_DATA_2ND_BD_TUNN_INNER_ETH_TYPE_MASK		0x3
+#define ETH_TX_DATA_2ND_BD_TUNN_INNER_ETH_TYPE_SHIFT		4
+#define ETH_TX_DATA_2ND_BD_DEST_PORT_MODE_MASK			0x3
+#define ETH_TX_DATA_2ND_BD_DEST_PORT_MODE_SHIFT			6
+#define ETH_TX_DATA_2ND_BD_START_BD_MASK			0x1
+#define ETH_TX_DATA_2ND_BD_START_BD_SHIFT			8
+#define ETH_TX_DATA_2ND_BD_TUNN_TYPE_MASK			0x3
+#define ETH_TX_DATA_2ND_BD_TUNN_TYPE_SHIFT			9
+#define ETH_TX_DATA_2ND_BD_TUNN_INNER_IPV6_MASK			0x1
+#define ETH_TX_DATA_2ND_BD_TUNN_INNER_IPV6_SHIFT		11
+#define ETH_TX_DATA_2ND_BD_IPV6_EXT_MASK			0x1
+#define ETH_TX_DATA_2ND_BD_IPV6_EXT_SHIFT			12
+#define ETH_TX_DATA_2ND_BD_TUNN_IPV6_EXT_MASK			0x1
+#define ETH_TX_DATA_2ND_BD_TUNN_IPV6_EXT_SHIFT			13
+#define ETH_TX_DATA_2ND_BD_L4_UDP_MASK				0x1
+#define ETH_TX_DATA_2ND_BD_L4_UDP_SHIFT				14
+#define ETH_TX_DATA_2ND_BD_L4_PSEUDO_CSUM_MODE_MASK		0x1
+#define ETH_TX_DATA_2ND_BD_L4_PSEUDO_CSUM_MODE_SHIFT		15
 	__le16 bitfields2;
-#define ETH_TX_DATA_2ND_BD_L4_HDR_START_OFFSET_W_MASK     0x1FFF
-#define ETH_TX_DATA_2ND_BD_L4_HDR_START_OFFSET_W_SHIFT    0
-#define ETH_TX_DATA_2ND_BD_RESERVED0_MASK                 0x7
-#define ETH_TX_DATA_2ND_BD_RESERVED0_SHIFT                13
+#define ETH_TX_DATA_2ND_BD_L4_HDR_START_OFFSET_W_MASK		0x1FFF
+#define ETH_TX_DATA_2ND_BD_L4_HDR_START_OFFSET_W_SHIFT		0
+#define ETH_TX_DATA_2ND_BD_RESERVED0_MASK			0x7
+#define ETH_TX_DATA_2ND_BD_RESERVED0_SHIFT			13
 };
 
-/* Firmware data for L2-EDPM packet. */
+/* Firmware data for L2-EDPM packet */
 struct eth_edpm_fw_data {
 	struct eth_tx_data_1st_bd data_1st_bd;
 	struct eth_tx_data_2nd_bd data_2nd_bd;
 	__le32 reserved;
 };
 
-struct eth_fast_path_cqe_fw_debug {
-	__le16 reserved2;
-};
-
-/*  tunneling parsing flags */
+/* Tunneling parsing flags */
 struct eth_tunnel_parsing_flags {
 	u8 flags;
 #define	ETH_TUNNEL_PARSING_FLAGS_TYPE_MASK		0x3
@@ -199,24 +217,24 @@ struct eth_tunnel_parsing_flags {
 /* PMD flow control bits */
 struct eth_pmd_flow_flags {
 	u8 flags;
-#define ETH_PMD_FLOW_FLAGS_VALID_MASK	0x1
-#define ETH_PMD_FLOW_FLAGS_VALID_SHIFT	0
-#define ETH_PMD_FLOW_FLAGS_TOGGLE_MASK	0x1
-#define ETH_PMD_FLOW_FLAGS_TOGGLE_SHIFT	1
-#define ETH_PMD_FLOW_FLAGS_RESERVED_MASK 0x3F
-#define ETH_PMD_FLOW_FLAGS_RESERVED_SHIFT 2
+#define ETH_PMD_FLOW_FLAGS_VALID_MASK		0x1
+#define ETH_PMD_FLOW_FLAGS_VALID_SHIFT		0
+#define ETH_PMD_FLOW_FLAGS_TOGGLE_MASK		0x1
+#define ETH_PMD_FLOW_FLAGS_TOGGLE_SHIFT		1
+#define ETH_PMD_FLOW_FLAGS_RESERVED_MASK	0x3F
+#define ETH_PMD_FLOW_FLAGS_RESERVED_SHIFT	2
 };
 
-/* Regular ETH Rx FP CQE. */
+/* Regular ETH Rx FP CQE */
 struct eth_fast_path_rx_reg_cqe {
 	u8 type;
 	u8 bitfields;
-#define ETH_FAST_PATH_RX_REG_CQE_RSS_HASH_TYPE_MASK  0x7
-#define ETH_FAST_PATH_RX_REG_CQE_RSS_HASH_TYPE_SHIFT 0
-#define ETH_FAST_PATH_RX_REG_CQE_TC_MASK             0xF
-#define ETH_FAST_PATH_RX_REG_CQE_TC_SHIFT            3
-#define ETH_FAST_PATH_RX_REG_CQE_RESERVED0_MASK      0x1
-#define ETH_FAST_PATH_RX_REG_CQE_RESERVED0_SHIFT     7
+#define ETH_FAST_PATH_RX_REG_CQE_RSS_HASH_TYPE_MASK	0x7
+#define ETH_FAST_PATH_RX_REG_CQE_RSS_HASH_TYPE_SHIFT	0
+#define ETH_FAST_PATH_RX_REG_CQE_TC_MASK		0xF
+#define ETH_FAST_PATH_RX_REG_CQE_TC_SHIFT		3
+#define ETH_FAST_PATH_RX_REG_CQE_RESERVED0_MASK		0x1
+#define ETH_FAST_PATH_RX_REG_CQE_RESERVED0_SHIFT	7
 	__le16 pkt_len;
 	struct parsing_and_err_flags pars_flags;
 	__le16 vlan_tag;
@@ -225,13 +243,13 @@ struct eth_fast_path_rx_reg_cqe {
 	u8 placement_offset;
 	struct eth_tunnel_parsing_flags tunnel_pars_flags;
 	u8 bd_num;
-	u8 reserved[9];
-	struct eth_fast_path_cqe_fw_debug fw_debug;
-	u8 reserved1[3];
+	u8 reserved;
+	__le16 flow_id;
+	u8 reserved1[11];
 	struct eth_pmd_flow_flags pmd_flags;
 };
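
On the receive side the same accessors decode the CQE. A sketch, assuming
GET_FIELD(); the bitfields byte needs no endianness conversion:

/* Sketch: pull the RSS hash type and traffic class out of a fast-path CQE. */
static void example_parse_reg_cqe(struct eth_fast_path_rx_reg_cqe *cqe,
				  u8 *rss_type, u8 *tc)
{
	*rss_type = GET_FIELD(cqe->bitfields,
			      ETH_FAST_PATH_RX_REG_CQE_RSS_HASH_TYPE);
	*tc = GET_FIELD(cqe->bitfields, ETH_FAST_PATH_RX_REG_CQE_TC);
}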
 
-/* TPA-continue ETH Rx FP CQE. */
+/* TPA-continue ETH Rx FP CQE */
 struct eth_fast_path_rx_tpa_cont_cqe {
 	u8 type;
 	u8 tpa_agg_index;
@@ -243,7 +261,7 @@ struct eth_fast_path_rx_tpa_cont_cqe {
 	struct eth_pmd_flow_flags pmd_flags;
 };
 
-/* TPA-end ETH Rx FP CQE. */
+/* TPA-end ETH Rx FP CQE */
 struct eth_fast_path_rx_tpa_end_cqe {
 	u8 type;
 	u8 tpa_agg_index;
@@ -259,16 +277,16 @@ struct eth_fast_path_rx_tpa_end_cqe {
 	struct eth_pmd_flow_flags pmd_flags;
 };
 
-/* TPA-start ETH Rx FP CQE. */
+/* TPA-start ETH Rx FP CQE */
 struct eth_fast_path_rx_tpa_start_cqe {
 	u8 type;
 	u8 bitfields;
-#define ETH_FAST_PATH_RX_TPA_START_CQE_RSS_HASH_TYPE_MASK  0x7
-#define ETH_FAST_PATH_RX_TPA_START_CQE_RSS_HASH_TYPE_SHIFT 0
-#define ETH_FAST_PATH_RX_TPA_START_CQE_TC_MASK             0xF
-#define ETH_FAST_PATH_RX_TPA_START_CQE_TC_SHIFT            3
-#define ETH_FAST_PATH_RX_TPA_START_CQE_RESERVED0_MASK      0x1
-#define ETH_FAST_PATH_RX_TPA_START_CQE_RESERVED0_SHIFT     7
+#define ETH_FAST_PATH_RX_TPA_START_CQE_RSS_HASH_TYPE_MASK	0x7
+#define ETH_FAST_PATH_RX_TPA_START_CQE_RSS_HASH_TYPE_SHIFT	0
+#define ETH_FAST_PATH_RX_TPA_START_CQE_TC_MASK			0xF
+#define ETH_FAST_PATH_RX_TPA_START_CQE_TC_SHIFT			3
+#define ETH_FAST_PATH_RX_TPA_START_CQE_RESERVED0_MASK		0x1
+#define ETH_FAST_PATH_RX_TPA_START_CQE_RESERVED0_SHIFT		7
 	__le16 seg_len;
 	struct parsing_and_err_flags pars_flags;
 	__le16 vlan_tag;
@@ -279,7 +297,7 @@ struct eth_fast_path_rx_tpa_start_cqe {
 	u8 tpa_agg_index;
 	u8 header_len;
 	__le16 ext_bd_len_list[ETH_TPA_CQE_START_LEN_LIST_SIZE];
-	struct eth_fast_path_cqe_fw_debug fw_debug;
+	__le16 flow_id;
 	u8 reserved;
 	struct eth_pmd_flow_flags pmd_flags;
 };
@@ -295,24 +313,24 @@ struct eth_rx_bd {
 	struct regpair addr;
 };
 
-/* regular ETH Rx SP CQE */
+/* Regular ETH Rx SP CQE */
 struct eth_slow_path_rx_cqe {
-	u8	type;
-	u8	ramrod_cmd_id;
-	u8	error_flag;
-	u8	reserved[25];
-	__le16	echo;
-	u8	reserved1;
+	u8 type;
+	u8 ramrod_cmd_id;
+	u8 error_flag;
+	u8 reserved[25];
+	__le16 echo;
+	u8 reserved1;
 	struct eth_pmd_flow_flags pmd_flags;
 };
 
-/* union for all ETH Rx CQE types */
+/* Union for all ETH Rx CQE types */
 union eth_rx_cqe {
-	struct eth_fast_path_rx_reg_cqe		fast_path_regular;
-	struct eth_fast_path_rx_tpa_start_cqe	fast_path_tpa_start;
-	struct eth_fast_path_rx_tpa_cont_cqe	fast_path_tpa_cont;
-	struct eth_fast_path_rx_tpa_end_cqe	fast_path_tpa_end;
-	struct eth_slow_path_rx_cqe		slow_path;
+	struct eth_fast_path_rx_reg_cqe fast_path_regular;
+	struct eth_fast_path_rx_tpa_start_cqe fast_path_tpa_start;
+	struct eth_fast_path_rx_tpa_cont_cqe fast_path_tpa_cont;
+	struct eth_fast_path_rx_tpa_end_cqe fast_path_tpa_end;
+	struct eth_slow_path_rx_cqe slow_path;
 };
 
 /* ETH Rx CQE type */
@@ -339,7 +357,7 @@ enum eth_rx_tunn_type {
 	MAX_ETH_RX_TUNN_TYPE
 };
 
-/*  Aggregation end reason. */
+/* Aggregation end reason */
 enum eth_tpa_end_reason {
 	ETH_AGG_END_UNUSED,
 	ETH_AGG_END_SP_UPDATE,
@@ -354,59 +372,59 @@ enum eth_tpa_end_reason {
 
 /* The first tx bd of a given packet */
 struct eth_tx_1st_bd {
-	struct regpair			addr;
-	__le16				nbytes;
-	struct eth_tx_data_1st_bd	data;
+	struct regpair addr;
+	__le16 nbytes;
+	struct eth_tx_data_1st_bd data;
 };
 
 /* The second tx bd of a given packet */
 struct eth_tx_2nd_bd {
-	struct regpair			addr;
-	__le16				nbytes;
-	struct eth_tx_data_2nd_bd	data;
+	struct regpair addr;
+	__le16 nbytes;
+	struct eth_tx_data_2nd_bd data;
 };
 
-/* The parsing information data for the third tx bd of a given packet. */
+/* The parsing information data for the third tx bd of a given packet */
 struct eth_tx_data_3rd_bd {
 	__le16 lso_mss;
 	__le16 bitfields;
-#define ETH_TX_DATA_3RD_BD_TCP_HDR_LEN_DW_MASK  0xF
-#define ETH_TX_DATA_3RD_BD_TCP_HDR_LEN_DW_SHIFT 0
-#define ETH_TX_DATA_3RD_BD_HDR_NBD_MASK         0xF
-#define ETH_TX_DATA_3RD_BD_HDR_NBD_SHIFT        4
-#define ETH_TX_DATA_3RD_BD_START_BD_MASK        0x1
-#define ETH_TX_DATA_3RD_BD_START_BD_SHIFT       8
-#define ETH_TX_DATA_3RD_BD_RESERVED0_MASK       0x7F
-#define ETH_TX_DATA_3RD_BD_RESERVED0_SHIFT      9
+#define ETH_TX_DATA_3RD_BD_TCP_HDR_LEN_DW_MASK	0xF
+#define ETH_TX_DATA_3RD_BD_TCP_HDR_LEN_DW_SHIFT	0
+#define ETH_TX_DATA_3RD_BD_HDR_NBD_MASK		0xF
+#define ETH_TX_DATA_3RD_BD_HDR_NBD_SHIFT	4
+#define ETH_TX_DATA_3RD_BD_START_BD_MASK	0x1
+#define ETH_TX_DATA_3RD_BD_START_BD_SHIFT	8
+#define ETH_TX_DATA_3RD_BD_RESERVED0_MASK	0x7F
+#define ETH_TX_DATA_3RD_BD_RESERVED0_SHIFT	9
 	u8 tunn_l4_hdr_start_offset_w;
 	u8 tunn_hdr_size_w;
 };
 
 /* The third tx bd of a given packet */
 struct eth_tx_3rd_bd {
-	struct regpair			addr;
-	__le16				nbytes;
-	struct eth_tx_data_3rd_bd	data;
+	struct regpair addr;
+	__le16 nbytes;
+	struct eth_tx_data_3rd_bd data;
 };
 
-/* Complementary information for the regular tx bd of a given packet. */
+/* Complementary information for the regular tx bd of a given packet */
 struct eth_tx_data_bd {
-	__le16	reserved0;
-	__le16	bitfields;
-#define ETH_TX_DATA_BD_RESERVED1_MASK  0xFF
-#define ETH_TX_DATA_BD_RESERVED1_SHIFT 0
-#define ETH_TX_DATA_BD_START_BD_MASK   0x1
-#define ETH_TX_DATA_BD_START_BD_SHIFT  8
-#define ETH_TX_DATA_BD_RESERVED2_MASK  0x7F
-#define ETH_TX_DATA_BD_RESERVED2_SHIFT 9
+	__le16 reserved0;
+	__le16 bitfields;
+#define ETH_TX_DATA_BD_RESERVED1_MASK	0xFF
+#define ETH_TX_DATA_BD_RESERVED1_SHIFT	0
+#define ETH_TX_DATA_BD_START_BD_MASK	0x1
+#define ETH_TX_DATA_BD_START_BD_SHIFT	8
+#define ETH_TX_DATA_BD_RESERVED2_MASK	0x7F
+#define ETH_TX_DATA_BD_RESERVED2_SHIFT	9
 	__le16 reserved3;
 };
 
 /* The common non-special TX BD ring element */
 struct eth_tx_bd {
-	struct regpair	addr;
-	__le16		nbytes;
-	struct eth_tx_data_bd	data;
+	struct regpair addr;
+	__le16 nbytes;
+	struct eth_tx_data_bd data;
 };
 
 union eth_tx_bd_types {
@@ -434,18 +452,30 @@ struct xstorm_eth_queue_zone {
 /* ETH doorbell data */
 struct eth_db_data {
 	u8 params;
-#define ETH_DB_DATA_DEST_MASK         0x3
-#define ETH_DB_DATA_DEST_SHIFT        0
-#define ETH_DB_DATA_AGG_CMD_MASK      0x3
-#define ETH_DB_DATA_AGG_CMD_SHIFT     2
-#define ETH_DB_DATA_BYPASS_EN_MASK    0x1
-#define ETH_DB_DATA_BYPASS_EN_SHIFT   4
-#define ETH_DB_DATA_RESERVED_MASK     0x1
-#define ETH_DB_DATA_RESERVED_SHIFT    5
-#define ETH_DB_DATA_AGG_VAL_SEL_MASK  0x3
-#define ETH_DB_DATA_AGG_VAL_SEL_SHIFT 6
+#define ETH_DB_DATA_DEST_MASK		0x3
+#define ETH_DB_DATA_DEST_SHIFT		0
+#define ETH_DB_DATA_AGG_CMD_MASK	0x3
+#define ETH_DB_DATA_AGG_CMD_SHIFT	2
+#define ETH_DB_DATA_BYPASS_EN_MASK	0x1
+#define ETH_DB_DATA_BYPASS_EN_SHIFT	4
+#define ETH_DB_DATA_RESERVED_MASK	0x1
+#define ETH_DB_DATA_RESERVED_SHIFT	5
+#define ETH_DB_DATA_AGG_VAL_SEL_MASK	0x3
+#define ETH_DB_DATA_AGG_VAL_SEL_SHIFT	6
 	u8 agg_flags;
 	__le16 bd_prod;
 };
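
Doorbell data is composed the same way before being written to the doorbell
BAR. A sketch, assuming SET_FIELD() plus the DB_DEST_XCM destination and
DB_AGG_CMD_SET aggregation-command constants from common_hsi.h; the selector
values a real queue uses are outside this header.

/* Sketch: prepare TX doorbell data carrying a new BD producer value. */
static void example_fill_tx_db(struct eth_db_data *db, u16 bd_prod)
{
	db->params = 0;
	SET_FIELD(db->params, ETH_DB_DATA_DEST, DB_DEST_XCM);
	SET_FIELD(db->params, ETH_DB_DATA_AGG_CMD, DB_AGG_CMD_SET);
	db->agg_flags = 0;
	db->bd_prod = cpu_to_le16(bd_prod);
}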
 
+/* RSS hash type */
+enum rss_hash_type {
+	RSS_HASH_TYPE_DEFAULT = 0,
+	RSS_HASH_TYPE_IPV4 = 1,
+	RSS_HASH_TYPE_TCP_IPV4 = 2,
+	RSS_HASH_TYPE_IPV6 = 3,
+	RSS_HASH_TYPE_TCP_IPV6 = 4,
+	RSS_HASH_TYPE_UDP_IPV4 = 5,
+	RSS_HASH_TYPE_UDP_IPV6 = 6,
+	MAX_RSS_HASH_TYPE
+};
+
 #endif /* __ETH_COMMON__ */
diff --git a/include/linux/qed/fcoe_common.h b/include/linux/qed/fcoe_common.h
index 12fc9e7..22077c5 100644
--- a/include/linux/qed/fcoe_common.h
+++ b/include/linux/qed/fcoe_common.h
@@ -8,12 +8,298 @@
 
 #ifndef __FCOE_COMMON__
 #define __FCOE_COMMON__
+
 /*********************/
 /* FCOE FW CONSTANTS */
 /*********************/
 
 #define FC_ABTS_REPLY_MAX_PAYLOAD_LEN	12
 
+/* The fcoe storm task context protection information of Ystorm */
+struct protection_info_ctx {
+	__le16 flags;
+#define PROTECTION_INFO_CTX_HOST_INTERFACE_MASK		0x3
+#define PROTECTION_INFO_CTX_HOST_INTERFACE_SHIFT	0
+#define PROTECTION_INFO_CTX_DIF_TO_PEER_MASK		0x1
+#define PROTECTION_INFO_CTX_DIF_TO_PEER_SHIFT		2
+#define PROTECTION_INFO_CTX_VALIDATE_DIX_APP_TAG_MASK	0x1
+#define PROTECTION_INFO_CTX_VALIDATE_DIX_APP_TAG_SHIFT	3
+#define PROTECTION_INFO_CTX_INTERVAL_SIZE_LOG_MASK	0xF
+#define PROTECTION_INFO_CTX_INTERVAL_SIZE_LOG_SHIFT	4
+#define PROTECTION_INFO_CTX_VALIDATE_DIX_REF_TAG_MASK	0x1
+#define PROTECTION_INFO_CTX_VALIDATE_DIX_REF_TAG_SHIFT	8
+#define PROTECTION_INFO_CTX_RESERVED0_MASK		0x7F
+#define PROTECTION_INFO_CTX_RESERVED0_SHIFT		9
+	u8 dix_block_size;
+	u8 dst_size;
+};
+
+/* The fcoe storm task context protection information of Ystorm */
+union protection_info_union_ctx {
+	struct protection_info_ctx info;
+	__le32 value;
+};
+
+/* FCP CMD payload */
+struct fcoe_fcp_cmd_payload {
+	__le32 opaque[8];
+};
+
+/* FCP RSP payload */
+struct fcoe_fcp_rsp_payload {
+	__le32 opaque[6];
+};
+
+/* FCP RSP payload */
+struct fcp_rsp_payload_padded {
+	struct fcoe_fcp_rsp_payload rsp_payload;
+	__le32 reserved[2];
+};
+
+/* FCP XFER payload */
+struct fcoe_fcp_xfer_payload {
+	__le32 opaque[3];
+};
+
+/* FCP XFER payload */
+struct fcp_xfer_payload_padded {
+	struct fcoe_fcp_xfer_payload xfer_payload;
+	__le32 reserved[5];
+};
+
+/* Task params */
+struct fcoe_tx_data_params {
+	__le32 data_offset;
+	__le32 offset_in_io;
+	u8 flags;
+#define FCOE_TX_DATA_PARAMS_OFFSET_IN_IO_VALID_MASK	0x1
+#define FCOE_TX_DATA_PARAMS_OFFSET_IN_IO_VALID_SHIFT	0
+#define FCOE_TX_DATA_PARAMS_DROP_DATA_MASK		0x1
+#define FCOE_TX_DATA_PARAMS_DROP_DATA_SHIFT		1
+#define FCOE_TX_DATA_PARAMS_AFTER_SEQ_REC_MASK		0x1
+#define FCOE_TX_DATA_PARAMS_AFTER_SEQ_REC_SHIFT		2
+#define FCOE_TX_DATA_PARAMS_RESERVED0_MASK		0x1F
+#define FCOE_TX_DATA_PARAMS_RESERVED0_SHIFT		3
+	u8 dif_residual;
+	__le16 seq_cnt;
+	__le16 single_sge_saved_offset;
+	__le16 next_dif_offset;
+	__le16 seq_id;
+	__le16 reserved3;
+};
+
+/* Middle path parameters: FC header fields provided by the driver */
+struct fcoe_tx_mid_path_params {
+	__le32 parameter;
+	u8 r_ctl;
+	u8 type;
+	u8 cs_ctl;
+	u8 df_ctl;
+	__le16 rx_id;
+	__le16 ox_id;
+};
+
+/* Task params */
+struct fcoe_tx_params {
+	struct fcoe_tx_data_params data;
+	struct fcoe_tx_mid_path_params mid_path;
+};
+
+/* Union of FCP CMD payload \ TX params \ ABTS \ Cleanup */
+union fcoe_tx_info_union_ctx {
+	struct fcoe_fcp_cmd_payload fcp_cmd_payload;
+	struct fcp_rsp_payload_padded fcp_rsp_payload;
+	struct fcp_xfer_payload_padded fcp_xfer_payload;
+	struct fcoe_tx_params tx_params;
+};
+
+/* Data sgl */
+struct fcoe_slow_sgl_ctx {
+	struct regpair base_sgl_addr;
+	__le16 curr_sge_off;
+	__le16 remainder_num_sges;
+	__le16 curr_sgl_index;
+	__le16 reserved;
+};
+
+/* Union of DIX SGL \ cached DIX sges */
+union fcoe_dix_desc_ctx {
+	struct fcoe_slow_sgl_ctx dix_sgl;
+	struct scsi_sge cached_dix_sge;
+};
+
+/* The fcoe storm task context of Ystorm */
+struct ystorm_fcoe_task_st_ctx {
+	u8 task_type;
+	u8 sgl_mode;
+#define YSTORM_FCOE_TASK_ST_CTX_TX_SGL_MODE_MASK	0x1
+#define YSTORM_FCOE_TASK_ST_CTX_TX_SGL_MODE_SHIFT	0
+#define YSTORM_FCOE_TASK_ST_CTX_RSRV_MASK		0x7F
+#define YSTORM_FCOE_TASK_ST_CTX_RSRV_SHIFT		1
+	u8 cached_dix_sge;
+	u8 expect_first_xfer;
+	__le32 num_pbf_zero_write;
+	union protection_info_union_ctx protection_info_union;
+	__le32 data_2_trns_rem;
+	struct scsi_sgl_params sgl_params;
+	u8 reserved1[12];
+	union fcoe_tx_info_union_ctx tx_info_union;
+	union fcoe_dix_desc_ctx dix_desc;
+	struct scsi_cached_sges data_desc;
+	__le16 ox_id;
+	__le16 rx_id;
+	__le32 task_rety_identifier;
+	u8 reserved2[8];
+};
+
+struct e4_ystorm_fcoe_task_ag_ctx {
+	u8 byte0;
+	u8 byte1;
+	__le16 word0;
+	u8 flags0;
+#define E4_YSTORM_FCOE_TASK_AG_CTX_NIBBLE0_MASK		0xF
+#define E4_YSTORM_FCOE_TASK_AG_CTX_NIBBLE0_SHIFT	0
+#define E4_YSTORM_FCOE_TASK_AG_CTX_BIT0_MASK		0x1
+#define E4_YSTORM_FCOE_TASK_AG_CTX_BIT0_SHIFT		4
+#define E4_YSTORM_FCOE_TASK_AG_CTX_BIT1_MASK		0x1
+#define E4_YSTORM_FCOE_TASK_AG_CTX_BIT1_SHIFT		5
+#define E4_YSTORM_FCOE_TASK_AG_CTX_BIT2_MASK		0x1
+#define E4_YSTORM_FCOE_TASK_AG_CTX_BIT2_SHIFT		6
+#define E4_YSTORM_FCOE_TASK_AG_CTX_BIT3_MASK		0x1
+#define E4_YSTORM_FCOE_TASK_AG_CTX_BIT3_SHIFT		7
+	u8 flags1;
+#define E4_YSTORM_FCOE_TASK_AG_CTX_CF0_MASK		0x3
+#define E4_YSTORM_FCOE_TASK_AG_CTX_CF0_SHIFT		0
+#define E4_YSTORM_FCOE_TASK_AG_CTX_CF1_MASK		0x3
+#define E4_YSTORM_FCOE_TASK_AG_CTX_CF1_SHIFT		2
+#define E4_YSTORM_FCOE_TASK_AG_CTX_CF2SPECIAL_MASK	0x3
+#define E4_YSTORM_FCOE_TASK_AG_CTX_CF2SPECIAL_SHIFT	4
+#define E4_YSTORM_FCOE_TASK_AG_CTX_CF0EN_MASK		0x1
+#define E4_YSTORM_FCOE_TASK_AG_CTX_CF0EN_SHIFT		6
+#define E4_YSTORM_FCOE_TASK_AG_CTX_CF1EN_MASK		0x1
+#define E4_YSTORM_FCOE_TASK_AG_CTX_CF1EN_SHIFT		7
+	u8 flags2;
+#define E4_YSTORM_FCOE_TASK_AG_CTX_BIT4_MASK		0x1
+#define E4_YSTORM_FCOE_TASK_AG_CTX_BIT4_SHIFT		0
+#define E4_YSTORM_FCOE_TASK_AG_CTX_RULE0EN_MASK		0x1
+#define E4_YSTORM_FCOE_TASK_AG_CTX_RULE0EN_SHIFT	1
+#define E4_YSTORM_FCOE_TASK_AG_CTX_RULE1EN_MASK		0x1
+#define E4_YSTORM_FCOE_TASK_AG_CTX_RULE1EN_SHIFT	2
+#define E4_YSTORM_FCOE_TASK_AG_CTX_RULE2EN_MASK		0x1
+#define E4_YSTORM_FCOE_TASK_AG_CTX_RULE2EN_SHIFT	3
+#define E4_YSTORM_FCOE_TASK_AG_CTX_RULE3EN_MASK		0x1
+#define E4_YSTORM_FCOE_TASK_AG_CTX_RULE3EN_SHIFT	4
+#define E4_YSTORM_FCOE_TASK_AG_CTX_RULE4EN_MASK		0x1
+#define E4_YSTORM_FCOE_TASK_AG_CTX_RULE4EN_SHIFT	5
+#define E4_YSTORM_FCOE_TASK_AG_CTX_RULE5EN_MASK		0x1
+#define E4_YSTORM_FCOE_TASK_AG_CTX_RULE5EN_SHIFT	6
+#define E4_YSTORM_FCOE_TASK_AG_CTX_RULE6EN_MASK		0x1
+#define E4_YSTORM_FCOE_TASK_AG_CTX_RULE6EN_SHIFT	7
+	u8 byte2;
+	__le32 reg0;
+	u8 byte3;
+	u8 byte4;
+	__le16 rx_id;
+	__le16 word2;
+	__le16 word3;
+	__le16 word4;
+	__le16 word5;
+	__le32 reg1;
+	__le32 reg2;
+};
+
+struct e4_tstorm_fcoe_task_ag_ctx {
+	u8 reserved;
+	u8 byte1;
+	__le16 icid;
+	u8 flags0;
+#define E4_TSTORM_FCOE_TASK_AG_CTX_CONNECTION_TYPE_MASK		0xF
+#define E4_TSTORM_FCOE_TASK_AG_CTX_CONNECTION_TYPE_SHIFT	0
+#define E4_TSTORM_FCOE_TASK_AG_CTX_EXIST_IN_QM0_MASK		0x1
+#define E4_TSTORM_FCOE_TASK_AG_CTX_EXIST_IN_QM0_SHIFT		4
+#define E4_TSTORM_FCOE_TASK_AG_CTX_BIT1_MASK			0x1
+#define E4_TSTORM_FCOE_TASK_AG_CTX_BIT1_SHIFT			5
+#define E4_TSTORM_FCOE_TASK_AG_CTX_WAIT_ABTS_RSP_F_MASK		0x1
+#define E4_TSTORM_FCOE_TASK_AG_CTX_WAIT_ABTS_RSP_F_SHIFT	6
+#define E4_TSTORM_FCOE_TASK_AG_CTX_VALID_MASK			0x1
+#define E4_TSTORM_FCOE_TASK_AG_CTX_VALID_SHIFT			7
+	u8 flags1;
+#define E4_TSTORM_FCOE_TASK_AG_CTX_FALSE_RR_TOV_MASK	0x1
+#define E4_TSTORM_FCOE_TASK_AG_CTX_FALSE_RR_TOV_SHIFT	0
+#define E4_TSTORM_FCOE_TASK_AG_CTX_BIT5_MASK		0x1
+#define E4_TSTORM_FCOE_TASK_AG_CTX_BIT5_SHIFT		1
+#define E4_TSTORM_FCOE_TASK_AG_CTX_REC_RR_TOV_CF_MASK	0x3
+#define E4_TSTORM_FCOE_TASK_AG_CTX_REC_RR_TOV_CF_SHIFT	2
+#define E4_TSTORM_FCOE_TASK_AG_CTX_ED_TOV_CF_MASK	0x3
+#define E4_TSTORM_FCOE_TASK_AG_CTX_ED_TOV_CF_SHIFT	4
+#define E4_TSTORM_FCOE_TASK_AG_CTX_CF2_MASK		0x3
+#define E4_TSTORM_FCOE_TASK_AG_CTX_CF2_SHIFT		6
+	u8 flags2;
+#define E4_TSTORM_FCOE_TASK_AG_CTX_TIMER_STOP_ALL_MASK		0x3
+#define E4_TSTORM_FCOE_TASK_AG_CTX_TIMER_STOP_ALL_SHIFT		0
+#define E4_TSTORM_FCOE_TASK_AG_CTX_EX_CLEANUP_CF_MASK		0x3
+#define E4_TSTORM_FCOE_TASK_AG_CTX_EX_CLEANUP_CF_SHIFT		2
+#define E4_TSTORM_FCOE_TASK_AG_CTX_SEQ_INIT_CF_MASK		0x3
+#define E4_TSTORM_FCOE_TASK_AG_CTX_SEQ_INIT_CF_SHIFT		4
+#define E4_TSTORM_FCOE_TASK_AG_CTX_SEQ_RECOVERY_CF_MASK		0x3
+#define E4_TSTORM_FCOE_TASK_AG_CTX_SEQ_RECOVERY_CF_SHIFT	6
+	u8 flags3;
+#define E4_TSTORM_FCOE_TASK_AG_CTX_UNSOL_COMP_CF_MASK		0x3
+#define E4_TSTORM_FCOE_TASK_AG_CTX_UNSOL_COMP_CF_SHIFT		0
+#define E4_TSTORM_FCOE_TASK_AG_CTX_REC_RR_TOV_CF_EN_MASK	0x1
+#define E4_TSTORM_FCOE_TASK_AG_CTX_REC_RR_TOV_CF_EN_SHIFT	2
+#define E4_TSTORM_FCOE_TASK_AG_CTX_ED_TOV_CF_EN_MASK		0x1
+#define E4_TSTORM_FCOE_TASK_AG_CTX_ED_TOV_CF_EN_SHIFT		3
+#define E4_TSTORM_FCOE_TASK_AG_CTX_CF2EN_MASK			0x1
+#define E4_TSTORM_FCOE_TASK_AG_CTX_CF2EN_SHIFT			4
+#define E4_TSTORM_FCOE_TASK_AG_CTX_TIMER_STOP_ALL_EN_MASK	0x1
+#define E4_TSTORM_FCOE_TASK_AG_CTX_TIMER_STOP_ALL_EN_SHIFT	5
+#define E4_TSTORM_FCOE_TASK_AG_CTX_EX_CLEANUP_CF_EN_MASK	0x1
+#define E4_TSTORM_FCOE_TASK_AG_CTX_EX_CLEANUP_CF_EN_SHIFT	6
+#define E4_TSTORM_FCOE_TASK_AG_CTX_SEQ_INIT_CF_EN_MASK		0x1
+#define E4_TSTORM_FCOE_TASK_AG_CTX_SEQ_INIT_CF_EN_SHIFT		7
+	u8 flags4;
+#define E4_TSTORM_FCOE_TASK_AG_CTX_SEQ_RECOVERY_CF_EN_MASK	0x1
+#define E4_TSTORM_FCOE_TASK_AG_CTX_SEQ_RECOVERY_CF_EN_SHIFT	0
+#define E4_TSTORM_FCOE_TASK_AG_CTX_UNSOL_COMP_CF_EN_MASK	0x1
+#define E4_TSTORM_FCOE_TASK_AG_CTX_UNSOL_COMP_CF_EN_SHIFT	1
+#define E4_TSTORM_FCOE_TASK_AG_CTX_RULE0EN_MASK			0x1
+#define E4_TSTORM_FCOE_TASK_AG_CTX_RULE0EN_SHIFT		2
+#define E4_TSTORM_FCOE_TASK_AG_CTX_RULE1EN_MASK			0x1
+#define E4_TSTORM_FCOE_TASK_AG_CTX_RULE1EN_SHIFT		3
+#define E4_TSTORM_FCOE_TASK_AG_CTX_RULE2EN_MASK			0x1
+#define E4_TSTORM_FCOE_TASK_AG_CTX_RULE2EN_SHIFT		4
+#define E4_TSTORM_FCOE_TASK_AG_CTX_RULE3EN_MASK			0x1
+#define E4_TSTORM_FCOE_TASK_AG_CTX_RULE3EN_SHIFT		5
+#define E4_TSTORM_FCOE_TASK_AG_CTX_RULE4EN_MASK			0x1
+#define E4_TSTORM_FCOE_TASK_AG_CTX_RULE4EN_SHIFT		6
+#define E4_TSTORM_FCOE_TASK_AG_CTX_RULE5EN_MASK			0x1
+#define E4_TSTORM_FCOE_TASK_AG_CTX_RULE5EN_SHIFT		7
+	u8 cleanup_state;
+	__le16 last_sent_tid;
+	__le32 rec_rr_tov_exp_timeout;
+	u8 byte3;
+	u8 byte4;
+	__le16 word2;
+	__le16 word3;
+	__le16 word4;
+	__le32 data_offset_end_of_seq;
+	__le32 data_offset_next;
+};
+
+/* Expected relative offsets */
+struct fcoe_exp_ro {
+	__le32 data_offset;
+	__le32 reserved;
+};
+
+/* Union of Cleanup address \ expected relative offsets */
+union fcoe_cleanup_addr_exp_ro_union {
+	struct regpair abts_rsp_fc_payload_hi;
+	struct fcoe_exp_ro exp_ro;
+};
+
+/* Fields copied from ABTS rsp packet */
 struct fcoe_abts_pkt {
 	__le32 abts_rsp_fc_payload_lo;
 	__le16 abts_rsp_rx_id;
@@ -21,6 +307,215 @@ struct fcoe_abts_pkt {
 	u8 reserved2;
 };
 
+/* FW read-write (modifiable) part of the fcoe task storm context of Tstorm */
+struct fcoe_tstorm_fcoe_task_st_ctx_read_write {
+	union fcoe_cleanup_addr_exp_ro_union cleanup_addr_exp_ro_union;
+	__le16 flags;
+#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_RX_SGL_MODE_MASK	0x1
+#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_RX_SGL_MODE_SHIFT	0
+#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_EXP_FIRST_FRAME_MASK	0x1
+#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_EXP_FIRST_FRAME_SHIFT	1
+#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_SEQ_ACTIVE_MASK		0x1
+#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_SEQ_ACTIVE_SHIFT	2
+#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_SEQ_TIMEOUT_MASK	0x1
+#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_SEQ_TIMEOUT_SHIFT	3
+#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_SINGLE_PKT_IN_EX_MASK	0x1
+#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_SINGLE_PKT_IN_EX_SHIFT	4
+#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_OOO_RX_SEQ_STAT_MASK	0x1
+#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_OOO_RX_SEQ_STAT_SHIFT	5
+#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_CQ_ADD_ADV_MASK		0x3
+#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_CQ_ADD_ADV_SHIFT	6
+#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_RSRV1_MASK		0xFF
+#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_RSRV1_SHIFT		8
+	__le16 seq_cnt;
+	u8 seq_id;
+	u8 ooo_rx_seq_id;
+	__le16 rx_id;
+	struct fcoe_abts_pkt abts_data;
+	__le32 e_d_tov_exp_timeout_val;
+	__le16 ooo_rx_seq_cnt;
+	__le16 reserved1;
+};
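
The read-write half is the part firmware updates as an exchange progresses,
and the driver can observe it with the matching accessor. A sketch, assuming
GET_FIELD():

/* Sketch: test whether firmware still considers the sequence active. */
static bool example_seq_active(struct fcoe_tstorm_fcoe_task_st_ctx_read_write *rw)
{
	return GET_FIELD(le16_to_cpu(rw->flags),
			 FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_SEQ_ACTIVE);
}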
+
+/* FW read-only part of the fcoe task storm context of Tstorm */
+struct fcoe_tstorm_fcoe_task_st_ctx_read_only {
+	u8 task_type;
+	u8 dev_type;
+	u8 conf_supported;
+	u8 glbl_q_num;
+	__le32 cid;
+	__le32 fcp_cmd_trns_size;
+	__le32 rsrv;
+};
+
+/* The fcoe task storm context of Tstorm */
+struct tstorm_fcoe_task_st_ctx {
+	struct fcoe_tstorm_fcoe_task_st_ctx_read_write read_write;
+	struct fcoe_tstorm_fcoe_task_st_ctx_read_only read_only;
+};
+
+struct e4_mstorm_fcoe_task_ag_ctx {
+	u8 byte0;
+	u8 byte1;
+	__le16 icid;
+	u8 flags0;
+#define E4_MSTORM_FCOE_TASK_AG_CTX_CONNECTION_TYPE_MASK		0xF
+#define E4_MSTORM_FCOE_TASK_AG_CTX_CONNECTION_TYPE_SHIFT	0
+#define E4_MSTORM_FCOE_TASK_AG_CTX_EXIST_IN_QM0_MASK		0x1
+#define E4_MSTORM_FCOE_TASK_AG_CTX_EXIST_IN_QM0_SHIFT		4
+#define E4_MSTORM_FCOE_TASK_AG_CTX_CQE_PLACED_MASK		0x1
+#define E4_MSTORM_FCOE_TASK_AG_CTX_CQE_PLACED_SHIFT		5
+#define E4_MSTORM_FCOE_TASK_AG_CTX_BIT2_MASK			0x1
+#define E4_MSTORM_FCOE_TASK_AG_CTX_BIT2_SHIFT			6
+#define E4_MSTORM_FCOE_TASK_AG_CTX_BIT3_MASK			0x1
+#define E4_MSTORM_FCOE_TASK_AG_CTX_BIT3_SHIFT			7
+	u8 flags1;
+#define E4_MSTORM_FCOE_TASK_AG_CTX_EX_CLEANUP_CF_MASK		0x3
+#define E4_MSTORM_FCOE_TASK_AG_CTX_EX_CLEANUP_CF_SHIFT		0
+#define E4_MSTORM_FCOE_TASK_AG_CTX_CF1_MASK			0x3
+#define E4_MSTORM_FCOE_TASK_AG_CTX_CF1_SHIFT			2
+#define E4_MSTORM_FCOE_TASK_AG_CTX_CF2_MASK			0x3
+#define E4_MSTORM_FCOE_TASK_AG_CTX_CF2_SHIFT			4
+#define E4_MSTORM_FCOE_TASK_AG_CTX_EX_CLEANUP_CF_EN_MASK	0x1
+#define E4_MSTORM_FCOE_TASK_AG_CTX_EX_CLEANUP_CF_EN_SHIFT	6
+#define E4_MSTORM_FCOE_TASK_AG_CTX_CF1EN_MASK			0x1
+#define E4_MSTORM_FCOE_TASK_AG_CTX_CF1EN_SHIFT			7
+	u8 flags2;
+#define E4_MSTORM_FCOE_TASK_AG_CTX_CF2EN_MASK			0x1
+#define E4_MSTORM_FCOE_TASK_AG_CTX_CF2EN_SHIFT			0
+#define E4_MSTORM_FCOE_TASK_AG_CTX_RULE0EN_MASK			0x1
+#define E4_MSTORM_FCOE_TASK_AG_CTX_RULE0EN_SHIFT		1
+#define E4_MSTORM_FCOE_TASK_AG_CTX_RULE1EN_MASK			0x1
+#define E4_MSTORM_FCOE_TASK_AG_CTX_RULE1EN_SHIFT		2
+#define E4_MSTORM_FCOE_TASK_AG_CTX_RULE2EN_MASK			0x1
+#define E4_MSTORM_FCOE_TASK_AG_CTX_RULE2EN_SHIFT		3
+#define E4_MSTORM_FCOE_TASK_AG_CTX_RULE3EN_MASK			0x1
+#define E4_MSTORM_FCOE_TASK_AG_CTX_RULE3EN_SHIFT		4
+#define E4_MSTORM_FCOE_TASK_AG_CTX_RULE4EN_MASK			0x1
+#define E4_MSTORM_FCOE_TASK_AG_CTX_RULE4EN_SHIFT		5
+#define E4_MSTORM_FCOE_TASK_AG_CTX_XFER_PLACEMENT_EN_MASK	0x1
+#define E4_MSTORM_FCOE_TASK_AG_CTX_XFER_PLACEMENT_EN_SHIFT	6
+#define E4_MSTORM_FCOE_TASK_AG_CTX_RULE6EN_MASK			0x1
+#define E4_MSTORM_FCOE_TASK_AG_CTX_RULE6EN_SHIFT		7
+	u8 cleanup_state;
+	__le32 received_bytes;
+	u8 byte3;
+	u8 glbl_q_num;
+	__le16 word1;
+	__le16 tid_to_xfer;
+	__le16 word3;
+	__le16 word4;
+	__le16 word5;
+	__le32 expected_bytes;
+	__le32 reg2;
+};
+
+/* The fcoe task storm context of Mstorm */
+struct mstorm_fcoe_task_st_ctx {
+	struct regpair rsp_buf_addr;
+	__le32 rsrv[2];
+	struct scsi_sgl_params sgl_params;
+	__le32 data_2_trns_rem;
+	__le32 data_buffer_offset;
+	__le16 parent_id;
+	__le16 flags;
+#define MSTORM_FCOE_TASK_ST_CTX_INTERVAL_SIZE_LOG_MASK		0xF
+#define MSTORM_FCOE_TASK_ST_CTX_INTERVAL_SIZE_LOG_SHIFT		0
+#define MSTORM_FCOE_TASK_ST_CTX_HOST_INTERFACE_MASK		0x3
+#define MSTORM_FCOE_TASK_ST_CTX_HOST_INTERFACE_SHIFT		4
+#define MSTORM_FCOE_TASK_ST_CTX_DIF_TO_PEER_MASK		0x1
+#define MSTORM_FCOE_TASK_ST_CTX_DIF_TO_PEER_SHIFT		6
+#define MSTORM_FCOE_TASK_ST_CTX_MP_INCLUDE_FC_HEADER_MASK	0x1
+#define MSTORM_FCOE_TASK_ST_CTX_MP_INCLUDE_FC_HEADER_SHIFT	7
+#define MSTORM_FCOE_TASK_ST_CTX_DIX_BLOCK_SIZE_MASK		0x3
+#define MSTORM_FCOE_TASK_ST_CTX_DIX_BLOCK_SIZE_SHIFT		8
+#define MSTORM_FCOE_TASK_ST_CTX_VALIDATE_DIX_REF_TAG_MASK	0x1
+#define MSTORM_FCOE_TASK_ST_CTX_VALIDATE_DIX_REF_TAG_SHIFT	10
+#define MSTORM_FCOE_TASK_ST_CTX_DIX_CACHED_SGE_FLG_MASK		0x1
+#define MSTORM_FCOE_TASK_ST_CTX_DIX_CACHED_SGE_FLG_SHIFT	11
+#define MSTORM_FCOE_TASK_ST_CTX_DIF_SUPPORTED_MASK		0x1
+#define MSTORM_FCOE_TASK_ST_CTX_DIF_SUPPORTED_SHIFT		12
+#define MSTORM_FCOE_TASK_ST_CTX_TX_SGL_MODE_MASK		0x1
+#define MSTORM_FCOE_TASK_ST_CTX_TX_SGL_MODE_SHIFT		13
+#define MSTORM_FCOE_TASK_ST_CTX_RESERVED_MASK			0x3
+#define MSTORM_FCOE_TASK_ST_CTX_RESERVED_SHIFT			14
+	struct scsi_cached_sges data_desc;
+};
+
+struct e4_ustorm_fcoe_task_ag_ctx {
+	u8 reserved;
+	u8 byte1;
+	__le16 icid;
+	u8 flags0;
+#define E4_USTORM_FCOE_TASK_AG_CTX_CONNECTION_TYPE_MASK		0xF
+#define E4_USTORM_FCOE_TASK_AG_CTX_CONNECTION_TYPE_SHIFT	0
+#define E4_USTORM_FCOE_TASK_AG_CTX_EXIST_IN_QM0_MASK		0x1
+#define E4_USTORM_FCOE_TASK_AG_CTX_EXIST_IN_QM0_SHIFT		4
+#define E4_USTORM_FCOE_TASK_AG_CTX_BIT1_MASK			0x1
+#define E4_USTORM_FCOE_TASK_AG_CTX_BIT1_SHIFT			5
+#define E4_USTORM_FCOE_TASK_AG_CTX_CF0_MASK			0x3
+#define E4_USTORM_FCOE_TASK_AG_CTX_CF0_SHIFT			6
+	u8 flags1;
+#define E4_USTORM_FCOE_TASK_AG_CTX_CF1_MASK		0x3
+#define E4_USTORM_FCOE_TASK_AG_CTX_CF1_SHIFT		0
+#define E4_USTORM_FCOE_TASK_AG_CTX_CF2_MASK		0x3
+#define E4_USTORM_FCOE_TASK_AG_CTX_CF2_SHIFT		2
+#define E4_USTORM_FCOE_TASK_AG_CTX_CF3_MASK		0x3
+#define E4_USTORM_FCOE_TASK_AG_CTX_CF3_SHIFT		4
+#define E4_USTORM_FCOE_TASK_AG_CTX_DIF_ERROR_CF_MASK	0x3
+#define E4_USTORM_FCOE_TASK_AG_CTX_DIF_ERROR_CF_SHIFT	6
+	u8 flags2;
+#define E4_USTORM_FCOE_TASK_AG_CTX_CF0EN_MASK			0x1
+#define E4_USTORM_FCOE_TASK_AG_CTX_CF0EN_SHIFT			0
+#define E4_USTORM_FCOE_TASK_AG_CTX_CF1EN_MASK			0x1
+#define E4_USTORM_FCOE_TASK_AG_CTX_CF1EN_SHIFT			1
+#define E4_USTORM_FCOE_TASK_AG_CTX_CF2EN_MASK			0x1
+#define E4_USTORM_FCOE_TASK_AG_CTX_CF2EN_SHIFT			2
+#define E4_USTORM_FCOE_TASK_AG_CTX_CF3EN_MASK			0x1
+#define E4_USTORM_FCOE_TASK_AG_CTX_CF3EN_SHIFT			3
+#define E4_USTORM_FCOE_TASK_AG_CTX_DIF_ERROR_CF_EN_MASK		0x1
+#define E4_USTORM_FCOE_TASK_AG_CTX_DIF_ERROR_CF_EN_SHIFT	4
+#define E4_USTORM_FCOE_TASK_AG_CTX_RULE0EN_MASK			0x1
+#define E4_USTORM_FCOE_TASK_AG_CTX_RULE0EN_SHIFT		5
+#define E4_USTORM_FCOE_TASK_AG_CTX_RULE1EN_MASK			0x1
+#define E4_USTORM_FCOE_TASK_AG_CTX_RULE1EN_SHIFT		6
+#define E4_USTORM_FCOE_TASK_AG_CTX_RULE2EN_MASK			0x1
+#define E4_USTORM_FCOE_TASK_AG_CTX_RULE2EN_SHIFT		7
+	u8 flags3;
+#define E4_USTORM_FCOE_TASK_AG_CTX_RULE3EN_MASK		0x1
+#define E4_USTORM_FCOE_TASK_AG_CTX_RULE3EN_SHIFT	0
+#define E4_USTORM_FCOE_TASK_AG_CTX_RULE4EN_MASK		0x1
+#define E4_USTORM_FCOE_TASK_AG_CTX_RULE4EN_SHIFT	1
+#define E4_USTORM_FCOE_TASK_AG_CTX_RULE5EN_MASK		0x1
+#define E4_USTORM_FCOE_TASK_AG_CTX_RULE5EN_SHIFT	2
+#define E4_USTORM_FCOE_TASK_AG_CTX_RULE6EN_MASK		0x1
+#define E4_USTORM_FCOE_TASK_AG_CTX_RULE6EN_SHIFT	3
+#define E4_USTORM_FCOE_TASK_AG_CTX_DIF_ERROR_TYPE_MASK	0xF
+#define E4_USTORM_FCOE_TASK_AG_CTX_DIF_ERROR_TYPE_SHIFT	4
+	__le32 dif_err_intervals;
+	__le32 dif_error_1st_interval;
+	__le32 global_cq_num;
+	__le32 reg3;
+	__le32 reg4;
+	__le32 reg5;
+};
+
+/* FCoE task context */
+struct e4_fcoe_task_context {
+	struct ystorm_fcoe_task_st_ctx ystorm_st_context;
+	struct regpair ystorm_st_padding[2];
+	struct tdif_task_context tdif_context;
+	struct e4_ystorm_fcoe_task_ag_ctx ystorm_ag_context;
+	struct e4_tstorm_fcoe_task_ag_ctx tstorm_ag_context;
+	struct timers_context timer_context;
+	struct tstorm_fcoe_task_st_ctx tstorm_st_context;
+	struct regpair tstorm_st_padding[2];
+	struct e4_mstorm_fcoe_task_ag_ctx mstorm_ag_context;
+	struct mstorm_fcoe_task_st_ctx mstorm_st_context;
+	struct e4_ustorm_fcoe_task_ag_ctx ustorm_ag_context;
+	struct rdif_task_context rdif_context;
+};
+
 /* FCoE additional WQE (Sq/XferQ) information */
 union fcoe_additional_info_union {
 	__le32 previous_tid;
@@ -29,16 +524,6 @@ union fcoe_additional_info_union {
 	__le32 seq_rec_updated_offset;
 };
 
-struct fcoe_exp_ro {
-	__le32 data_offset;
-	__le32 reserved;
-};
-
-union fcoe_cleanup_addr_exp_ro_union {
-	struct regpair abts_rsp_fc_payload_hi;
-	struct fcoe_exp_ro exp_ro;
-};
-
 /* FCoE Ramrod Command IDs */
 enum fcoe_completion_status {
 	FCOE_COMPLETION_STATUS_SUCCESS,
@@ -47,6 +532,7 @@ enum fcoe_completion_status {
 	MAX_FCOE_COMPLETION_STATUS
 };
 
+/* FC address (SID/DID) network presentation */
 struct fc_addr_nw {
 	u8 addr_lo;
 	u8 addr_mid;
@@ -74,30 +560,32 @@ struct fcoe_conn_offload_ramrod_data {
 	__le16 e_d_tov_timer_val;
 	__le16 rx_max_fc_pay_len;
 	__le16 vlan_tag;
-#define FCOE_CONN_OFFLOAD_RAMROD_DATA_VLAN_ID_MASK              0xFFF
-#define FCOE_CONN_OFFLOAD_RAMROD_DATA_VLAN_ID_SHIFT             0
-#define FCOE_CONN_OFFLOAD_RAMROD_DATA_CFI_MASK                  0x1
-#define FCOE_CONN_OFFLOAD_RAMROD_DATA_CFI_SHIFT                 12
-#define FCOE_CONN_OFFLOAD_RAMROD_DATA_PRIORITY_MASK             0x7
-#define FCOE_CONN_OFFLOAD_RAMROD_DATA_PRIORITY_SHIFT            13
+#define FCOE_CONN_OFFLOAD_RAMROD_DATA_VLAN_ID_MASK	0xFFF
+#define FCOE_CONN_OFFLOAD_RAMROD_DATA_VLAN_ID_SHIFT	0
+#define FCOE_CONN_OFFLOAD_RAMROD_DATA_CFI_MASK		0x1
+#define FCOE_CONN_OFFLOAD_RAMROD_DATA_CFI_SHIFT		12
+#define FCOE_CONN_OFFLOAD_RAMROD_DATA_PRIORITY_MASK	0x7
+#define FCOE_CONN_OFFLOAD_RAMROD_DATA_PRIORITY_SHIFT	13
 	__le16 physical_q0;
 	__le16 rec_rr_tov_timer_val;
 	struct fc_addr_nw s_id;
 	u8 max_conc_seqs_c3;
 	struct fc_addr_nw d_id;
 	u8 flags;
-#define FCOE_CONN_OFFLOAD_RAMROD_DATA_B_CONT_INCR_SEQ_CNT_MASK  0x1
-#define FCOE_CONN_OFFLOAD_RAMROD_DATA_B_CONT_INCR_SEQ_CNT_SHIFT 0
-#define FCOE_CONN_OFFLOAD_RAMROD_DATA_B_CONF_REQ_MASK           0x1
-#define FCOE_CONN_OFFLOAD_RAMROD_DATA_B_CONF_REQ_SHIFT          1
-#define FCOE_CONN_OFFLOAD_RAMROD_DATA_B_REC_VALID_MASK          0x1
-#define FCOE_CONN_OFFLOAD_RAMROD_DATA_B_REC_VALID_SHIFT         2
-#define FCOE_CONN_OFFLOAD_RAMROD_DATA_B_VLAN_FLAG_MASK          0x1
-#define FCOE_CONN_OFFLOAD_RAMROD_DATA_B_VLAN_FLAG_SHIFT         3
-#define FCOE_CONN_OFFLOAD_RAMROD_DATA_MODE_MASK                 0x3
-#define FCOE_CONN_OFFLOAD_RAMROD_DATA_MODE_SHIFT                4
-#define FCOE_CONN_OFFLOAD_RAMROD_DATA_RESERVED0_MASK            0x3
-#define FCOE_CONN_OFFLOAD_RAMROD_DATA_RESERVED0_SHIFT           6
+#define FCOE_CONN_OFFLOAD_RAMROD_DATA_B_CONT_INCR_SEQ_CNT_MASK	0x1
+#define FCOE_CONN_OFFLOAD_RAMROD_DATA_B_CONT_INCR_SEQ_CNT_SHIFT	0
+#define FCOE_CONN_OFFLOAD_RAMROD_DATA_B_CONF_REQ_MASK		0x1
+#define FCOE_CONN_OFFLOAD_RAMROD_DATA_B_CONF_REQ_SHIFT		1
+#define FCOE_CONN_OFFLOAD_RAMROD_DATA_B_REC_VALID_MASK		0x1
+#define FCOE_CONN_OFFLOAD_RAMROD_DATA_B_REC_VALID_SHIFT		2
+#define FCOE_CONN_OFFLOAD_RAMROD_DATA_B_VLAN_FLAG_MASK		0x1
+#define FCOE_CONN_OFFLOAD_RAMROD_DATA_B_VLAN_FLAG_SHIFT		3
+#define FCOE_CONN_OFFLOAD_RAMROD_DATA_B_SINGLE_VLAN_MASK	0x1
+#define FCOE_CONN_OFFLOAD_RAMROD_DATA_B_SINGLE_VLAN_SHIFT	4
+#define FCOE_CONN_OFFLOAD_RAMROD_DATA_MODE_MASK			0x3
+#define FCOE_CONN_OFFLOAD_RAMROD_DATA_MODE_SHIFT		5
+#define FCOE_CONN_OFFLOAD_RAMROD_DATA_RESERVED0_MASK		0x1
+#define FCOE_CONN_OFFLOAD_RAMROD_DATA_RESERVED0_SHIFT		7
 	__le16 conn_id;
 	u8 def_q_idx;
 	u8 reserved[5];
@@ -108,19 +596,14 @@ struct fcoe_conn_terminate_ramrod_data {
 	struct regpair terminate_params_addr;
 };
 
-struct fcoe_slow_sgl_ctx {
-	struct regpair base_sgl_addr;
-	__le16 curr_sge_off;
-	__le16 remainder_num_sges;
-	__le16 curr_sgl_index;
-	__le16 reserved;
+/* FCoE device type */
+enum fcoe_device_type {
+	FCOE_TASK_DEV_TYPE_DISK,
+	FCOE_TASK_DEV_TYPE_TAPE,
+	MAX_FCOE_DEVICE_TYPE
 };
 
-union fcoe_dix_desc_ctx {
-	struct fcoe_slow_sgl_ctx dix_sgl;
-	struct scsi_sge cached_dix_sge;
-};
-
+/* Data sgl */
 struct fcoe_fast_sgl_ctx {
 	struct regpair sgl_start_addr;
 	__le32 sgl_byte_offset;
@@ -128,25 +611,13 @@ struct fcoe_fast_sgl_ctx {
 	__le16 init_offset_in_first_sge;
 };
 
-struct fcoe_fcp_cmd_payload {
-	__le32 opaque[8];
-};
-
-struct fcoe_fcp_rsp_payload {
-	__le32 opaque[6];
-};
-
-struct fcoe_fcp_xfer_payload {
-	__le32 opaque[3];
-};
-
 /* FCoE firmware function init */
 struct fcoe_init_func_ramrod_data {
 	struct scsi_init_func_params func_params;
 	struct scsi_init_func_queues q_params;
 	__le16 mtu;
 	__le16 sq_num_pages_in_pbl;
-	__le32 reserved;
+	__le32 reserved[3];
 };
 
 /* FCoE: Mode of the connection: Target or Initiator or both */
@@ -157,6 +628,7 @@ enum fcoe_mode_type {
 	MAX_FCOE_MODE_TYPE
 };
 
+/* Per PF FCoE receive path statistics - tStorm RAM structure */
 struct fcoe_rx_stat {
 	struct regpair fcoe_rx_byte_cnt;
 	struct regpair fcoe_rx_data_pkt_cnt;
@@ -170,447 +642,47 @@ struct fcoe_rx_stat {
 	__le32 rsrv;
 };
 
+/* FCoE SQE request type */
+enum fcoe_sqe_request_type {
+	SEND_FCOE_CMD,
+	SEND_FCOE_MIDPATH,
+	SEND_FCOE_ABTS_REQUEST,
+	FCOE_EXCHANGE_CLEANUP,
+	FCOE_SEQUENCE_RECOVERY,
+	SEND_FCOE_XFER_RDY,
+	SEND_FCOE_RSP,
+	SEND_FCOE_RSP_WITH_SENSE_DATA,
+	SEND_FCOE_TARGET_DATA,
+	SEND_FCOE_INITIATOR_DATA,
+	SEND_FCOE_XFER_CONTINUATION_RDY,
+	SEND_FCOE_TARGET_ABTS_RSP,
+	MAX_FCOE_SQE_REQUEST_TYPE
+};
+
+/* FCoE statistics request */
 struct fcoe_stat_ramrod_data {
 	struct regpair stat_params_addr;
 };
 
-struct protection_info_ctx {
-	__le16 flags;
-#define PROTECTION_INFO_CTX_HOST_INTERFACE_MASK        0x3
-#define PROTECTION_INFO_CTX_HOST_INTERFACE_SHIFT       0
-#define PROTECTION_INFO_CTX_DIF_TO_PEER_MASK           0x1
-#define PROTECTION_INFO_CTX_DIF_TO_PEER_SHIFT          2
-#define PROTECTION_INFO_CTX_VALIDATE_DIX_APP_TAG_MASK  0x1
-#define PROTECTION_INFO_CTX_VALIDATE_DIX_APP_TAG_SHIFT 3
-#define PROTECTION_INFO_CTX_INTERVAL_SIZE_LOG_MASK     0xF
-#define PROTECTION_INFO_CTX_INTERVAL_SIZE_LOG_SHIFT    4
-#define PROTECTION_INFO_CTX_VALIDATE_DIX_REF_TAG_MASK  0x1
-#define PROTECTION_INFO_CTX_VALIDATE_DIX_REF_TAG_SHIFT 8
-#define PROTECTION_INFO_CTX_RESERVED0_MASK             0x7F
-#define PROTECTION_INFO_CTX_RESERVED0_SHIFT            9
-	u8 dix_block_size;
-	u8 dst_size;
+/* FCoE task type */
+enum fcoe_task_type {
+	FCOE_TASK_TYPE_WRITE_INITIATOR,
+	FCOE_TASK_TYPE_READ_INITIATOR,
+	FCOE_TASK_TYPE_MIDPATH,
+	FCOE_TASK_TYPE_UNSOLICITED,
+	FCOE_TASK_TYPE_ABTS,
+	FCOE_TASK_TYPE_EXCHANGE_CLEANUP,
+	FCOE_TASK_TYPE_SEQUENCE_CLEANUP,
+	FCOE_TASK_TYPE_WRITE_TARGET,
+	FCOE_TASK_TYPE_READ_TARGET,
+	FCOE_TASK_TYPE_RSP,
+	FCOE_TASK_TYPE_RSP_SENSE_DATA,
+	FCOE_TASK_TYPE_ABTS_TARGET,
+	FCOE_TASK_TYPE_ENUM_SIZE,
+	MAX_FCOE_TASK_TYPE
 };
 
-union protection_info_union_ctx {
-	struct protection_info_ctx info;
-	__le32 value;
-};
-
-struct fcp_rsp_payload_padded {
-	struct fcoe_fcp_rsp_payload rsp_payload;
-	__le32 reserved[2];
-};
-
-struct fcp_xfer_payload_padded {
-	struct fcoe_fcp_xfer_payload xfer_payload;
-	__le32 reserved[5];
-};
-
-struct fcoe_tx_data_params {
-	__le32 data_offset;
-	__le32 offset_in_io;
-	u8 flags;
-#define FCOE_TX_DATA_PARAMS_OFFSET_IN_IO_VALID_MASK  0x1
-#define FCOE_TX_DATA_PARAMS_OFFSET_IN_IO_VALID_SHIFT 0
-#define FCOE_TX_DATA_PARAMS_DROP_DATA_MASK           0x1
-#define FCOE_TX_DATA_PARAMS_DROP_DATA_SHIFT          1
-#define FCOE_TX_DATA_PARAMS_AFTER_SEQ_REC_MASK       0x1
-#define FCOE_TX_DATA_PARAMS_AFTER_SEQ_REC_SHIFT      2
-#define FCOE_TX_DATA_PARAMS_RESERVED0_MASK           0x1F
-#define FCOE_TX_DATA_PARAMS_RESERVED0_SHIFT          3
-	u8 dif_residual;
-	__le16 seq_cnt;
-	__le16 single_sge_saved_offset;
-	__le16 next_dif_offset;
-	__le16 seq_id;
-	__le16 reserved3;
-};
-
-struct fcoe_tx_mid_path_params {
-	__le32 parameter;
-	u8 r_ctl;
-	u8 type;
-	u8 cs_ctl;
-	u8 df_ctl;
-	__le16 rx_id;
-	__le16 ox_id;
-};
-
-struct fcoe_tx_params {
-	struct fcoe_tx_data_params data;
-	struct fcoe_tx_mid_path_params mid_path;
-};
-
-union fcoe_tx_info_union_ctx {
-	struct fcoe_fcp_cmd_payload fcp_cmd_payload;
-	struct fcp_rsp_payload_padded fcp_rsp_payload;
-	struct fcp_xfer_payload_padded fcp_xfer_payload;
-	struct fcoe_tx_params tx_params;
-};
-
-struct ystorm_fcoe_task_st_ctx {
-	u8 task_type;
-	u8 sgl_mode;
-#define YSTORM_FCOE_TASK_ST_CTX_TX_SGL_MODE_MASK  0x1
-#define YSTORM_FCOE_TASK_ST_CTX_TX_SGL_MODE_SHIFT 0
-#define YSTORM_FCOE_TASK_ST_CTX_RSRV_MASK         0x7F
-#define YSTORM_FCOE_TASK_ST_CTX_RSRV_SHIFT        1
-	u8 cached_dix_sge;
-	u8 expect_first_xfer;
-	__le32 num_pbf_zero_write;
-	union protection_info_union_ctx protection_info_union;
-	__le32 data_2_trns_rem;
-	struct scsi_sgl_params sgl_params;
-	u8 reserved1[12];
-	union fcoe_tx_info_union_ctx tx_info_union;
-	union fcoe_dix_desc_ctx dix_desc;
-	struct scsi_cached_sges data_desc;
-	__le16 ox_id;
-	__le16 rx_id;
-	__le32 task_rety_identifier;
-	u8 reserved2[8];
-};
-
-struct ystorm_fcoe_task_ag_ctx {
-	u8 byte0;
-	u8 byte1;
-	__le16 word0;
-	u8 flags0;
-#define YSTORM_FCOE_TASK_AG_CTX_NIBBLE0_MASK     0xF
-#define YSTORM_FCOE_TASK_AG_CTX_NIBBLE0_SHIFT    0
-#define YSTORM_FCOE_TASK_AG_CTX_BIT0_MASK        0x1
-#define YSTORM_FCOE_TASK_AG_CTX_BIT0_SHIFT       4
-#define YSTORM_FCOE_TASK_AG_CTX_BIT1_MASK        0x1
-#define YSTORM_FCOE_TASK_AG_CTX_BIT1_SHIFT       5
-#define YSTORM_FCOE_TASK_AG_CTX_BIT2_MASK        0x1
-#define YSTORM_FCOE_TASK_AG_CTX_BIT2_SHIFT       6
-#define YSTORM_FCOE_TASK_AG_CTX_BIT3_MASK        0x1
-#define YSTORM_FCOE_TASK_AG_CTX_BIT3_SHIFT       7
-	u8 flags1;
-#define YSTORM_FCOE_TASK_AG_CTX_CF0_MASK         0x3
-#define YSTORM_FCOE_TASK_AG_CTX_CF0_SHIFT        0
-#define YSTORM_FCOE_TASK_AG_CTX_CF1_MASK         0x3
-#define YSTORM_FCOE_TASK_AG_CTX_CF1_SHIFT        2
-#define YSTORM_FCOE_TASK_AG_CTX_CF2SPECIAL_MASK  0x3
-#define YSTORM_FCOE_TASK_AG_CTX_CF2SPECIAL_SHIFT 4
-#define YSTORM_FCOE_TASK_AG_CTX_CF0EN_MASK       0x1
-#define YSTORM_FCOE_TASK_AG_CTX_CF0EN_SHIFT      6
-#define YSTORM_FCOE_TASK_AG_CTX_CF1EN_MASK       0x1
-#define YSTORM_FCOE_TASK_AG_CTX_CF1EN_SHIFT      7
-	u8 flags2;
-#define YSTORM_FCOE_TASK_AG_CTX_BIT4_MASK        0x1
-#define YSTORM_FCOE_TASK_AG_CTX_BIT4_SHIFT       0
-#define YSTORM_FCOE_TASK_AG_CTX_RULE0EN_MASK     0x1
-#define YSTORM_FCOE_TASK_AG_CTX_RULE0EN_SHIFT    1
-#define YSTORM_FCOE_TASK_AG_CTX_RULE1EN_MASK     0x1
-#define YSTORM_FCOE_TASK_AG_CTX_RULE1EN_SHIFT    2
-#define YSTORM_FCOE_TASK_AG_CTX_RULE2EN_MASK     0x1
-#define YSTORM_FCOE_TASK_AG_CTX_RULE2EN_SHIFT    3
-#define YSTORM_FCOE_TASK_AG_CTX_RULE3EN_MASK     0x1
-#define YSTORM_FCOE_TASK_AG_CTX_RULE3EN_SHIFT    4
-#define YSTORM_FCOE_TASK_AG_CTX_RULE4EN_MASK     0x1
-#define YSTORM_FCOE_TASK_AG_CTX_RULE4EN_SHIFT    5
-#define YSTORM_FCOE_TASK_AG_CTX_RULE5EN_MASK     0x1
-#define YSTORM_FCOE_TASK_AG_CTX_RULE5EN_SHIFT    6
-#define YSTORM_FCOE_TASK_AG_CTX_RULE6EN_MASK     0x1
-#define YSTORM_FCOE_TASK_AG_CTX_RULE6EN_SHIFT    7
-	u8 byte2;
-	__le32 reg0;
-	u8 byte3;
-	u8 byte4;
-	__le16 rx_id;
-	__le16 word2;
-	__le16 word3;
-	__le16 word4;
-	__le16 word5;
-	__le32 reg1;
-	__le32 reg2;
-};
-
-struct tstorm_fcoe_task_ag_ctx {
-	u8 reserved;
-	u8 byte1;
-	__le16 icid;
-	u8 flags0;
-#define TSTORM_FCOE_TASK_AG_CTX_CONNECTION_TYPE_MASK     0xF
-#define TSTORM_FCOE_TASK_AG_CTX_CONNECTION_TYPE_SHIFT    0
-#define TSTORM_FCOE_TASK_AG_CTX_EXIST_IN_QM0_MASK        0x1
-#define TSTORM_FCOE_TASK_AG_CTX_EXIST_IN_QM0_SHIFT       4
-#define TSTORM_FCOE_TASK_AG_CTX_BIT1_MASK                0x1
-#define TSTORM_FCOE_TASK_AG_CTX_BIT1_SHIFT               5
-#define TSTORM_FCOE_TASK_AG_CTX_WAIT_ABTS_RSP_F_MASK     0x1
-#define TSTORM_FCOE_TASK_AG_CTX_WAIT_ABTS_RSP_F_SHIFT    6
-#define TSTORM_FCOE_TASK_AG_CTX_VALID_MASK               0x1
-#define TSTORM_FCOE_TASK_AG_CTX_VALID_SHIFT              7
-	u8 flags1;
-#define TSTORM_FCOE_TASK_AG_CTX_FALSE_RR_TOV_MASK        0x1
-#define TSTORM_FCOE_TASK_AG_CTX_FALSE_RR_TOV_SHIFT       0
-#define TSTORM_FCOE_TASK_AG_CTX_BIT5_MASK                0x1
-#define TSTORM_FCOE_TASK_AG_CTX_BIT5_SHIFT               1
-#define TSTORM_FCOE_TASK_AG_CTX_REC_RR_TOV_CF_MASK       0x3
-#define TSTORM_FCOE_TASK_AG_CTX_REC_RR_TOV_CF_SHIFT      2
-#define TSTORM_FCOE_TASK_AG_CTX_ED_TOV_CF_MASK           0x3
-#define TSTORM_FCOE_TASK_AG_CTX_ED_TOV_CF_SHIFT          4
-#define TSTORM_FCOE_TASK_AG_CTX_CF2_MASK                 0x3
-#define TSTORM_FCOE_TASK_AG_CTX_CF2_SHIFT                6
-	u8 flags2;
-#define TSTORM_FCOE_TASK_AG_CTX_TIMER_STOP_ALL_MASK      0x3
-#define TSTORM_FCOE_TASK_AG_CTX_TIMER_STOP_ALL_SHIFT     0
-#define TSTORM_FCOE_TASK_AG_CTX_EX_CLEANUP_CF_MASK       0x3
-#define TSTORM_FCOE_TASK_AG_CTX_EX_CLEANUP_CF_SHIFT      2
-#define TSTORM_FCOE_TASK_AG_CTX_SEQ_INIT_CF_MASK         0x3
-#define TSTORM_FCOE_TASK_AG_CTX_SEQ_INIT_CF_SHIFT        4
-#define TSTORM_FCOE_TASK_AG_CTX_SEQ_RECOVERY_CF_MASK     0x3
-#define TSTORM_FCOE_TASK_AG_CTX_SEQ_RECOVERY_CF_SHIFT    6
-	u8 flags3;
-#define TSTORM_FCOE_TASK_AG_CTX_UNSOL_COMP_CF_MASK       0x3
-#define TSTORM_FCOE_TASK_AG_CTX_UNSOL_COMP_CF_SHIFT      0
-#define TSTORM_FCOE_TASK_AG_CTX_REC_RR_TOV_CF_EN_MASK    0x1
-#define TSTORM_FCOE_TASK_AG_CTX_REC_RR_TOV_CF_EN_SHIFT   2
-#define TSTORM_FCOE_TASK_AG_CTX_ED_TOV_CF_EN_MASK        0x1
-#define TSTORM_FCOE_TASK_AG_CTX_ED_TOV_CF_EN_SHIFT       3
-#define TSTORM_FCOE_TASK_AG_CTX_CF2EN_MASK               0x1
-#define TSTORM_FCOE_TASK_AG_CTX_CF2EN_SHIFT              4
-#define TSTORM_FCOE_TASK_AG_CTX_TIMER_STOP_ALL_EN_MASK   0x1
-#define TSTORM_FCOE_TASK_AG_CTX_TIMER_STOP_ALL_EN_SHIFT  5
-#define TSTORM_FCOE_TASK_AG_CTX_EX_CLEANUP_CF_EN_MASK    0x1
-#define TSTORM_FCOE_TASK_AG_CTX_EX_CLEANUP_CF_EN_SHIFT   6
-#define TSTORM_FCOE_TASK_AG_CTX_SEQ_INIT_CF_EN_MASK      0x1
-#define TSTORM_FCOE_TASK_AG_CTX_SEQ_INIT_CF_EN_SHIFT     7
-	u8 flags4;
-#define TSTORM_FCOE_TASK_AG_CTX_SEQ_RECOVERY_CF_EN_MASK  0x1
-#define TSTORM_FCOE_TASK_AG_CTX_SEQ_RECOVERY_CF_EN_SHIFT 0
-#define TSTORM_FCOE_TASK_AG_CTX_UNSOL_COMP_CF_EN_MASK    0x1
-#define TSTORM_FCOE_TASK_AG_CTX_UNSOL_COMP_CF_EN_SHIFT   1
-#define TSTORM_FCOE_TASK_AG_CTX_RULE0EN_MASK             0x1
-#define TSTORM_FCOE_TASK_AG_CTX_RULE0EN_SHIFT            2
-#define TSTORM_FCOE_TASK_AG_CTX_RULE1EN_MASK             0x1
-#define TSTORM_FCOE_TASK_AG_CTX_RULE1EN_SHIFT            3
-#define TSTORM_FCOE_TASK_AG_CTX_RULE2EN_MASK             0x1
-#define TSTORM_FCOE_TASK_AG_CTX_RULE2EN_SHIFT            4
-#define TSTORM_FCOE_TASK_AG_CTX_RULE3EN_MASK             0x1
-#define TSTORM_FCOE_TASK_AG_CTX_RULE3EN_SHIFT            5
-#define TSTORM_FCOE_TASK_AG_CTX_RULE4EN_MASK             0x1
-#define TSTORM_FCOE_TASK_AG_CTX_RULE4EN_SHIFT            6
-#define TSTORM_FCOE_TASK_AG_CTX_RULE5EN_MASK             0x1
-#define TSTORM_FCOE_TASK_AG_CTX_RULE5EN_SHIFT            7
-	u8 cleanup_state;
-	__le16 last_sent_tid;
-	__le32 rec_rr_tov_exp_timeout;
-	u8 byte3;
-	u8 byte4;
-	__le16 word2;
-	__le16 word3;
-	__le16 word4;
-	__le32 data_offset_end_of_seq;
-	__le32 data_offset_next;
-};
-
-struct fcoe_tstorm_fcoe_task_st_ctx_read_write {
-	union fcoe_cleanup_addr_exp_ro_union cleanup_addr_exp_ro_union;
-	__le16 flags;
-#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_RX_SGL_MODE_MASK       0x1
-#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_RX_SGL_MODE_SHIFT      0
-#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_EXP_FIRST_FRAME_MASK   0x1
-#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_EXP_FIRST_FRAME_SHIFT  1
-#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_SEQ_ACTIVE_MASK        0x1
-#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_SEQ_ACTIVE_SHIFT       2
-#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_SEQ_TIMEOUT_MASK       0x1
-#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_SEQ_TIMEOUT_SHIFT      3
-#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_SINGLE_PKT_IN_EX_MASK  0x1
-#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_SINGLE_PKT_IN_EX_SHIFT 4
-#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_OOO_RX_SEQ_STAT_MASK   0x1
-#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_OOO_RX_SEQ_STAT_SHIFT  5
-#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_CQ_ADD_ADV_MASK        0x3
-#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_CQ_ADD_ADV_SHIFT       6
-#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_RSRV1_MASK             0xFF
-#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_RSRV1_SHIFT            8
-	__le16 seq_cnt;
-	u8 seq_id;
-	u8 ooo_rx_seq_id;
-	__le16 rx_id;
-	struct fcoe_abts_pkt abts_data;
-	__le32 e_d_tov_exp_timeout_val;
-	__le16 ooo_rx_seq_cnt;
-	__le16 reserved1;
-};
-
-struct fcoe_tstorm_fcoe_task_st_ctx_read_only {
-	u8 task_type;
-	u8 dev_type;
-	u8 conf_supported;
-	u8 glbl_q_num;
-	__le32 cid;
-	__le32 fcp_cmd_trns_size;
-	__le32 rsrv;
-};
-
-struct tstorm_fcoe_task_st_ctx {
-	struct fcoe_tstorm_fcoe_task_st_ctx_read_write read_write;
-	struct fcoe_tstorm_fcoe_task_st_ctx_read_only read_only;
-};
-
-struct mstorm_fcoe_task_ag_ctx {
-	u8 byte0;
-	u8 byte1;
-	__le16 icid;
-	u8 flags0;
-#define MSTORM_FCOE_TASK_AG_CTX_CONNECTION_TYPE_MASK    0xF
-#define MSTORM_FCOE_TASK_AG_CTX_CONNECTION_TYPE_SHIFT   0
-#define MSTORM_FCOE_TASK_AG_CTX_EXIST_IN_QM0_MASK       0x1
-#define MSTORM_FCOE_TASK_AG_CTX_EXIST_IN_QM0_SHIFT      4
-#define MSTORM_FCOE_TASK_AG_CTX_CQE_PLACED_MASK         0x1
-#define MSTORM_FCOE_TASK_AG_CTX_CQE_PLACED_SHIFT        5
-#define MSTORM_FCOE_TASK_AG_CTX_BIT2_MASK               0x1
-#define MSTORM_FCOE_TASK_AG_CTX_BIT2_SHIFT              6
-#define MSTORM_FCOE_TASK_AG_CTX_BIT3_MASK               0x1
-#define MSTORM_FCOE_TASK_AG_CTX_BIT3_SHIFT              7
-	u8 flags1;
-#define MSTORM_FCOE_TASK_AG_CTX_EX_CLEANUP_CF_MASK      0x3
-#define MSTORM_FCOE_TASK_AG_CTX_EX_CLEANUP_CF_SHIFT     0
-#define MSTORM_FCOE_TASK_AG_CTX_CF1_MASK                0x3
-#define MSTORM_FCOE_TASK_AG_CTX_CF1_SHIFT               2
-#define MSTORM_FCOE_TASK_AG_CTX_CF2_MASK                0x3
-#define MSTORM_FCOE_TASK_AG_CTX_CF2_SHIFT               4
-#define MSTORM_FCOE_TASK_AG_CTX_EX_CLEANUP_CF_EN_MASK   0x1
-#define MSTORM_FCOE_TASK_AG_CTX_EX_CLEANUP_CF_EN_SHIFT  6
-#define MSTORM_FCOE_TASK_AG_CTX_CF1EN_MASK              0x1
-#define MSTORM_FCOE_TASK_AG_CTX_CF1EN_SHIFT             7
-	u8 flags2;
-#define MSTORM_FCOE_TASK_AG_CTX_CF2EN_MASK              0x1
-#define MSTORM_FCOE_TASK_AG_CTX_CF2EN_SHIFT             0
-#define MSTORM_FCOE_TASK_AG_CTX_RULE0EN_MASK            0x1
-#define MSTORM_FCOE_TASK_AG_CTX_RULE0EN_SHIFT           1
-#define MSTORM_FCOE_TASK_AG_CTX_RULE1EN_MASK            0x1
-#define MSTORM_FCOE_TASK_AG_CTX_RULE1EN_SHIFT           2
-#define MSTORM_FCOE_TASK_AG_CTX_RULE2EN_MASK            0x1
-#define MSTORM_FCOE_TASK_AG_CTX_RULE2EN_SHIFT           3
-#define MSTORM_FCOE_TASK_AG_CTX_RULE3EN_MASK            0x1
-#define MSTORM_FCOE_TASK_AG_CTX_RULE3EN_SHIFT           4
-#define MSTORM_FCOE_TASK_AG_CTX_RULE4EN_MASK            0x1
-#define MSTORM_FCOE_TASK_AG_CTX_RULE4EN_SHIFT           5
-#define MSTORM_FCOE_TASK_AG_CTX_XFER_PLACEMENT_EN_MASK  0x1
-#define MSTORM_FCOE_TASK_AG_CTX_XFER_PLACEMENT_EN_SHIFT 6
-#define MSTORM_FCOE_TASK_AG_CTX_RULE6EN_MASK            0x1
-#define MSTORM_FCOE_TASK_AG_CTX_RULE6EN_SHIFT           7
-	u8 cleanup_state;
-	__le32 received_bytes;
-	u8 byte3;
-	u8 glbl_q_num;
-	__le16 word1;
-	__le16 tid_to_xfer;
-	__le16 word3;
-	__le16 word4;
-	__le16 word5;
-	__le32 expected_bytes;
-	__le32 reg2;
-};
-
-struct mstorm_fcoe_task_st_ctx {
-	struct regpair rsp_buf_addr;
-	__le32 rsrv[2];
-	struct scsi_sgl_params sgl_params;
-	__le32 data_2_trns_rem;
-	__le32 data_buffer_offset;
-	__le16 parent_id;
-	__le16 flags;
-#define MSTORM_FCOE_TASK_ST_CTX_INTERVAL_SIZE_LOG_MASK     0xF
-#define MSTORM_FCOE_TASK_ST_CTX_INTERVAL_SIZE_LOG_SHIFT    0
-#define MSTORM_FCOE_TASK_ST_CTX_HOST_INTERFACE_MASK        0x3
-#define MSTORM_FCOE_TASK_ST_CTX_HOST_INTERFACE_SHIFT       4
-#define MSTORM_FCOE_TASK_ST_CTX_DIF_TO_PEER_MASK           0x1
-#define MSTORM_FCOE_TASK_ST_CTX_DIF_TO_PEER_SHIFT          6
-#define MSTORM_FCOE_TASK_ST_CTX_MP_INCLUDE_FC_HEADER_MASK  0x1
-#define MSTORM_FCOE_TASK_ST_CTX_MP_INCLUDE_FC_HEADER_SHIFT 7
-#define MSTORM_FCOE_TASK_ST_CTX_DIX_BLOCK_SIZE_MASK        0x3
-#define MSTORM_FCOE_TASK_ST_CTX_DIX_BLOCK_SIZE_SHIFT       8
-#define MSTORM_FCOE_TASK_ST_CTX_VALIDATE_DIX_REF_TAG_MASK  0x1
-#define MSTORM_FCOE_TASK_ST_CTX_VALIDATE_DIX_REF_TAG_SHIFT 10
-#define MSTORM_FCOE_TASK_ST_CTX_DIX_CACHED_SGE_FLG_MASK    0x1
-#define MSTORM_FCOE_TASK_ST_CTX_DIX_CACHED_SGE_FLG_SHIFT   11
-#define MSTORM_FCOE_TASK_ST_CTX_DIF_SUPPORTED_MASK         0x1
-#define MSTORM_FCOE_TASK_ST_CTX_DIF_SUPPORTED_SHIFT        12
-#define MSTORM_FCOE_TASK_ST_CTX_TX_SGL_MODE_MASK           0x1
-#define MSTORM_FCOE_TASK_ST_CTX_TX_SGL_MODE_SHIFT          13
-#define MSTORM_FCOE_TASK_ST_CTX_RESERVED_MASK              0x3
-#define MSTORM_FCOE_TASK_ST_CTX_RESERVED_SHIFT             14
-	struct scsi_cached_sges data_desc;
-};
-
-struct ustorm_fcoe_task_ag_ctx {
-	u8 reserved;
-	u8 byte1;
-	__le16 icid;
-	u8 flags0;
-#define USTORM_FCOE_TASK_AG_CTX_CONNECTION_TYPE_MASK  0xF
-#define USTORM_FCOE_TASK_AG_CTX_CONNECTION_TYPE_SHIFT 0
-#define USTORM_FCOE_TASK_AG_CTX_EXIST_IN_QM0_MASK     0x1
-#define USTORM_FCOE_TASK_AG_CTX_EXIST_IN_QM0_SHIFT    4
-#define USTORM_FCOE_TASK_AG_CTX_BIT1_MASK             0x1
-#define USTORM_FCOE_TASK_AG_CTX_BIT1_SHIFT            5
-#define USTORM_FCOE_TASK_AG_CTX_CF0_MASK              0x3
-#define USTORM_FCOE_TASK_AG_CTX_CF0_SHIFT             6
-	u8 flags1;
-#define USTORM_FCOE_TASK_AG_CTX_CF1_MASK              0x3
-#define USTORM_FCOE_TASK_AG_CTX_CF1_SHIFT             0
-#define USTORM_FCOE_TASK_AG_CTX_CF2_MASK              0x3
-#define USTORM_FCOE_TASK_AG_CTX_CF2_SHIFT             2
-#define USTORM_FCOE_TASK_AG_CTX_CF3_MASK              0x3
-#define USTORM_FCOE_TASK_AG_CTX_CF3_SHIFT             4
-#define USTORM_FCOE_TASK_AG_CTX_DIF_ERROR_CF_MASK     0x3
-#define USTORM_FCOE_TASK_AG_CTX_DIF_ERROR_CF_SHIFT    6
-	u8 flags2;
-#define USTORM_FCOE_TASK_AG_CTX_CF0EN_MASK            0x1
-#define USTORM_FCOE_TASK_AG_CTX_CF0EN_SHIFT           0
-#define USTORM_FCOE_TASK_AG_CTX_CF1EN_MASK            0x1
-#define USTORM_FCOE_TASK_AG_CTX_CF1EN_SHIFT           1
-#define USTORM_FCOE_TASK_AG_CTX_CF2EN_MASK            0x1
-#define USTORM_FCOE_TASK_AG_CTX_CF2EN_SHIFT           2
-#define USTORM_FCOE_TASK_AG_CTX_CF3EN_MASK            0x1
-#define USTORM_FCOE_TASK_AG_CTX_CF3EN_SHIFT           3
-#define USTORM_FCOE_TASK_AG_CTX_DIF_ERROR_CF_EN_MASK  0x1
-#define USTORM_FCOE_TASK_AG_CTX_DIF_ERROR_CF_EN_SHIFT 4
-#define USTORM_FCOE_TASK_AG_CTX_RULE0EN_MASK          0x1
-#define USTORM_FCOE_TASK_AG_CTX_RULE0EN_SHIFT         5
-#define USTORM_FCOE_TASK_AG_CTX_RULE1EN_MASK          0x1
-#define USTORM_FCOE_TASK_AG_CTX_RULE1EN_SHIFT         6
-#define USTORM_FCOE_TASK_AG_CTX_RULE2EN_MASK          0x1
-#define USTORM_FCOE_TASK_AG_CTX_RULE2EN_SHIFT         7
-	u8 flags3;
-#define USTORM_FCOE_TASK_AG_CTX_RULE3EN_MASK          0x1
-#define USTORM_FCOE_TASK_AG_CTX_RULE3EN_SHIFT         0
-#define USTORM_FCOE_TASK_AG_CTX_RULE4EN_MASK          0x1
-#define USTORM_FCOE_TASK_AG_CTX_RULE4EN_SHIFT         1
-#define USTORM_FCOE_TASK_AG_CTX_RULE5EN_MASK          0x1
-#define USTORM_FCOE_TASK_AG_CTX_RULE5EN_SHIFT         2
-#define USTORM_FCOE_TASK_AG_CTX_RULE6EN_MASK          0x1
-#define USTORM_FCOE_TASK_AG_CTX_RULE6EN_SHIFT         3
-#define USTORM_FCOE_TASK_AG_CTX_DIF_ERROR_TYPE_MASK   0xF
-#define USTORM_FCOE_TASK_AG_CTX_DIF_ERROR_TYPE_SHIFT  4
-	__le32 dif_err_intervals;
-	__le32 dif_error_1st_interval;
-	__le32 global_cq_num;
-	__le32 reg3;
-	__le32 reg4;
-	__le32 reg5;
-};
-
-struct fcoe_task_context {
-	struct ystorm_fcoe_task_st_ctx ystorm_st_context;
-	struct regpair ystorm_st_padding[2];
-	struct tdif_task_context tdif_context;
-	struct ystorm_fcoe_task_ag_ctx ystorm_ag_context;
-	struct tstorm_fcoe_task_ag_ctx tstorm_ag_context;
-	struct timers_context timer_context;
-	struct tstorm_fcoe_task_st_ctx tstorm_st_context;
-	struct regpair tstorm_st_padding[2];
-	struct mstorm_fcoe_task_ag_ctx mstorm_ag_context;
-	struct mstorm_fcoe_task_st_ctx mstorm_st_context;
-	struct ustorm_fcoe_task_ag_ctx ustorm_ag_context;
-	struct rdif_task_context rdif_context;
-};
-
+/* Per PF FCoE transmit path statistics - pStorm RAM structure */
 struct fcoe_tx_stat {
 	struct regpair fcoe_tx_byte_cnt;
 	struct regpair fcoe_tx_data_pkt_cnt;
@@ -618,51 +690,55 @@ struct fcoe_tx_stat {
 	struct regpair fcoe_tx_other_pkt_cnt;
 };
 
+/* FCoE SQ/XferQ element */
 struct fcoe_wqe {
 	__le16 task_id;
 	__le16 flags;
-#define FCOE_WQE_REQ_TYPE_MASK       0xF
-#define FCOE_WQE_REQ_TYPE_SHIFT      0
-#define FCOE_WQE_SGL_MODE_MASK       0x1
-#define FCOE_WQE_SGL_MODE_SHIFT      4
-#define FCOE_WQE_CONTINUATION_MASK   0x1
-#define FCOE_WQE_CONTINUATION_SHIFT  5
-#define FCOE_WQE_SEND_AUTO_RSP_MASK  0x1
-#define FCOE_WQE_SEND_AUTO_RSP_SHIFT 6
-#define FCOE_WQE_RESERVED_MASK       0x1
-#define FCOE_WQE_RESERVED_SHIFT      7
-#define FCOE_WQE_NUM_SGES_MASK       0xF
-#define FCOE_WQE_NUM_SGES_SHIFT      8
-#define FCOE_WQE_RESERVED1_MASK      0xF
-#define FCOE_WQE_RESERVED1_SHIFT     12
+#define FCOE_WQE_REQ_TYPE_MASK		0xF
+#define FCOE_WQE_REQ_TYPE_SHIFT		0
+#define FCOE_WQE_SGL_MODE_MASK		0x1
+#define FCOE_WQE_SGL_MODE_SHIFT		4
+#define FCOE_WQE_CONTINUATION_MASK	0x1
+#define FCOE_WQE_CONTINUATION_SHIFT	5
+#define FCOE_WQE_SEND_AUTO_RSP_MASK	0x1
+#define FCOE_WQE_SEND_AUTO_RSP_SHIFT	6
+#define FCOE_WQE_RESERVED_MASK		0x1
+#define FCOE_WQE_RESERVED_SHIFT		7
+#define FCOE_WQE_NUM_SGES_MASK		0xF
+#define FCOE_WQE_NUM_SGES_SHIFT		8
+#define FCOE_WQE_RESERVED1_MASK		0xF
+#define FCOE_WQE_RESERVED1_SHIFT	12
 	union fcoe_additional_info_union additional_info_union;
 };
 
+/* FCoE XFRQ element protection flags */
 struct xfrqe_prot_flags {
 	u8 flags;
-#define XFRQE_PROT_FLAGS_PROT_INTERVAL_SIZE_LOG_MASK  0xF
-#define XFRQE_PROT_FLAGS_PROT_INTERVAL_SIZE_LOG_SHIFT 0
-#define XFRQE_PROT_FLAGS_DIF_TO_PEER_MASK             0x1
-#define XFRQE_PROT_FLAGS_DIF_TO_PEER_SHIFT            4
-#define XFRQE_PROT_FLAGS_HOST_INTERFACE_MASK          0x3
-#define XFRQE_PROT_FLAGS_HOST_INTERFACE_SHIFT         5
-#define XFRQE_PROT_FLAGS_RESERVED_MASK                0x1
-#define XFRQE_PROT_FLAGS_RESERVED_SHIFT               7
+#define XFRQE_PROT_FLAGS_PROT_INTERVAL_SIZE_LOG_MASK	0xF
+#define XFRQE_PROT_FLAGS_PROT_INTERVAL_SIZE_LOG_SHIFT	0
+#define XFRQE_PROT_FLAGS_DIF_TO_PEER_MASK		0x1
+#define XFRQE_PROT_FLAGS_DIF_TO_PEER_SHIFT		4
+#define XFRQE_PROT_FLAGS_HOST_INTERFACE_MASK		0x3
+#define XFRQE_PROT_FLAGS_HOST_INTERFACE_SHIFT		5
+#define XFRQE_PROT_FLAGS_RESERVED_MASK			0x1
+#define XFRQE_PROT_FLAGS_RESERVED_SHIFT			7
 };
 
+/* FCoE doorbell data */
 struct fcoe_db_data {
 	u8 params;
-#define FCOE_DB_DATA_DEST_MASK         0x3
-#define FCOE_DB_DATA_DEST_SHIFT        0
-#define FCOE_DB_DATA_AGG_CMD_MASK      0x3
-#define FCOE_DB_DATA_AGG_CMD_SHIFT     2
-#define FCOE_DB_DATA_BYPASS_EN_MASK    0x1
-#define FCOE_DB_DATA_BYPASS_EN_SHIFT   4
-#define FCOE_DB_DATA_RESERVED_MASK     0x1
-#define FCOE_DB_DATA_RESERVED_SHIFT    5
-#define FCOE_DB_DATA_AGG_VAL_SEL_MASK  0x3
-#define FCOE_DB_DATA_AGG_VAL_SEL_SHIFT 6
+#define FCOE_DB_DATA_DEST_MASK		0x3
+#define FCOE_DB_DATA_DEST_SHIFT		0
+#define FCOE_DB_DATA_AGG_CMD_MASK	0x3
+#define FCOE_DB_DATA_AGG_CMD_SHIFT	2
+#define FCOE_DB_DATA_BYPASS_EN_MASK	0x1
+#define FCOE_DB_DATA_BYPASS_EN_SHIFT	4
+#define FCOE_DB_DATA_RESERVED_MASK	0x1
+#define FCOE_DB_DATA_RESERVED_SHIFT	5
+#define FCOE_DB_DATA_AGG_VAL_SEL_MASK	0x3
+#define FCOE_DB_DATA_AGG_VAL_SEL_SHIFT	6
 	u8 agg_flags;
 	__le16 sq_prod;
 };
+
 #endif /* __FCOE_COMMON__ */
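Side note for reviewers: every flags field in these HSI structures follows
the same *_MASK/*_SHIFT convention, where the mask describes the field width
before shifting. Below is a minimal sketch, assuming the struct layouts from
the FCoE diff above, of how such a pair is consumed; the qed common headers
provide GET_FIELD()/SET_FIELD() helpers for exactly this pattern, and the
local stand-ins here only keep the example self-contained.

/*
 * Illustrative only: local stand-ins for the qed GET_FIELD()/SET_FIELD()
 * helpers, showing how a *_MASK/*_SHIFT pair packs and unpacks a field.
 */
#define EX_GET_FIELD(value, name) \
	(((value) >> (name ## _SHIFT)) & (name ## _MASK))

#define EX_SET_FIELD(value, name, val)                                      \
	do {                                                                \
		(value) &= ~((name ## _MASK) << (name ## _SHIFT));          \
		(value) |= (((val) & (name ## _MASK)) << (name ## _SHIFT)); \
	} while (0)

/* Compose the params byte of an FCoE doorbell (struct fcoe_db_data). */
static void example_fcoe_db_params(struct fcoe_db_data *db)
{
	u8 params = 0;

	EX_SET_FIELD(params, FCOE_DB_DATA_DEST, 0x1);	  /* destination */
	EX_SET_FIELD(params, FCOE_DB_DATA_AGG_CMD, 0x0);  /* no aggregation */
	EX_SET_FIELD(params, FCOE_DB_DATA_AGG_VAL_SEL, 0x0);
	db->params = params;
}

/* Read the request type back out of an SQ WQE's little-endian flags word. */
static u8 example_wqe_req_type(const struct fcoe_wqe *wqe)
{
	return EX_GET_FIELD(le16_to_cpu(wqe->flags), FCOE_WQE_REQ_TYPE);
}

Keeping the masks unshifted makes each field's width obvious at a glance and
lets a single helper pair service every structure in these files.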
diff --git a/include/linux/qed/iscsi_common.h b/include/linux/qed/iscsi_common.h
index 85e086c..4cc9b37 100644
--- a/include/linux/qed/iscsi_common.h
+++ b/include/linux/qed/iscsi_common.h
@@ -32,47 +32,48 @@
 
 #ifndef __ISCSI_COMMON__
 #define __ISCSI_COMMON__
+
 /**********************/
 /* ISCSI FW CONSTANTS */
 /**********************/
 
 /* iSCSI HSI constants */
-#define ISCSI_DEFAULT_MTU       (1500)
+#define ISCSI_DEFAULT_MTU	(1500)
 
 /* KWQ (kernel work queue) layer codes */
-#define ISCSI_SLOW_PATH_LAYER_CODE   (6)
+#define ISCSI_SLOW_PATH_LAYER_CODE	(6)
 
 /* iSCSI parameter defaults */
-#define ISCSI_DEFAULT_HEADER_DIGEST         (0)
-#define ISCSI_DEFAULT_DATA_DIGEST           (0)
-#define ISCSI_DEFAULT_INITIAL_R2T           (1)
-#define ISCSI_DEFAULT_IMMEDIATE_DATA        (1)
-#define ISCSI_DEFAULT_MAX_PDU_LENGTH        (0x2000)
-#define ISCSI_DEFAULT_FIRST_BURST_LENGTH    (0x10000)
-#define ISCSI_DEFAULT_MAX_BURST_LENGTH      (0x40000)
-#define ISCSI_DEFAULT_MAX_OUTSTANDING_R2T   (1)
+#define ISCSI_DEFAULT_HEADER_DIGEST		(0)
+#define ISCSI_DEFAULT_DATA_DIGEST		(0)
+#define ISCSI_DEFAULT_INITIAL_R2T		(1)
+#define ISCSI_DEFAULT_IMMEDIATE_DATA		(1)
+#define ISCSI_DEFAULT_MAX_PDU_LENGTH		(0x2000)
+#define ISCSI_DEFAULT_FIRST_BURST_LENGTH	(0x10000)
+#define ISCSI_DEFAULT_MAX_BURST_LENGTH		(0x40000)
+#define ISCSI_DEFAULT_MAX_OUTSTANDING_R2T	(1)
 
 /* iSCSI parameter limits */
-#define ISCSI_MIN_VAL_MAX_PDU_LENGTH        (0x200)
-#define ISCSI_MAX_VAL_MAX_PDU_LENGTH        (0xffffff)
-#define ISCSI_MIN_VAL_BURST_LENGTH          (0x200)
-#define ISCSI_MAX_VAL_BURST_LENGTH          (0xffffff)
-#define ISCSI_MIN_VAL_MAX_OUTSTANDING_R2T   (1)
-#define ISCSI_MAX_VAL_MAX_OUTSTANDING_R2T   (0xff)
+#define ISCSI_MIN_VAL_MAX_PDU_LENGTH		(0x200)
+#define ISCSI_MAX_VAL_MAX_PDU_LENGTH		(0xffffff)
+#define ISCSI_MIN_VAL_BURST_LENGTH		(0x200)
+#define ISCSI_MAX_VAL_BURST_LENGTH		(0xffffff)
+#define ISCSI_MIN_VAL_MAX_OUTSTANDING_R2T	(1)
+#define ISCSI_MAX_VAL_MAX_OUTSTANDING_R2T	(0xff)
 
-#define ISCSI_AHS_CNTL_SIZE 4
+#define ISCSI_AHS_CNTL_SIZE	4
 
-#define ISCSI_WQE_NUM_SGES_SLOWIO           (0xf)
+#define ISCSI_WQE_NUM_SGES_SLOWIO	(0xf)
 
 /* iSCSI reserved params */
 #define ISCSI_ITT_ALL_ONES	(0xffffffff)
 #define ISCSI_TTT_ALL_ONES	(0xffffffff)
 
-#define ISCSI_OPTION_1_OFF_CHIP_TCP 1
-#define ISCSI_OPTION_2_ON_CHIP_TCP 2
+#define ISCSI_OPTION_1_OFF_CHIP_TCP	1
+#define ISCSI_OPTION_2_ON_CHIP_TCP	2
 
-#define ISCSI_INITIATOR_MODE 0
-#define ISCSI_TARGET_MODE 1
+#define ISCSI_INITIATOR_MODE	0
+#define ISCSI_TARGET_MODE	1
 
 /* iSCSI request op codes */
 #define ISCSI_OPCODE_NOP_OUT		(0)
@@ -84,41 +85,48 @@
 #define ISCSI_OPCODE_LOGOUT_REQUEST	(6)
 
 /* iSCSI response/messages op codes */
-#define ISCSI_OPCODE_NOP_IN             (0x20)
-#define ISCSI_OPCODE_SCSI_RESPONSE      (0x21)
-#define ISCSI_OPCODE_TMF_RESPONSE       (0x22)
-#define ISCSI_OPCODE_LOGIN_RESPONSE     (0x23)
-#define ISCSI_OPCODE_TEXT_RESPONSE      (0x24)
-#define ISCSI_OPCODE_DATA_IN            (0x25)
-#define ISCSI_OPCODE_LOGOUT_RESPONSE    (0x26)
-#define ISCSI_OPCODE_R2T                (0x31)
-#define ISCSI_OPCODE_ASYNC_MSG          (0x32)
-#define ISCSI_OPCODE_REJECT             (0x3f)
+#define ISCSI_OPCODE_NOP_IN		(0x20)
+#define ISCSI_OPCODE_SCSI_RESPONSE	(0x21)
+#define ISCSI_OPCODE_TMF_RESPONSE	(0x22)
+#define ISCSI_OPCODE_LOGIN_RESPONSE	(0x23)
+#define ISCSI_OPCODE_TEXT_RESPONSE	(0x24)
+#define ISCSI_OPCODE_DATA_IN		(0x25)
+#define ISCSI_OPCODE_LOGOUT_RESPONSE	(0x26)
+#define ISCSI_OPCODE_R2T		(0x31)
+#define ISCSI_OPCODE_ASYNC_MSG		(0x32)
+#define ISCSI_OPCODE_REJECT		(0x3f)
 
 /* iSCSI stages */
-#define ISCSI_STAGE_SECURITY_NEGOTIATION            (0)
-#define ISCSI_STAGE_LOGIN_OPERATIONAL_NEGOTIATION   (1)
-#define ISCSI_STAGE_FULL_FEATURE_PHASE              (3)
+#define ISCSI_STAGE_SECURITY_NEGOTIATION		(0)
+#define ISCSI_STAGE_LOGIN_OPERATIONAL_NEGOTIATION	(1)
+#define ISCSI_STAGE_FULL_FEATURE_PHASE			(3)
 
 /* iSCSI CQE errors */
-#define CQE_ERROR_BITMAP_DATA_DIGEST          (0x08)
-#define CQE_ERROR_BITMAP_RCV_ON_INVALID_CONN  (0x10)
-#define CQE_ERROR_BITMAP_DATA_TRUNCATED       (0x20)
+#define CQE_ERROR_BITMAP_DATA_DIGEST		(0x08)
+#define CQE_ERROR_BITMAP_RCV_ON_INVALID_CONN	(0x10)
+#define CQE_ERROR_BITMAP_DATA_TRUNCATED		(0x20)
 
+/* Union of data bd_opaque / tq_tid */
+union bd_opaque_tq_union {
+	__le16 bd_opaque;
+	__le16 tq_tid;
+};
+
+/* iSCSI CQE error bitmap */
 struct cqe_error_bitmap {
 	u8 cqe_error_status_bits;
-#define CQE_ERROR_BITMAP_DIF_ERR_BITS_MASK         0x7
-#define CQE_ERROR_BITMAP_DIF_ERR_BITS_SHIFT        0
-#define CQE_ERROR_BITMAP_DATA_DIGEST_ERR_MASK      0x1
-#define CQE_ERROR_BITMAP_DATA_DIGEST_ERR_SHIFT     3
-#define CQE_ERROR_BITMAP_RCV_ON_INVALID_CONN_MASK  0x1
-#define CQE_ERROR_BITMAP_RCV_ON_INVALID_CONN_SHIFT 4
-#define CQE_ERROR_BITMAP_DATA_TRUNCATED_ERR_MASK   0x1
-#define CQE_ERROR_BITMAP_DATA_TRUNCATED_ERR_SHIFT  5
-#define CQE_ERROR_BITMAP_UNDER_RUN_ERR_MASK        0x1
-#define CQE_ERROR_BITMAP_UNDER_RUN_ERR_SHIFT       6
-#define CQE_ERROR_BITMAP_RESERVED2_MASK            0x1
-#define CQE_ERROR_BITMAP_RESERVED2_SHIFT           7
+#define CQE_ERROR_BITMAP_DIF_ERR_BITS_MASK		0x7
+#define CQE_ERROR_BITMAP_DIF_ERR_BITS_SHIFT		0
+#define CQE_ERROR_BITMAP_DATA_DIGEST_ERR_MASK		0x1
+#define CQE_ERROR_BITMAP_DATA_DIGEST_ERR_SHIFT		3
+#define CQE_ERROR_BITMAP_RCV_ON_INVALID_CONN_MASK	0x1
+#define CQE_ERROR_BITMAP_RCV_ON_INVALID_CONN_SHIFT	4
+#define CQE_ERROR_BITMAP_DATA_TRUNCATED_ERR_MASK	0x1
+#define CQE_ERROR_BITMAP_DATA_TRUNCATED_ERR_SHIFT	5
+#define CQE_ERROR_BITMAP_UNDER_RUN_ERR_MASK		0x1
+#define CQE_ERROR_BITMAP_UNDER_RUN_ERR_SHIFT		6
+#define CQE_ERROR_BITMAP_RESERVED2_MASK			0x1
+#define CQE_ERROR_BITMAP_RESERVED2_SHIFT		7
 };
 
 union cqe_error_status {
@@ -126,86 +134,133 @@ union cqe_error_status {
 	struct cqe_error_bitmap error_bits;
 };
 
+/* iSCSI PDU header viewed as raw data words */
 struct data_hdr {
 	__le32 data[12];
 };
 
-struct iscsi_async_msg_hdr {
-	__le16 reserved0;
-	u8 flags_attr;
-#define ISCSI_ASYNC_MSG_HDR_RSRV_MASK           0x7F
-#define ISCSI_ASYNC_MSG_HDR_RSRV_SHIFT          0
-#define ISCSI_ASYNC_MSG_HDR_CONST1_MASK         0x1
-#define ISCSI_ASYNC_MSG_HDR_CONST1_SHIFT        7
-	u8 opcode;
-	__le32 hdr_second_dword;
-#define ISCSI_ASYNC_MSG_HDR_DATA_SEG_LEN_MASK   0xFFFFFF
-#define ISCSI_ASYNC_MSG_HDR_DATA_SEG_LEN_SHIFT  0
-#define ISCSI_ASYNC_MSG_HDR_TOTAL_AHS_LEN_MASK  0xFF
-#define ISCSI_ASYNC_MSG_HDR_TOTAL_AHS_LEN_SHIFT 24
-	struct regpair lun;
-	__le32 all_ones;
-	__le32 reserved1;
-	__le32 stat_sn;
-	__le32 exp_cmd_sn;
-	__le32 max_cmd_sn;
-	__le16 param1_rsrv;
-	u8 async_vcode;
-	u8 async_event;
-	__le16 param3_rsrv;
-	__le16 param2_rsrv;
-	__le32 reserved7;
+struct lun_mapper_addr_reserved {
+	struct regpair lun_mapper_addr;
+	u8 reserved0[8];
 };
 
-struct iscsi_cmd_hdr {
-	__le16 reserved1;
-	u8 flags_attr;
-#define ISCSI_CMD_HDR_ATTR_MASK		0x7
-#define ISCSI_CMD_HDR_ATTR_SHIFT	0
-#define ISCSI_CMD_HDR_RSRV_MASK		0x3
-#define ISCSI_CMD_HDR_RSRV_SHIFT	3
-#define ISCSI_CMD_HDR_WRITE_MASK	0x1
-#define ISCSI_CMD_HDR_WRITE_SHIFT	5
-#define ISCSI_CMD_HDR_READ_MASK		0x1
-#define ISCSI_CMD_HDR_READ_SHIFT	6
-#define ISCSI_CMD_HDR_FINAL_MASK	0x1
-#define ISCSI_CMD_HDR_FINAL_SHIFT	7
-	u8 hdr_first_byte;
-#define ISCSI_CMD_HDR_OPCODE_MASK	0x3F
-#define ISCSI_CMD_HDR_OPCODE_SHIFT	0
-#define ISCSI_CMD_HDR_IMM_MASK		0x1
-#define ISCSI_CMD_HDR_IMM_SHIFT		6
-#define ISCSI_CMD_HDR_RSRV1_MASK	0x1
-#define ISCSI_CMD_HDR_RSRV1_SHIFT	7
-	__le32 hdr_second_dword;
-#define ISCSI_CMD_HDR_DATA_SEG_LEN_MASK   0xFFFFFF
-#define ISCSI_CMD_HDR_DATA_SEG_LEN_SHIFT  0
-#define ISCSI_CMD_HDR_TOTAL_AHS_LEN_MASK  0xFF
-#define ISCSI_CMD_HDR_TOTAL_AHS_LEN_SHIFT 24
-	struct regpair lun;
-	__le32 itt;
-	__le32 expected_transfer_length;
-	__le32 cmd_sn;
-	__le32 exp_stat_sn;
-	__le32 cdb[4];
+/* RDIF context for DIF on immediate */
+struct dif_on_immediate_params {
+	__le32 initial_ref_tag;
+	__le16 application_tag;
+	__le16 application_tag_mask;
+	__le16 flags1;
+#define DIF_ON_IMMEDIATE_PARAMS_VALIDATE_GUARD_MASK		0x1
+#define DIF_ON_IMMEDIATE_PARAMS_VALIDATE_GUARD_SHIFT		0
+#define DIF_ON_IMMEDIATE_PARAMS_VALIDATE_APP_TAG_MASK		0x1
+#define DIF_ON_IMMEDIATE_PARAMS_VALIDATE_APP_TAG_SHIFT		1
+#define DIF_ON_IMMEDIATE_PARAMS_VALIDATE_REF_TAG_MASK		0x1
+#define DIF_ON_IMMEDIATE_PARAMS_VALIDATE_REF_TAG_SHIFT		2
+#define DIF_ON_IMMEDIATE_PARAMS_FORWARD_GUARD_MASK		0x1
+#define DIF_ON_IMMEDIATE_PARAMS_FORWARD_GUARD_SHIFT		3
+#define DIF_ON_IMMEDIATE_PARAMS_FORWARD_APP_TAG_MASK		0x1
+#define DIF_ON_IMMEDIATE_PARAMS_FORWARD_APP_TAG_SHIFT		4
+#define DIF_ON_IMMEDIATE_PARAMS_FORWARD_REF_TAG_MASK		0x1
+#define DIF_ON_IMMEDIATE_PARAMS_FORWARD_REF_TAG_SHIFT		5
+#define DIF_ON_IMMEDIATE_PARAMS_INTERVAL_SIZE_MASK		0x1
+#define DIF_ON_IMMEDIATE_PARAMS_INTERVAL_SIZE_SHIFT		6
+#define DIF_ON_IMMEDIATE_PARAMS_NETWORK_INTERFACE_MASK		0x1
+#define DIF_ON_IMMEDIATE_PARAMS_NETWORK_INTERFACE_SHIFT		7
+#define DIF_ON_IMMEDIATE_PARAMS_HOST_INTERFACE_MASK		0x3
+#define DIF_ON_IMMEDIATE_PARAMS_HOST_INTERFACE_SHIFT		8
+#define DIF_ON_IMMEDIATE_PARAMS_REF_TAG_MASK_MASK		0xF
+#define DIF_ON_IMMEDIATE_PARAMS_REF_TAG_MASK_SHIFT		10
+#define DIF_ON_IMMEDIATE_PARAMS_FORWARD_APP_TAG_WITH_MASK_MASK	0x1
+#define DIF_ON_IMMEDIATE_PARAMS_FORWARD_APP_TAG_WITH_MASK_SHIFT	14
+#define DIF_ON_IMMEDIATE_PARAMS_FORWARD_REF_TAG_WITH_MASK_MASK	0x1
+#define DIF_ON_IMMEDIATE_PARAMS_FORWARD_REF_TAG_WITH_MASK_SHIFT	15
+	u8 flags0;
+#define DIF_ON_IMMEDIATE_PARAMS_RESERVED_MASK			0x1
+#define DIF_ON_IMMEDIATE_PARAMS_RESERVED_SHIFT			0
+#define DIF_ON_IMMEDIATE_PARAMS_IGNORE_APP_TAG_MASK		0x1
+#define DIF_ON_IMMEDIATE_PARAMS_IGNORE_APP_TAG_SHIFT		1
+#define DIF_ON_IMMEDIATE_PARAMS_INITIAL_REF_TAG_IS_VALID_MASK	0x1
+#define DIF_ON_IMMEDIATE_PARAMS_INITIAL_REF_TAG_IS_VALID_SHIFT	2
+#define DIF_ON_IMMEDIATE_PARAMS_HOST_GUARD_TYPE_MASK		0x1
+#define DIF_ON_IMMEDIATE_PARAMS_HOST_GUARD_TYPE_SHIFT		3
+#define DIF_ON_IMMEDIATE_PARAMS_PROTECTION_TYPE_MASK		0x3
+#define DIF_ON_IMMEDIATE_PARAMS_PROTECTION_TYPE_SHIFT		4
+#define DIF_ON_IMMEDIATE_PARAMS_CRC_SEED_MASK			0x1
+#define DIF_ON_IMMEDIATE_PARAMS_CRC_SEED_SHIFT			6
+#define DIF_ON_IMMEDIATE_PARAMS_KEEP_REF_TAG_CONST_MASK		0x1
+#define DIF_ON_IMMEDIATE_PARAMS_KEEP_REF_TAG_CONST_SHIFT	7
+	u8 reserved_zero[5];
 };
 
+/* iSCSI DIF on immediate mode attributes union */
+union dif_configuration_params {
+	struct lun_mapper_addr_reserved lun_mapper_address;
+	struct dif_on_immediate_params def_dif_conf;
+};
+
+/* Union of data/r2t sequence number */
+union iscsi_seq_num {
+	__le16 data_sn;
+	__le16 r2t_sn;
+};
+
+/* iSCSI DIF flags */
+struct iscsi_dif_flags {
+	u8 flags;
+#define ISCSI_DIF_FLAGS_PROT_INTERVAL_SIZE_LOG_MASK	0xF
+#define ISCSI_DIF_FLAGS_PROT_INTERVAL_SIZE_LOG_SHIFT	0
+#define ISCSI_DIF_FLAGS_DIF_TO_PEER_MASK		0x1
+#define ISCSI_DIF_FLAGS_DIF_TO_PEER_SHIFT		4
+#define ISCSI_DIF_FLAGS_HOST_INTERFACE_MASK		0x7
+#define ISCSI_DIF_FLAGS_HOST_INTERFACE_SHIFT		5
+};
+
+/* The iSCSI task state of Ystorm */
+struct ystorm_iscsi_task_state {
+	struct scsi_cached_sges data_desc;
+	struct scsi_sgl_params sgl_params;
+	__le32 exp_r2t_sn;
+	__le32 buffer_offset;
+	union iscsi_seq_num seq_num;
+	struct iscsi_dif_flags dif_flags;
+	u8 flags;
+#define YSTORM_ISCSI_TASK_STATE_LOCAL_COMP_MASK		0x1
+#define YSTORM_ISCSI_TASK_STATE_LOCAL_COMP_SHIFT	0
+#define YSTORM_ISCSI_TASK_STATE_SLOW_IO_MASK		0x1
+#define YSTORM_ISCSI_TASK_STATE_SLOW_IO_SHIFT		1
+#define YSTORM_ISCSI_TASK_STATE_SET_DIF_OFFSET_MASK	0x1
+#define YSTORM_ISCSI_TASK_STATE_SET_DIF_OFFSET_SHIFT	2
+#define YSTORM_ISCSI_TASK_STATE_RESERVED0_MASK		0x1F
+#define YSTORM_ISCSI_TASK_STATE_RESERVED0_SHIFT		3
+};
+
+/* iSCSI Ystorm task retransmit options */
+struct ystorm_iscsi_task_rxmit_opt {
+	__le32 fast_rxmit_sge_offset;
+	__le32 scan_start_buffer_offset;
+	__le32 fast_rxmit_buffer_offset;
+	u8 scan_start_sgl_index;
+	u8 fast_rxmit_sgl_index;
+	__le16 reserved;
+};
+
+/* iSCSI Common PDU header */
 struct iscsi_common_hdr {
 	u8 hdr_status;
 	u8 hdr_response;
 	u8 hdr_flags;
 	u8 hdr_first_byte;
-#define ISCSI_COMMON_HDR_OPCODE_MASK         0x3F
-#define ISCSI_COMMON_HDR_OPCODE_SHIFT        0
-#define ISCSI_COMMON_HDR_IMM_MASK            0x1
-#define ISCSI_COMMON_HDR_IMM_SHIFT           6
-#define ISCSI_COMMON_HDR_RSRV_MASK           0x1
-#define ISCSI_COMMON_HDR_RSRV_SHIFT          7
+#define ISCSI_COMMON_HDR_OPCODE_MASK		0x3F
+#define ISCSI_COMMON_HDR_OPCODE_SHIFT		0
+#define ISCSI_COMMON_HDR_IMM_MASK		0x1
+#define ISCSI_COMMON_HDR_IMM_SHIFT		6
+#define ISCSI_COMMON_HDR_RSRV_MASK		0x1
+#define ISCSI_COMMON_HDR_RSRV_SHIFT		7
 	__le32 hdr_second_dword;
-#define ISCSI_COMMON_HDR_DATA_SEG_LEN_MASK   0xFFFFFF
-#define ISCSI_COMMON_HDR_DATA_SEG_LEN_SHIFT  0
-#define ISCSI_COMMON_HDR_TOTAL_AHS_LEN_MASK  0xFF
-#define ISCSI_COMMON_HDR_TOTAL_AHS_LEN_SHIFT 24
+#define ISCSI_COMMON_HDR_DATA_SEG_LEN_MASK	0xFFFFFF
+#define ISCSI_COMMON_HDR_DATA_SEG_LEN_SHIFT	0
+#define ISCSI_COMMON_HDR_TOTAL_AHS_LEN_MASK	0xFF
+#define ISCSI_COMMON_HDR_TOTAL_AHS_LEN_SHIFT	24
 	struct regpair lun_reserved;
 	__le32 itt;
 	__le32 ttt;
@@ -215,86 +270,60 @@ struct iscsi_common_hdr {
 	__le32 data[3];
 };
 
-struct iscsi_conn_offload_params {
-	struct regpair sq_pbl_addr;
-	struct regpair r2tq_pbl_addr;
-	struct regpair xhq_pbl_addr;
-	struct regpair uhq_pbl_addr;
-	__le32 initial_ack;
-	__le16 physical_q0;
-	__le16 physical_q1;
-	u8 flags;
-#define ISCSI_CONN_OFFLOAD_PARAMS_TCP_ON_CHIP_1B_MASK  0x1
-#define ISCSI_CONN_OFFLOAD_PARAMS_TCP_ON_CHIP_1B_SHIFT 0
-#define ISCSI_CONN_OFFLOAD_PARAMS_TARGET_MODE_MASK     0x1
-#define ISCSI_CONN_OFFLOAD_PARAMS_TARGET_MODE_SHIFT    1
-#define ISCSI_CONN_OFFLOAD_PARAMS_RESTRICTED_MODE_MASK	0x1
-#define ISCSI_CONN_OFFLOAD_PARAMS_RESTRICTED_MODE_SHIFT	2
-#define ISCSI_CONN_OFFLOAD_PARAMS_RESERVED1_MASK	0x1F
-#define ISCSI_CONN_OFFLOAD_PARAMS_RESERVED1_SHIFT	3
-	u8 pbl_page_size_log;
-	u8 pbe_page_size_log;
-	u8 default_cq;
-	__le32 stat_sn;
-};
-
-struct iscsi_slow_path_hdr {
-	u8 op_code;
-	u8 flags;
-#define ISCSI_SLOW_PATH_HDR_RESERVED0_MASK   0xF
-#define ISCSI_SLOW_PATH_HDR_RESERVED0_SHIFT  0
-#define ISCSI_SLOW_PATH_HDR_LAYER_CODE_MASK  0x7
-#define ISCSI_SLOW_PATH_HDR_LAYER_CODE_SHIFT 4
-#define ISCSI_SLOW_PATH_HDR_RESERVED1_MASK   0x1
-#define ISCSI_SLOW_PATH_HDR_RESERVED1_SHIFT  7
-};
-
-struct iscsi_conn_update_ramrod_params {
-	struct iscsi_slow_path_hdr hdr;
-	__le16 conn_id;
-	__le32 fw_cid;
-	u8 flags;
-#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_HD_EN_MASK           0x1
-#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_HD_EN_SHIFT          0
-#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_DD_EN_MASK           0x1
-#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_DD_EN_SHIFT          1
-#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_INITIAL_R2T_MASK     0x1
-#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_INITIAL_R2T_SHIFT    2
-#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_IMMEDIATE_DATA_MASK  0x1
-#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_IMMEDIATE_DATA_SHIFT 3
-#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_DIF_BLOCK_SIZE_MASK  0x1
-#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_DIF_BLOCK_SIZE_SHIFT 4
-#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_DIF_ON_HOST_EN_MASK  0x1
-#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_DIF_ON_HOST_EN_SHIFT 5
-#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_RESERVED1_MASK       0x3
-#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_RESERVED1_SHIFT      6
-	u8 reserved0[3];
-	__le32 max_seq_size;
-	__le32 max_send_pdu_length;
-	__le32 max_recv_pdu_length;
-	__le32 first_seq_length;
+/* iSCSI Command PDU header */
+struct iscsi_cmd_hdr {
+	__le16 reserved1;
+	u8 flags_attr;
+#define ISCSI_CMD_HDR_ATTR_MASK			0x7
+#define ISCSI_CMD_HDR_ATTR_SHIFT		0
+#define ISCSI_CMD_HDR_RSRV_MASK			0x3
+#define ISCSI_CMD_HDR_RSRV_SHIFT		3
+#define ISCSI_CMD_HDR_WRITE_MASK		0x1
+#define ISCSI_CMD_HDR_WRITE_SHIFT		5
+#define ISCSI_CMD_HDR_READ_MASK			0x1
+#define ISCSI_CMD_HDR_READ_SHIFT		6
+#define ISCSI_CMD_HDR_FINAL_MASK		0x1
+#define ISCSI_CMD_HDR_FINAL_SHIFT		7
+	u8 hdr_first_byte;
+#define ISCSI_CMD_HDR_OPCODE_MASK		0x3F
+#define ISCSI_CMD_HDR_OPCODE_SHIFT		0
+#define ISCSI_CMD_HDR_IMM_MASK			0x1
+#define ISCSI_CMD_HDR_IMM_SHIFT			6
+#define ISCSI_CMD_HDR_RSRV1_MASK		0x1
+#define ISCSI_CMD_HDR_RSRV1_SHIFT		7
+	__le32 hdr_second_dword;
+#define ISCSI_CMD_HDR_DATA_SEG_LEN_MASK		0xFFFFFF
+#define ISCSI_CMD_HDR_DATA_SEG_LEN_SHIFT	0
+#define ISCSI_CMD_HDR_TOTAL_AHS_LEN_MASK	0xFF
+#define ISCSI_CMD_HDR_TOTAL_AHS_LEN_SHIFT	24
+	struct regpair lun;
+	__le32 itt;
+	__le32 expected_transfer_length;
+	__le32 cmd_sn;
 	__le32 exp_stat_sn;
+	__le32 cdb[4];
 };
 
+/* iSCSI Command PDU header with Extended CDB (Initiator Mode) */
 struct iscsi_ext_cdb_cmd_hdr {
 	__le16 reserved1;
 	u8 flags_attr;
-#define ISCSI_EXT_CDB_CMD_HDR_ATTR_MASK          0x7
-#define ISCSI_EXT_CDB_CMD_HDR_ATTR_SHIFT         0
-#define ISCSI_EXT_CDB_CMD_HDR_RSRV_MASK          0x3
-#define ISCSI_EXT_CDB_CMD_HDR_RSRV_SHIFT         3
-#define ISCSI_EXT_CDB_CMD_HDR_WRITE_MASK         0x1
-#define ISCSI_EXT_CDB_CMD_HDR_WRITE_SHIFT        5
-#define ISCSI_EXT_CDB_CMD_HDR_READ_MASK          0x1
-#define ISCSI_EXT_CDB_CMD_HDR_READ_SHIFT         6
-#define ISCSI_EXT_CDB_CMD_HDR_FINAL_MASK         0x1
-#define ISCSI_EXT_CDB_CMD_HDR_FINAL_SHIFT        7
+#define ISCSI_EXT_CDB_CMD_HDR_ATTR_MASK		0x7
+#define ISCSI_EXT_CDB_CMD_HDR_ATTR_SHIFT	0
+#define ISCSI_EXT_CDB_CMD_HDR_RSRV_MASK		0x3
+#define ISCSI_EXT_CDB_CMD_HDR_RSRV_SHIFT	3
+#define ISCSI_EXT_CDB_CMD_HDR_WRITE_MASK	0x1
+#define ISCSI_EXT_CDB_CMD_HDR_WRITE_SHIFT	5
+#define ISCSI_EXT_CDB_CMD_HDR_READ_MASK		0x1
+#define ISCSI_EXT_CDB_CMD_HDR_READ_SHIFT	6
+#define ISCSI_EXT_CDB_CMD_HDR_FINAL_MASK	0x1
+#define ISCSI_EXT_CDB_CMD_HDR_FINAL_SHIFT	7
 	u8 opcode;
 	__le32 hdr_second_dword;
-#define ISCSI_EXT_CDB_CMD_HDR_DATA_SEG_LEN_MASK  0xFFFFFF
-#define ISCSI_EXT_CDB_CMD_HDR_DATA_SEG_LEN_SHIFT 0
-#define ISCSI_EXT_CDB_CMD_HDR_CDB_SIZE_MASK      0xFF
-#define ISCSI_EXT_CDB_CMD_HDR_CDB_SIZE_SHIFT     24
+#define ISCSI_EXT_CDB_CMD_HDR_DATA_SEG_LEN_MASK		0xFFFFFF
+#define ISCSI_EXT_CDB_CMD_HDR_DATA_SEG_LEN_SHIFT	0
+#define ISCSI_EXT_CDB_CMD_HDR_CDB_SIZE_MASK		0xFF
+#define ISCSI_EXT_CDB_CMD_HDR_CDB_SIZE_SHIFT		24
 	struct regpair lun;
 	__le32 itt;
 	__le32 expected_transfer_length;
@@ -303,26 +332,27 @@ struct iscsi_ext_cdb_cmd_hdr {
 	struct scsi_sge cdb_sge;
 };
 
+/* iSCSI login request PDU header */
 struct iscsi_login_req_hdr {
 	u8 version_min;
 	u8 version_max;
 	u8 flags_attr;
-#define ISCSI_LOGIN_REQ_HDR_NSG_MASK            0x3
-#define ISCSI_LOGIN_REQ_HDR_NSG_SHIFT           0
-#define ISCSI_LOGIN_REQ_HDR_CSG_MASK            0x3
-#define ISCSI_LOGIN_REQ_HDR_CSG_SHIFT           2
-#define ISCSI_LOGIN_REQ_HDR_RSRV_MASK           0x3
-#define ISCSI_LOGIN_REQ_HDR_RSRV_SHIFT          4
-#define ISCSI_LOGIN_REQ_HDR_C_MASK              0x1
-#define ISCSI_LOGIN_REQ_HDR_C_SHIFT             6
-#define ISCSI_LOGIN_REQ_HDR_T_MASK              0x1
-#define ISCSI_LOGIN_REQ_HDR_T_SHIFT             7
+#define ISCSI_LOGIN_REQ_HDR_NSG_MASK	0x3
+#define ISCSI_LOGIN_REQ_HDR_NSG_SHIFT	0
+#define ISCSI_LOGIN_REQ_HDR_CSG_MASK	0x3
+#define ISCSI_LOGIN_REQ_HDR_CSG_SHIFT	2
+#define ISCSI_LOGIN_REQ_HDR_RSRV_MASK	0x3
+#define ISCSI_LOGIN_REQ_HDR_RSRV_SHIFT	4
+#define ISCSI_LOGIN_REQ_HDR_C_MASK	0x1
+#define ISCSI_LOGIN_REQ_HDR_C_SHIFT	6
+#define ISCSI_LOGIN_REQ_HDR_T_MASK	0x1
+#define ISCSI_LOGIN_REQ_HDR_T_SHIFT	7
 	u8 opcode;
 	__le32 hdr_second_dword;
-#define ISCSI_LOGIN_REQ_HDR_DATA_SEG_LEN_MASK   0xFFFFFF
-#define ISCSI_LOGIN_REQ_HDR_DATA_SEG_LEN_SHIFT  0
-#define ISCSI_LOGIN_REQ_HDR_TOTAL_AHS_LEN_MASK  0xFF
-#define ISCSI_LOGIN_REQ_HDR_TOTAL_AHS_LEN_SHIFT 24
+#define ISCSI_LOGIN_REQ_HDR_DATA_SEG_LEN_MASK	0xFFFFFF
+#define ISCSI_LOGIN_REQ_HDR_DATA_SEG_LEN_SHIFT	0
+#define ISCSI_LOGIN_REQ_HDR_TOTAL_AHS_LEN_MASK	0xFF
+#define ISCSI_LOGIN_REQ_HDR_TOTAL_AHS_LEN_SHIFT	24
 	__le32 isid_tabc;
 	__le16 tsih;
 	__le16 isid_d;
@@ -334,6 +364,7 @@ struct iscsi_login_req_hdr {
 	__le32 reserved2[4];
 };
 
+/* iSCSI logout request PDU header */
 struct iscsi_logout_req_hdr {
 	__le16 reserved0;
 	u8 reason_code;
@@ -348,13 +379,14 @@ struct iscsi_logout_req_hdr {
 	__le32 reserved4[4];
 };
 
+/* iSCSI Data-out PDU header */
 struct iscsi_data_out_hdr {
 	__le16 reserved1;
 	u8 flags_attr;
-#define ISCSI_DATA_OUT_HDR_RSRV_MASK   0x7F
-#define ISCSI_DATA_OUT_HDR_RSRV_SHIFT  0
-#define ISCSI_DATA_OUT_HDR_FINAL_MASK  0x1
-#define ISCSI_DATA_OUT_HDR_FINAL_SHIFT 7
+#define ISCSI_DATA_OUT_HDR_RSRV_MASK	0x7F
+#define ISCSI_DATA_OUT_HDR_RSRV_SHIFT	0
+#define ISCSI_DATA_OUT_HDR_FINAL_MASK	0x1
+#define ISCSI_DATA_OUT_HDR_FINAL_SHIFT	7
 	u8 opcode;
 	__le32 reserved2;
 	struct regpair lun;
@@ -368,22 +400,23 @@ struct iscsi_data_out_hdr {
 	__le32 reserved5;
 };
 
+/* iSCSI Data-in PDU header */
 struct iscsi_data_in_hdr {
 	u8 status_rsvd;
 	u8 reserved1;
 	u8 flags;
-#define ISCSI_DATA_IN_HDR_STATUS_MASK     0x1
-#define ISCSI_DATA_IN_HDR_STATUS_SHIFT    0
-#define ISCSI_DATA_IN_HDR_UNDERFLOW_MASK  0x1
-#define ISCSI_DATA_IN_HDR_UNDERFLOW_SHIFT 1
-#define ISCSI_DATA_IN_HDR_OVERFLOW_MASK   0x1
-#define ISCSI_DATA_IN_HDR_OVERFLOW_SHIFT  2
-#define ISCSI_DATA_IN_HDR_RSRV_MASK       0x7
-#define ISCSI_DATA_IN_HDR_RSRV_SHIFT      3
-#define ISCSI_DATA_IN_HDR_ACK_MASK        0x1
-#define ISCSI_DATA_IN_HDR_ACK_SHIFT       6
-#define ISCSI_DATA_IN_HDR_FINAL_MASK      0x1
-#define ISCSI_DATA_IN_HDR_FINAL_SHIFT     7
+#define ISCSI_DATA_IN_HDR_STATUS_MASK		0x1
+#define ISCSI_DATA_IN_HDR_STATUS_SHIFT		0
+#define ISCSI_DATA_IN_HDR_UNDERFLOW_MASK	0x1
+#define ISCSI_DATA_IN_HDR_UNDERFLOW_SHIFT	1
+#define ISCSI_DATA_IN_HDR_OVERFLOW_MASK		0x1
+#define ISCSI_DATA_IN_HDR_OVERFLOW_SHIFT	2
+#define ISCSI_DATA_IN_HDR_RSRV_MASK		0x7
+#define ISCSI_DATA_IN_HDR_RSRV_SHIFT		3
+#define ISCSI_DATA_IN_HDR_ACK_MASK		0x1
+#define ISCSI_DATA_IN_HDR_ACK_SHIFT		6
+#define ISCSI_DATA_IN_HDR_FINAL_MASK		0x1
+#define ISCSI_DATA_IN_HDR_FINAL_SHIFT		7
 	u8 opcode;
 	__le32 reserved2;
 	struct regpair lun;
@@ -397,6 +430,7 @@ struct iscsi_data_in_hdr {
 	__le32 residual_count;
 };
 
+/* iSCSI R2T PDU header */
 struct iscsi_r2t_hdr {
 	u8 reserved0[3];
 	u8 opcode;
@@ -412,13 +446,14 @@ struct iscsi_r2t_hdr {
 	__le32 desired_data_trns_len;
 };
 
+/* iSCSI NOP-out PDU header */
 struct iscsi_nop_out_hdr {
 	__le16 reserved1;
 	u8 flags_attr;
-#define ISCSI_NOP_OUT_HDR_RSRV_MASK    0x7F
-#define ISCSI_NOP_OUT_HDR_RSRV_SHIFT   0
-#define ISCSI_NOP_OUT_HDR_CONST1_MASK  0x1
-#define ISCSI_NOP_OUT_HDR_CONST1_SHIFT 7
+#define ISCSI_NOP_OUT_HDR_RSRV_MASK	0x7F
+#define ISCSI_NOP_OUT_HDR_RSRV_SHIFT	0
+#define ISCSI_NOP_OUT_HDR_CONST1_MASK	0x1
+#define ISCSI_NOP_OUT_HDR_CONST1_SHIFT	7
 	u8 opcode;
 	__le32 reserved2;
 	struct regpair lun;
@@ -432,19 +467,20 @@ struct iscsi_nop_out_hdr {
 	__le32 reserved6;
 };
 
+/* iSCSI NOP-in PDU header */
 struct iscsi_nop_in_hdr {
 	__le16 reserved0;
 	u8 flags_attr;
-#define ISCSI_NOP_IN_HDR_RSRV_MASK           0x7F
-#define ISCSI_NOP_IN_HDR_RSRV_SHIFT          0
-#define ISCSI_NOP_IN_HDR_CONST1_MASK         0x1
-#define ISCSI_NOP_IN_HDR_CONST1_SHIFT        7
+#define ISCSI_NOP_IN_HDR_RSRV_MASK	0x7F
+#define ISCSI_NOP_IN_HDR_RSRV_SHIFT	0
+#define ISCSI_NOP_IN_HDR_CONST1_MASK	0x1
+#define ISCSI_NOP_IN_HDR_CONST1_SHIFT	7
 	u8 opcode;
 	__le32 hdr_second_dword;
-#define ISCSI_NOP_IN_HDR_DATA_SEG_LEN_MASK   0xFFFFFF
-#define ISCSI_NOP_IN_HDR_DATA_SEG_LEN_SHIFT  0
-#define ISCSI_NOP_IN_HDR_TOTAL_AHS_LEN_MASK  0xFF
-#define ISCSI_NOP_IN_HDR_TOTAL_AHS_LEN_SHIFT 24
+#define ISCSI_NOP_IN_HDR_DATA_SEG_LEN_MASK	0xFFFFFF
+#define ISCSI_NOP_IN_HDR_DATA_SEG_LEN_SHIFT	0
+#define ISCSI_NOP_IN_HDR_TOTAL_AHS_LEN_MASK	0xFF
+#define ISCSI_NOP_IN_HDR_TOTAL_AHS_LEN_SHIFT	24
 	struct regpair lun;
 	__le32 itt;
 	__le32 ttt;
@@ -456,26 +492,27 @@ struct iscsi_nop_in_hdr {
 	__le32 reserved7;
 };
 
+/* iSCSI Login Response PDU header */
 struct iscsi_login_response_hdr {
 	u8 version_active;
 	u8 version_max;
 	u8 flags_attr;
-#define ISCSI_LOGIN_RESPONSE_HDR_NSG_MASK            0x3
-#define ISCSI_LOGIN_RESPONSE_HDR_NSG_SHIFT           0
-#define ISCSI_LOGIN_RESPONSE_HDR_CSG_MASK            0x3
-#define ISCSI_LOGIN_RESPONSE_HDR_CSG_SHIFT           2
-#define ISCSI_LOGIN_RESPONSE_HDR_RSRV_MASK           0x3
-#define ISCSI_LOGIN_RESPONSE_HDR_RSRV_SHIFT          4
-#define ISCSI_LOGIN_RESPONSE_HDR_C_MASK              0x1
-#define ISCSI_LOGIN_RESPONSE_HDR_C_SHIFT             6
-#define ISCSI_LOGIN_RESPONSE_HDR_T_MASK              0x1
-#define ISCSI_LOGIN_RESPONSE_HDR_T_SHIFT             7
+#define ISCSI_LOGIN_RESPONSE_HDR_NSG_MASK	0x3
+#define ISCSI_LOGIN_RESPONSE_HDR_NSG_SHIFT	0
+#define ISCSI_LOGIN_RESPONSE_HDR_CSG_MASK	0x3
+#define ISCSI_LOGIN_RESPONSE_HDR_CSG_SHIFT	2
+#define ISCSI_LOGIN_RESPONSE_HDR_RSRV_MASK	0x3
+#define ISCSI_LOGIN_RESPONSE_HDR_RSRV_SHIFT	4
+#define ISCSI_LOGIN_RESPONSE_HDR_C_MASK		0x1
+#define ISCSI_LOGIN_RESPONSE_HDR_C_SHIFT	6
+#define ISCSI_LOGIN_RESPONSE_HDR_T_MASK		0x1
+#define ISCSI_LOGIN_RESPONSE_HDR_T_SHIFT	7
 	u8 opcode;
 	__le32 hdr_second_dword;
-#define ISCSI_LOGIN_RESPONSE_HDR_DATA_SEG_LEN_MASK   0xFFFFFF
-#define ISCSI_LOGIN_RESPONSE_HDR_DATA_SEG_LEN_SHIFT  0
-#define ISCSI_LOGIN_RESPONSE_HDR_TOTAL_AHS_LEN_MASK  0xFF
-#define ISCSI_LOGIN_RESPONSE_HDR_TOTAL_AHS_LEN_SHIFT 24
+#define ISCSI_LOGIN_RESPONSE_HDR_DATA_SEG_LEN_MASK	0xFFFFFF
+#define ISCSI_LOGIN_RESPONSE_HDR_DATA_SEG_LEN_SHIFT	0
+#define ISCSI_LOGIN_RESPONSE_HDR_TOTAL_AHS_LEN_MASK	0xFF
+#define ISCSI_LOGIN_RESPONSE_HDR_TOTAL_AHS_LEN_SHIFT	24
 	__le32 isid_tabc;
 	__le16 tsih;
 	__le16 isid_d;
@@ -490,16 +527,17 @@ struct iscsi_login_response_hdr {
 	__le32 reserved4[2];
 };
 
+/* iSCSI Logout Response PDU header */
 struct iscsi_logout_response_hdr {
 	u8 reserved1;
 	u8 response;
 	u8 flags;
 	u8 opcode;
 	__le32 hdr_second_dword;
-#define ISCSI_LOGOUT_RESPONSE_HDR_DATA_SEG_LEN_MASK   0xFFFFFF
-#define ISCSI_LOGOUT_RESPONSE_HDR_DATA_SEG_LEN_SHIFT  0
-#define ISCSI_LOGOUT_RESPONSE_HDR_TOTAL_AHS_LEN_MASK  0xFF
-#define ISCSI_LOGOUT_RESPONSE_HDR_TOTAL_AHS_LEN_SHIFT 24
+#define ISCSI_LOGOUT_RESPONSE_HDR_DATA_SEG_LEN_MASK	0xFFFFFF
+#define ISCSI_LOGOUT_RESPONSE_HDR_DATA_SEG_LEN_SHIFT	0
+#define ISCSI_LOGOUT_RESPONSE_HDR_TOTAL_AHS_LEN_MASK	0xFF
+#define ISCSI_LOGOUT_RESPONSE_HDR_TOTAL_AHS_LEN_SHIFT	24
 	__le32 reserved2[2];
 	__le32 itt;
 	__le32 reserved3;
@@ -512,21 +550,22 @@ struct iscsi_logout_response_hdr {
 	__le32 reserved5[1];
 };
 
+/* iSCSI Text Request PDU header */
 struct iscsi_text_request_hdr {
 	__le16 reserved0;
 	u8 flags_attr;
-#define ISCSI_TEXT_REQUEST_HDR_RSRV_MASK           0x3F
-#define ISCSI_TEXT_REQUEST_HDR_RSRV_SHIFT          0
-#define ISCSI_TEXT_REQUEST_HDR_C_MASK              0x1
-#define ISCSI_TEXT_REQUEST_HDR_C_SHIFT             6
-#define ISCSI_TEXT_REQUEST_HDR_F_MASK              0x1
-#define ISCSI_TEXT_REQUEST_HDR_F_SHIFT             7
+#define ISCSI_TEXT_REQUEST_HDR_RSRV_MASK	0x3F
+#define ISCSI_TEXT_REQUEST_HDR_RSRV_SHIFT	0
+#define ISCSI_TEXT_REQUEST_HDR_C_MASK		0x1
+#define ISCSI_TEXT_REQUEST_HDR_C_SHIFT		6
+#define ISCSI_TEXT_REQUEST_HDR_F_MASK		0x1
+#define ISCSI_TEXT_REQUEST_HDR_F_SHIFT		7
 	u8 opcode;
 	__le32 hdr_second_dword;
-#define ISCSI_TEXT_REQUEST_HDR_DATA_SEG_LEN_MASK   0xFFFFFF
-#define ISCSI_TEXT_REQUEST_HDR_DATA_SEG_LEN_SHIFT  0
-#define ISCSI_TEXT_REQUEST_HDR_TOTAL_AHS_LEN_MASK  0xFF
-#define ISCSI_TEXT_REQUEST_HDR_TOTAL_AHS_LEN_SHIFT 24
+#define ISCSI_TEXT_REQUEST_HDR_DATA_SEG_LEN_MASK	0xFFFFFF
+#define ISCSI_TEXT_REQUEST_HDR_DATA_SEG_LEN_SHIFT	0
+#define ISCSI_TEXT_REQUEST_HDR_TOTAL_AHS_LEN_MASK	0xFF
+#define ISCSI_TEXT_REQUEST_HDR_TOTAL_AHS_LEN_SHIFT	24
 	struct regpair lun;
 	__le32 itt;
 	__le32 ttt;
@@ -535,21 +574,22 @@ struct iscsi_text_request_hdr {
 	__le32 reserved4[4];
 };
 
+/* iSCSI Text Response PDU header */
 struct iscsi_text_response_hdr {
 	__le16 reserved1;
 	u8 flags;
-#define ISCSI_TEXT_RESPONSE_HDR_RSRV_MASK           0x3F
-#define ISCSI_TEXT_RESPONSE_HDR_RSRV_SHIFT          0
-#define ISCSI_TEXT_RESPONSE_HDR_C_MASK              0x1
-#define ISCSI_TEXT_RESPONSE_HDR_C_SHIFT             6
-#define ISCSI_TEXT_RESPONSE_HDR_F_MASK              0x1
-#define ISCSI_TEXT_RESPONSE_HDR_F_SHIFT             7
+#define ISCSI_TEXT_RESPONSE_HDR_RSRV_MASK	0x3F
+#define ISCSI_TEXT_RESPONSE_HDR_RSRV_SHIFT	0
+#define ISCSI_TEXT_RESPONSE_HDR_C_MASK		0x1
+#define ISCSI_TEXT_RESPONSE_HDR_C_SHIFT		6
+#define ISCSI_TEXT_RESPONSE_HDR_F_MASK		0x1
+#define ISCSI_TEXT_RESPONSE_HDR_F_SHIFT		7
 	u8 opcode;
 	__le32 hdr_second_dword;
-#define ISCSI_TEXT_RESPONSE_HDR_DATA_SEG_LEN_MASK   0xFFFFFF
-#define ISCSI_TEXT_RESPONSE_HDR_DATA_SEG_LEN_SHIFT  0
-#define ISCSI_TEXT_RESPONSE_HDR_TOTAL_AHS_LEN_MASK  0xFF
-#define ISCSI_TEXT_RESPONSE_HDR_TOTAL_AHS_LEN_SHIFT 24
+#define ISCSI_TEXT_RESPONSE_HDR_DATA_SEG_LEN_MASK	0xFFFFFF
+#define ISCSI_TEXT_RESPONSE_HDR_DATA_SEG_LEN_SHIFT	0
+#define ISCSI_TEXT_RESPONSE_HDR_TOTAL_AHS_LEN_MASK	0xFF
+#define ISCSI_TEXT_RESPONSE_HDR_TOTAL_AHS_LEN_SHIFT	24
 	struct regpair lun;
 	__le32 itt;
 	__le32 ttt;
@@ -559,15 +599,16 @@ struct iscsi_text_response_hdr {
 	__le32 reserved4[3];
 };
 
+/* iSCSI TMF Request PDU header */
 struct iscsi_tmf_request_hdr {
 	__le16 reserved0;
 	u8 function;
 	u8 opcode;
 	__le32 hdr_second_dword;
-#define ISCSI_TMF_REQUEST_HDR_DATA_SEG_LEN_MASK   0xFFFFFF
-#define ISCSI_TMF_REQUEST_HDR_DATA_SEG_LEN_SHIFT  0
-#define ISCSI_TMF_REQUEST_HDR_TOTAL_AHS_LEN_MASK  0xFF
-#define ISCSI_TMF_REQUEST_HDR_TOTAL_AHS_LEN_SHIFT 24
+#define ISCSI_TMF_REQUEST_HDR_DATA_SEG_LEN_MASK		0xFFFFFF
+#define ISCSI_TMF_REQUEST_HDR_DATA_SEG_LEN_SHIFT	0
+#define ISCSI_TMF_REQUEST_HDR_TOTAL_AHS_LEN_MASK	0xFF
+#define ISCSI_TMF_REQUEST_HDR_TOTAL_AHS_LEN_SHIFT	24
 	struct regpair lun;
 	__le32 itt;
 	__le32 rtt;
@@ -584,10 +625,10 @@ struct iscsi_tmf_response_hdr {
 	u8 hdr_flags;
 	u8 opcode;
 	__le32 hdr_second_dword;
-#define ISCSI_TMF_RESPONSE_HDR_DATA_SEG_LEN_MASK   0xFFFFFF
-#define ISCSI_TMF_RESPONSE_HDR_DATA_SEG_LEN_SHIFT  0
-#define ISCSI_TMF_RESPONSE_HDR_TOTAL_AHS_LEN_MASK  0xFF
-#define ISCSI_TMF_RESPONSE_HDR_TOTAL_AHS_LEN_SHIFT 24
+#define ISCSI_TMF_RESPONSE_HDR_DATA_SEG_LEN_MASK	0xFFFFFF
+#define ISCSI_TMF_RESPONSE_HDR_DATA_SEG_LEN_SHIFT	0
+#define ISCSI_TMF_RESPONSE_HDR_TOTAL_AHS_LEN_MASK	0xFF
+#define ISCSI_TMF_RESPONSE_HDR_TOTAL_AHS_LEN_SHIFT	24
 	struct regpair reserved0;
 	__le32 itt;
 	__le32 reserved1;
@@ -597,16 +638,17 @@ struct iscsi_tmf_response_hdr {
 	__le32 reserved4[3];
 };
 
+/* iSCSI Response PDU header */
 struct iscsi_response_hdr {
 	u8 hdr_status;
 	u8 hdr_response;
 	u8 hdr_flags;
 	u8 opcode;
 	__le32 hdr_second_dword;
-#define ISCSI_RESPONSE_HDR_DATA_SEG_LEN_MASK   0xFFFFFF
-#define ISCSI_RESPONSE_HDR_DATA_SEG_LEN_SHIFT  0
-#define ISCSI_RESPONSE_HDR_TOTAL_AHS_LEN_MASK  0xFF
-#define ISCSI_RESPONSE_HDR_TOTAL_AHS_LEN_SHIFT 24
+#define ISCSI_RESPONSE_HDR_DATA_SEG_LEN_MASK	0xFFFFFF
+#define ISCSI_RESPONSE_HDR_DATA_SEG_LEN_SHIFT	0
+#define ISCSI_RESPONSE_HDR_TOTAL_AHS_LEN_MASK	0xFF
+#define ISCSI_RESPONSE_HDR_TOTAL_AHS_LEN_SHIFT	24
 	struct regpair lun;
 	__le32 itt;
 	__le32 snack_tag;
@@ -618,16 +660,17 @@ struct iscsi_response_hdr {
 	__le32 residual_count;
 };
 
+/* iSCSI Reject PDU header */
 struct iscsi_reject_hdr {
 	u8 reserved4;
 	u8 hdr_reason;
 	u8 hdr_flags;
 	u8 opcode;
 	__le32 hdr_second_dword;
-#define ISCSI_REJECT_HDR_DATA_SEG_LEN_MASK   0xFFFFFF
-#define ISCSI_REJECT_HDR_DATA_SEG_LEN_SHIFT  0
-#define ISCSI_REJECT_HDR_TOTAL_AHS_LEN_MASK  0xFF
-#define ISCSI_REJECT_HDR_TOTAL_AHS_LEN_SHIFT 24
+#define ISCSI_REJECT_HDR_DATA_SEG_LEN_MASK	0xFFFFFF
+#define ISCSI_REJECT_HDR_DATA_SEG_LEN_SHIFT	0
+#define ISCSI_REJECT_HDR_TOTAL_AHS_LEN_MASK	0xFF
+#define ISCSI_REJECT_HDR_TOTAL_AHS_LEN_SHIFT	24
 	struct regpair reserved0;
 	__le32 all_ones;
 	__le32 reserved2;
@@ -638,6 +681,35 @@ struct iscsi_reject_hdr {
 	__le32 reserved3[2];
 };
 
+/* iSCSI Asynchronous Message PDU header */
+struct iscsi_async_msg_hdr {
+	__le16 reserved0;
+	u8 flags_attr;
+#define ISCSI_ASYNC_MSG_HDR_RSRV_MASK		0x7F
+#define ISCSI_ASYNC_MSG_HDR_RSRV_SHIFT		0
+#define ISCSI_ASYNC_MSG_HDR_CONST1_MASK		0x1
+#define ISCSI_ASYNC_MSG_HDR_CONST1_SHIFT	7
+	u8 opcode;
+	__le32 hdr_second_dword;
+#define ISCSI_ASYNC_MSG_HDR_DATA_SEG_LEN_MASK	0xFFFFFF
+#define ISCSI_ASYNC_MSG_HDR_DATA_SEG_LEN_SHIFT	0
+#define ISCSI_ASYNC_MSG_HDR_TOTAL_AHS_LEN_MASK	0xFF
+#define ISCSI_ASYNC_MSG_HDR_TOTAL_AHS_LEN_SHIFT	24
+	struct regpair lun;
+	__le32 all_ones;
+	__le32 reserved1;
+	__le32 stat_sn;
+	__le32 exp_cmd_sn;
+	__le32 max_cmd_sn;
+	__le16 param1_rsrv;
+	u8 async_vcode;
+	u8 async_event;
+	__le16 param3_rsrv;
+	__le16 param2_rsrv;
+	__le32 reserved7;
+};
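
Every flags field in these HSI structures is packed through paired _MASK/_SHIFT defines rather than C bitfields. The sketch below shows the convention on the async-message flags byte; GET_FIELD()/SET_FIELD() mirror the helpers the qed common headers provide, and the two defines are copied from the structure above. A minimal standalone illustration, not the kernel implementation.

	/*
	 * Standalone sketch of the MASK/SHIFT packing convention used
	 * throughout this header.
	 */
	#include <stdint.h>
	#include <stdio.h>

	#define ISCSI_ASYNC_MSG_HDR_RSRV_MASK		0x7F
	#define ISCSI_ASYNC_MSG_HDR_RSRV_SHIFT		0
	#define ISCSI_ASYNC_MSG_HDR_CONST1_MASK		0x1
	#define ISCSI_ASYNC_MSG_HDR_CONST1_SHIFT	7

	#define GET_FIELD(value, name) \
		(((value) >> (name##_SHIFT)) & name##_MASK)
	#define SET_FIELD(value, name, flag) \
		do { \
			(value) &= ~((name##_MASK) << (name##_SHIFT)); \
			(value) |= ((flag) & (name##_MASK)) << (name##_SHIFT); \
		} while (0)

	int main(void)
	{
		uint8_t flags_attr = 0;

		/* Set the constant-one bit; the reserved field stays zero */
		SET_FIELD(flags_attr, ISCSI_ASYNC_MSG_HDR_CONST1, 1);
		printf("flags_attr=0x%02x const1=%u rsrv=%u\n", flags_attr,
		       (unsigned)GET_FIELD(flags_attr, ISCSI_ASYNC_MSG_HDR_CONST1),
		       (unsigned)GET_FIELD(flags_attr, ISCSI_ASYNC_MSG_HDR_RSRV));
		return 0;
	}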
+
+/* PDU header part of Ystorm task context */
 union iscsi_task_hdr {
 	struct iscsi_common_hdr common;
 	struct data_hdr data;
@@ -661,6 +733,348 @@ union iscsi_task_hdr {
 	struct iscsi_async_msg_hdr async_msg;
 };
 
+/* The iscsi storm task context of Ystorm */
+struct ystorm_iscsi_task_st_ctx {
+	struct ystorm_iscsi_task_state state;
+	struct ystorm_iscsi_task_rxmit_opt rxmit_opt;
+	union iscsi_task_hdr pdu_hdr;
+};
+
+struct e4_ystorm_iscsi_task_ag_ctx {
+	u8 reserved;
+	u8 byte1;
+	__le16 word0;
+	u8 flags0;
+#define E4_YSTORM_ISCSI_TASK_AG_CTX_NIBBLE0_MASK	0xF
+#define E4_YSTORM_ISCSI_TASK_AG_CTX_NIBBLE0_SHIFT	0
+#define E4_YSTORM_ISCSI_TASK_AG_CTX_BIT0_MASK		0x1
+#define E4_YSTORM_ISCSI_TASK_AG_CTX_BIT0_SHIFT		4
+#define E4_YSTORM_ISCSI_TASK_AG_CTX_BIT1_MASK		0x1
+#define E4_YSTORM_ISCSI_TASK_AG_CTX_BIT1_SHIFT		5
+#define E4_YSTORM_ISCSI_TASK_AG_CTX_VALID_MASK		0x1
+#define E4_YSTORM_ISCSI_TASK_AG_CTX_VALID_SHIFT		6
+#define E4_YSTORM_ISCSI_TASK_AG_CTX_BIT3_MASK		0x1
+#define E4_YSTORM_ISCSI_TASK_AG_CTX_BIT3_SHIFT		7
+	u8 flags1;
+#define E4_YSTORM_ISCSI_TASK_AG_CTX_CF0_MASK		0x3
+#define E4_YSTORM_ISCSI_TASK_AG_CTX_CF0_SHIFT		0
+#define E4_YSTORM_ISCSI_TASK_AG_CTX_CF1_MASK		0x3
+#define E4_YSTORM_ISCSI_TASK_AG_CTX_CF1_SHIFT		2
+#define E4_YSTORM_ISCSI_TASK_AG_CTX_CF2SPECIAL_MASK	0x3
+#define E4_YSTORM_ISCSI_TASK_AG_CTX_CF2SPECIAL_SHIFT	4
+#define E4_YSTORM_ISCSI_TASK_AG_CTX_CF0EN_MASK		0x1
+#define E4_YSTORM_ISCSI_TASK_AG_CTX_CF0EN_SHIFT		6
+#define E4_YSTORM_ISCSI_TASK_AG_CTX_CF1EN_MASK		0x1
+#define E4_YSTORM_ISCSI_TASK_AG_CTX_CF1EN_SHIFT		7
+	u8 flags2;
+#define E4_YSTORM_ISCSI_TASK_AG_CTX_BIT4_MASK		0x1
+#define E4_YSTORM_ISCSI_TASK_AG_CTX_BIT4_SHIFT		0
+#define E4_YSTORM_ISCSI_TASK_AG_CTX_RULE0EN_MASK	0x1
+#define E4_YSTORM_ISCSI_TASK_AG_CTX_RULE0EN_SHIFT	1
+#define E4_YSTORM_ISCSI_TASK_AG_CTX_RULE1EN_MASK	0x1
+#define E4_YSTORM_ISCSI_TASK_AG_CTX_RULE1EN_SHIFT	2
+#define E4_YSTORM_ISCSI_TASK_AG_CTX_RULE2EN_MASK	0x1
+#define E4_YSTORM_ISCSI_TASK_AG_CTX_RULE2EN_SHIFT	3
+#define E4_YSTORM_ISCSI_TASK_AG_CTX_RULE3EN_MASK	0x1
+#define E4_YSTORM_ISCSI_TASK_AG_CTX_RULE3EN_SHIFT	4
+#define E4_YSTORM_ISCSI_TASK_AG_CTX_RULE4EN_MASK	0x1
+#define E4_YSTORM_ISCSI_TASK_AG_CTX_RULE4EN_SHIFT	5
+#define E4_YSTORM_ISCSI_TASK_AG_CTX_RULE5EN_MASK	0x1
+#define E4_YSTORM_ISCSI_TASK_AG_CTX_RULE5EN_SHIFT	6
+#define E4_YSTORM_ISCSI_TASK_AG_CTX_RULE6EN_MASK	0x1
+#define E4_YSTORM_ISCSI_TASK_AG_CTX_RULE6EN_SHIFT	7
+	u8 byte2;
+	__le32 TTT;
+	u8 byte3;
+	u8 byte4;
+	__le16 word1;
+};
+
+struct e4_mstorm_iscsi_task_ag_ctx {
+	u8 cdu_validation;
+	u8 byte1;
+	__le16 task_cid;
+	u8 flags0;
+#define E4_MSTORM_ISCSI_TASK_AG_CTX_CONNECTION_TYPE_MASK	0xF
+#define E4_MSTORM_ISCSI_TASK_AG_CTX_CONNECTION_TYPE_SHIFT	0
+#define E4_MSTORM_ISCSI_TASK_AG_CTX_EXIST_IN_QM0_MASK		0x1
+#define E4_MSTORM_ISCSI_TASK_AG_CTX_EXIST_IN_QM0_SHIFT		4
+#define E4_MSTORM_ISCSI_TASK_AG_CTX_BIT1_MASK			0x1
+#define E4_MSTORM_ISCSI_TASK_AG_CTX_BIT1_SHIFT			5
+#define E4_MSTORM_ISCSI_TASK_AG_CTX_VALID_MASK			0x1
+#define E4_MSTORM_ISCSI_TASK_AG_CTX_VALID_SHIFT			6
+#define E4_MSTORM_ISCSI_TASK_AG_CTX_TASK_CLEANUP_FLAG_MASK	0x1
+#define E4_MSTORM_ISCSI_TASK_AG_CTX_TASK_CLEANUP_FLAG_SHIFT	7
+	u8 flags1;
+#define E4_MSTORM_ISCSI_TASK_AG_CTX_TASK_CLEANUP_CF_MASK	0x3
+#define E4_MSTORM_ISCSI_TASK_AG_CTX_TASK_CLEANUP_CF_SHIFT	0
+#define E4_MSTORM_ISCSI_TASK_AG_CTX_CF1_MASK			0x3
+#define E4_MSTORM_ISCSI_TASK_AG_CTX_CF1_SHIFT			2
+#define E4_MSTORM_ISCSI_TASK_AG_CTX_CF2_MASK			0x3
+#define E4_MSTORM_ISCSI_TASK_AG_CTX_CF2_SHIFT			4
+#define E4_MSTORM_ISCSI_TASK_AG_CTX_TASK_CLEANUP_CF_EN_MASK	0x1
+#define E4_MSTORM_ISCSI_TASK_AG_CTX_TASK_CLEANUP_CF_EN_SHIFT	6
+#define E4_MSTORM_ISCSI_TASK_AG_CTX_CF1EN_MASK			0x1
+#define E4_MSTORM_ISCSI_TASK_AG_CTX_CF1EN_SHIFT			7
+	u8 flags2;
+#define E4_MSTORM_ISCSI_TASK_AG_CTX_CF2EN_MASK		0x1
+#define E4_MSTORM_ISCSI_TASK_AG_CTX_CF2EN_SHIFT		0
+#define E4_MSTORM_ISCSI_TASK_AG_CTX_RULE0EN_MASK	0x1
+#define E4_MSTORM_ISCSI_TASK_AG_CTX_RULE0EN_SHIFT	1
+#define E4_MSTORM_ISCSI_TASK_AG_CTX_RULE1EN_MASK	0x1
+#define E4_MSTORM_ISCSI_TASK_AG_CTX_RULE1EN_SHIFT	2
+#define E4_MSTORM_ISCSI_TASK_AG_CTX_RULE2EN_MASK	0x1
+#define E4_MSTORM_ISCSI_TASK_AG_CTX_RULE2EN_SHIFT	3
+#define E4_MSTORM_ISCSI_TASK_AG_CTX_RULE3EN_MASK	0x1
+#define E4_MSTORM_ISCSI_TASK_AG_CTX_RULE3EN_SHIFT	4
+#define E4_MSTORM_ISCSI_TASK_AG_CTX_RULE4EN_MASK	0x1
+#define E4_MSTORM_ISCSI_TASK_AG_CTX_RULE4EN_SHIFT	5
+#define E4_MSTORM_ISCSI_TASK_AG_CTX_RULE5EN_MASK	0x1
+#define E4_MSTORM_ISCSI_TASK_AG_CTX_RULE5EN_SHIFT	6
+#define E4_MSTORM_ISCSI_TASK_AG_CTX_RULE6EN_MASK	0x1
+#define E4_MSTORM_ISCSI_TASK_AG_CTX_RULE6EN_SHIFT	7
+	u8 byte2;
+	__le32 reg0;
+	u8 byte3;
+	u8 byte4;
+	__le16 word1;
+};
+
+struct e4_ustorm_iscsi_task_ag_ctx {
+	u8 reserved;
+	u8 state;
+	__le16 icid;
+	u8 flags0;
+#define E4_USTORM_ISCSI_TASK_AG_CTX_CONNECTION_TYPE_MASK	0xF
+#define E4_USTORM_ISCSI_TASK_AG_CTX_CONNECTION_TYPE_SHIFT	0
+#define E4_USTORM_ISCSI_TASK_AG_CTX_EXIST_IN_QM0_MASK		0x1
+#define E4_USTORM_ISCSI_TASK_AG_CTX_EXIST_IN_QM0_SHIFT		4
+#define E4_USTORM_ISCSI_TASK_AG_CTX_BIT1_MASK			0x1
+#define E4_USTORM_ISCSI_TASK_AG_CTX_BIT1_SHIFT			5
+#define E4_USTORM_ISCSI_TASK_AG_CTX_HQ_SCANNED_CF_MASK		0x3
+#define E4_USTORM_ISCSI_TASK_AG_CTX_HQ_SCANNED_CF_SHIFT		6
+	u8 flags1;
+#define E4_USTORM_ISCSI_TASK_AG_CTX_RESERVED1_MASK	0x3
+#define E4_USTORM_ISCSI_TASK_AG_CTX_RESERVED1_SHIFT	0
+#define E4_USTORM_ISCSI_TASK_AG_CTX_R2T2RECV_MASK	0x3
+#define E4_USTORM_ISCSI_TASK_AG_CTX_R2T2RECV_SHIFT	2
+#define E4_USTORM_ISCSI_TASK_AG_CTX_CF3_MASK		0x3
+#define E4_USTORM_ISCSI_TASK_AG_CTX_CF3_SHIFT		4
+#define E4_USTORM_ISCSI_TASK_AG_CTX_DIF_ERROR_CF_MASK	0x3
+#define E4_USTORM_ISCSI_TASK_AG_CTX_DIF_ERROR_CF_SHIFT	6
+	u8 flags2;
+#define E4_USTORM_ISCSI_TASK_AG_CTX_HQ_SCANNED_CF_EN_MASK	0x1
+#define E4_USTORM_ISCSI_TASK_AG_CTX_HQ_SCANNED_CF_EN_SHIFT	0
+#define E4_USTORM_ISCSI_TASK_AG_CTX_DISABLE_DATA_ACKED_MASK	0x1
+#define E4_USTORM_ISCSI_TASK_AG_CTX_DISABLE_DATA_ACKED_SHIFT	1
+#define E4_USTORM_ISCSI_TASK_AG_CTX_R2T2RECV_EN_MASK		0x1
+#define E4_USTORM_ISCSI_TASK_AG_CTX_R2T2RECV_EN_SHIFT		2
+#define E4_USTORM_ISCSI_TASK_AG_CTX_CF3EN_MASK			0x1
+#define E4_USTORM_ISCSI_TASK_AG_CTX_CF3EN_SHIFT			3
+#define E4_USTORM_ISCSI_TASK_AG_CTX_DIF_ERROR_CF_EN_MASK	0x1
+#define E4_USTORM_ISCSI_TASK_AG_CTX_DIF_ERROR_CF_EN_SHIFT	4
+#define E4_USTORM_ISCSI_TASK_AG_CTX_CMP_DATA_TOTAL_EXP_EN_MASK	0x1
+#define E4_USTORM_ISCSI_TASK_AG_CTX_CMP_DATA_TOTAL_EXP_EN_SHIFT	5
+#define E4_USTORM_ISCSI_TASK_AG_CTX_RULE1EN_MASK		0x1
+#define E4_USTORM_ISCSI_TASK_AG_CTX_RULE1EN_SHIFT		6
+#define E4_USTORM_ISCSI_TASK_AG_CTX_CMP_CONT_RCV_EXP_EN_MASK	0x1
+#define E4_USTORM_ISCSI_TASK_AG_CTX_CMP_CONT_RCV_EXP_EN_SHIFT	7
+	u8 flags3;
+#define E4_USTORM_ISCSI_TASK_AG_CTX_RULE3EN_MASK		0x1
+#define E4_USTORM_ISCSI_TASK_AG_CTX_RULE3EN_SHIFT		0
+#define E4_USTORM_ISCSI_TASK_AG_CTX_RULE4EN_MASK		0x1
+#define E4_USTORM_ISCSI_TASK_AG_CTX_RULE4EN_SHIFT		1
+#define E4_USTORM_ISCSI_TASK_AG_CTX_RULE5EN_MASK		0x1
+#define E4_USTORM_ISCSI_TASK_AG_CTX_RULE5EN_SHIFT		2
+#define E4_USTORM_ISCSI_TASK_AG_CTX_RULE6EN_MASK		0x1
+#define E4_USTORM_ISCSI_TASK_AG_CTX_RULE6EN_SHIFT		3
+#define E4_USTORM_ISCSI_TASK_AG_CTX_DIF_ERROR_TYPE_MASK		0xF
+#define E4_USTORM_ISCSI_TASK_AG_CTX_DIF_ERROR_TYPE_SHIFT	4
+	__le32 dif_err_intervals;
+	__le32 dif_error_1st_interval;
+	__le32 rcv_cont_len;
+	__le32 exp_cont_len;
+	__le32 total_data_acked;
+	__le32 exp_data_acked;
+	u8 next_tid_valid;
+	u8 byte3;
+	__le16 word1;
+	__le16 next_tid;
+	__le16 word3;
+	__le32 hdr_residual_count;
+	__le32 exp_r2t_sn;
+};
+
+/* The iscsi storm task context of Mstorm */
+struct mstorm_iscsi_task_st_ctx {
+	struct scsi_cached_sges data_desc;
+	struct scsi_sgl_params sgl_params;
+	__le32 rem_task_size;
+	__le32 data_buffer_offset;
+	u8 task_type;
+	struct iscsi_dif_flags dif_flags;
+	__le16 dif_task_icid;
+	struct regpair sense_db;
+	__le32 expected_itt;
+	__le32 reserved1;
+};
+
+struct iscsi_reg1 {
+	__le32 reg1_map;
+#define ISCSI_REG1_NUM_SGES_MASK	0xF
+#define ISCSI_REG1_NUM_SGES_SHIFT	0
+#define ISCSI_REG1_RESERVED1_MASK	0xFFFFFFF
+#define ISCSI_REG1_RESERVED1_SHIFT	4
+};
+
+struct tqe_opaque {
+	__le16 opaque[2];
+};
+
+/* The iscsi storm task context of Ustorm */
+struct ustorm_iscsi_task_st_ctx {
+	__le32 rem_rcv_len;
+	__le32 exp_data_transfer_len;
+	__le32 exp_data_sn;
+	struct regpair lun;
+	struct iscsi_reg1 reg1;
+	u8 flags2;
+#define USTORM_ISCSI_TASK_ST_CTX_AHS_EXIST_MASK		0x1
+#define USTORM_ISCSI_TASK_ST_CTX_AHS_EXIST_SHIFT	0
+#define USTORM_ISCSI_TASK_ST_CTX_RESERVED1_MASK		0x7F
+#define USTORM_ISCSI_TASK_ST_CTX_RESERVED1_SHIFT	1
+	struct iscsi_dif_flags dif_flags;
+	__le16 reserved3;
+	struct tqe_opaque tqe_opaque_list;
+	__le32 reserved5;
+	__le32 reserved6;
+	__le32 reserved7;
+	u8 task_type;
+	u8 error_flags;
+#define USTORM_ISCSI_TASK_ST_CTX_DATA_DIGEST_ERROR_MASK		0x1
+#define USTORM_ISCSI_TASK_ST_CTX_DATA_DIGEST_ERROR_SHIFT	0
+#define USTORM_ISCSI_TASK_ST_CTX_DATA_TRUNCATED_ERROR_MASK	0x1
+#define USTORM_ISCSI_TASK_ST_CTX_DATA_TRUNCATED_ERROR_SHIFT	1
+#define USTORM_ISCSI_TASK_ST_CTX_UNDER_RUN_ERROR_MASK		0x1
+#define USTORM_ISCSI_TASK_ST_CTX_UNDER_RUN_ERROR_SHIFT		2
+#define USTORM_ISCSI_TASK_ST_CTX_RESERVED8_MASK			0x1F
+#define USTORM_ISCSI_TASK_ST_CTX_RESERVED8_SHIFT		3
+	u8 flags;
+#define USTORM_ISCSI_TASK_ST_CTX_CQE_WRITE_MASK			0x3
+#define USTORM_ISCSI_TASK_ST_CTX_CQE_WRITE_SHIFT		0
+#define USTORM_ISCSI_TASK_ST_CTX_LOCAL_COMP_MASK		0x1
+#define USTORM_ISCSI_TASK_ST_CTX_LOCAL_COMP_SHIFT		2
+#define USTORM_ISCSI_TASK_ST_CTX_Q0_R2TQE_WRITE_MASK		0x1
+#define USTORM_ISCSI_TASK_ST_CTX_Q0_R2TQE_WRITE_SHIFT		3
+#define USTORM_ISCSI_TASK_ST_CTX_TOTAL_DATA_ACKED_DONE_MASK	0x1
+#define USTORM_ISCSI_TASK_ST_CTX_TOTAL_DATA_ACKED_DONE_SHIFT	4
+#define USTORM_ISCSI_TASK_ST_CTX_HQ_SCANNED_DONE_MASK		0x1
+#define USTORM_ISCSI_TASK_ST_CTX_HQ_SCANNED_DONE_SHIFT		5
+#define USTORM_ISCSI_TASK_ST_CTX_R2T2RECV_DONE_MASK		0x1
+#define USTORM_ISCSI_TASK_ST_CTX_R2T2RECV_DONE_SHIFT		6
+#define USTORM_ISCSI_TASK_ST_CTX_RESERVED0_MASK			0x1
+#define USTORM_ISCSI_TASK_ST_CTX_RESERVED0_SHIFT		7
+	u8 cq_rss_number;
+};
+
+/* iscsi task context */
+struct e4_iscsi_task_context {
+	struct ystorm_iscsi_task_st_ctx ystorm_st_context;
+	struct e4_ystorm_iscsi_task_ag_ctx ystorm_ag_context;
+	struct regpair ystorm_ag_padding[2];
+	struct tdif_task_context tdif_context;
+	struct e4_mstorm_iscsi_task_ag_ctx mstorm_ag_context;
+	struct regpair mstorm_ag_padding[2];
+	struct e4_ustorm_iscsi_task_ag_ctx ustorm_ag_context;
+	struct mstorm_iscsi_task_st_ctx mstorm_st_context;
+	struct ustorm_iscsi_task_st_ctx ustorm_st_context;
+	struct rdif_task_context rdif_context;
+};
+
+/* iSCSI connection offload params passed by driver to FW in ISCSI offload
+ * ramrod.
+ */
+struct iscsi_conn_offload_params {
+	struct regpair sq_pbl_addr;
+	struct regpair r2tq_pbl_addr;
+	struct regpair xhq_pbl_addr;
+	struct regpair uhq_pbl_addr;
+	__le32 initial_ack;
+	__le16 physical_q0;
+	__le16 physical_q1;
+	u8 flags;
+#define ISCSI_CONN_OFFLOAD_PARAMS_TCP_ON_CHIP_1B_MASK	0x1
+#define ISCSI_CONN_OFFLOAD_PARAMS_TCP_ON_CHIP_1B_SHIFT	0
+#define ISCSI_CONN_OFFLOAD_PARAMS_TARGET_MODE_MASK	0x1
+#define ISCSI_CONN_OFFLOAD_PARAMS_TARGET_MODE_SHIFT	1
+#define ISCSI_CONN_OFFLOAD_PARAMS_RESTRICTED_MODE_MASK	0x1
+#define ISCSI_CONN_OFFLOAD_PARAMS_RESTRICTED_MODE_SHIFT	2
+#define ISCSI_CONN_OFFLOAD_PARAMS_RESERVED1_MASK	0x1F
+#define ISCSI_CONN_OFFLOAD_PARAMS_RESERVED1_SHIFT	3
+	u8 pbl_page_size_log;
+	u8 pbe_page_size_log;
+	u8 default_cq;
+	__le32 stat_sn;
+};
+
+/* iSCSI connection statistics */
+struct iscsi_conn_stats_params {
+	struct regpair iscsi_tcp_tx_packets_cnt;
+	struct regpair iscsi_tcp_tx_bytes_cnt;
+	struct regpair iscsi_tcp_tx_rxmit_cnt;
+	struct regpair iscsi_tcp_rx_packets_cnt;
+	struct regpair iscsi_tcp_rx_bytes_cnt;
+	struct regpair iscsi_tcp_rx_dup_ack_cnt;
+	__le32 iscsi_tcp_rx_chksum_err_cnt;
+	__le32 reserved;
+};
+
+/* SPE (slow path element) message header */
+struct iscsi_slow_path_hdr {
+	u8 op_code;
+	u8 flags;
+#define ISCSI_SLOW_PATH_HDR_RESERVED0_MASK	0xF
+#define ISCSI_SLOW_PATH_HDR_RESERVED0_SHIFT	0
+#define ISCSI_SLOW_PATH_HDR_LAYER_CODE_MASK	0x7
+#define ISCSI_SLOW_PATH_HDR_LAYER_CODE_SHIFT	4
+#define ISCSI_SLOW_PATH_HDR_RESERVED1_MASK	0x1
+#define ISCSI_SLOW_PATH_HDR_RESERVED1_SHIFT	7
+};
+
+/* iSCSI connection update params passed by driver to FW in ISCSI update
+ * ramrod.
+ */
+struct iscsi_conn_update_ramrod_params {
+	struct iscsi_slow_path_hdr hdr;
+	__le16 conn_id;
+	__le32 fw_cid;
+	u8 flags;
+#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_HD_EN_MASK		0x1
+#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_HD_EN_SHIFT		0
+#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_DD_EN_MASK		0x1
+#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_DD_EN_SHIFT		1
+#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_INITIAL_R2T_MASK	0x1
+#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_INITIAL_R2T_SHIFT	2
+#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_IMMEDIATE_DATA_MASK	0x1
+#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_IMMEDIATE_DATA_SHIFT	3
+#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_DIF_BLOCK_SIZE_MASK	0x1
+#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_DIF_BLOCK_SIZE_SHIFT	4
+#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_DIF_ON_HOST_EN_MASK	0x1
+#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_DIF_ON_HOST_EN_SHIFT	5
+#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_DIF_ON_IMM_EN_MASK	0x1
+#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_DIF_ON_IMM_EN_SHIFT	6
+#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_LUN_MAPPER_EN_MASK	0x1
+#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_LUN_MAPPER_EN_SHIFT	7
+	u8 reserved0[3];
+	__le32 max_seq_size;
+	__le32 max_send_pdu_length;
+	__le32 max_recv_pdu_length;
+	__le32 first_seq_length;
+	__le32 exp_stat_sn;
+	union dif_configuration_params dif_on_imme_params;
+};
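
The update-ramrod flags byte carries the per-connection results of login negotiation (HeaderDigest, DataDigest, InitialR2T, ImmediateData, plus the DIF options). A hedged sketch of packing the first four, with the shift defines copied from the structure above; a real driver would likely use the kernel's SET_FIELD() helper instead of open-coding the shifts.

	#include <stdint.h>

	#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_HD_EN_SHIFT		0
	#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_DD_EN_SHIFT		1
	#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_INITIAL_R2T_SHIFT	2
	#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_IMMEDIATE_DATA_SHIFT	3

	/* Fold negotiated login options into the ramrod flags byte */
	static uint8_t iscsi_pack_update_flags(int hdr_digest, int data_digest,
					       int initial_r2t, int immediate_data)
	{
		uint8_t flags = 0;

		flags |= (!!hdr_digest) << ISCSI_CONN_UPDATE_RAMROD_PARAMS_HD_EN_SHIFT;
		flags |= (!!data_digest) << ISCSI_CONN_UPDATE_RAMROD_PARAMS_DD_EN_SHIFT;
		flags |= (!!initial_r2t) << ISCSI_CONN_UPDATE_RAMROD_PARAMS_INITIAL_R2T_SHIFT;
		flags |= (!!immediate_data) << ISCSI_CONN_UPDATE_RAMROD_PARAMS_IMMEDIATE_DATA_SHIFT;
		return flags;
	}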
+
+/* iSCSI CQ element */
 struct iscsi_cqe_common {
 	__le16 conn_id;
 	u8 cqe_type;
@@ -669,6 +1083,7 @@ struct iscsi_cqe_common {
 	union iscsi_task_hdr iscsi_hdr;
 };
 
+/* iSCSI solicited CQ element */
 struct iscsi_cqe_solicited {
 	__le16 conn_id;
 	u8 cqe_type;
@@ -678,10 +1093,11 @@ struct iscsi_cqe_solicited {
 	u8 fw_dbg_field;
 	u8 caused_conn_err;
 	u8 reserved0[3];
-	__le32 reserved1[1];
+	__le32 data_truncated_bytes;
 	union iscsi_task_hdr iscsi_hdr;
 };
 
+/* iSCSI unsolicited CQ element */
 struct iscsi_cqe_unsolicited {
 	__le16 conn_id;
 	u8 cqe_type;
@@ -689,16 +1105,19 @@ struct iscsi_cqe_unsolicited {
 	__le16 reserved0;
 	u8 reserved1;
 	u8 unsol_cqe_type;
-	struct regpair rqe_opaque;
+	__le16 rqe_opaque;
+	__le16 reserved2[3];
 	union iscsi_task_hdr iscsi_hdr;
 };
 
+/* iSCSI CQ element (union of the formats above) */
 union iscsi_cqe {
 	struct iscsi_cqe_common cqe_common;
 	struct iscsi_cqe_solicited cqe_solicited;
 	struct iscsi_cqe_unsolicited cqe_unsolicited;
 };
 
+/* iSCSI CQE type */
 enum iscsi_cqes_type {
 	ISCSI_CQE_TYPE_SOLICITED = 1,
 	ISCSI_CQE_TYPE_UNSOLICITED,
@@ -708,6 +1127,7 @@ enum iscsi_cqes_type {
 	MAX_ISCSI_CQES_TYPE
 };
 
+/* iSCSI unsolicited CQE type */
 enum iscsi_cqe_unsolicited_type {
 	ISCSI_CQE_UNSOLICITED_NONE,
 	ISCSI_CQE_UNSOLICITED_SINGLE,
@@ -717,37 +1137,28 @@ enum iscsi_cqe_unsolicited_type {
 	MAX_ISCSI_CQE_UNSOLICITED_TYPE
 };
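
The solicited and unsolicited CQE formats share the leading conn_id/cqe_type fields, so a consumer can demultiplex on cqe_type before touching format-specific members. This revision also turns the solicited reserved dword into data_truncated_bytes and shrinks the unsolicited rqe_opaque cookie to 16 bits. A hedged sketch, assuming the declarations from this header and the kernel byte-order helpers are in scope; handle_solicited()/handle_unsolicited() are hypothetical driver callbacks.

	static void handle_solicited(struct iscsi_cqe_solicited *cqe, u32 truncated);
	static void handle_unsolicited(struct iscsi_cqe_unsolicited *cqe, u16 cookie);

	static void iscsi_handle_cqe(union iscsi_cqe *cqe)
	{
		switch (cqe->cqe_common.cqe_type) {
		case ISCSI_CQE_TYPE_SOLICITED:
			/* data_truncated_bytes replaced the old reserved dword */
			handle_solicited(&cqe->cqe_solicited,
					 le32_to_cpu(cqe->cqe_solicited.data_truncated_bytes));
			break;
		case ISCSI_CQE_TYPE_UNSOLICITED:
			/* rqe_opaque is now a 16-bit driver cookie */
			handle_unsolicited(&cqe->cqe_unsolicited,
					   le16_to_cpu(cqe->cqe_unsolicited.rqe_opaque));
			break;
		default:
			break;
		}
	}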
 
-
+/* iscsi debug modes */
 struct iscsi_debug_modes {
 	u8 flags;
-#define ISCSI_DEBUG_MODES_ASSERT_IF_RX_CONN_ERROR_MASK         0x1
-#define ISCSI_DEBUG_MODES_ASSERT_IF_RX_CONN_ERROR_SHIFT        0
-#define ISCSI_DEBUG_MODES_ASSERT_IF_RECV_RESET_MASK            0x1
-#define ISCSI_DEBUG_MODES_ASSERT_IF_RECV_RESET_SHIFT           1
-#define ISCSI_DEBUG_MODES_ASSERT_IF_RECV_FIN_MASK              0x1
-#define ISCSI_DEBUG_MODES_ASSERT_IF_RECV_FIN_SHIFT             2
-#define ISCSI_DEBUG_MODES_ASSERT_IF_RECV_CLEANUP_MASK          0x1
-#define ISCSI_DEBUG_MODES_ASSERT_IF_RECV_CLEANUP_SHIFT         3
-#define ISCSI_DEBUG_MODES_ASSERT_IF_RECV_REJECT_OR_ASYNC_MASK  0x1
-#define ISCSI_DEBUG_MODES_ASSERT_IF_RECV_REJECT_OR_ASYNC_SHIFT 4
-#define ISCSI_DEBUG_MODES_ASSERT_IF_RECV_NOP_MASK              0x1
-#define ISCSI_DEBUG_MODES_ASSERT_IF_RECV_NOP_SHIFT             5
-#define ISCSI_DEBUG_MODES_ASSERT_IF_DATA_DIGEST_ERROR_MASK     0x1
-#define ISCSI_DEBUG_MODES_ASSERT_IF_DATA_DIGEST_ERROR_SHIFT    6
-#define ISCSI_DEBUG_MODES_ASSERT_IF_DIF_ERROR_MASK             0x1
-#define ISCSI_DEBUG_MODES_ASSERT_IF_DIF_ERROR_SHIFT            7
+#define ISCSI_DEBUG_MODES_ASSERT_IF_RX_CONN_ERROR_MASK			0x1
+#define ISCSI_DEBUG_MODES_ASSERT_IF_RX_CONN_ERROR_SHIFT			0
+#define ISCSI_DEBUG_MODES_ASSERT_IF_RECV_RESET_MASK			0x1
+#define ISCSI_DEBUG_MODES_ASSERT_IF_RECV_RESET_SHIFT			1
+#define ISCSI_DEBUG_MODES_ASSERT_IF_RECV_FIN_MASK			0x1
+#define ISCSI_DEBUG_MODES_ASSERT_IF_RECV_FIN_SHIFT			2
+#define ISCSI_DEBUG_MODES_ASSERT_IF_RECV_CLEANUP_MASK			0x1
+#define ISCSI_DEBUG_MODES_ASSERT_IF_RECV_CLEANUP_SHIFT			3
+#define ISCSI_DEBUG_MODES_ASSERT_IF_RECV_REJECT_OR_ASYNC_MASK		0x1
+#define ISCSI_DEBUG_MODES_ASSERT_IF_RECV_REJECT_OR_ASYNC_SHIFT		4
+#define ISCSI_DEBUG_MODES_ASSERT_IF_RECV_NOP_MASK			0x1
+#define ISCSI_DEBUG_MODES_ASSERT_IF_RECV_NOP_SHIFT			5
+#define ISCSI_DEBUG_MODES_ASSERT_IF_DIF_OR_DATA_DIGEST_ERROR_MASK	0x1
+#define ISCSI_DEBUG_MODES_ASSERT_IF_DIF_OR_DATA_DIGEST_ERROR_SHIFT	6
+#define ISCSI_DEBUG_MODES_ASSERT_IF_HQ_CORRUPT_MASK			0x1
+#define ISCSI_DEBUG_MODES_ASSERT_IF_HQ_CORRUPT_SHIFT			7
 };
 
-struct iscsi_dif_flags {
-	u8 flags;
-#define ISCSI_DIF_FLAGS_PROT_INTERVAL_SIZE_LOG_MASK  0xF
-#define ISCSI_DIF_FLAGS_PROT_INTERVAL_SIZE_LOG_SHIFT 0
-#define ISCSI_DIF_FLAGS_DIF_TO_PEER_MASK             0x1
-#define ISCSI_DIF_FLAGS_DIF_TO_PEER_SHIFT            4
-#define ISCSI_DIF_FLAGS_HOST_INTERFACE_MASK          0x7
-#define ISCSI_DIF_FLAGS_HOST_INTERFACE_SHIFT         5
-};
-
+/* iSCSI kernel completion queue IDs */
 enum iscsi_eqe_opcode {
 	ISCSI_EVENT_TYPE_INIT_FUNC = 0,
 	ISCSI_EVENT_TYPE_DESTROY_FUNC,
@@ -756,9 +1167,9 @@ enum iscsi_eqe_opcode {
 	ISCSI_EVENT_TYPE_CLEAR_SQ,
 	ISCSI_EVENT_TYPE_TERMINATE_CONN,
 	ISCSI_EVENT_TYPE_MAC_UPDATE_CONN,
+	ISCSI_EVENT_TYPE_COLLECT_STATS_CONN,
 	ISCSI_EVENT_TYPE_ASYN_CONNECT_COMPLETE,
 	ISCSI_EVENT_TYPE_ASYN_TERMINATE_DONE,
-	RESERVED9,
 	ISCSI_EVENT_TYPE_START_OF_ERROR_TYPES = 10,
 	ISCSI_EVENT_TYPE_ASYN_ABORT_RCVD,
 	ISCSI_EVENT_TYPE_ASYN_CLOSE_RCVD,
@@ -772,6 +1183,7 @@ enum iscsi_eqe_opcode {
 	MAX_ISCSI_EQE_OPCODE
 };
 
+/* iSCSI EQE and CQE completion status */
 enum iscsi_error_types {
 	ISCSI_STATUS_NONE = 0,
 	ISCSI_CQE_ERROR_UNSOLICITED_RCV_ON_INVALID_CONN = 1,
@@ -823,7 +1235,7 @@ enum iscsi_error_types {
 	MAX_ISCSI_ERROR_TYPES
 };
 
-
+/* iSCSI Ramrod Command IDs */
 enum iscsi_ramrod_cmd_id {
 	ISCSI_RAMROD_CMD_ID_UNUSED = 0,
 	ISCSI_RAMROD_CMD_ID_INIT_FUNC = 1,
@@ -833,22 +1245,11 @@ enum iscsi_ramrod_cmd_id {
 	ISCSI_RAMROD_CMD_ID_TERMINATION_CONN = 5,
 	ISCSI_RAMROD_CMD_ID_CLEAR_SQ = 6,
 	ISCSI_RAMROD_CMD_ID_MAC_UPDATE = 7,
+	ISCSI_RAMROD_CMD_ID_CONN_STATS = 8,
 	MAX_ISCSI_RAMROD_CMD_ID
 };
 
-struct iscsi_reg1 {
-	__le32 reg1_map;
-#define ISCSI_REG1_NUM_SGES_MASK   0xF
-#define ISCSI_REG1_NUM_SGES_SHIFT  0
-#define ISCSI_REG1_RESERVED1_MASK  0xFFFFFFF
-#define ISCSI_REG1_RESERVED1_SHIFT 4
-};
-
-union iscsi_seq_num {
-	__le16 data_sn;
-	__le16 r2t_sn;
-};
-
+/* iSCSI connection MAC update request */
 struct iscsi_spe_conn_mac_update {
 	struct iscsi_slow_path_hdr hdr;
 	__le16 conn_id;
@@ -859,6 +1260,9 @@ struct iscsi_spe_conn_mac_update {
 	u8 reserved0[2];
 };
 
+/* iSCSI and TCP connection (Option 1) offload params passed by driver to FW in
+ * iSCSI offload ramrod.
+ */
 struct iscsi_spe_conn_offload {
 	struct iscsi_slow_path_hdr hdr;
 	__le16 conn_id;
@@ -867,6 +1271,9 @@ struct iscsi_spe_conn_offload {
 	struct tcp_offload_params tcp;
 };
 
+/* iSCSI and TCP connection (Option 2) offload params passed by driver to FW in
+ * iSCSI offload ramrod.
+ */
 struct iscsi_spe_conn_offload_option2 {
 	struct iscsi_slow_path_hdr hdr;
 	__le16 conn_id;
@@ -875,6 +1282,17 @@ struct iscsi_spe_conn_offload_option2 {
 	struct tcp_offload_params_opt2 tcp;
 };
 
+/* iSCSI collect connection statistics request */
+struct iscsi_spe_conn_statistics {
+	struct iscsi_slow_path_hdr hdr;
+	__le16 conn_id;
+	__le32 fw_cid;
+	u8 reset_stats;
+	u8 reserved0[7];
+	struct regpair stats_cnts_addr;
+};
+
+/* iSCSI connection termination request */
 struct iscsi_spe_conn_termination {
 	struct iscsi_slow_path_hdr hdr;
 	__le16 conn_id;
@@ -885,12 +1303,14 @@ struct iscsi_spe_conn_termination {
 	struct regpair query_params_addr;
 };
 
+/* iSCSI firmware function destroy parameters */
 struct iscsi_spe_func_dstry {
 	struct iscsi_slow_path_hdr hdr;
 	__le16 reserved0;
 	__le32 reserved1;
 };
 
+/* iSCSI firmware function init parameters */
 struct iscsi_spe_func_init {
 	struct iscsi_slow_path_hdr hdr;
 	__le16 half_way_close_timeout;
@@ -898,283 +1318,19 @@ struct iscsi_spe_func_init {
 	u8 num_r2tq_pages_in_ring;
 	u8 num_uhq_pages_in_ring;
 	u8 ll2_rx_queue_id;
-	u8 ooo_enable;
+	u8 flags;
+#define ISCSI_SPE_FUNC_INIT_COUNTERS_EN_MASK	0x1
+#define ISCSI_SPE_FUNC_INIT_COUNTERS_EN_SHIFT	0
+#define ISCSI_SPE_FUNC_INIT_RESERVED0_MASK	0x7F
+#define ISCSI_SPE_FUNC_INIT_RESERVED0_SHIFT	1
 	struct iscsi_debug_modes debug_mode;
 	__le16 reserved1;
 	__le32 reserved2;
-	__le32 reserved3;
-	__le32 reserved4;
 	struct scsi_init_func_params func_params;
 	struct scsi_init_func_queues q_params;
 };
 
-struct ystorm_iscsi_task_state {
-	struct scsi_cached_sges data_desc;
-	struct scsi_sgl_params sgl_params;
-	__le32 exp_r2t_sn;
-	__le32 buffer_offset;
-	union iscsi_seq_num seq_num;
-	struct iscsi_dif_flags dif_flags;
-	u8 flags;
-#define YSTORM_ISCSI_TASK_STATE_LOCAL_COMP_MASK  0x1
-#define YSTORM_ISCSI_TASK_STATE_LOCAL_COMP_SHIFT 0
-#define YSTORM_ISCSI_TASK_STATE_SLOW_IO_MASK     0x1
-#define YSTORM_ISCSI_TASK_STATE_SLOW_IO_SHIFT    1
-#define YSTORM_ISCSI_TASK_STATE_RESERVED0_MASK   0x3F
-#define YSTORM_ISCSI_TASK_STATE_RESERVED0_SHIFT  2
-};
-
-struct ystorm_iscsi_task_rxmit_opt {
-	__le32 fast_rxmit_sge_offset;
-	__le32 scan_start_buffer_offset;
-	__le32 fast_rxmit_buffer_offset;
-	u8 scan_start_sgl_index;
-	u8 fast_rxmit_sgl_index;
-	__le16 reserved;
-};
-
-struct ystorm_iscsi_task_st_ctx {
-	struct ystorm_iscsi_task_state state;
-	struct ystorm_iscsi_task_rxmit_opt rxmit_opt;
-	union iscsi_task_hdr pdu_hdr;
-};
-
-struct ystorm_iscsi_task_ag_ctx {
-	u8 reserved;
-	u8 byte1;
-	__le16 word0;
-	u8 flags0;
-#define YSTORM_ISCSI_TASK_AG_CTX_NIBBLE0_MASK     0xF
-#define YSTORM_ISCSI_TASK_AG_CTX_NIBBLE0_SHIFT    0
-#define YSTORM_ISCSI_TASK_AG_CTX_BIT0_MASK        0x1
-#define YSTORM_ISCSI_TASK_AG_CTX_BIT0_SHIFT       4
-#define YSTORM_ISCSI_TASK_AG_CTX_BIT1_MASK        0x1
-#define YSTORM_ISCSI_TASK_AG_CTX_BIT1_SHIFT       5
-#define YSTORM_ISCSI_TASK_AG_CTX_VALID_MASK       0x1
-#define YSTORM_ISCSI_TASK_AG_CTX_VALID_SHIFT      6
-#define YSTORM_ISCSI_TASK_AG_CTX_BIT3_MASK        0x1
-#define YSTORM_ISCSI_TASK_AG_CTX_BIT3_SHIFT       7
-	u8 flags1;
-#define YSTORM_ISCSI_TASK_AG_CTX_CF0_MASK         0x3
-#define YSTORM_ISCSI_TASK_AG_CTX_CF0_SHIFT        0
-#define YSTORM_ISCSI_TASK_AG_CTX_CF1_MASK         0x3
-#define YSTORM_ISCSI_TASK_AG_CTX_CF1_SHIFT        2
-#define YSTORM_ISCSI_TASK_AG_CTX_CF2SPECIAL_MASK  0x3
-#define YSTORM_ISCSI_TASK_AG_CTX_CF2SPECIAL_SHIFT 4
-#define YSTORM_ISCSI_TASK_AG_CTX_CF0EN_MASK       0x1
-#define YSTORM_ISCSI_TASK_AG_CTX_CF0EN_SHIFT      6
-#define YSTORM_ISCSI_TASK_AG_CTX_CF1EN_MASK       0x1
-#define YSTORM_ISCSI_TASK_AG_CTX_CF1EN_SHIFT      7
-	u8 flags2;
-#define YSTORM_ISCSI_TASK_AG_CTX_BIT4_MASK        0x1
-#define YSTORM_ISCSI_TASK_AG_CTX_BIT4_SHIFT       0
-#define YSTORM_ISCSI_TASK_AG_CTX_RULE0EN_MASK     0x1
-#define YSTORM_ISCSI_TASK_AG_CTX_RULE0EN_SHIFT    1
-#define YSTORM_ISCSI_TASK_AG_CTX_RULE1EN_MASK     0x1
-#define YSTORM_ISCSI_TASK_AG_CTX_RULE1EN_SHIFT    2
-#define YSTORM_ISCSI_TASK_AG_CTX_RULE2EN_MASK     0x1
-#define YSTORM_ISCSI_TASK_AG_CTX_RULE2EN_SHIFT    3
-#define YSTORM_ISCSI_TASK_AG_CTX_RULE3EN_MASK     0x1
-#define YSTORM_ISCSI_TASK_AG_CTX_RULE3EN_SHIFT    4
-#define YSTORM_ISCSI_TASK_AG_CTX_RULE4EN_MASK     0x1
-#define YSTORM_ISCSI_TASK_AG_CTX_RULE4EN_SHIFT    5
-#define YSTORM_ISCSI_TASK_AG_CTX_RULE5EN_MASK     0x1
-#define YSTORM_ISCSI_TASK_AG_CTX_RULE5EN_SHIFT    6
-#define YSTORM_ISCSI_TASK_AG_CTX_RULE6EN_MASK     0x1
-#define YSTORM_ISCSI_TASK_AG_CTX_RULE6EN_SHIFT    7
-	u8 byte2;
-	__le32 TTT;
-	u8 byte3;
-	u8 byte4;
-	__le16 word1;
-};
-
-struct mstorm_iscsi_task_ag_ctx {
-	u8 cdu_validation;
-	u8 byte1;
-	__le16 task_cid;
-	u8 flags0;
-#define MSTORM_ISCSI_TASK_AG_CTX_CONNECTION_TYPE_MASK     0xF
-#define MSTORM_ISCSI_TASK_AG_CTX_CONNECTION_TYPE_SHIFT    0
-#define MSTORM_ISCSI_TASK_AG_CTX_EXIST_IN_QM0_MASK        0x1
-#define MSTORM_ISCSI_TASK_AG_CTX_EXIST_IN_QM0_SHIFT       4
-#define MSTORM_ISCSI_TASK_AG_CTX_BIT1_MASK                0x1
-#define MSTORM_ISCSI_TASK_AG_CTX_BIT1_SHIFT               5
-#define MSTORM_ISCSI_TASK_AG_CTX_VALID_MASK               0x1
-#define MSTORM_ISCSI_TASK_AG_CTX_VALID_SHIFT              6
-#define MSTORM_ISCSI_TASK_AG_CTX_TASK_CLEANUP_FLAG_MASK   0x1
-#define MSTORM_ISCSI_TASK_AG_CTX_TASK_CLEANUP_FLAG_SHIFT  7
-	u8 flags1;
-#define MSTORM_ISCSI_TASK_AG_CTX_TASK_CLEANUP_CF_MASK     0x3
-#define MSTORM_ISCSI_TASK_AG_CTX_TASK_CLEANUP_CF_SHIFT    0
-#define MSTORM_ISCSI_TASK_AG_CTX_CF1_MASK                 0x3
-#define MSTORM_ISCSI_TASK_AG_CTX_CF1_SHIFT                2
-#define MSTORM_ISCSI_TASK_AG_CTX_CF2_MASK                 0x3
-#define MSTORM_ISCSI_TASK_AG_CTX_CF2_SHIFT                4
-#define MSTORM_ISCSI_TASK_AG_CTX_TASK_CLEANUP_CF_EN_MASK  0x1
-#define MSTORM_ISCSI_TASK_AG_CTX_TASK_CLEANUP_CF_EN_SHIFT 6
-#define MSTORM_ISCSI_TASK_AG_CTX_CF1EN_MASK               0x1
-#define MSTORM_ISCSI_TASK_AG_CTX_CF1EN_SHIFT              7
-	u8 flags2;
-#define MSTORM_ISCSI_TASK_AG_CTX_CF2EN_MASK               0x1
-#define MSTORM_ISCSI_TASK_AG_CTX_CF2EN_SHIFT              0
-#define MSTORM_ISCSI_TASK_AG_CTX_RULE0EN_MASK             0x1
-#define MSTORM_ISCSI_TASK_AG_CTX_RULE0EN_SHIFT            1
-#define MSTORM_ISCSI_TASK_AG_CTX_RULE1EN_MASK             0x1
-#define MSTORM_ISCSI_TASK_AG_CTX_RULE1EN_SHIFT            2
-#define MSTORM_ISCSI_TASK_AG_CTX_RULE2EN_MASK             0x1
-#define MSTORM_ISCSI_TASK_AG_CTX_RULE2EN_SHIFT            3
-#define MSTORM_ISCSI_TASK_AG_CTX_RULE3EN_MASK             0x1
-#define MSTORM_ISCSI_TASK_AG_CTX_RULE3EN_SHIFT            4
-#define MSTORM_ISCSI_TASK_AG_CTX_RULE4EN_MASK             0x1
-#define MSTORM_ISCSI_TASK_AG_CTX_RULE4EN_SHIFT            5
-#define MSTORM_ISCSI_TASK_AG_CTX_RULE5EN_MASK             0x1
-#define MSTORM_ISCSI_TASK_AG_CTX_RULE5EN_SHIFT            6
-#define MSTORM_ISCSI_TASK_AG_CTX_RULE6EN_MASK             0x1
-#define MSTORM_ISCSI_TASK_AG_CTX_RULE6EN_SHIFT            7
-	u8 byte2;
-	__le32 reg0;
-	u8 byte3;
-	u8 byte4;
-	__le16 word1;
-};
-
-struct ustorm_iscsi_task_ag_ctx {
-	u8 reserved;
-	u8 state;
-	__le16 icid;
-	u8 flags0;
-#define USTORM_ISCSI_TASK_AG_CTX_CONNECTION_TYPE_MASK        0xF
-#define USTORM_ISCSI_TASK_AG_CTX_CONNECTION_TYPE_SHIFT       0
-#define USTORM_ISCSI_TASK_AG_CTX_EXIST_IN_QM0_MASK           0x1
-#define USTORM_ISCSI_TASK_AG_CTX_EXIST_IN_QM0_SHIFT          4
-#define USTORM_ISCSI_TASK_AG_CTX_BIT1_MASK                   0x1
-#define USTORM_ISCSI_TASK_AG_CTX_BIT1_SHIFT                  5
-#define USTORM_ISCSI_TASK_AG_CTX_HQ_SCANNED_CF_MASK          0x3
-#define USTORM_ISCSI_TASK_AG_CTX_HQ_SCANNED_CF_SHIFT         6
-	u8 flags1;
-#define USTORM_ISCSI_TASK_AG_CTX_RESERVED1_MASK              0x3
-#define USTORM_ISCSI_TASK_AG_CTX_RESERVED1_SHIFT             0
-#define USTORM_ISCSI_TASK_AG_CTX_R2T2RECV_MASK               0x3
-#define USTORM_ISCSI_TASK_AG_CTX_R2T2RECV_SHIFT              2
-#define USTORM_ISCSI_TASK_AG_CTX_CF3_MASK                    0x3
-#define USTORM_ISCSI_TASK_AG_CTX_CF3_SHIFT                   4
-#define USTORM_ISCSI_TASK_AG_CTX_DIF_ERROR_CF_MASK           0x3
-#define USTORM_ISCSI_TASK_AG_CTX_DIF_ERROR_CF_SHIFT          6
-	u8 flags2;
-#define USTORM_ISCSI_TASK_AG_CTX_HQ_SCANNED_CF_EN_MASK       0x1
-#define USTORM_ISCSI_TASK_AG_CTX_HQ_SCANNED_CF_EN_SHIFT      0
-#define USTORM_ISCSI_TASK_AG_CTX_DISABLE_DATA_ACKED_MASK     0x1
-#define USTORM_ISCSI_TASK_AG_CTX_DISABLE_DATA_ACKED_SHIFT    1
-#define USTORM_ISCSI_TASK_AG_CTX_R2T2RECV_EN_MASK            0x1
-#define USTORM_ISCSI_TASK_AG_CTX_R2T2RECV_EN_SHIFT           2
-#define USTORM_ISCSI_TASK_AG_CTX_CF3EN_MASK                  0x1
-#define USTORM_ISCSI_TASK_AG_CTX_CF3EN_SHIFT                 3
-#define USTORM_ISCSI_TASK_AG_CTX_DIF_ERROR_CF_EN_MASK        0x1
-#define USTORM_ISCSI_TASK_AG_CTX_DIF_ERROR_CF_EN_SHIFT       4
-#define USTORM_ISCSI_TASK_AG_CTX_CMP_DATA_TOTAL_EXP_EN_MASK  0x1
-#define USTORM_ISCSI_TASK_AG_CTX_CMP_DATA_TOTAL_EXP_EN_SHIFT 5
-#define USTORM_ISCSI_TASK_AG_CTX_RULE1EN_MASK                0x1
-#define USTORM_ISCSI_TASK_AG_CTX_RULE1EN_SHIFT               6
-#define USTORM_ISCSI_TASK_AG_CTX_CMP_CONT_RCV_EXP_EN_MASK    0x1
-#define USTORM_ISCSI_TASK_AG_CTX_CMP_CONT_RCV_EXP_EN_SHIFT   7
-	u8 flags3;
-#define USTORM_ISCSI_TASK_AG_CTX_RULE3EN_MASK                0x1
-#define USTORM_ISCSI_TASK_AG_CTX_RULE3EN_SHIFT               0
-#define USTORM_ISCSI_TASK_AG_CTX_RULE4EN_MASK                0x1
-#define USTORM_ISCSI_TASK_AG_CTX_RULE4EN_SHIFT               1
-#define USTORM_ISCSI_TASK_AG_CTX_RULE5EN_MASK                0x1
-#define USTORM_ISCSI_TASK_AG_CTX_RULE5EN_SHIFT               2
-#define USTORM_ISCSI_TASK_AG_CTX_RULE6EN_MASK                0x1
-#define USTORM_ISCSI_TASK_AG_CTX_RULE6EN_SHIFT               3
-#define USTORM_ISCSI_TASK_AG_CTX_DIF_ERROR_TYPE_MASK         0xF
-#define USTORM_ISCSI_TASK_AG_CTX_DIF_ERROR_TYPE_SHIFT        4
-	__le32 dif_err_intervals;
-	__le32 dif_error_1st_interval;
-	__le32 rcv_cont_len;
-	__le32 exp_cont_len;
-	__le32 total_data_acked;
-	__le32 exp_data_acked;
-	u8 next_tid_valid;
-	u8 byte3;
-	__le16 word1;
-	__le16 next_tid;
-	__le16 word3;
-	__le32 hdr_residual_count;
-	__le32 exp_r2t_sn;
-};
-
-struct mstorm_iscsi_task_st_ctx {
-	struct scsi_cached_sges data_desc;
-	struct scsi_sgl_params sgl_params;
-	__le32 rem_task_size;
-	__le32 data_buffer_offset;
-	u8 task_type;
-	struct iscsi_dif_flags dif_flags;
-	u8 reserved0[2];
-	struct regpair sense_db;
-	__le32 expected_itt;
-	__le32 reserved1;
-};
-
-struct ustorm_iscsi_task_st_ctx {
-	__le32 rem_rcv_len;
-	__le32 exp_data_transfer_len;
-	__le32 exp_data_sn;
-	struct regpair lun;
-	struct iscsi_reg1 reg1;
-	u8 flags2;
-#define USTORM_ISCSI_TASK_ST_CTX_AHS_EXIST_MASK             0x1
-#define USTORM_ISCSI_TASK_ST_CTX_AHS_EXIST_SHIFT            0
-#define USTORM_ISCSI_TASK_ST_CTX_RESERVED1_MASK             0x7F
-#define USTORM_ISCSI_TASK_ST_CTX_RESERVED1_SHIFT            1
-	struct iscsi_dif_flags dif_flags;
-	__le16 reserved3;
-	__le32 reserved4;
-	__le32 reserved5;
-	__le32 reserved6;
-	__le32 reserved7;
-	u8 task_type;
-	u8 error_flags;
-#define USTORM_ISCSI_TASK_ST_CTX_DATA_DIGEST_ERROR_MASK     0x1
-#define USTORM_ISCSI_TASK_ST_CTX_DATA_DIGEST_ERROR_SHIFT    0
-#define USTORM_ISCSI_TASK_ST_CTX_DATA_TRUNCATED_ERROR_MASK  0x1
-#define USTORM_ISCSI_TASK_ST_CTX_DATA_TRUNCATED_ERROR_SHIFT 1
-#define USTORM_ISCSI_TASK_ST_CTX_UNDER_RUN_ERROR_MASK       0x1
-#define USTORM_ISCSI_TASK_ST_CTX_UNDER_RUN_ERROR_SHIFT      2
-#define USTORM_ISCSI_TASK_ST_CTX_RESERVED8_MASK             0x1F
-#define USTORM_ISCSI_TASK_ST_CTX_RESERVED8_SHIFT            3
-	u8 flags;
-#define USTORM_ISCSI_TASK_ST_CTX_CQE_WRITE_MASK             0x3
-#define USTORM_ISCSI_TASK_ST_CTX_CQE_WRITE_SHIFT            0
-#define USTORM_ISCSI_TASK_ST_CTX_LOCAL_COMP_MASK            0x1
-#define USTORM_ISCSI_TASK_ST_CTX_LOCAL_COMP_SHIFT           2
-#define USTORM_ISCSI_TASK_ST_CTX_Q0_R2TQE_WRITE_MASK        0x1
-#define USTORM_ISCSI_TASK_ST_CTX_Q0_R2TQE_WRITE_SHIFT       3
-#define USTORM_ISCSI_TASK_ST_CTX_TOTAL_DATA_ACKED_DONE_MASK  0x1
-#define USTORM_ISCSI_TASK_ST_CTX_TOTAL_DATA_ACKED_DONE_SHIFT 4
-#define USTORM_ISCSI_TASK_ST_CTX_HQ_SCANNED_DONE_MASK        0x1
-#define USTORM_ISCSI_TASK_ST_CTX_HQ_SCANNED_DONE_SHIFT       5
-#define USTORM_ISCSI_TASK_ST_CTX_R2T2RECV_DONE_MASK         0x1
-#define USTORM_ISCSI_TASK_ST_CTX_R2T2RECV_DONE_SHIFT        6
-#define USTORM_ISCSI_TASK_ST_CTX_RESERVED0_MASK             0x1
-#define USTORM_ISCSI_TASK_ST_CTX_RESERVED0_SHIFT            7
-	u8 cq_rss_number;
-};
-
-struct iscsi_task_context {
-	struct ystorm_iscsi_task_st_ctx ystorm_st_context;
-	struct ystorm_iscsi_task_ag_ctx ystorm_ag_context;
-	struct regpair ystorm_ag_padding[2];
-	struct tdif_task_context tdif_context;
-	struct mstorm_iscsi_task_ag_ctx mstorm_ag_context;
-	struct regpair mstorm_ag_padding[2];
-	struct ustorm_iscsi_task_ag_ctx ustorm_ag_context;
-	struct mstorm_iscsi_task_st_ctx mstorm_st_context;
-	struct ustorm_iscsi_task_st_ctx ustorm_st_context;
-	struct rdif_task_context rdif_context;
-};
-
+/* iSCSI task type */
 enum iscsi_task_type {
 	ISCSI_TASK_TYPE_INITIATOR_WRITE,
 	ISCSI_TASK_TYPE_INITIATOR_READ,
@@ -1186,53 +1342,57 @@ enum iscsi_task_type {
 	ISCSI_TASK_TYPE_TARGET_READ,
 	ISCSI_TASK_TYPE_TARGET_RESPONSE,
 	ISCSI_TASK_TYPE_LOGIN_RESPONSE,
+	ISCSI_TASK_TYPE_TARGET_IMM_W_DIF,
 	MAX_ISCSI_TASK_TYPE
 };
 
+/* iSCSI DesiredDataTransferLength/ttt union */
 union iscsi_ttt_txlen_union {
 	__le32 desired_tx_len;
 	__le32 ttt;
 };
 
+/* iSCSI uHQ element */
 struct iscsi_uhqe {
 	__le32 reg1;
-#define ISCSI_UHQE_PDU_PAYLOAD_LEN_MASK     0xFFFFF
-#define ISCSI_UHQE_PDU_PAYLOAD_LEN_SHIFT    0
-#define ISCSI_UHQE_LOCAL_COMP_MASK          0x1
-#define ISCSI_UHQE_LOCAL_COMP_SHIFT         20
-#define ISCSI_UHQE_TOGGLE_BIT_MASK          0x1
-#define ISCSI_UHQE_TOGGLE_BIT_SHIFT         21
-#define ISCSI_UHQE_PURE_PAYLOAD_MASK        0x1
-#define ISCSI_UHQE_PURE_PAYLOAD_SHIFT       22
-#define ISCSI_UHQE_LOGIN_RESPONSE_PDU_MASK  0x1
-#define ISCSI_UHQE_LOGIN_RESPONSE_PDU_SHIFT 23
-#define ISCSI_UHQE_TASK_ID_HI_MASK          0xFF
-#define ISCSI_UHQE_TASK_ID_HI_SHIFT         24
+#define ISCSI_UHQE_PDU_PAYLOAD_LEN_MASK		0xFFFFF
+#define ISCSI_UHQE_PDU_PAYLOAD_LEN_SHIFT	0
+#define ISCSI_UHQE_LOCAL_COMP_MASK		0x1
+#define ISCSI_UHQE_LOCAL_COMP_SHIFT		20
+#define ISCSI_UHQE_TOGGLE_BIT_MASK		0x1
+#define ISCSI_UHQE_TOGGLE_BIT_SHIFT		21
+#define ISCSI_UHQE_PURE_PAYLOAD_MASK		0x1
+#define ISCSI_UHQE_PURE_PAYLOAD_SHIFT		22
+#define ISCSI_UHQE_LOGIN_RESPONSE_PDU_MASK	0x1
+#define ISCSI_UHQE_LOGIN_RESPONSE_PDU_SHIFT	23
+#define ISCSI_UHQE_TASK_ID_HI_MASK		0xFF
+#define ISCSI_UHQE_TASK_ID_HI_SHIFT		24
 	__le32 reg2;
-#define ISCSI_UHQE_BUFFER_OFFSET_MASK       0xFFFFFF
-#define ISCSI_UHQE_BUFFER_OFFSET_SHIFT      0
-#define ISCSI_UHQE_TASK_ID_LO_MASK          0xFF
-#define ISCSI_UHQE_TASK_ID_LO_SHIFT         24
+#define ISCSI_UHQE_BUFFER_OFFSET_MASK	0xFFFFFF
+#define ISCSI_UHQE_BUFFER_OFFSET_SHIFT	0
+#define ISCSI_UHQE_TASK_ID_LO_MASK	0xFF
+#define ISCSI_UHQE_TASK_ID_LO_SHIFT	24
 };
 
-
+/* iSCSI WQ element */
 struct iscsi_wqe {
 	__le16 task_id;
 	u8 flags;
-#define ISCSI_WQE_WQE_TYPE_MASK        0x7
-#define ISCSI_WQE_WQE_TYPE_SHIFT       0
-#define ISCSI_WQE_NUM_SGES_MASK  0xF
-#define ISCSI_WQE_NUM_SGES_SHIFT 3
-#define ISCSI_WQE_RESPONSE_MASK        0x1
-#define ISCSI_WQE_RESPONSE_SHIFT       7
+#define ISCSI_WQE_WQE_TYPE_MASK		0x7
+#define ISCSI_WQE_WQE_TYPE_SHIFT	0
+#define ISCSI_WQE_NUM_SGES_MASK		0xF
+#define ISCSI_WQE_NUM_SGES_SHIFT	3
+#define ISCSI_WQE_RESPONSE_MASK		0x1
+#define ISCSI_WQE_RESPONSE_SHIFT	7
 	struct iscsi_dif_flags prot_flags;
 	__le32 contlen_cdbsize;
-#define ISCSI_WQE_CONT_LEN_MASK  0xFFFFFF
-#define ISCSI_WQE_CONT_LEN_SHIFT 0
-#define ISCSI_WQE_CDB_SIZE_MASK  0xFF
-#define ISCSI_WQE_CDB_SIZE_SHIFT 24
+#define ISCSI_WQE_CONT_LEN_MASK		0xFFFFFF
+#define ISCSI_WQE_CONT_LEN_SHIFT	0
+#define ISCSI_WQE_CDB_SIZE_MASK		0xFF
+#define ISCSI_WQE_CDB_SIZE_SHIFT	24
 };
 
+/* iSCSI wqe type */
 enum iscsi_wqe_type {
 	ISCSI_WQE_TYPE_NORMAL,
 	ISCSI_WQE_TYPE_TASK_CLEANUP,
@@ -1244,6 +1404,7 @@ enum iscsi_wqe_type {
 	MAX_ISCSI_WQE_TYPE
 };
 
+/* iSCSI xHQ element */
 struct iscsi_xhqe {
 	union iscsi_ttt_txlen_union ttt_or_txlen;
 	__le32 exp_stat_sn;
@@ -1251,120 +1412,134 @@ struct iscsi_xhqe {
 	u8 total_ahs_length;
 	u8 opcode;
 	u8 flags;
-#define ISCSI_XHQE_FINAL_MASK       0x1
-#define ISCSI_XHQE_FINAL_SHIFT      0
-#define ISCSI_XHQE_STATUS_BIT_MASK  0x1
-#define ISCSI_XHQE_STATUS_BIT_SHIFT 1
-#define ISCSI_XHQE_NUM_SGES_MASK    0xF
-#define ISCSI_XHQE_NUM_SGES_SHIFT   2
-#define ISCSI_XHQE_RESERVED0_MASK   0x3
-#define ISCSI_XHQE_RESERVED0_SHIFT  6
+#define ISCSI_XHQE_FINAL_MASK		0x1
+#define ISCSI_XHQE_FINAL_SHIFT		0
+#define ISCSI_XHQE_STATUS_BIT_MASK	0x1
+#define ISCSI_XHQE_STATUS_BIT_SHIFT	1
+#define ISCSI_XHQE_NUM_SGES_MASK	0xF
+#define ISCSI_XHQE_NUM_SGES_SHIFT	2
+#define ISCSI_XHQE_RESERVED0_MASK	0x3
+#define ISCSI_XHQE_RESERVED0_SHIFT	6
 	union iscsi_seq_num seq_num;
 	__le16 reserved1;
 };
 
+/* Per PF iSCSI receive path statistics - mStorm RAM structure */
 struct mstorm_iscsi_stats_drv {
 	struct regpair iscsi_rx_dropped_pdus_task_not_valid;
+	struct regpair iscsi_rx_dup_ack_cnt;
 };
 
+/* Per PF iSCSI transmit path statistics - pStorm RAM structure */
 struct pstorm_iscsi_stats_drv {
 	struct regpair iscsi_tx_bytes_cnt;
 	struct regpair iscsi_tx_packet_cnt;
 };
 
+/* Per PF iSCSI receive path statistics - tStorm RAM structure */
 struct tstorm_iscsi_stats_drv {
 	struct regpair iscsi_rx_bytes_cnt;
 	struct regpair iscsi_rx_packet_cnt;
 	struct regpair iscsi_rx_new_ooo_isle_events_cnt;
+	struct regpair iscsi_rx_tcp_payload_bytes_cnt;
+	struct regpair iscsi_rx_tcp_pkt_cnt;
+	struct regpair iscsi_rx_pure_ack_cnt;
 	__le32 iscsi_cmdq_threshold_cnt;
 	__le32 iscsi_rq_threshold_cnt;
 	__le32 iscsi_immq_threshold_cnt;
 };
 
+/* Per PF iSCSI receive path statistics - uStorm RAM structure */
 struct ustorm_iscsi_stats_drv {
 	struct regpair iscsi_rx_data_pdu_cnt;
 	struct regpair iscsi_rx_r2t_pdu_cnt;
 	struct regpair iscsi_rx_total_pdu_cnt;
 };
 
+/* Per PF iSCSI transmit path statistics - xStorm RAM structure */
 struct xstorm_iscsi_stats_drv {
 	struct regpair iscsi_tx_go_to_slow_start_event_cnt;
 	struct regpair iscsi_tx_fast_retransmit_event_cnt;
+	struct regpair iscsi_tx_pure_ack_cnt;
+	struct regpair iscsi_tx_delayed_ack_cnt;
 };
 
+/* Per PF iSCSI transmit path statistics - yStorm RAM structure */
 struct ystorm_iscsi_stats_drv {
 	struct regpair iscsi_tx_data_pdu_cnt;
 	struct regpair iscsi_tx_r2t_pdu_cnt;
 	struct regpair iscsi_tx_total_pdu_cnt;
+	struct regpair iscsi_tx_tcp_payload_bytes_cnt;
+	struct regpair iscsi_tx_tcp_pkt_cnt;
 };
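
The 64-bit statistics counters in these per-PF structures are stored as a regpair, i.e. two little-endian 32-bit halves. A standalone sketch of folding one counter into a u64; the lo/hi layout mirrors qed's common HSI regpair and should be treated as an assumption if used outside these headers.

	#include <stdint.h>

	struct regpair {
		uint32_t lo;	/* little-endian low dword in the real HSI */
		uint32_t hi;	/* little-endian high dword */
	};

	static uint64_t regpair_to_u64(const struct regpair *rp)
	{
		/* On a little-endian host no byte swap is needed */
		return ((uint64_t)rp->hi << 32) | rp->lo;
	}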
 
-struct tstorm_iscsi_task_ag_ctx {
+struct e4_tstorm_iscsi_task_ag_ctx {
 	u8 byte0;
 	u8 byte1;
 	__le16 word0;
 	u8 flags0;
-#define TSTORM_ISCSI_TASK_AG_CTX_NIBBLE0_MASK  0xF
-#define TSTORM_ISCSI_TASK_AG_CTX_NIBBLE0_SHIFT 0
-#define TSTORM_ISCSI_TASK_AG_CTX_BIT0_MASK     0x1
-#define TSTORM_ISCSI_TASK_AG_CTX_BIT0_SHIFT    4
-#define TSTORM_ISCSI_TASK_AG_CTX_BIT1_MASK     0x1
-#define TSTORM_ISCSI_TASK_AG_CTX_BIT1_SHIFT    5
-#define TSTORM_ISCSI_TASK_AG_CTX_BIT2_MASK     0x1
-#define TSTORM_ISCSI_TASK_AG_CTX_BIT2_SHIFT    6
-#define TSTORM_ISCSI_TASK_AG_CTX_BIT3_MASK     0x1
-#define TSTORM_ISCSI_TASK_AG_CTX_BIT3_SHIFT    7
+#define E4_TSTORM_ISCSI_TASK_AG_CTX_NIBBLE0_MASK	0xF
+#define E4_TSTORM_ISCSI_TASK_AG_CTX_NIBBLE0_SHIFT	0
+#define E4_TSTORM_ISCSI_TASK_AG_CTX_BIT0_MASK		0x1
+#define E4_TSTORM_ISCSI_TASK_AG_CTX_BIT0_SHIFT		4
+#define E4_TSTORM_ISCSI_TASK_AG_CTX_BIT1_MASK		0x1
+#define E4_TSTORM_ISCSI_TASK_AG_CTX_BIT1_SHIFT		5
+#define E4_TSTORM_ISCSI_TASK_AG_CTX_BIT2_MASK		0x1
+#define E4_TSTORM_ISCSI_TASK_AG_CTX_BIT2_SHIFT		6
+#define E4_TSTORM_ISCSI_TASK_AG_CTX_BIT3_MASK		0x1
+#define E4_TSTORM_ISCSI_TASK_AG_CTX_BIT3_SHIFT		7
 	u8 flags1;
-#define TSTORM_ISCSI_TASK_AG_CTX_BIT4_MASK     0x1
-#define TSTORM_ISCSI_TASK_AG_CTX_BIT4_SHIFT    0
-#define TSTORM_ISCSI_TASK_AG_CTX_BIT5_MASK     0x1
-#define TSTORM_ISCSI_TASK_AG_CTX_BIT5_SHIFT    1
-#define TSTORM_ISCSI_TASK_AG_CTX_CF0_MASK      0x3
-#define TSTORM_ISCSI_TASK_AG_CTX_CF0_SHIFT     2
-#define TSTORM_ISCSI_TASK_AG_CTX_CF1_MASK      0x3
-#define TSTORM_ISCSI_TASK_AG_CTX_CF1_SHIFT     4
-#define TSTORM_ISCSI_TASK_AG_CTX_CF2_MASK      0x3
-#define TSTORM_ISCSI_TASK_AG_CTX_CF2_SHIFT     6
+#define E4_TSTORM_ISCSI_TASK_AG_CTX_BIT4_MASK	0x1
+#define E4_TSTORM_ISCSI_TASK_AG_CTX_BIT4_SHIFT	0
+#define E4_TSTORM_ISCSI_TASK_AG_CTX_BIT5_MASK	0x1
+#define E4_TSTORM_ISCSI_TASK_AG_CTX_BIT5_SHIFT	1
+#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF0_MASK	0x3
+#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF0_SHIFT	2
+#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF1_MASK	0x3
+#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF1_SHIFT	4
+#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF2_MASK	0x3
+#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF2_SHIFT	6
 	u8 flags2;
-#define TSTORM_ISCSI_TASK_AG_CTX_CF3_MASK      0x3
-#define TSTORM_ISCSI_TASK_AG_CTX_CF3_SHIFT     0
-#define TSTORM_ISCSI_TASK_AG_CTX_CF4_MASK      0x3
-#define TSTORM_ISCSI_TASK_AG_CTX_CF4_SHIFT     2
-#define TSTORM_ISCSI_TASK_AG_CTX_CF5_MASK      0x3
-#define TSTORM_ISCSI_TASK_AG_CTX_CF5_SHIFT     4
-#define TSTORM_ISCSI_TASK_AG_CTX_CF6_MASK      0x3
-#define TSTORM_ISCSI_TASK_AG_CTX_CF6_SHIFT     6
+#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF3_MASK	0x3
+#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF3_SHIFT	0
+#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF4_MASK	0x3
+#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF4_SHIFT	2
+#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF5_MASK	0x3
+#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF5_SHIFT	4
+#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF6_MASK	0x3
+#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF6_SHIFT	6
 	u8 flags3;
-#define TSTORM_ISCSI_TASK_AG_CTX_CF7_MASK      0x3
-#define TSTORM_ISCSI_TASK_AG_CTX_CF7_SHIFT     0
-#define TSTORM_ISCSI_TASK_AG_CTX_CF0EN_MASK    0x1
-#define TSTORM_ISCSI_TASK_AG_CTX_CF0EN_SHIFT   2
-#define TSTORM_ISCSI_TASK_AG_CTX_CF1EN_MASK    0x1
-#define TSTORM_ISCSI_TASK_AG_CTX_CF1EN_SHIFT   3
-#define TSTORM_ISCSI_TASK_AG_CTX_CF2EN_MASK    0x1
-#define TSTORM_ISCSI_TASK_AG_CTX_CF2EN_SHIFT   4
-#define TSTORM_ISCSI_TASK_AG_CTX_CF3EN_MASK    0x1
-#define TSTORM_ISCSI_TASK_AG_CTX_CF3EN_SHIFT   5
-#define TSTORM_ISCSI_TASK_AG_CTX_CF4EN_MASK    0x1
-#define TSTORM_ISCSI_TASK_AG_CTX_CF4EN_SHIFT   6
-#define TSTORM_ISCSI_TASK_AG_CTX_CF5EN_MASK    0x1
-#define TSTORM_ISCSI_TASK_AG_CTX_CF5EN_SHIFT   7
+#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF7_MASK	0x3
+#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF7_SHIFT	0
+#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF0EN_MASK	0x1
+#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF0EN_SHIFT	2
+#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF1EN_MASK	0x1
+#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF1EN_SHIFT	3
+#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF2EN_MASK	0x1
+#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF2EN_SHIFT	4
+#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF3EN_MASK	0x1
+#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF3EN_SHIFT	5
+#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF4EN_MASK	0x1
+#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF4EN_SHIFT	6
+#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF5EN_MASK	0x1
+#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF5EN_SHIFT	7
 	u8 flags4;
-#define TSTORM_ISCSI_TASK_AG_CTX_CF6EN_MASK    0x1
-#define TSTORM_ISCSI_TASK_AG_CTX_CF6EN_SHIFT   0
-#define TSTORM_ISCSI_TASK_AG_CTX_CF7EN_MASK    0x1
-#define TSTORM_ISCSI_TASK_AG_CTX_CF7EN_SHIFT   1
-#define TSTORM_ISCSI_TASK_AG_CTX_RULE0EN_MASK  0x1
-#define TSTORM_ISCSI_TASK_AG_CTX_RULE0EN_SHIFT 2
-#define TSTORM_ISCSI_TASK_AG_CTX_RULE1EN_MASK  0x1
-#define TSTORM_ISCSI_TASK_AG_CTX_RULE1EN_SHIFT 3
-#define TSTORM_ISCSI_TASK_AG_CTX_RULE2EN_MASK  0x1
-#define TSTORM_ISCSI_TASK_AG_CTX_RULE2EN_SHIFT 4
-#define TSTORM_ISCSI_TASK_AG_CTX_RULE3EN_MASK  0x1
-#define TSTORM_ISCSI_TASK_AG_CTX_RULE3EN_SHIFT 5
-#define TSTORM_ISCSI_TASK_AG_CTX_RULE4EN_MASK  0x1
-#define TSTORM_ISCSI_TASK_AG_CTX_RULE4EN_SHIFT 6
-#define TSTORM_ISCSI_TASK_AG_CTX_RULE5EN_MASK  0x1
-#define TSTORM_ISCSI_TASK_AG_CTX_RULE5EN_SHIFT 7
+#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF6EN_MASK		0x1
+#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF6EN_SHIFT		0
+#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF7EN_MASK		0x1
+#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF7EN_SHIFT		1
+#define E4_TSTORM_ISCSI_TASK_AG_CTX_RULE0EN_MASK	0x1
+#define E4_TSTORM_ISCSI_TASK_AG_CTX_RULE0EN_SHIFT	2
+#define E4_TSTORM_ISCSI_TASK_AG_CTX_RULE1EN_MASK	0x1
+#define E4_TSTORM_ISCSI_TASK_AG_CTX_RULE1EN_SHIFT	3
+#define E4_TSTORM_ISCSI_TASK_AG_CTX_RULE2EN_MASK	0x1
+#define E4_TSTORM_ISCSI_TASK_AG_CTX_RULE2EN_SHIFT	4
+#define E4_TSTORM_ISCSI_TASK_AG_CTX_RULE3EN_MASK	0x1
+#define E4_TSTORM_ISCSI_TASK_AG_CTX_RULE3EN_SHIFT	5
+#define E4_TSTORM_ISCSI_TASK_AG_CTX_RULE4EN_MASK	0x1
+#define E4_TSTORM_ISCSI_TASK_AG_CTX_RULE4EN_SHIFT	6
+#define E4_TSTORM_ISCSI_TASK_AG_CTX_RULE5EN_MASK	0x1
+#define E4_TSTORM_ISCSI_TASK_AG_CTX_RULE5EN_SHIFT	7
 	u8 byte2;
 	__le16 word1;
 	__le32 reg0;
@@ -1376,18 +1551,20 @@ struct tstorm_iscsi_task_ag_ctx {
 	__le32 reg1;
 	__le32 reg2;
 };
+
+/* iSCSI doorbell data */
 struct iscsi_db_data {
 	u8 params;
-#define ISCSI_DB_DATA_DEST_MASK         0x3
-#define ISCSI_DB_DATA_DEST_SHIFT        0
-#define ISCSI_DB_DATA_AGG_CMD_MASK      0x3
-#define ISCSI_DB_DATA_AGG_CMD_SHIFT     2
-#define ISCSI_DB_DATA_BYPASS_EN_MASK    0x1
-#define ISCSI_DB_DATA_BYPASS_EN_SHIFT   4
-#define ISCSI_DB_DATA_RESERVED_MASK     0x1
-#define ISCSI_DB_DATA_RESERVED_SHIFT    5
-#define ISCSI_DB_DATA_AGG_VAL_SEL_MASK  0x3
-#define ISCSI_DB_DATA_AGG_VAL_SEL_SHIFT 6
+#define ISCSI_DB_DATA_DEST_MASK		0x3
+#define ISCSI_DB_DATA_DEST_SHIFT	0
+#define ISCSI_DB_DATA_AGG_CMD_MASK	0x3
+#define ISCSI_DB_DATA_AGG_CMD_SHIFT	2
+#define ISCSI_DB_DATA_BYPASS_EN_MASK	0x1
+#define ISCSI_DB_DATA_BYPASS_EN_SHIFT	4
+#define ISCSI_DB_DATA_RESERVED_MASK	0x1
+#define ISCSI_DB_DATA_RESERVED_SHIFT	5
+#define ISCSI_DB_DATA_AGG_VAL_SEL_MASK	0x3
+#define ISCSI_DB_DATA_AGG_VAL_SEL_SHIFT	6
 	u8 agg_flags;
 	__le16 sq_prod;
 };
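
Ringing an SQ doorbell amounts to composing the params/agg_flags bytes and a little-endian producer index before writing the structure to the doorbell BAR. A hedged sketch with the shift defines copied from the structure above; the actual destination and aggregation-command encodings live elsewhere in the qed headers, so the dest/agg_cmd/val_sel arguments here are placeholders.

	#include <stdint.h>

	#define ISCSI_DB_DATA_DEST_SHIFT	0
	#define ISCSI_DB_DATA_AGG_CMD_SHIFT	2
	#define ISCSI_DB_DATA_AGG_VAL_SEL_SHIFT	6

	struct iscsi_db_data_sketch {
		uint8_t params;
		uint8_t agg_flags;
		uint16_t sq_prod;	/* __le16 in the real structure */
	};

	static void iscsi_fill_db(struct iscsi_db_data_sketch *db, uint8_t dest,
				  uint8_t agg_cmd, uint8_t val_sel, uint16_t prod)
	{
		db->params = (dest << ISCSI_DB_DATA_DEST_SHIFT) |
			     (agg_cmd << ISCSI_DB_DATA_AGG_CMD_SHIFT) |
			     (val_sel << ISCSI_DB_DATA_AGG_VAL_SEL_SHIFT);
		db->agg_flags = 0;
		db->sq_prod = prod;	/* cpu_to_le16() in kernel code */
	}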
diff --git a/include/linux/qed/iwarp_common.h b/include/linux/qed/iwarp_common.h
index b8b3e1c..c6cfd39 100644
--- a/include/linux/qed/iwarp_common.h
+++ b/include/linux/qed/iwarp_common.h
@@ -29,9 +29,12 @@
  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  * SOFTWARE.
  */
+
 #ifndef __IWARP_COMMON__
 #define __IWARP_COMMON__
+
 #include <linux/qed/rdma_common.h>
+
 /************************/
 /* IWARP FW CONSTANTS	*/
 /************************/
@@ -40,14 +43,14 @@
 #define IWARP_PASSIVE_MODE 1
 
 #define IWARP_SHARED_QUEUE_PAGE_SIZE		(0x8000)
-#define IWARP_SHARED_QUEUE_PAGE_RQ_PBL_OFFSET   (0x4000)
-#define IWARP_SHARED_QUEUE_PAGE_RQ_PBL_MAX_SIZE (0x1000)
-#define IWARP_SHARED_QUEUE_PAGE_SQ_PBL_OFFSET   (0x5000)
-#define IWARP_SHARED_QUEUE_PAGE_SQ_PBL_MAX_SIZE (0x3000)
+#define IWARP_SHARED_QUEUE_PAGE_RQ_PBL_OFFSET	(0x4000)
+#define IWARP_SHARED_QUEUE_PAGE_RQ_PBL_MAX_SIZE	(0x1000)
+#define IWARP_SHARED_QUEUE_PAGE_SQ_PBL_OFFSET	(0x5000)
+#define IWARP_SHARED_QUEUE_PAGE_SQ_PBL_MAX_SIZE	(0x3000)
 
-#define IWARP_REQ_MAX_INLINE_DATA_SIZE          (128)
-#define IWARP_REQ_MAX_SINGLE_SQ_WQE_SIZE        (176)
+#define IWARP_REQ_MAX_INLINE_DATA_SIZE		(128)
+#define IWARP_REQ_MAX_SINGLE_SQ_WQE_SIZE	(176)
 
-#define IWARP_MAX_QPS                           (64 * 1024)
+#define IWARP_MAX_QPS				(64 * 1024)
 
 #endif /* __IWARP_COMMON__ */
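
The realigned constants above imply a fixed layout for the shared queue page: the RQ PBL region ends exactly where the SQ PBL region begins, and the SQ PBL region ends at the page boundary. A standalone C11 sanity check of that layout, with the constants copied from the header:

	#define IWARP_SHARED_QUEUE_PAGE_SIZE		(0x8000)
	#define IWARP_SHARED_QUEUE_PAGE_RQ_PBL_OFFSET	(0x4000)
	#define IWARP_SHARED_QUEUE_PAGE_RQ_PBL_MAX_SIZE	(0x1000)
	#define IWARP_SHARED_QUEUE_PAGE_SQ_PBL_OFFSET	(0x5000)
	#define IWARP_SHARED_QUEUE_PAGE_SQ_PBL_MAX_SIZE	(0x3000)

	_Static_assert(IWARP_SHARED_QUEUE_PAGE_RQ_PBL_OFFSET +
		       IWARP_SHARED_QUEUE_PAGE_RQ_PBL_MAX_SIZE ==
		       IWARP_SHARED_QUEUE_PAGE_SQ_PBL_OFFSET,
		       "RQ PBL region must abut the SQ PBL region");
	_Static_assert(IWARP_SHARED_QUEUE_PAGE_SQ_PBL_OFFSET +
		       IWARP_SHARED_QUEUE_PAGE_SQ_PBL_MAX_SIZE ==
		       IWARP_SHARED_QUEUE_PAGE_SIZE,
		       "SQ PBL region must end at the page boundary");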
diff --git a/include/linux/qed/qed_eth_if.h b/include/linux/qed/qed_eth_if.h
index d60de4a..147d08c 100644
--- a/include/linux/qed/qed_eth_if.h
+++ b/include/linux/qed/qed_eth_if.h
@@ -61,6 +61,35 @@ struct qed_txq_start_ret_params {
 	void *p_handle;
 };
 
+enum qed_filter_config_mode {
+	QED_FILTER_CONFIG_MODE_DISABLE,
+	QED_FILTER_CONFIG_MODE_5_TUPLE,
+	QED_FILTER_CONFIG_MODE_L4_PORT,
+	QED_FILTER_CONFIG_MODE_IP_DEST,
+};
+
+struct qed_ntuple_filter_params {
+	/* Physically mapped address of a buffer holding the packet header to
+	 * be used as the filter.
+	 */
+	dma_addr_t addr;
+
+	/* Length of header in bytes */
+	u16 length;
+
+	/* Relative queue-id to receive classified packet */
+#define QED_RFS_NTUPLE_QID_RSS ((u16)-1)
+	u16 qid;
+
+	/* The filter owner is identified either by vport-id or by vf-id */
+	bool b_is_vf;
+	u8 vport_id;
+	u8 vf_id;
+
+	/* True if this filter is to be added, false if it is to be removed */
+	bool b_is_add;
+};
+
 struct qed_dev_eth_info {
 	struct qed_dev_info common;
 
@@ -316,13 +345,12 @@ struct qed_eth_ops {
 	int (*tunn_config)(struct qed_dev *cdev,
 			   struct qed_tunn_params *params);
 
-	int (*ntuple_filter_config)(struct qed_dev *cdev, void *cookie,
-				    dma_addr_t mapping, u16 length,
-				    u16 vport_id, u16 rx_queue_id,
-				    bool add_filter);
+	int (*ntuple_filter_config)(struct qed_dev *cdev,
+				    void *cookie,
+				    struct qed_ntuple_filter_params *params);
 
 	int (*configure_arfs_searcher)(struct qed_dev *cdev,
-				       bool en_searcher);
+				       enum qed_filter_config_mode mode);
 	int (*get_coalesce)(struct qed_dev *cdev, u16 *coal, void *handle);
 };
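
With this change a caller packs everything into qed_ntuple_filter_params instead of passing the mapping, length, vport and queue as separate arguments. A hedged sketch of the migrated call site, assuming cdev/cookie/ops and the DMA-mapped header buffer already exist in the caller; only the parameter packing is shown.

	static int add_ntuple_filter(struct qed_dev *cdev, void *cookie,
				     const struct qed_eth_ops *ops,
				     dma_addr_t hdr_addr, u16 hdr_len, u16 rxq_id)
	{
		struct qed_ntuple_filter_params params = { 0 };

		params.addr = hdr_addr;
		params.length = hdr_len;
		params.qid = rxq_id;		/* or QED_RFS_NTUPLE_QID_RSS */
		params.b_is_vf = false;		/* classify by vport, not VF */
		params.vport_id = 0;
		params.b_is_add = true;		/* false would remove the filter */

		return ops->ntuple_filter_config(cdev, cookie, &params);
	}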
 
diff --git a/include/linux/qed/qed_if.h b/include/linux/qed/qed_if.h
index cc646ca..15e398c 100644
--- a/include/linux/qed/qed_if.h
+++ b/include/linux/qed/qed_if.h
@@ -244,16 +244,11 @@ struct qed_fcoe_pf_params {
 /* Most of the parameters below are described in the FW iSCSI / TCP HSI */
 struct qed_iscsi_pf_params {
 	u64 glbl_q_params_addr;
-	u64 bdq_pbl_base_addr[2];
-	u32 max_cwnd;
+	u64 bdq_pbl_base_addr[3];
 	u16 cq_num_entries;
 	u16 cmdq_num_entries;
 	u32 two_msl_timer;
-	u16 dup_ack_threshold;
 	u16 tx_sws_timer;
-	u16 min_rto;
-	u16 min_rto_rt;
-	u16 max_rto;
 
 	/* The following parameters are used during HW-init
 	 * and these parameters need to be passed as arguments
@@ -264,8 +259,8 @@ struct qed_iscsi_pf_params {
 
 	/* The following parameters are used during protocol-init */
 	u16 half_way_close_timeout;
-	u16 bdq_xoff_threshold[2];
-	u16 bdq_xon_threshold[2];
+	u16 bdq_xoff_threshold[3];
+	u16 bdq_xon_threshold[3];
 	u16 cmdq_xoff_threshold;
 	u16 cmdq_xon_threshold;
 	u16 rq_buffer_size;
@@ -281,10 +276,11 @@ struct qed_iscsi_pf_params {
 	u8 gl_cmd_pi;
 	u8 debug_mode;
 	u8 ll2_ooo_queue_id;
-	u8 ooo_enable;
 
 	u8 is_target;
-	u8 bdq_pbl_num_entries[2];
+	u8 is_soc_en;
+	u8 soc_num_of_blocks_log;
+	u8 bdq_pbl_num_entries[3];
 };
 
 struct qed_rdma_pf_params {
@@ -316,16 +312,16 @@ enum qed_int_mode {
 };
 
 struct qed_sb_info {
-	struct status_block	*sb_virt;
-	dma_addr_t		sb_phys;
-	u32			sb_ack; /* Last given ack */
-	u16			igu_sb_id;
-	void __iomem		*igu_addr;
-	u8			flags;
-#define QED_SB_INFO_INIT        0x1
-#define QED_SB_INFO_SETUP       0x2
+	struct status_block_e4 *sb_virt;
+	dma_addr_t sb_phys;
+	u32 sb_ack; /* Last given ack */
+	u16 igu_sb_id;
+	void __iomem *igu_addr;
+	u8 flags;
+#define QED_SB_INFO_INIT	0x1
+#define QED_SB_INFO_SETUP	0x2
 
-	struct qed_dev		*cdev;
+	struct qed_dev *cdev;
 };
 
 enum qed_dev_type {
@@ -939,7 +935,7 @@ static inline u16 qed_sb_update_sb_idx(struct qed_sb_info *sb_info)
 	u16 rc = 0;
 
 	prod = le32_to_cpu(sb_info->sb_virt->prod_index) &
-	       STATUS_BLOCK_PROD_INDEX_MASK;
+	       STATUS_BLOCK_E4_PROD_INDEX_MASK;
 	if (sb_info->sb_ack != prod) {
 		sb_info->sb_ack = prod;
 		rc |= QED_SB_IDX;
diff --git a/include/linux/qed/qed_iscsi_if.h b/include/linux/qed/qed_iscsi_if.h
index 111e606..d0df1be 100644
--- a/include/linux/qed/qed_iscsi_if.h
+++ b/include/linux/qed/qed_iscsi_if.h
@@ -102,7 +102,6 @@ struct qed_iscsi_params_offload {
 	u32 ss_thresh;
 	u16 srtt;
 	u16 rtt_var;
-	u32 ts_time;
 	u32 ts_recent;
 	u32 ts_recent_age;
 	u32 total_rt;
@@ -124,7 +123,6 @@ struct qed_iscsi_params_offload {
 	u16 mss;
 	u8 snd_wnd_scale;
 	u8 rcv_wnd_scale;
-	u32 ts_ticks_per_second;
 	u16 da_timeout_value;
 	u8 ack_frequency;
 };
diff --git a/include/linux/qed/qed_ll2_if.h b/include/linux/qed/qed_ll2_if.h
index e755954..266c1fb 100644
--- a/include/linux/qed/qed_ll2_if.h
+++ b/include/linux/qed/qed_ll2_if.h
@@ -116,7 +116,7 @@ struct qed_ll2_comp_rx_data {
 	u32 opaque_data_1;
 
 	/* GSI only */
-	u32 gid_dst[4];
+	u32 src_qp;
 	u16 qp_id;
 
 	union {
diff --git a/include/linux/qed/rdma_common.h b/include/linux/qed/rdma_common.h
index a9b3050..c1a446e 100644
--- a/include/linux/qed/rdma_common.h
+++ b/include/linux/qed/rdma_common.h
@@ -32,28 +32,29 @@
 
 #ifndef __RDMA_COMMON__
 #define __RDMA_COMMON__
+
 /************************/
 /* RDMA FW CONSTANTS */
 /************************/
 
-#define RDMA_RESERVED_LKEY                      (0)
-#define RDMA_RING_PAGE_SIZE                     (0x1000)
+#define RDMA_RESERVED_LKEY		(0)
+#define RDMA_RING_PAGE_SIZE		(0x1000)
 
-#define RDMA_MAX_SGE_PER_SQ_WQE         (4)
-#define RDMA_MAX_SGE_PER_RQ_WQE         (4)
+#define RDMA_MAX_SGE_PER_SQ_WQE		(4)
+#define RDMA_MAX_SGE_PER_RQ_WQE		(4)
 
 #define RDMA_MAX_DATA_SIZE_IN_WQE	(0x80000000)
 
-#define RDMA_REQ_RD_ATOMIC_ELM_SIZE             (0x50)
-#define RDMA_RESP_RD_ATOMIC_ELM_SIZE    (0x20)
+#define RDMA_REQ_RD_ATOMIC_ELM_SIZE	(0x50)
+#define RDMA_RESP_RD_ATOMIC_ELM_SIZE	(0x20)
 
-#define RDMA_MAX_CQS                            (64 * 1024)
-#define RDMA_MAX_TIDS                           (128 * 1024 - 1)
-#define RDMA_MAX_PDS                            (64 * 1024)
+#define RDMA_MAX_CQS			(64 * 1024)
+#define RDMA_MAX_TIDS			(128 * 1024 - 1)
+#define RDMA_MAX_PDS			(64 * 1024)
 
-#define RDMA_NUM_STATISTIC_COUNTERS                     MAX_NUM_VPORTS
-#define RDMA_NUM_STATISTIC_COUNTERS_K2                  MAX_NUM_VPORTS_K2
-#define RDMA_NUM_STATISTIC_COUNTERS_BB                  MAX_NUM_VPORTS_BB
+#define RDMA_NUM_STATISTIC_COUNTERS	MAX_NUM_VPORTS
+#define RDMA_NUM_STATISTIC_COUNTERS_K2	MAX_NUM_VPORTS_K2
+#define RDMA_NUM_STATISTIC_COUNTERS_BB	MAX_NUM_VPORTS_BB
 
 #define RDMA_TASK_TYPE (PROTOCOLID_ROCE)
 
diff --git a/include/linux/qed/roce_common.h b/include/linux/qed/roce_common.h
index fe6a33e..e15e0da 100644
--- a/include/linux/qed/roce_common.h
+++ b/include/linux/qed/roce_common.h
@@ -33,13 +33,18 @@
 #ifndef __ROCE_COMMON__
 #define __ROCE_COMMON__
 
-#define ROCE_REQ_MAX_INLINE_DATA_SIZE (256)
-#define ROCE_REQ_MAX_SINGLE_SQ_WQE_SIZE (288)
+/************************/
+/* ROCE FW CONSTANTS */
+/************************/
 
-#define ROCE_MAX_QPS	(32 * 1024)
-#define ROCE_DCQCN_NP_MAX_QPS	(64)
-#define ROCE_DCQCN_RP_MAX_QPS	(64)
+#define ROCE_REQ_MAX_INLINE_DATA_SIZE	(256)
+#define ROCE_REQ_MAX_SINGLE_SQ_WQE_SIZE	(288)
 
+#define ROCE_MAX_QPS			(32 * 1024)
+#define ROCE_DCQCN_NP_MAX_QPS		(64)
+#define ROCE_DCQCN_RP_MAX_QPS		(64)
+
+/* Affiliated asynchronous events / errors enumeration */
 enum roce_async_events_type {
 	ROCE_ASYNC_EVENT_NONE = 0,
 	ROCE_ASYNC_EVENT_COMM_EST = 1,
diff --git a/include/linux/qed/storage_common.h b/include/linux/qed/storage_common.h
index 08df82a..505c0b4 100644
--- a/include/linux/qed/storage_common.h
+++ b/include/linux/qed/storage_common.h
@@ -33,43 +33,77 @@
 #ifndef __STORAGE_COMMON__
 #define __STORAGE_COMMON__
 
-#define NUM_OF_CMDQS_CQS (NUM_OF_GLOBAL_QUEUES / 2)
-#define BDQ_NUM_RESOURCES (4)
+/*********************/
+/* SCSI CONSTANTS */
+/*********************/
 
-#define BDQ_ID_RQ                        (0)
-#define BDQ_ID_IMM_DATA          (1)
-#define BDQ_NUM_IDS          (2)
+#define SCSI_MAX_NUM_OF_CMDQS		(NUM_OF_GLOBAL_QUEUES / 2)
+#define BDQ_NUM_RESOURCES		(4)
 
-#define SCSI_NUM_SGES_SLOW_SGL_THR      8
+#define BDQ_ID_RQ			(0)
+#define BDQ_ID_IMM_DATA			(1)
+#define BDQ_ID_TQ			(2)
+#define BDQ_NUM_IDS			(3)
 
-#define BDQ_MAX_EXTERNAL_RING_SIZE (1 << 15)
+#define SCSI_NUM_SGES_SLOW_SGL_THR	8
 
-struct scsi_bd {
-	struct regpair address;
-	struct regpair opaque;
+#define BDQ_MAX_EXTERNAL_RING_SIZE	BIT(15)
+
+/* SCSI op codes */
+#define SCSI_OPCODE_COMPARE_AND_WRITE	(0x89)
+#define SCSI_OPCODE_READ_10		(0x28)
+#define SCSI_OPCODE_WRITE_6		(0x0A)
+#define SCSI_OPCODE_WRITE_10		(0x2A)
+#define SCSI_OPCODE_WRITE_12		(0xAA)
+#define SCSI_OPCODE_WRITE_16		(0x8A)
+#define SCSI_OPCODE_WRITE_AND_VERIFY_10	(0x2E)
+#define SCSI_OPCODE_WRITE_AND_VERIFY_12	(0xAE)
+#define SCSI_OPCODE_WRITE_AND_VERIFY_16	(0x8E)
+
+/* iSCSI Drv opaque */
+struct iscsi_drv_opaque {
+	__le16 reserved_zero[3];
+	__le16 opaque;
 };
 
+/* Scsi 2B/8B opaque union */
+union scsi_opaque {
+	struct regpair fcoe_opaque;
+	struct iscsi_drv_opaque iscsi_opaque;
+};
+
+/* SCSI buffer descriptor */
+struct scsi_bd {
+	struct regpair address;
+	union scsi_opaque opaque;
+};
+
+/* Scsi Drv BDQ struct */
 struct scsi_bdq_ram_drv_data {
 	__le16 external_producer;
 	__le16 reserved0[3];
 };
 
+/* SCSI SGE entry */
 struct scsi_sge {
 	struct regpair sge_addr;
 	__le32 sge_len;
 	__le32 reserved;
 };
 
+/* Cached SGEs section */
 struct scsi_cached_sges {
 	struct scsi_sge sge[4];
 };
 
+/* Scsi Drv CMDQ struct */
 struct scsi_drv_cmdq {
 	__le16 cmdq_cons;
 	__le16 reserved0;
 	__le32 reserved1;
 };
 
+/* Common SCSI init params passed by driver to FW in function init ramrod */
 struct scsi_init_func_params {
 	__le16 num_tasks;
 	u8 log_page_size;
@@ -77,6 +111,7 @@ struct scsi_init_func_params {
 	u8 reserved2[12];
 };
 
+/* SCSI RQ/CQ/CMDQ firmware function init parameters */
 struct scsi_init_func_queues {
 	struct regpair glbl_q_params_addr;
 	__le16 rq_buffer_size;
@@ -84,39 +119,45 @@ struct scsi_init_func_queues {
 	__le16 cmdq_num_entries;
 	u8 bdq_resource_id;
 	u8 q_validity;
-#define SCSI_INIT_FUNC_QUEUES_RQ_VALID_MASK        0x1
-#define SCSI_INIT_FUNC_QUEUES_RQ_VALID_SHIFT       0
-#define SCSI_INIT_FUNC_QUEUES_IMM_DATA_VALID_MASK  0x1
-#define SCSI_INIT_FUNC_QUEUES_IMM_DATA_VALID_SHIFT 1
-#define SCSI_INIT_FUNC_QUEUES_CMD_VALID_MASK       0x1
-#define SCSI_INIT_FUNC_QUEUES_CMD_VALID_SHIFT      2
-#define SCSI_INIT_FUNC_QUEUES_RESERVED_VALID_MASK  0x1F
-#define SCSI_INIT_FUNC_QUEUES_RESERVED_VALID_SHIFT 3
+#define SCSI_INIT_FUNC_QUEUES_RQ_VALID_MASK			0x1
+#define SCSI_INIT_FUNC_QUEUES_RQ_VALID_SHIFT			0
+#define SCSI_INIT_FUNC_QUEUES_IMM_DATA_VALID_MASK		0x1
+#define SCSI_INIT_FUNC_QUEUES_IMM_DATA_VALID_SHIFT		1
+#define SCSI_INIT_FUNC_QUEUES_CMD_VALID_MASK			0x1
+#define SCSI_INIT_FUNC_QUEUES_CMD_VALID_SHIFT			2
+#define SCSI_INIT_FUNC_QUEUES_TQ_VALID_MASK			0x1
+#define SCSI_INIT_FUNC_QUEUES_TQ_VALID_SHIFT			3
+#define SCSI_INIT_FUNC_QUEUES_SOC_EN_MASK			0x1
+#define SCSI_INIT_FUNC_QUEUES_SOC_EN_SHIFT			4
+#define SCSI_INIT_FUNC_QUEUES_SOC_NUM_OF_BLOCKS_LOG_MASK	0x7
+#define SCSI_INIT_FUNC_QUEUES_SOC_NUM_OF_BLOCKS_LOG_SHIFT	5
+	__le16 cq_cmdq_sb_num_arr[SCSI_MAX_NUM_OF_CMDQS];
 	u8 num_queues;
 	u8 queue_relative_offset;
 	u8 cq_sb_pi;
 	u8 cmdq_sb_pi;
-	__le16 cq_cmdq_sb_num_arr[NUM_OF_CMDQS_CQS];
-	__le16 reserved0;
 	u8 bdq_pbl_num_entries[BDQ_NUM_IDS];
+	u8 reserved1;
 	struct regpair bdq_pbl_base_address[BDQ_NUM_IDS];
 	__le16 bdq_xoff_threshold[BDQ_NUM_IDS];
-	__le16 bdq_xon_threshold[BDQ_NUM_IDS];
 	__le16 cmdq_xoff_threshold;
+	__le16 bdq_xon_threshold[BDQ_NUM_IDS];
 	__le16 cmdq_xon_threshold;
-	__le32 reserved1;
 };
 
+/* Scsi Drv BDQ Data struct (BDQ IDs: 0 - RQ, 1 - Immediate Data, 2 - TQ) */
 struct scsi_ram_per_bdq_resource_drv_data {
 	struct scsi_bdq_ram_drv_data drv_data_per_bdq_id[BDQ_NUM_IDS];
 };
 
+/* SCSI SGL types */
 enum scsi_sgl_mode {
 	SCSI_TX_SLOW_SGL,
 	SCSI_FAST_SGL,
 	MAX_SCSI_SGL_MODE
 };
 
+/* SCSI SGL parameters */
 struct scsi_sgl_params {
 	struct regpair sgl_addr;
 	__le32 sgl_total_length;
@@ -126,10 +167,16 @@ struct scsi_sgl_params {
 	u8 reserved;
 };
 
+/* SCSI terminate connection params */
 struct scsi_terminate_extra_params {
 	__le16 unsolicited_cq_count;
 	__le16 cmdq_count;
 	u8 reserved[4];
 };
 
+/* SCSI Task Queue Element */
+struct scsi_tqe {
+	__le16 itid;
+};
+
 #endif /* __STORAGE_COMMON__ */
diff --git a/include/linux/qed/tcp_common.h b/include/linux/qed/tcp_common.h
index dbf7a43..4a48451 100644
--- a/include/linux/qed/tcp_common.h
+++ b/include/linux/qed/tcp_common.h
@@ -33,8 +33,13 @@
 #ifndef __TCP_COMMON__
 #define __TCP_COMMON__
 
-#define TCP_INVALID_TIMEOUT_VAL -1
+/********************/
+/* TCP FW CONSTANTS */
+/********************/
 
+#define TCP_INVALID_TIMEOUT_VAL	-1
+
+/* OOO opaque data received from LL2 */
 struct ooo_opaque {
 	__le32 cid;
 	u8 drop_isle;
@@ -43,25 +48,29 @@ struct ooo_opaque {
 	u8 ooo_isle;
 };
 
+/* tcp connect mode enum */
 enum tcp_connect_mode {
 	TCP_CONNECT_ACTIVE,
 	TCP_CONNECT_PASSIVE,
 	MAX_TCP_CONNECT_MODE
 };
 
+/* tcp function init parameters */
 struct tcp_init_params {
 	__le32 two_msl_timer;
 	__le16 tx_sws_timer;
-	u8 maxfinrt;
+	u8 max_fin_rt;
 	u8 reserved[9];
 };
 
+/* tcp IPv4/IPv6 enum */
 enum tcp_ip_version {
 	TCP_IPV4,
 	TCP_IPV6,
 	MAX_TCP_IP_VERSION
 };
 
+/* tcp offload parameters */
 struct tcp_offload_params {
 	__le16 local_mac_addr_lo;
 	__le16 local_mac_addr_mid;
@@ -70,24 +79,29 @@ struct tcp_offload_params {
 	__le16 remote_mac_addr_mid;
 	__le16 remote_mac_addr_hi;
 	__le16 vlan_id;
-	u8 flags;
-#define TCP_OFFLOAD_PARAMS_TS_EN_MASK         0x1
-#define TCP_OFFLOAD_PARAMS_TS_EN_SHIFT        0
-#define TCP_OFFLOAD_PARAMS_DA_EN_MASK         0x1
-#define TCP_OFFLOAD_PARAMS_DA_EN_SHIFT        1
-#define TCP_OFFLOAD_PARAMS_KA_EN_MASK         0x1
-#define TCP_OFFLOAD_PARAMS_KA_EN_SHIFT        2
-#define TCP_OFFLOAD_PARAMS_NAGLE_EN_MASK      0x1
-#define TCP_OFFLOAD_PARAMS_NAGLE_EN_SHIFT     3
-#define TCP_OFFLOAD_PARAMS_DA_CNT_EN_MASK     0x1
-#define TCP_OFFLOAD_PARAMS_DA_CNT_EN_SHIFT    4
-#define TCP_OFFLOAD_PARAMS_FIN_SENT_MASK      0x1
-#define TCP_OFFLOAD_PARAMS_FIN_SENT_SHIFT     5
-#define TCP_OFFLOAD_PARAMS_FIN_RECEIVED_MASK  0x1
-#define TCP_OFFLOAD_PARAMS_FIN_RECEIVED_SHIFT 6
-#define TCP_OFFLOAD_PARAMS_RESERVED0_MASK     0x1
-#define TCP_OFFLOAD_PARAMS_RESERVED0_SHIFT    7
+	__le16 flags;
+#define TCP_OFFLOAD_PARAMS_TS_EN_MASK			0x1
+#define TCP_OFFLOAD_PARAMS_TS_EN_SHIFT			0
+#define TCP_OFFLOAD_PARAMS_DA_EN_MASK			0x1
+#define TCP_OFFLOAD_PARAMS_DA_EN_SHIFT			1
+#define TCP_OFFLOAD_PARAMS_KA_EN_MASK			0x1
+#define TCP_OFFLOAD_PARAMS_KA_EN_SHIFT			2
+#define TCP_OFFLOAD_PARAMS_ECN_SENDER_EN_MASK		0x1
+#define TCP_OFFLOAD_PARAMS_ECN_SENDER_EN_SHIFT		3
+#define TCP_OFFLOAD_PARAMS_ECN_RECEIVER_EN_MASK		0x1
+#define TCP_OFFLOAD_PARAMS_ECN_RECEIVER_EN_SHIFT	4
+#define TCP_OFFLOAD_PARAMS_NAGLE_EN_MASK		0x1
+#define TCP_OFFLOAD_PARAMS_NAGLE_EN_SHIFT		5
+#define TCP_OFFLOAD_PARAMS_DA_CNT_EN_MASK		0x1
+#define TCP_OFFLOAD_PARAMS_DA_CNT_EN_SHIFT		6
+#define TCP_OFFLOAD_PARAMS_FIN_SENT_MASK		0x1
+#define TCP_OFFLOAD_PARAMS_FIN_SENT_SHIFT		7
+#define TCP_OFFLOAD_PARAMS_FIN_RECEIVED_MASK		0x1
+#define TCP_OFFLOAD_PARAMS_FIN_RECEIVED_SHIFT		8
+#define TCP_OFFLOAD_PARAMS_RESERVED_MASK		0x7F
+#define TCP_OFFLOAD_PARAMS_RESERVED_SHIFT		9
 	u8 ip_version;
+	u8 reserved0[3];
 	__le32 remote_ip[4];
 	__le32 local_ip[4];
 	__le32 flow_label;
@@ -99,17 +113,21 @@ struct tcp_offload_params {
 	u8 rcv_wnd_scale;
 	u8 connect_mode;
 	__le16 srtt;
-	__le32 cwnd;
 	__le32 ss_thresh;
-	__le16 reserved1;
+	__le32 rcv_wnd;
+	__le32 cwnd;
 	u8 ka_max_probe_cnt;
 	u8 dup_ack_theshold;
+	__le16 reserved1;
+	__le32 ka_timeout;
+	__le32 ka_interval;
+	__le32 max_rt_time;
+	__le32 initial_rcv_wnd;
 	__le32 rcv_next;
 	__le32 snd_una;
 	__le32 snd_next;
 	__le32 snd_max;
 	__le32 snd_wnd;
-	__le32 rcv_wnd;
 	__le32 snd_wl1;
 	__le32 ts_recent;
 	__le32 ts_recent_age;
@@ -122,16 +140,13 @@ struct tcp_offload_params {
 	u8 rt_cnt;
 	__le16 rtt_var;
 	__le16 fw_internal;
-	__le32 ka_timeout;
-	__le32 ka_interval;
-	__le32 max_rt_time;
-	__le32 initial_rcv_wnd;
 	u8 snd_wnd_scale;
 	u8 ack_frequency;
 	__le16 da_timeout_value;
-	__le32 reserved3[2];
+	__le32 reserved3;
 };
 
+/* tcp offload parameters, option 2 */
 struct tcp_offload_params_opt2 {
 	__le16 local_mac_addr_lo;
 	__le16 local_mac_addr_mid;
@@ -140,16 +155,19 @@ struct tcp_offload_params_opt2 {
 	__le16 remote_mac_addr_mid;
 	__le16 remote_mac_addr_hi;
 	__le16 vlan_id;
-	u8 flags;
-#define TCP_OFFLOAD_PARAMS_OPT2_TS_EN_MASK      0x1
-#define TCP_OFFLOAD_PARAMS_OPT2_TS_EN_SHIFT     0
-#define TCP_OFFLOAD_PARAMS_OPT2_DA_EN_MASK      0x1
-#define TCP_OFFLOAD_PARAMS_OPT2_DA_EN_SHIFT     1
-#define TCP_OFFLOAD_PARAMS_OPT2_KA_EN_MASK      0x1
-#define TCP_OFFLOAD_PARAMS_OPT2_KA_EN_SHIFT     2
-#define TCP_OFFLOAD_PARAMS_OPT2_RESERVED0_MASK  0x1F
-#define TCP_OFFLOAD_PARAMS_OPT2_RESERVED0_SHIFT 3
+	__le16 flags;
+#define TCP_OFFLOAD_PARAMS_OPT2_TS_EN_MASK	0x1
+#define TCP_OFFLOAD_PARAMS_OPT2_TS_EN_SHIFT	0
+#define TCP_OFFLOAD_PARAMS_OPT2_DA_EN_MASK	0x1
+#define TCP_OFFLOAD_PARAMS_OPT2_DA_EN_SHIFT	1
+#define TCP_OFFLOAD_PARAMS_OPT2_KA_EN_MASK	0x1
+#define TCP_OFFLOAD_PARAMS_OPT2_KA_EN_SHIFT	2
+#define TCP_OFFLOAD_PARAMS_OPT2_ECN_EN_MASK	0x1
+#define TCP_OFFLOAD_PARAMS_OPT2_ECN_EN_SHIFT	3
+#define TCP_OFFLOAD_PARAMS_OPT2_RESERVED0_MASK	0xFFF
+#define TCP_OFFLOAD_PARAMS_OPT2_RESERVED0_SHIFT	4
 	u8 ip_version;
+	u8 reserved1[3];
 	__le32 remote_ip[4];
 	__le32 local_ip[4];
 	__le32 flow_label;
@@ -163,9 +181,16 @@ struct tcp_offload_params_opt2 {
 	__le16 syn_ip_payload_length;
 	__le32 syn_phy_addr_lo;
 	__le32 syn_phy_addr_hi;
-	__le32 reserved1[22];
+	__le32 cwnd;
+	u8 ka_max_probe_cnt;
+	u8 reserved2[3];
+	__le32 ka_timeout;
+	__le32 ka_interval;
+	__le32 max_rt_time;
+	__le32 reserved3[16];
 };
 
+/* tcp segment placement event enum */
 enum tcp_seg_placement_event {
 	TCP_EVENT_ADD_PEN,
 	TCP_EVENT_ADD_NEW_ISLE,
@@ -177,40 +202,41 @@ enum tcp_seg_placement_event {
 	MAX_TCP_SEG_PLACEMENT_EVENT
 };
 
+/* tcp update parameters */
 struct tcp_update_params {
 	__le16 flags;
-#define TCP_UPDATE_PARAMS_REMOTE_MAC_ADDR_CHANGED_MASK   0x1
-#define TCP_UPDATE_PARAMS_REMOTE_MAC_ADDR_CHANGED_SHIFT  0
-#define TCP_UPDATE_PARAMS_MSS_CHANGED_MASK               0x1
-#define TCP_UPDATE_PARAMS_MSS_CHANGED_SHIFT              1
-#define TCP_UPDATE_PARAMS_TTL_CHANGED_MASK               0x1
-#define TCP_UPDATE_PARAMS_TTL_CHANGED_SHIFT              2
-#define TCP_UPDATE_PARAMS_TOS_OR_TC_CHANGED_MASK         0x1
-#define TCP_UPDATE_PARAMS_TOS_OR_TC_CHANGED_SHIFT        3
-#define TCP_UPDATE_PARAMS_KA_TIMEOUT_CHANGED_MASK        0x1
-#define TCP_UPDATE_PARAMS_KA_TIMEOUT_CHANGED_SHIFT       4
-#define TCP_UPDATE_PARAMS_KA_INTERVAL_CHANGED_MASK       0x1
-#define TCP_UPDATE_PARAMS_KA_INTERVAL_CHANGED_SHIFT      5
-#define TCP_UPDATE_PARAMS_MAX_RT_TIME_CHANGED_MASK       0x1
-#define TCP_UPDATE_PARAMS_MAX_RT_TIME_CHANGED_SHIFT      6
-#define TCP_UPDATE_PARAMS_FLOW_LABEL_CHANGED_MASK        0x1
-#define TCP_UPDATE_PARAMS_FLOW_LABEL_CHANGED_SHIFT       7
-#define TCP_UPDATE_PARAMS_INITIAL_RCV_WND_CHANGED_MASK   0x1
-#define TCP_UPDATE_PARAMS_INITIAL_RCV_WND_CHANGED_SHIFT  8
-#define TCP_UPDATE_PARAMS_KA_MAX_PROBE_CNT_CHANGED_MASK  0x1
-#define TCP_UPDATE_PARAMS_KA_MAX_PROBE_CNT_CHANGED_SHIFT 9
-#define TCP_UPDATE_PARAMS_KA_EN_CHANGED_MASK             0x1
-#define TCP_UPDATE_PARAMS_KA_EN_CHANGED_SHIFT            10
-#define TCP_UPDATE_PARAMS_NAGLE_EN_CHANGED_MASK          0x1
-#define TCP_UPDATE_PARAMS_NAGLE_EN_CHANGED_SHIFT         11
-#define TCP_UPDATE_PARAMS_KA_EN_MASK                     0x1
-#define TCP_UPDATE_PARAMS_KA_EN_SHIFT                    12
-#define TCP_UPDATE_PARAMS_NAGLE_EN_MASK                  0x1
-#define TCP_UPDATE_PARAMS_NAGLE_EN_SHIFT                 13
-#define TCP_UPDATE_PARAMS_KA_RESTART_MASK                0x1
-#define TCP_UPDATE_PARAMS_KA_RESTART_SHIFT               14
-#define TCP_UPDATE_PARAMS_RETRANSMIT_RESTART_MASK        0x1
-#define TCP_UPDATE_PARAMS_RETRANSMIT_RESTART_SHIFT       15
+#define TCP_UPDATE_PARAMS_REMOTE_MAC_ADDR_CHANGED_MASK		0x1
+#define TCP_UPDATE_PARAMS_REMOTE_MAC_ADDR_CHANGED_SHIFT		0
+#define TCP_UPDATE_PARAMS_MSS_CHANGED_MASK			0x1
+#define TCP_UPDATE_PARAMS_MSS_CHANGED_SHIFT			1
+#define TCP_UPDATE_PARAMS_TTL_CHANGED_MASK			0x1
+#define TCP_UPDATE_PARAMS_TTL_CHANGED_SHIFT			2
+#define TCP_UPDATE_PARAMS_TOS_OR_TC_CHANGED_MASK		0x1
+#define TCP_UPDATE_PARAMS_TOS_OR_TC_CHANGED_SHIFT		3
+#define TCP_UPDATE_PARAMS_KA_TIMEOUT_CHANGED_MASK		0x1
+#define TCP_UPDATE_PARAMS_KA_TIMEOUT_CHANGED_SHIFT		4
+#define TCP_UPDATE_PARAMS_KA_INTERVAL_CHANGED_MASK		0x1
+#define TCP_UPDATE_PARAMS_KA_INTERVAL_CHANGED_SHIFT		5
+#define TCP_UPDATE_PARAMS_MAX_RT_TIME_CHANGED_MASK		0x1
+#define TCP_UPDATE_PARAMS_MAX_RT_TIME_CHANGED_SHIFT		6
+#define TCP_UPDATE_PARAMS_FLOW_LABEL_CHANGED_MASK		0x1
+#define TCP_UPDATE_PARAMS_FLOW_LABEL_CHANGED_SHIFT		7
+#define TCP_UPDATE_PARAMS_INITIAL_RCV_WND_CHANGED_MASK		0x1
+#define TCP_UPDATE_PARAMS_INITIAL_RCV_WND_CHANGED_SHIFT		8
+#define TCP_UPDATE_PARAMS_KA_MAX_PROBE_CNT_CHANGED_MASK		0x1
+#define TCP_UPDATE_PARAMS_KA_MAX_PROBE_CNT_CHANGED_SHIFT	9
+#define TCP_UPDATE_PARAMS_KA_EN_CHANGED_MASK			0x1
+#define TCP_UPDATE_PARAMS_KA_EN_CHANGED_SHIFT			10
+#define TCP_UPDATE_PARAMS_NAGLE_EN_CHANGED_MASK			0x1
+#define TCP_UPDATE_PARAMS_NAGLE_EN_CHANGED_SHIFT		11
+#define TCP_UPDATE_PARAMS_KA_EN_MASK				0x1
+#define TCP_UPDATE_PARAMS_KA_EN_SHIFT				12
+#define TCP_UPDATE_PARAMS_NAGLE_EN_MASK				0x1
+#define TCP_UPDATE_PARAMS_NAGLE_EN_SHIFT			13
+#define TCP_UPDATE_PARAMS_KA_RESTART_MASK			0x1
+#define TCP_UPDATE_PARAMS_KA_RESTART_SHIFT			14
+#define TCP_UPDATE_PARAMS_RETRANSMIT_RESTART_MASK		0x1
+#define TCP_UPDATE_PARAMS_RETRANSMIT_RESTART_SHIFT		15
 	__le16 remote_mac_addr_lo;
 	__le16 remote_mac_addr_mid;
 	__le16 remote_mac_addr_hi;
@@ -226,6 +252,7 @@ struct tcp_update_params {
 	u8 reserved1[7];
 };
 
+/* toe upload parameters */
 struct tcp_upload_params {
 	__le32 rcv_next;
 	__le32 snd_una;
diff --git a/include/linux/rhashtable.h b/include/linux/rhashtable.h
index 361c08e..c9df252 100644
--- a/include/linux/rhashtable.h
+++ b/include/linux/rhashtable.h
@@ -207,6 +207,7 @@ struct rhashtable_iter {
 	struct rhashtable_walker walker;
 	unsigned int slot;
 	unsigned int skip;
+	bool end_of_table;
 };
 
 static inline unsigned long rht_marker(const struct rhashtable *ht, u32 hash)
@@ -239,34 +240,42 @@ static inline unsigned int rht_bucket_index(const struct bucket_table *tbl,
 	return (hash >> RHT_HASH_RESERVED_SPACE) & (tbl->size - 1);
 }
 
-static inline unsigned int rht_key_hashfn(
-	struct rhashtable *ht, const struct bucket_table *tbl,
-	const void *key, const struct rhashtable_params params)
+static inline unsigned int rht_key_get_hash(struct rhashtable *ht,
+	const void *key, const struct rhashtable_params params,
+	unsigned int hash_rnd)
 {
 	unsigned int hash;
 
 	/* params must be equal to ht->p if it isn't constant. */
 	if (!__builtin_constant_p(params.key_len))
-		hash = ht->p.hashfn(key, ht->key_len, tbl->hash_rnd);
+		hash = ht->p.hashfn(key, ht->key_len, hash_rnd);
 	else if (params.key_len) {
 		unsigned int key_len = params.key_len;
 
 		if (params.hashfn)
-			hash = params.hashfn(key, key_len, tbl->hash_rnd);
+			hash = params.hashfn(key, key_len, hash_rnd);
 		else if (key_len & (sizeof(u32) - 1))
-			hash = jhash(key, key_len, tbl->hash_rnd);
+			hash = jhash(key, key_len, hash_rnd);
 		else
-			hash = jhash2(key, key_len / sizeof(u32),
-				      tbl->hash_rnd);
+			hash = jhash2(key, key_len / sizeof(u32), hash_rnd);
 	} else {
 		unsigned int key_len = ht->p.key_len;
 
 		if (params.hashfn)
-			hash = params.hashfn(key, key_len, tbl->hash_rnd);
+			hash = params.hashfn(key, key_len, hash_rnd);
 		else
-			hash = jhash(key, key_len, tbl->hash_rnd);
+			hash = jhash(key, key_len, hash_rnd);
 	}
 
+	return hash;
+}
+
+static inline unsigned int rht_key_hashfn(
+	struct rhashtable *ht, const struct bucket_table *tbl,
+	const void *key, const struct rhashtable_params params)
+{
+	unsigned int hash = rht_key_get_hash(ht, key, params, tbl->hash_rnd);
+
 	return rht_bucket_index(tbl, hash);
 }
 
@@ -378,8 +387,15 @@ void *rhashtable_insert_slow(struct rhashtable *ht, const void *key,
 void rhashtable_walk_enter(struct rhashtable *ht,
 			   struct rhashtable_iter *iter);
 void rhashtable_walk_exit(struct rhashtable_iter *iter);
-int rhashtable_walk_start(struct rhashtable_iter *iter) __acquires(RCU);
+int rhashtable_walk_start_check(struct rhashtable_iter *iter) __acquires(RCU);
+
+static inline void rhashtable_walk_start(struct rhashtable_iter *iter)
+{
+	(void)rhashtable_walk_start_check(iter);
+}
+
 void *rhashtable_walk_next(struct rhashtable_iter *iter);
+void *rhashtable_walk_peek(struct rhashtable_iter *iter);
 void rhashtable_walk_stop(struct rhashtable_iter *iter) __releases(RCU);
 
 void rhashtable_free_and_destroy(struct rhashtable *ht,
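
The walker API split above leaves rhashtable_walk_start() as a void
wrapper while rhashtable_walk_start_check() reports -EAGAIN when a
resize got in the way. A minimal caller sketch (walk_example() is an
invented name, not part of this patch):

#include <linux/rhashtable.h>

/* illustrative caller, not from this patch */
static void walk_example(struct rhashtable *ht)
{
	struct rhashtable_iter iter;
	void *obj;

	rhashtable_walk_enter(ht, &iter);
	rhashtable_walk_start(&iter);	/* checked variant's error ignored */

	while ((obj = rhashtable_walk_next(&iter)) != NULL) {
		if (IS_ERR(obj))
			continue;	/* -EAGAIN: table resized under us */
		/* process obj here */
	}

	rhashtable_walk_stop(&iter);
	rhashtable_walk_exit(&iter);
}
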
diff --git a/include/linux/rtnetlink.h b/include/linux/rtnetlink.h
index 2032ce2..62d508b 100644
--- a/include/linux/rtnetlink.h
+++ b/include/linux/rtnetlink.h
@@ -97,13 +97,9 @@ void rtnetlink_init(void);
 void __rtnl_unlock(void);
 void rtnl_kfree_skbs(struct sk_buff *head, struct sk_buff *tail);
 
-#define ASSERT_RTNL() do { \
-	if (unlikely(!rtnl_is_locked())) { \
-		printk(KERN_ERR "RTNL: assertion failed at %s (%d)\n", \
-		       __FILE__,  __LINE__); \
-		dump_stack(); \
-	} \
-} while(0)
+#define ASSERT_RTNL() \
+	WARN_ONCE(!rtnl_is_locked(), \
+		  "RTNL: assertion failed at %s (%d)\n", __FILE__,  __LINE__)
 
 extern int ndo_dflt_fdb_dump(struct sk_buff *skb,
 			     struct netlink_callback *cb,
diff --git a/include/linux/sctp.h b/include/linux/sctp.h
index da803df..b36c766 100644
--- a/include/linux/sctp.h
+++ b/include/linux/sctp.h
@@ -102,11 +102,15 @@ enum sctp_cid {
 	/* AUTH Extension Section 4.1 */
 	SCTP_CID_AUTH			= 0x0F,
 
+	/* sctp ndata 5.1. I-DATA */
+	SCTP_CID_I_DATA			= 0x40,
+
 	/* PR-SCTP Sec 3.2 */
 	SCTP_CID_FWD_TSN		= 0xC0,
 
 	/* Use hex, as defined in ADDIP sec. 3.1 */
 	SCTP_CID_ASCONF			= 0xC1,
+	SCTP_CID_I_FWD_TSN		= 0xC2,
 	SCTP_CID_ASCONF_ACK		= 0x80,
 	SCTP_CID_RECONF			= 0x82,
 }; /* enum */
@@ -240,6 +244,23 @@ struct sctp_data_chunk {
 	struct sctp_datahdr data_hdr;
 };
 
+struct sctp_idatahdr {
+	__be32 tsn;
+	__be16 stream;
+	__be16 reserved;
+	__be32 mid;
+	union {
+		__u32 ppid;
+		__be32 fsn;
+	};
+	__u8 payload[0];
+};
+
+struct sctp_idata_chunk {
+	struct sctp_chunkhdr chunk_hdr;
+	struct sctp_idatahdr data_hdr;
+};
+
 /* DATA Chunk Specific Flags */
 enum {
 	SCTP_DATA_MIDDLE_FRAG	= 0x00,
@@ -596,6 +617,22 @@ struct sctp_fwdtsn_chunk {
 	struct sctp_fwdtsn_hdr fwdtsn_hdr;
 };
 
+struct sctp_ifwdtsn_skip {
+	__be16 stream;
+	__u8 reserved;
+	__u8 flags;
+	__be32 mid;
+};
+
+struct sctp_ifwdtsn_hdr {
+	__be32 new_cum_tsn;
+	struct sctp_ifwdtsn_skip skip[0];
+};
+
+struct sctp_ifwdtsn_chunk {
+	struct sctp_chunkhdr chunk_hdr;
+	struct sctp_ifwdtsn_hdr fwdtsn_hdr;
+};
 
 /* ADDIP
  * Section 3.1.1 Address Configuration Change Chunk (ASCONF)
diff --git a/include/linux/sfp.h b/include/linux/sfp.h
index 4a906f5..e724d5a 100644
--- a/include/linux/sfp.h
+++ b/include/linux/sfp.h
@@ -3,7 +3,7 @@
 
 #include <linux/phy.h>
 
-struct __packed sfp_eeprom_base {
+struct sfp_eeprom_base {
 	u8 phys_id;
 	u8 phys_ext_id;
 	u8 connector;
@@ -165,13 +165,47 @@ struct __packed sfp_eeprom_base {
 	char vendor_rev[4];
 	union {
 		__be16 optical_wavelength;
-		u8 cable_spec;
-	};
+		__be16 cable_compliance;
+		struct {
+#if defined __BIG_ENDIAN_BITFIELD
+			u8 reserved60_2:6;
+			u8 fc_pi_4_app_h:1;
+			u8 sff8431_app_e:1;
+			u8 reserved61:8;
+#elif defined __LITTLE_ENDIAN_BITFIELD
+			u8 sff8431_app_e:1;
+			u8 fc_pi_4_app_h:1;
+			u8 reserved60_2:6;
+			u8 reserved61:8;
+#else
+#error Unknown Endian
+#endif
+		} __packed passive;
+		struct {
+#if defined __BIG_ENDIAN_BITFIELD
+			u8 reserved60_4:4;
+			u8 fc_pi_4_lim:1;
+			u8 sff8431_lim:1;
+			u8 fc_pi_4_app_h:1;
+			u8 sff8431_app_e:1;
+			u8 reserved61:8;
+#elif defined __LITTLE_ENDIAN_BITFIELD
+			u8 sff8431_app_e:1;
+			u8 fc_pi_4_app_h:1;
+			u8 sff8431_lim:1;
+			u8 fc_pi_4_lim:1;
+			u8 reserved60_4:4;
+			u8 reserved61:8;
+#else
+#error Unknown Endian
+#endif
+		} __packed active;
+	} __packed;
 	u8 reserved62;
 	u8 cc_base;
-};
+} __packed;
 
-struct __packed sfp_eeprom_ext {
+struct sfp_eeprom_ext {
 	__be16 options;
 	u8 br_max;
 	u8 br_min;
@@ -181,12 +215,21 @@ struct __packed sfp_eeprom_ext {
 	u8 enhopts;
 	u8 sff8472_compliance;
 	u8 cc_ext;
-};
+} __packed;
 
-struct __packed sfp_eeprom_id {
+/**
+ * struct sfp_eeprom_id - raw SFP module identification information
+ * @base: base SFP module identification structure
+ * @ext: extended SFP module identification structure
+ *
+ * See the SFF-8472 specification and related documents for the definition
+ * of these structure members. This can be obtained from
+ * ftp://ftp.seagate.com/sff
+ */
+struct sfp_eeprom_id {
 	struct sfp_eeprom_base base;
 	struct sfp_eeprom_ext ext;
-};
+} __packed;
 
 /* SFP EEPROM registers */
 enum {
@@ -222,6 +265,7 @@ enum {
 	SFP_SFF8472_COMPLIANCE		= 0x5e,
 	SFP_CC_EXT			= 0x5f,
 
+	SFP_PHYS_ID_SFF			= 0x02,
 	SFP_PHYS_ID_SFP			= 0x03,
 	SFP_PHYS_EXT_ID_SFP		= 0x04,
 	SFP_CONNECTOR_UNSPEC		= 0x00,
@@ -347,19 +391,32 @@ enum {
 	SFP_PAGE			= 0x7f,
 };
 
-struct device_node;
+struct fwnode_handle;
 struct ethtool_eeprom;
 struct ethtool_modinfo;
 struct net_device;
 struct sfp_bus;
 
+/**
+ * struct sfp_upstream_ops - upstream operations structure
+ * @module_insert: called after a module has been detected to determine
+ *   whether the module is supported for the upstream device.
+ * @module_remove: called after the module has been removed.
+ * @link_down: called when the link is non-operational for whatever
+ *   reason.
+ * @link_up: called when the link is operational.
+ * @connect_phy: called when an I2C accessible PHY has been detected
+ *   on the module.
+ * @disconnect_phy: called when a module with an I2C accessible PHY has
+ *   been removed.
+ */
 struct sfp_upstream_ops {
-	int (*module_insert)(void *, const struct sfp_eeprom_id *id);
-	void (*module_remove)(void *);
-	void (*link_down)(void *);
-	void (*link_up)(void *);
-	int (*connect_phy)(void *, struct phy_device *);
-	void (*disconnect_phy)(void *);
+	int (*module_insert)(void *priv, const struct sfp_eeprom_id *id);
+	void (*module_remove)(void *priv);
+	void (*link_down)(void *priv);
+	void (*link_up)(void *priv);
+	int (*connect_phy)(void *priv, struct phy_device *);
+	void (*disconnect_phy)(void *priv);
 };
 
 #if IS_ENABLED(CONFIG_SFP)
@@ -375,7 +432,7 @@ int sfp_get_module_eeprom(struct sfp_bus *bus, struct ethtool_eeprom *ee,
 			  u8 *data);
 void sfp_upstream_start(struct sfp_bus *bus);
 void sfp_upstream_stop(struct sfp_bus *bus);
-struct sfp_bus *sfp_register_upstream(struct device_node *np,
+struct sfp_bus *sfp_register_upstream(struct fwnode_handle *fwnode,
 				      struct net_device *ndev, void *upstream,
 				      const struct sfp_upstream_ops *ops);
 void sfp_unregister_upstream(struct sfp_bus *bus);
@@ -419,7 +476,8 @@ static inline void sfp_upstream_stop(struct sfp_bus *bus)
 {
 }
 
-static inline struct sfp_bus *sfp_register_upstream(struct device_node *np,
+static inline struct sfp_bus *sfp_register_upstream(
+	struct fwnode_handle *fwnode,
 	struct net_device *ndev, void *upstream,
 	const struct sfp_upstream_ops *ops)
 {
diff --git a/include/linux/sh_eth.h b/include/linux/sh_eth.h
index 94081e9..6dfda97 100644
--- a/include/linux/sh_eth.h
+++ b/include/linux/sh_eth.h
@@ -5,12 +5,9 @@
 #include <linux/phy.h>
 #include <linux/if_ether.h>
 
-enum {EDMAC_LITTLE_ENDIAN};
-
 struct sh_eth_plat_data {
 	int phy;
 	int phy_irq;
-	int edmac_endian;
 	phy_interface_t phy_interface;
 	void (*set_mdio_gate)(void *addr);
 
diff --git a/include/linux/skb_array.h b/include/linux/skb_array.h
index 8621ffd..c7addf3 100644
--- a/include/linux/skb_array.h
+++ b/include/linux/skb_array.h
@@ -72,6 +72,11 @@ static inline bool __skb_array_empty(struct skb_array *a)
 	return !__ptr_ring_peek(&a->ring);
 }
 
+static inline struct sk_buff *__skb_array_peek(struct skb_array *a)
+{
+	return __ptr_ring_peek(&a->ring);
+}
+
 static inline bool skb_array_empty(struct skb_array *a)
 {
 	return ptr_ring_empty(&a->ring);
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index a38c80e..b8e0da6 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -1211,6 +1211,11 @@ static inline bool skb_flow_dissect_flow_keys_buf(struct flow_keys *flow,
 				  data, proto, nhoff, hlen, flags);
 }
 
+void
+skb_flow_dissect_tunnel_info(const struct sk_buff *skb,
+			     struct flow_dissector *flow_dissector,
+			     void *target_container);
+
 static inline __u32 skb_get_hash(struct sk_buff *skb)
 {
 	if (!skb->l4_hash && !skb->sw_hash)
diff --git a/include/linux/spinlock.h b/include/linux/spinlock.h
index 3bf2735..4894d32 100644
--- a/include/linux/spinlock.h
+++ b/include/linux/spinlock.h
@@ -409,4 +409,10 @@ extern int _atomic_dec_and_lock(atomic_t *atomic, spinlock_t *lock);
 #define atomic_dec_and_lock(atomic, lock) \
 		__cond_lock(lock, _atomic_dec_and_lock(atomic, lock))
 
+int alloc_bucket_spinlocks(spinlock_t **locks, unsigned int *lock_mask,
+			   size_t max_size, unsigned int cpu_mult,
+			   gfp_t gfp);
+
+void free_bucket_spinlocks(spinlock_t *locks);
+
 #endif /* __LINUX_SPINLOCK_H */
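
For the new bucket-lock helpers declared above, a sketch of the assumed
usage (the example_ names are invented, and the power-of-two/mask
convention is an assumption about the companion lib implementation):

#include <linux/gfp.h>
#include <linux/spinlock.h>

static spinlock_t *bucket_locks;	/* illustrative globals */
static unsigned int bucket_mask;

static int example_buckets_init(void)
{
	/* request up to 1024 locks; the index mask comes back by pointer */
	return alloc_bucket_spinlocks(&bucket_locks, &bucket_mask,
				      1024, 0, GFP_KERNEL);
}

static void example_bucket_update(u32 hash)
{
	spin_lock(&bucket_locks[hash & bucket_mask]);
	/* ... mutate the bucket selected by hash ... */
	spin_unlock(&bucket_locks[hash & bucket_mask]);
}
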
diff --git a/include/linux/tcp.h b/include/linux/tcp.h
index ca4a636..4f93f095 100644
--- a/include/linux/tcp.h
+++ b/include/linux/tcp.h
@@ -344,7 +344,7 @@ struct tcp_sock {
 
 /* Receiver queue space */
 	struct {
-		int	space;
+		u32	space;
 		u32	seq;
 		u64	time;
 	} rcvq_space;
diff --git a/include/linux/trace_events.h b/include/linux/trace_events.h
index af44e7c..8a1442c 100644
--- a/include/linux/trace_events.h
+++ b/include/linux/trace_events.h
@@ -467,6 +467,7 @@ trace_trigger_soft_disabled(struct trace_event_file *file)
 unsigned int trace_call_bpf(struct trace_event_call *call, void *ctx);
 int perf_event_attach_bpf_prog(struct perf_event *event, struct bpf_prog *prog);
 void perf_event_detach_bpf_prog(struct perf_event *event);
+int perf_event_query_prog_array(struct perf_event *event, void __user *info);
 #else
 static inline unsigned int trace_call_bpf(struct trace_event_call *call, void *ctx)
 {
@@ -481,6 +482,11 @@ perf_event_attach_bpf_prog(struct perf_event *event, struct bpf_prog *prog)
 
 static inline void perf_event_detach_bpf_prog(struct perf_event *event) { }
 
+static inline int
+perf_event_query_prog_array(struct perf_event *event, void __user *info)
+{
+	return -EOPNOTSUPP;
+}
 #endif
 
 enum {
@@ -528,6 +534,7 @@ do {									\
 struct perf_event;
 
 DECLARE_PER_CPU(struct pt_regs, perf_trace_regs);
+DECLARE_PER_CPU(int, bpf_kprobe_override);
 
 extern int  perf_trace_init(struct perf_event *event);
 extern void perf_trace_destroy(struct perf_event *event);
diff --git a/include/net/act_api.h b/include/net/act_api.h
index fd08df7..6ed9692 100644
--- a/include/net/act_api.h
+++ b/include/net/act_api.h
@@ -86,7 +86,7 @@ struct tc_action_ops {
 	int     (*act)(struct sk_buff *, const struct tc_action *,
 		       struct tcf_result *);
 	int     (*dump)(struct sk_buff *, struct tc_action *, int, int);
-	void	(*cleanup)(struct tc_action *, int bind);
+	void	(*cleanup)(struct tc_action *);
 	int     (*lookup)(struct net *, struct tc_action **, u32);
 	int     (*init)(struct net *net, struct nlattr *nla,
 			struct nlattr *est, struct tc_action **act, int ovr,
@@ -120,12 +120,19 @@ int tc_action_net_init(struct tc_action_net *tn,
 void tcf_idrinfo_destroy(const struct tc_action_ops *ops,
 			 struct tcf_idrinfo *idrinfo);
 
-static inline void tc_action_net_exit(struct tc_action_net *tn)
+static inline void tc_action_net_exit(struct list_head *net_list,
+				      unsigned int id)
 {
+	struct net *net;
+
 	rtnl_lock();
-	tcf_idrinfo_destroy(tn->ops, tn->idrinfo);
+	list_for_each_entry(net, net_list, exit_list) {
+		struct tc_action_net *tn = net_generic(net, id);
+
+		tcf_idrinfo_destroy(tn->ops, tn->idrinfo);
+		kfree(tn->idrinfo);
+	}
 	rtnl_unlock();
-	kfree(tn->idrinfo);
 }
 
 int tcf_generic_walker(struct tc_action_net *tn, struct sk_buff *skb,
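
A sketch of how an action module is expected to wire the new batched
exit helper into its pernet_operations (the example_ names are
invented; the actual conversions land in the per-action patches):

#include <net/act_api.h>

static unsigned int example_act_net_id;	/* invented pernet id */

static void __net_exit example_act_exit_net(struct list_head *net_list)
{
	/* one rtnl_lock()/unlock() now covers the whole batch */
	tc_action_net_exit(net_list, example_act_net_id);
}

static struct pernet_operations example_act_net_ops = {
	.exit_batch	= example_act_exit_net,
	.id		= &example_act_net_id,
	.size		= sizeof(struct tc_action_net),
};
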
diff --git a/include/net/addrconf.h b/include/net/addrconf.h
index b623b65..c4185a7 100644
--- a/include/net/addrconf.h
+++ b/include/net/addrconf.h
@@ -180,7 +180,7 @@ static inline int addrconf_finite_timeout(unsigned long timeout)
  */
 int ipv6_addr_label_init(void);
 void ipv6_addr_label_cleanup(void);
-void ipv6_addr_label_rtnl_register(void);
+int ipv6_addr_label_rtnl_register(void);
 u32 ipv6_addr_label(struct net *net, const struct in6_addr *addr,
 		    int type, int ifindex);
 
diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h
index cb4d92b..ab30a22 100644
--- a/include/net/cfg80211.h
+++ b/include/net/cfg80211.h
@@ -1773,6 +1773,8 @@ enum cfg80211_signal_type {
  *	by %parent_bssid.
  * @parent_bssid: the BSS according to which %parent_tsf is set. This is set to
  *	the BSS that requested the scan in which the beacon/probe was received.
+ * @chains: bitmask for filled values in @chain_signal.
+ * @chain_signal: per-chain signal strength of last received BSS in dBm.
  */
 struct cfg80211_inform_bss {
 	struct ieee80211_channel *chan;
@@ -1781,6 +1783,8 @@ struct cfg80211_inform_bss {
 	u64 boottime_ns;
 	u64 parent_tsf;
 	u8 parent_bssid[ETH_ALEN] __aligned(2);
+	u8 chains;
+	s8 chain_signal[IEEE80211_MAX_CHAINS];
 };
 
 /**
@@ -1824,6 +1828,8 @@ struct cfg80211_bss_ies {
  *	that holds the beacon data. @beacon_ies is still valid, of course, and
  *	points to the same data as hidden_beacon_bss->beacon_ies in that case.
  * @signal: signal strength value (type depends on the wiphy's signal_type)
+ * @chains: bitmask for filled values in @chain_signal.
+ * @chain_signal: per-chain signal strength of last received BSS in dBm.
  * @priv: private area for driver use, has at least wiphy->bss_priv_size bytes
  */
 struct cfg80211_bss {
@@ -1842,6 +1848,8 @@ struct cfg80211_bss {
 	u16 capability;
 
 	u8 bssid[ETH_ALEN];
+	u8 chains;
+	s8 chain_signal[IEEE80211_MAX_CHAINS];
 
 	u8 priv[0] __aligned(sizeof(void *));
 };
@@ -2021,6 +2029,9 @@ struct cfg80211_disassoc_request {
  * @ht_capa:  HT Capabilities over-rides.  Values set in ht_capa_mask
  *	will be used in ht_capa.  Un-supported values will be ignored.
  * @ht_capa_mask:  The bits of ht_capa which are to be used.
+ * @wep_keys: static WEP keys, if not NULL points to an array of
+ *	CFG80211_MAX_WEP_KEYS WEP keys
+ * @wep_tx_key: key index (0..3) of the default TX static WEP key
  */
 struct cfg80211_ibss_params {
 	const u8 *ssid;
@@ -2037,6 +2048,8 @@ struct cfg80211_ibss_params {
 	int mcast_rate[NUM_NL80211_BANDS];
 	struct ieee80211_ht_cap ht_capa;
 	struct ieee80211_ht_cap ht_capa_mask;
+	struct key_params *wep_keys;
+	int wep_tx_key;
 };
 
 /**
@@ -5575,7 +5588,7 @@ void cfg80211_conn_failed(struct net_device *dev, const u8 *mac_addr,
  * cfg80211_rx_mgmt - notification of received, unprocessed management frame
  * @wdev: wireless device receiving the frame
  * @freq: Frequency on which the frame was received in MHz
- * @sig_dbm: signal strength in mBm, or 0 if unknown
+ * @sig_dbm: signal strength in dBm, or 0 if unknown
  * @buf: Management frame (header + body)
  * @len: length of the frame data
  * @flags: flags, as defined in enum nl80211_rxmgmt_flags
@@ -5754,7 +5767,7 @@ void cfg80211_probe_status(struct net_device *dev, const u8 *addr,
  * @frame: the frame
  * @len: length of the frame
  * @freq: frequency the frame was received on
- * @sig_dbm: signal strength in mBm, or 0 if unknown
+ * @sig_dbm: signal strength in dBm, or 0 if unknown
  *
  * Use this function to report to userspace when a beacon was
  * received. It is not useful to call this when there is no
diff --git a/include/net/dn_route.h b/include/net/dn_route.h
index 55df993..342d250 100644
--- a/include/net/dn_route.h
+++ b/include/net/dn_route.h
@@ -69,6 +69,7 @@ int dn_route_rcv(struct sk_buff *skb, struct net_device *dev,
  */
 struct dn_route {
 	struct dst_entry dst;
+	struct dn_route __rcu *dn_next;
 
 	struct neighbour *n;
 
diff --git a/include/net/dsa.h b/include/net/dsa.h
index 2a05738..6cb602d 100644
--- a/include/net/dsa.h
+++ b/include/net/dsa.h
@@ -296,31 +296,39 @@ static inline u32 dsa_user_ports(struct dsa_switch *ds)
 	return mask;
 }
 
-static inline u8 dsa_upstream_port(struct dsa_switch *ds)
+/* Return the local port used to reach an arbitrary switch port */
+static inline unsigned int dsa_towards_port(struct dsa_switch *ds, int device,
+					    int port)
 {
-	struct dsa_switch_tree *dst = ds->dst;
-
-	/*
-	 * If this is the root switch (i.e. the switch that connects
-	 * to the CPU), return the cpu port number on this switch.
-	 * Else return the (DSA) port number that connects to the
-	 * switch that is one hop closer to the cpu.
-	 */
-	if (dst->cpu_dp->ds == ds)
-		return dst->cpu_dp->index;
+	if (device == ds->index)
+		return port;
 	else
-		return ds->rtable[dst->cpu_dp->ds->index];
+		return ds->rtable[device];
+}
+
+/* Return the local port used to reach the dedicated CPU port */
+static inline unsigned int dsa_upstream_port(struct dsa_switch *ds, int port)
+{
+	const struct dsa_port *dp = dsa_to_port(ds, port);
+	const struct dsa_port *cpu_dp = dp->cpu_dp;
+
+	if (!cpu_dp)
+		return port;
+
+	return dsa_towards_port(ds, cpu_dp->ds->index, cpu_dp->index);
 }
 
 typedef int dsa_fdb_dump_cb_t(const unsigned char *addr, u16 vid,
 			      bool is_static, void *data);
 struct dsa_switch_ops {
+#if IS_ENABLED(CONFIG_NET_DSA_LEGACY)
 	/*
 	 * Legacy probing.
 	 */
 	const char	*(*probe)(struct device *dsa_dev,
 				  struct device *host_dev, int sw_addr,
 				  void **priv);
+#endif
 
 	enum dsa_tag_protocol (*get_tag_protocol)(struct dsa_switch *ds,
 						  int port);
@@ -412,12 +420,10 @@ struct dsa_switch_ops {
 	 */
 	int	(*port_vlan_filtering)(struct dsa_switch *ds, int port,
 				       bool vlan_filtering);
-	int	(*port_vlan_prepare)(struct dsa_switch *ds, int port,
-				     const struct switchdev_obj_port_vlan *vlan,
-				     struct switchdev_trans *trans);
-	void	(*port_vlan_add)(struct dsa_switch *ds, int port,
-				 const struct switchdev_obj_port_vlan *vlan,
-				 struct switchdev_trans *trans);
+	int (*port_vlan_prepare)(struct dsa_switch *ds, int port,
+				 const struct switchdev_obj_port_vlan *vlan);
+	void (*port_vlan_add)(struct dsa_switch *ds, int port,
+			      const struct switchdev_obj_port_vlan *vlan);
 	int	(*port_vlan_del)(struct dsa_switch *ds, int port,
 				 const struct switchdev_obj_port_vlan *vlan);
 	/*
@@ -433,12 +439,10 @@ struct dsa_switch_ops {
 	/*
 	 * Multicast database
 	 */
-	int	(*port_mdb_prepare)(struct dsa_switch *ds, int port,
-				    const struct switchdev_obj_port_mdb *mdb,
-				    struct switchdev_trans *trans);
-	void	(*port_mdb_add)(struct dsa_switch *ds, int port,
-				const struct switchdev_obj_port_mdb *mdb,
-				struct switchdev_trans *trans);
+	int (*port_mdb_prepare)(struct dsa_switch *ds, int port,
+				const struct switchdev_obj_port_mdb *mdb);
+	void (*port_mdb_add)(struct dsa_switch *ds, int port,
+			     const struct switchdev_obj_port_mdb *mdb);
 	int	(*port_mdb_del)(struct dsa_switch *ds, int port,
 				const struct switchdev_obj_port_mdb *mdb);
 	/*
@@ -472,11 +476,20 @@ struct dsa_switch_driver {
 	const struct dsa_switch_ops *ops;
 };
 
+#if IS_ENABLED(CONFIG_NET_DSA_LEGACY)
 /* Legacy driver registration */
 void register_switch_driver(struct dsa_switch_driver *type);
 void unregister_switch_driver(struct dsa_switch_driver *type);
 struct mii_bus *dsa_host_dev_to_mii_bus(struct device *dev);
 
+#else
+static inline void register_switch_driver(struct dsa_switch_driver *type) { }
+static inline void unregister_switch_driver(struct dsa_switch_driver *type) { }
+static inline struct mii_bus *dsa_host_dev_to_mii_bus(struct device *dev)
+{
+	return NULL;
+}
+#endif
 struct net_device *dsa_dev_to_net_device(struct device *dev);
 
 /* Keep inline for faster access in hot path */
diff --git a/include/net/dst.h b/include/net/dst.h
index b091fd5..33d2a54 100644
--- a/include/net/dst.h
+++ b/include/net/dst.h
@@ -34,13 +34,9 @@ struct sk_buff;
 
 struct dst_entry {
 	struct net_device       *dev;
-	struct rcu_head		rcu_head;
-	struct dst_entry	*child;
 	struct  dst_ops	        *ops;
 	unsigned long		_metrics;
 	unsigned long           expires;
-	struct dst_entry	*path;
-	struct dst_entry	*from;
 #ifdef CONFIG_XFRM
 	struct xfrm_state	*xfrm;
 #else
@@ -59,8 +55,6 @@ struct dst_entry {
 #define DST_XFRM_QUEUE		0x0040
 #define DST_METADATA		0x0080
 
-	short			error;
-
 	/* A non-zero value of dst->obsolete forces by-hand validation
 	 * of the route entry.  Positive values are set by the generic
 	 * dst layer to indicate that the entry has been forcefully
@@ -76,35 +70,24 @@ struct dst_entry {
 #define DST_OBSOLETE_KILL	-2
 	unsigned short		header_len;	/* more space at head required */
 	unsigned short		trailer_len;	/* space to reserve at tail */
-	unsigned short		__pad3;
 
-#ifdef CONFIG_IP_ROUTE_CLASSID
-	__u32			tclassid;
-#else
-	__u32			__pad2;
-#endif
-
-#ifdef CONFIG_64BIT
-	/*
-	 * Align __refcnt to a 64 bytes alignment
-	 * (L1_CACHE_SIZE would be too much)
-	 */
-	long			__pad_to_align_refcnt[2];
-#endif
 	/*
 	 * __refcnt wants to be on a different cache line from
 	 * input/output/ops or performance tanks badly
 	 */
-	atomic_t		__refcnt;	/* client references	*/
+#ifdef CONFIG_64BIT
+	atomic_t		__refcnt;	/* 64-bit offset 64 */
+#endif
 	int			__use;
 	unsigned long		lastuse;
 	struct lwtunnel_state   *lwtstate;
-	union {
-		struct dst_entry	*next;
-		struct rtable __rcu	*rt_next;
-		struct rt6_info __rcu	*rt6_next;
-		struct dn_route __rcu	*dn_next;
-	};
+	struct rcu_head		rcu_head;
+	short			error;
+	short			__pad;
+	__u32			tclassid;
+#ifndef CONFIG_64BIT
+	atomic_t		__refcnt;	/* 32-bit offset 64 */
+#endif
 };
 
 struct dst_metrics {
@@ -250,7 +233,7 @@ static inline void dst_hold(struct dst_entry *dst)
 {
 	/*
 	 * If your kernel compilation stops here, please check
-	 * __pad_to_align_refcnt declaration in struct dst_entry
+	 * the placement of __refcnt in struct dst_entry
 	 */
 	BUILD_BUG_ON(offsetof(struct dst_entry, __refcnt) & 63);
 	WARN_ON(atomic_inc_not_zero(&dst->__refcnt) == 0);
diff --git a/include/net/erspan.h b/include/net/erspan.h
index ca94fc8..acdf684 100644
--- a/include/net/erspan.h
+++ b/include/net/erspan.h
@@ -15,7 +15,7 @@
  *  s, Recur, Flags, Version fields only S (bit 03) is set to 1. The
  *  other fields are set to zero, so only a sequence number follows.
  *
- *  ERSPAN Type II header (8 octets [42:49])
+ *  ERSPAN Version 1 (Type II) header (8 octets [42:49])
  *  0                   1                   2                   3
  *  0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
  * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
@@ -24,11 +24,29 @@
  * |      Reserved         |                  Index                |
  * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
  *
+ *
+ *  ERSPAN Version 2 (Type III) header (12 octets [42:49])
+ *  0                   1                   2                   3
+ *  0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * |  Ver  |          VLAN         | COS |BSO|T|     Session ID    |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * |                          Timestamp                            |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * |             SGT               |P|    FT   |   Hw ID   |D|Gra|O|
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ *
+ *      Platform Specific SubHeader (8 octets, optional)
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * |  Platf ID |               Platform Specific Info              |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * |                  Platform Specific Info                       |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ *
  * GRE proto ERSPAN type II = 0x88BE, type III = 0x22EB
  */
 
-#define ERSPAN_VERSION	0x1
-
+#define ERSPAN_VERSION	0x1	/* ERSPAN type II */
 #define VER_MASK	0xf000
 #define VLAN_MASK	0x0fff
 #define COS_MASK	0xe000
@@ -37,6 +55,28 @@
 #define ID_MASK		0x03ff
 #define INDEX_MASK	0xfffff
 
+#define ERSPAN_VERSION2	0x2	/* ERSPAN type III */
+#define BSO_MASK	EN_MASK
+#define SGT_MASK	0xffff0000
+#define P_MASK		0x8000
+#define FT_MASK		0x7c00
+#define HWID_MASK	0x03f0
+#define DIR_MASK	0x0008
+#define GRA_MASK	0x0006
+#define O_MASK		0x0001
+
+/* ERSPAN version 2 metadata header */
+struct erspan_md2 {
+	__be32 timestamp;
+	__be16 sgt;	/* security group tag */
+	__be16 flags;
+#define P_OFFSET	15
+#define FT_OFFSET	10
+#define HWID_OFFSET	4
+#define DIR_OFFSET	3
+#define GRA_OFFSET	1
+};
+
 enum erspan_encap_type {
 	ERSPAN_ENCAP_NOVLAN = 0x0,	/* originally without VLAN tag */
 	ERSPAN_ENCAP_ISL = 0x1,		/* originally ISL encapsulated */
@@ -44,18 +84,159 @@ enum erspan_encap_type {
 	ERSPAN_ENCAP_INFRAME = 0x3,	/* VLAN tag preserved in frame */
 };
 
+#define ERSPAN_V1_MDSIZE	4
+#define ERSPAN_V2_MDSIZE	8
 struct erspan_metadata {
-	__be32 index;   /* type II */
+	union {
+		__be32 index;		/* Version 1 (type II)*/
+		struct erspan_md2 md2;	/* Version 2 (type III) */
+	} u;
+	int version;
 };
 
-struct erspanhdr {
+struct erspan_base_hdr {
 	__be16 ver_vlan;
 #define VER_OFFSET  12
 	__be16 session_id;
 #define COS_OFFSET  13
 #define EN_OFFSET   11
+#define BSO_OFFSET  EN_OFFSET
 #define T_OFFSET    10
-	struct erspan_metadata md;
 };
 
+static inline int erspan_hdr_len(int version)
+{
+	return sizeof(struct erspan_base_hdr) +
+	       (version == 1 ? ERSPAN_V1_MDSIZE : ERSPAN_V2_MDSIZE);
+}
+
+static inline u8 tos_to_cos(u8 tos)
+{
+	u8 dscp, cos;
+
+	dscp = tos >> 2;
+	cos = dscp >> 3;
+	return cos;
+}
+
+static inline void erspan_build_header(struct sk_buff *skb,
+				__be32 id, u32 index,
+				bool truncate, bool is_ipv4)
+{
+	struct ethhdr *eth = eth_hdr(skb);
+	enum erspan_encap_type enc_type;
+	struct erspan_base_hdr *ershdr;
+	struct erspan_metadata *ersmd;
+	struct qtag_prefix {
+		__be16 eth_type;
+		__be16 tci;
+	} *qp;
+	u16 vlan_tci = 0;
+	u8 tos;
+
+	tos = is_ipv4 ? ip_hdr(skb)->tos :
+			(ipv6_hdr(skb)->priority << 4) +
+			(ipv6_hdr(skb)->flow_lbl[0] >> 4);
+
+	enc_type = ERSPAN_ENCAP_NOVLAN;
+
+	/* If the mirrored packet has a vlan tag, extract the tci and
+	 * preserve the vlan header in the mirrored frame.
+	 */
+	if (eth->h_proto == htons(ETH_P_8021Q)) {
+		qp = (struct qtag_prefix *)(skb->data + 2 * ETH_ALEN);
+		vlan_tci = ntohs(qp->tci);
+		enc_type = ERSPAN_ENCAP_INFRAME;
+	}
+
+	skb_push(skb, sizeof(*ershdr) + ERSPAN_V1_MDSIZE);
+	ershdr = (struct erspan_base_hdr *)skb->data;
+	memset(ershdr, 0, sizeof(*ershdr) + ERSPAN_V1_MDSIZE);
+
+	/* Build base header */
+	ershdr->ver_vlan = htons((vlan_tci & VLAN_MASK) |
+				 (ERSPAN_VERSION << VER_OFFSET));
+	ershdr->session_id = htons((u16)(ntohl(id) & ID_MASK) |
+			   ((tos_to_cos(tos) << COS_OFFSET) & COS_MASK) |
+			   (enc_type << EN_OFFSET & EN_MASK) |
+			   ((truncate << T_OFFSET) & T_MASK));
+
+	/* Build metadata */
+	ersmd = (struct erspan_metadata *)(ershdr + 1);
+	ersmd->u.index = htonl(index & INDEX_MASK);
+}
+
+/* ERSPAN GRA: timestamp granularity
+ *   00b --> granularity = 100 microseconds
+ *   01b --> granularity = 100 nanoseconds
+ *   10b --> granularity = IEEE 1588
+ * Here we only support 100 microseconds.
+ */
+static inline __be32 erspan_get_timestamp(void)
+{
+	u64 h_usecs;
+	ktime_t kt;
+
+	kt = ktime_get_real();
+	h_usecs = ktime_divns(kt, 100 * NSEC_PER_USEC);
+
+	/* The timestamp field is only 32 bits wide, so at 100 usec
+	 * granularity it wraps roughly every five days.
+	 */
+	return htonl((u32)h_usecs);
+}
+
+static inline void erspan_build_header_v2(struct sk_buff *skb,
+					  __be32 id, u8 direction, u16 hwid,
+					  bool truncate, bool is_ipv4)
+{
+	struct ethhdr *eth = eth_hdr(skb);
+	struct erspan_base_hdr *ershdr;
+	struct erspan_metadata *md;
+	struct qtag_prefix {
+		__be16 eth_type;
+		__be16 tci;
+	} *qp;
+	u16 vlan_tci = 0;
+	u16 session_id;
+	u8 gra = 0; /* 100 usec */
+	u8 bso = 0; /* Bad/Short/Oversized */
+	u8 sgt = 0;
+	u8 tos;
+
+	tos = is_ipv4 ? ip_hdr(skb)->tos :
+			(ipv6_hdr(skb)->priority << 4) +
+			(ipv6_hdr(skb)->flow_lbl[0] >> 4);
+
+	/* Unlike v1, v2 does not have an En field,
+	 * so only the vlan tci field is extracted.
+	 */
+	if (eth->h_proto == htons(ETH_P_8021Q)) {
+		qp = (struct qtag_prefix *)(skb->data + 2 * ETH_ALEN);
+		vlan_tci = ntohs(qp->tci);
+	}
+
+	skb_push(skb, sizeof(*ershdr) + ERSPAN_V2_MDSIZE);
+	ershdr = (struct erspan_base_hdr *)skb->data;
+	memset(ershdr, 0, sizeof(*ershdr) + ERSPAN_V2_MDSIZE);
+
+	/* Build base header */
+	ershdr->ver_vlan = htons((vlan_tci & VLAN_MASK) |
+				 (ERSPAN_VERSION2 << VER_OFFSET));
+	session_id = (u16)(ntohl(id) & ID_MASK) |
+		     ((tos_to_cos(tos) << COS_OFFSET) & COS_MASK) |
+		     (bso << BSO_OFFSET & BSO_MASK) |
+		     ((truncate << T_OFFSET) & T_MASK);
+	ershdr->session_id = htons(session_id);
+
+	/* Build metadata */
+	md = (struct erspan_metadata *)(ershdr + 1);
+	md->u.md2.timestamp = erspan_get_timestamp();
+	md->u.md2.sgt = htons(sgt);
+	md->u.md2.flags = htons(((1 << P_OFFSET) & P_MASK) |
+				((hwid << HWID_OFFSET) & HWID_MASK) |
+				((direction << DIR_OFFSET) & DIR_MASK) |
+				((gra << GRA_OFFSET) & GRA_MASK));
+}
+
 #endif
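
To illustrate the new builders, a sketch (values and the example_
function name are made up, and it assumes the mac and network headers
are already set on the skb): a caller reserves erspan_hdr_len() bytes
of headroom, then pushes the version 2 header and md2 metadata:

#include <net/erspan.h>

static int example_push_erspan_v2(struct sk_buff *skb, __be32 session_id)
{
	/* base header plus 8 bytes of md2 metadata */
	if (skb_cow_head(skb, erspan_hdr_len(ERSPAN_VERSION2)))
		return -ENOMEM;

	erspan_build_header_v2(skb, session_id, 1 /* egress */,
			       0x7 /* hwid */, false /* truncate */,
			       true /* is_ipv4 */);
	return 0;
}
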
diff --git a/include/net/gen_stats.h b/include/net/gen_stats.h
index 304f7aa..0304ba2 100644
--- a/include/net/gen_stats.h
+++ b/include/net/gen_stats.h
@@ -49,6 +49,9 @@ int gnet_stats_copy_rate_est(struct gnet_dump *d,
 int gnet_stats_copy_queue(struct gnet_dump *d,
 			  struct gnet_stats_queue __percpu *cpu_q,
 			  struct gnet_stats_queue *q, __u32 qlen);
+void __gnet_stats_copy_queue(struct gnet_stats_queue *qstats,
+			     const struct gnet_stats_queue __percpu *cpu_q,
+			     const struct gnet_stats_queue *q, __u32 qlen);
 int gnet_stats_copy_app(struct gnet_dump *d, void *st, int len);
 
 int gnet_stats_finish_copy(struct gnet_dump *d);
diff --git a/include/net/inet_connection_sock.h b/include/net/inet_connection_sock.h
index 0358745..8e1bf9a 100644
--- a/include/net/inet_connection_sock.h
+++ b/include/net/inet_connection_sock.h
@@ -77,6 +77,7 @@ struct inet_connection_sock_af_ops {
  * @icsk_af_ops		   Operations which are AF_INET{4,6} specific
  * @icsk_ulp_ops	   Pluggable ULP control hook
  * @icsk_ulp_data	   ULP private data
+ * @icsk_listen_portaddr_node	hash node in the portaddr listener hashtable
  * @icsk_ca_state:	   Congestion control state
  * @icsk_retransmits:	   Number of unrecovered [RTO] timeouts
  * @icsk_pending:	   Scheduled timer event
@@ -101,6 +102,7 @@ struct inet_connection_sock {
 	const struct inet_connection_sock_af_ops *icsk_af_ops;
 	const struct tcp_ulp_ops  *icsk_ulp_ops;
 	void			  *icsk_ulp_data;
+	struct hlist_node         icsk_listen_portaddr_node;
 	unsigned int		  (*icsk_sync_mss)(struct sock *sk, u32 pmtu);
 	__u8			  icsk_ca_state:6,
 				  icsk_ca_setsockopt:1,
diff --git a/include/net/inet_hashtables.h b/include/net/inet_hashtables.h
index 2dbbbff..9141e95 100644
--- a/include/net/inet_hashtables.h
+++ b/include/net/inet_hashtables.h
@@ -111,6 +111,7 @@ struct inet_bind_hashbucket {
  */
 struct inet_listen_hashbucket {
 	spinlock_t		lock;
+	unsigned int		count;
 	struct hlist_head	head;
 };
 
@@ -132,12 +133,13 @@ struct inet_hashinfo {
 	/* Ok, let's try this, I give up, we do need a local binding
 	 * TCP hash as well as the others for fast bind/connect.
 	 */
-	struct inet_bind_hashbucket	*bhash;
-
-	unsigned int			bhash_size;
-	/* 4 bytes hole on 64 bit */
-
 	struct kmem_cache		*bind_bucket_cachep;
+	struct inet_bind_hashbucket	*bhash;
+	unsigned int			bhash_size;
+
+	/* The 2nd listener table hashed by local port and address */
+	unsigned int			lhash2_mask;
+	struct inet_listen_hashbucket	*lhash2;
 
 	/* All the above members are written once at bootup and
 	 * never written again _or_ are predominantly read-access.
@@ -145,14 +147,25 @@ struct inet_hashinfo {
 	 * Now align to a new cache line as all the following members
 	 * might be often dirty.
 	 */
-	/* All sockets in TCP_LISTEN state will be in here.  This is the only
-	 * table where wildcard'd TCP sockets can exist.  Hash function here
-	 * is just local port number.
+	/* All sockets in TCP_LISTEN state will be in listening_hash.
+	 * This is the only table where wildcard'd TCP sockets can
+	 * exist.  listening_hash is only hashed by local port number.
+	 * If lhash2 is initialized, the same socket will also be hashed
+	 * to lhash2 by port and address.
 	 */
 	struct inet_listen_hashbucket	listening_hash[INET_LHTABLE_SIZE]
 					____cacheline_aligned_in_smp;
 };
 
+#define inet_lhash2_for_each_icsk_rcu(__icsk, list) \
+	hlist_for_each_entry_rcu(__icsk, list, icsk_listen_portaddr_node)
+
+static inline struct inet_listen_hashbucket *
+inet_lhash2_bucket(struct inet_hashinfo *h, u32 hash)
+{
+	return &h->lhash2[hash & h->lhash2_mask];
+}
+
 static inline struct inet_ehash_bucket *inet_ehash_bucket(
 	struct inet_hashinfo *hashinfo,
 	unsigned int hash)
@@ -208,6 +221,10 @@ int __inet_inherit_port(const struct sock *sk, struct sock *child);
 void inet_put_port(struct sock *sk);
 
 void inet_hashinfo_init(struct inet_hashinfo *h);
+void inet_hashinfo2_init(struct inet_hashinfo *h, const char *name,
+			 unsigned long numentries, int scale,
+			 unsigned long low_limit,
+			 unsigned long high_limit);
 
 bool inet_ehash_insert(struct sock *sk, struct sock *osk);
 bool inet_ehash_nolisten(struct sock *sk, struct sock *osk);
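
Tying lhash2 to its hash function, a sketch (the example_ name is
invented, and ipv4_portaddr_hash() is the helper added to net/ip.h
later in this diff):

#include <net/inet_hashtables.h>
#include <net/ip.h>

static struct inet_listen_hashbucket *
example_lhash2_bucket_v4(struct net *net, struct inet_hashinfo *h,
			 __be32 daddr, unsigned int hnum)
{
	u32 hash = ipv4_portaddr_hash(net, daddr, hnum);

	return inet_lhash2_bucket(h, hash);
}
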
diff --git a/include/net/inet_sock.h b/include/net/inet_sock.h
index 39efb96..0a671c3 100644
--- a/include/net/inet_sock.h
+++ b/include/net/inet_sock.h
@@ -291,6 +291,31 @@ static inline void inet_sk_copy_descendant(struct sock *sk_to,
 
 int inet_sk_rebuild_header(struct sock *sk);
 
+/**
+ * inet_sk_state_load - read sk->sk_state for lockless contexts
+ * @sk: socket pointer
+ *
+ * Paired with inet_sk_state_store(). Used in places we don't hold socket lock:
+ * tcp_diag_get_info(), tcp_get_info(), tcp_poll(), get_tcp4_sock() ...
+ */
+static inline int inet_sk_state_load(const struct sock *sk)
+{
+	/* state change might impact lockless readers. */
+	return smp_load_acquire(&sk->sk_state);
+}
+
+/**
+ * inet_sk_state_store - update sk->sk_state
+ * @sk: socket pointer
+ * @newstate: new state
+ *
+ * Paired with inet_sk_state_load(). Should be used in contexts where
+ * state change might impact lockless readers.
+ */
+void inet_sk_state_store(struct sock *sk, int newstate);
+
+void inet_sk_set_state(struct sock *sk, int state);
+
 static inline unsigned int __inet_ehashfn(const __be32 laddr,
 					  const __u16 lport,
 					  const __be32 faddr,
diff --git a/include/net/inet_timewait_sock.h b/include/net/inet_timewait_sock.h
index 1356fa6..8994955 100644
--- a/include/net/inet_timewait_sock.h
+++ b/include/net/inet_timewait_sock.h
@@ -93,8 +93,8 @@ struct inet_timewait_sock *inet_twsk_alloc(const struct sock *sk,
 					   struct inet_timewait_death_row *dr,
 					   const int state);
 
-void __inet_twsk_hashdance(struct inet_timewait_sock *tw, struct sock *sk,
-			   struct inet_hashinfo *hashinfo);
+void inet_twsk_hashdance(struct inet_timewait_sock *tw, struct sock *sk,
+			 struct inet_hashinfo *hashinfo);
 
 void __inet_twsk_schedule(struct inet_timewait_sock *tw, int timeo,
 			  bool rearm);
diff --git a/include/net/ip.h b/include/net/ip.h
index af8addb..746abff 100644
--- a/include/net/ip.h
+++ b/include/net/ip.h
@@ -26,12 +26,14 @@
 #include <linux/ip.h>
 #include <linux/in.h>
 #include <linux/skbuff.h>
+#include <linux/jhash.h>
 
 #include <net/inet_sock.h>
 #include <net/route.h>
 #include <net/snmp.h>
 #include <net/flow.h>
 #include <net/flow_dissector.h>
+#include <net/netns/hash.h>
 
 #define IPV4_MAX_PMTU		65535U		/* RFC 2675, Section 5.1 */
 #define IPV4_MIN_MTU		68			/* RFC 791 */
@@ -522,6 +524,13 @@ static inline unsigned int ipv4_addr_hash(__be32 ip)
 	return (__force unsigned int) ip;
 }
 
+static inline u32 ipv4_portaddr_hash(const struct net *net,
+				     __be32 saddr,
+				     unsigned int port)
+{
+	return jhash_1word((__force u32)saddr, net_hash_mix(net)) ^ port;
+}
+
 bool ip_call_ra_chain(struct sk_buff *skb);
 
 /*
diff --git a/include/net/ip6_fib.h b/include/net/ip6_fib.h
index 10c9138..34ec321d 100644
--- a/include/net/ip6_fib.h
+++ b/include/net/ip6_fib.h
@@ -129,6 +129,8 @@ struct rt6_exception {
 
 struct rt6_info {
 	struct dst_entry		dst;
+	struct rt6_info __rcu		*rt6_next;
+	struct rt6_info			*from;
 
 	/*
 	 * Tail elements of dst_entry (__refcnt etc.)
@@ -147,6 +149,7 @@ struct rt6_info {
 	 */
 	struct list_head		rt6i_siblings;
 	unsigned int			rt6i_nsiblings;
+	atomic_t			rt6i_nh_upper_bound;
 
 	atomic_t			rt6i_ref;
 
@@ -168,19 +171,21 @@ struct rt6_info {
 	u32				rt6i_metric;
 	u32				rt6i_pmtu;
 	/* more non-fragment space at head required */
+	int				rt6i_nh_weight;
 	unsigned short			rt6i_nfheader_len;
 	u8				rt6i_protocol;
 	u8				exception_bucket_flushed:1,
-					unused:7;
+					should_flush:1,
+					unused:6;
 };
 
 #define for_each_fib6_node_rt_rcu(fn)					\
 	for (rt = rcu_dereference((fn)->leaf); rt;			\
-	     rt = rcu_dereference(rt->dst.rt6_next))
+	     rt = rcu_dereference(rt->rt6_next))
 
 #define for_each_fib6_walker_rt(w)					\
 	for (rt = (w)->leaf; rt;					\
-	     rt = rcu_dereference_protected(rt->dst.rt6_next, 1))
+	     rt = rcu_dereference_protected(rt->rt6_next, 1))
 
 static inline struct inet6_dev *ip6_dst_idev(struct dst_entry *dst)
 {
@@ -203,11 +208,9 @@ static inline void rt6_update_expires(struct rt6_info *rt0, int timeout)
 {
 	struct rt6_info *rt;
 
-	for (rt = rt0; rt && !(rt->rt6i_flags & RTF_EXPIRES);
-	     rt = (struct rt6_info *)rt->dst.from);
+	for (rt = rt0; rt && !(rt->rt6i_flags & RTF_EXPIRES); rt = rt->from);
 	if (rt && rt != rt0)
 		rt0->dst.expires = rt->dst.expires;
-
 	dst_set_expires(&rt0->dst, timeout);
 	rt0->rt6i_flags |= RTF_EXPIRES;
 }
@@ -242,8 +245,8 @@ static inline u32 rt6_get_cookie(const struct rt6_info *rt)
 	u32 cookie = 0;
 
 	if (rt->rt6i_flags & RTF_PCPU ||
-	    (unlikely(!list_empty(&rt->rt6i_uncached)) && rt->dst.from))
-		rt = (struct rt6_info *)(rt->dst.from);
+	    (unlikely(!list_empty(&rt->rt6i_uncached)) && rt->from))
+		rt = rt->from;
 
 	rt6_get_cookie_safe(rt, &cookie);
 
@@ -404,6 +407,7 @@ unsigned int fib6_tables_seq_read(struct net *net);
 int fib6_tables_dump(struct net *net, struct notifier_block *nb);
 
 void fib6_update_sernum(struct rt6_info *rt);
+void fib6_update_sernum_upto_root(struct net *net, struct rt6_info *rt);
 
 #ifdef CONFIG_IPV6_MULTIPLE_TABLES
 int fib6_rules_init(void);
diff --git a/include/net/ip6_route.h b/include/net/ip6_route.h
index 18e442e..27d23a6 100644
--- a/include/net/ip6_route.h
+++ b/include/net/ip6_route.h
@@ -66,6 +66,12 @@ static inline bool rt6_need_strict(const struct in6_addr *daddr)
 		(IPV6_ADDR_MULTICAST | IPV6_ADDR_LINKLOCAL | IPV6_ADDR_LOOPBACK);
 }
 
+static inline bool rt6_qualify_for_ecmp(const struct rt6_info *rt)
+{
+	return (rt->rt6i_flags & (RTF_GATEWAY|RTF_ADDRCONF|RTF_DYNAMIC)) ==
+	       RTF_GATEWAY;
+}
+
 void ip6_route_input(struct sk_buff *skb);
 struct dst_entry *ip6_route_input_lookup(struct net *net,
 					 struct net_device *dev,
@@ -165,10 +171,13 @@ struct rt6_rtnl_dump_arg {
 };
 
 int rt6_dump_route(struct rt6_info *rt, void *p_arg);
-void rt6_ifdown(struct net *net, struct net_device *dev);
 void rt6_mtu_change(struct net_device *dev, unsigned int mtu);
 void rt6_remove_prefsrc(struct inet6_ifaddr *ifp);
 void rt6_clean_tohost(struct net *net, struct in6_addr *gateway);
+void rt6_sync_up(struct net_device *dev, unsigned int nh_flags);
+void rt6_disable_ip(struct net_device *dev, unsigned long event);
+void rt6_sync_down_dev(struct net_device *dev, unsigned long event);
+void rt6_multipath_rebalance(struct rt6_info *rt);
 
 static inline const struct rt6_info *skb_rt6_info(const struct sk_buff *skb)
 {
diff --git a/include/net/ip6_tunnel.h b/include/net/ip6_tunnel.h
index d66f70f..236e40b 100644
--- a/include/net/ip6_tunnel.h
+++ b/include/net/ip6_tunnel.h
@@ -36,6 +36,10 @@ struct __ip6_tnl_parm {
 	__be32			o_key;
 
 	__u32			fwmark;
+	__u32			index;	/* ERSPAN type II index */
+	__u8			erspan_ver;	/* ERSPAN version */
+	__u8			dir;	/* direction */
+	__u16			hwid;	/* hwid */
 };
 
 /* IPv6 tunnel */
diff --git a/include/net/ip_tunnels.h b/include/net/ip_tunnels.h
index 24628f6..1f16773 100644
--- a/include/net/ip_tunnels.h
+++ b/include/net/ip_tunnels.h
@@ -116,8 +116,11 @@ struct ip_tunnel {
 	u32		o_seqno;	/* The last output seqno */
 	int		tun_hlen;	/* Precalculated header length */
 
-	/* This field used only by ERSPAN */
+	/* These four fields used only by ERSPAN */
 	u32		index;		/* ERSPAN type II index */
+	u8		erspan_ver;	/* ERSPAN version */
+	u8		dir;		/* ERSPAN direction */
+	u16		hwid;		/* ERSPAN hardware ID */
 
 	struct dst_cache dst_cache;
 
diff --git a/include/net/ip_vs.h b/include/net/ip_vs.h
index ff68cf2..eb0bec0 100644
--- a/include/net/ip_vs.h
+++ b/include/net/ip_vs.h
@@ -69,8 +69,7 @@ struct ip_vs_iphdr {
 };
 
 static inline void *frag_safe_skb_hp(const struct sk_buff *skb, int offset,
-				      int len, void *buffer,
-				      const struct ip_vs_iphdr *ipvsh)
+				      int len, void *buffer)
 {
 	return skb_header_pointer(skb, offset, len, buffer);
 }
diff --git a/include/net/ipv6.h b/include/net/ipv6.h
index f73797e..9dc1230 100644
--- a/include/net/ipv6.h
+++ b/include/net/ipv6.h
@@ -22,6 +22,7 @@
 #include <net/flow.h>
 #include <net/flow_dissector.h>
 #include <net/snmp.h>
+#include <net/netns/hash.h>
 
 #define SIN6_LEN_RFC2133	24
 
@@ -673,6 +674,22 @@ static inline bool ipv6_addr_v4mapped(const struct in6_addr *a)
 					cpu_to_be32(0x0000ffff))) == 0UL;
 }
 
+static inline u32 ipv6_portaddr_hash(const struct net *net,
+				     const struct in6_addr *addr6,
+				     unsigned int port)
+{
+	unsigned int hash, mix = net_hash_mix(net);
+
+	if (ipv6_addr_any(addr6))
+		hash = jhash_1word(0, mix);
+	else if (ipv6_addr_v4mapped(addr6))
+		hash = jhash_1word((__force u32)addr6->s6_addr32[3], mix);
+	else
+		hash = jhash2((__force u32 *)addr6->s6_addr32, 4, mix);
+
+	return hash ^ port;
+}
+
 /*
  * Check for a RFC 4843 ORCHID address
  * (Overlay Routable Cryptographic Hash Identifiers)
@@ -952,6 +969,8 @@ static inline struct sk_buff *ip6_finish_skb(struct sock *sk)
 			      &inet6_sk(sk)->cork);
 }
 
+unsigned int ip6_dst_mtu_forward(const struct dst_entry *dst);
+
 int ip6_dst_lookup(struct net *net, struct sock *sk, struct dst_entry **dst,
 		   struct flowi6 *fl6);
 struct dst_entry *ip6_dst_lookup_flow(const struct sock *sk, struct flowi6 *fl6,
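
The new ipv6_portaddr_hash() helper above mixes the per-namespace salt
with the zero address, the embedded IPv4 address of a v4-mapped address,
or the full 128-bit address, and finally XORs in the port.  A typical
consumer is bucket selection in a socket hash table; a minimal sketch,
where my_hashtable and MY_TABLE_SIZE are illustrative names rather than
part of this patch:

	#define MY_TABLE_SIZE 256	/* illustrative, power of two */
	static struct hlist_head my_hashtable[MY_TABLE_SIZE];

	static struct hlist_head *my_slot(struct net *net,
					  const struct in6_addr *addr,
					  unsigned int port)
	{
		u32 hash = ipv6_portaddr_hash(net, addr, port);

		/* mask selects the slot in a power-of-two table */
		return &my_hashtable[hash & (MY_TABLE_SIZE - 1)];
	}
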
diff --git a/include/net/mac80211.h b/include/net/mac80211.h
index eec143c..906e902 100644
--- a/include/net/mac80211.h
+++ b/include/net/mac80211.h
@@ -1552,6 +1552,9 @@ struct wireless_dev *ieee80211_vif_to_wdev(struct ieee80211_vif *vif);
  * @IEEE80211_KEY_FLAG_RESERVE_TAILROOM: This flag should be set by the
  *	driver for a key to indicate that sufficient tailroom must always
  *	be reserved for ICV or MIC, even when HW encryption is enabled.
+ * @IEEE80211_KEY_FLAG_PUT_MIC_SPACE: This flag should be set by the driver for
+ *	a TKIP key if it only requires MIC space. Do not set together with
+ *	@IEEE80211_KEY_FLAG_GENERATE_MMIC on the same key.
  */
 enum ieee80211_key_flags {
 	IEEE80211_KEY_FLAG_GENERATE_IV_MGMT	= BIT(0),
@@ -1562,6 +1565,7 @@ enum ieee80211_key_flags {
 	IEEE80211_KEY_FLAG_PUT_IV_SPACE		= BIT(5),
 	IEEE80211_KEY_FLAG_RX_MGMT		= BIT(6),
 	IEEE80211_KEY_FLAG_RESERVE_TAILROOM	= BIT(7),
+	IEEE80211_KEY_FLAG_PUT_MIC_SPACE	= BIT(8),
 };
 
 /**
@@ -1593,8 +1597,8 @@ struct ieee80211_key_conf {
 	u8 icv_len;
 	u8 iv_len;
 	u8 hw_key_idx;
-	u8 flags;
 	s8 keyidx;
+	u16 flags;
 	u8 keylen;
 	u8 key[0];
 };
@@ -2056,6 +2060,9 @@ struct ieee80211_txq {
  *	The stack will not do fragmentation.
  *	The callback for @set_frag_threshold should be set as well.
  *
+ * @IEEE80211_HW_SUPPORTS_TDLS_BUFFER_STA: Hardware supports buffer STA on
+ *	TDLS links.
+ *
  * @NUM_IEEE80211_HW_FLAGS: number of hardware flags, used for sizing arrays
  */
 enum ieee80211_hw_flags {
@@ -2098,6 +2105,7 @@ enum ieee80211_hw_flags {
 	IEEE80211_HW_TX_FRAG_LIST,
 	IEEE80211_HW_REPORTS_LOW_ACK,
 	IEEE80211_HW_SUPPORTS_TX_FRAG,
+	IEEE80211_HW_SUPPORTS_TDLS_BUFFER_STA,
 
 	/* keep last, obviously */
 	NUM_IEEE80211_HW_FLAGS
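
A driver opts into the new TKIP behaviour from its ->set_key() callback
by setting the flag on the key configuration; widening flags from u8 to
u16 above is what makes BIT(8) representable.  A hedged sketch (and, per
the documentation above, the flag must not be combined with
IEEE80211_KEY_FLAG_GENERATE_MMIC on the same key):

	/* inside a driver's ->set_key() handler -- a sketch */
	if (cmd == SET_KEY && key->cipher == WLAN_CIPHER_SUITE_TKIP)
		key->flags |= IEEE80211_KEY_FLAG_PUT_MIC_SPACE;
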
diff --git a/include/net/netfilter/ipv4/nf_conntrack_ipv4.h b/include/net/netfilter/ipv4/nf_conntrack_ipv4.h
index 4ed1040..73f8257 100644
--- a/include/net/netfilter/ipv4/nf_conntrack_ipv4.h
+++ b/include/net/netfilter/ipv4/nf_conntrack_ipv4.h
@@ -13,17 +13,17 @@
 
 const extern struct nf_conntrack_l3proto nf_conntrack_l3proto_ipv4;
 
-extern struct nf_conntrack_l4proto nf_conntrack_l4proto_tcp4;
-extern struct nf_conntrack_l4proto nf_conntrack_l4proto_udp4;
-extern struct nf_conntrack_l4proto nf_conntrack_l4proto_icmp;
+extern const struct nf_conntrack_l4proto nf_conntrack_l4proto_tcp4;
+extern const struct nf_conntrack_l4proto nf_conntrack_l4proto_udp4;
+extern const struct nf_conntrack_l4proto nf_conntrack_l4proto_icmp;
 #ifdef CONFIG_NF_CT_PROTO_DCCP
-extern struct nf_conntrack_l4proto nf_conntrack_l4proto_dccp4;
+extern const struct nf_conntrack_l4proto nf_conntrack_l4proto_dccp4;
 #endif
 #ifdef CONFIG_NF_CT_PROTO_SCTP
-extern struct nf_conntrack_l4proto nf_conntrack_l4proto_sctp4;
+extern const struct nf_conntrack_l4proto nf_conntrack_l4proto_sctp4;
 #endif
 #ifdef CONFIG_NF_CT_PROTO_UDPLITE
-extern struct nf_conntrack_l4proto nf_conntrack_l4proto_udplite4;
+extern const struct nf_conntrack_l4proto nf_conntrack_l4proto_udplite4;
 #endif
 
 int nf_conntrack_ipv4_compat_init(void);
diff --git a/include/net/netfilter/ipv6/nf_conntrack_ipv6.h b/include/net/netfilter/ipv6/nf_conntrack_ipv6.h
index 9cd55be9..effa8df 100644
--- a/include/net/netfilter/ipv6/nf_conntrack_ipv6.h
+++ b/include/net/netfilter/ipv6/nf_conntrack_ipv6.h
@@ -4,17 +4,17 @@
 
 extern const struct nf_conntrack_l3proto nf_conntrack_l3proto_ipv6;
 
-extern struct nf_conntrack_l4proto nf_conntrack_l4proto_tcp6;
-extern struct nf_conntrack_l4proto nf_conntrack_l4proto_udp6;
-extern struct nf_conntrack_l4proto nf_conntrack_l4proto_icmpv6;
+extern const struct nf_conntrack_l4proto nf_conntrack_l4proto_tcp6;
+extern const struct nf_conntrack_l4proto nf_conntrack_l4proto_udp6;
+extern const struct nf_conntrack_l4proto nf_conntrack_l4proto_icmpv6;
 #ifdef CONFIG_NF_CT_PROTO_DCCP
-extern struct nf_conntrack_l4proto nf_conntrack_l4proto_dccp6;
+extern const struct nf_conntrack_l4proto nf_conntrack_l4proto_dccp6;
 #endif
 #ifdef CONFIG_NF_CT_PROTO_SCTP
-extern struct nf_conntrack_l4proto nf_conntrack_l4proto_sctp6;
+extern const struct nf_conntrack_l4proto nf_conntrack_l4proto_sctp6;
 #endif
 #ifdef CONFIG_NF_CT_PROTO_UDPLITE
-extern struct nf_conntrack_l4proto nf_conntrack_l4proto_udplite6;
+extern const struct nf_conntrack_l4proto nf_conntrack_l4proto_udplite6;
 #endif
 
 #include <linux/sysctl.h>
diff --git a/include/net/netfilter/nf_conntrack_count.h b/include/net/netfilter/nf_conntrack_count.h
new file mode 100644
index 0000000..adf8db4
--- /dev/null
+++ b/include/net/netfilter/nf_conntrack_count.h
@@ -0,0 +1,17 @@
+#ifndef _NF_CONNTRACK_COUNT_H
+#define _NF_CONNTRACK_COUNT_H
+
+struct nf_conncount_data;
+
+struct nf_conncount_data *nf_conncount_init(struct net *net, unsigned int family,
+					    unsigned int keylen);
+void nf_conncount_destroy(struct net *net, unsigned int family,
+			  struct nf_conncount_data *data);
+
+unsigned int nf_conncount_count(struct net *net,
+				struct nf_conncount_data *data,
+				const u32 *key,
+				unsigned int family,
+				const struct nf_conntrack_tuple *tuple,
+				const struct nf_conntrack_zone *zone);
+#endif
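
The whole API is the init/count/destroy triple declared above.  A
minimal sketch of a caller enforcing a per-key connection limit -- net,
keylen, key, tuple, zone and limit are assumed to come from the caller,
and error handling is elided:

	struct nf_conncount_data *data;
	unsigned int count;

	data = nf_conncount_init(net, NFPROTO_IPV4, keylen);

	count = nf_conncount_count(net, data, key, NFPROTO_IPV4,
				   tuple, zone);
	if (count > limit)
		/* reject the new connection */;

	nf_conncount_destroy(net, NFPROTO_IPV4, data);
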
diff --git a/include/net/netfilter/nf_conntrack_l4proto.h b/include/net/netfilter/nf_conntrack_l4proto.h
index 7ef56c1..a7220ee 100644
--- a/include/net/netfilter/nf_conntrack_l4proto.h
+++ b/include/net/netfilter/nf_conntrack_l4proto.h
@@ -27,6 +27,9 @@ struct nf_conntrack_l4proto {
 	/* Resolve clashes on insertion races. */
 	bool allow_clash;
 
+	/* protoinfo nlattr size, closes a hole */
+	u16 nlattr_size;
+
 	/* Try to fill in the third arg: dataoff is offset past network protocol
            hdr.  Return true if possible. */
 	bool (*pkt_to_tuple)(const struct sk_buff *skb, unsigned int dataoff,
@@ -66,8 +69,6 @@ struct nf_conntrack_l4proto {
 	/* convert protoinfo to nfnetink attributes */
 	int (*to_nlattr)(struct sk_buff *skb, struct nlattr *nla,
 			 struct nf_conn *ct);
-	/* Calculate protoinfo nlattr size */
-	int (*nlattr_size)(void);
 
 	/* convert nfnetlink attributes to protoinfo */
 	int (*from_nlattr)(struct nlattr *tb[], struct nf_conn *ct);
@@ -80,8 +81,6 @@ struct nf_conntrack_l4proto {
 			       struct nf_conntrack_tuple *t);
 	const struct nla_policy *nla_policy;
 
-	size_t nla_size;
-
 #if IS_ENABLED(CONFIG_NF_CT_NETLINK_TIMEOUT)
 	struct {
 		int (*nlattr_to_obj)(struct nlattr *tb[],
@@ -109,7 +108,7 @@ struct nf_conntrack_l4proto {
 };
 
 /* Existing built-in generic protocol */
-extern struct nf_conntrack_l4proto nf_conntrack_l4proto_generic;
+extern const struct nf_conntrack_l4proto nf_conntrack_l4proto_generic;
 
 #define MAX_NF_CT_PROTO 256
 
@@ -126,18 +125,18 @@ int nf_ct_l4proto_pernet_register_one(struct net *net,
 void nf_ct_l4proto_pernet_unregister_one(struct net *net,
 				const struct nf_conntrack_l4proto *proto);
 int nf_ct_l4proto_pernet_register(struct net *net,
-				  struct nf_conntrack_l4proto *const proto[],
+				  const struct nf_conntrack_l4proto *const proto[],
 				  unsigned int num_proto);
 void nf_ct_l4proto_pernet_unregister(struct net *net,
-				struct nf_conntrack_l4proto *const proto[],
+				const struct nf_conntrack_l4proto *const proto[],
 				unsigned int num_proto);
 
 /* Protocol global registration. */
-int nf_ct_l4proto_register_one(struct nf_conntrack_l4proto *proto);
+int nf_ct_l4proto_register_one(const struct nf_conntrack_l4proto *proto);
 void nf_ct_l4proto_unregister_one(const struct nf_conntrack_l4proto *proto);
-int nf_ct_l4proto_register(struct nf_conntrack_l4proto *proto[],
+int nf_ct_l4proto_register(const struct nf_conntrack_l4proto * const proto[],
 			   unsigned int num_proto);
-void nf_ct_l4proto_unregister(struct nf_conntrack_l4proto *proto[],
+void nf_ct_l4proto_unregister(const struct nf_conntrack_l4proto * const proto[],
 			      unsigned int num_proto);
 
 /* Generic netlink helpers */
diff --git a/include/net/netfilter/nf_flow_table.h b/include/net/netfilter/nf_flow_table.h
new file mode 100644
index 0000000..b22b220
--- /dev/null
+++ b/include/net/netfilter/nf_flow_table.h
@@ -0,0 +1,122 @@
+#ifndef _NF_FLOW_TABLE_H
+#define _NF_FLOW_TABLE_H
+
+#include <linux/in.h>
+#include <linux/in6.h>
+#include <linux/netdevice.h>
+#include <linux/rhashtable.h>
+#include <linux/rcupdate.h>
+#include <net/dst.h>
+
+struct nf_flowtable;
+
+struct nf_flowtable_type {
+	struct list_head		list;
+	int				family;
+	void				(*gc)(struct work_struct *work);
+	const struct rhashtable_params	*params;
+	nf_hookfn			*hook;
+	struct module			*owner;
+};
+
+struct nf_flowtable {
+	struct rhashtable		rhashtable;
+	const struct nf_flowtable_type	*type;
+	struct delayed_work		gc_work;
+};
+
+enum flow_offload_tuple_dir {
+	FLOW_OFFLOAD_DIR_ORIGINAL,
+	FLOW_OFFLOAD_DIR_REPLY,
+	__FLOW_OFFLOAD_DIR_MAX		= FLOW_OFFLOAD_DIR_REPLY,
+};
+#define FLOW_OFFLOAD_DIR_MAX	(__FLOW_OFFLOAD_DIR_MAX + 1)
+
+struct flow_offload_tuple {
+	union {
+		struct in_addr		src_v4;
+		struct in6_addr		src_v6;
+	};
+	union {
+		struct in_addr		dst_v4;
+		struct in6_addr		dst_v6;
+	};
+	struct {
+		__be16			src_port;
+		__be16			dst_port;
+	};
+
+	int				iifidx;
+
+	u8				l3proto;
+	u8				l4proto;
+	u8				dir;
+
+	int				oifidx;
+
+	struct dst_entry		*dst_cache;
+};
+
+struct flow_offload_tuple_rhash {
+	struct rhash_head		node;
+	struct flow_offload_tuple	tuple;
+};
+
+#define FLOW_OFFLOAD_SNAT	0x1
+#define FLOW_OFFLOAD_DNAT	0x2
+#define FLOW_OFFLOAD_DYING	0x4
+
+struct flow_offload {
+	struct flow_offload_tuple_rhash		tuplehash[FLOW_OFFLOAD_DIR_MAX];
+	u32					flags;
+	union {
+		/* Your private driver data here. */
+		u32		timeout;
+	};
+};
+
+#define NF_FLOW_TIMEOUT (30 * HZ)
+
+struct nf_flow_route {
+	struct {
+		struct dst_entry	*dst;
+		int			ifindex;
+	} tuple[FLOW_OFFLOAD_DIR_MAX];
+};
+
+struct flow_offload *flow_offload_alloc(struct nf_conn *ct,
+					struct nf_flow_route *route);
+void flow_offload_free(struct flow_offload *flow);
+
+int flow_offload_add(struct nf_flowtable *flow_table, struct flow_offload *flow);
+void flow_offload_del(struct nf_flowtable *flow_table, struct flow_offload *flow);
+struct flow_offload_tuple_rhash *flow_offload_lookup(struct nf_flowtable *flow_table,
+						     struct flow_offload_tuple *tuple);
+int nf_flow_table_iterate(struct nf_flowtable *flow_table,
+			  void (*iter)(struct flow_offload *flow, void *data),
+			  void *data);
+void nf_flow_offload_work_gc(struct work_struct *work);
+extern const struct rhashtable_params nf_flow_offload_rhash_params;
+
+void flow_offload_dead(struct flow_offload *flow);
+
+int nf_flow_snat_port(const struct flow_offload *flow,
+		      struct sk_buff *skb, unsigned int thoff,
+		      u8 protocol, enum flow_offload_tuple_dir dir);
+int nf_flow_dnat_port(const struct flow_offload *flow,
+		      struct sk_buff *skb, unsigned int thoff,
+		      u8 protocol, enum flow_offload_tuple_dir dir);
+
+struct flow_ports {
+	__be16 source, dest;
+};
+
+unsigned int nf_flow_offload_ip_hook(void *priv, struct sk_buff *skb,
+				     const struct nf_hook_state *state);
+unsigned int nf_flow_offload_ipv6_hook(void *priv, struct sk_buff *skb,
+				       const struct nf_hook_state *state);
+
+#define MODULE_ALIAS_NF_FLOWTABLE(family)	\
+	MODULE_ALIAS("nf-flowtable-" __stringify(family))
+
+#endif /* _NF_FLOW_TABLE_H */
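
Tying the declarations together: a connection is offloaded by allocating
a flow from its conntrack entry and route, then inserting it into the
table; the receive fast path looks tuples up and falls back to the
classic path on a miss.  A sketch using only the functions above (ct,
route, tuple and flow_table belong to the caller; the error values are
illustrative):

	struct flow_offload_tuple_rhash *tuplehash;
	struct flow_offload *flow;

	flow = flow_offload_alloc(ct, &route);
	if (!flow)
		return -ENOMEM;

	if (flow_offload_add(flow_table, flow) < 0) {
		flow_offload_free(flow);
		return -EEXIST;
	}

	/* later, on the receive fast path */
	tuplehash = flow_offload_lookup(flow_table, &tuple);
	if (!tuplehash)
		return NF_ACCEPT;	/* no offload entry, classic path */
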
diff --git a/include/net/netfilter/nf_queue.h b/include/net/netfilter/nf_queue.h
index 814058d..a50a69f 100644
--- a/include/net/netfilter/nf_queue.h
+++ b/include/net/netfilter/nf_queue.h
@@ -25,7 +25,7 @@ struct nf_queue_entry {
 struct nf_queue_handler {
 	int		(*outfn)(struct nf_queue_entry *entry,
 				 unsigned int queuenum);
-	unsigned int	(*nf_hook_drop)(struct net *net);
+	void		(*nf_hook_drop)(struct net *net);
 };
 
 void nf_register_queue_handler(struct net *net, const struct nf_queue_handler *qh);
diff --git a/include/net/netfilter/nf_tables.h b/include/net/netfilter/nf_tables.h
index fecc611..dd23895 100644
--- a/include/net/netfilter/nf_tables.h
+++ b/include/net/netfilter/nf_tables.h
@@ -9,6 +9,7 @@
 #include <linux/netfilter/x_tables.h>
 #include <linux/netfilter/nf_tables.h>
 #include <linux/u64_stats_sync.h>
+#include <net/netfilter/nf_flow_table.h>
 #include <net/netlink.h>
 
 #define NFT_JUMP_STACK_SIZE	16
@@ -54,8 +55,8 @@ static inline void nft_set_pktinfo(struct nft_pktinfo *pkt,
 	pkt->xt.state = state;
 }
 
-static inline void nft_set_pktinfo_proto_unspec(struct nft_pktinfo *pkt,
-						struct sk_buff *skb)
+static inline void nft_set_pktinfo_unspec(struct nft_pktinfo *pkt,
+					  struct sk_buff *skb)
 {
 	pkt->tprot_set = false;
 	pkt->tprot = 0;
@@ -63,14 +64,6 @@ static inline void nft_set_pktinfo_proto_unspec(struct nft_pktinfo *pkt,
 	pkt->xt.fragoff = 0;
 }
 
-static inline void nft_set_pktinfo_unspec(struct nft_pktinfo *pkt,
-					  struct sk_buff *skb,
-					  const struct nf_hook_state *state)
-{
-	nft_set_pktinfo(pkt, skb, state);
-	nft_set_pktinfo_proto_unspec(pkt, skb);
-}
-
 /**
  * 	struct nft_verdict - nf_tables verdict
  *
@@ -424,6 +417,11 @@ struct nft_set {
 		__attribute__((aligned(__alignof__(u64))));
 };
 
+static inline bool nft_set_is_anonymous(const struct nft_set *set)
+{
+	return set->flags & NFT_SET_ANONYMOUS;
+}
+
 static inline void *nft_set_priv(const struct nft_set *set)
 {
 	return (void *)set->data;
@@ -883,7 +881,7 @@ enum nft_chain_type {
  * 	@family: address family
  * 	@owner: module owner
  * 	@hook_mask: mask of valid hooks
- * 	@hooks: hookfn overrides
+ * 	@hooks: array of hook functions
  */
 struct nf_chain_type {
 	const char			*name;
@@ -905,8 +903,6 @@ struct nft_stats {
 	struct u64_stats_sync	syncp;
 };
 
-#define NFT_HOOK_OPS_MAX		2
-
 /**
  *	struct nft_base_chain - nf_tables base chain
  *
@@ -918,7 +914,7 @@ struct nft_stats {
  *	@dev_name: device name that this base chain is attached to (if any)
  */
 struct nft_base_chain {
-	struct nf_hook_ops		ops[NFT_HOOK_OPS_MAX];
+	struct nf_hook_ops		ops;
 	const struct nf_chain_type	*type;
 	u8				policy;
 	u8				flags;
@@ -948,6 +944,7 @@ unsigned int nft_do_chain(struct nft_pktinfo *pkt, void *priv);
  *	@chains: chains in the table
  *	@sets: sets in the table
  *	@objects: stateful objects in the table
+ *	@flowtables: flow tables in the table
  *	@hgenerator: handle generator state
  *	@use: number of chain references to this table
  *	@flags: table flag (see enum nft_table_flags)
@@ -959,6 +956,7 @@ struct nft_table {
 	struct list_head		chains;
 	struct list_head		sets;
 	struct list_head		objects;
+	struct list_head		flowtables;
 	u64				hgenerator;
 	u32				use;
 	u16				flags:14,
@@ -979,9 +977,6 @@ enum nft_af_flags {
  *	@owner: module owner
  *	@tables: used internally
  *	@flags: family flags
- *	@nops: number of hook ops in this family
- *	@hook_ops_init: initialization function for chain hook ops
- *	@hooks: hookfn overrides for packet validation
  */
 struct nft_af_info {
 	struct list_head		list;
@@ -990,10 +985,6 @@ struct nft_af_info {
 	struct module			*owner;
 	struct list_head		tables;
 	u32				flags;
-	unsigned int			nops;
-	void				(*hook_ops_init)(struct nf_hook_ops *,
-							 unsigned int);
-	nf_hookfn			*hooks[NF_MAX_HOOKS];
 };
 
 int nft_register_afinfo(struct net *, struct nft_af_info *);
@@ -1097,6 +1088,44 @@ int nft_register_obj(struct nft_object_type *obj_type);
 void nft_unregister_obj(struct nft_object_type *obj_type);
 
 /**
+ *	struct nft_flowtable - nf_tables flow table
+ *
+ *	@list: flow table list node in table list
+ * 	@table: the table the flow table is contained in
+ *	@name: name of this flow table
+ *	@hooknum: hook number
+ *	@priority: hook priority
+ *	@ops_len: number of hooks in array
+ *	@genmask: generation mask
+ *	@use: number of references to this flow table
+ *	@data: rhashtable and garbage collector
+ * 	@ops: array of hooks
+ */
+struct nft_flowtable {
+	struct list_head		list;
+	struct nft_table		*table;
+	char				*name;
+	int				hooknum;
+	int				priority;
+	int				ops_len;
+	u32				genmask:2,
+					use:30;
+	/* runtime data below here */
+	struct nf_hook_ops		*ops ____cacheline_aligned;
+	struct nf_flowtable		data;
+};
+
+struct nft_flowtable *nf_tables_flowtable_lookup(const struct nft_table *table,
+						 const struct nlattr *nla,
+						 u8 genmask);
+void nft_flow_table_iterate(struct net *net,
+			    void (*iter)(struct nf_flowtable *flowtable, void *data),
+			    void *data);
+
+void nft_register_flowtable_type(struct nf_flowtable_type *type);
+void nft_unregister_flowtable_type(struct nf_flowtable_type *type);
+
+/**
  *	struct nft_traceinfo - nft tracing information and state
  *
  *	@pkt: pktinfo currently processed
@@ -1125,9 +1154,6 @@ void nft_trace_init(struct nft_traceinfo *info, const struct nft_pktinfo *pkt,
 
 void nft_trace_notify(struct nft_traceinfo *info);
 
-#define nft_dereference(p)					\
-	nfnl_dereference(p, NFNL_SUBSYS_NFTABLES)
-
 #define MODULE_ALIAS_NFT_FAMILY(family)	\
 	MODULE_ALIAS("nft-afinfo-" __stringify(family))
 
@@ -1332,4 +1358,11 @@ struct nft_trans_obj {
 #define nft_trans_obj(trans)	\
 	(((struct nft_trans_obj *)trans->data)->obj)
 
+struct nft_trans_flowtable {
+	struct nft_flowtable		*flowtable;
+};
+
+#define nft_trans_flowtable(trans)	\
+	(((struct nft_trans_flowtable *)trans->data)->flowtable)
+
 #endif /* _NET_NF_TABLES_H */
diff --git a/include/net/netfilter/nf_tables_ipv4.h b/include/net/netfilter/nf_tables_ipv4.h
index f0896ba..ed7b511 100644
--- a/include/net/netfilter/nf_tables_ipv4.h
+++ b/include/net/netfilter/nf_tables_ipv4.h
@@ -5,15 +5,11 @@
 #include <net/netfilter/nf_tables.h>
 #include <net/ip.h>
 
-static inline void
-nft_set_pktinfo_ipv4(struct nft_pktinfo *pkt,
-		     struct sk_buff *skb,
-		     const struct nf_hook_state *state)
+static inline void nft_set_pktinfo_ipv4(struct nft_pktinfo *pkt,
+					struct sk_buff *skb)
 {
 	struct iphdr *ip;
 
-	nft_set_pktinfo(pkt, skb, state);
-
 	ip = ip_hdr(pkt->skb);
 	pkt->tprot_set = true;
 	pkt->tprot = ip->protocol;
@@ -21,10 +17,8 @@ nft_set_pktinfo_ipv4(struct nft_pktinfo *pkt,
 	pkt->xt.fragoff = ntohs(ip->frag_off) & IP_OFFSET;
 }
 
-static inline int
-__nft_set_pktinfo_ipv4_validate(struct nft_pktinfo *pkt,
-				struct sk_buff *skb,
-				const struct nf_hook_state *state)
+static inline int __nft_set_pktinfo_ipv4_validate(struct nft_pktinfo *pkt,
+						  struct sk_buff *skb)
 {
 	struct iphdr *iph, _iph;
 	u32 len, thoff;
@@ -52,16 +46,11 @@ __nft_set_pktinfo_ipv4_validate(struct nft_pktinfo *pkt,
 	return 0;
 }
 
-static inline void
-nft_set_pktinfo_ipv4_validate(struct nft_pktinfo *pkt,
-			      struct sk_buff *skb,
-			      const struct nf_hook_state *state)
+static inline void nft_set_pktinfo_ipv4_validate(struct nft_pktinfo *pkt,
+						 struct sk_buff *skb)
 {
-	nft_set_pktinfo(pkt, skb, state);
-	if (__nft_set_pktinfo_ipv4_validate(pkt, skb, state) < 0)
-		nft_set_pktinfo_proto_unspec(pkt, skb);
+	if (__nft_set_pktinfo_ipv4_validate(pkt, skb) < 0)
+		nft_set_pktinfo_unspec(pkt, skb);
 }
 
-extern struct nft_af_info nft_af_ipv4;
-
 #endif
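
With the state argument dropped from the family helpers, a hook function
now sets the generic fields once via nft_set_pktinfo() and then the
family-specific ones.  A sketch of the resulting calling convention
(nft_do_chain() is the real chain entry point declared in nf_tables.h):

	static unsigned int nft_hook_ipv4(void *priv, struct sk_buff *skb,
					  const struct nf_hook_state *state)
	{
		struct nft_pktinfo pkt;

		nft_set_pktinfo(&pkt, skb, state);
		nft_set_pktinfo_ipv4(&pkt, skb);

		return nft_do_chain(&pkt, priv);
	}
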
diff --git a/include/net/netfilter/nf_tables_ipv6.h b/include/net/netfilter/nf_tables_ipv6.h
index b8065b72..dabe6fd 100644
--- a/include/net/netfilter/nf_tables_ipv6.h
+++ b/include/net/netfilter/nf_tables_ipv6.h
@@ -5,20 +5,16 @@
 #include <linux/netfilter_ipv6/ip6_tables.h>
 #include <net/ipv6.h>
 
-static inline void
-nft_set_pktinfo_ipv6(struct nft_pktinfo *pkt,
-		     struct sk_buff *skb,
-		     const struct nf_hook_state *state)
+static inline void nft_set_pktinfo_ipv6(struct nft_pktinfo *pkt,
+					struct sk_buff *skb)
 {
 	unsigned int flags = IP6_FH_F_AUTH;
 	int protohdr, thoff = 0;
 	unsigned short frag_off;
 
-	nft_set_pktinfo(pkt, skb, state);
-
 	protohdr = ipv6_find_hdr(pkt->skb, &thoff, -1, &frag_off, &flags);
 	if (protohdr < 0) {
-		nft_set_pktinfo_proto_unspec(pkt, skb);
+		nft_set_pktinfo_unspec(pkt, skb);
 		return;
 	}
 
@@ -28,10 +24,8 @@ nft_set_pktinfo_ipv6(struct nft_pktinfo *pkt,
 	pkt->xt.fragoff = frag_off;
 }
 
-static inline int
-__nft_set_pktinfo_ipv6_validate(struct nft_pktinfo *pkt,
-				struct sk_buff *skb,
-				const struct nf_hook_state *state)
+static inline int __nft_set_pktinfo_ipv6_validate(struct nft_pktinfo *pkt,
+						  struct sk_buff *skb)
 {
 #if IS_ENABLED(CONFIG_IPV6)
 	unsigned int flags = IP6_FH_F_AUTH;
@@ -68,16 +62,11 @@ __nft_set_pktinfo_ipv6_validate(struct nft_pktinfo *pkt,
 #endif
 }
 
-static inline void
-nft_set_pktinfo_ipv6_validate(struct nft_pktinfo *pkt,
-			      struct sk_buff *skb,
-			      const struct nf_hook_state *state)
+static inline void nft_set_pktinfo_ipv6_validate(struct nft_pktinfo *pkt,
+						 struct sk_buff *skb)
 {
-	nft_set_pktinfo(pkt, skb, state);
-	if (__nft_set_pktinfo_ipv6_validate(pkt, skb, state) < 0)
-		nft_set_pktinfo_proto_unspec(pkt, skb);
+	if (__nft_set_pktinfo_ipv6_validate(pkt, skb) < 0)
+		nft_set_pktinfo_unspec(pkt, skb);
 }
 
-extern struct nft_af_info nft_af_ipv6;
-
 #endif
diff --git a/include/net/netns/core.h b/include/net/netns/core.h
index 0ad4d0c..36c2d998 100644
--- a/include/net/netns/core.h
+++ b/include/net/netns/core.h
@@ -11,7 +11,10 @@ struct netns_core {
 
 	int	sysctl_somaxconn;
 
-	struct prot_inuse __percpu *inuse;
+#ifdef CONFIG_PROC_FS
+	int __percpu *sock_inuse;
+	struct prot_inuse __percpu *prot_inuse;
+#endif
 };
 
 #endif
diff --git a/include/net/netns/netfilter.h b/include/net/netns/netfilter.h
index cc00af2..ca04334 100644
--- a/include/net/netns/netfilter.h
+++ b/include/net/netns/netfilter.h
@@ -17,7 +17,17 @@ struct netns_nf {
 #ifdef CONFIG_SYSCTL
 	struct ctl_table_header *nf_log_dir_header;
 #endif
-	struct nf_hook_entries __rcu *hooks[NFPROTO_NUMPROTO][NF_MAX_HOOKS];
+	struct nf_hook_entries __rcu *hooks_ipv4[NF_INET_NUMHOOKS];
+	struct nf_hook_entries __rcu *hooks_ipv6[NF_INET_NUMHOOKS];
+#ifdef CONFIG_NETFILTER_FAMILY_ARP
+	struct nf_hook_entries __rcu *hooks_arp[NF_ARP_NUMHOOKS];
+#endif
+#ifdef CONFIG_NETFILTER_FAMILY_BRIDGE
+	struct nf_hook_entries __rcu *hooks_bridge[NF_INET_NUMHOOKS];
+#endif
+#if IS_ENABLED(CONFIG_DECNET)
+	struct nf_hook_entries __rcu *hooks_decnet[NF_DN_NUMHOOKS];
+#endif
 #if IS_ENABLED(CONFIG_NF_DEFRAG_IPV4)
 	bool			defrag_ipv4;
 #endif
diff --git a/include/net/netns/sctp.h b/include/net/netns/sctp.h
index ebc8132..0db7fb3 100644
--- a/include/net/netns/sctp.h
+++ b/include/net/netns/sctp.h
@@ -122,9 +122,12 @@ struct netns_sctp {
 	/* Flag to indicate if PR-CONFIG is enabled. */
 	int reconf_enable;
 
-	/* Flag to idicate if SCTP-AUTH is enabled */
+	/* Flag to indicate if SCTP-AUTH is enabled */
 	int auth_enable;
 
+	/* Flag to indicate if stream interleave is enabled */
+	int intl_enable;
+
 	/*
 	 * Policy to control SCTP IPv4 address scoping
 	 * 0   - Disable IPv4 address scoping
diff --git a/include/net/pkt_cls.h b/include/net/pkt_cls.h
index 8e08b6d..0d1343c 100644
--- a/include/net/pkt_cls.h
+++ b/include/net/pkt_cls.h
@@ -39,9 +39,11 @@ struct tcf_chain *tcf_chain_get(struct tcf_block *block, u32 chain_index,
 				bool create);
 void tcf_chain_put(struct tcf_chain *chain);
 int tcf_block_get(struct tcf_block **p_block,
-		  struct tcf_proto __rcu **p_filter_chain, struct Qdisc *q);
+		  struct tcf_proto __rcu **p_filter_chain, struct Qdisc *q,
+		  struct netlink_ext_ack *extack);
 int tcf_block_get_ext(struct tcf_block **p_block, struct Qdisc *q,
-		      struct tcf_block_ext_info *ei);
+		      struct tcf_block_ext_info *ei,
+		      struct netlink_ext_ack *extack);
 void tcf_block_put(struct tcf_block *block);
 void tcf_block_put_ext(struct tcf_block *block, struct Qdisc *q,
 		       struct tcf_block_ext_info *ei);
@@ -77,14 +79,16 @@ int tcf_classify(struct sk_buff *skb, const struct tcf_proto *tp,
 #else
 static inline
 int tcf_block_get(struct tcf_block **p_block,
-		  struct tcf_proto __rcu **p_filter_chain, struct Qdisc *q)
+		  struct tcf_proto __rcu **p_filter_chain, struct Qdisc *q,
+		  struct netlink_ext_ack *extack)
 {
 	return 0;
 }
 
 static inline
 int tcf_block_get_ext(struct tcf_block **p_block, struct Qdisc *q,
-		      struct tcf_block_ext_info *ei)
+		      struct tcf_block_ext_info *ei,
+		      struct netlink_ext_ack *extack)
 {
 	return 0;
 }
@@ -727,6 +731,11 @@ struct tc_cookie {
 	u32 len;
 };
 
+struct tc_qopt_offload_stats {
+	struct gnet_stats_basic_packed *bstats;
+	struct gnet_stats_queue *qstats;
+};
+
 enum tc_red_command {
 	TC_RED_REPLACE,
 	TC_RED_DESTROY,
@@ -740,10 +749,6 @@ struct tc_red_qopt_offload_params {
 	u32 probability;
 	bool is_ecn;
 };
-struct tc_red_qopt_offload_stats {
-	struct gnet_stats_basic_packed *bstats;
-	struct gnet_stats_queue *qstats;
-};
 
 struct tc_red_qopt_offload {
 	enum tc_red_command command;
@@ -751,7 +756,7 @@ struct tc_red_qopt_offload {
 	u32 parent;
 	union {
 		struct tc_red_qopt_offload_params set;
-		struct tc_red_qopt_offload_stats stats;
+		struct tc_qopt_offload_stats stats;
 		struct red_stats *xstats;
 	};
 };
diff --git a/include/net/pkt_sched.h b/include/net/pkt_sched.h
index d1f413f..e2c75f5 100644
--- a/include/net/pkt_sched.h
+++ b/include/net/pkt_sched.h
@@ -89,7 +89,8 @@ extern struct Qdisc_ops pfifo_head_drop_qdisc_ops;
 
 int fifo_set_limit(struct Qdisc *q, unsigned int limit);
 struct Qdisc *fifo_create_dflt(struct Qdisc *sch, struct Qdisc_ops *ops,
-			       unsigned int limit);
+			       unsigned int limit,
+			       struct netlink_ext_ack *extack);
 
 int register_qdisc(struct Qdisc_ops *qops);
 int unregister_qdisc(struct Qdisc_ops *qops);
@@ -101,20 +102,23 @@ void qdisc_hash_del(struct Qdisc *q);
 struct Qdisc *qdisc_lookup(struct net_device *dev, u32 handle);
 struct Qdisc *qdisc_lookup_class(struct net_device *dev, u32 handle);
 struct qdisc_rate_table *qdisc_get_rtab(struct tc_ratespec *r,
-					struct nlattr *tab);
+					struct nlattr *tab,
+					struct netlink_ext_ack *extack);
 void qdisc_put_rtab(struct qdisc_rate_table *tab);
 void qdisc_put_stab(struct qdisc_size_table *tab);
 void qdisc_warn_nonwc(const char *txt, struct Qdisc *qdisc);
-int sch_direct_xmit(struct sk_buff *skb, struct Qdisc *q,
-		    struct net_device *dev, struct netdev_queue *txq,
-		    spinlock_t *root_lock, bool validate);
+bool sch_direct_xmit(struct sk_buff *skb, struct Qdisc *q,
+		     struct net_device *dev, struct netdev_queue *txq,
+		     spinlock_t *root_lock, bool validate);
 
 void __qdisc_run(struct Qdisc *q);
 
 static inline void qdisc_run(struct Qdisc *q)
 {
-	if (qdisc_run_begin(q))
+	if (qdisc_run_begin(q)) {
 		__qdisc_run(q);
+		qdisc_run_end(q);
+	}
 }
 
 static inline __be16 tc_skb_protocol(const struct sk_buff *skb)
diff --git a/include/net/rtnetlink.h b/include/net/rtnetlink.h
index ead01874..14b6b3a 100644
--- a/include/net/rtnetlink.h
+++ b/include/net/rtnetlink.h
@@ -13,10 +13,10 @@ enum rtnl_link_flags {
 	RTNL_FLAG_DOIT_UNLOCKED = 1,
 };
 
-int __rtnl_register(int protocol, int msgtype,
-		    rtnl_doit_func, rtnl_dumpit_func, unsigned int flags);
 void rtnl_register(int protocol, int msgtype,
 		   rtnl_doit_func, rtnl_dumpit_func, unsigned int flags);
+int rtnl_register_module(struct module *owner, int protocol, int msgtype,
+			 rtnl_doit_func, rtnl_dumpit_func, unsigned int flags);
 int rtnl_unregister(int protocol, int msgtype);
 void rtnl_unregister_all(int protocol);
 
diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h
index 83a3e47..ac029d5 100644
--- a/include/net/sch_generic.h
+++ b/include/net/sch_generic.h
@@ -71,6 +71,7 @@ struct Qdisc {
 				      * qdisc_tree_decrease_qlen() should stop.
 				      */
 #define TCQ_F_INVISIBLE		0x80 /* invisible by default in dump */
+#define TCQ_F_NOLOCK		0x100 /* qdisc does not require locking */
 #define TCQ_F_OFFLOADED		0x200 /* qdisc is offloaded to HW */
 	u32			limit;
 	const struct Qdisc_ops	*ops;
@@ -88,14 +89,14 @@ struct Qdisc {
 	/*
 	 * For performance sake on SMP, we put highly modified fields at the end
 	 */
-	struct sk_buff		*gso_skb ____cacheline_aligned_in_smp;
+	struct sk_buff_head	gso_skb ____cacheline_aligned_in_smp;
 	struct qdisc_skb_head	q;
 	struct gnet_stats_basic_packed bstats;
 	seqcount_t		running;
 	struct gnet_stats_queue	qstats;
 	unsigned long		state;
 	struct Qdisc            *next_sched;
-	struct sk_buff		*skb_bad_txq;
+	struct sk_buff_head	skb_bad_txq;
 	int			padded;
 	refcount_t		refcnt;
 
@@ -150,19 +151,23 @@ struct Qdisc_class_ops {
 	/* Child qdisc manipulation */
 	struct netdev_queue *	(*select_queue)(struct Qdisc *, struct tcmsg *);
 	int			(*graft)(struct Qdisc *, unsigned long cl,
-					struct Qdisc *, struct Qdisc **);
+					struct Qdisc *, struct Qdisc **,
+					struct netlink_ext_ack *extack);
 	struct Qdisc *		(*leaf)(struct Qdisc *, unsigned long cl);
 	void			(*qlen_notify)(struct Qdisc *, unsigned long);
 
 	/* Class manipulation routines */
 	unsigned long		(*find)(struct Qdisc *, u32 classid);
 	int			(*change)(struct Qdisc *, u32, u32,
-					struct nlattr **, unsigned long *);
+					struct nlattr **, unsigned long *,
+					struct netlink_ext_ack *);
 	int			(*delete)(struct Qdisc *, unsigned long);
 	void			(*walk)(struct Qdisc *, struct qdisc_walker * arg);
 
 	/* Filter manipulation */
-	struct tcf_block *	(*tcf_block)(struct Qdisc *, unsigned long);
+	struct tcf_block *	(*tcf_block)(struct Qdisc *sch,
+					     unsigned long arg,
+					     struct netlink_ext_ack *extack);
 	unsigned long		(*bind_tcf)(struct Qdisc *, unsigned long,
 					u32 classid);
 	void			(*unbind_tcf)(struct Qdisc *, unsigned long);
@@ -179,6 +184,7 @@ struct Qdisc_ops {
 	const struct Qdisc_class_ops	*cl_ops;
 	char			id[IFNAMSIZ];
 	int			priv_size;
+	unsigned int		static_flags;
 
 	int 			(*enqueue)(struct sk_buff *skb,
 					   struct Qdisc *sch,
@@ -186,11 +192,14 @@ struct Qdisc_ops {
 	struct sk_buff *	(*dequeue)(struct Qdisc *);
 	struct sk_buff *	(*peek)(struct Qdisc *);
 
-	int			(*init)(struct Qdisc *, struct nlattr *arg);
+	int			(*init)(struct Qdisc *sch, struct nlattr *arg,
+					struct netlink_ext_ack *extack);
 	void			(*reset)(struct Qdisc *);
 	void			(*destroy)(struct Qdisc *);
-	int			(*change)(struct Qdisc *, struct nlattr *arg);
-	void			(*attach)(struct Qdisc *);
+	int			(*change)(struct Qdisc *sch,
+					  struct nlattr *arg,
+					  struct netlink_ext_ack *extack);
+	void			(*attach)(struct Qdisc *sch);
 
 	int			(*dump)(struct Qdisc *, struct sk_buff *);
 	int			(*dump_stats)(struct Qdisc *, struct gnet_dump *);
@@ -279,7 +288,6 @@ struct tcf_block {
 	struct net *net;
 	struct Qdisc *q;
 	struct list_head cb_list;
-	struct work_struct work;
 };
 
 static inline void qdisc_cb_private_validate(const struct sk_buff *skb, int sz)
@@ -290,11 +298,31 @@ static inline void qdisc_cb_private_validate(const struct sk_buff *skb, int sz)
 	BUILD_BUG_ON(sizeof(qcb->data) < sz);
 }
 
+static inline int qdisc_qlen_cpu(const struct Qdisc *q)
+{
+	return this_cpu_ptr(q->cpu_qstats)->qlen;
+}
+
 static inline int qdisc_qlen(const struct Qdisc *q)
 {
 	return q->q.qlen;
 }
 
+static inline int qdisc_qlen_sum(const struct Qdisc *q)
+{
+	__u32 qlen = 0;
+	int i;
+
+	if (q->flags & TCQ_F_NOLOCK) {
+		for_each_possible_cpu(i)
+			qlen += per_cpu_ptr(q->cpu_qstats, i)->qlen;
+	} else {
+		qlen = q->q.qlen;
+	}
+
+	return qlen;
+}
+
 static inline struct qdisc_skb_cb *qdisc_skb_cb(const struct sk_buff *skb)
 {
 	return (struct qdisc_skb_cb *)skb->cb;
@@ -443,9 +471,11 @@ void qdisc_destroy(struct Qdisc *qdisc);
 void qdisc_tree_reduce_backlog(struct Qdisc *qdisc, unsigned int n,
 			       unsigned int len);
 struct Qdisc *qdisc_alloc(struct netdev_queue *dev_queue,
-			  const struct Qdisc_ops *ops);
+			  const struct Qdisc_ops *ops,
+			  struct netlink_ext_ack *extack);
 struct Qdisc *qdisc_create_dflt(struct netdev_queue *dev_queue,
-				const struct Qdisc_ops *ops, u32 parentid);
+				const struct Qdisc_ops *ops, u32 parentid,
+				struct netlink_ext_ack *extack);
 void __qdisc_calculate_pkt_len(struct sk_buff *skb,
 			       const struct qdisc_size_table *stab);
 int skb_do_redirect(struct sk_buff *);
@@ -631,12 +661,39 @@ static inline void qdisc_qstats_backlog_dec(struct Qdisc *sch,
 	sch->qstats.backlog -= qdisc_pkt_len(skb);
 }
 
+static inline void qdisc_qstats_cpu_backlog_dec(struct Qdisc *sch,
+						const struct sk_buff *skb)
+{
+	this_cpu_sub(sch->cpu_qstats->backlog, qdisc_pkt_len(skb));
+}
+
 static inline void qdisc_qstats_backlog_inc(struct Qdisc *sch,
 					    const struct sk_buff *skb)
 {
 	sch->qstats.backlog += qdisc_pkt_len(skb);
 }
 
+static inline void qdisc_qstats_cpu_backlog_inc(struct Qdisc *sch,
+						const struct sk_buff *skb)
+{
+	this_cpu_add(sch->cpu_qstats->backlog, qdisc_pkt_len(skb));
+}
+
+static inline void qdisc_qstats_cpu_qlen_inc(struct Qdisc *sch)
+{
+	this_cpu_inc(sch->cpu_qstats->qlen);
+}
+
+static inline void qdisc_qstats_cpu_qlen_dec(struct Qdisc *sch)
+{
+	this_cpu_dec(sch->cpu_qstats->qlen);
+}
+
+static inline void qdisc_qstats_cpu_requeues_inc(struct Qdisc *sch)
+{
+	this_cpu_inc(sch->cpu_qstats->requeues);
+}
+
 static inline void __qdisc_qstats_drop(struct Qdisc *sch, int count)
 {
 	sch->qstats.drops += count;
@@ -767,26 +824,30 @@ static inline struct sk_buff *qdisc_peek_head(struct Qdisc *sch)
 /* generic pseudo peek method for non-work-conserving qdisc */
 static inline struct sk_buff *qdisc_peek_dequeued(struct Qdisc *sch)
 {
+	struct sk_buff *skb = skb_peek(&sch->gso_skb);
+
 	/* we can reuse ->gso_skb because peek isn't called for root qdiscs */
-	if (!sch->gso_skb) {
-		sch->gso_skb = sch->dequeue(sch);
-		if (sch->gso_skb) {
+	if (!skb) {
+		skb = sch->dequeue(sch);
+
+		if (skb) {
+			__skb_queue_head(&sch->gso_skb, skb);
 			/* it's still part of the queue */
-			qdisc_qstats_backlog_inc(sch, sch->gso_skb);
+			qdisc_qstats_backlog_inc(sch, skb);
 			sch->q.qlen++;
 		}
 	}
 
-	return sch->gso_skb;
+	return skb;
 }
 
 /* use instead of qdisc->dequeue() for all qdiscs queried with ->peek() */
 static inline struct sk_buff *qdisc_dequeue_peeked(struct Qdisc *sch)
 {
-	struct sk_buff *skb = sch->gso_skb;
+	struct sk_buff *skb = skb_peek(&sch->gso_skb);
 
 	if (skb) {
-		sch->gso_skb = NULL;
+		skb = __skb_dequeue(&sch->gso_skb);
 		qdisc_qstats_backlog_dec(sch, skb);
 		sch->q.qlen--;
 	} else {
@@ -844,6 +905,14 @@ static inline void rtnl_qdisc_drop(struct sk_buff *skb, struct Qdisc *sch)
 	qdisc_qstats_drop(sch);
 }
 
+static inline int qdisc_drop_cpu(struct sk_buff *skb, struct Qdisc *sch,
+				 struct sk_buff **to_free)
+{
+	__qdisc_drop(skb, to_free);
+	qdisc_qstats_cpu_drop(sch);
+
+	return NET_XMIT_DROP;
+}
 
 static inline int qdisc_drop(struct sk_buff *skb, struct Qdisc *sch,
 			     struct sk_buff **to_free)
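
For TCQ_F_NOLOCK qdiscs the queue length lives in per-CPU qstats, so any
total has to be summed with qdisc_qlen_sum() rather than read from
q.qlen.  A sketch of a dump callback using it (TCA_MY_QLEN is an
illustrative attribute, not part of this patch):

	static int my_qdisc_dump(struct Qdisc *sch, struct sk_buff *skb)
	{
		/* correct for both lockless and classic qdiscs */
		if (nla_put_u32(skb, TCA_MY_QLEN, qdisc_qlen_sum(sch)))
			return -EMSGSIZE;
		return 0;
	}
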
diff --git a/include/net/sctp/constants.h b/include/net/sctp/constants.h
index deaafa9..20ff237 100644
--- a/include/net/sctp/constants.h
+++ b/include/net/sctp/constants.h
@@ -145,12 +145,13 @@ SCTP_SUBTYPE_CONSTRUCTOR(OTHER,		enum sctp_event_other,	other)
 SCTP_SUBTYPE_CONSTRUCTOR(PRIMITIVE,	enum sctp_event_primitive, primitive)
 
 
-#define sctp_chunk_is_data(a) (a->chunk_hdr->type == SCTP_CID_DATA)
+#define sctp_chunk_is_data(a) (a->chunk_hdr->type == SCTP_CID_DATA || \
+			       a->chunk_hdr->type == SCTP_CID_I_DATA)
 
 /* Calculate the actual data size in a data chunk */
-#define SCTP_DATA_SNDSIZE(c) ((int)((unsigned long)(c->chunk_end)\
-		       		- (unsigned long)(c->chunk_hdr)\
-				- sizeof(struct sctp_data_chunk)))
+#define SCTP_DATA_SNDSIZE(c) ((int)((unsigned long)(c->chunk_end) - \
+				    (unsigned long)(c->chunk_hdr) - \
+				    sctp_datachk_len(&c->asoc->stream)))
 
 /* Internal error codes */
 enum sctp_ierror {
diff --git a/include/net/sctp/sctp.h b/include/net/sctp/sctp.h
index 906a9c0..20c0c1b 100644
--- a/include/net/sctp/sctp.h
+++ b/include/net/sctp/sctp.h
@@ -116,7 +116,7 @@ extern struct percpu_counter sctp_sockets_allocated;
 int sctp_asconf_mgmt(struct sctp_sock *, struct sctp_sockaddr_entry *);
 struct sk_buff *sctp_skb_recv_datagram(struct sock *, int, int, int *);
 
-int sctp_transport_walk_start(struct rhashtable_iter *iter);
+void sctp_transport_walk_start(struct rhashtable_iter *iter);
 void sctp_transport_walk_stop(struct rhashtable_iter *iter);
 struct sctp_transport *sctp_transport_get_next(struct net *net,
 			struct rhashtable_iter *iter);
@@ -444,13 +444,13 @@ static inline int sctp_frag_point(const struct sctp_association *asoc, int pmtu)
 	int frag = pmtu;
 
 	frag -= sp->pf->af->net_header_len;
-	frag -= sizeof(struct sctphdr) + sizeof(struct sctp_data_chunk);
+	frag -= sizeof(struct sctphdr) + sctp_datachk_len(&asoc->stream);
 
 	if (asoc->user_frag)
 		frag = min_t(int, frag, asoc->user_frag);
 
 	frag = SCTP_TRUNC4(min_t(int, frag, SCTP_MAX_CHUNK_LEN -
-					    sizeof(struct sctp_data_chunk)));
+					    sctp_datachk_len(&asoc->stream)));
 
 	return frag;
 }
diff --git a/include/net/sctp/sm.h b/include/net/sctp/sm.h
index 70fb397..2883c43 100644
--- a/include/net/sctp/sm.h
+++ b/include/net/sctp/sm.h
@@ -197,10 +197,14 @@ struct sctp_chunk *sctp_make_cookie_ack(const struct sctp_association *asoc,
 struct sctp_chunk *sctp_make_cwr(const struct sctp_association *asoc,
 				 const __u32 lowest_tsn,
 				 const struct sctp_chunk *chunk);
-struct sctp_chunk *sctp_make_datafrag_empty(struct sctp_association *asoc,
+struct sctp_chunk *sctp_make_idata(const struct sctp_association *asoc,
+				   __u8 flags, int paylen, gfp_t gfp);
+struct sctp_chunk *sctp_make_ifwdtsn(const struct sctp_association *asoc,
+				     __u32 new_cum_tsn, size_t nstreams,
+				     struct sctp_ifwdtsn_skip *skiplist);
+struct sctp_chunk *sctp_make_datafrag_empty(const struct sctp_association *asoc,
 					    const struct sctp_sndrcvinfo *sinfo,
-					    int len, const __u8 flags,
-					    __u16 ssn, gfp_t gfp);
+					    int len, __u8 flags, gfp_t gfp);
 struct sctp_chunk *sctp_make_ecne(const struct sctp_association *asoc,
 				  const __u32 lowest_tsn);
 struct sctp_chunk *sctp_make_sack(const struct sctp_association *asoc);
@@ -342,7 +346,7 @@ static inline __u16 sctp_data_size(struct sctp_chunk *chunk)
 	__u16 size;
 
 	size = ntohs(chunk->chunk_hdr->length);
-	size -= sizeof(struct sctp_data_chunk);
+	size -= sctp_datahdr_len(&chunk->asoc->stream);
 
 	return size;
 }
@@ -358,6 +362,12 @@ static inline __u16 sctp_data_size(struct sctp_chunk *chunk)
 	 typecheck(__u32, b) && \
 	 ((__s32)((a) - (b)) <= 0))
 
+/* Compare two MIDs */
+#define MID_lt(a, b)	\
+	(typecheck(__u32, a) && \
+	 typecheck(__u32, b) && \
+	 ((__s32)((a) - (b)) < 0))
+
 /* Compare two SSNs */
 #define SSN_lt(a,b)		\
 	(typecheck(__u16, a) && \
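
MID_lt() relies on the same serial-number arithmetic as SSN_lt and
TSN_lte: subtracting in the unsigned type and testing the sign of the
signed result stays correct across wraparound.  A standalone userspace
demonstration, with the typecheck() guards dropped for brevity:

	#include <stdio.h>
	#include <stdint.h>

	#define MID_lt(a, b)	((int32_t)((a) - (b)) < 0)

	int main(void)
	{
		uint32_t before_wrap = 0xfffffffeu;
		uint32_t after_wrap  = 0x00000001u;

		/* naive compare: 0 (wrong order across the wrap) */
		printf("naive:  %d\n", before_wrap < after_wrap);
		/* serial arithmetic: 1 (still ordered correctly) */
		printf("serial: %d\n", MID_lt(before_wrap, after_wrap));
		return 0;
	}
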
diff --git a/include/net/sctp/stream_interleave.h b/include/net/sctp/stream_interleave.h
new file mode 100644
index 0000000..6657711
--- /dev/null
+++ b/include/net/sctp/stream_interleave.h
@@ -0,0 +1,61 @@
+/* SCTP kernel implementation
+ * (C) Copyright Red Hat Inc. 2017
+ *
+ * These are definitions used by the stream schedulers, defined in the
+ * IETF ndata draft (https://tools.ietf.org/html/draft-ietf-tsvwg-sctp-ndata-11)
+ *
+ * This SCTP implementation is free software;
+ * you can redistribute it and/or modify it under the terms of
+ * the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This SCTP implementation  is distributed in the hope that it
+ * will be useful, but WITHOUT ANY WARRANTY; without even the implied
+ *                 ************************
+ * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ * See the GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with GNU CC; see the file COPYING.  If not, see
+ * <http://www.gnu.org/licenses/>.
+ *
+ * Please send any bug reports or fixes you make to the
+ * email addresses:
+ *    lksctp developers <linux-sctp@vger.kernel.org>
+ *
+ * Written or modified by:
+ *   Xin Long <lucien.xin@gmail.com>
+ */
+
+#ifndef __sctp_stream_interleave_h__
+#define __sctp_stream_interleave_h__
+
+struct sctp_stream_interleave {
+	__u16	data_chunk_len;
+	__u16	ftsn_chunk_len;
+	/* (I-)DATA process */
+	struct sctp_chunk *(*make_datafrag)(const struct sctp_association *asoc,
+					    const struct sctp_sndrcvinfo *sinfo,
+					    int len, __u8 flags, gfp_t gfp);
+	void	(*assign_number)(struct sctp_chunk *chunk);
+	bool	(*validate_data)(struct sctp_chunk *chunk);
+	int	(*ulpevent_data)(struct sctp_ulpq *ulpq,
+				 struct sctp_chunk *chunk, gfp_t gfp);
+	int	(*enqueue_event)(struct sctp_ulpq *ulpq,
+				 struct sctp_ulpevent *event);
+	void	(*renege_events)(struct sctp_ulpq *ulpq,
+				 struct sctp_chunk *chunk, gfp_t gfp);
+	void	(*start_pd)(struct sctp_ulpq *ulpq, gfp_t gfp);
+	void	(*abort_pd)(struct sctp_ulpq *ulpq, gfp_t gfp);
+	/* (I-)FORWARD-TSN process */
+	void	(*generate_ftsn)(struct sctp_outq *q, __u32 ctsn);
+	bool	(*validate_ftsn)(struct sctp_chunk *chunk);
+	void	(*report_ftsn)(struct sctp_ulpq *ulpq, __u32 ftsn);
+	void	(*handle_ftsn)(struct sctp_ulpq *ulpq,
+			       struct sctp_chunk *chunk);
+};
+
+void sctp_stream_interleave_init(struct sctp_stream *stream);
+
+#endif /* __sctp_stream_interleave_h__ */
diff --git a/include/net/sctp/structs.h b/include/net/sctp/structs.h
index 9a5ccf0..02369e3 100644
--- a/include/net/sctp/structs.h
+++ b/include/net/sctp/structs.h
@@ -89,6 +89,7 @@ struct sctp_stream;
 #include <net/sctp/tsnmap.h>
 #include <net/sctp/ulpevent.h>
 #include <net/sctp/ulpqueue.h>
+#include <net/sctp/stream_interleave.h>
 
 /* Structures useful for managing bind/connect. */
 
@@ -217,6 +218,7 @@ struct sctp_sock {
 		disable_fragments:1,
 		v4mapped:1,
 		frag_interleave:1,
+		strm_interleave:1,
 		recvrcvinfo:1,
 		recvnxtinfo:1,
 		data_ready_signalled:1;
@@ -397,6 +399,28 @@ void sctp_stream_update(struct sctp_stream *stream, struct sctp_stream *new);
 #define sctp_ssn_skip(stream, type, sid, ssn) \
 	((stream)->type[sid].ssn = ssn + 1)
 
+/* What is the current MID number for this stream? */
+#define sctp_mid_peek(stream, type, sid) \
+	((stream)->type[sid].mid)
+
+/* Return the next MID number for this stream.  */
+#define sctp_mid_next(stream, type, sid) \
+	((stream)->type[sid].mid++)
+
+/* Skip over this mid and all below. */
+#define sctp_mid_skip(stream, type, sid, mid) \
+	((stream)->type[sid].mid = mid + 1)
+
+#define sctp_stream_in(asoc, sid) (&(asoc)->stream.in[sid])
+
+/* What is the current MID_uo number for this stream? */
+#define sctp_mid_uo_peek(stream, type, sid) \
+	((stream)->type[sid].mid_uo)
+
+/* Return the next MID_uo number for this stream.  */
+#define sctp_mid_uo_next(stream, type, sid) \
+	((stream)->type[sid].mid_uo++)
+
 /*
  * Pointers to address related SCTP functions.
  * (i.e. things that depend on the address family.)
@@ -574,6 +598,8 @@ struct sctp_chunk {
 		struct sctp_addiphdr *addip_hdr;
 		struct sctp_fwdtsn_hdr *fwdtsn_hdr;
 		struct sctp_authhdr *auth_hdr;
+		struct sctp_idatahdr *idata_hdr;
+		struct sctp_ifwdtsn_hdr *ifwdtsn_hdr;
 	} subh;
 
 	__u8 *chunk_end;
@@ -620,6 +646,7 @@ struct sctp_chunk {
 	__u16	rtt_in_progress:1,	/* This chunk used for RTT calc? */
 		has_tsn:1,		/* Does this chunk have a TSN yet? */
 		has_ssn:1,		/* Does this chunk have a SSN yet? */
+#define has_mid has_ssn
 		singleton:1,		/* Only chunk in the packet? */
 		end_of_packet:1,	/* Last chunk in the packet? */
 		ecn_ce_done:1,		/* Have we processed the ECN CE bit? */
@@ -1073,6 +1100,7 @@ void sctp_retransmit_mark(struct sctp_outq *, struct sctp_transport *, __u8);
 void sctp_outq_uncork(struct sctp_outq *, gfp_t gfp);
 void sctp_prsctp_prune(struct sctp_association *asoc,
 		       struct sctp_sndrcvinfo *sinfo, int msg_len);
+void sctp_generate_fwdtsn(struct sctp_outq *q, __u32 sack_ctsn);
 /* Uncork and flush an outqueue.  */
 static inline void sctp_outq_cork(struct sctp_outq *q)
 {
@@ -1357,13 +1385,25 @@ struct sctp_stream_out_ext {
 };
 
 struct sctp_stream_out {
-	__u16	ssn;
-	__u8	state;
+	union {
+		__u32 mid;
+		__u16 ssn;
+	};
+	__u32 mid_uo;
 	struct sctp_stream_out_ext *ext;
+	__u8 state;
 };
 
 struct sctp_stream_in {
-	__u16	ssn;
+	union {
+		__u32 mid;
+		__u16 ssn;
+	};
+	__u32 mid_uo;
+	__u32 fsn;
+	__u32 fsn_uo;
+	char pd_mode;
+	char pd_mode_uo;
 };
 
 struct sctp_stream {
@@ -1387,11 +1427,32 @@ struct sctp_stream {
 			struct sctp_stream_out_ext *rr_next;
 		};
 	};
+	struct sctp_stream_interleave *si;
 };
 
 #define SCTP_STREAM_CLOSED		0x00
 #define SCTP_STREAM_OPEN		0x01
 
+static inline __u16 sctp_datachk_len(const struct sctp_stream *stream)
+{
+	return stream->si->data_chunk_len;
+}
+
+static inline __u16 sctp_datahdr_len(const struct sctp_stream *stream)
+{
+	return stream->si->data_chunk_len - sizeof(struct sctp_chunkhdr);
+}
+
+static inline __u16 sctp_ftsnchk_len(const struct sctp_stream *stream)
+{
+	return stream->si->ftsn_chunk_len;
+}
+
+static inline __u16 sctp_ftsnhdr_len(const struct sctp_stream *stream)
+{
+	return stream->si->ftsn_chunk_len - sizeof(struct sctp_chunkhdr);
+}
+
 /* SCTP_GET_ASSOC_STATS counters */
 struct sctp_priv_assoc_stats {
 	/* Maximum observed rto in the association during subsequent
@@ -1940,6 +2001,7 @@ struct sctp_association {
 	__u8 need_ecne:1,	/* Need to send an ECNE Chunk? */
 	     temp:1,		/* Is it a temporary association? */
 	     force_delay:1,
+	     intl_enable:1,
 	     prsctp_enable:1,
 	     reconf_enable:1;
 
diff --git a/include/net/sctp/ulpevent.h b/include/net/sctp/ulpevent.h
index 231dc42..51b4e06 100644
--- a/include/net/sctp/ulpevent.h
+++ b/include/net/sctp/ulpevent.h
@@ -45,19 +45,29 @@
 /* A structure to carry information to the ULP (e.g. Sockets API) */
 /* Warning: This sits inside an skb.cb[] area.  Be very careful of
  * growing this structure as it is at the maximum limit now.
+ *
+ * sctp_ulpevent is saved in skb->cb (48 bytes), whose last 4 bytes
+ * are taken by sock_skb_cb, so 'packed' is needed here to make
+ * sctp_ulpevent fit into the remaining 44 bytes.
  */
 struct sctp_ulpevent {
 	struct sctp_association *asoc;
 	struct sctp_chunk *chunk;
 	unsigned int rmem_len;
-	__u32 ppid;
+	union {
+		__u32 mid;
+		__u16 ssn;
+	};
+	union {
+		__u32 ppid;
+		__u32 fsn;
+	};
 	__u32 tsn;
 	__u32 cumtsn;
 	__u16 stream;
-	__u16 ssn;
 	__u16 flags;
 	__u16 msg_flags;
-};
+} __packed;
 
 /* Retrieve the skb this event sits inside of. */
 static inline struct sk_buff *sctp_event2skb(const struct sctp_ulpevent *ev)
@@ -112,7 +122,8 @@ struct sctp_ulpevent *sctp_ulpevent_make_shutdown_event(
 
 struct sctp_ulpevent *sctp_ulpevent_make_pdapi(
 	const struct sctp_association *asoc,
-	__u32 indication, gfp_t gfp);
+	__u32 indication, __u32 sid, __u32 seq,
+	__u32 flags, gfp_t gfp);
 
 struct sctp_ulpevent *sctp_ulpevent_make_adaptation_indication(
 	const struct sctp_association *asoc, gfp_t gfp);
@@ -140,6 +151,10 @@ struct sctp_ulpevent *sctp_ulpevent_make_stream_change_event(
 	const struct sctp_association *asoc, __u16 flags,
 	__u32 strchange_instrms, __u32 strchange_outstrms, gfp_t gfp);
 
+struct sctp_ulpevent *sctp_make_reassembled_event(
+	struct net *net, struct sk_buff_head *queue,
+	struct sk_buff *f_frag, struct sk_buff *l_frag);
+
 void sctp_ulpevent_read_sndrcvinfo(const struct sctp_ulpevent *event,
 				   struct msghdr *);
 void sctp_ulpevent_read_rcvinfo(const struct sctp_ulpevent *event,
diff --git a/include/net/sctp/ulpqueue.h b/include/net/sctp/ulpqueue.h
index e0dce07..bb0ecba 100644
--- a/include/net/sctp/ulpqueue.h
+++ b/include/net/sctp/ulpqueue.h
@@ -45,6 +45,7 @@ struct sctp_ulpq {
 	char pd_mode;
 	struct sctp_association *asoc;
 	struct sk_buff_head reasm;
+	struct sk_buff_head reasm_uo;
 	struct sk_buff_head lobby;
 };
 
@@ -76,11 +77,8 @@ int sctp_clear_pd(struct sock *sk, struct sctp_association *asoc);
 void sctp_ulpq_skip(struct sctp_ulpq *ulpq, __u16 sid, __u16 ssn);
 
 void sctp_ulpq_reasm_flushtsn(struct sctp_ulpq *, __u32);
+
+__u16 sctp_ulpq_renege_list(struct sctp_ulpq *ulpq,
+			    struct sk_buff_head *list, __u16 needed);
+
 #endif /* __sctp_ulpqueue_h__ */
-
-
-
-
-
-
-
diff --git a/include/net/sock.h b/include/net/sock.h
index 7a7b14e..73b7830 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -72,6 +72,7 @@
 #include <net/tcp_states.h>
 #include <linux/net_tstamp.h>
 #include <net/smc.h>
+#include <net/l3mdev.h>
 
 /*
  * This structure really needs to be cleaned up.
@@ -1262,6 +1263,7 @@ proto_memory_pressure(struct proto *prot)
 /* Called with local bh disabled */
 void sock_prot_inuse_add(struct net *net, struct proto *prot, int inc);
 int sock_prot_inuse_get(struct net *net, struct proto *proto);
+int sock_inuse_get(struct net *net);
 #else
 static inline void sock_prot_inuse_add(struct net *net, struct proto *prot,
 		int inc)
@@ -2337,31 +2339,6 @@ static inline bool sk_listener(const struct sock *sk)
 	return (1 << sk->sk_state) & (TCPF_LISTEN | TCPF_NEW_SYN_RECV);
 }
 
-/**
- * sk_state_load - read sk->sk_state for lockless contexts
- * @sk: socket pointer
- *
- * Paired with sk_state_store(). Used in places we do not hold socket lock :
- * tcp_diag_get_info(), tcp_get_info(), tcp_poll(), get_tcp4_sock() ...
- */
-static inline int sk_state_load(const struct sock *sk)
-{
-	return smp_load_acquire(&sk->sk_state);
-}
-
-/**
- * sk_state_store - update sk->sk_state
- * @sk: socket pointer
- * @newstate: new state
- *
- * Paired with sk_state_load(). Should be used in contexts where
- * state change might impact lockless readers.
- */
-static inline void sk_state_store(struct sock *sk, int newstate)
-{
-	smp_store_release(&sk->sk_state, newstate);
-}
-
 void sock_enable_timestamp(struct sock *sk, int flag);
 int sock_get_timestamp(struct sock *, struct timeval __user *);
 int sock_get_timestampns(struct sock *, struct timespec __user *);
@@ -2412,4 +2389,34 @@ static inline int sk_get_rmem0(const struct sock *sk, const struct proto *proto)
 	return *proto->sysctl_rmem;
 }
 
+/* Default TCP Small queue budget is ~1 ms of data (1sec >> 10)
+ * Some wifi drivers need to tweak it to get more chunks.
+ * They can use this helper from their ndo_start_xmit()
+ */
+static inline void sk_pacing_shift_update(struct sock *sk, int val)
+{
+	if (!sk || !sk_fullsock(sk) || sk->sk_pacing_shift == val)
+		return;
+	sk->sk_pacing_shift = val;
+}
+
+/* if a socket is bound to a device, check that the given device
+ * index is either the same or that the socket is bound to an L3
+ * master device and the given device index is also enslaved to
+ * that L3 master
+ */
+static inline bool sk_dev_equal_l3scope(struct sock *sk, int dif)
+{
+	int mdif;
+
+	if (!sk->sk_bound_dev_if || sk->sk_bound_dev_if == dif)
+		return true;
+
+	mdif = l3mdev_master_ifindex_by_index(sock_net(sk), dif);
+	if (mdif && mdif == sk->sk_bound_dev_if)
+		return true;
+
+	return false;
+}
+
 #endif	/* _SOCK_H */
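
sk_pacing_shift_update() is intended to be called from a driver's
ndo_start_xmit(), as the comment above says; the guard against rewriting
an unchanged value keeps it cheap on the hot path.  A sketch (the shift
of 8, roughly 4 ms of data instead of the default ~1 ms, is
illustrative):

	static netdev_tx_t my_ndo_start_xmit(struct sk_buff *skb,
					     struct net_device *dev)
	{
		/* let TSQ queue ~4x the default per-socket data */
		sk_pacing_shift_update(skb->sk, 8);

		/* ... hand the skb to the hardware ... */
		return NETDEV_TX_OK;
	}
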
diff --git a/include/net/tc_act/tc_mirred.h b/include/net/tc_act/tc_mirred.h
index 21d253c..a2e9cbc 100644
--- a/include/net/tc_act/tc_mirred.h
+++ b/include/net/tc_act/tc_mirred.h
@@ -8,10 +8,8 @@
 struct tcf_mirred {
 	struct tc_action	common;
 	int			tcfm_eaction;
-	int			tcfm_ifindex;
 	bool			tcfm_mac_header_xmit;
 	struct net_device __rcu	*tcfm_dev;
-	struct net		*net;
 	struct list_head	tcfm_list;
 };
 #define to_mirred(a) ((struct tcf_mirred *)a)
@@ -34,9 +32,9 @@ static inline bool is_tcf_mirred_egress_mirror(const struct tc_action *a)
 	return false;
 }
 
-static inline int tcf_mirred_ifindex(const struct tc_action *a)
+static inline struct net_device *tcf_mirred_dev(const struct tc_action *a)
 {
-	return to_mirred(a)->tcfm_ifindex;
+	return rtnl_dereference(to_mirred(a)->tcfm_dev);
 }
 
 #endif /* __NET_TC_MIR_H */
diff --git a/include/net/tcp.h b/include/net/tcp.h
index 6da880d..6939e69 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -1507,8 +1507,7 @@ int tcp_md5_hash_key(struct tcp_md5sig_pool *hp,
 
 /* From tcp_fastopen.c */
 void tcp_fastopen_cache_get(struct sock *sk, u16 *mss,
-			    struct tcp_fastopen_cookie *cookie, int *syn_loss,
-			    unsigned long *last_syn_loss);
+			    struct tcp_fastopen_cookie *cookie);
 void tcp_fastopen_cache_set(struct sock *sk, u16 mss,
 			    struct tcp_fastopen_cookie *cookie, bool syn_lost,
 			    u16 try_exp);
@@ -1546,7 +1545,7 @@ extern unsigned int sysctl_tcp_fastopen_blackhole_timeout;
 void tcp_fastopen_active_disable(struct sock *sk);
 bool tcp_fastopen_active_should_disable(struct sock *sk);
 void tcp_fastopen_active_disable_ofo_check(struct sock *sk);
-void tcp_fastopen_active_timeout_reset(void);
+void tcp_fastopen_active_detect_blackhole(struct sock *sk, bool expired);
 
 /* Latencies incurred by various limits for a sender. They are
  * chronograph-like stats that are mutually exclusive.
@@ -2011,10 +2010,12 @@ static inline int tcp_call_bpf(struct sock *sk, int op)
 	struct bpf_sock_ops_kern sock_ops;
 	int ret;
 
-	if (sk_fullsock(sk))
-		sock_owned_by_me(sk);
-
 	memset(&sock_ops, 0, sizeof(sock_ops));
+	if (sk_fullsock(sk)) {
+		sock_ops.is_fullsock = 1;
+		sock_owned_by_me(sk);
+	}
+
 	sock_ops.sk = sk;
 	sock_ops.op = op;
 
diff --git a/include/net/xdp.h b/include/net/xdp.h
new file mode 100644
index 0000000..b2362dd
--- /dev/null
+++ b/include/net/xdp.h
@@ -0,0 +1,48 @@
+/* include/net/xdp.h
+ *
+ * Copyright (c) 2017 Jesper Dangaard Brouer, Red Hat Inc.
+ * Released under terms in GPL version 2.  See COPYING.
+ */
+#ifndef __LINUX_NET_XDP_H__
+#define __LINUX_NET_XDP_H__
+
+/**
+ * DOC: XDP RX-queue information
+ *
+ * The XDP RX-queue info (xdp_rxq_info) is associated with the driver
+ * level RX-ring queues.  It is information that is specific to how
+ * the driver has configured a given RX-ring queue.
+ *
+ * Each xdp_buff frame received in the driver carries a (pointer)
+ * reference to this xdp_rxq_info structure.  This provides the XDP
+ * data-path read-access to RX-info for both kernel and bpf-side
+ * (limited subset).
+ *
+ * For now, direct access is only safe while running in NAPI/softirq
+ * context.  Contents are read-mostly and must not be updated during
+ * driver NAPI/softirq poll.
+ *
+ * The driver usage API is a register and unregister API.
+ *
+ * The struct is not directly tied to the XDP prog.  A new XDP prog
+ * can be attached as long as it doesn't change the underlying
+ * RX-ring.  If the RX-ring does change significantly, the NIC driver
+ * naturally needs to stop the RX-ring before purging and reallocating
+ * memory.  In that process the driver MUST call unregister (which
+ * also applies to driver shutdown and unload).  The register API is
+ * also mandatory during RX-ring setup.
+ */
+
+struct xdp_rxq_info {
+	struct net_device *dev;
+	u32 queue_index;
+	u32 reg_state;
+} ____cacheline_aligned; /* perf critical, avoid false-sharing */
+
+int xdp_rxq_info_reg(struct xdp_rxq_info *xdp_rxq,
+		     struct net_device *dev, u32 queue_index);
+void xdp_rxq_info_unreg(struct xdp_rxq_info *xdp_rxq);
+void xdp_rxq_info_unused(struct xdp_rxq_info *xdp_rxq);
+bool xdp_rxq_info_is_reg(struct xdp_rxq_info *xdp_rxq);
+
+#endif /* __LINUX_NET_XDP_H__ */
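
As a driver-side illustration of the contract described in the DOC block
above (register at RX-ring setup, unregister before the ring memory is
purged), here is a hedged sketch; the mydrv_* names are hypothetical:

#include <linux/netdevice.h>
#include <net/xdp.h>

struct mydrv_rx_ring {
	struct net_device *netdev;
	u32 queue_index;
	struct xdp_rxq_info xdp_rxq;
	/* ... descriptors, buffers ... */
};

static int mydrv_setup_rx_ring(struct mydrv_rx_ring *ring)
{
	int err;

	/* mandatory during RX-ring setup */
	err = xdp_rxq_info_reg(&ring->xdp_rxq, ring->netdev,
			       ring->queue_index);
	if (err)
		return err;

	/* ... allocate descriptors and buffers ... */
	return 0;
}

static void mydrv_free_rx_ring(struct mydrv_rx_ring *ring)
{
	/* MUST run before purging/reallocating the ring memory */
	xdp_rxq_info_unreg(&ring->xdp_rxq);
	/* ... free descriptors and buffers ... */
}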
diff --git a/include/net/xfrm.h b/include/net/xfrm.h
index ae35991..2e6d4fe 100644
--- a/include/net/xfrm.h
+++ b/include/net/xfrm.h
@@ -968,7 +968,7 @@ static inline bool xfrm_sec_ctx_match(struct xfrm_sec_ctx *s1, struct xfrm_sec_c
 
 /* A struct encoding bundle of transformations to apply to some set of flow.
  *
- * dst->child points to the next element of bundle.
+ * xdst->child points to the next element of bundle.
  * dst->xfrm  points to an instanse of transformer.
  *
  * Due to unfortunate limitations of current routing cache, which we
@@ -984,6 +984,8 @@ struct xfrm_dst {
 		struct rt6_info		rt6;
 	} u;
 	struct dst_entry *route;
+	struct dst_entry *child;
+	struct dst_entry *path;
 	struct xfrm_policy *pols[XFRM_POLICY_TYPE_MAX];
 	int num_pols, num_xfrms;
 	u32 xfrm_genid;
@@ -994,7 +996,35 @@ struct xfrm_dst {
 	u32 path_cookie;
 };
 
+static inline struct dst_entry *xfrm_dst_path(const struct dst_entry *dst)
+{
 #ifdef CONFIG_XFRM
+	if (dst->xfrm) {
+		const struct xfrm_dst *xdst = (const struct xfrm_dst *) dst;
+
+		return xdst->path;
+	}
+#endif
+	return (struct dst_entry *) dst;
+}
+
+static inline struct dst_entry *xfrm_dst_child(const struct dst_entry *dst)
+{
+#ifdef CONFIG_XFRM
+	if (dst->xfrm) {
+		struct xfrm_dst *xdst = (struct xfrm_dst *) dst;
+		return xdst->child;
+	}
+#endif
+	return NULL;
+}
+
+#ifdef CONFIG_XFRM
+static inline void xfrm_dst_set_child(struct xfrm_dst *xdst, struct dst_entry *child)
+{
+	xdst->child = child;
+}
+
 static inline void xfrm_dst_destroy(struct xfrm_dst *xdst)
 {
 	xfrm_pols_put(xdst->pols, xdst->num_pols);
@@ -1021,6 +1051,7 @@ struct xfrm_offload {
 #define	XFRM_GSO_SEGMENT	16
 #define	XFRM_GRO		32
 #define	XFRM_ESP_NO_TRAILER	64
+#define	XFRM_DEV_RESUME		128
 
 	__u32			status;
 #define CRYPTO_SUCCESS				1
@@ -1847,21 +1878,28 @@ static inline struct xfrm_state *xfrm_input_state(struct sk_buff *skb)
 {
 	return skb->sp->xvec[skb->sp->len - 1];
 }
+#endif
+
 static inline struct xfrm_offload *xfrm_offload(struct sk_buff *skb)
 {
+#ifdef CONFIG_XFRM
 	struct sec_path *sp = skb->sp;
 
 	if (!sp || !sp->olen || sp->len != sp->olen)
 		return NULL;
 
 	return &sp->ovec[sp->olen - 1];
-}
+#else
+	return NULL;
 #endif
+}
 
 void __net_init xfrm_dev_init(void);
 
 #ifdef CONFIG_XFRM_OFFLOAD
-int validate_xmit_xfrm(struct sk_buff *skb, netdev_features_t features);
+void xfrm_dev_resume(struct sk_buff *skb);
+void xfrm_dev_backlog(struct softnet_data *sd);
+struct sk_buff *validate_xmit_xfrm(struct sk_buff *skb, netdev_features_t features, bool *again);
 int xfrm_dev_state_add(struct net *net, struct xfrm_state *x,
 		       struct xfrm_user_offload *xuo);
 bool xfrm_dev_offload_ok(struct sk_buff *skb, struct xfrm_state *x);
@@ -1869,12 +1907,16 @@ bool xfrm_dev_offload_ok(struct sk_buff *skb, struct xfrm_state *x);
 static inline bool xfrm_dst_offload_ok(struct dst_entry *dst)
 {
 	struct xfrm_state *x = dst->xfrm;
+	struct xfrm_dst *xdst;
 
 	if (!x || !x->type_offload)
 		return false;
 
-	if (x->xso.offload_handle && (x->xso.dev == dst->path->dev) &&
-	    !dst->child->xfrm)
+	xdst = (struct xfrm_dst *) dst;
+	if (!x->xso.offload_handle && !xdst->child->xfrm)
+		return true;
+	if (x->xso.offload_handle && (x->xso.dev == xfrm_dst_path(dst)->dev) &&
+	    !xdst->child->xfrm)
 		return true;
 
 	return false;
@@ -1894,15 +1936,24 @@ static inline void xfrm_dev_state_free(struct xfrm_state *x)
 	 struct net_device *dev = xso->dev;
 
 	if (dev && dev->xfrmdev_ops) {
-		dev->xfrmdev_ops->xdo_dev_state_free(x);
+		if (dev->xfrmdev_ops->xdo_dev_state_free)
+			dev->xfrmdev_ops->xdo_dev_state_free(x);
 		xso->dev = NULL;
 		dev_put(dev);
 	}
 }
 #else
-static inline int validate_xmit_xfrm(struct sk_buff *skb, netdev_features_t features)
+static inline void xfrm_dev_resume(struct sk_buff *skb)
 {
-	return 0;
+}
+
+static inline void xfrm_dev_backlog(struct softnet_data *sd)
+{
+}
+
+static inline struct sk_buff *validate_xmit_xfrm(struct sk_buff *skb, netdev_features_t features, bool *again)
+{
+	return skb;
 }
 
 static inline int xfrm_dev_state_add(struct net *net, struct xfrm_state *x, struct xfrm_user_offload *xuo)
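
Code outside xfrm that used to chase dst->child can switch to the
accessors introduced above. A small sketch, counting the entries of a
bundle; xfrm_dst_child() returns NULL past the last transform, which
terminates the walk:

#include <net/xfrm.h>

static unsigned int bundle_depth(struct dst_entry *dst)
{
	unsigned int depth = 0;

	for (; dst; dst = xfrm_dst_child(dst))
		depth++;

	return depth;
}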
diff --git a/include/trace/events/bridge.h b/include/trace/events/bridge.h
index 1bee3e7..8ea9664 100644
--- a/include/trace/events/bridge.h
+++ b/include/trace/events/bridge.h
@@ -82,8 +82,8 @@ TRACE_EVENT(fdb_delete,
 	TP_fast_assign(
 		__assign_str(br_dev, br->dev->name);
 		__assign_str(dev, f->dst ? f->dst->dev->name : "null");
-		memcpy(__entry->addr, f->addr.addr, ETH_ALEN);
-		__entry->vid = f->vlan_id;
+		memcpy(__entry->addr, f->key.addr.addr, ETH_ALEN);
+		__entry->vid = f->key.vlan_id;
 	),
 
 	TP_printk("br_dev %s dev %s addr %02x:%02x:%02x:%02x:%02x:%02x vid %u",
diff --git a/include/trace/events/net_probe_common.h b/include/trace/events/net_probe_common.h
new file mode 100644
index 0000000..3930119
--- /dev/null
+++ b/include/trace/events/net_probe_common.h
@@ -0,0 +1,44 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#if !defined(_TRACE_NET_PROBE_COMMON_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_NET_PROBE_COMMON_H
+
+#define TP_STORE_ADDR_PORTS_V4(__entry, inet, sk)			\
+	do {								\
+		struct sockaddr_in *v4 = (void *)__entry->saddr;	\
+									\
+		v4->sin_family = AF_INET;				\
+		v4->sin_port = inet->inet_sport;			\
+		v4->sin_addr.s_addr = inet->inet_saddr;			\
+		v4 = (void *)__entry->daddr;				\
+		v4->sin_family = AF_INET;				\
+		v4->sin_port = inet->inet_dport;			\
+		v4->sin_addr.s_addr = inet->inet_daddr;			\
+	} while (0)
+
+#if IS_ENABLED(CONFIG_IPV6)
+
+#define TP_STORE_ADDR_PORTS(__entry, inet, sk)				\
+	do {								\
+		if (sk->sk_family == AF_INET6) {			\
+			struct sockaddr_in6 *v6 = (void *)__entry->saddr; \
+									\
+			v6->sin6_family = AF_INET6;			\
+			v6->sin6_port = inet->inet_sport;		\
+			v6->sin6_addr = inet6_sk(sk)->saddr;		\
+			v6 = (void *)__entry->daddr;			\
+			v6->sin6_family = AF_INET6;			\
+			v6->sin6_port = inet->inet_dport;		\
+			v6->sin6_addr = sk->sk_v6_daddr;		\
+		} else							\
+			TP_STORE_ADDR_PORTS_V4(__entry, inet, sk);	\
+	} while (0)
+
+#else
+
+#define TP_STORE_ADDR_PORTS(__entry, inet, sk)		\
+	TP_STORE_ADDR_PORTS_V4(__entry, inet, sk);
+
+#endif
+
+#endif
diff --git a/include/trace/events/sctp.h b/include/trace/events/sctp.h
new file mode 100644
index 0000000..7475c7b
--- /dev/null
+++ b/include/trace/events/sctp.h
@@ -0,0 +1,99 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM sctp
+
+#if !defined(_TRACE_SCTP_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_SCTP_H
+
+#include <net/sctp/structs.h>
+#include <linux/tracepoint.h>
+
+TRACE_EVENT(sctp_probe_path,
+
+	TP_PROTO(struct sctp_transport *sp,
+		 const struct sctp_association *asoc),
+
+	TP_ARGS(sp, asoc),
+
+	TP_STRUCT__entry(
+		__field(__u64, asoc)
+		__field(__u32, primary)
+		__array(__u8, ipaddr, sizeof(union sctp_addr))
+		__field(__u32, state)
+		__field(__u32, cwnd)
+		__field(__u32, ssthresh)
+		__field(__u32, flight_size)
+		__field(__u32, partial_bytes_acked)
+		__field(__u32, pathmtu)
+	),
+
+	TP_fast_assign(
+		__entry->asoc = (unsigned long)asoc;
+		__entry->primary = (sp == asoc->peer.primary_path);
+		memcpy(__entry->ipaddr, &sp->ipaddr, sizeof(union sctp_addr));
+		__entry->state = sp->state;
+		__entry->cwnd = sp->cwnd;
+		__entry->ssthresh = sp->ssthresh;
+		__entry->flight_size = sp->flight_size;
+		__entry->partial_bytes_acked = sp->partial_bytes_acked;
+		__entry->pathmtu = sp->pathmtu;
+	),
+
+	TP_printk("asoc=%#llx%s ipaddr=%pISpc state=%u cwnd=%u ssthresh=%u "
+		  "flight_size=%u partial_bytes_acked=%u pathmtu=%u",
+		  __entry->asoc, __entry->primary ? "(*)" : "",
+		  __entry->ipaddr, __entry->state, __entry->cwnd,
+		  __entry->ssthresh, __entry->flight_size,
+		  __entry->partial_bytes_acked, __entry->pathmtu)
+);
+
+TRACE_EVENT(sctp_probe,
+
+	TP_PROTO(const struct sctp_endpoint *ep,
+		 const struct sctp_association *asoc,
+		 struct sctp_chunk *chunk),
+
+	TP_ARGS(ep, asoc, chunk),
+
+	TP_STRUCT__entry(
+		__field(__u64, asoc)
+		__field(__u32, mark)
+		__field(__u16, bind_port)
+		__field(__u16, peer_port)
+		__field(__u32, pathmtu)
+		__field(__u32, rwnd)
+		__field(__u16, unack_data)
+	),
+
+	TP_fast_assign(
+		struct sk_buff *skb = chunk->skb;
+
+		__entry->asoc = (unsigned long)asoc;
+		__entry->mark = skb->mark;
+		__entry->bind_port = ep->base.bind_addr.port;
+		__entry->peer_port = asoc->peer.port;
+		__entry->pathmtu = asoc->pathmtu;
+		__entry->rwnd = asoc->peer.rwnd;
+		__entry->unack_data = asoc->unack_data;
+
+		if (trace_sctp_probe_path_enabled()) {
+			struct sctp_transport *sp;
+
+			list_for_each_entry(sp, &asoc->peer.transport_addr_list,
+					    transports) {
+				trace_sctp_probe_path(sp, asoc);
+			}
+		}
+	),
+
+	TP_printk("asoc=%#llx mark=%#x bind_port=%d peer_port=%d pathmtu=%d "
+		  "rwnd=%u unack_data=%d",
+		  __entry->asoc, __entry->mark, __entry->bind_port,
+		  __entry->peer_port, __entry->pathmtu, __entry->rwnd,
+		  __entry->unack_data)
+);
+
+#endif /* _TRACE_SCTP_H */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
diff --git a/include/trace/events/sock.h b/include/trace/events/sock.h
index ec4dade..3176a39 100644
--- a/include/trace/events/sock.h
+++ b/include/trace/events/sock.h
@@ -6,7 +6,58 @@
 #define _TRACE_SOCK_H
 
 #include <net/sock.h>
+#include <net/ipv6.h>
 #include <linux/tracepoint.h>
+#include <linux/ipv6.h>
+#include <linux/tcp.h>
+
+#define family_names			\
+		EM(AF_INET)				\
+		EMe(AF_INET6)
+
+/* The protocol traced by inet_sock_set_state */
+#define inet_protocol_names		\
+		EM(IPPROTO_TCP)			\
+		EM(IPPROTO_DCCP)		\
+		EMe(IPPROTO_SCTP)
+
+#define tcp_state_names			\
+		EM(TCP_ESTABLISHED)		\
+		EM(TCP_SYN_SENT)		\
+		EM(TCP_SYN_RECV)		\
+		EM(TCP_FIN_WAIT1)		\
+		EM(TCP_FIN_WAIT2)		\
+		EM(TCP_TIME_WAIT)		\
+		EM(TCP_CLOSE)			\
+		EM(TCP_CLOSE_WAIT)		\
+		EM(TCP_LAST_ACK)		\
+		EM(TCP_LISTEN)			\
+		EM(TCP_CLOSING)			\
+		EMe(TCP_NEW_SYN_RECV)
+
+/* enums need to be exported to user space */
+#undef EM
+#undef EMe
+#define EM(a)       TRACE_DEFINE_ENUM(a);
+#define EMe(a)      TRACE_DEFINE_ENUM(a);
+
+family_names
+inet_protocol_names
+tcp_state_names
+
+#undef EM
+#undef EMe
+#define EM(a)       { a, #a },
+#define EMe(a)      { a, #a }
+
+#define show_family_name(val)			\
+	__print_symbolic(val, family_names)
+
+#define show_inet_protocol_name(val)    \
+	__print_symbolic(val, inet_protocol_names)
+
+#define show_tcp_state_name(val)        \
+	__print_symbolic(val, tcp_state_names)
 
 TRACE_EVENT(sock_rcvqueue_full,
 
@@ -63,6 +114,72 @@ TRACE_EVENT(sock_exceed_buf_limit,
 		__entry->rmem_alloc)
 );
 
+TRACE_EVENT(inet_sock_set_state,
+
+	TP_PROTO(const struct sock *sk, const int oldstate, const int newstate),
+
+	TP_ARGS(sk, oldstate, newstate),
+
+	TP_STRUCT__entry(
+		__field(const void *, skaddr)
+		__field(int, oldstate)
+		__field(int, newstate)
+		__field(__u16, sport)
+		__field(__u16, dport)
+		__field(__u16, family)
+		__field(__u8, protocol)
+		__array(__u8, saddr, 4)
+		__array(__u8, daddr, 4)
+		__array(__u8, saddr_v6, 16)
+		__array(__u8, daddr_v6, 16)
+	),
+
+	TP_fast_assign(
+		struct inet_sock *inet = inet_sk(sk);
+		struct in6_addr *pin6;
+		__be32 *p32;
+
+		__entry->skaddr = sk;
+		__entry->oldstate = oldstate;
+		__entry->newstate = newstate;
+
+		__entry->family = sk->sk_family;
+		__entry->protocol = sk->sk_protocol;
+		__entry->sport = ntohs(inet->inet_sport);
+		__entry->dport = ntohs(inet->inet_dport);
+
+		p32 = (__be32 *) __entry->saddr;
+		*p32 = inet->inet_saddr;
+
+		p32 = (__be32 *) __entry->daddr;
+		*p32 =  inet->inet_daddr;
+
+#if IS_ENABLED(CONFIG_IPV6)
+		if (sk->sk_family == AF_INET6) {
+			pin6 = (struct in6_addr *)__entry->saddr_v6;
+			*pin6 = sk->sk_v6_rcv_saddr;
+			pin6 = (struct in6_addr *)__entry->daddr_v6;
+			*pin6 = sk->sk_v6_daddr;
+		} else
+#endif
+		{
+			pin6 = (struct in6_addr *)__entry->saddr_v6;
+			ipv6_addr_set_v4mapped(inet->inet_saddr, pin6);
+			pin6 = (struct in6_addr *)__entry->daddr_v6;
+			ipv6_addr_set_v4mapped(inet->inet_daddr, pin6);
+		}
+	),
+
+	TP_printk("family=%s protocol=%s sport=%hu dport=%hu saddr=%pI4 daddr=%pI4 saddrv6=%pI6c daddrv6=%pI6c oldstate=%s newstate=%s",
+			show_family_name(__entry->family),
+			show_inet_protocol_name(__entry->protocol),
+			__entry->sport, __entry->dport,
+			__entry->saddr, __entry->daddr,
+			__entry->saddr_v6, __entry->daddr_v6,
+			show_tcp_state_name(__entry->oldstate),
+			show_tcp_state_name(__entry->newstate))
+);
+
 #endif /* _TRACE_SOCK_H */
 
 /* This part must be outside protection */
diff --git a/include/trace/events/tcp.h b/include/trace/events/tcp.h
index ab34c56..878b2be 100644
--- a/include/trace/events/tcp.h
+++ b/include/trace/events/tcp.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
 #undef TRACE_SYSTEM
 #define TRACE_SYSTEM tcp
 
@@ -8,22 +9,7 @@
 #include <linux/tcp.h>
 #include <linux/tracepoint.h>
 #include <net/ipv6.h>
-
-#define tcp_state_name(state)	{ state, #state }
-#define show_tcp_state_name(val)			\
-	__print_symbolic(val,				\
-		tcp_state_name(TCP_ESTABLISHED),	\
-		tcp_state_name(TCP_SYN_SENT),		\
-		tcp_state_name(TCP_SYN_RECV),		\
-		tcp_state_name(TCP_FIN_WAIT1),		\
-		tcp_state_name(TCP_FIN_WAIT2),		\
-		tcp_state_name(TCP_TIME_WAIT),		\
-		tcp_state_name(TCP_CLOSE),		\
-		tcp_state_name(TCP_CLOSE_WAIT),		\
-		tcp_state_name(TCP_LAST_ACK),		\
-		tcp_state_name(TCP_LISTEN),		\
-		tcp_state_name(TCP_CLOSING),		\
-		tcp_state_name(TCP_NEW_SYN_RECV))
+#include <net/tcp.h>
 
 #define TP_STORE_V4MAPPED(__entry, saddr, daddr)		\
 	do {							\
@@ -270,6 +256,64 @@ TRACE_EVENT(tcp_retransmit_synack,
 		  __entry->saddr_v6, __entry->daddr_v6)
 );
 
+#include <trace/events/net_probe_common.h>
+
+TRACE_EVENT(tcp_probe,
+
+	TP_PROTO(struct sock *sk, struct sk_buff *skb),
+
+	TP_ARGS(sk, skb),
+
+	TP_STRUCT__entry(
+		/* sockaddr_in6 is always bigger than sockaddr_in */
+		__array(__u8, saddr, sizeof(struct sockaddr_in6))
+		__array(__u8, daddr, sizeof(struct sockaddr_in6))
+		__field(__u16, sport)
+		__field(__u16, dport)
+		__field(__u32, mark)
+		__field(__u16, length)
+		__field(__u32, snd_nxt)
+		__field(__u32, snd_una)
+		__field(__u32, snd_cwnd)
+		__field(__u32, ssthresh)
+		__field(__u32, snd_wnd)
+		__field(__u32, srtt)
+		__field(__u32, rcv_wnd)
+	),
+
+	TP_fast_assign(
+		const struct tcp_sock *tp = tcp_sk(sk);
+		const struct inet_sock *inet = inet_sk(sk);
+
+		memset(__entry->saddr, 0, sizeof(struct sockaddr_in6));
+		memset(__entry->daddr, 0, sizeof(struct sockaddr_in6));
+
+		TP_STORE_ADDR_PORTS(__entry, inet, sk);
+
+		/* For filtering use */
+		__entry->sport = ntohs(inet->inet_sport);
+		__entry->dport = ntohs(inet->inet_dport);
+		__entry->mark = skb->mark;
+
+		__entry->length = skb->len;
+		__entry->snd_nxt = tp->snd_nxt;
+		__entry->snd_una = tp->snd_una;
+		__entry->snd_cwnd = tp->snd_cwnd;
+		__entry->snd_wnd = tp->snd_wnd;
+		__entry->rcv_wnd = tp->rcv_wnd;
+		__entry->ssthresh = tcp_current_ssthresh(sk);
+		__entry->srtt = tp->srtt_us >> 3;
+	),
+
+	TP_printk("src=%pISpc dest=%pISpc mark=%#x length=%d snd_nxt=%#x "
+		  "snd_una=%#x snd_cwnd=%u ssthresh=%u snd_wnd=%u srtt=%u "
+		  "rcv_wnd=%u",
+		  __entry->saddr, __entry->daddr, __entry->mark,
+		  __entry->length, __entry->snd_nxt, __entry->snd_una,
+		  __entry->snd_cwnd, __entry->ssthresh, __entry->snd_wnd,
+		  __entry->srtt, __entry->rcv_wnd)
+);
+
 #endif /* _TRACE_TCP_H */
 
 /* This part must be outside protection */
diff --git a/net/batman-adv/packet.h b/include/uapi/linux/batadv_packet.h
similarity index 79%
rename from net/batman-adv/packet.h
rename to include/uapi/linux/batadv_packet.h
index 8e8a5db..5cb360b 100644
--- a/net/batman-adv/packet.h
+++ b/include/uapi/linux/batadv_packet.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: (GPL-2.0 WITH Linux-syscall-note) */
 /* Copyright (C) 2007-2017  B.A.T.M.A.N. contributors:
  *
  * Marek Lindner, Simon Wunderlich
@@ -15,13 +16,20 @@
  * along with this program; if not, see <http://www.gnu.org/licenses/>.
  */
 
-#ifndef _NET_BATMAN_ADV_PACKET_H_
-#define _NET_BATMAN_ADV_PACKET_H_
+#ifndef _UAPI_LINUX_BATADV_PACKET_H_
+#define _UAPI_LINUX_BATADV_PACKET_H_
 
 #include <asm/byteorder.h>
+#include <linux/if_ether.h>
 #include <linux/types.h>
 
-#define batadv_tp_is_error(n) ((u8)(n) > 127 ? 1 : 0)
+/**
+ * batadv_tp_is_error() - Check throughput meter return code for error
+ * @n: throughput meter return code
+ *
+ * Return: 0 when no error was detected, != 0 otherwise
+ */
+#define batadv_tp_is_error(n) ((__u8)(n) > 127 ? 1 : 0)
 
 /**
  * enum batadv_packettype - types for batman-adv encapsulated packets
@@ -83,12 +91,20 @@ enum batadv_subtype {
  *     one hop neighbor on the interface where it was originally received.
  */
 enum batadv_iv_flags {
-	BATADV_NOT_BEST_NEXT_HOP   = BIT(0),
-	BATADV_PRIMARIES_FIRST_HOP = BIT(1),
-	BATADV_DIRECTLINK          = BIT(2),
+	BATADV_NOT_BEST_NEXT_HOP   = 1UL << 0,
+	BATADV_PRIMARIES_FIRST_HOP = 1UL << 1,
+	BATADV_DIRECTLINK          = 1UL << 2,
 };
 
-/* ICMP message types */
+/**
+ * enum batadv_icmp_packettype - ICMP message types
+ * @BATADV_ECHO_REPLY: success reply to BATADV_ECHO_REQUEST
+ * @BATADV_DESTINATION_UNREACHABLE: failure when route to destination not found
+ * @BATADV_ECHO_REQUEST: request BATADV_ECHO_REPLY from destination
+ * @BATADV_TTL_EXCEEDED: error after BATADV_ECHO_REQUEST traversed too many hops
+ * @BATADV_PARAMETER_PROBLEM: return code for malformed messages
+ * @BATADV_TP: throughput meter packet
+ */
 enum batadv_icmp_packettype {
 	BATADV_ECHO_REPLY	       = 0,
 	BATADV_DESTINATION_UNREACHABLE = 3,
@@ -106,9 +122,9 @@ enum batadv_icmp_packettype {
  * @BATADV_MCAST_WANT_ALL_IPV6: we want all IPv6 multicast packets
  */
 enum batadv_mcast_flags {
-	BATADV_MCAST_WANT_ALL_UNSNOOPABLES	= BIT(0),
-	BATADV_MCAST_WANT_ALL_IPV4		= BIT(1),
-	BATADV_MCAST_WANT_ALL_IPV6		= BIT(2),
+	BATADV_MCAST_WANT_ALL_UNSNOOPABLES	= 1UL << 0,
+	BATADV_MCAST_WANT_ALL_IPV4		= 1UL << 1,
+	BATADV_MCAST_WANT_ALL_IPV6		= 1UL << 2,
 };
 
 /* tt data subtypes */
@@ -122,10 +138,10 @@ enum batadv_mcast_flags {
  * @BATADV_TT_FULL_TABLE: contains full table to replace existing table
  */
 enum batadv_tt_data_flags {
-	BATADV_TT_OGM_DIFF   = BIT(0),
-	BATADV_TT_REQUEST    = BIT(1),
-	BATADV_TT_RESPONSE   = BIT(2),
-	BATADV_TT_FULL_TABLE = BIT(4),
+	BATADV_TT_OGM_DIFF   = 1UL << 0,
+	BATADV_TT_REQUEST    = 1UL << 1,
+	BATADV_TT_RESPONSE   = 1UL << 2,
+	BATADV_TT_FULL_TABLE = 1UL << 4,
 };
 
 /**
@@ -133,10 +149,17 @@ enum batadv_tt_data_flags {
  * @BATADV_VLAN_HAS_TAG: whether the field contains a valid vlan tag or not
  */
 enum batadv_vlan_flags {
-	BATADV_VLAN_HAS_TAG	= BIT(15),
+	BATADV_VLAN_HAS_TAG	= 1UL << 15,
 };
 
-/* claim frame types for the bridge loop avoidance */
+/**
+ * enum batadv_bla_claimframe - claim frame types for the bridge loop avoidance
+ * @BATADV_CLAIM_TYPE_CLAIM: claim of a client mac address
+ * @BATADV_CLAIM_TYPE_UNCLAIM: unclaim of a client mac address
+ * @BATADV_CLAIM_TYPE_ANNOUNCE: announcement of backbone with current crc
+ * @BATADV_CLAIM_TYPE_REQUEST: request of full claim table
+ * @BATADV_CLAIM_TYPE_LOOPDETECT: mesh-traversing loop detect packet
+ */
 enum batadv_bla_claimframe {
 	BATADV_CLAIM_TYPE_CLAIM		= 0x00,
 	BATADV_CLAIM_TYPE_UNCLAIM	= 0x01,
@@ -168,8 +191,8 @@ enum batadv_tvlv_type {
  * transport the claim type and the group id
  */
 struct batadv_bla_claim_dst {
-	u8     magic[3];	/* FF:43:05 */
-	u8     type;		/* bla_claimframe */
+	__u8   magic[3];	/* FF:43:05 */
+	__u8   type;		/* bla_claimframe */
 	__be16 group;		/* group id */
 };
 
@@ -189,15 +212,15 @@ struct batadv_bla_claim_dst {
  * @tvlv_len: length of tvlv data following the ogm header
  */
 struct batadv_ogm_packet {
-	u8     packet_type;
-	u8     version;
-	u8     ttl;
-	u8     flags;
+	__u8   packet_type;
+	__u8   version;
+	__u8   ttl;
+	__u8   flags;
 	__be32 seqno;
-	u8     orig[ETH_ALEN];
-	u8     prev_sender[ETH_ALEN];
-	u8     reserved;
-	u8     tq;
+	__u8   orig[ETH_ALEN];
+	__u8   prev_sender[ETH_ALEN];
+	__u8   reserved;
+	__u8   tq;
 	__be16 tvlv_len;
 	/* __packed is not needed as the struct size is divisible by 4,
 	 * and the largest data type in this struct has a size of 4.
@@ -218,12 +241,12 @@ struct batadv_ogm_packet {
  * @throughput: the currently flooded path throughput
  */
 struct batadv_ogm2_packet {
-	u8     packet_type;
-	u8     version;
-	u8     ttl;
-	u8     flags;
+	__u8   packet_type;
+	__u8   version;
+	__u8   ttl;
+	__u8   flags;
 	__be32 seqno;
-	u8     orig[ETH_ALEN];
+	__u8   orig[ETH_ALEN];
 	__be16 tvlv_len;
 	__be32 throughput;
 	/* __packed is not needed as the struct size is divisible by 4,
@@ -242,9 +265,9 @@ struct batadv_ogm2_packet {
  * @elp_interval: currently used ELP sending interval in ms
  */
 struct batadv_elp_packet {
-	u8     packet_type;
-	u8     version;
-	u8     orig[ETH_ALEN];
+	__u8   packet_type;
+	__u8   version;
+	__u8   orig[ETH_ALEN];
 	__be32 seqno;
 	__be32 elp_interval;
 };
@@ -267,14 +290,14 @@ struct batadv_elp_packet {
  * members are padded the same way as they are in real packets.
  */
 struct batadv_icmp_header {
-	u8 packet_type;
-	u8 version;
-	u8 ttl;
-	u8 msg_type; /* see ICMP message types above */
-	u8 dst[ETH_ALEN];
-	u8 orig[ETH_ALEN];
-	u8 uid;
-	u8 align[3];
+	__u8 packet_type;
+	__u8 version;
+	__u8 ttl;
+	__u8 msg_type; /* see ICMP message types above */
+	__u8 dst[ETH_ALEN];
+	__u8 orig[ETH_ALEN];
+	__u8 uid;
+	__u8 align[3];
 };
 
 /**
@@ -290,14 +313,14 @@ struct batadv_icmp_header {
  * @seqno: ICMP sequence number
  */
 struct batadv_icmp_packet {
-	u8     packet_type;
-	u8     version;
-	u8     ttl;
-	u8     msg_type; /* see ICMP message types above */
-	u8     dst[ETH_ALEN];
-	u8     orig[ETH_ALEN];
-	u8     uid;
-	u8     reserved;
+	__u8   packet_type;
+	__u8   version;
+	__u8   ttl;
+	__u8   msg_type; /* see ICMP message types above */
+	__u8   dst[ETH_ALEN];
+	__u8   orig[ETH_ALEN];
+	__u8   uid;
+	__u8   reserved;
 	__be16 seqno;
 };
 
@@ -319,15 +342,15 @@ struct batadv_icmp_packet {
  *  store it using network order
  */
 struct batadv_icmp_tp_packet {
-	u8  packet_type;
-	u8  version;
-	u8  ttl;
-	u8  msg_type; /* see ICMP message types above */
-	u8  dst[ETH_ALEN];
-	u8  orig[ETH_ALEN];
-	u8  uid;
-	u8  subtype;
-	u8  session[2];
+	__u8   packet_type;
+	__u8   version;
+	__u8   ttl;
+	__u8   msg_type; /* see ICMP message types above */
+	__u8   dst[ETH_ALEN];
+	__u8   orig[ETH_ALEN];
+	__u8   uid;
+	__u8   subtype;
+	__u8   session[2];
 	__be32 seqno;
 	__be32 timestamp;
 };
@@ -358,16 +381,16 @@ enum batadv_icmp_tp_subtype {
  * @rr: route record array
  */
 struct batadv_icmp_packet_rr {
-	u8     packet_type;
-	u8     version;
-	u8     ttl;
-	u8     msg_type; /* see ICMP message types above */
-	u8     dst[ETH_ALEN];
-	u8     orig[ETH_ALEN];
-	u8     uid;
-	u8     rr_cur;
+	__u8   packet_type;
+	__u8   version;
+	__u8   ttl;
+	__u8   msg_type; /* see ICMP message types above */
+	__u8   dst[ETH_ALEN];
+	__u8   orig[ETH_ALEN];
+	__u8   uid;
+	__u8   rr_cur;
 	__be16 seqno;
-	u8     rr[BATADV_RR_LEN][ETH_ALEN];
+	__u8   rr[BATADV_RR_LEN][ETH_ALEN];
 };
 
 #define BATADV_ICMP_MAX_PACKET_SIZE	sizeof(struct batadv_icmp_packet_rr)
@@ -393,11 +416,11 @@ struct batadv_icmp_packet_rr {
  * @dest: originator destination of the unicast packet
  */
 struct batadv_unicast_packet {
-	u8 packet_type;
-	u8 version;
-	u8 ttl;
-	u8 ttvn; /* destination translation table version number */
-	u8 dest[ETH_ALEN];
+	__u8 packet_type;
+	__u8 version;
+	__u8 ttl;
+	__u8 ttvn; /* destination translation table version number */
+	__u8 dest[ETH_ALEN];
 	/* "4 bytes boundary + 2 bytes" long to make the payload after the
 	 * following ethernet header again 4 bytes boundary aligned
 	 */
@@ -412,9 +435,9 @@ struct batadv_unicast_packet {
  */
 struct batadv_unicast_4addr_packet {
 	struct batadv_unicast_packet u;
-	u8 src[ETH_ALEN];
-	u8 subtype;
-	u8 reserved;
+	__u8 src[ETH_ALEN];
+	__u8 subtype;
+	__u8 reserved;
 	/* "4 bytes boundary + 2 bytes" long to make the payload after the
 	 * following ethernet header again 4 bytes boundary aligned
 	 */
@@ -434,22 +457,22 @@ struct batadv_unicast_4addr_packet {
  * @total_size: size of the merged packet
  */
 struct batadv_frag_packet {
-	u8     packet_type;
-	u8     version;  /* batman version field */
-	u8     ttl;
+	__u8   packet_type;
+	__u8   version;  /* batman version field */
+	__u8   ttl;
 #if defined(__BIG_ENDIAN_BITFIELD)
-	u8     no:4;
-	u8     priority:3;
-	u8     reserved:1;
+	__u8   no:4;
+	__u8   priority:3;
+	__u8   reserved:1;
 #elif defined(__LITTLE_ENDIAN_BITFIELD)
-	u8     reserved:1;
-	u8     priority:3;
-	u8     no:4;
+	__u8   reserved:1;
+	__u8   priority:3;
+	__u8   no:4;
 #else
 #error "unknown bitfield endianness"
 #endif
-	u8     dest[ETH_ALEN];
-	u8     orig[ETH_ALEN];
+	__u8   dest[ETH_ALEN];
+	__u8   orig[ETH_ALEN];
 	__be16 seqno;
 	__be16 total_size;
 };
@@ -464,12 +487,12 @@ struct batadv_frag_packet {
  * @orig: originator of the broadcast packet
  */
 struct batadv_bcast_packet {
-	u8     packet_type;
-	u8     version;  /* batman version field */
-	u8     ttl;
-	u8     reserved;
+	__u8   packet_type;
+	__u8   version;  /* batman version field */
+	__u8   ttl;
+	__u8   reserved;
 	__be32 seqno;
-	u8     orig[ETH_ALEN];
+	__u8   orig[ETH_ALEN];
 	/* "4 bytes boundary + 2 bytes" long to make the payload after the
 	 * following ethernet header again 4 bytes boundary aligned
 	 */
@@ -493,19 +516,19 @@ struct batadv_bcast_packet {
  * @coded_len: length of network coded part of the payload
  */
 struct batadv_coded_packet {
-	u8     packet_type;
-	u8     version;  /* batman version field */
-	u8     ttl;
-	u8     first_ttvn;
-	/* u8  first_dest[ETH_ALEN]; - saved in mac header destination */
-	u8     first_source[ETH_ALEN];
-	u8     first_orig_dest[ETH_ALEN];
+	__u8   packet_type;
+	__u8   version;  /* batman version field */
+	__u8   ttl;
+	__u8   first_ttvn;
+	/* __u8 first_dest[ETH_ALEN]; - saved in mac header destination */
+	__u8   first_source[ETH_ALEN];
+	__u8   first_orig_dest[ETH_ALEN];
 	__be32 first_crc;
-	u8     second_ttl;
-	u8     second_ttvn;
-	u8     second_dest[ETH_ALEN];
-	u8     second_source[ETH_ALEN];
-	u8     second_orig_dest[ETH_ALEN];
+	__u8   second_ttl;
+	__u8   second_ttvn;
+	__u8   second_dest[ETH_ALEN];
+	__u8   second_source[ETH_ALEN];
+	__u8   second_orig_dest[ETH_ALEN];
 	__be32 second_crc;
 	__be16 coded_len;
 };
@@ -524,14 +547,14 @@ struct batadv_coded_packet {
  * @align: 2 bytes to align the header to a 4 byte boundary
  */
 struct batadv_unicast_tvlv_packet {
-	u8     packet_type;
-	u8     version;  /* batman version field */
-	u8     ttl;
-	u8     reserved;
-	u8     dst[ETH_ALEN];
-	u8     src[ETH_ALEN];
+	__u8   packet_type;
+	__u8   version;  /* batman version field */
+	__u8   ttl;
+	__u8   reserved;
+	__u8   dst[ETH_ALEN];
+	__u8   src[ETH_ALEN];
 	__be16 tvlv_len;
-	u16    align;
+	__u16  align;
 };
 
 /**
@@ -541,8 +564,8 @@ struct batadv_unicast_tvlv_packet {
  * @len: tvlv container length
  */
 struct batadv_tvlv_hdr {
-	u8     type;
-	u8     version;
+	__u8   type;
+	__u8   version;
 	__be16 len;
 };
 
@@ -565,8 +588,8 @@ struct batadv_tvlv_gateway_data {
  *  one batadv_tvlv_tt_vlan_data object per announced vlan
  */
 struct batadv_tvlv_tt_data {
-	u8     flags;
-	u8     ttvn;
+	__u8   flags;
+	__u8   ttvn;
 	__be16 num_vlan;
 };
 
@@ -580,7 +603,7 @@ struct batadv_tvlv_tt_data {
 struct batadv_tvlv_tt_vlan_data {
 	__be32 crc;
 	__be16 vid;
-	u16    reserved;
+	__u16  reserved;
 };
 
 /**
@@ -592,9 +615,9 @@ struct batadv_tvlv_tt_vlan_data {
  * @vid: VLAN identifier
  */
 struct batadv_tvlv_tt_change {
-	u8     flags;
-	u8     reserved[3];
-	u8     addr[ETH_ALEN];
+	__u8   flags;
+	__u8   reserved[3];
+	__u8   addr[ETH_ALEN];
 	__be16 vid;
 };
 
@@ -604,7 +627,7 @@ struct batadv_tvlv_tt_change {
  * @vid: VLAN identifier
  */
 struct batadv_tvlv_roam_adv {
-	u8     client[ETH_ALEN];
+	__u8   client[ETH_ALEN];
 	__be16 vid;
 };
 
@@ -614,8 +637,8 @@ struct batadv_tvlv_roam_adv {
  * @reserved: reserved field
  */
 struct batadv_tvlv_mcast_data {
-	u8 flags;
-	u8 reserved[3];
+	__u8 flags;
+	__u8 reserved[3];
 };
 
-#endif /* _NET_BATMAN_ADV_PACKET_H_ */
+#endif /* _UAPI_LINUX_BATADV_PACKET_H_ */
diff --git a/include/uapi/linux/batman_adv.h b/include/uapi/linux/batman_adv.h
index efd641c..ae00c99 100644
--- a/include/uapi/linux/batman_adv.h
+++ b/include/uapi/linux/batman_adv.h
@@ -1,18 +1,25 @@
+/* SPDX-License-Identifier: MIT */
 /* Copyright (C) 2016-2017  B.A.T.M.A.N. contributors:
  *
  * Matthias Schiffer
  *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
  *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
- * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
- * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
- * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
  */
 
 #ifndef _UAPI_LINUX_BATMAN_ADV_H_
diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h
index 4c223ab..405317f 100644
--- a/include/uapi/linux/bpf.h
+++ b/include/uapi/linux/bpf.h
@@ -197,8 +197,14 @@ enum bpf_attach_type {
  */
 #define BPF_F_STRICT_ALIGNMENT	(1U << 0)
 
+/* when bpf_ldimm64->src_reg == BPF_PSEUDO_MAP_FD, bpf_ldimm64->imm == fd */
 #define BPF_PSEUDO_MAP_FD	1
 
+/* when bpf_call->src_reg == BPF_PSEUDO_CALL, bpf_call->imm == pc-relative
+ * offset to another bpf function
+ */
+#define BPF_PSEUDO_CALL		1
+
 /* flags for BPF_MAP_UPDATE_ELEM command */
 #define BPF_ANY		0 /* create new element or update existing */
 #define BPF_NOEXIST	1 /* create new element if it didn't exist */
@@ -677,6 +683,10 @@ union bpf_attr {
  *     @buf: buf to fill
  *     @buf_size: size of the buf
  *     Return : 0 on success or negative error code
+ *
+ * int bpf_override_return(pt_regs, rc)
+ *	@pt_regs: pointer to struct pt_regs
+ *	@rc: the return value to set
  */
 #define __BPF_FUNC_MAPPER(FN)		\
 	FN(unspec),			\
@@ -736,7 +746,8 @@ union bpf_attr {
 	FN(xdp_adjust_meta),		\
 	FN(perf_event_read_value),	\
 	FN(perf_prog_read_value),	\
-	FN(getsockopt),
+	FN(getsockopt),			\
+	FN(override_return),
 
 /* integer value in 'imm' field of BPF_CALL instruction selects which helper
  * function eBPF program intends to call
@@ -888,6 +899,9 @@ struct xdp_md {
 	__u32 data;
 	__u32 data_end;
 	__u32 data_meta;
+	/* Accesses below go through struct xdp_rxq_info */
+	__u32 ingress_ifindex; /* rxq->dev->ifindex */
+	__u32 rx_queue_index;  /* rxq->queue_index  */
 };
 
 enum sk_action {
@@ -910,6 +924,9 @@ struct bpf_prog_info {
 	__u32 nr_map_ids;
 	__aligned_u64 map_ids;
 	char name[BPF_OBJ_NAME_LEN];
+	__u32 ifindex;
+	__u64 netns_dev;
+	__u64 netns_ino;
 } __attribute__((aligned(8)));
 
 struct bpf_map_info {
@@ -941,6 +958,12 @@ struct bpf_sock_ops {
 	__u32 local_ip6[4];	/* Stored in network byte order */
 	__u32 remote_port;	/* Stored in network byte order */
 	__u32 local_port;	/* stored in host byte order */
+	__u32 is_fullsock;	/* Some TCP fields are only valid if
+				 * there is a full socket. If not, the
+				 * fields read as zero.
+				 */
+	__u32 snd_cwnd;
+	__u32 srtt_us;		/* Averaged RTT << 3 in usecs */
 };
 
 /* List of known BPF sock_ops operators.
@@ -995,7 +1018,8 @@ struct bpf_perf_event_value {
 #define BPF_DEVCG_DEV_CHAR	(1ULL << 1)
 
 struct bpf_cgroup_dev_ctx {
-	__u32 access_type; /* (access << 16) | type */
+	/* access_type encoded as (BPF_DEVCG_ACC_* << 16) | BPF_DEVCG_DEV_* */
+	__u32 access_type;
 	__u32 major;
 	__u32 minor;
 };
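
BPF_PSEUDO_CALL above is the uapi anchor for bpf-to-bpf calls: the
verifier treats a BPF_CALL insn whose src_reg is BPF_PSEUDO_CALL as a
pc-relative call into another BPF function, with imm holding the offset.
A hedged sketch of how such a call typically comes about (clang -O2
-target bpf emits a function it cannot inline as a separate subprogram;
the SEC() macro is an assumption from the usual sample code):

#include <linux/bpf.h>

#define SEC(name) __attribute__((section(name), used))

static __attribute__((noinline)) int clamp_len(int len)
{
	return len > 1500 ? 1500 : len;
}

SEC("socket")
int prog(struct __sk_buff *skb)
{
	/* becomes: call insn, src_reg == BPF_PSEUDO_CALL,
	 * imm == pc-relative offset of clamp_len()
	 */
	return clamp_len(skb->len);
}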
diff --git a/include/uapi/linux/ethtool.h b/include/uapi/linux/ethtool.h
index ac71559..44a0b67 100644
--- a/include/uapi/linux/ethtool.h
+++ b/include/uapi/linux/ethtool.h
@@ -1686,6 +1686,7 @@ enum ethtool_reset_flags {
 	ETH_RESET_PHY		= 1 << 6,	/* Transceiver/PHY */
 	ETH_RESET_RAM		= 1 << 7,	/* RAM shared between
 						 * multiple components */
+	ETH_RESET_AP		= 1 << 8,	/* Application processor */
 
 	ETH_RESET_DEDICATED	= 0x0000ffff,	/* All components dedicated to
 						 * this interface */
diff --git a/include/uapi/linux/if_ether.h b/include/uapi/linux/if_ether.h
index 144de4d..f8cb576 100644
--- a/include/uapi/linux/if_ether.h
+++ b/include/uapi/linux/if_ether.h
@@ -48,6 +48,7 @@
 #define ETH_P_PUP	0x0200		/* Xerox PUP packet		*/
 #define ETH_P_PUPAT	0x0201		/* Xerox PUP Addr Trans packet	*/
 #define ETH_P_TSN	0x22F0		/* TSN (IEEE 1722) packet	*/
+#define ETH_P_ERSPAN2	0x22EB		/* ERSPAN version 2 (type III)	*/
 #define ETH_P_IP	0x0800		/* Internet Protocol packet	*/
 #define ETH_P_X25	0x0805		/* CCITT X.25			*/
 #define ETH_P_ARP	0x0806		/* Address Resolution packet	*/
diff --git a/include/uapi/linux/if_link.h b/include/uapi/linux/if_link.h
index 19fc026..f8f04fe 100644
--- a/include/uapi/linux/if_link.h
+++ b/include/uapi/linux/if_link.h
@@ -732,6 +732,8 @@ enum {
 	IFLA_VF_STATS_BROADCAST,
 	IFLA_VF_STATS_MULTICAST,
 	IFLA_VF_STATS_PAD,
+	IFLA_VF_STATS_RX_DROPPED,
+	IFLA_VF_STATS_TX_DROPPED,
 	__IFLA_VF_STATS_MAX,
 };
 
diff --git a/include/uapi/linux/if_macsec.h b/include/uapi/linux/if_macsec.h
index 719d243..2e52283 100644
--- a/include/uapi/linux/if_macsec.h
+++ b/include/uapi/linux/if_macsec.h
@@ -18,12 +18,17 @@
 #define MACSEC_GENL_NAME "macsec"
 #define MACSEC_GENL_VERSION 1
 
-#define MACSEC_MAX_KEY_LEN 128
+#define MACSEC_MAX_KEY_LEN 256
 
 #define MACSEC_KEYID_LEN 16
 
-#define MACSEC_DEFAULT_CIPHER_ID   0x0080020001000001ULL
-#define MACSEC_DEFAULT_CIPHER_ALT  0x0080C20001000001ULL
+/* cipher IDs as per IEEE802.1AEbn-2011 */
+#define MACSEC_CIPHER_ID_GCM_AES_128 0x0080C20001000001ULL
+#define MACSEC_CIPHER_ID_GCM_AES_256 0x0080C20001000002ULL
+
+#define MACSEC_DEFAULT_CIPHER_ID     MACSEC_CIPHER_ID_GCM_AES_128
+/* deprecated cipher ID for GCM-AES-128 */
+#define MACSEC_DEFAULT_CIPHER_ALT    0x0080020001000001ULL
 
 #define MACSEC_MIN_ICV_LEN 8
 #define MACSEC_MAX_ICV_LEN 32
diff --git a/include/uapi/linux/if_tun.h b/include/uapi/linux/if_tun.h
index 030d3e6..fb38c17 100644
--- a/include/uapi/linux/if_tun.h
+++ b/include/uapi/linux/if_tun.h
@@ -57,6 +57,7 @@
  */
 #define TUNSETVNETBE _IOW('T', 222, int)
 #define TUNGETVNETBE _IOR('T', 223, int)
+#define TUNSETSTEERINGEBPF _IOR('T', 224, int)
 
 /* TUNSETIFF ifr flags */
 #define IFF_TUN		0x0001
diff --git a/include/uapi/linux/if_tunnel.h b/include/uapi/linux/if_tunnel.h
index e68dadb..1b3d148 100644
--- a/include/uapi/linux/if_tunnel.h
+++ b/include/uapi/linux/if_tunnel.h
@@ -137,6 +137,9 @@ enum {
 	IFLA_GRE_IGNORE_DF,
 	IFLA_GRE_FWMARK,
 	IFLA_GRE_ERSPAN_INDEX,
+	IFLA_GRE_ERSPAN_VER,
+	IFLA_GRE_ERSPAN_DIR,
+	IFLA_GRE_ERSPAN_HWID,
 	__IFLA_GRE_MAX,
 };
 
diff --git a/include/uapi/linux/inet_diag.h b/include/uapi/linux/inet_diag.h
index 817d807..14565d7 100644
--- a/include/uapi/linux/inet_diag.h
+++ b/include/uapi/linux/inet_diag.h
@@ -92,6 +92,8 @@ enum {
 	INET_DIAG_BC_D_COND,
 	INET_DIAG_BC_DEV_COND,   /* u32 ifindex */
 	INET_DIAG_BC_MARK_COND,
+	INET_DIAG_BC_S_EQ,
+	INET_DIAG_BC_D_EQ,
 };
 
 struct inet_diag_hostcond {
diff --git a/include/uapi/linux/l2tp.h b/include/uapi/linux/l2tp.h
index d84ce5c..71e6279 100644
--- a/include/uapi/linux/l2tp.h
+++ b/include/uapi/linux/l2tp.h
@@ -65,7 +65,7 @@ struct sockaddr_l2tpip6 {
  * TUNNEL_MODIFY	- CONN_ID, udpcsum
  * TUNNEL_GETSTATS	- CONN_ID, (stats)
  * TUNNEL_GET		- CONN_ID, (...)
- * SESSION_CREATE	- SESSION_ID, PW_TYPE, offset, data_seq, cookie, peer_cookie, offset, l2spec
+ * SESSION_CREATE	- SESSION_ID, PW_TYPE, data_seq, cookie, peer_cookie, l2spec
  * SESSION_DELETE	- SESSION_ID
  * SESSION_MODIFY	- SESSION_ID, data_seq
  * SESSION_GET		- SESSION_ID, (...)
@@ -94,7 +94,7 @@ enum {
 	L2TP_ATTR_NONE,			/* no data */
 	L2TP_ATTR_PW_TYPE,		/* u16, enum l2tp_pwtype */
 	L2TP_ATTR_ENCAP_TYPE,		/* u16, enum l2tp_encap_type */
-	L2TP_ATTR_OFFSET,		/* u16 */
+	L2TP_ATTR_OFFSET,		/* u16 (not used) */
 	L2TP_ATTR_DATA_SEQ,		/* u16 */
 	L2TP_ATTR_L2SPEC_TYPE,		/* u8, enum l2tp_l2spec_type */
 	L2TP_ATTR_L2SPEC_LEN,		/* u8, enum l2tp_l2spec_type */
diff --git a/include/uapi/linux/netfilter/nf_conntrack_common.h b/include/uapi/linux/netfilter/nf_conntrack_common.h
index 57ccfb3..9574bd4 100644
--- a/include/uapi/linux/netfilter/nf_conntrack_common.h
+++ b/include/uapi/linux/netfilter/nf_conntrack_common.h
@@ -101,12 +101,16 @@ enum ip_conntrack_status {
 	IPS_HELPER_BIT = 13,
 	IPS_HELPER = (1 << IPS_HELPER_BIT),
 
+	/* Conntrack has been offloaded to flow table. */
+	IPS_OFFLOAD_BIT = 14,
+	IPS_OFFLOAD = (1 << IPS_OFFLOAD_BIT),
+
 	/* Be careful here, modifying these bits can make things messy,
 	 * so don't let users modify them directly.
 	 */
 	IPS_UNCHANGEABLE_MASK = (IPS_NAT_DONE_MASK | IPS_NAT_MASK |
 				 IPS_EXPECTED | IPS_CONFIRMED | IPS_DYING |
-				 IPS_SEQ_ADJUST | IPS_TEMPLATE),
+				 IPS_SEQ_ADJUST | IPS_TEMPLATE | IPS_OFFLOAD),
 
 	__IPS_MAX_BIT = 14,
 };
diff --git a/include/uapi/linux/netfilter/nf_tables.h b/include/uapi/linux/netfilter/nf_tables.h
index a3ee277..53e8dd2 100644
--- a/include/uapi/linux/netfilter/nf_tables.h
+++ b/include/uapi/linux/netfilter/nf_tables.h
@@ -92,6 +92,9 @@ enum nft_verdicts {
  * @NFT_MSG_GETOBJ: get a stateful object (enum nft_obj_attributes)
  * @NFT_MSG_DELOBJ: delete a stateful object (enum nft_obj_attributes)
  * @NFT_MSG_GETOBJ_RESET: get and reset a stateful object (enum nft_obj_attributes)
+ * @NFT_MSG_NEWFLOWTABLE: add new flow table (enum nft_flowtable_attributes)
+ * @NFT_MSG_GETFLOWTABLE: get flow table (enum nft_flowtable_attributes)
+ * @NFT_MSG_DELFLOWTABLE: delete flow table (enum nft_flowtable_attributes)
  */
 enum nf_tables_msg_types {
 	NFT_MSG_NEWTABLE,
@@ -116,6 +119,9 @@ enum nf_tables_msg_types {
 	NFT_MSG_GETOBJ,
 	NFT_MSG_DELOBJ,
 	NFT_MSG_GETOBJ_RESET,
+	NFT_MSG_NEWFLOWTABLE,
+	NFT_MSG_GETFLOWTABLE,
+	NFT_MSG_DELFLOWTABLE,
 	NFT_MSG_MAX,
 };
 
@@ -777,6 +783,7 @@ enum nft_exthdr_attributes {
  * @NFT_META_OIFGROUP: packet output interface group
  * @NFT_META_CGROUP: socket control group (skb->sk->sk_classid)
  * @NFT_META_PRANDOM: a 32bit pseudo-random number
+ * @NFT_META_SECPATH: boolean, secpath_exists (!!skb->sp)
  */
 enum nft_meta_keys {
 	NFT_META_LEN,
@@ -804,6 +811,7 @@ enum nft_meta_keys {
 	NFT_META_OIFGROUP,
 	NFT_META_CGROUP,
 	NFT_META_PRANDOM,
+	NFT_META_SECPATH,
 };
 
 /**
@@ -949,6 +957,17 @@ enum nft_ct_attributes {
 };
 #define NFTA_CT_MAX		(__NFTA_CT_MAX - 1)
 
+/**
+ * enum nft_flow_attributes - ct offload expression attributes
+ * @NFTA_FLOW_TABLE_NAME: flow table name (NLA_STRING)
+ */
+enum nft_offload_attributes {
+	NFTA_FLOW_UNSPEC,
+	NFTA_FLOW_TABLE_NAME,
+	__NFTA_FLOW_MAX,
+};
+#define NFTA_FLOW_MAX		(__NFTA_FLOW_MAX - 1)
+
 enum nft_limit_type {
 	NFT_LIMIT_PKTS,
 	NFT_LIMIT_PKT_BYTES
@@ -1308,6 +1327,53 @@ enum nft_object_attributes {
 #define NFTA_OBJ_MAX		(__NFTA_OBJ_MAX - 1)
 
 /**
+ * enum nft_flowtable_attributes - nf_tables flow table netlink attributes
+ *
+ * @NFTA_FLOWTABLE_TABLE: name of the table containing the expression (NLA_STRING)
+ * @NFTA_FLOWTABLE_NAME: name of this flow table (NLA_STRING)
+ * @NFTA_FLOWTABLE_HOOK: netfilter hook configuration (NLA_U32)
+ * @NFTA_FLOWTABLE_USE: number of references to this flow table (NLA_U32)
+ */
+enum nft_flowtable_attributes {
+	NFTA_FLOWTABLE_UNSPEC,
+	NFTA_FLOWTABLE_TABLE,
+	NFTA_FLOWTABLE_NAME,
+	NFTA_FLOWTABLE_HOOK,
+	NFTA_FLOWTABLE_USE,
+	__NFTA_FLOWTABLE_MAX
+};
+#define NFTA_FLOWTABLE_MAX	(__NFTA_FLOWTABLE_MAX - 1)
+
+/**
+ * enum nft_flowtable_hook_attributes - nf_tables flow table hook netlink attributes
+ *
+ * @NFTA_FLOWTABLE_HOOK_NUM: netfilter hook number (NLA_U32)
+ * @NFTA_FLOWTABLE_HOOK_PRIORITY: netfilter hook priority (NLA_U32)
+ * @NFTA_FLOWTABLE_HOOK_DEVS: input devices this flow table is bound to (NLA_NESTED)
+ */
+enum nft_flowtable_hook_attributes {
+	NFTA_FLOWTABLE_HOOK_UNSPEC,
+	NFTA_FLOWTABLE_HOOK_NUM,
+	NFTA_FLOWTABLE_HOOK_PRIORITY,
+	NFTA_FLOWTABLE_HOOK_DEVS,
+	__NFTA_FLOWTABLE_HOOK_MAX
+};
+#define NFTA_FLOWTABLE_HOOK_MAX	(__NFTA_FLOWTABLE_HOOK_MAX - 1)
+
+/**
+ * enum nft_device_attributes - nf_tables device netlink attributes
+ *
+ * @NFTA_DEVICE_NAME: name of this device (NLA_STRING)
+ */
+enum nft_devices_attributes {
+	NFTA_DEVICE_UNSPEC,
+	NFTA_DEVICE_NAME,
+	__NFTA_DEVICE_MAX
+};
+#define NFTA_DEVICE_MAX		(__NFTA_DEVICE_MAX - 1)
+
+
+/**
  * enum nft_trace_attributes - nf_tables trace netlink attributes
  *
  * @NFTA_TRACE_TABLE: name of the table (NLA_STRING)
diff --git a/include/uapi/linux/netfilter/xt_connlimit.h b/include/uapi/linux/netfilter/xt_connlimit.h
index 07e5e9d..d4d1943 100644
--- a/include/uapi/linux/netfilter/xt_connlimit.h
+++ b/include/uapi/linux/netfilter/xt_connlimit.h
@@ -27,7 +27,7 @@ struct xt_connlimit_info {
 	__u32 flags;
 
 	/* Used internally by the kernel */
-	struct xt_connlimit_data *data __attribute__((aligned(8)));
+	struct nf_conncount_data *data __attribute__((aligned(8)));
 };
 
 #endif /* _XT_CONNLIMIT_H */
diff --git a/include/uapi/linux/netfilter_arp.h b/include/uapi/linux/netfilter_arp.h
index 81b6a4c..791dfc5 100644
--- a/include/uapi/linux/netfilter_arp.h
+++ b/include/uapi/linux/netfilter_arp.h
@@ -15,6 +15,9 @@
 #define NF_ARP_IN	0
 #define NF_ARP_OUT	1
 #define NF_ARP_FORWARD	2
+
+#ifndef __KERNEL__
 #define NF_ARP_NUMHOOKS	3
+#endif
 
 #endif /* __LINUX_ARP_NETFILTER_H */
diff --git a/include/uapi/linux/netfilter_decnet.h b/include/uapi/linux/netfilter_decnet.h
index 9089c38..61f1c7d 100644
--- a/include/uapi/linux/netfilter_decnet.h
+++ b/include/uapi/linux/netfilter_decnet.h
@@ -24,6 +24,9 @@
 #define NFC_DN_IF_IN		0x0004
 /* Output device. */
 #define NFC_DN_IF_OUT		0x0008
+
+/* kernel define is in netfilter_defs.h */
+#define NF_DN_NUMHOOKS		7
 #endif /* ! __KERNEL__ */
 
 /* DECnet Hooks */
@@ -41,7 +44,6 @@
 #define NF_DN_HELLO		5
 /* Input Routing Packets */
 #define NF_DN_ROUTE		6
-#define NF_DN_NUMHOOKS		7
 
 enum nf_dn_hook_priorities {
 	NF_DN_PRI_FIRST = INT_MIN,
diff --git a/include/uapi/linux/nl80211.h b/include/uapi/linux/nl80211.h
index f882fe1..c587a61 100644
--- a/include/uapi/linux/nl80211.h
+++ b/include/uapi/linux/nl80211.h
@@ -3862,6 +3862,9 @@ enum nl80211_bss_scan_width {
  *	@NL80211_BSS_PARENT_BSSID. (u64).
  * @NL80211_BSS_PARENT_BSSID: the BSS according to which @NL80211_BSS_PARENT_TSF
  *	is set.
+ * @NL80211_BSS_CHAIN_SIGNAL: per-chain signal strength of last BSS update.
+ *	Contains a nested array of signal strength attributes (u8, dBm),
+ *	using the nesting index as the antenna number.
  * @__NL80211_BSS_AFTER_LAST: internal
  * @NL80211_BSS_MAX: highest BSS attribute
  */
@@ -3885,6 +3888,7 @@ enum nl80211_bss {
 	NL80211_BSS_PAD,
 	NL80211_BSS_PARENT_TSF,
 	NL80211_BSS_PARENT_BSSID,
+	NL80211_BSS_CHAIN_SIGNAL,
 
 	/* keep last */
 	__NL80211_BSS_AFTER_LAST,
diff --git a/include/uapi/linux/perf_event.h b/include/uapi/linux/perf_event.h
index b9a4953..7695336 100644
--- a/include/uapi/linux/perf_event.h
+++ b/include/uapi/linux/perf_event.h
@@ -418,6 +418,27 @@ struct perf_event_attr {
 	__u16	__reserved_2;	/* align to __u64 */
 };
 
+/*
+ * Structure used by the PERF_EVENT_IOC_QUERY_BPF command below
+ * to query bpf programs attached to the same perf tracepoint
+ * as the given perf event.
+ */
+struct perf_event_query_bpf {
+	/*
+	 * Length of the ids array below
+	 */
+	__u32	ids_len;
+	/*
+	 * Set by the kernel to indicate the number of
+	 * available programs
+	 */
+	__u32	prog_cnt;
+	/*
+	 * User provided buffer to store program ids
+	 */
+	__u32	ids[0];
+};
+
 #define perf_flags(attr)	(*(&(attr)->read_format + 1))
 
 /*
@@ -433,6 +454,7 @@ struct perf_event_attr {
 #define PERF_EVENT_IOC_ID		_IOR('$', 7, __u64 *)
 #define PERF_EVENT_IOC_SET_BPF		_IOW('$', 8, __u32)
 #define PERF_EVENT_IOC_PAUSE_OUTPUT	_IOW('$', 9, __u32)
+#define PERF_EVENT_IOC_QUERY_BPF	_IOWR('$', 10, struct perf_event_query_bpf *)
 
 enum perf_event_ioc_flags {
 	PERF_IOC_FLAG_GROUP		= 1U << 0,
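
From userspace the new command is a plain ioctl on a perf event fd. A
hedged sketch; perf_fd is assumed to be an already opened tracepoint
event, and the kernel may report more programs in prog_cnt than fit in
ids_len:

#include <stdio.h>
#include <stdlib.h>
#include <sys/ioctl.h>
#include <linux/perf_event.h>

static void show_attached_progs(int perf_fd)
{
	const __u32 want = 16;
	struct perf_event_query_bpf *query;
	__u32 i;

	query = calloc(1, sizeof(*query) + want * sizeof(__u32));
	if (!query)
		return;

	query->ids_len = want;
	if (ioctl(perf_fd, PERF_EVENT_IOC_QUERY_BPF, query) == 0) {
		printf("%u program(s) attached\n", query->prog_cnt);
		for (i = 0; i < query->prog_cnt && i < want; i++)
			printf("  id %u\n", query->ids[i]);
	}
	free(query);
}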
diff --git a/include/uapi/linux/sctp.h b/include/uapi/linux/sctp.h
index d9adab3..4c4db14 100644
--- a/include/uapi/linux/sctp.h
+++ b/include/uapi/linux/sctp.h
@@ -125,6 +125,7 @@ typedef __s32 sctp_assoc_t;
 #define SCTP_SOCKOPT_PEELOFF_FLAGS 122
 #define SCTP_STREAM_SCHEDULER	123
 #define SCTP_STREAM_SCHEDULER_VALUE	124
+#define SCTP_INTERLEAVING_SUPPORTED	125
 
 /* PR-SCTP policies */
 #define SCTP_PR_SCTP_NONE	0x0000
@@ -459,6 +460,8 @@ struct sctp_pdapi_event {
 	__u32 pdapi_length;
 	__u32 pdapi_indication;
 	sctp_assoc_t pdapi_assoc_id;
+	__u32 pdapi_stream;
+	__u32 pdapi_seq;
 };
 
 enum { SCTP_PARTIAL_DELIVERY_ABORTED=0, };
diff --git a/include/uapi/linux/tipc.h b/include/uapi/linux/tipc.h
index 35f79d1..14bacc7 100644
--- a/include/uapi/linux/tipc.h
+++ b/include/uapi/linux/tipc.h
@@ -117,10 +117,9 @@ static inline unsigned int tipc_node(__u32 addr)
 /*
  * Publication scopes when binding port names and port name sequences
  */
-
-#define TIPC_ZONE_SCOPE		1
-#define TIPC_CLUSTER_SCOPE	2
-#define TIPC_NODE_SCOPE		3
+#define TIPC_ZONE_SCOPE         1
+#define TIPC_CLUSTER_SCOPE      2
+#define TIPC_NODE_SCOPE         3
 
 /*
  * Limiting values for messages
diff --git a/include/uapi/linux/virtio_net.h b/include/uapi/linux/virtio_net.h
index fc353b5..5de6ed3 100644
--- a/include/uapi/linux/virtio_net.h
+++ b/include/uapi/linux/virtio_net.h
@@ -57,6 +57,8 @@
 					 * Steering */
 #define VIRTIO_NET_F_CTRL_MAC_ADDR 23	/* Set MAC address */
 
+#define VIRTIO_NET_F_SPEED_DUPLEX 63	/* Device set linkspeed and duplex */
+
 #ifndef VIRTIO_NET_NO_LEGACY
 #define VIRTIO_NET_F_GSO	6	/* Host handles pkts w/ any GSO type */
 #endif /* VIRTIO_NET_NO_LEGACY */
@@ -76,6 +78,17 @@ struct virtio_net_config {
 	__u16 max_virtqueue_pairs;
 	/* Default maximum transmit unit advice */
 	__u16 mtu;
+	/*
+	 * speed, in units of 1Mb. All values 0 to INT_MAX are legal.
+	 * Any other value stands for unknown.
+	 */
+	__u32 speed;
+	/*
+	 * 0x00 - half duplex
+	 * 0x01 - full duplex
+	 * Any other value stands for unknown.
+	 */
+	__u8 duplex;
 } __attribute__((packed));
 
 /*
diff --git a/kernel/bpf/Makefile b/kernel/bpf/Makefile
index e691da0..a713fd2 100644
--- a/kernel/bpf/Makefile
+++ b/kernel/bpf/Makefile
@@ -9,9 +9,11 @@
 obj-$(CONFIG_BPF_SYSCALL) += cpumap.o
 obj-$(CONFIG_BPF_SYSCALL) += offload.o
 ifeq ($(CONFIG_STREAM_PARSER),y)
+ifeq ($(CONFIG_INET),y)
 obj-$(CONFIG_BPF_SYSCALL) += sockmap.o
 endif
 endif
+endif
 ifeq ($(CONFIG_PERF_EVENTS),y)
 obj-$(CONFIG_BPF_SYSCALL) += stackmap.o
 endif
diff --git a/kernel/bpf/cgroup.c b/kernel/bpf/cgroup.c
index b789ab7..c1c0b60 100644
--- a/kernel/bpf/cgroup.c
+++ b/kernel/bpf/cgroup.c
@@ -568,6 +568,8 @@ static bool cgroup_dev_is_valid_access(int off, int size,
 				       enum bpf_access_type type,
 				       struct bpf_insn_access_aux *info)
 {
+	const int size_default = sizeof(__u32);
+
 	if (type == BPF_WRITE)
 		return false;
 
@@ -576,8 +578,17 @@ static bool cgroup_dev_is_valid_access(int off, int size,
 	/* The verifier guarantees that size > 0. */
 	if (off % size != 0)
 		return false;
-	if (size != sizeof(__u32))
-		return false;
+
+	switch (off) {
+	case bpf_ctx_range(struct bpf_cgroup_dev_ctx, access_type):
+		bpf_ctx_record_field_size(info, size_default);
+		if (!bpf_ctx_narrow_access_ok(off, size, size_default))
+			return false;
+		break;
+	default:
+		if (size != size_default)
+			return false;
+	}
 
 	return true;
 }
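
With the relaxed check, a device cgroup program may load access_type with
a narrow (1- or 2-byte) access instead of a full word. A sketch of the
resulting program-side decode, assuming a little-endian target and the
usual SEC() sample macro; the policy itself is purely illustrative:

#include <linux/bpf.h>

#define SEC(name) __attribute__((section(name), used))

SEC("cgroup/dev")
int dev_filter(struct bpf_cgroup_dev_ctx *ctx)
{
	/* narrow 2-byte load of the low half: BPF_DEVCG_DEV_* */
	__u16 type = *(__u16 *)&ctx->access_type;
	/* high half: BPF_DEVCG_ACC_* */
	__u32 access = ctx->access_type >> 16;

	/* illustrative policy: allow read-only access to char devices */
	if (type == BPF_DEVCG_DEV_CHAR && !(access & ~BPF_DEVCG_ACC_READ))
		return 1;

	return 0;
}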
diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c
index 51ec2dd..25e723b 100644
--- a/kernel/bpf/core.c
+++ b/kernel/bpf/core.c
@@ -94,6 +94,7 @@ struct bpf_prog *bpf_prog_alloc(unsigned int size, gfp_t gfp_extra_flags)
 	fp->pages = size / PAGE_SIZE;
 	fp->aux = aux;
 	fp->aux->prog = fp;
+	fp->jit_requested = ebpf_jit_enabled();
 
 	INIT_LIST_HEAD_RCU(&fp->aux->ksym_lnode);
 
@@ -217,30 +218,40 @@ int bpf_prog_calc_tag(struct bpf_prog *fp)
 	return 0;
 }
 
-static bool bpf_is_jmp_and_has_target(const struct bpf_insn *insn)
-{
-	return BPF_CLASS(insn->code) == BPF_JMP  &&
-	       /* Call and Exit are both special jumps with no
-		* target inside the BPF instruction image.
-		*/
-	       BPF_OP(insn->code) != BPF_CALL &&
-	       BPF_OP(insn->code) != BPF_EXIT;
-}
-
 static void bpf_adj_branches(struct bpf_prog *prog, u32 pos, u32 delta)
 {
 	struct bpf_insn *insn = prog->insnsi;
 	u32 i, insn_cnt = prog->len;
+	bool pseudo_call;
+	u8 code;
+	int off;
 
 	for (i = 0; i < insn_cnt; i++, insn++) {
-		if (!bpf_is_jmp_and_has_target(insn))
+		code = insn->code;
+		if (BPF_CLASS(code) != BPF_JMP)
 			continue;
+		if (BPF_OP(code) == BPF_EXIT)
+			continue;
+		if (BPF_OP(code) == BPF_CALL) {
+			if (insn->src_reg == BPF_PSEUDO_CALL)
+				pseudo_call = true;
+			else
+				continue;
+		} else {
+			pseudo_call = false;
+		}
+		off = pseudo_call ? insn->imm : insn->off;
 
 		/* Adjust offset of jmps if we cross boundaries. */
-		if (i < pos && i + insn->off + 1 > pos)
-			insn->off += delta;
-		else if (i > pos + delta && i + insn->off + 1 <= pos + delta)
-			insn->off -= delta;
+		if (i < pos && i + off + 1 > pos)
+			off += delta;
+		else if (i > pos + delta && i + off + 1 <= pos + delta)
+			off -= delta;
+
+		if (pseudo_call)
+			insn->imm = off;
+		else
+			insn->off = off;
 	}
 }
 
@@ -711,7 +722,7 @@ struct bpf_prog *bpf_jit_blind_constants(struct bpf_prog *prog)
 	struct bpf_insn *insn;
 	int i, rewritten;
 
-	if (!bpf_jit_blinding_enabled())
+	if (!bpf_jit_blinding_enabled(prog) || prog->blinded)
 		return prog;
 
 	clone = bpf_prog_clone_create(prog, GFP_USER);
@@ -753,13 +764,16 @@ struct bpf_prog *bpf_jit_blind_constants(struct bpf_prog *prog)
 		i        += insn_delta;
 	}
 
+	clone->blinded = 1;
 	return clone;
 }
 #endif /* CONFIG_BPF_JIT */
 
 /* Base function for offset calculation. Needs to go into .text section,
  * therefore keeping it non-static as well; will also be used by JITs
- * anyway later on, so do not let the compiler omit it.
+ * anyway later on, so do not let the compiler omit it. This also needs
+ * to go into kallsyms for correlation from e.g. bpftool, so naming
+ * must not change.
  */
 noinline u64 __bpf_call_base(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
 {
@@ -775,8 +789,7 @@ EXPORT_SYMBOL_GPL(__bpf_call_base);
  *
  * Decode and execute eBPF instructions.
  */
-static unsigned int ___bpf_prog_run(u64 *regs, const struct bpf_insn *insn,
-				    u64 *stack)
+static u64 ___bpf_prog_run(u64 *regs, const struct bpf_insn *insn, u64 *stack)
 {
 	u64 tmp;
 	static const void *jumptable[256] = {
@@ -836,6 +849,7 @@ static unsigned int ___bpf_prog_run(u64 *regs, const struct bpf_insn *insn,
 		[BPF_ALU64 | BPF_NEG] = &&ALU64_NEG,
 		/* Call instruction */
 		[BPF_JMP | BPF_CALL] = &&JMP_CALL,
+		[BPF_JMP | BPF_CALL_ARGS] = &&JMP_CALL_ARGS,
 		[BPF_JMP | BPF_TAIL_CALL] = &&JMP_TAIL_CALL,
 		/* Jumps */
 		[BPF_JMP | BPF_JA] = &&JMP_JA,
@@ -1026,6 +1040,13 @@ static unsigned int ___bpf_prog_run(u64 *regs, const struct bpf_insn *insn,
 						       BPF_R4, BPF_R5);
 		CONT;
 
+	JMP_CALL_ARGS:
+		BPF_R0 = (__bpf_call_base_args + insn->imm)(BPF_R1, BPF_R2,
+							    BPF_R3, BPF_R4,
+							    BPF_R5,
+							    insn + insn->off + 1);
+		CONT;
+
 	JMP_TAIL_CALL: {
 		struct bpf_map *map = (struct bpf_map *) (unsigned long) BPF_R2;
 		struct bpf_array *array = container_of(map, struct bpf_array, map);
@@ -1298,6 +1319,23 @@ static unsigned int PROG_NAME(stack_size)(const void *ctx, const struct bpf_insn
 	return ___bpf_prog_run(regs, insn, stack); \
 }
 
+#define PROG_NAME_ARGS(stack_size) __bpf_prog_run_args##stack_size
+#define DEFINE_BPF_PROG_RUN_ARGS(stack_size) \
+static u64 PROG_NAME_ARGS(stack_size)(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5, \
+				      const struct bpf_insn *insn) \
+{ \
+	u64 stack[stack_size / sizeof(u64)]; \
+	u64 regs[MAX_BPF_REG]; \
+\
+	FP = (u64) (unsigned long) &stack[ARRAY_SIZE(stack)]; \
+	BPF_R1 = r1; \
+	BPF_R2 = r2; \
+	BPF_R3 = r3; \
+	BPF_R4 = r4; \
+	BPF_R5 = r5; \
+	return ___bpf_prog_run(regs, insn, stack); \
+}
+
 #define EVAL1(FN, X) FN(X)
 #define EVAL2(FN, X, Y...) FN(X) EVAL1(FN, Y)
 #define EVAL3(FN, X, Y...) FN(X) EVAL2(FN, Y)
@@ -1309,6 +1347,10 @@ EVAL6(DEFINE_BPF_PROG_RUN, 32, 64, 96, 128, 160, 192);
 EVAL6(DEFINE_BPF_PROG_RUN, 224, 256, 288, 320, 352, 384);
 EVAL4(DEFINE_BPF_PROG_RUN, 416, 448, 480, 512);
 
+EVAL6(DEFINE_BPF_PROG_RUN_ARGS, 32, 64, 96, 128, 160, 192);
+EVAL6(DEFINE_BPF_PROG_RUN_ARGS, 224, 256, 288, 320, 352, 384);
+EVAL4(DEFINE_BPF_PROG_RUN_ARGS, 416, 448, 480, 512);
+
 #define PROG_NAME_LIST(stack_size) PROG_NAME(stack_size),
 
 static unsigned int (*interpreters[])(const void *ctx,
@@ -1317,6 +1359,24 @@ EVAL6(PROG_NAME_LIST, 32, 64, 96, 128, 160, 192)
 EVAL6(PROG_NAME_LIST, 224, 256, 288, 320, 352, 384)
 EVAL4(PROG_NAME_LIST, 416, 448, 480, 512)
 };
+#undef PROG_NAME_LIST
+#define PROG_NAME_LIST(stack_size) PROG_NAME_ARGS(stack_size),
+static u64 (*interpreters_args[])(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5,
+				  const struct bpf_insn *insn) = {
+EVAL6(PROG_NAME_LIST, 32, 64, 96, 128, 160, 192)
+EVAL6(PROG_NAME_LIST, 224, 256, 288, 320, 352, 384)
+EVAL4(PROG_NAME_LIST, 416, 448, 480, 512)
+};
+#undef PROG_NAME_LIST
+
+void bpf_patch_call_args(struct bpf_insn *insn, u32 stack_depth)
+{
+	stack_depth = max_t(u32, stack_depth, 1);
+	insn->off = (s16) insn->imm;
+	insn->imm = interpreters_args[(round_up(stack_depth, 32) / 32) - 1] -
+		__bpf_call_base_args;
+	insn->code = BPF_JMP | BPF_CALL_ARGS;
+}
 
 #else
 static unsigned int __bpf_prog_ret0(const void *ctx,
@@ -1329,6 +1389,9 @@ static unsigned int __bpf_prog_ret0(const void *ctx,
 bool bpf_prog_array_compatible(struct bpf_array *array,
 			       const struct bpf_prog *fp)
 {
+	if (fp->kprobe_override)
+		return false;
+
 	if (!array->owner_prog_type) {
 		/* There's no owner yet where we could check for
 		 * compatibility.
@@ -1481,6 +1544,8 @@ int bpf_prog_array_copy_to_user(struct bpf_prog_array __rcu *progs,
 	rcu_read_lock();
 	prog = rcu_dereference(progs)->progs;
 	for (; *prog; prog++) {
+		if (*prog == &dummy_bpf_prog.prog)
+			continue;
 		id = (*prog)->aux->id;
 		if (copy_to_user(prog_ids + i, &id, sizeof(id))) {
 			rcu_read_unlock();
@@ -1564,14 +1629,41 @@ int bpf_prog_array_copy(struct bpf_prog_array __rcu *old_array,
 	return 0;
 }
 
+int bpf_prog_array_copy_info(struct bpf_prog_array __rcu *array,
+			     __u32 __user *prog_ids, u32 request_cnt,
+			     __u32 __user *prog_cnt)
+{
+	u32 cnt = 0;
+
+	if (array)
+		cnt = bpf_prog_array_length(array);
+
+	if (copy_to_user(prog_cnt, &cnt, sizeof(cnt)))
+		return -EFAULT;
+
+	/* return early if user requested only program count or nothing to copy */
+	if (!request_cnt || !cnt)
+		return 0;
+
+	return bpf_prog_array_copy_to_user(array, prog_ids, request_cnt);
+}
+
 static void bpf_prog_free_deferred(struct work_struct *work)
 {
 	struct bpf_prog_aux *aux;
+	int i;
 
 	aux = container_of(work, struct bpf_prog_aux, work);
 	if (bpf_prog_is_dev_bound(aux))
 		bpf_prog_offload_destroy(aux->prog);
-	bpf_jit_free(aux->prog);
+	for (i = 0; i < aux->func_cnt; i++)
+		bpf_jit_free(aux->func[i]);
+	if (aux->func_cnt) {
+		kfree(aux->func);
+		bpf_prog_unlock_free(aux->prog);
+	} else {
+		bpf_jit_free(aux->prog);
+	}
 }
 
 /* Free internal BPF program */
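
Note: the interpreters_args[] table added above mirrors interpreters[]: sixteen
variants of ___bpf_prog_run() specialized by stack size in 32-byte steps, and
bpf_patch_call_args() selects one by rounding the verifier-computed stack depth
up to the next multiple of 32. A minimal user-space sketch of that indexing
(round_up32() and the loop are illustrative, not kernel API):

    #include <stdio.h>

    /* same rounding the kernel does with round_up(stack_depth, 32) */
    static unsigned int round_up32(unsigned int x)
    {
            return (x + 31) & ~31u;
    }

    int main(void)
    {
            unsigned int depth;

            /* depths are clamped to at least 1 and capped at 512 */
            for (depth = 1; depth <= 512; depth += 100) {
                    unsigned int idx = round_up32(depth) / 32 - 1;

                    /* idx picks one of the 16 interpreters_args[] entries */
                    printf("stack_depth=%3u -> interpreters_args[%u]\n",
                           depth, idx);
            }
            return 0;
    }
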
diff --git a/kernel/bpf/disasm.c b/kernel/bpf/disasm.c
index e682850..8740406 100644
--- a/kernel/bpf/disasm.c
+++ b/kernel/bpf/disasm.c
@@ -21,10 +21,39 @@ static const char * const func_id_str[] = {
 };
 #undef __BPF_FUNC_STR_FN
 
-const char *func_id_name(int id)
+static const char *__func_get_name(const struct bpf_insn_cbs *cbs,
+				   const struct bpf_insn *insn,
+				   char *buff, size_t len)
 {
 	BUILD_BUG_ON(ARRAY_SIZE(func_id_str) != __BPF_FUNC_MAX_ID);
 
+	if (insn->src_reg != BPF_PSEUDO_CALL &&
+	    insn->imm >= 0 && insn->imm < __BPF_FUNC_MAX_ID &&
+	    func_id_str[insn->imm])
+		return func_id_str[insn->imm];
+
+	if (cbs && cbs->cb_call)
+		return cbs->cb_call(cbs->private_data, insn);
+
+	if (insn->src_reg == BPF_PSEUDO_CALL)
+		snprintf(buff, len, "%+d", insn->imm);
+
+	return buff;
+}
+
+static const char *__func_imm_name(const struct bpf_insn_cbs *cbs,
+				   const struct bpf_insn *insn,
+				   u64 full_imm, char *buff, size_t len)
+{
+	if (cbs && cbs->cb_imm)
+		return cbs->cb_imm(cbs->private_data, insn, full_imm);
+
+	snprintf(buff, len, "0x%llx", (unsigned long long)full_imm);
+	return buff;
+}
+
+const char *func_id_name(int id)
+{
 	if (id >= 0 && id < __BPF_FUNC_MAX_ID && func_id_str[id])
 		return func_id_str[id];
 	else
@@ -83,7 +112,7 @@ static const char *const bpf_jmp_string[16] = {
 	[BPF_EXIT >> 4] = "exit",
 };
 
-static void print_bpf_end_insn(bpf_insn_print_cb verbose,
+static void print_bpf_end_insn(bpf_insn_print_t verbose,
 			       struct bpf_verifier_env *env,
 			       const struct bpf_insn *insn)
 {
@@ -92,9 +121,12 @@ static void print_bpf_end_insn(bpf_insn_print_cb verbose,
 		insn->imm, insn->dst_reg);
 }
 
-void print_bpf_insn(bpf_insn_print_cb verbose, struct bpf_verifier_env *env,
-		    const struct bpf_insn *insn, bool allow_ptr_leaks)
+void print_bpf_insn(const struct bpf_insn_cbs *cbs,
+		    struct bpf_verifier_env *env,
+		    const struct bpf_insn *insn,
+		    bool allow_ptr_leaks)
 {
+	const bpf_insn_print_t verbose = cbs->cb_print;
 	u8 class = BPF_CLASS(insn->code);
 
 	if (class == BPF_ALU || class == BPF_ALU64) {
@@ -175,12 +207,15 @@ void print_bpf_insn(bpf_insn_print_cb verbose, struct bpf_verifier_env *env,
 			 */
 			u64 imm = ((u64)(insn + 1)->imm << 32) | (u32)insn->imm;
 			bool map_ptr = insn->src_reg == BPF_PSEUDO_MAP_FD;
+			char tmp[64];
 
 			if (map_ptr && !allow_ptr_leaks)
 				imm = 0;
 
-			verbose(env, "(%02x) r%d = 0x%llx\n", insn->code,
-				insn->dst_reg, (unsigned long long)imm);
+			verbose(env, "(%02x) r%d = %s\n",
+				insn->code, insn->dst_reg,
+				__func_imm_name(cbs, insn, imm,
+						tmp, sizeof(tmp)));
 		} else {
 			verbose(env, "BUG_ld_%02x\n", insn->code);
 			return;
@@ -189,8 +224,20 @@ void print_bpf_insn(bpf_insn_print_cb verbose, struct bpf_verifier_env *env,
 		u8 opcode = BPF_OP(insn->code);
 
 		if (opcode == BPF_CALL) {
-			verbose(env, "(%02x) call %s#%d\n", insn->code,
-				func_id_name(insn->imm), insn->imm);
+			char tmp[64];
+
+			if (insn->src_reg == BPF_PSEUDO_CALL) {
+				verbose(env, "(%02x) call pc%s\n",
+					insn->code,
+					__func_get_name(cbs, insn,
+							tmp, sizeof(tmp)));
+			} else {
+				strcpy(tmp, "unknown");
+				verbose(env, "(%02x) call %s#%d\n", insn->code,
+					__func_get_name(cbs, insn,
+							tmp, sizeof(tmp)),
+					insn->imm);
+			}
 		} else if (insn->code == (BPF_JMP | BPF_JA)) {
 			verbose(env, "(%02x) goto pc%+d\n",
 				insn->code, insn->off);
diff --git a/kernel/bpf/disasm.h b/kernel/bpf/disasm.h
index 8de977e..e0857d0 100644
--- a/kernel/bpf/disasm.h
+++ b/kernel/bpf/disasm.h
@@ -17,16 +17,35 @@
 #include <linux/bpf.h>
 #include <linux/kernel.h>
 #include <linux/stringify.h>
+#ifndef __KERNEL__
+#include <stdio.h>
+#include <string.h>
+#endif
+
+struct bpf_verifier_env;
 
 extern const char *const bpf_alu_string[16];
 extern const char *const bpf_class_string[8];
 
 const char *func_id_name(int id);
 
-struct bpf_verifier_env;
-typedef void (*bpf_insn_print_cb)(struct bpf_verifier_env *env,
-				  const char *, ...);
-void print_bpf_insn(bpf_insn_print_cb verbose, struct bpf_verifier_env *env,
-		    const struct bpf_insn *insn, bool allow_ptr_leaks);
+typedef void (*bpf_insn_print_t)(struct bpf_verifier_env *env,
+				 const char *, ...);
+typedef const char *(*bpf_insn_revmap_call_t)(void *private_data,
+					      const struct bpf_insn *insn);
+typedef const char *(*bpf_insn_print_imm_t)(void *private_data,
+					    const struct bpf_insn *insn,
+					    __u64 full_imm);
 
+struct bpf_insn_cbs {
+	bpf_insn_print_t	cb_print;
+	bpf_insn_revmap_call_t	cb_call;
+	bpf_insn_print_imm_t	cb_imm;
+	void			*private_data;
+};
+
+void print_bpf_insn(const struct bpf_insn_cbs *cbs,
+		    struct bpf_verifier_env *env,
+		    const struct bpf_insn *insn,
+		    bool allow_ptr_leaks);
 #endif
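
Note: the bpf_insn_cbs structure above lets a caller of print_bpf_insn()
override how call targets and 64-bit immediates are rendered; the in-kernel
verifier typically only needs cb_print. A hedged sketch of wiring it up, where
the two callback bodies and names are placeholders rather than kernel code:

    static const char *my_resolve_call(void *private_data,
                                       const struct bpf_insn *insn)
    {
            /* e.g. map insn->imm to a subprog name via private_data */
            return "subprog_0";             /* placeholder */
    }

    static const char *my_print_imm(void *private_data,
                                    const struct bpf_insn *insn,
                                    __u64 full_imm)
    {
            /* e.g. pretty-print map pointers loaded by BPF_LD_IMM64 */
            return "map[example]";          /* placeholder */
    }

    static void dump_one_insn(struct bpf_verifier_env *env,
                              const struct bpf_insn *insn)
    {
            const struct bpf_insn_cbs cbs = {
                    .cb_print       = bpf_verifier_log_write,
                    .cb_call        = my_resolve_call,
                    .cb_imm         = my_print_imm,
                    .private_data   = NULL,
            };

            print_bpf_insn(&cbs, env, insn, false /* allow_ptr_leaks */);
    }
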
diff --git a/kernel/bpf/offload.c b/kernel/bpf/offload.c
index 8455b89..040d4e0 100644
--- a/kernel/bpf/offload.c
+++ b/kernel/bpf/offload.c
@@ -16,17 +16,22 @@
 #include <linux/bpf.h>
 #include <linux/bpf_verifier.h>
 #include <linux/bug.h>
+#include <linux/kdev_t.h>
 #include <linux/list.h>
 #include <linux/netdevice.h>
 #include <linux/printk.h>
+#include <linux/proc_ns.h>
 #include <linux/rtnetlink.h>
+#include <linux/rwsem.h>
 
-/* protected by RTNL */
+/* Protects bpf_prog_offload_devs and offload members of all progs.
+ * RTNL lock cannot be taken when holding this lock.
+ */
+static DECLARE_RWSEM(bpf_devs_lock);
 static LIST_HEAD(bpf_prog_offload_devs);
 
 int bpf_prog_offload_init(struct bpf_prog *prog, union bpf_attr *attr)
 {
-	struct net *net = current->nsproxy->net_ns;
 	struct bpf_dev_offload *offload;
 
 	if (attr->prog_type != BPF_PROG_TYPE_SCHED_CLS &&
@@ -41,32 +46,40 @@ int bpf_prog_offload_init(struct bpf_prog *prog, union bpf_attr *attr)
 		return -ENOMEM;
 
 	offload->prog = prog;
-	init_waitqueue_head(&offload->verifier_done);
 
-	rtnl_lock();
-	offload->netdev = __dev_get_by_index(net, attr->prog_ifindex);
-	if (!offload->netdev) {
-		rtnl_unlock();
-		kfree(offload);
-		return -EINVAL;
-	}
+	offload->netdev = dev_get_by_index(current->nsproxy->net_ns,
+					   attr->prog_ifindex);
+	if (!offload->netdev)
+		goto err_free;
 
+	down_write(&bpf_devs_lock);
+	if (offload->netdev->reg_state != NETREG_REGISTERED)
+		goto err_unlock;
 	prog->aux->offload = offload;
 	list_add_tail(&offload->offloads, &bpf_prog_offload_devs);
-	rtnl_unlock();
+	dev_put(offload->netdev);
+	up_write(&bpf_devs_lock);
 
 	return 0;
+err_unlock:
+	up_write(&bpf_devs_lock);
+	dev_put(offload->netdev);
+err_free:
+	kfree(offload);
+	return -EINVAL;
 }
 
 static int __bpf_offload_ndo(struct bpf_prog *prog, enum bpf_netdev_command cmd,
 			     struct netdev_bpf *data)
 {
-	struct net_device *netdev = prog->aux->offload->netdev;
+	struct bpf_dev_offload *offload = prog->aux->offload;
+	struct net_device *netdev;
 
 	ASSERT_RTNL();
 
-	if (!netdev)
+	if (!offload)
 		return -ENODEV;
+	netdev = offload->netdev;
 	if (!netdev->netdev_ops->ndo_bpf)
 		return -EOPNOTSUPP;
 
@@ -87,62 +100,63 @@ int bpf_prog_offload_verifier_prep(struct bpf_verifier_env *env)
 	if (err)
 		goto exit_unlock;
 
-	env->dev_ops = data.verifier.ops;
-
+	env->prog->aux->offload->dev_ops = data.verifier.ops;
 	env->prog->aux->offload->dev_state = true;
-	env->prog->aux->offload->verifier_running = true;
 exit_unlock:
 	rtnl_unlock();
 	return err;
 }
 
+int bpf_prog_offload_verify_insn(struct bpf_verifier_env *env,
+				 int insn_idx, int prev_insn_idx)
+{
+	struct bpf_dev_offload *offload;
+	int ret = -ENODEV;
+
+	down_read(&bpf_devs_lock);
+	offload = env->prog->aux->offload;
+	if (offload)
+		ret = offload->dev_ops->insn_hook(env, insn_idx, prev_insn_idx);
+	up_read(&bpf_devs_lock);
+
+	return ret;
+}
+
 static void __bpf_prog_offload_destroy(struct bpf_prog *prog)
 {
 	struct bpf_dev_offload *offload = prog->aux->offload;
 	struct netdev_bpf data = {};
 
-	/* Caution - if netdev is destroyed before the program, this function
-	 * will be called twice.
-	 */
-
 	data.offload.prog = prog;
 
-	if (offload->verifier_running)
-		wait_event(offload->verifier_done, !offload->verifier_running);
-
 	if (offload->dev_state)
 		WARN_ON(__bpf_offload_ndo(prog, BPF_OFFLOAD_DESTROY, &data));
 
-	offload->dev_state = false;
+	/* Make sure BPF_PROG_GET_NEXT_ID can't find this dead program */
+	bpf_prog_free_id(prog, true);
+
 	list_del_init(&offload->offloads);
-	offload->netdev = NULL;
+	kfree(offload);
+	prog->aux->offload = NULL;
 }
 
 void bpf_prog_offload_destroy(struct bpf_prog *prog)
 {
-	struct bpf_dev_offload *offload = prog->aux->offload;
-
-	offload->verifier_running = false;
-	wake_up(&offload->verifier_done);
-
 	rtnl_lock();
-	__bpf_prog_offload_destroy(prog);
+	down_write(&bpf_devs_lock);
+	if (prog->aux->offload)
+		__bpf_prog_offload_destroy(prog);
+	up_write(&bpf_devs_lock);
 	rtnl_unlock();
-
-	kfree(offload);
 }
 
 static int bpf_prog_offload_translate(struct bpf_prog *prog)
 {
-	struct bpf_dev_offload *offload = prog->aux->offload;
 	struct netdev_bpf data = {};
 	int ret;
 
 	data.offload.prog = prog;
 
-	offload->verifier_running = false;
-	wake_up(&offload->verifier_done);
-
 	rtnl_lock();
 	ret = __bpf_offload_ndo(prog, BPF_OFFLOAD_TRANSLATE, &data);
 	rtnl_unlock();
@@ -164,6 +178,63 @@ int bpf_prog_offload_compile(struct bpf_prog *prog)
 	return bpf_prog_offload_translate(prog);
 }
 
+struct ns_get_path_bpf_prog_args {
+	struct bpf_prog *prog;
+	struct bpf_prog_info *info;
+};
+
+static struct ns_common *bpf_prog_offload_info_fill_ns(void *private_data)
+{
+	struct ns_get_path_bpf_prog_args *args = private_data;
+	struct bpf_prog_aux *aux = args->prog->aux;
+	struct ns_common *ns;
+	struct net *net;
+
+	rtnl_lock();
+	down_read(&bpf_devs_lock);
+
+	if (aux->offload) {
+		args->info->ifindex = aux->offload->netdev->ifindex;
+		net = dev_net(aux->offload->netdev);
+		get_net(net);
+		ns = &net->ns;
+	} else {
+		args->info->ifindex = 0;
+		ns = NULL;
+	}
+
+	up_read(&bpf_devs_lock);
+	rtnl_unlock();
+
+	return ns;
+}
+
+int bpf_prog_offload_info_fill(struct bpf_prog_info *info,
+			       struct bpf_prog *prog)
+{
+	struct ns_get_path_bpf_prog_args args = {
+		.prog	= prog,
+		.info	= info,
+	};
+	struct inode *ns_inode;
+	struct path ns_path;
+	void *res;
+
+	res = ns_get_path_cb(&ns_path, bpf_prog_offload_info_fill_ns, &args);
+	if (IS_ERR(res)) {
+		if (!info->ifindex)
+			return -ENODEV;
+		return PTR_ERR(res);
+	}
+
+	ns_inode = ns_path.dentry->d_inode;
+	info->netns_dev = new_encode_dev(ns_inode->i_sb->s_dev);
+	info->netns_ino = ns_inode->i_ino;
+	path_put(&ns_path);
+
+	return 0;
+}
+
 const struct bpf_prog_ops bpf_offload_prog_ops = {
 };
 
@@ -181,11 +252,13 @@ static int bpf_offload_notification(struct notifier_block *notifier,
 		if (netdev->reg_state != NETREG_UNREGISTERING)
 			break;
 
+		down_write(&bpf_devs_lock);
 		list_for_each_entry_safe(offload, tmp, &bpf_prog_offload_devs,
 					 offloads) {
 			if (offload->netdev == netdev)
 				__bpf_prog_offload_destroy(offload->prog);
 		}
+		up_write(&bpf_devs_lock);
 		break;
 	default:
 		break;
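
Note: with bpf_devs_lock in place, any code that dereferences
prog->aux->offload must hold the semaphore, because the notifier above can free
the offload structure under the write lock once the device unregisters. A
sketch of the read-side pattern, modeled on bpf_prog_offload_verify_insn();
offload_ifindex() is a hypothetical helper, not part of this patch:

    static int offload_ifindex(struct bpf_prog *prog)
    {
            struct bpf_dev_offload *offload;
            int ret = -ENODEV;

            down_read(&bpf_devs_lock);
            offload = prog->aux->offload;
            if (offload)
                    ret = offload->netdev->ifindex; /* safe under the lock */
            up_read(&bpf_devs_lock);

            return ret;
    }
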
diff --git a/kernel/bpf/sockmap.c b/kernel/bpf/sockmap.c
index 1712d31..0799686 100644
--- a/kernel/bpf/sockmap.c
+++ b/kernel/bpf/sockmap.c
@@ -96,14 +96,6 @@ static inline struct smap_psock *smap_psock_sk(const struct sock *sk)
 	return rcu_dereference_sk_user_data(sk);
 }
 
-/* compute the linear packet data range [data, data_end) for skb when
- * sk_skb type programs are in use.
- */
-static inline void bpf_compute_data_end_sk_skb(struct sk_buff *skb)
-{
-	TCP_SKB_CB(skb)->bpf.data_end = skb->data + skb_headlen(skb);
-}
-
 enum __sk_action {
 	__SK_DROP = 0,
 	__SK_PASS,
diff --git a/kernel/bpf/stackmap.c b/kernel/bpf/stackmap.c
index a15bc63..6c63c22 100644
--- a/kernel/bpf/stackmap.c
+++ b/kernel/bpf/stackmap.c
@@ -226,9 +226,33 @@ int bpf_stackmap_copy(struct bpf_map *map, void *key, void *value)
 	return 0;
 }
 
-static int stack_map_get_next_key(struct bpf_map *map, void *key, void *next_key)
+static int stack_map_get_next_key(struct bpf_map *map, void *key,
+				  void *next_key)
 {
-	return -EINVAL;
+	struct bpf_stack_map *smap = container_of(map,
+						  struct bpf_stack_map, map);
+	u32 id;
+
+	WARN_ON_ONCE(!rcu_read_lock_held());
+
+	if (!key) {
+		id = 0;
+	} else {
+		id = *(u32 *)key;
+		if (id >= smap->n_buckets || !smap->buckets[id])
+			id = 0;
+		else
+			id++;
+	}
+
+	while (id < smap->n_buckets && !smap->buckets[id])
+		id++;
+
+	if (id >= smap->n_buckets)
+		return -ENOENT;
+
+	*(u32 *)next_key = id;
+	return 0;
 }
 
 static int stack_map_update_elem(struct bpf_map *map, void *key, void *value,
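
Note: with stack_map_get_next_key() implemented, user space can now walk every
populated stack id in a BPF_MAP_TYPE_STACK_TRACE map. A sketch using a
libbpf-style bpf_map_get_next_key() wrapper (assumed to be linked in; error
handling elided):

    #include <stdint.h>
    #include <stdio.h>

    /* libbpf-style wrapper around BPF_MAP_GET_NEXT_KEY */
    extern int bpf_map_get_next_key(int fd, const void *key, void *next_key);

    static void dump_stack_ids(int map_fd)
    {
            uint32_t key, next_key;
            uint32_t *prev = NULL;  /* NULL key -> start from the first id */

            while (bpf_map_get_next_key(map_fd, prev, &next_key) == 0) {
                    printf("stack id %u\n", next_key);
                    key = next_key;
                    prev = &key;
            }
            /* the loop ends with ENOENT once the last bucket is returned */
    }
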
diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c
index 5cb783f..2bac0dc 100644
--- a/kernel/bpf/syscall.c
+++ b/kernel/bpf/syscall.c
@@ -905,9 +905,13 @@ static int bpf_prog_alloc_id(struct bpf_prog *prog)
 	return id > 0 ? 0 : id;
 }
 
-static void bpf_prog_free_id(struct bpf_prog *prog, bool do_idr_lock)
+void bpf_prog_free_id(struct bpf_prog *prog, bool do_idr_lock)
 {
-	/* cBPF to eBPF migrations are currently not in the idr store. */
+	/* cBPF to eBPF migrations are currently not in the idr store.
+	 * Offloaded programs are removed from the store when their device
+	 * disappears - even if someone grabs an fd to them they are unusable,
+	 * simply waiting for the refcnt to drop so they can be freed.
+	 */
 	if (!prog->aux->id)
 		return;
 
@@ -917,6 +921,7 @@ static void bpf_prog_free_id(struct bpf_prog *prog, bool do_idr_lock)
 		__acquire(&prog_idr_lock);
 
 	idr_remove(&prog_idr, prog->aux->id);
+	prog->aux->id = 0;
 
 	if (do_idr_lock)
 		spin_unlock_bh(&prog_idr_lock);
@@ -937,10 +942,16 @@ static void __bpf_prog_put_rcu(struct rcu_head *rcu)
 static void __bpf_prog_put(struct bpf_prog *prog, bool do_idr_lock)
 {
 	if (atomic_dec_and_test(&prog->aux->refcnt)) {
+		int i;
+
 		trace_bpf_prog_put_rcu(prog);
 		/* bpf_prog_free_id() must be called first */
 		bpf_prog_free_id(prog, do_idr_lock);
+
+		for (i = 0; i < prog->aux->func_cnt; i++)
+			bpf_prog_kallsyms_del(prog->aux->func[i]);
 		bpf_prog_kallsyms_del(prog);
+
 		call_rcu(&prog->aux->rcu, __bpf_prog_put_rcu);
 	}
 }
@@ -1151,6 +1162,8 @@ static int bpf_prog_load(union bpf_attr *attr)
 	if (!prog)
 		return -ENOMEM;
 
+	prog->aux->offload_requested = !!attr->prog_ifindex;
+
 	err = security_bpf_prog_alloc(prog->aux);
 	if (err)
 		goto free_prog_nouncharge;
@@ -1172,7 +1185,7 @@ static int bpf_prog_load(union bpf_attr *attr)
 	atomic_set(&prog->aux->refcnt, 1);
 	prog->gpl_compatible = is_gpl ? 1 : 0;
 
-	if (attr->prog_ifindex) {
+	if (bpf_prog_is_dev_bound(prog->aux)) {
 		err = bpf_prog_offload_init(prog, attr);
 		if (err)
 			goto free_prog;
@@ -1194,7 +1207,8 @@ static int bpf_prog_load(union bpf_attr *attr)
 		goto free_used_maps;
 
 	/* eBPF program is ready to be JITed */
-	prog = bpf_prog_select_runtime(prog, &err);
+	if (!prog->bpf_func)
+		prog = bpf_prog_select_runtime(prog, &err);
 	if (err < 0)
 		goto free_used_maps;
 
@@ -1551,6 +1565,67 @@ static int bpf_map_get_fd_by_id(const union bpf_attr *attr)
 	return fd;
 }
 
+static const struct bpf_map *bpf_map_from_imm(const struct bpf_prog *prog,
+					      unsigned long addr)
+{
+	int i;
+
+	for (i = 0; i < prog->aux->used_map_cnt; i++)
+		if (prog->aux->used_maps[i] == (void *)addr)
+			return prog->aux->used_maps[i];
+	return NULL;
+}
+
+static struct bpf_insn *bpf_insn_prepare_dump(const struct bpf_prog *prog)
+{
+	const struct bpf_map *map;
+	struct bpf_insn *insns;
+	u64 imm;
+	int i;
+
+	insns = kmemdup(prog->insnsi, bpf_prog_insn_size(prog),
+			GFP_USER);
+	if (!insns)
+		return insns;
+
+	for (i = 0; i < prog->len; i++) {
+		if (insns[i].code == (BPF_JMP | BPF_TAIL_CALL)) {
+			insns[i].code = BPF_JMP | BPF_CALL;
+			insns[i].imm = BPF_FUNC_tail_call;
+			/* fall-through */
+		}
+		if (insns[i].code == (BPF_JMP | BPF_CALL) ||
+		    insns[i].code == (BPF_JMP | BPF_CALL_ARGS)) {
+			if (insns[i].code == (BPF_JMP | BPF_CALL_ARGS))
+				insns[i].code = BPF_JMP | BPF_CALL;
+			if (!bpf_dump_raw_ok())
+				insns[i].imm = 0;
+			continue;
+		}
+
+		if (insns[i].code != (BPF_LD | BPF_IMM | BPF_DW))
+			continue;
+
+		imm = ((u64)insns[i + 1].imm << 32) | (u32)insns[i].imm;
+		map = bpf_map_from_imm(prog, imm);
+		if (map) {
+			insns[i].src_reg = BPF_PSEUDO_MAP_FD;
+			insns[i].imm = map->id;
+			insns[i + 1].imm = 0;
+			continue;
+		}
+
+		if (!bpf_dump_raw_ok() &&
+		    imm == (unsigned long)prog->aux) {
+			insns[i].imm = 0;
+			insns[i + 1].imm = 0;
+			continue;
+		}
+	}
+
+	return insns;
+}
+
 static int bpf_prog_get_info_by_fd(struct bpf_prog *prog,
 				   const union bpf_attr *attr,
 				   union bpf_attr __user *uattr)
@@ -1601,21 +1676,43 @@ static int bpf_prog_get_info_by_fd(struct bpf_prog *prog,
 	ulen = info.jited_prog_len;
 	info.jited_prog_len = prog->jited_len;
 	if (info.jited_prog_len && ulen) {
-		uinsns = u64_to_user_ptr(info.jited_prog_insns);
-		ulen = min_t(u32, info.jited_prog_len, ulen);
-		if (copy_to_user(uinsns, prog->bpf_func, ulen))
-			return -EFAULT;
+		if (bpf_dump_raw_ok()) {
+			uinsns = u64_to_user_ptr(info.jited_prog_insns);
+			ulen = min_t(u32, info.jited_prog_len, ulen);
+			if (copy_to_user(uinsns, prog->bpf_func, ulen))
+				return -EFAULT;
+		} else {
+			info.jited_prog_insns = 0;
+		}
 	}
 
 	ulen = info.xlated_prog_len;
 	info.xlated_prog_len = bpf_prog_insn_size(prog);
 	if (info.xlated_prog_len && ulen) {
+		struct bpf_insn *insns_sanitized;
+		bool fault;
+
+		if (prog->blinded && !bpf_dump_raw_ok()) {
+			info.xlated_prog_insns = 0;
+			goto done;
+		}
+		insns_sanitized = bpf_insn_prepare_dump(prog);
+		if (!insns_sanitized)
+			return -ENOMEM;
 		uinsns = u64_to_user_ptr(info.xlated_prog_insns);
 		ulen = min_t(u32, info.xlated_prog_len, ulen);
-		if (copy_to_user(uinsns, prog->insnsi, ulen))
+		fault = copy_to_user(uinsns, insns_sanitized, ulen);
+		kfree(insns_sanitized);
+		if (fault)
 			return -EFAULT;
 	}
 
+	if (bpf_prog_is_dev_bound(prog->aux)) {
+		err = bpf_prog_offload_info_fill(&info, prog);
+		if (err)
+			return err;
+	}
+
 done:
 	if (copy_to_user(uinfo, &info, info_len) ||
 	    put_user(info_len, &uattr->info.info_len))
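
Note: after this change a BPF_OBJ_GET_INFO_BY_FD consumer sees map pointers in
the xlated image rewritten to map ids by bpf_insn_prepare_dump() above. A
user-space sketch of scanning the returned instructions for such references;
scan_map_refs() is illustrative, not an existing helper:

    #include <linux/bpf.h>
    #include <stdio.h>

    static void scan_map_refs(const struct bpf_insn *insn, unsigned int len)
    {
            unsigned int i;

            for (i = 0; i < len; i++) {
                    if (insn[i].code != (BPF_LD | BPF_IMM | BPF_DW))
                            continue;
                    if (insn[i].src_reg == BPF_PSEUDO_MAP_FD)
                            printf("insn %u references map id %d\n",
                                   i, insn[i].imm);
                    i++;    /* skip the second half of the 16-byte insn */
            }
    }
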
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index b414d6b..48b61ca 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -20,6 +20,8 @@
 #include <linux/file.h>
 #include <linux/vmalloc.h>
 #include <linux/stringify.h>
+#include <linux/bsearch.h>
+#include <linux/sort.h>
 
 #include "disasm.h"
 
@@ -167,11 +169,11 @@ struct bpf_call_arg_meta {
 static DEFINE_MUTEX(bpf_verifier_lock);
 
 /* log_level controls verbosity level of eBPF verifier.
- * verbose() is used to dump the verification trace to the log, so the user
- * can figure out what's wrong with the program
+ * bpf_verifier_log_write() is used to dump the verification trace to the log,
+ * so the user can figure out what's wrong with the program
  */
-static __printf(2, 3) void verbose(struct bpf_verifier_env *env,
-				   const char *fmt, ...)
+__printf(2, 3) void bpf_verifier_log_write(struct bpf_verifier_env *env,
+					   const char *fmt, ...)
 {
 	struct bpf_verifer_log *log = &env->log;
 	unsigned int n;
@@ -195,6 +197,14 @@ static __printf(2, 3) void verbose(struct bpf_verifier_env *env,
 	else
 		log->ubuf = NULL;
 }
+EXPORT_SYMBOL_GPL(bpf_verifier_log_write);
+/* Historically bpf_verifier_log_write was called verbose, but the name was too
+ * generic for symbol export. The function was renamed, but the calls in the
+ * verifier were not, to avoid complicating backports. Hence the alias below.
+ */
+static __printf(2, 3) void verbose(struct bpf_verifier_env *env,
+				   const char *fmt, ...)
+	__attribute__((alias("bpf_verifier_log_write")));
 
 static bool type_is_pkt_pointer(enum bpf_reg_type type)
 {
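
Note: the aliasing trick used above can be reproduced stand-alone: both names
resolve to the same code at link time, so existing call sites keep compiling
without churn. A minimal GCC-style sketch with illustrative names:

    #include <stdarg.h>
    #include <stdio.h>

    void log_write(const char *fmt, ...)
    {
            va_list args;

            va_start(args, fmt);
            vprintf(fmt, args);
            va_end(args);
    }

    /* second, file-local name for exactly the same function */
    static void verbose(const char *fmt, ...)
            __attribute__((alias("log_write")));

    int main(void)
    {
            verbose("hello %d\n", 42);      /* ends up in log_write() */
            return 0;
    }
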
@@ -216,23 +226,48 @@ static const char * const reg_type_str[] = {
 	[PTR_TO_PACKET_END]	= "pkt_end",
 };
 
-static void print_verifier_state(struct bpf_verifier_env *env,
-				 struct bpf_verifier_state *state)
+static void print_liveness(struct bpf_verifier_env *env,
+			   enum bpf_reg_liveness live)
 {
-	struct bpf_reg_state *reg;
+	if (live & (REG_LIVE_READ | REG_LIVE_WRITTEN))
+		verbose(env, "_");
+	if (live & REG_LIVE_READ)
+		verbose(env, "r");
+	if (live & REG_LIVE_WRITTEN)
+		verbose(env, "w");
+}
+
+static struct bpf_func_state *func(struct bpf_verifier_env *env,
+				   const struct bpf_reg_state *reg)
+{
+	struct bpf_verifier_state *cur = env->cur_state;
+
+	return cur->frame[reg->frameno];
+}
+
+static void print_verifier_state(struct bpf_verifier_env *env,
+				 const struct bpf_func_state *state)
+{
+	const struct bpf_reg_state *reg;
 	enum bpf_reg_type t;
 	int i;
 
+	if (state->frameno)
+		verbose(env, " frame%d:", state->frameno);
 	for (i = 0; i < MAX_BPF_REG; i++) {
 		reg = &state->regs[i];
 		t = reg->type;
 		if (t == NOT_INIT)
 			continue;
-		verbose(env, " R%d=%s", i, reg_type_str[t]);
+		verbose(env, " R%d", i);
+		print_liveness(env, reg->live);
+		verbose(env, "=%s", reg_type_str[t]);
 		if ((t == SCALAR_VALUE || t == PTR_TO_STACK) &&
 		    tnum_is_const(reg->var_off)) {
 			/* reg->off should be 0 for SCALAR_VALUE */
 			verbose(env, "%lld", reg->var_off.value + reg->off);
+			if (t == PTR_TO_STACK)
+				verbose(env, ",call_%d", func(env, reg)->callsite);
 		} else {
 			verbose(env, "(id=%d", reg->id);
 			if (t != SCALAR_VALUE)
@@ -277,16 +312,21 @@ static void print_verifier_state(struct bpf_verifier_env *env,
 		}
 	}
 	for (i = 0; i < state->allocated_stack / BPF_REG_SIZE; i++) {
-		if (state->stack[i].slot_type[0] == STACK_SPILL)
-			verbose(env, " fp%d=%s",
-				-MAX_BPF_STACK + i * BPF_REG_SIZE,
+		if (state->stack[i].slot_type[0] == STACK_SPILL) {
+			verbose(env, " fp%d",
+				(-i - 1) * BPF_REG_SIZE);
+			print_liveness(env, state->stack[i].spilled_ptr.live);
+			verbose(env, "=%s",
 				reg_type_str[state->stack[i].spilled_ptr.type]);
+		}
+		if (state->stack[i].slot_type[0] == STACK_ZERO)
+			verbose(env, " fp%d=0", (-i - 1) * BPF_REG_SIZE);
 	}
 	verbose(env, "\n");
 }
 
-static int copy_stack_state(struct bpf_verifier_state *dst,
-			    const struct bpf_verifier_state *src)
+static int copy_stack_state(struct bpf_func_state *dst,
+			    const struct bpf_func_state *src)
 {
 	if (!src->stack)
 		return 0;
@@ -302,13 +342,13 @@ static int copy_stack_state(struct bpf_verifier_state *dst,
 
 /* do_check() starts with zero-sized stack in struct bpf_verifier_state to
  * make it consume minimal amount of memory. check_stack_write() access from
- * the program calls into realloc_verifier_state() to grow the stack size.
+ * the program calls into realloc_func_state() to grow the stack size.
  * Note there is a non-zero 'parent' pointer inside bpf_verifier_state
  * which this function copies over. It points to previous bpf_verifier_state
  * which is never reallocated
  */
-static int realloc_verifier_state(struct bpf_verifier_state *state, int size,
-				  bool copy_old)
+static int realloc_func_state(struct bpf_func_state *state, int size,
+			      bool copy_old)
 {
 	u32 old_size = state->allocated_stack;
 	struct bpf_stack_state *new_stack;
@@ -341,10 +381,23 @@ static int realloc_verifier_state(struct bpf_verifier_state *state, int size,
 	return 0;
 }
 
+static void free_func_state(struct bpf_func_state *state)
+{
+	if (!state)
+		return;
+	kfree(state->stack);
+	kfree(state);
+}
+
 static void free_verifier_state(struct bpf_verifier_state *state,
 				bool free_self)
 {
-	kfree(state->stack);
+	int i;
+
+	for (i = 0; i <= state->curframe; i++) {
+		free_func_state(state->frame[i]);
+		state->frame[i] = NULL;
+	}
 	if (free_self)
 		kfree(state);
 }
@@ -352,18 +405,46 @@ static void free_verifier_state(struct bpf_verifier_state *state,
 /* copy verifier state from src to dst growing dst stack space
  * when necessary to accommodate larger src stack
  */
-static int copy_verifier_state(struct bpf_verifier_state *dst,
-			       const struct bpf_verifier_state *src)
+static int copy_func_state(struct bpf_func_state *dst,
+			   const struct bpf_func_state *src)
 {
 	int err;
 
-	err = realloc_verifier_state(dst, src->allocated_stack, false);
+	err = realloc_func_state(dst, src->allocated_stack, false);
 	if (err)
 		return err;
-	memcpy(dst, src, offsetof(struct bpf_verifier_state, allocated_stack));
+	memcpy(dst, src, offsetof(struct bpf_func_state, allocated_stack));
 	return copy_stack_state(dst, src);
 }
 
+static int copy_verifier_state(struct bpf_verifier_state *dst_state,
+			       const struct bpf_verifier_state *src)
+{
+	struct bpf_func_state *dst;
+	int i, err;
+
+	/* if dst has more stack frames than src, free them */
+	for (i = src->curframe + 1; i <= dst_state->curframe; i++) {
+		free_func_state(dst_state->frame[i]);
+		dst_state->frame[i] = NULL;
+	}
+	dst_state->curframe = src->curframe;
+	dst_state->parent = src->parent;
+	for (i = 0; i <= src->curframe; i++) {
+		dst = dst_state->frame[i];
+		if (!dst) {
+			dst = kzalloc(sizeof(*dst), GFP_KERNEL);
+			if (!dst)
+				return -ENOMEM;
+			dst_state->frame[i] = dst;
+		}
+		err = copy_func_state(dst, src->frame[i]);
+		if (err)
+			return err;
+	}
+	return 0;
+}
+
 static int pop_stack(struct bpf_verifier_env *env, int *prev_insn_idx,
 		     int *insn_idx)
 {
@@ -416,6 +497,8 @@ static struct bpf_verifier_state *push_stack(struct bpf_verifier_env *env,
 	}
 	return &elem->st;
 err:
+	free_verifier_state(env->cur_state, true);
+	env->cur_state = NULL;
 	/* pop all elements and return */
 	while (!pop_stack(env, NULL, NULL));
 	return NULL;
@@ -425,6 +508,10 @@ static struct bpf_verifier_state *push_stack(struct bpf_verifier_env *env,
 static const int caller_saved[CALLER_SAVED_REGS] = {
 	BPF_REG_0, BPF_REG_1, BPF_REG_2, BPF_REG_3, BPF_REG_4, BPF_REG_5
 };
+#define CALLEE_SAVED_REGS 4
+static const int callee_saved[CALLEE_SAVED_REGS] = {
+	BPF_REG_6, BPF_REG_7, BPF_REG_8, BPF_REG_9
+};
 
 static void __mark_reg_not_init(struct bpf_reg_state *reg);
 
@@ -449,6 +536,13 @@ static void __mark_reg_known_zero(struct bpf_reg_state *reg)
 	__mark_reg_known(reg, 0);
 }
 
+static void __mark_reg_const_zero(struct bpf_reg_state *reg)
+{
+	__mark_reg_known(reg, 0);
+	reg->off = 0;
+	reg->type = SCALAR_VALUE;
+}
+
 static void mark_reg_known_zero(struct bpf_verifier_env *env,
 				struct bpf_reg_state *regs, u32 regno)
 {
@@ -560,6 +654,7 @@ static void __mark_reg_unknown(struct bpf_reg_state *reg)
 	reg->id = 0;
 	reg->off = 0;
 	reg->var_off = tnum_unknown;
+	reg->frameno = 0;
 	__mark_reg_unbounded(reg);
 }
 
@@ -568,8 +663,8 @@ static void mark_reg_unknown(struct bpf_verifier_env *env,
 {
 	if (WARN_ON(regno >= MAX_BPF_REG)) {
 		verbose(env, "mark_reg_unknown(regs, %u)\n", regno);
-		/* Something bad happened, let's kill all regs */
-		for (regno = 0; regno < MAX_BPF_REG; regno++)
+		/* Something bad happened, let's kill all regs except FP */
+		for (regno = 0; regno < BPF_REG_FP; regno++)
 			__mark_reg_not_init(regs + regno);
 		return;
 	}
@@ -587,8 +682,8 @@ static void mark_reg_not_init(struct bpf_verifier_env *env,
 {
 	if (WARN_ON(regno >= MAX_BPF_REG)) {
 		verbose(env, "mark_reg_not_init(regs, %u)\n", regno);
-		/* Something bad happened, let's kill all regs */
-		for (regno = 0; regno < MAX_BPF_REG; regno++)
+		/* Something bad happened, let's kill all regs except FP */
+		for (regno = 0; regno < BPF_REG_FP; regno++)
 			__mark_reg_not_init(regs + regno);
 		return;
 	}
@@ -596,8 +691,9 @@ static void mark_reg_not_init(struct bpf_verifier_env *env,
 }
 
 static void init_reg_state(struct bpf_verifier_env *env,
-			   struct bpf_reg_state *regs)
+			   struct bpf_func_state *state)
 {
+	struct bpf_reg_state *regs = state->regs;
 	int i;
 
 	for (i = 0; i < MAX_BPF_REG; i++) {
@@ -608,41 +704,218 @@ static void init_reg_state(struct bpf_verifier_env *env,
 	/* frame pointer */
 	regs[BPF_REG_FP].type = PTR_TO_STACK;
 	mark_reg_known_zero(env, regs, BPF_REG_FP);
+	regs[BPF_REG_FP].frameno = state->frameno;
 
 	/* 1st arg to a function */
 	regs[BPF_REG_1].type = PTR_TO_CTX;
 	mark_reg_known_zero(env, regs, BPF_REG_1);
 }
 
+#define BPF_MAIN_FUNC (-1)
+static void init_func_state(struct bpf_verifier_env *env,
+			    struct bpf_func_state *state,
+			    int callsite, int frameno, int subprogno)
+{
+	state->callsite = callsite;
+	state->frameno = frameno;
+	state->subprogno = subprogno;
+	init_reg_state(env, state);
+}
+
 enum reg_arg_type {
 	SRC_OP,		/* register is used as source operand */
 	DST_OP,		/* register is used as destination operand */
 	DST_OP_NO_MARK	/* same as above, check only, don't mark */
 };
 
-static void mark_reg_read(const struct bpf_verifier_state *state, u32 regno)
+static int cmp_subprogs(const void *a, const void *b)
 {
-	struct bpf_verifier_state *parent = state->parent;
+	return *(int *)a - *(int *)b;
+}
+
+static int find_subprog(struct bpf_verifier_env *env, int off)
+{
+	u32 *p;
+
+	p = bsearch(&off, env->subprog_starts, env->subprog_cnt,
+		    sizeof(env->subprog_starts[0]), cmp_subprogs);
+	if (!p)
+		return -ENOENT;
+	return p - env->subprog_starts;
+}
+
+static int add_subprog(struct bpf_verifier_env *env, int off)
+{
+	int insn_cnt = env->prog->len;
+	int ret;
+
+	if (off >= insn_cnt || off < 0) {
+		verbose(env, "call to invalid destination\n");
+		return -EINVAL;
+	}
+	ret = find_subprog(env, off);
+	if (ret >= 0)
+		return 0;
+	if (env->subprog_cnt >= BPF_MAX_SUBPROGS) {
+		verbose(env, "too many subprograms\n");
+		return -E2BIG;
+	}
+	env->subprog_starts[env->subprog_cnt++] = off;
+	sort(env->subprog_starts, env->subprog_cnt,
+	     sizeof(env->subprog_starts[0]), cmp_subprogs, NULL);
+	return 0;
+}
+
+static int check_subprogs(struct bpf_verifier_env *env)
+{
+	int i, ret, subprog_start, subprog_end, off, cur_subprog = 0;
+	struct bpf_insn *insn = env->prog->insnsi;
+	int insn_cnt = env->prog->len;
+
+	/* determine subprog starts. The end is one before the next starts */
+	for (i = 0; i < insn_cnt; i++) {
+		if (insn[i].code != (BPF_JMP | BPF_CALL))
+			continue;
+		if (insn[i].src_reg != BPF_PSEUDO_CALL)
+			continue;
+		if (!env->allow_ptr_leaks) {
+			verbose(env, "function calls to other bpf functions are allowed for root only\n");
+			return -EPERM;
+		}
+		if (bpf_prog_is_dev_bound(env->prog->aux)) {
+			verbose(env, "function calls in offloaded programs are not supported yet\n");
+			return -EINVAL;
+		}
+		ret = add_subprog(env, i + insn[i].imm + 1);
+		if (ret < 0)
+			return ret;
+	}
+
+	if (env->log.level > 1)
+		for (i = 0; i < env->subprog_cnt; i++)
+			verbose(env, "func#%d @%d\n", i, env->subprog_starts[i]);
+
+	/* now check that all jumps are within the same subprog */
+	subprog_start = 0;
+	if (env->subprog_cnt == cur_subprog)
+		subprog_end = insn_cnt;
+	else
+		subprog_end = env->subprog_starts[cur_subprog++];
+	for (i = 0; i < insn_cnt; i++) {
+		u8 code = insn[i].code;
+
+		if (BPF_CLASS(code) != BPF_JMP)
+			goto next;
+		if (BPF_OP(code) == BPF_EXIT || BPF_OP(code) == BPF_CALL)
+			goto next;
+		off = i + insn[i].off + 1;
+		if (off < subprog_start || off >= subprog_end) {
+			verbose(env, "jump out of range from insn %d to %d\n", i, off);
+			return -EINVAL;
+		}
+next:
+		if (i == subprog_end - 1) {
+			/* to avoid fall-through from one subprog into another
+			 * the last insn of the subprog should be either exit
+			 * or unconditional jump back
+			 */
+			if (code != (BPF_JMP | BPF_EXIT) &&
+			    code != (BPF_JMP | BPF_JA)) {
+				verbose(env, "last insn is not an exit or jmp\n");
+				return -EINVAL;
+			}
+			subprog_start = subprog_end;
+			if (env->subprog_cnt == cur_subprog)
+				subprog_end = insn_cnt;
+			else
+				subprog_end = env->subprog_starts[cur_subprog++];
+		}
+	}
+	return 0;
+}
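
Note: an instruction layout that passes the checks above might look like the
sketch below, written with the selftest-style insn macros (assumed available).
The pseudo call's imm is relative to the instruction after the call, and both
the main function and the subprog end in BPF_EXIT so neither can fall through:

    static struct bpf_insn prog[] = {
            BPF_MOV64_IMM(BPF_REG_1, 1),            /* 0: arg for subprog */
            BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0,
                         BPF_PSEUDO_CALL, 0, 1),    /* 1: call -> insn 3 */
            BPF_EXIT_INSN(),                        /* 2: main ends in exit */
            BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),    /* 3: subprog start */
            BPF_EXIT_INSN(),                        /* 4: subprog ends in exit */
    };
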
+
+static
+struct bpf_verifier_state *skip_callee(struct bpf_verifier_env *env,
+				       const struct bpf_verifier_state *state,
+				       struct bpf_verifier_state *parent,
+				       u32 regno)
+{
+	struct bpf_verifier_state *tmp = NULL;
+
+	/* 'parent' could be a state of the caller and
+	 * 'state' could be a state of the callee. In such a case
+	 * parent->curframe < state->curframe
+	 * and it's ok for the r1 - r5 registers
+	 *
+	 * 'parent' could be a callee's state after it bpf_exit-ed.
+	 * In such a case parent->curframe > state->curframe
+	 * and it's ok for r0 only
+	 */
+	if (parent->curframe == state->curframe ||
+	    (parent->curframe < state->curframe &&
+	     regno >= BPF_REG_1 && regno <= BPF_REG_5) ||
+	    (parent->curframe > state->curframe &&
+	       regno == BPF_REG_0))
+		return parent;
+
+	if (parent->curframe > state->curframe &&
+	    regno >= BPF_REG_6) {
+		/* for callee saved regs we have to skip the whole chain
+		 * of states that belong to callee and mark as LIVE_READ
+		 * the registers before the call
+		 */
+		tmp = parent;
+		while (tmp && tmp->curframe != state->curframe) {
+			tmp = tmp->parent;
+		}
+		if (!tmp)
+			goto bug;
+		parent = tmp;
+	} else {
+		goto bug;
+	}
+	return parent;
+bug:
+	verbose(env, "verifier bug regno %d tmp %p\n", regno, tmp);
+	verbose(env, "regno %d parent frame %d current frame %d\n",
+		regno, parent->curframe, state->curframe);
+	return NULL;
+}
+
+static int mark_reg_read(struct bpf_verifier_env *env,
+			 const struct bpf_verifier_state *state,
+			 struct bpf_verifier_state *parent,
+			 u32 regno)
+{
+	bool writes = parent == state->parent; /* Observe write marks */
 
 	if (regno == BPF_REG_FP)
 		/* We don't need to worry about FP liveness because it's read-only */
-		return;
+		return 0;
 
 	while (parent) {
 		/* if read wasn't screened by an earlier write ... */
-		if (state->regs[regno].live & REG_LIVE_WRITTEN)
+		if (writes && state->frame[state->curframe]->regs[regno].live & REG_LIVE_WRITTEN)
 			break;
+		parent = skip_callee(env, state, parent, regno);
+		if (!parent)
+			return -EFAULT;
 		/* ... then we depend on parent's value */
-		parent->regs[regno].live |= REG_LIVE_READ;
+		parent->frame[parent->curframe]->regs[regno].live |= REG_LIVE_READ;
 		state = parent;
 		parent = state->parent;
+		writes = true;
 	}
+	return 0;
 }
 
 static int check_reg_arg(struct bpf_verifier_env *env, u32 regno,
 			 enum reg_arg_type t)
 {
-	struct bpf_reg_state *regs = env->cur_state->regs;
+	struct bpf_verifier_state *vstate = env->cur_state;
+	struct bpf_func_state *state = vstate->frame[vstate->curframe];
+	struct bpf_reg_state *regs = state->regs;
 
 	if (regno >= MAX_BPF_REG) {
 		verbose(env, "R%d is invalid\n", regno);
@@ -655,7 +928,7 @@ static int check_reg_arg(struct bpf_verifier_env *env, u32 regno,
 			verbose(env, "R%d !read_ok\n", regno);
 			return -EACCES;
 		}
-		mark_reg_read(env->cur_state, regno);
+		return mark_reg_read(env, vstate, vstate->parent, regno);
 	} else {
 		/* check whether register used as dest operand can be written to */
 		if (regno == BPF_REG_FP) {
@@ -686,17 +959,25 @@ static bool is_spillable_regtype(enum bpf_reg_type type)
 	}
 }
 
+/* Does this register contain a constant zero? */
+static bool register_is_null(struct bpf_reg_state *reg)
+{
+	return reg->type == SCALAR_VALUE && tnum_equals_const(reg->var_off, 0);
+}
+
 /* check_stack_read/write functions track spill/fill of registers,
  * stack boundary and alignment are checked in check_mem_access()
  */
 static int check_stack_write(struct bpf_verifier_env *env,
-			     struct bpf_verifier_state *state, int off,
-			     int size, int value_regno)
+			     struct bpf_func_state *state, /* func where register points to */
+			     int off, int size, int value_regno)
 {
+	struct bpf_func_state *cur; /* state of the current function */
 	int i, slot = -off - 1, spi = slot / BPF_REG_SIZE, err;
+	enum bpf_reg_type type;
 
-	err = realloc_verifier_state(state, round_up(slot + 1, BPF_REG_SIZE),
-				     true);
+	err = realloc_func_state(state, round_up(slot + 1, BPF_REG_SIZE),
+				 true);
 	if (err)
 		return err;
 	/* caller checked that off % size == 0 and -MAX_BPF_STACK <= off < 0,
@@ -709,8 +990,9 @@ static int check_stack_write(struct bpf_verifier_env *env,
 		return -EACCES;
 	}
 
+	cur = env->cur_state->frame[env->cur_state->curframe];
 	if (value_regno >= 0 &&
-	    is_spillable_regtype(state->regs[value_regno].type)) {
+	    is_spillable_regtype((type = cur->regs[value_regno].type))) {
 
 		/* register containing pointer is being spilled into stack */
 		if (size != BPF_REG_SIZE) {
@@ -718,51 +1000,116 @@ static int check_stack_write(struct bpf_verifier_env *env,
 			return -EACCES;
 		}
 
+		if (state != cur && type == PTR_TO_STACK) {
+			verbose(env, "cannot spill pointers to stack into stack frame of the caller\n");
+			return -EINVAL;
+		}
+
 		/* save register state */
-		state->stack[spi].spilled_ptr = state->regs[value_regno];
+		state->stack[spi].spilled_ptr = cur->regs[value_regno];
 		state->stack[spi].spilled_ptr.live |= REG_LIVE_WRITTEN;
 
 		for (i = 0; i < BPF_REG_SIZE; i++)
 			state->stack[spi].slot_type[i] = STACK_SPILL;
 	} else {
+		u8 type = STACK_MISC;
+
 		/* regular write of data into stack */
 		state->stack[spi].spilled_ptr = (struct bpf_reg_state) {};
 
+		/* only mark the slot as written if all 8 bytes were written;
+		 * otherwise read propagation may incorrectly stop too soon
+		 * when stack slots are partially written.
+		 * This heuristic means that read propagation will be
+		 * conservative, since it will add REG_LIVE_READ marks to
+		 * stack slots all the way back to the first state whenever a
+		 * program writes and reads less than 8 bytes.
+		 */
+		if (size == BPF_REG_SIZE)
+			state->stack[spi].spilled_ptr.live |= REG_LIVE_WRITTEN;
+
+		/* when we zero initialize stack slots mark them as such */
+		if (value_regno >= 0 &&
+		    register_is_null(&cur->regs[value_regno]))
+			type = STACK_ZERO;
+
 		for (i = 0; i < size; i++)
 			state->stack[spi].slot_type[(slot - i) % BPF_REG_SIZE] =
-				STACK_MISC;
+				type;
 	}
 	return 0;
 }
 
-static void mark_stack_slot_read(const struct bpf_verifier_state *state, int slot)
+/* registers of every function are unique and mark_reg_read() propagates
+ * the liveness in the following cases:
+ * - from callee into caller for R1 - R5 that were used as arguments
+ * - from caller into callee for R0 that is used as the result of the call
+ * - from caller to the same caller skipping states of the callee for R6 - R9,
+ *   since R6 - R9 are callee saved by the implicit function prologue and
+ *   caller's R6 != callee's R6, so when we propagate liveness up to
+ *   parent states we need to skip callee states for R6 - R9.
+ *
+ * stack slot marking is different, since stacks of caller and callee are
+ * accessible in both (since caller can pass a pointer to caller's stack to
+ * callee, which can pass it to another function), hence mark_stack_slot_read()
+ * has to propagate the stack liveness to all parent states at the given frame number.
+ * Consider code:
+ * f1() {
+ *   ptr = fp - 8;
+ *   *ptr = ctx;
+ *   call f2 {
+ *      .. = *ptr;
+ *   }
+ *   .. = *ptr;
+ * }
+ * The first *ptr reads from f1's stack, so mark_stack_slot_read() has
+ * to mark liveness at f1's frame, not f2's frame.
+ * The second *ptr also reads from f1's stack, so mark_stack_slot_read() has
+ * to propagate liveness to f2 states at f1's frame level and further into
+ * f1 states at f1's frame level until a write into that stack slot occurs.
+ */
+static void mark_stack_slot_read(struct bpf_verifier_env *env,
+				 const struct bpf_verifier_state *state,
+				 struct bpf_verifier_state *parent,
+				 int slot, int frameno)
 {
-	struct bpf_verifier_state *parent = state->parent;
+	bool writes = parent == state->parent; /* Observe write marks */
 
 	while (parent) {
+		if (parent->frame[frameno]->allocated_stack <= slot * BPF_REG_SIZE)
+			/* since the LIVE_WRITTEN mark is only set for a full
+			 * 8-byte write, the read marks are conservative and the
+			 * parent state may not even have the stack allocated.
+			 * In that case end the propagation, since the loop has
+			 * reached the beginning of the function
+			 */
+			break;
 		/* if read wasn't screened by an earlier write ... */
-		if (state->stack[slot].spilled_ptr.live & REG_LIVE_WRITTEN)
+		if (writes && state->frame[frameno]->stack[slot].spilled_ptr.live & REG_LIVE_WRITTEN)
 			break;
 		/* ... then we depend on parent's value */
-		parent->stack[slot].spilled_ptr.live |= REG_LIVE_READ;
+		parent->frame[frameno]->stack[slot].spilled_ptr.live |= REG_LIVE_READ;
 		state = parent;
 		parent = state->parent;
+		writes = true;
 	}
 }
 
 static int check_stack_read(struct bpf_verifier_env *env,
-			    struct bpf_verifier_state *state, int off, int size,
-			    int value_regno)
+			    struct bpf_func_state *reg_state /* func where register points to */,
+			    int off, int size, int value_regno)
 {
+	struct bpf_verifier_state *vstate = env->cur_state;
+	struct bpf_func_state *state = vstate->frame[vstate->curframe];
 	int i, slot = -off - 1, spi = slot / BPF_REG_SIZE;
 	u8 *stype;
 
-	if (state->allocated_stack <= slot) {
+	if (reg_state->allocated_stack <= slot) {
 		verbose(env, "invalid read from stack off %d+0 size %d\n",
 			off, size);
 		return -EACCES;
 	}
-	stype = state->stack[spi].slot_type;
+	stype = reg_state->stack[spi].slot_type;
 
 	if (stype[0] == STACK_SPILL) {
 		if (size != BPF_REG_SIZE) {
@@ -778,21 +1125,44 @@ static int check_stack_read(struct bpf_verifier_env *env,
 
 		if (value_regno >= 0) {
 			/* restore register state from stack */
-			state->regs[value_regno] = state->stack[spi].spilled_ptr;
-			mark_stack_slot_read(state, spi);
+			state->regs[value_regno] = reg_state->stack[spi].spilled_ptr;
+			/* mark reg as written since spilled pointer state likely
+			 * has its liveness marks cleared by is_state_visited()
+			 * which resets stack/reg liveness for state transitions
+			 */
+			state->regs[value_regno].live |= REG_LIVE_WRITTEN;
 		}
+		mark_stack_slot_read(env, vstate, vstate->parent, spi,
+				     reg_state->frameno);
 		return 0;
 	} else {
+		int zeros = 0;
+
 		for (i = 0; i < size; i++) {
-			if (stype[(slot - i) % BPF_REG_SIZE] != STACK_MISC) {
-				verbose(env, "invalid read from stack off %d+%d size %d\n",
-					off, i, size);
-				return -EACCES;
+			if (stype[(slot - i) % BPF_REG_SIZE] == STACK_MISC)
+				continue;
+			if (stype[(slot - i) % BPF_REG_SIZE] == STACK_ZERO) {
+				zeros++;
+				continue;
 			}
+			verbose(env, "invalid read from stack off %d+%d size %d\n",
+				off, i, size);
+			return -EACCES;
 		}
-		if (value_regno >= 0)
-			/* have read misc data from the stack */
-			mark_reg_unknown(env, state->regs, value_regno);
+		mark_stack_slot_read(env, vstate, vstate->parent, spi,
+				     reg_state->frameno);
+		if (value_regno >= 0) {
+			if (zeros == size) {
+				/* any size read into register is zero extended,
+				 * so the whole register == const_zero
+				 */
+				__mark_reg_const_zero(&state->regs[value_regno]);
+			} else {
+				/* have read misc data from the stack */
+				mark_reg_unknown(env, state->regs, value_regno);
+			}
+			state->regs[value_regno].live |= REG_LIVE_WRITTEN;
+		}
 		return 0;
 	}
 }
@@ -817,7 +1187,8 @@ static int __check_map_access(struct bpf_verifier_env *env, u32 regno, int off,
 static int check_map_access(struct bpf_verifier_env *env, u32 regno,
 			    int off, int size, bool zero_size_allowed)
 {
-	struct bpf_verifier_state *state = env->cur_state;
+	struct bpf_verifier_state *vstate = env->cur_state;
+	struct bpf_func_state *state = vstate->frame[vstate->curframe];
 	struct bpf_reg_state *reg = &state->regs[regno];
 	int err;
 
@@ -1072,6 +1443,103 @@ static int check_ptr_alignment(struct bpf_verifier_env *env,
 					   strict);
 }
 
+static int update_stack_depth(struct bpf_verifier_env *env,
+			      const struct bpf_func_state *func,
+			      int off)
+{
+	u16 stack = env->subprog_stack_depth[func->subprogno];
+
+	if (stack >= -off)
+		return 0;
+
+	/* update known max for given subprogram */
+	env->subprog_stack_depth[func->subprogno] = -off;
+	return 0;
+}
+
+/* starting from main bpf function walk all instructions of the function
+ * and recursively walk all callees that given function can call.
+ * Ignore jump and exit insns.
+ * Since recursion is prevented by check_cfg() this algorithm
+ * only needs a local stack of MAX_CALL_FRAMES to remember callsites
+ */
+static int check_max_stack_depth(struct bpf_verifier_env *env)
+{
+	int depth = 0, frame = 0, subprog = 0, i = 0, subprog_end;
+	struct bpf_insn *insn = env->prog->insnsi;
+	int insn_cnt = env->prog->len;
+	int ret_insn[MAX_CALL_FRAMES];
+	int ret_prog[MAX_CALL_FRAMES];
+
+process_func:
+	/* round up to 32 bytes, since this is the granularity
+	 * of the interpreter stack size
+	 */
+	depth += round_up(max_t(u32, env->subprog_stack_depth[subprog], 1), 32);
+	if (depth > MAX_BPF_STACK) {
+		verbose(env, "combined stack size of %d calls is %d. Too large\n",
+			frame + 1, depth);
+		return -EACCES;
+	}
+continue_func:
+	if (env->subprog_cnt == subprog)
+		subprog_end = insn_cnt;
+	else
+		subprog_end = env->subprog_starts[subprog];
+	for (; i < subprog_end; i++) {
+		if (insn[i].code != (BPF_JMP | BPF_CALL))
+			continue;
+		if (insn[i].src_reg != BPF_PSEUDO_CALL)
+			continue;
+		/* remember insn and function to return to */
+		ret_insn[frame] = i + 1;
+		ret_prog[frame] = subprog;
+
+		/* find the callee */
+		i = i + insn[i].imm + 1;
+		subprog = find_subprog(env, i);
+		if (subprog < 0) {
+			WARN_ONCE(1, "verifier bug. No program starts at insn %d\n",
+				  i);
+			return -EFAULT;
+		}
+		subprog++;
+		frame++;
+		if (frame >= MAX_CALL_FRAMES) {
+			WARN_ONCE(1, "verifier bug. Call stack is too deep\n");
+			return -EFAULT;
+		}
+		goto process_func;
+	}
+	/* end of for() loop means the last insn of the 'subprog'
+	 * was reached. Doesn't matter whether it was JA or EXIT
+	 */
+	if (frame == 0)
+		return 0;
+	depth -= round_up(max_t(u32, env->subprog_stack_depth[subprog], 1), 32);
+	frame--;
+	i = ret_insn[frame];
+	subprog = ret_prog[frame];
+	goto continue_func;
+}
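
Note: a worked example of the accumulation above, with hypothetical per-subprog
depths; each frame is rounded up to the interpreter's 32-byte granularity
before being added, and the total must stay within the 512-byte MAX_BPF_STACK
limit:

    #include <stdio.h>

    static unsigned int round_up32(unsigned int x)
    {
            return (x + 31) & ~31u;
    }

    int main(void)
    {
            /* hypothetical deepest chain: main(64) -> f1(50) -> f2(10) */
            unsigned int depths[] = { 64, 50, 10 };
            unsigned int i, total = 0;

            for (i = 0; i < 3; i++)
                    total += round_up32(depths[i]);

            /* 64 + 64 + 32 = 160, well under the 512-byte limit */
            printf("combined stack = %u bytes\n", total);
            return 0;
    }
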
+
+#ifndef CONFIG_BPF_JIT_ALWAYS_ON
+static int get_callee_stack_depth(struct bpf_verifier_env *env,
+				  const struct bpf_insn *insn, int idx)
+{
+	int start = idx + insn->imm + 1, subprog;
+
+	subprog = find_subprog(env, start);
+	if (subprog < 0) {
+		WARN_ONCE(1, "verifier bug. No program starts at insn %d\n",
+			  start);
+		return -EFAULT;
+	}
+	subprog++;
+	return env->subprog_stack_depth[subprog];
+}
+#endif
+
 /* truncate register to smaller size (in bytes)
  * must be called with size < BPF_REG_SIZE
  */
@@ -1105,9 +1573,9 @@ static int check_mem_access(struct bpf_verifier_env *env, int insn_idx, u32 regn
 			    int bpf_size, enum bpf_access_type t,
 			    int value_regno)
 {
-	struct bpf_verifier_state *state = env->cur_state;
 	struct bpf_reg_state *regs = cur_regs(env);
 	struct bpf_reg_state *reg = regs + regno;
+	struct bpf_func_state *state;
 	int size, err = 0;
 
 	size = bpf_size_to_bytes(bpf_size);
@@ -1196,8 +1664,10 @@ static int check_mem_access(struct bpf_verifier_env *env, int insn_idx, u32 regn
 			return -EACCES;
 		}
 
-		if (env->prog->aux->stack_depth < -off)
-			env->prog->aux->stack_depth = -off;
+		state = func(env, reg);
+		err = update_stack_depth(env, state, off);
+		if (err)
+			return err;
 
 		if (t == BPF_WRITE)
 			err = check_stack_write(env, state, off, size,
@@ -1269,12 +1739,6 @@ static int check_xadd(struct bpf_verifier_env *env, int insn_idx, struct bpf_ins
 				BPF_SIZE(insn->code), BPF_WRITE, -1);
 }
 
-/* Does this register contain a constant zero? */
-static bool register_is_null(struct bpf_reg_state reg)
-{
-	return reg.type == SCALAR_VALUE && tnum_equals_const(reg.var_off, 0);
-}
-
 /* when register 'regno' is passed into function that will read 'access_size'
  * bytes from that pointer, make sure that it's within stack boundary
  * and all elements of stack are initialized.
@@ -1285,32 +1749,32 @@ static int check_stack_boundary(struct bpf_verifier_env *env, int regno,
 				int access_size, bool zero_size_allowed,
 				struct bpf_call_arg_meta *meta)
 {
-	struct bpf_verifier_state *state = env->cur_state;
-	struct bpf_reg_state *regs = state->regs;
+	struct bpf_reg_state *reg = cur_regs(env) + regno;
+	struct bpf_func_state *state = func(env, reg);
 	int off, i, slot, spi;
 
-	if (regs[regno].type != PTR_TO_STACK) {
+	if (reg->type != PTR_TO_STACK) {
 		/* Allow zero-byte read from NULL, regardless of pointer type */
 		if (zero_size_allowed && access_size == 0 &&
-		    register_is_null(regs[regno]))
+		    register_is_null(reg))
 			return 0;
 
 		verbose(env, "R%d type=%s expected=%s\n", regno,
-			reg_type_str[regs[regno].type],
+			reg_type_str[reg->type],
 			reg_type_str[PTR_TO_STACK]);
 		return -EACCES;
 	}
 
 	/* Only allow fixed-offset stack reads */
-	if (!tnum_is_const(regs[regno].var_off)) {
+	if (!tnum_is_const(reg->var_off)) {
 		char tn_buf[48];
 
-		tnum_strn(tn_buf, sizeof(tn_buf), regs[regno].var_off);
+		tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
 		verbose(env, "invalid variable stack read R%d var_off=%s\n",
 			regno, tn_buf);
 		return -EACCES;
 	}
-	off = regs[regno].off + regs[regno].var_off.value;
+	off = reg->off + reg->var_off.value;
 	if (off >= 0 || off < -MAX_BPF_STACK || off + access_size > 0 ||
 	    access_size < 0 || (access_size == 0 && !zero_size_allowed)) {
 		verbose(env, "invalid stack type R%d off=%d access_size=%d\n",
@@ -1318,9 +1782,6 @@ static int check_stack_boundary(struct bpf_verifier_env *env, int regno,
 		return -EACCES;
 	}
 
-	if (env->prog->aux->stack_depth < -off)
-		env->prog->aux->stack_depth = -off;
-
 	if (meta && meta->raw_mode) {
 		meta->access_size = access_size;
 		meta->regno = regno;
@@ -1328,17 +1789,32 @@ static int check_stack_boundary(struct bpf_verifier_env *env, int regno,
 	}
 
 	for (i = 0; i < access_size; i++) {
+		u8 *stype;
+
 		slot = -(off + i) - 1;
 		spi = slot / BPF_REG_SIZE;
-		if (state->allocated_stack <= slot ||
-		    state->stack[spi].slot_type[slot % BPF_REG_SIZE] !=
-			STACK_MISC) {
-			verbose(env, "invalid indirect read from stack off %d+%d size %d\n",
-				off, i, access_size);
-			return -EACCES;
+		if (state->allocated_stack <= slot)
+			goto err;
+		stype = &state->stack[spi].slot_type[slot % BPF_REG_SIZE];
+		if (*stype == STACK_MISC)
+			goto mark;
+		if (*stype == STACK_ZERO) {
+			/* helper can write anything into the stack */
+			*stype = STACK_MISC;
+			goto mark;
 		}
+err:
+		verbose(env, "invalid indirect read from stack off %d+%d size %d\n",
+			off, i, access_size);
+		return -EACCES;
+mark:
+		/* reading any byte out of 8-byte 'spill_slot' will cause
+		 * the whole slot to be marked as 'read'
+		 */
+		mark_stack_slot_read(env, env->cur_state, env->cur_state->parent,
+				     spi, state->frameno);
 	}
-	return 0;
+	return update_stack_depth(env, state, off);
 }
 
 static int check_helper_mem_access(struct bpf_verifier_env *env, int regno,
@@ -1418,7 +1894,7 @@ static int check_func_arg(struct bpf_verifier_env *env, u32 regno,
 		 * passed in as argument, it's a SCALAR_VALUE type. Final test
 		 * happens during stack boundary checking.
 		 */
-		if (register_is_null(*reg) &&
+		if (register_is_null(reg) &&
 		    arg_type == ARG_PTR_TO_MEM_OR_NULL)
 			/* final test in check_stack_boundary() */;
 		else if (!type_is_pkt_pointer(type) &&
@@ -1591,6 +2067,10 @@ static int check_map_func_compatibility(struct bpf_verifier_env *env,
 	case BPF_FUNC_tail_call:
 		if (map->map_type != BPF_MAP_TYPE_PROG_ARRAY)
 			goto error;
+		if (env->subprog_cnt) {
+			verbose(env, "tail_calls are not allowed in programs with bpf-to-bpf calls\n");
+			return -EINVAL;
+		}
 		break;
 	case BPF_FUNC_perf_event_read:
 	case BPF_FUNC_perf_event_output:
@@ -1652,9 +2132,9 @@ static int check_raw_mode(const struct bpf_func_proto *fn)
 /* Packet data might have moved, any old PTR_TO_PACKET[_META,_END]
  * are now invalid, so turn them into unknown SCALAR_VALUE.
  */
-static void clear_all_pkt_pointers(struct bpf_verifier_env *env)
+static void __clear_all_pkt_pointers(struct bpf_verifier_env *env,
+				     struct bpf_func_state *state)
 {
-	struct bpf_verifier_state *state = env->cur_state;
 	struct bpf_reg_state *regs = state->regs, *reg;
 	int i;
 
@@ -1671,7 +2151,121 @@ static void clear_all_pkt_pointers(struct bpf_verifier_env *env)
 	}
 }
 
-static int check_call(struct bpf_verifier_env *env, int func_id, int insn_idx)
+static void clear_all_pkt_pointers(struct bpf_verifier_env *env)
+{
+	struct bpf_verifier_state *vstate = env->cur_state;
+	int i;
+
+	for (i = 0; i <= vstate->curframe; i++)
+		__clear_all_pkt_pointers(env, vstate->frame[i]);
+}
+
+static int check_func_call(struct bpf_verifier_env *env, struct bpf_insn *insn,
+			   int *insn_idx)
+{
+	struct bpf_verifier_state *state = env->cur_state;
+	struct bpf_func_state *caller, *callee;
+	int i, subprog, target_insn;
+
+	if (state->curframe + 1 >= MAX_CALL_FRAMES) {
+		verbose(env, "the call stack of %d frames is too deep\n",
+			state->curframe + 2);
+		return -E2BIG;
+	}
+
+	target_insn = *insn_idx + insn->imm;
+	subprog = find_subprog(env, target_insn + 1);
+	if (subprog < 0) {
+		verbose(env, "verifier bug. No program starts at insn %d\n",
+			target_insn + 1);
+		return -EFAULT;
+	}
+
+	caller = state->frame[state->curframe];
+	if (state->frame[state->curframe + 1]) {
+		verbose(env, "verifier bug. Frame %d already allocated\n",
+			state->curframe + 1);
+		return -EFAULT;
+	}
+
+	callee = kzalloc(sizeof(*callee), GFP_KERNEL);
+	if (!callee)
+		return -ENOMEM;
+	state->frame[state->curframe + 1] = callee;
+
+	/* callee cannot access r0, r6 - r9 for reading and has to write
+	 * into its own stack before reading from it.
+	 * callee can read/write into caller's stack
+	 */
+	init_func_state(env, callee,
+			/* remember the callsite, it will be used by bpf_exit */
+			*insn_idx /* callsite */,
+			state->curframe + 1 /* frameno within this callchain */,
+			subprog + 1 /* subprog number within this prog */);
+
+	/* copy r1 - r5 args that callee can access */
+	for (i = BPF_REG_1; i <= BPF_REG_5; i++)
+		callee->regs[i] = caller->regs[i];
+
+	/* after the call registers r0 - r5 were scratched */
+	for (i = 0; i < CALLER_SAVED_REGS; i++) {
+		mark_reg_not_init(env, caller->regs, caller_saved[i]);
+		check_reg_arg(env, caller_saved[i], DST_OP_NO_MARK);
+	}
+
+	/* only increment it after check_reg_arg() finished */
+	state->curframe++;
+
+	/* and go analyze first insn of the callee */
+	*insn_idx = target_insn;
+
+	if (env->log.level) {
+		verbose(env, "caller:\n");
+		print_verifier_state(env, caller);
+		verbose(env, "callee:\n");
+		print_verifier_state(env, callee);
+	}
+	return 0;
+}
+
+static int prepare_func_exit(struct bpf_verifier_env *env, int *insn_idx)
+{
+	struct bpf_verifier_state *state = env->cur_state;
+	struct bpf_func_state *caller, *callee;
+	struct bpf_reg_state *r0;
+
+	callee = state->frame[state->curframe];
+	r0 = &callee->regs[BPF_REG_0];
+	if (r0->type == PTR_TO_STACK) {
+		/* technically it's ok to return the caller's stack pointer
+		 * (or the caller's caller's pointer) back to the caller,
+		 * since these pointers remain valid. Only the current stack
+		 * pointer becomes invalid as soon as the function exits,
+		 * but let's be conservative
+		 */
+		verbose(env, "cannot return stack pointer to the caller\n");
+		return -EINVAL;
+	}
+
+	state->curframe--;
+	caller = state->frame[state->curframe];
+	/* return to the caller whatever r0 had in the callee */
+	caller->regs[BPF_REG_0] = *r0;
+
+	*insn_idx = callee->callsite + 1;
+	if (env->log.level) {
+		verbose(env, "returning from callee:\n");
+		print_verifier_state(env, callee);
+		verbose(env, "to caller at %d:\n", *insn_idx);
+		print_verifier_state(env, caller);
+	}
+	/* clear everything in the callee */
+	free_func_state(callee);
+	state->frame[state->curframe + 1] = NULL;
+	return 0;
+}
+
+static int check_helper_call(struct bpf_verifier_env *env, int func_id, int insn_idx)
 {
 	const struct bpf_func_proto *fn = NULL;
 	struct bpf_reg_state *regs;
@@ -1871,7 +2465,9 @@ static int adjust_ptr_min_max_vals(struct bpf_verifier_env *env,
 				   const struct bpf_reg_state *ptr_reg,
 				   const struct bpf_reg_state *off_reg)
 {
-	struct bpf_reg_state *regs = cur_regs(env), *dst_reg;
+	struct bpf_verifier_state *vstate = env->cur_state;
+	struct bpf_func_state *state = vstate->frame[vstate->curframe];
+	struct bpf_reg_state *regs = state->regs, *dst_reg;
 	bool known = tnum_is_const(off_reg->var_off);
 	s64 smin_val = off_reg->smin_value, smax_val = off_reg->smax_value,
 	    smin_ptr = ptr_reg->smin_value, smax_ptr = ptr_reg->smax_value;
@@ -1883,13 +2479,13 @@ static int adjust_ptr_min_max_vals(struct bpf_verifier_env *env,
 	dst_reg = &regs[dst];
 
 	if (WARN_ON_ONCE(known && (smin_val != smax_val))) {
-		print_verifier_state(env, env->cur_state);
+		print_verifier_state(env, state);
 		verbose(env,
 			"verifier internal error: known but bad sbounds\n");
 		return -EINVAL;
 	}
 	if (WARN_ON_ONCE(known && (umin_val != umax_val))) {
-		print_verifier_state(env, env->cur_state);
+		print_verifier_state(env, state);
 		verbose(env,
 			"verifier internal error: known but bad ubounds\n");
 		return -EINVAL;
@@ -2301,7 +2897,9 @@ static int adjust_scalar_min_max_vals(struct bpf_verifier_env *env,
 static int adjust_reg_min_max_vals(struct bpf_verifier_env *env,
 				   struct bpf_insn *insn)
 {
-	struct bpf_reg_state *regs = cur_regs(env), *dst_reg, *src_reg;
+	struct bpf_verifier_state *vstate = env->cur_state;
+	struct bpf_func_state *state = vstate->frame[vstate->curframe];
+	struct bpf_reg_state *regs = state->regs, *dst_reg, *src_reg;
 	struct bpf_reg_state *ptr_reg = NULL, off_reg = {0};
 	u8 opcode = BPF_OP(insn->code);
 
@@ -2352,12 +2950,12 @@ static int adjust_reg_min_max_vals(struct bpf_verifier_env *env,
 
 	/* Got here implies adding two SCALAR_VALUEs */
 	if (WARN_ON_ONCE(ptr_reg)) {
-		print_verifier_state(env, env->cur_state);
+		print_verifier_state(env, state);
 		verbose(env, "verifier internal error: unexpected ptr_reg\n");
 		return -EINVAL;
 	}
 	if (WARN_ON(!src_reg)) {
-		print_verifier_state(env, env->cur_state);
+		print_verifier_state(env, state);
 		verbose(env, "verifier internal error: no src_reg\n");
 		return -EINVAL;
 	}
@@ -2514,14 +3112,15 @@ static int check_alu_op(struct bpf_verifier_env *env, struct bpf_insn *insn)
 	return 0;
 }
 
-static void find_good_pkt_pointers(struct bpf_verifier_state *state,
+static void find_good_pkt_pointers(struct bpf_verifier_state *vstate,
 				   struct bpf_reg_state *dst_reg,
 				   enum bpf_reg_type type,
 				   bool range_right_open)
 {
+	struct bpf_func_state *state = vstate->frame[vstate->curframe];
 	struct bpf_reg_state *regs = state->regs, *reg;
 	u16 new_range;
-	int i;
+	int i, j;
 
 	if (dst_reg->off < 0 ||
 	    (dst_reg->off == 0 && range_right_open))
@@ -2591,12 +3190,15 @@ static void find_good_pkt_pointers(struct bpf_verifier_state *state,
 			/* keep the maximum range already checked */
 			regs[i].range = max(regs[i].range, new_range);
 
-	for (i = 0; i < state->allocated_stack / BPF_REG_SIZE; i++) {
-		if (state->stack[i].slot_type[0] != STACK_SPILL)
-			continue;
-		reg = &state->stack[i].spilled_ptr;
-		if (reg->type == type && reg->id == dst_reg->id)
-			reg->range = max(reg->range, new_range);
+	for (j = 0; j <= vstate->curframe; j++) {
+		state = vstate->frame[j];
+		for (i = 0; i < state->allocated_stack / BPF_REG_SIZE; i++) {
+			if (state->stack[i].slot_type[0] != STACK_SPILL)
+				continue;
+			reg = &state->stack[i].spilled_ptr;
+			if (reg->type == type && reg->id == dst_reg->id)
+				reg->range = max(reg->range, new_range);
+		}
 	}
 }
 
@@ -2834,20 +3436,24 @@ static void mark_map_reg(struct bpf_reg_state *regs, u32 regno, u32 id,
 /* The logic is similar to find_good_pkt_pointers(), both could eventually
  * be folded together at some point.
  */
-static void mark_map_regs(struct bpf_verifier_state *state, u32 regno,
+static void mark_map_regs(struct bpf_verifier_state *vstate, u32 regno,
 			  bool is_null)
 {
+	struct bpf_func_state *state = vstate->frame[vstate->curframe];
 	struct bpf_reg_state *regs = state->regs;
 	u32 id = regs[regno].id;
-	int i;
+	int i, j;
 
 	for (i = 0; i < MAX_BPF_REG; i++)
 		mark_map_reg(regs, i, id, is_null);
 
-	for (i = 0; i < state->allocated_stack / BPF_REG_SIZE; i++) {
-		if (state->stack[i].slot_type[0] != STACK_SPILL)
-			continue;
-		mark_map_reg(&state->stack[i].spilled_ptr, 0, id, is_null);
+	for (j = 0; j <= vstate->curframe; j++) {
+		state = vstate->frame[j];
+		for (i = 0; i < state->allocated_stack / BPF_REG_SIZE; i++) {
+			if (state->stack[i].slot_type[0] != STACK_SPILL)
+				continue;
+			mark_map_reg(&state->stack[i].spilled_ptr, 0, id, is_null);
+		}
 	}
 }
 
@@ -2947,8 +3553,10 @@ static bool try_match_pkt_pointers(const struct bpf_insn *insn,
 static int check_cond_jmp_op(struct bpf_verifier_env *env,
 			     struct bpf_insn *insn, int *insn_idx)
 {
-	struct bpf_verifier_state *other_branch, *this_branch = env->cur_state;
-	struct bpf_reg_state *regs = this_branch->regs, *dst_reg;
+	struct bpf_verifier_state *this_branch = env->cur_state;
+	struct bpf_verifier_state *other_branch;
+	struct bpf_reg_state *regs = this_branch->frame[this_branch->curframe]->regs;
+	struct bpf_reg_state *dst_reg, *other_branch_regs;
 	u8 opcode = BPF_OP(insn->code);
 	int err;
 
@@ -2991,8 +3599,9 @@ static int check_cond_jmp_op(struct bpf_verifier_env *env,
 	if (BPF_SRC(insn->code) == BPF_K &&
 	    (opcode == BPF_JEQ || opcode == BPF_JNE) &&
 	    dst_reg->type == SCALAR_VALUE &&
-	    tnum_equals_const(dst_reg->var_off, insn->imm)) {
-		if (opcode == BPF_JEQ) {
+	    tnum_is_const(dst_reg->var_off)) {
+		if ((opcode == BPF_JEQ && dst_reg->var_off.value == insn->imm) ||
+		    (opcode == BPF_JNE && dst_reg->var_off.value != insn->imm)) {
 			/* if (imm == imm) goto pc+off;
 			 * only follow the goto, ignore fall-through
 			 */
@@ -3010,6 +3619,7 @@ static int check_cond_jmp_op(struct bpf_verifier_env *env,
 	other_branch = push_stack(env, *insn_idx + insn->off + 1, *insn_idx);
 	if (!other_branch)
 		return -EFAULT;
+	other_branch_regs = other_branch->frame[other_branch->curframe]->regs;
 
 	/* detect if we are comparing against a constant value so we can adjust
 	 * our min/max values for our dst register.
@@ -3022,22 +3632,22 @@ static int check_cond_jmp_op(struct bpf_verifier_env *env,
 		if (dst_reg->type == SCALAR_VALUE &&
 		    regs[insn->src_reg].type == SCALAR_VALUE) {
 			if (tnum_is_const(regs[insn->src_reg].var_off))
-				reg_set_min_max(&other_branch->regs[insn->dst_reg],
+				reg_set_min_max(&other_branch_regs[insn->dst_reg],
 						dst_reg, regs[insn->src_reg].var_off.value,
 						opcode);
 			else if (tnum_is_const(dst_reg->var_off))
-				reg_set_min_max_inv(&other_branch->regs[insn->src_reg],
+				reg_set_min_max_inv(&other_branch_regs[insn->src_reg],
 						    &regs[insn->src_reg],
 						    dst_reg->var_off.value, opcode);
 			else if (opcode == BPF_JEQ || opcode == BPF_JNE)
 				/* Comparing for equality, we can combine knowledge */
-				reg_combine_min_max(&other_branch->regs[insn->src_reg],
-						    &other_branch->regs[insn->dst_reg],
+				reg_combine_min_max(&other_branch_regs[insn->src_reg],
+						    &other_branch_regs[insn->dst_reg],
 						    &regs[insn->src_reg],
 						    &regs[insn->dst_reg], opcode);
 		}
 	} else if (dst_reg->type == SCALAR_VALUE) {
-		reg_set_min_max(&other_branch->regs[insn->dst_reg],
+		reg_set_min_max(&other_branch_regs[insn->dst_reg],
 					dst_reg, insn->imm, opcode);
 	}
 
@@ -3058,7 +3668,7 @@ static int check_cond_jmp_op(struct bpf_verifier_env *env,
 		return -EACCES;
 	}
 	if (env->log.level)
-		print_verifier_state(env, this_branch);
+		print_verifier_state(env, this_branch->frame[this_branch->curframe]);
 	return 0;
 }
 
@@ -3143,6 +3753,18 @@ static int check_ld_abs(struct bpf_verifier_env *env, struct bpf_insn *insn)
 		return -EINVAL;
 	}
 
+	if (env->subprog_cnt) {
+		/* when a program has LD_ABS insns, JITs and the interpreter
+		 * assume that r1 == ctx == skb, which is not the case for
+		 * callees that can have arbitrary arguments. It's problematic
+		 * for the main prog as well, since JITs would need to analyze
+		 * all functions in order to make proper register save/restore
+		 * decisions in the main prog. Hence disallow LD_ABS with calls
+		 */
+		verbose(env, "BPF_LD_[ABS|IND] instructions cannot be mixed with bpf-to-bpf calls\n");
+		return -EINVAL;
+	}
+
 	if (insn->dst_reg != BPF_REG_0 || insn->off != 0 ||
 	    BPF_SIZE(insn->code) == BPF_DW ||
 	    (mode == BPF_ABS && insn->src_reg != BPF_REG_0)) {
@@ -3319,6 +3941,10 @@ static int check_cfg(struct bpf_verifier_env *env)
 	int ret = 0;
 	int i, t;
 
+	ret = check_subprogs(env);
+	if (ret < 0)
+		return ret;
+
 	insn_state = kcalloc(insn_cnt, sizeof(int), GFP_KERNEL);
 	if (!insn_state)
 		return -ENOMEM;
@@ -3351,6 +3977,14 @@ static int check_cfg(struct bpf_verifier_env *env)
 				goto err_free;
 			if (t + 1 < insn_cnt)
 				env->explored_states[t + 1] = STATE_LIST_MARK;
+			if (insns[t].src_reg == BPF_PSEUDO_CALL) {
+				env->explored_states[t] = STATE_LIST_MARK;
+				ret = push_insn(t, t + insns[t].imm + 1, BRANCH, env);
+				if (ret == 1)
+					goto peek_stack;
+				else if (ret < 0)
+					goto err_free;
+			}
 		} else if (opcode == BPF_JA) {
 			if (BPF_SRC(insns[t].code) != BPF_K) {
 				ret = -EINVAL;
@@ -3469,11 +4103,21 @@ static bool check_ids(u32 old_id, u32 cur_id, struct idpair *idmap)
 static bool regsafe(struct bpf_reg_state *rold, struct bpf_reg_state *rcur,
 		    struct idpair *idmap)
 {
+	bool equal;
+
 	if (!(rold->live & REG_LIVE_READ))
 		/* explored state didn't use this */
 		return true;
 
-	if (memcmp(rold, rcur, offsetof(struct bpf_reg_state, live)) == 0)
+	equal = memcmp(rold, rcur, offsetof(struct bpf_reg_state, frameno)) == 0;
+
+	if (rold->type == PTR_TO_STACK)
+		/* two stack pointers are equal only if they're pointing to
+		 * the same stack frame, since fp-8 in foo != fp-8 in bar
+		 */
+		return equal && rold->frameno == rcur->frameno;
+
+	if (equal)
 		return true;
 
 	if (rold->type == NOT_INIT)
@@ -3545,7 +4189,6 @@ static bool regsafe(struct bpf_reg_state *rold, struct bpf_reg_state *rcur,
 		       tnum_in(rold->var_off, rcur->var_off);
 	case PTR_TO_CTX:
 	case CONST_PTR_TO_MAP:
-	case PTR_TO_STACK:
 	case PTR_TO_PACKET_END:
 		/* Only valid matches are exact, which memcmp() above
 		 * would have accepted
@@ -3560,8 +4203,8 @@ static bool regsafe(struct bpf_reg_state *rold, struct bpf_reg_state *rcur,
 	return false;
 }
 
-static bool stacksafe(struct bpf_verifier_state *old,
-		      struct bpf_verifier_state *cur,
+static bool stacksafe(struct bpf_func_state *old,
+		      struct bpf_func_state *cur,
 		      struct idpair *idmap)
 {
 	int i, spi;
@@ -3579,8 +4222,19 @@ static bool stacksafe(struct bpf_verifier_state *old,
 	for (i = 0; i < old->allocated_stack; i++) {
 		spi = i / BPF_REG_SIZE;
 
+		if (!(old->stack[spi].spilled_ptr.live & REG_LIVE_READ))
+			/* explored state didn't use this */
+			continue;
+
 		if (old->stack[spi].slot_type[i % BPF_REG_SIZE] == STACK_INVALID)
 			continue;
+		/* if old state was safe with misc data in the stack
+		 * it will be safe with zero-initialized stack.
+		 * The opposite is not true
+		 */
+		if (old->stack[spi].slot_type[i % BPF_REG_SIZE] == STACK_MISC &&
+		    cur->stack[spi].slot_type[i % BPF_REG_SIZE] == STACK_ZERO)
+			continue;
 		if (old->stack[spi].slot_type[i % BPF_REG_SIZE] !=
 		    cur->stack[spi].slot_type[i % BPF_REG_SIZE])
 			/* Ex: old explored (safe) state has STACK_SPILL in
@@ -3637,9 +4291,8 @@ static bool stacksafe(struct bpf_verifier_state *old,
  * whereas register type in current state is meaningful, it means that
  * the current state will reach 'bpf_exit' instruction safely
  */
-static bool states_equal(struct bpf_verifier_env *env,
-			 struct bpf_verifier_state *old,
-			 struct bpf_verifier_state *cur)
+static bool func_states_equal(struct bpf_func_state *old,
+			      struct bpf_func_state *cur)
 {
 	struct idpair *idmap;
 	bool ret = false;
@@ -3663,71 +4316,72 @@ static bool states_equal(struct bpf_verifier_env *env,
 	return ret;
 }
 
-/* A write screens off any subsequent reads; but write marks come from the
- * straight-line code between a state and its parent.  When we arrive at a
- * jump target (in the first iteration of the propagate_liveness() loop),
- * we didn't arrive by the straight-line code, so read marks in state must
- * propagate to parent regardless of state's write marks.
- */
-static bool do_propagate_liveness(const struct bpf_verifier_state *state,
-				  struct bpf_verifier_state *parent)
+static bool states_equal(struct bpf_verifier_env *env,
+			 struct bpf_verifier_state *old,
+			 struct bpf_verifier_state *cur)
 {
-	bool writes = parent == state->parent; /* Observe write marks */
-	bool touched = false; /* any changes made? */
 	int i;
 
-	if (!parent)
-		return touched;
+	if (old->curframe != cur->curframe)
+		return false;
+
+	/* for states to be equal callsites have to be the same
+	 * and all frame states need to be equivalent
+	 */
+	for (i = 0; i <= old->curframe; i++) {
+		if (old->frame[i]->callsite != cur->frame[i]->callsite)
+			return false;
+		if (!func_states_equal(old->frame[i], cur->frame[i]))
+			return false;
+	}
+	return true;
+}
+
+/* A write screens off any subsequent reads; but write marks come from the
+ * straight-line code between a state and its parent.  When we arrive at an
+ * equivalent state (jump target or such) we didn't arrive by the straight-line
+ * code, so read marks in the state must propagate to the parent regardless
+ * of the state's write marks. That's what 'parent == state->parent' comparison
+ * in mark_reg_read() and mark_stack_slot_read() is for.
+ */
+static int propagate_liveness(struct bpf_verifier_env *env,
+			      const struct bpf_verifier_state *vstate,
+			      struct bpf_verifier_state *vparent)
+{
+	int i, frame, err = 0;
+	struct bpf_func_state *state, *parent;
+
+	if (vparent->curframe != vstate->curframe) {
+		WARN(1, "propagate_live: parent frame %d current frame %d\n",
+		     vparent->curframe, vstate->curframe);
+		return -EFAULT;
+	}
 	/* Propagate read liveness of registers... */
 	BUILD_BUG_ON(BPF_REG_FP + 1 != MAX_BPF_REG);
 	/* We don't need to worry about FP liveness because it's read-only */
 	for (i = 0; i < BPF_REG_FP; i++) {
-		if (parent->regs[i].live & REG_LIVE_READ)
+		if (vparent->frame[vparent->curframe]->regs[i].live & REG_LIVE_READ)
 			continue;
-		if (writes && (state->regs[i].live & REG_LIVE_WRITTEN))
-			continue;
-		if (state->regs[i].live & REG_LIVE_READ) {
-			parent->regs[i].live |= REG_LIVE_READ;
-			touched = true;
+		if (vstate->frame[vstate->curframe]->regs[i].live & REG_LIVE_READ) {
+			err = mark_reg_read(env, vstate, vparent, i);
+			if (err)
+				return err;
 		}
 	}
-	/* ... and stack slots */
-	for (i = 0; i < state->allocated_stack / BPF_REG_SIZE &&
-		    i < parent->allocated_stack / BPF_REG_SIZE; i++) {
-		if (parent->stack[i].slot_type[0] != STACK_SPILL)
-			continue;
-		if (state->stack[i].slot_type[0] != STACK_SPILL)
-			continue;
-		if (parent->stack[i].spilled_ptr.live & REG_LIVE_READ)
-			continue;
-		if (writes &&
-		    (state->stack[i].spilled_ptr.live & REG_LIVE_WRITTEN))
-			continue;
-		if (state->stack[i].spilled_ptr.live & REG_LIVE_READ) {
-			parent->stack[i].spilled_ptr.live |= REG_LIVE_READ;
-			touched = true;
-		}
-	}
-	return touched;
-}
 
-/* "parent" is "a state from which we reach the current state", but initially
- * it is not the state->parent (i.e. "the state whose straight-line code leads
- * to the current state"), instead it is the state that happened to arrive at
- * a (prunable) equivalent of the current state.  See comment above
- * do_propagate_liveness() for consequences of this.
- * This function is just a more efficient way of calling mark_reg_read() or
- * mark_stack_slot_read() on each reg in "parent" that is read in "state",
- * though it requires that parent != state->parent in the call arguments.
- */
-static void propagate_liveness(const struct bpf_verifier_state *state,
-			       struct bpf_verifier_state *parent)
-{
-	while (do_propagate_liveness(state, parent)) {
-		/* Something changed, so we need to feed those changes onward */
-		state = parent;
-		parent = state->parent;
+	/* ... and stack slots */
+	for (frame = 0; frame <= vstate->curframe; frame++) {
+		state = vstate->frame[frame];
+		parent = vparent->frame[frame];
+		for (i = 0; i < state->allocated_stack / BPF_REG_SIZE &&
+			    i < parent->allocated_stack / BPF_REG_SIZE; i++) {
+			if (parent->stack[i].spilled_ptr.live & REG_LIVE_READ)
+				continue;
+			if (state->stack[i].spilled_ptr.live & REG_LIVE_READ)
+				mark_stack_slot_read(env, vstate, vparent, i, frame);
+		}
 	}
+	return err;
 }
 
 static int is_state_visited(struct bpf_verifier_env *env, int insn_idx)
@@ -3735,7 +4389,7 @@ static int is_state_visited(struct bpf_verifier_env *env, int insn_idx)
 	struct bpf_verifier_state_list *new_sl;
 	struct bpf_verifier_state_list *sl;
 	struct bpf_verifier_state *cur = env->cur_state;
-	int i, err;
+	int i, j, err;
 
 	sl = env->explored_states[insn_idx];
 	if (!sl)
@@ -3756,7 +4410,9 @@ static int is_state_visited(struct bpf_verifier_env *env, int insn_idx)
 			 * they'll be immediately forgotten as we're pruning
 			 * this state and will pop a new one.
 			 */
-			propagate_liveness(&sl->state, cur);
+			err = propagate_liveness(env, &sl->state, cur);
+			if (err)
+				return err;
 			return 1;
 		}
 		sl = sl->next;
@@ -3764,9 +4420,10 @@ static int is_state_visited(struct bpf_verifier_env *env, int insn_idx)
 
 	/* there were no equivalent states, remember current one.
 	 * technically the current state is not proven to be safe yet,
-	 * but it will either reach bpf_exit (which means it's safe) or
-	 * it will be rejected. Since there are no loops, we won't be
-	 * seeing this 'insn_idx' instruction again on the way to bpf_exit
+	 * but it will either reach the outermost bpf_exit (which means it's safe)
+	 * or it will be rejected. Since there are no loops, we won't be
+	 * seeing this tuple (frame[0].callsite, frame[1].callsite, .. insn_idx)
+	 * again on the way to bpf_exit
 	 */
 	new_sl = kzalloc(sizeof(struct bpf_verifier_state_list), GFP_KERNEL);
 	if (!new_sl)
@@ -3790,19 +4447,15 @@ static int is_state_visited(struct bpf_verifier_env *env, int insn_idx)
 	 * explored_states can get read marks.)
 	 */
 	for (i = 0; i < BPF_REG_FP; i++)
-		cur->regs[i].live = REG_LIVE_NONE;
-	for (i = 0; i < cur->allocated_stack / BPF_REG_SIZE; i++)
-		if (cur->stack[i].slot_type[0] == STACK_SPILL)
-			cur->stack[i].spilled_ptr.live = REG_LIVE_NONE;
-	return 0;
-}
+		cur->frame[cur->curframe]->regs[i].live = REG_LIVE_NONE;
 
-static int ext_analyzer_insn_hook(struct bpf_verifier_env *env,
-				  int insn_idx, int prev_insn_idx)
-{
-	if (env->dev_ops && env->dev_ops->insn_hook)
-		return env->dev_ops->insn_hook(env, insn_idx, prev_insn_idx);
+	/* all stack frames are accessible from callee, clear them all */
+	for (j = 0; j <= cur->curframe; j++) {
+		struct bpf_func_state *frame = cur->frame[j];
 
+		for (i = 0; i < frame->allocated_stack / BPF_REG_SIZE; i++)
+			frame->stack[i].spilled_ptr.live = REG_LIVE_NONE;
+	}
 	return 0;
 }
 
@@ -3811,7 +4464,7 @@ static int do_check(struct bpf_verifier_env *env)
 	struct bpf_verifier_state *state;
 	struct bpf_insn *insns = env->prog->insnsi;
 	struct bpf_reg_state *regs;
-	int insn_cnt = env->prog->len;
+	int insn_cnt = env->prog->len, i;
 	int insn_idx, prev_insn_idx = 0;
 	int insn_processed = 0;
 	bool do_print_state = false;
@@ -3819,9 +4472,18 @@ static int do_check(struct bpf_verifier_env *env)
 	state = kzalloc(sizeof(struct bpf_verifier_state), GFP_KERNEL);
 	if (!state)
 		return -ENOMEM;
-	env->cur_state = state;
-	init_reg_state(env, state->regs);
+	state->curframe = 0;
 	state->parent = NULL;
+	state->frame[0] = kzalloc(sizeof(struct bpf_func_state), GFP_KERNEL);
+	if (!state->frame[0]) {
+		kfree(state);
+		return -ENOMEM;
+	}
+	env->cur_state = state;
+	init_func_state(env, state->frame[0],
+			BPF_MAIN_FUNC /* callsite */,
+			0 /* frameno */,
+			0 /* subprogno, zero == main subprog */);
 	insn_idx = 0;
 	for (;;) {
 		struct bpf_insn *insn;
@@ -3868,19 +4530,25 @@ static int do_check(struct bpf_verifier_env *env)
 			else
 				verbose(env, "\nfrom %d to %d:",
 					prev_insn_idx, insn_idx);
-			print_verifier_state(env, state);
+			print_verifier_state(env, state->frame[state->curframe]);
 			do_print_state = false;
 		}
 
 		if (env->log.level) {
+			const struct bpf_insn_cbs cbs = {
+				.cb_print	= verbose,
+			};
+
 			verbose(env, "%d: ", insn_idx);
-			print_bpf_insn(verbose, env, insn,
-				       env->allow_ptr_leaks);
+			print_bpf_insn(&cbs, env, insn, env->allow_ptr_leaks);
 		}
 
-		err = ext_analyzer_insn_hook(env, insn_idx, prev_insn_idx);
-		if (err)
-			return err;
+		if (bpf_prog_is_dev_bound(env->prog->aux)) {
+			err = bpf_prog_offload_verify_insn(env, insn_idx,
+							   prev_insn_idx);
+			if (err)
+				return err;
+		}
 
 		regs = cur_regs(env);
 		env->insn_aux_data[insn_idx].seen = true;
@@ -4001,13 +4669,17 @@ static int do_check(struct bpf_verifier_env *env)
 			if (opcode == BPF_CALL) {
 				if (BPF_SRC(insn->code) != BPF_K ||
 				    insn->off != 0 ||
-				    insn->src_reg != BPF_REG_0 ||
+				    (insn->src_reg != BPF_REG_0 &&
+				     insn->src_reg != BPF_PSEUDO_CALL) ||
 				    insn->dst_reg != BPF_REG_0) {
 					verbose(env, "BPF_CALL uses reserved fields\n");
 					return -EINVAL;
 				}
 
-				err = check_call(env, insn->imm, insn_idx);
+				if (insn->src_reg == BPF_PSEUDO_CALL)
+					err = check_func_call(env, insn, &insn_idx);
+				else
+					err = check_helper_call(env, insn->imm, insn_idx);
 				if (err)
 					return err;
 
@@ -4032,6 +4704,16 @@ static int do_check(struct bpf_verifier_env *env)
 					return -EINVAL;
 				}
 
+				if (state->curframe) {
+					/* exit from nested function */
+					prev_insn_idx = insn_idx;
+					err = prepare_func_exit(env, &insn_idx);
+					if (err)
+						return err;
+					do_print_state = true;
+					continue;
+				}
+
 				/* eBPF calling convention is such that R0 is used
 				 * to return the value from eBPF program.
 				 * Make sure that it's readable at this time
@@ -4092,8 +4774,16 @@ static int do_check(struct bpf_verifier_env *env)
 		insn_idx++;
 	}
 
-	verbose(env, "processed %d insns, stack depth %d\n", insn_processed,
-		env->prog->aux->stack_depth);
+	verbose(env, "processed %d insns, stack depth ", insn_processed);
+	for (i = 0; i < env->subprog_cnt + 1; i++) {
+		u32 depth = env->subprog_stack_depth[i];
+
+		verbose(env, "%d", depth);
+		if (i + 1 < env->subprog_cnt + 1)
+			verbose(env, "+");
+	}
+	verbose(env, "\n");
+	env->prog->aux->stack_depth = env->subprog_stack_depth[0];
 	return 0;
 }
 
@@ -4279,6 +4969,19 @@ static int adjust_insn_aux_data(struct bpf_verifier_env *env, u32 prog_len,
 	return 0;
 }
 
+static void adjust_subprog_starts(struct bpf_verifier_env *env, u32 off, u32 len)
+{
+	int i;
+
+	if (len == 1)
+		return;
+	for (i = 0; i < env->subprog_cnt; i++) {
+		if (env->subprog_starts[i] < off)
+			continue;
+		env->subprog_starts[i] += len - 1;
+	}
+}
+
 static struct bpf_prog *bpf_patch_insn_data(struct bpf_verifier_env *env, u32 off,
 					    const struct bpf_insn *patch, u32 len)
 {
@@ -4289,6 +4992,7 @@ static struct bpf_prog *bpf_patch_insn_data(struct bpf_verifier_env *env, u32 of
 		return NULL;
 	if (adjust_insn_aux_data(env, new_prog->len, off, len))
 		return NULL;
+	adjust_subprog_starts(env, off, len);
 	return new_prog;
 }
 
@@ -4423,6 +5127,180 @@ static int convert_ctx_accesses(struct bpf_verifier_env *env)
 	return 0;
 }
 
+static int jit_subprogs(struct bpf_verifier_env *env)
+{
+	struct bpf_prog *prog = env->prog, **func, *tmp;
+	int i, j, subprog_start, subprog_end = 0, len, subprog;
+	struct bpf_insn *insn;
+	void *old_bpf_func;
+	int err = -ENOMEM;
+
+	if (env->subprog_cnt == 0)
+		return 0;
+
+	for (i = 0, insn = prog->insnsi; i < prog->len; i++, insn++) {
+		if (insn->code != (BPF_JMP | BPF_CALL) ||
+		    insn->src_reg != BPF_PSEUDO_CALL)
+			continue;
+		subprog = find_subprog(env, i + insn->imm + 1);
+		if (subprog < 0) {
+			WARN_ONCE(1, "verifier bug. No program starts at insn %d\n",
+				  i + insn->imm + 1);
+			return -EFAULT;
+		}
+		/* temporarily remember subprog id inside insn instead of
+		 * aux_data, since next loop will split up all insns into funcs
+		 */
+		insn->off = subprog + 1;
+		/* remember original imm in case JIT fails and fallback
+		 * to interpreter will be needed
+		 */
+		env->insn_aux_data[i].call_imm = insn->imm;
+		/* point imm to __bpf_call_base+1 from JITs point of view */
+		insn->imm = 1;
+	}
+
+	func = kzalloc(sizeof(prog) * (env->subprog_cnt + 1), GFP_KERNEL);
+	if (!func)
+		return -ENOMEM;
+
+	for (i = 0; i <= env->subprog_cnt; i++) {
+		subprog_start = subprog_end;
+		if (env->subprog_cnt == i)
+			subprog_end = prog->len;
+		else
+			subprog_end = env->subprog_starts[i];
+
+		len = subprog_end - subprog_start;
+		func[i] = bpf_prog_alloc(bpf_prog_size(len), GFP_USER);
+		if (!func[i])
+			goto out_free;
+		memcpy(func[i]->insnsi, &prog->insnsi[subprog_start],
+		       len * sizeof(struct bpf_insn));
+		func[i]->type = prog->type;
+		func[i]->len = len;
+		if (bpf_prog_calc_tag(func[i]))
+			goto out_free;
+		func[i]->is_func = 1;
+		/* Use bpf_prog_F_tag to indicate functions in stack traces.
+		 * Long term would need debug info to populate names
+		 */
+		func[i]->aux->name[0] = 'F';
+		func[i]->aux->stack_depth = env->subprog_stack_depth[i];
+		func[i]->jit_requested = 1;
+		func[i] = bpf_int_jit_compile(func[i]);
+		if (!func[i]->jited) {
+			err = -ENOTSUPP;
+			goto out_free;
+		}
+		cond_resched();
+	}
+	/* at this point all bpf functions were successfully JITed.
+	 * Now populate all bpf_calls with correct addresses and
+	 * run last pass of JIT
+	 */
+	for (i = 0; i <= env->subprog_cnt; i++) {
+		insn = func[i]->insnsi;
+		for (j = 0; j < func[i]->len; j++, insn++) {
+			if (insn->code != (BPF_JMP | BPF_CALL) ||
+			    insn->src_reg != BPF_PSEUDO_CALL)
+				continue;
+			subprog = insn->off;
+			insn->off = 0;
+			insn->imm = (u64 (*)(u64, u64, u64, u64, u64))
+				func[subprog]->bpf_func -
+				__bpf_call_base;
+		}
+	}
+	for (i = 0; i <= env->subprog_cnt; i++) {
+		old_bpf_func = func[i]->bpf_func;
+		tmp = bpf_int_jit_compile(func[i]);
+		if (tmp != func[i] || func[i]->bpf_func != old_bpf_func) {
+			verbose(env, "JIT doesn't support bpf-to-bpf calls\n");
+			err = -EFAULT;
+			goto out_free;
+		}
+		cond_resched();
+	}
+
+	/* finally lock prog and jit images for all functions and
+	 * populate kallsyms
+	 */
+	for (i = 0; i <= env->subprog_cnt; i++) {
+		bpf_prog_lock_ro(func[i]);
+		bpf_prog_kallsyms_add(func[i]);
+	}
+
+	/* Last step: make the now unused interpreter insns from the main
+	 * prog consistent for later dump requests, so that they look the
+	 * same as if they had been interpreted only.
+	 */
+	for (i = 0, insn = prog->insnsi; i < prog->len; i++, insn++) {
+		unsigned long addr;
+
+		if (insn->code != (BPF_JMP | BPF_CALL) ||
+		    insn->src_reg != BPF_PSEUDO_CALL)
+			continue;
+		insn->off = env->insn_aux_data[i].call_imm;
+		subprog = find_subprog(env, i + insn->off + 1);
+		addr  = (unsigned long)func[subprog + 1]->bpf_func;
+		addr &= PAGE_MASK;
+		insn->imm = (u64 (*)(u64, u64, u64, u64, u64))
+			    addr - __bpf_call_base;
+	}
+
+	prog->jited = 1;
+	prog->bpf_func = func[0]->bpf_func;
+	prog->aux->func = func;
+	prog->aux->func_cnt = env->subprog_cnt + 1;
+	return 0;
+out_free:
+	for (i = 0; i <= env->subprog_cnt; i++)
+		if (func[i])
+			bpf_jit_free(func[i]);
+	kfree(func);
+	/* cleanup main prog to be interpreted */
+	prog->jit_requested = 0;
+	for (i = 0, insn = prog->insnsi; i < prog->len; i++, insn++) {
+		if (insn->code != (BPF_JMP | BPF_CALL) ||
+		    insn->src_reg != BPF_PSEUDO_CALL)
+			continue;
+		insn->off = 0;
+		insn->imm = env->insn_aux_data[i].call_imm;
+	}
+	return err;
+}
+
+static int fixup_call_args(struct bpf_verifier_env *env)
+{
+#ifndef CONFIG_BPF_JIT_ALWAYS_ON
+	struct bpf_prog *prog = env->prog;
+	struct bpf_insn *insn = prog->insnsi;
+	int i, depth;
+#endif
+	int err;
+
+	err = 0;
+	if (env->prog->jit_requested) {
+		err = jit_subprogs(env);
+		if (err == 0)
+			return 0;
+	}
+#ifndef CONFIG_BPF_JIT_ALWAYS_ON
+	for (i = 0; i < prog->len; i++, insn++) {
+		if (insn->code != (BPF_JMP | BPF_CALL) ||
+		    insn->src_reg != BPF_PSEUDO_CALL)
+			continue;
+		depth = get_callee_stack_depth(env, insn, i);
+		if (depth < 0)
+			return depth;
+		bpf_patch_call_args(insn, depth);
+	}
+	err = 0;
+#endif
+	return err;
+}
+
 /* fixup insn->imm field of bpf_call instructions
  * and inline eligible helpers as explicit sequence of BPF instructions
  *
@@ -4442,11 +5320,15 @@ static int fixup_bpf_calls(struct bpf_verifier_env *env)
 	for (i = 0; i < insn_cnt; i++, insn++) {
 		if (insn->code != (BPF_JMP | BPF_CALL))
 			continue;
+		if (insn->src_reg == BPF_PSEUDO_CALL)
+			continue;
 
 		if (insn->imm == BPF_FUNC_get_route_realm)
 			prog->dst_needed = 1;
 		if (insn->imm == BPF_FUNC_get_prandom_u32)
 			bpf_user_rnd_init_once();
+		if (insn->imm == BPF_FUNC_override_return)
+			prog->kprobe_override = 1;
 		if (insn->imm == BPF_FUNC_tail_call) {
 			/* If we tail call into other programs, we
 			 * cannot make any assumptions since they can
@@ -4498,7 +5380,7 @@ static int fixup_bpf_calls(struct bpf_verifier_env *env)
 		/* BPF_EMIT_CALL() assumptions in some of the map_gen_lookup
 		 * handlers are currently limited to 64 bit only.
 		 */
-		if (ebpf_jit_enabled() && BITS_PER_LONG == 64 &&
+		if (prog->jit_requested && BITS_PER_LONG == 64 &&
 		    insn->imm == BPF_FUNC_map_lookup_elem) {
 			map_ptr = env->insn_aux_data[i + delta].map_ptr;
 			if (map_ptr == BPF_MAP_PTR_POISON ||
@@ -4633,7 +5515,7 @@ int bpf_check(struct bpf_prog **prog, union bpf_attr *attr)
 	if (!IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS))
 		env->strict_alignment = true;
 
-	if (env->prog->aux->offload) {
+	if (bpf_prog_is_dev_bound(env->prog->aux)) {
 		ret = bpf_prog_offload_verifier_prep(env);
 		if (ret)
 			goto err_unlock;
@@ -4650,12 +5532,12 @@ int bpf_check(struct bpf_prog **prog, union bpf_attr *attr)
 	if (!env->explored_states)
 		goto skip_full_check;
 
+	env->allow_ptr_leaks = capable(CAP_SYS_ADMIN);
+
 	ret = check_cfg(env);
 	if (ret < 0)
 		goto skip_full_check;
 
-	env->allow_ptr_leaks = capable(CAP_SYS_ADMIN);
-
 	ret = do_check(env);
 	if (env->cur_state) {
 		free_verifier_state(env->cur_state, true);
@@ -4670,12 +5552,18 @@ int bpf_check(struct bpf_prog **prog, union bpf_attr *attr)
 		sanitize_dead_code(env);
 
 	if (ret == 0)
+		ret = check_max_stack_depth(env);
+
+	if (ret == 0)
 		/* program is valid, convert *(u32*)(ctx + off) accesses */
 		ret = convert_ctx_accesses(env);
 
 	if (ret == 0)
 		ret = fixup_bpf_calls(env);
 
+	if (ret == 0)
+		ret = fixup_call_args(env);
+
 	if (log->level && bpf_verifier_log_full(log))
 		ret = -ENOSPC;
 	if (log->level && !log->ubuf) {
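
Note on the new call handling above: a bpf-to-bpf call is an ordinary
BPF_JMP | BPF_CALL instruction whose src_reg is set to BPF_PSEUDO_CALL and
whose imm holds an instruction-relative offset, so the callee entry point is
the instruction right after the call plus imm. A minimal sketch of that
decoding, mirroring the checks in check_cfg() and jit_subprogs() (the helper
names here are illustrative only, not part of the patch):

	static bool insn_is_pseudo_call(const struct bpf_insn *insn)
	{
		return insn->code == (BPF_JMP | BPF_CALL) &&
		       insn->src_reg == BPF_PSEUDO_CALL;
	}

	static int pseudo_call_target(const struct bpf_insn *insn, int idx)
	{
		/* callee starts right after the call, offset by imm */
		return idx + insn->imm + 1;
	}
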
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 4df5b69..878d86c 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -4723,6 +4723,9 @@ static long _perf_ioctl(struct perf_event *event, unsigned int cmd, unsigned lon
 		rcu_read_unlock();
 		return 0;
 	}
+
+	case PERF_EVENT_IOC_QUERY_BPF:
+		return perf_event_query_prog_array(event, (void __user *)arg);
 	default:
 		return -ENOTTY;
 	}
@@ -8080,6 +8083,13 @@ static int perf_event_set_bpf_prog(struct perf_event *event, u32 prog_fd)
 		return -EINVAL;
 	}
 
+	/* Kprobe override only works for kprobes, not uprobes. */
+	if (prog->kprobe_override &&
+	    !(event->tp_event->flags & TRACE_EVENT_FL_KPROBE)) {
+		bpf_prog_put(prog);
+		return -EINVAL;
+	}
+
 	if (is_tracepoint || is_syscall_tp) {
 		int off = trace_event_get_offsets(event->tp_event);
 
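The new PERF_EVENT_IOC_QUERY_BPF ioctl lets user space enumerate the BPF
programs attached to a PERF_TYPE_TRACEPOINT event (CAP_SYS_ADMIN required).
A user-space sketch, with error handling elided; it assumes the uapi layout
consumed by perf_event_query_prog_array() below, i.e. ids_len, prog_cnt and
a flexible ids[] array in struct perf_event_query_bpf:

	__u32 ids_len = 16;
	struct perf_event_query_bpf *query;

	query = calloc(1, sizeof(*query) + ids_len * sizeof(__u32));
	query->ids_len = ids_len;
	if (ioctl(event_fd, PERF_EVENT_IOC_QUERY_BPF, query) == 0)
		printf("%u prog(s) attached\n", query->prog_cnt);
	free(query);
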
diff --git a/kernel/kprobes.c b/kernel/kprobes.c
index da2ccf1..b4aab48 100644
--- a/kernel/kprobes.c
+++ b/kernel/kprobes.c
@@ -83,6 +83,16 @@ static raw_spinlock_t *kretprobe_table_lock_ptr(unsigned long hash)
 	return &(kretprobe_table_locks[hash].lock);
 }
 
+/* List of symbols that can be overridden for error injection. */
+static LIST_HEAD(kprobe_error_injection_list);
+static DEFINE_MUTEX(kprobe_ei_mutex);
+struct kprobe_ei_entry {
+	struct list_head list;
+	unsigned long start_addr;
+	unsigned long end_addr;
+	void *priv;
+};
+
 /* Blacklist -- list of struct kprobe_blacklist_entry */
 static LIST_HEAD(kprobe_blacklist);
 
@@ -1394,6 +1404,17 @@ bool within_kprobe_blacklist(unsigned long addr)
 	return false;
 }
 
+bool within_kprobe_error_injection_list(unsigned long addr)
+{
+	struct kprobe_ei_entry *ent;
+
+	list_for_each_entry(ent, &kprobe_error_injection_list, list) {
+		if (addr >= ent->start_addr && addr < ent->end_addr)
+			return true;
+	}
+	return false;
+}
+
 /*
  * If we have a symbol_name argument, look it up and add the offset field
  * to it. This way, we can specify a relative address to a symbol.
@@ -2168,6 +2189,86 @@ static int __init populate_kprobe_blacklist(unsigned long *start,
 	return 0;
 }
 
+#ifdef CONFIG_BPF_KPROBE_OVERRIDE
+/* Markers of the _kprobe_error_inject_list section */
+extern unsigned long __start_kprobe_error_inject_list[];
+extern unsigned long __stop_kprobe_error_inject_list[];
+
+/*
+ * Lookup and populate the kprobe_error_injection_list.
+ *
+ * For safety reasons we only allow certain functions to be overridden with
+ * bpf_error_injection, so we need to populate the list of symbols that have
+ * been marked as safe for overriding.
+ */
+static void populate_kprobe_error_injection_list(unsigned long *start,
+						 unsigned long *end,
+						 void *priv)
+{
+	unsigned long *iter;
+	struct kprobe_ei_entry *ent;
+	unsigned long entry, offset = 0, size = 0;
+
+	mutex_lock(&kprobe_ei_mutex);
+	for (iter = start; iter < end; iter++) {
+		entry = arch_deref_entry_point((void *)*iter);
+
+		if (!kernel_text_address(entry) ||
+		    !kallsyms_lookup_size_offset(entry, &size, &offset)) {
+			pr_err("Failed to find error inject entry at %p\n",
+				(void *)entry);
+			continue;
+		}
+
+		ent = kmalloc(sizeof(*ent), GFP_KERNEL);
+		if (!ent)
+			break;
+		ent->start_addr = entry;
+		ent->end_addr = entry + size;
+		ent->priv = priv;
+		INIT_LIST_HEAD(&ent->list);
+		list_add_tail(&ent->list, &kprobe_error_injection_list);
+	}
+	mutex_unlock(&kprobe_ei_mutex);
+}
+
+static void __init populate_kernel_kprobe_ei_list(void)
+{
+	populate_kprobe_error_injection_list(__start_kprobe_error_inject_list,
+					     __stop_kprobe_error_inject_list,
+					     NULL);
+}
+
+static void module_load_kprobe_ei_list(struct module *mod)
+{
+	if (!mod->num_kprobe_ei_funcs)
+		return;
+	populate_kprobe_error_injection_list(mod->kprobe_ei_funcs,
+					     mod->kprobe_ei_funcs +
+					     mod->num_kprobe_ei_funcs, mod);
+}
+
+static void module_unload_kprobe_ei_list(struct module *mod)
+{
+	struct kprobe_ei_entry *ent, *n;
+
+	if (!mod->num_kprobe_ei_funcs)
+		return;
+
+	mutex_lock(&kprobe_ei_mutex);
+	list_for_each_entry_safe(ent, n, &kprobe_error_injection_list, list) {
+		if (ent->priv == mod) {
+			list_del_init(&ent->list);
+			kfree(ent);
+		}
+	}
+	mutex_unlock(&kprobe_ei_mutex);
+}
+#else
+static inline void __init populate_kernel_kprobe_ei_list(void) {}
+static inline void module_load_kprobe_ei_list(struct module *m) {}
+static inline void module_unload_kprobe_ei_list(struct module *m) {}
+#endif
+
 /* Module notifier call back, checking kprobes on the module */
 static int kprobes_module_callback(struct notifier_block *nb,
 				   unsigned long val, void *data)
@@ -2178,6 +2279,11 @@ static int kprobes_module_callback(struct notifier_block *nb,
 	unsigned int i;
 	int checkcore = (val == MODULE_STATE_GOING);
 
+	if (val == MODULE_STATE_COMING)
+		module_load_kprobe_ei_list(mod);
+	else if (val == MODULE_STATE_GOING)
+		module_unload_kprobe_ei_list(mod);
+
 	if (val != MODULE_STATE_GOING && val != MODULE_STATE_LIVE)
 		return NOTIFY_DONE;
 
@@ -2240,6 +2346,8 @@ static int __init init_kprobes(void)
 		pr_err("Please take care of using kprobes.\n");
 	}
 
+	populate_kernel_kprobe_ei_list();
+
 	if (kretprobe_blacklist_size) {
 		/* lookup the function address from its name */
 		for (i = 0; kretprobe_blacklist[i].name != NULL; i++) {
@@ -2407,6 +2515,56 @@ static const struct file_operations debugfs_kprobe_blacklist_ops = {
 	.release        = seq_release,
 };
 
+/*
+ * kprobes/error_injection_list -- shows which functions can be overridden
+ * for error injection.
+ */
+static void *kprobe_ei_seq_start(struct seq_file *m, loff_t *pos)
+{
+	mutex_lock(&kprobe_ei_mutex);
+	return seq_list_start(&kprobe_error_injection_list, *pos);
+}
+
+static void kprobe_ei_seq_stop(struct seq_file *m, void *v)
+{
+	mutex_unlock(&kprobe_ei_mutex);
+}
+
+static void *kprobe_ei_seq_next(struct seq_file *m, void *v, loff_t *pos)
+{
+	return seq_list_next(v, &kprobe_error_injection_list, pos);
+}
+
+static int kprobe_ei_seq_show(struct seq_file *m, void *v)
+{
+	char buffer[KSYM_SYMBOL_LEN];
+	struct kprobe_ei_entry *ent =
+		list_entry(v, struct kprobe_ei_entry, list);
+
+	sprint_symbol(buffer, ent->start_addr);
+	seq_printf(m, "%s\n", buffer);
+	return 0;
+}
+
+static const struct seq_operations kprobe_ei_seq_ops = {
+	.start = kprobe_ei_seq_start,
+	.next  = kprobe_ei_seq_next,
+	.stop  = kprobe_ei_seq_stop,
+	.show  = kprobe_ei_seq_show,
+};
+
+static int kprobe_ei_open(struct inode *inode, struct file *filp)
+{
+	return seq_open(filp, &kprobe_ei_seq_ops);
+}
+
+static const struct file_operations debugfs_kprobe_ei_ops = {
+	.open           = kprobe_ei_open,
+	.read           = seq_read,
+	.llseek         = seq_lseek,
+	.release        = seq_release,
+};
+
 static void arm_all_kprobes(void)
 {
 	struct hlist_head *head;
@@ -2548,6 +2706,11 @@ static int __init debugfs_kprobe_init(void)
 	if (!file)
 		goto error;
 
+	file = debugfs_create_file("error_injection_list", 0444, dir, NULL,
+				  &debugfs_kprobe_ei_ops);
+	if (!file)
+		goto error;
+
 	return 0;
 
 error:
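
Once this is in place, the opt-in set becomes visible under the kprobes
debugfs directory as error_injection_list, where kprobe_ei_seq_show() above
prints one resolved symbol per line; tooling can consult that file before
attaching an override program.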
diff --git a/kernel/module.c b/kernel/module.c
index dea01ac..bd695bf 100644
--- a/kernel/module.c
+++ b/kernel/module.c
@@ -3118,7 +3118,11 @@ static int find_module_sections(struct module *mod, struct load_info *info)
 					     sizeof(*mod->ftrace_callsites),
 					     &mod->num_ftrace_callsites);
 #endif
-
+#ifdef CONFIG_BPF_KPROBE_OVERRIDE
+	mod->kprobe_ei_funcs = section_objs(info, "_kprobe_error_inject_list",
+					    sizeof(*mod->kprobe_ei_funcs),
+					    &mod->num_kprobe_ei_funcs);
+#endif
 	mod->extable = section_objs(info, "__ex_table",
 				    sizeof(*mod->extable), &mod->num_exentries);
 
diff --git a/kernel/trace/Kconfig b/kernel/trace/Kconfig
index 904c952..ae3a2d5 100644
--- a/kernel/trace/Kconfig
+++ b/kernel/trace/Kconfig
@@ -530,6 +530,17 @@
 
 	  If in doubt, say N.
 
+config BPF_KPROBE_OVERRIDE
+	bool "Enable BPF programs to override a kprobed function"
+	depends on BPF_EVENTS
+	depends on KPROBES_ON_FTRACE
+	depends on HAVE_KPROBE_OVERRIDE
+	depends on DYNAMIC_FTRACE_WITH_REGS
+	default n
+	help
+	 Allows BPF to override the execution of a probed function and
+	 set a different return value.  This is used for error injection.
+
 config FTRACE_MCOUNT_RECORD
 	def_bool y
 	depends on DYNAMIC_FTRACE
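
With BPF_KPROBE_OVERRIDE enabled, a GPL-licensed kprobe program can force an
early error return from a function on the error-injection opt-in list. A
sketch of such a program (illustrative only; it assumes the usual
bpf_helpers.h SEC() conventions and that the probed symbol has been opted
in):

	SEC("kprobe/open_ctree")
	int override_open_ctree(struct pt_regs *ctx)
	{
		unsigned long rc = -12;	/* -ENOMEM */

		bpf_override_return(ctx, rc);
		return 0;
	}

	char _license[] SEC("license") = "GPL";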
diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c
index 40207c2..f6d2327 100644
--- a/kernel/trace/bpf_trace.c
+++ b/kernel/trace/bpf_trace.c
@@ -13,6 +13,10 @@
 #include <linux/filter.h>
 #include <linux/uaccess.h>
 #include <linux/ctype.h>
+#include <linux/kprobes.h>
+#include <asm/kprobes.h>
+
+#include "trace_probe.h"
 #include "trace.h"
 
 u64 bpf_get_stackid(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);
@@ -76,6 +80,24 @@ unsigned int trace_call_bpf(struct trace_event_call *call, void *ctx)
 }
 EXPORT_SYMBOL_GPL(trace_call_bpf);
 
+#ifdef CONFIG_BPF_KPROBE_OVERRIDE
+BPF_CALL_2(bpf_override_return, struct pt_regs *, regs, unsigned long, rc)
+{
+	__this_cpu_write(bpf_kprobe_override, 1);
+	regs_set_return_value(regs, rc);
+	arch_ftrace_kprobe_override_function(regs);
+	return 0;
+}
+
+static const struct bpf_func_proto bpf_override_return_proto = {
+	.func		= bpf_override_return,
+	.gpl_only	= true,
+	.ret_type	= RET_INTEGER,
+	.arg1_type	= ARG_PTR_TO_CTX,
+	.arg2_type	= ARG_ANYTHING,
+};
+#endif
+
 BPF_CALL_3(bpf_probe_read, void *, dst, u32, size, const void *, unsafe_ptr)
 {
 	int ret;
@@ -556,6 +578,10 @@ static const struct bpf_func_proto *kprobe_prog_func_proto(enum bpf_func_id func
 		return &bpf_get_stackid_proto;
 	case BPF_FUNC_perf_event_read_value:
 		return &bpf_perf_event_read_value_proto;
+#ifdef CONFIG_BPF_KPROBE_OVERRIDE
+	case BPF_FUNC_override_return:
+		return &bpf_override_return_proto;
+#endif
 	default:
 		return tracing_func_proto(func_id);
 	}
@@ -773,6 +799,15 @@ int perf_event_attach_bpf_prog(struct perf_event *event,
 	struct bpf_prog_array *new_array;
 	int ret = -EEXIST;
 
+	/*
+	 * Kprobe override only works for ftrace-based kprobes, and only if they
+	 * are on the opt-in list.
+	 */
+	if (prog->kprobe_override &&
+	    (!trace_kprobe_ftrace(event->tp_event) ||
+	     !trace_kprobe_error_injectable(event->tp_event)))
+		return -EINVAL;
+
 	mutex_lock(&bpf_event_mutex);
 
 	if (event->prog)
@@ -825,3 +860,26 @@ void perf_event_detach_bpf_prog(struct perf_event *event)
 unlock:
 	mutex_unlock(&bpf_event_mutex);
 }
+
+int perf_event_query_prog_array(struct perf_event *event, void __user *info)
+{
+	struct perf_event_query_bpf __user *uquery = info;
+	struct perf_event_query_bpf query = {};
+	int ret;
+
+	if (!capable(CAP_SYS_ADMIN))
+		return -EPERM;
+	if (event->attr.type != PERF_TYPE_TRACEPOINT)
+		return -EINVAL;
+	if (copy_from_user(&query, uquery, sizeof(query)))
+		return -EFAULT;
+
+	mutex_lock(&bpf_event_mutex);
+	ret = bpf_prog_array_copy_info(event->tp_event->prog_array,
+				       uquery->ids,
+				       query.ids_len,
+				       &uquery->prog_cnt);
+	mutex_unlock(&bpf_event_mutex);
+
+	return ret;
+}
diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c
index 492700c..91f4b57 100644
--- a/kernel/trace/trace_kprobe.c
+++ b/kernel/trace/trace_kprobe.c
@@ -42,6 +42,7 @@ struct trace_kprobe {
 	(offsetof(struct trace_kprobe, tp.args) +	\
 	(sizeof(struct probe_arg) * (n)))
 
+DEFINE_PER_CPU(int, bpf_kprobe_override);
 
 static nokprobe_inline bool trace_kprobe_is_return(struct trace_kprobe *tk)
 {
@@ -87,6 +88,27 @@ static nokprobe_inline unsigned long trace_kprobe_nhit(struct trace_kprobe *tk)
 	return nhit;
 }
 
+int trace_kprobe_ftrace(struct trace_event_call *call)
+{
+	struct trace_kprobe *tk = (struct trace_kprobe *)call->data;
+
+	return kprobe_ftrace(&tk->rp.kp);
+}
+
+int trace_kprobe_error_injectable(struct trace_event_call *call)
+{
+	struct trace_kprobe *tk = (struct trace_kprobe *)call->data;
+	unsigned long addr;
+
+	if (tk->symbol) {
+		addr = (unsigned long)
+			kallsyms_lookup_name(trace_kprobe_symbol(tk));
+		addr += tk->rp.kp.offset;
+	} else {
+		addr = (unsigned long)tk->rp.kp.addr;
+	}
+	return within_kprobe_error_injection_list(addr);
+}
+
 static int register_kprobe_event(struct trace_kprobe *tk);
 static int unregister_kprobe_event(struct trace_kprobe *tk);
 
@@ -1170,7 +1192,7 @@ static int kretprobe_event_define_fields(struct trace_event_call *event_call)
 #ifdef CONFIG_PERF_EVENTS
 
 /* Kprobe profile handler */
-static void
+static int
 kprobe_perf_func(struct trace_kprobe *tk, struct pt_regs *regs)
 {
 	struct trace_event_call *call = &tk->tp.call;
@@ -1179,12 +1201,29 @@ kprobe_perf_func(struct trace_kprobe *tk, struct pt_regs *regs)
 	int size, __size, dsize;
 	int rctx;
 
-	if (bpf_prog_array_valid(call) && !trace_call_bpf(call, regs))
-		return;
+	if (bpf_prog_array_valid(call)) {
+		int ret;
+
+		ret = trace_call_bpf(call, regs);
+
+		/*
+		 * We need to check and see if we modified the pc of the
+		 * pt_regs, and if so clear the kprobe and return 1 so that we
+		 * don't do the instruction skipping.  Also reset our state so
+		 * we are clean on the next pass through.
+		 */
+		if (__this_cpu_read(bpf_kprobe_override)) {
+			__this_cpu_write(bpf_kprobe_override, 0);
+			reset_current_kprobe();
+			return 1;
+		}
+		if (!ret)
+			return 0;
+	}
 
 	head = this_cpu_ptr(call->perf_events);
 	if (hlist_empty(head))
-		return;
+		return 0;
 
 	dsize = __get_data_size(&tk->tp, regs);
 	__size = sizeof(*entry) + tk->tp.size + dsize;
@@ -1193,13 +1232,14 @@ kprobe_perf_func(struct trace_kprobe *tk, struct pt_regs *regs)
 
 	entry = perf_trace_buf_alloc(size, NULL, &rctx);
 	if (!entry)
-		return;
+		return 0;
 
 	entry->ip = (unsigned long)tk->rp.kp.addr;
 	memset(&entry[1], 0, dsize);
 	store_trace_args(sizeof(*entry), &tk->tp, regs, (u8 *)&entry[1], dsize);
 	perf_trace_buf_submit(entry, size, rctx, call->event.type, 1, regs,
 			      head, NULL);
+	return 0;
 }
 NOKPROBE_SYMBOL(kprobe_perf_func);
 
@@ -1275,16 +1315,24 @@ static int kprobe_register(struct trace_event_call *event,
 static int kprobe_dispatcher(struct kprobe *kp, struct pt_regs *regs)
 {
 	struct trace_kprobe *tk = container_of(kp, struct trace_kprobe, rp.kp);
+	int ret = 0;
 
 	raw_cpu_inc(*tk->nhit);
 
 	if (tk->tp.flags & TP_FLAG_TRACE)
 		kprobe_trace_func(tk, regs);
 #ifdef CONFIG_PERF_EVENTS
-	if (tk->tp.flags & TP_FLAG_PROFILE)
-		kprobe_perf_func(tk, regs);
+	if (tk->tp.flags & TP_FLAG_PROFILE) {
+		ret = kprobe_perf_func(tk, regs);
+		/*
+		 * The ftrace kprobe handler leaves it up to us to re-enable
+		 * preemption here before returning if we've modified the ip.
+		 */
+		if (ret)
+			preempt_enable_no_resched();
+	}
 #endif
-	return 0;	/* We don't tweek kernel, so just return 0 */
+	return ret;
 }
 NOKPROBE_SYMBOL(kprobe_dispatcher);
 
diff --git a/kernel/trace/trace_probe.h b/kernel/trace/trace_probe.h
index fb66e3e..5e54d74 100644
--- a/kernel/trace/trace_probe.h
+++ b/kernel/trace/trace_probe.h
@@ -252,6 +252,8 @@ struct symbol_cache;
 unsigned long update_symbol_cache(struct symbol_cache *sc);
 void free_symbol_cache(struct symbol_cache *sc);
 struct symbol_cache *alloc_symbol_cache(const char *sym, long offset);
+int trace_kprobe_ftrace(struct trace_event_call *call);
+int trace_kprobe_error_injectable(struct trace_event_call *call);
 #else
 /* uprobes do not support symbol fetch methods */
 #define fetch_symbol_u8			NULL
@@ -277,6 +279,16 @@ alloc_symbol_cache(const char *sym, long offset)
 {
 	return NULL;
 }
+
+static inline int trace_kprobe_ftrace(struct trace_event_call *call)
+{
+	return 0;
+}
+
+static inline int trace_kprobe_error_injectable(struct trace_event_call *call)
+{
+	return 0;
+}
 #endif /* CONFIG_KPROBE_EVENTS */
 
 struct probe_arg {
diff --git a/lib/Makefile b/lib/Makefile
index d11c48e..a6c8529 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -39,7 +39,7 @@
 	 gcd.o lcm.o list_sort.o uuid.o flex_array.o iov_iter.o clz_ctz.o \
 	 bsearch.o find_bit.o llist.o memweight.o kfifo.o \
 	 percpu-refcount.o percpu_ida.o rhashtable.o reciprocal_div.o \
-	 once.o refcount.o usercopy.o errseq.o
+	 once.o refcount.o usercopy.o errseq.o bucket_locks.o
 obj-$(CONFIG_STRING_SELFTEST) += test_string.o
 obj-y += string_helpers.o
 obj-$(CONFIG_TEST_STRING_HELPERS) += test-string_helpers.o
diff --git a/lib/bucket_locks.c b/lib/bucket_locks.c
new file mode 100644
index 0000000..266a97c
--- /dev/null
+++ b/lib/bucket_locks.c
@@ -0,0 +1,54 @@
+#include <linux/export.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+
+/* Allocate an array of spinlocks to be accessed by a hash. Two arguments
+ * control how many locks are allocated: max_size gives the maximum number
+ * of locks to allocate and cpu_mult gives the number of locks to allocate
+ * per CPU. The size is rounded up to a power of 2 so that (size - 1) can
+ * be used as a hash mask.
+ */
+
+int alloc_bucket_spinlocks(spinlock_t **locks, unsigned int *locks_mask,
+			   size_t max_size, unsigned int cpu_mult, gfp_t gfp)
+{
+	spinlock_t *tlocks = NULL;
+	unsigned int i, size;
+#if defined(CONFIG_PROVE_LOCKING)
+	unsigned int nr_pcpus = 2;
+#else
+	unsigned int nr_pcpus = num_possible_cpus();
+#endif
+
+	if (cpu_mult) {
+		nr_pcpus = min_t(unsigned int, nr_pcpus, 64UL);
+		size = min_t(unsigned int, nr_pcpus * cpu_mult, max_size);
+	} else {
+		size = max_size;
+	}
+
+	if (sizeof(spinlock_t) != 0) {
+		if (gfpflags_allow_blocking(gfp))
+			tlocks = kvmalloc(size * sizeof(spinlock_t), gfp);
+		else
+			tlocks = kmalloc_array(size, sizeof(spinlock_t), gfp);
+		if (!tlocks)
+			return -ENOMEM;
+		for (i = 0; i < size; i++)
+			spin_lock_init(&tlocks[i]);
+	}
+
+	*locks = tlocks;
+	*locks_mask = size - 1;
+
+	return 0;
+}
+EXPORT_SYMBOL(alloc_bucket_spinlocks);
+
+void free_bucket_spinlocks(spinlock_t *locks)
+{
+	kvfree(locks);
+}
+EXPORT_SYMBOL(free_bucket_spinlocks);
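
A typical caller pairs the two helpers like this (sketch only; the hash
value and the surrounding table are assumptions for illustration):

	spinlock_t *locks;
	unsigned int locks_mask;
	int err;

	/* at most 1024 locks, one lock per CPU requested */
	err = alloc_bucket_spinlocks(&locks, &locks_mask, 1024, 1, GFP_KERNEL);
	if (err)
		return err;

	spin_lock(&locks[hash & locks_mask]);
	/* ... touch the bucket guarded by this lock ... */
	spin_unlock(&locks[hash & locks_mask]);

	free_bucket_spinlocks(locks);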
diff --git a/lib/rhashtable.c b/lib/rhashtable.c
index ddd7dde..3825c30 100644
--- a/lib/rhashtable.c
+++ b/lib/rhashtable.c
@@ -65,42 +65,6 @@ EXPORT_SYMBOL_GPL(lockdep_rht_bucket_is_held);
 #define ASSERT_RHT_MUTEX(HT)
 #endif
 
-
-static int alloc_bucket_locks(struct rhashtable *ht, struct bucket_table *tbl,
-			      gfp_t gfp)
-{
-	unsigned int i, size;
-#if defined(CONFIG_PROVE_LOCKING)
-	unsigned int nr_pcpus = 2;
-#else
-	unsigned int nr_pcpus = num_possible_cpus();
-#endif
-
-	nr_pcpus = min_t(unsigned int, nr_pcpus, 64UL);
-	size = roundup_pow_of_two(nr_pcpus * ht->p.locks_mul);
-
-	/* Never allocate more than 0.5 locks per bucket */
-	size = min_t(unsigned int, size, tbl->size >> 1);
-
-	if (tbl->nest)
-		size = min(size, 1U << tbl->nest);
-
-	if (sizeof(spinlock_t) != 0) {
-		if (gfpflags_allow_blocking(gfp))
-			tbl->locks = kvmalloc(size * sizeof(spinlock_t), gfp);
-		else
-			tbl->locks = kmalloc_array(size, sizeof(spinlock_t),
-						   gfp);
-		if (!tbl->locks)
-			return -ENOMEM;
-		for (i = 0; i < size; i++)
-			spin_lock_init(&tbl->locks[i]);
-	}
-	tbl->locks_mask = size - 1;
-
-	return 0;
-}
-
 static void nested_table_free(union nested_table *ntbl, unsigned int size)
 {
 	const unsigned int shift = PAGE_SHIFT - ilog2(sizeof(void *));
@@ -140,7 +104,7 @@ static void bucket_table_free(const struct bucket_table *tbl)
 	if (tbl->nest)
 		nested_bucket_table_free(tbl);
 
-	kvfree(tbl->locks);
+	free_bucket_spinlocks(tbl->locks);
 	kvfree(tbl);
 }
 
@@ -207,7 +171,7 @@ static struct bucket_table *bucket_table_alloc(struct rhashtable *ht,
 					       gfp_t gfp)
 {
 	struct bucket_table *tbl = NULL;
-	size_t size;
+	size_t size, max_locks;
 	int i;
 
 	size = sizeof(*tbl) + nbuckets * sizeof(tbl->buckets[0]);
@@ -227,7 +191,12 @@ static struct bucket_table *bucket_table_alloc(struct rhashtable *ht,
 
 	tbl->size = size;
 
-	if (alloc_bucket_locks(ht, tbl, gfp) < 0) {
+	max_locks = size >> 1;
+	if (tbl->nest)
+		max_locks = min_t(size_t, max_locks, 1U << tbl->nest);
+
+	if (alloc_bucket_spinlocks(&tbl->locks, &tbl->locks_mask, max_locks,
+				   ht->p.locks_mul, gfp) < 0) {
 		bucket_table_free(tbl);
 		return NULL;
 	}
@@ -707,6 +676,7 @@ void rhashtable_walk_enter(struct rhashtable *ht, struct rhashtable_iter *iter)
 	iter->p = NULL;
 	iter->slot = 0;
 	iter->skip = 0;
+	iter->end_of_table = 0;
 
 	spin_lock(&ht->lock);
 	iter->walker.tbl =
@@ -732,7 +702,7 @@ void rhashtable_walk_exit(struct rhashtable_iter *iter)
 EXPORT_SYMBOL_GPL(rhashtable_walk_exit);
 
 /**
- * rhashtable_walk_start - Start a hash table walk
+ * rhashtable_walk_start_check - Start a hash table walk
  * @iter:	Hash table iterator
  *
  * Start a hash table walk at the current iterator position.  Note that we take
@@ -744,8 +714,12 @@ EXPORT_SYMBOL_GPL(rhashtable_walk_exit);
  * Returns -EAGAIN if resize event occurred.  Note that the iterator
  * will rewind back to the beginning and you may use it immediately
  * by calling rhashtable_walk_next.
+ *
+ * rhashtable_walk_start is defined as an inline variant that returns
+ * void. This is preferred in cases where the caller would ignore
+ * resize events and always continue.
  */
-int rhashtable_walk_start(struct rhashtable_iter *iter)
+int rhashtable_walk_start_check(struct rhashtable_iter *iter)
 	__acquires(RCU)
 {
 	struct rhashtable *ht = iter->ht;
@@ -757,28 +731,26 @@ int rhashtable_walk_start(struct rhashtable_iter *iter)
 		list_del(&iter->walker.list);
 	spin_unlock(&ht->lock);
 
-	if (!iter->walker.tbl) {
+	if (!iter->walker.tbl && !iter->end_of_table) {
 		iter->walker.tbl = rht_dereference_rcu(ht->tbl, ht);
 		return -EAGAIN;
 	}
 
 	return 0;
 }
-EXPORT_SYMBOL_GPL(rhashtable_walk_start);
+EXPORT_SYMBOL_GPL(rhashtable_walk_start_check);
 
 /**
- * rhashtable_walk_next - Return the next object and advance the iterator
+ * __rhashtable_walk_find_next - Find the next element in a table (or the first
+ * one in case of a new walk).
+ *
  * @iter:	Hash table iterator
  *
- * Note that you must call rhashtable_walk_stop when you are finished
- * with the walk.
+ * Returns the found object or NULL when the end of the table is reached.
  *
- * Returns the next object or NULL when the end of the table is reached.
- *
- * Returns -EAGAIN if resize event occured.  Note that the iterator
- * will rewind back to the beginning and you may continue to use it.
+ * Returns -EAGAIN if resize event occurred.
  */
-void *rhashtable_walk_next(struct rhashtable_iter *iter)
+static void *__rhashtable_walk_find_next(struct rhashtable_iter *iter)
 {
 	struct bucket_table *tbl = iter->walker.tbl;
 	struct rhlist_head *list = iter->list;
@@ -786,13 +758,8 @@ void *rhashtable_walk_next(struct rhashtable_iter *iter)
 	struct rhash_head *p = iter->p;
 	bool rhlist = ht->rhlist;
 
-	if (p) {
-		if (!rhlist || !(list = rcu_dereference(list->next))) {
-			p = rcu_dereference(p->next);
-			list = container_of(p, struct rhlist_head, rhead);
-		}
-		goto next;
-	}
+	if (!tbl)
+		return NULL;
 
 	for (; iter->slot < tbl->size; iter->slot++) {
 		int skip = iter->skip;
@@ -836,13 +803,90 @@ void *rhashtable_walk_next(struct rhashtable_iter *iter)
 		iter->slot = 0;
 		iter->skip = 0;
 		return ERR_PTR(-EAGAIN);
+	} else {
+		iter->end_of_table = true;
 	}
 
 	return NULL;
 }
+
+/**
+ * rhashtable_walk_next - Return the next object and advance the iterator
+ * @iter:	Hash table iterator
+ *
+ * Note that you must call rhashtable_walk_stop when you are finished
+ * with the walk.
+ *
+ * Returns the next object or NULL when the end of the table is reached.
+ *
+ * Returns -EAGAIN if resize event occurred.  Note that the iterator
+ * will rewind back to the beginning and you may continue to use it.
+ */
+void *rhashtable_walk_next(struct rhashtable_iter *iter)
+{
+	struct rhlist_head *list = iter->list;
+	struct rhashtable *ht = iter->ht;
+	struct rhash_head *p = iter->p;
+	bool rhlist = ht->rhlist;
+
+	if (p) {
+		if (!rhlist || !(list = rcu_dereference(list->next))) {
+			p = rcu_dereference(p->next);
+			list = container_of(p, struct rhlist_head, rhead);
+		}
+		if (!rht_is_a_nulls(p)) {
+			iter->skip++;
+			iter->p = p;
+			iter->list = list;
+			return rht_obj(ht, rhlist ? &list->rhead : p);
+		}
+
+		/* At the end of this slot, switch to the next one and then
+		 * find the next entry from that point.
+		 */
+		iter->skip = 0;
+		iter->slot++;
+	}
+
+	return __rhashtable_walk_find_next(iter);
+}
 EXPORT_SYMBOL_GPL(rhashtable_walk_next);
 
 /**
+ * rhashtable_walk_peek - Return the next object but don't advance the iterator
+ * @iter:	Hash table iterator
+ *
+ * Returns the next object or NULL when the end of the table is reached.
+ *
+ * Returns -EAGAIN if resize event occurred.  Note that the iterator
+ * will rewind back to the beginning and you may continue to use it.
+ */
+void *rhashtable_walk_peek(struct rhashtable_iter *iter)
+{
+	struct rhlist_head *list = iter->list;
+	struct rhashtable *ht = iter->ht;
+	struct rhash_head *p = iter->p;
+
+	if (p)
+		return rht_obj(ht, ht->rhlist ? &list->rhead : p);
+
+	/* No object found in current iter, find next one in the table. */
+
+	if (iter->skip) {
+		/* A nonzero skip value points to the next entry in the table
+		 * beyond the last one that was found. Decrement skip so
+		 * we find the current value. __rhashtable_walk_find_next
+		 * will restore the original value of skip assuming that
+		 * the table hasn't changed.
+		 */
+		iter->skip--;
+	}
+
+	return __rhashtable_walk_find_next(iter);
+}
+EXPORT_SYMBOL_GPL(rhashtable_walk_peek);
+
+/**
  * rhashtable_walk_stop - Finish a hash table walk
  * @iter:	Hash table iterator
  *
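
After this change rhashtable_walk_start() becomes a void inline wrapper for
callers that ignore resize events, while rhashtable_walk_start_check() keeps
the old int return. The resulting canonical walk loop looks like this
(sketch; ht and the object handling are assumptions for illustration):

	struct rhashtable_iter iter;
	void *obj;

	rhashtable_walk_enter(&ht, &iter);
	rhashtable_walk_start(&iter);	/* ignore resize events */

	while ((obj = rhashtable_walk_next(&iter)) != NULL) {
		if (IS_ERR(obj)) {
			if (PTR_ERR(obj) == -EAGAIN)
				continue;	/* resized; iterator rewound */
			break;
		}
		/* ... use obj ... */
	}

	rhashtable_walk_stop(&iter);
	rhashtable_walk_exit(&iter);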
diff --git a/lib/test_rhashtable.c b/lib/test_rhashtable.c
index 8e83cbd..76d3667 100644
--- a/lib/test_rhashtable.c
+++ b/lib/test_rhashtable.c
@@ -162,11 +162,7 @@ static void test_bucket_stats(struct rhashtable *ht, unsigned int entries)
 		return;
 	}
 
-	err = rhashtable_walk_start(&hti);
-	if (err && err != -EAGAIN) {
-		pr_warn("Test failed: iterator failed: %d\n", err);
-		return;
-	}
+	rhashtable_walk_start(&hti);
 
 	while ((pos = rhashtable_walk_next(&hti))) {
 		if (PTR_ERR(pos) == -EAGAIN) {
diff --git a/net/Kconfig b/net/Kconfig
index 9dba271..37ec8e6 100644
--- a/net/Kconfig
+++ b/net/Kconfig
@@ -182,6 +182,7 @@
 	depends on BRIDGE
 	depends on NETFILTER && INET
 	depends on NETFILTER_ADVANCED
+	select NETFILTER_FAMILY_BRIDGE
 	default m
 	---help---
 	  Enabling this option will let arptables resp. iptables see bridged
@@ -336,23 +337,6 @@
 	  To compile this code as a module, choose M here: the
 	  module will be called pktgen.
 
-config NET_TCPPROBE
-	tristate "TCP connection probing"
-	depends on INET && PROC_FS && KPROBES
-	---help---
-	This module allows for capturing the changes to TCP connection
-	state in response to incoming packets. It is used for debugging
-	TCP congestion avoidance modules. If you don't understand
-	what was just said, you don't need it: say N.
-
-	Documentation on how to use TCP connection probing can be found
-	at:
-	
-	  http://www.linuxfoundation.org/collaborate/workgroups/networking/tcpprobe
-
-	To compile this code as a module, choose M here: the
-	module will be called tcp_probe.
-
 config NET_DROP_MONITOR
 	tristate "Network packet drop alerting service"
 	depends on INET && TRACEPOINTS
diff --git a/net/atm/common.c b/net/atm/common.c
index 8a4f991..5763fd2 100644
--- a/net/atm/common.c
+++ b/net/atm/common.c
@@ -14,7 +14,7 @@
 #include <linux/capability.h>
 #include <linux/mm.h>
 #include <linux/sched/signal.h>
-#include <linux/time.h>		/* struct timeval */
+#include <linux/time64.h>	/* 64-bit time for seconds */
 #include <linux/skbuff.h>
 #include <linux/bitops.h>
 #include <linux/init.h>
diff --git a/net/atm/mpc.c b/net/atm/mpc.c
index 7c6a1cc..31e0dcb 100644
--- a/net/atm/mpc.c
+++ b/net/atm/mpc.c
@@ -1089,7 +1089,7 @@ static void MPOA_trigger_rcvd(struct k_message *msg, struct mpoa_client *mpc)
 		msg->type = SND_MPOA_RES_RQST;
 		msg->content.in_info = entry->ctrl_info;
 		msg_to_mpoad(msg, mpc);
-		do_gettimeofday(&(entry->reply_wait));
+		entry->reply_wait = ktime_get_seconds();
 		mpc->in_ops->put(entry);
 		return;
 	}
@@ -1099,7 +1099,7 @@ static void MPOA_trigger_rcvd(struct k_message *msg, struct mpoa_client *mpc)
 		msg->type = SND_MPOA_RES_RQST;
 		msg->content.in_info = entry->ctrl_info;
 		msg_to_mpoad(msg, mpc);
-		do_gettimeofday(&(entry->reply_wait));
+		entry->reply_wait = ktime_get_seconds();
 		mpc->in_ops->put(entry);
 		return;
 	}
@@ -1175,8 +1175,9 @@ static void MPOA_res_reply_rcvd(struct k_message *msg, struct mpoa_client *mpc)
 	}
 
 	entry->ctrl_info = msg->content.in_info;
-	do_gettimeofday(&(entry->tv));
-	do_gettimeofday(&(entry->reply_wait)); /* Used in refreshing func from now on */
+	entry->time = ktime_get_seconds();
+	/* Used in refreshing func from now on */
+	entry->reply_wait = ktime_get_seconds();
 	entry->refresh_time = 0;
 	ddprintk_cont("entry->shortcut = %p\n", entry->shortcut);
 
diff --git a/net/atm/mpoa_caches.c b/net/atm/mpoa_caches.c
index e01450b..4bb4183 100644
--- a/net/atm/mpoa_caches.c
+++ b/net/atm/mpoa_caches.c
@@ -117,7 +117,7 @@ static in_cache_entry *in_cache_add_entry(__be32 dst_ip,
 
 	memcpy(entry->MPS_ctrl_ATM_addr, client->mps_ctrl_addr, ATM_ESA_LEN);
 	entry->ctrl_info.in_dst_ip = dst_ip;
-	do_gettimeofday(&(entry->tv));
+	entry->time = ktime_get_seconds();
 	entry->retry_time = client->parameters.mpc_p4;
 	entry->count = 1;
 	entry->entry_state = INGRESS_INVALID;
@@ -148,7 +148,7 @@ static int cache_hit(in_cache_entry *entry, struct mpoa_client *mpc)
 			if (qos != NULL)
 				msg.qos = qos->qos;
 			msg_to_mpoad(&msg, mpc);
-			do_gettimeofday(&(entry->reply_wait));
+			entry->reply_wait = ktime_get_seconds();
 			entry->entry_state = INGRESS_RESOLVING;
 		}
 		if (entry->shortcut != NULL)
@@ -171,7 +171,7 @@ static int cache_hit(in_cache_entry *entry, struct mpoa_client *mpc)
 		if (qos != NULL)
 			msg.qos = qos->qos;
 		msg_to_mpoad(&msg, mpc);
-		do_gettimeofday(&(entry->reply_wait));
+		entry->reply_wait = ktime_get_seconds();
 	}
 
 	return CLOSED;
@@ -227,17 +227,16 @@ static void in_cache_remove_entry(in_cache_entry *entry,
 static void clear_count_and_expired(struct mpoa_client *client)
 {
 	in_cache_entry *entry, *next_entry;
-	struct timeval now;
+	time64_t now;
 
-	do_gettimeofday(&now);
+	now = ktime_get_seconds();
 
 	write_lock_bh(&client->ingress_lock);
 	entry = client->in_cache;
 	while (entry != NULL) {
 		entry->count = 0;
 		next_entry = entry->next;
-		if ((now.tv_sec - entry->tv.tv_sec)
-		   > entry->ctrl_info.holding_time) {
+		if ((now - entry->time) > entry->ctrl_info.holding_time) {
 			dprintk("holding time expired, ip = %pI4\n",
 				&entry->ctrl_info.in_dst_ip);
 			client->in_ops->remove_entry(entry, client);
@@ -253,35 +252,35 @@ static void check_resolving_entries(struct mpoa_client *client)
 
 	struct atm_mpoa_qos *qos;
 	in_cache_entry *entry;
-	struct timeval now;
+	time64_t now;
 	struct k_message msg;
 
-	do_gettimeofday(&now);
+	now = ktime_get_seconds();
 
 	read_lock_bh(&client->ingress_lock);
 	entry = client->in_cache;
 	while (entry != NULL) {
 		if (entry->entry_state == INGRESS_RESOLVING) {
-			if ((now.tv_sec - entry->hold_down.tv_sec) <
-			    client->parameters.mpc_p6) {
+
+			if ((now - entry->hold_down)
+					< client->parameters.mpc_p6) {
 				entry = entry->next;	/* Entry in hold down */
 				continue;
 			}
-			if ((now.tv_sec - entry->reply_wait.tv_sec) >
-			    entry->retry_time) {
+			if ((now - entry->reply_wait) > entry->retry_time) {
 				entry->retry_time = MPC_C1 * (entry->retry_time);
 				/*
 				 * Retry time maximum exceeded,
 				 * put entry in hold down.
 				 */
 				if (entry->retry_time > client->parameters.mpc_p5) {
-					do_gettimeofday(&(entry->hold_down));
+					entry->hold_down = ktime_get_seconds();
 					entry->retry_time = client->parameters.mpc_p4;
 					entry = entry->next;
 					continue;
 				}
 				/* Ask daemon to send a resolution request. */
-				memset(&(entry->hold_down), 0, sizeof(struct timeval));
+				memset(&entry->hold_down, 0, sizeof(time64_t));
 				msg.type = SND_MPOA_RES_RTRY;
 				memcpy(msg.MPS_ctrl, client->mps_ctrl_addr, ATM_ESA_LEN);
 				msg.content.in_info = entry->ctrl_info;
@@ -289,7 +288,7 @@ static void check_resolving_entries(struct mpoa_client *client)
 				if (qos != NULL)
 					msg.qos = qos->qos;
 				msg_to_mpoad(&msg, client);
-				do_gettimeofday(&(entry->reply_wait));
+				entry->reply_wait = ktime_get_seconds();
 			}
 		}
 		entry = entry->next;
@@ -300,18 +299,18 @@ static void check_resolving_entries(struct mpoa_client *client)
 /* Call this every MPC-p5 seconds. */
 static void refresh_entries(struct mpoa_client *client)
 {
-	struct timeval now;
+	time64_t now;
 	struct in_cache_entry *entry = client->in_cache;
 
 	ddprintk("refresh_entries\n");
-	do_gettimeofday(&now);
+	now = ktime_get_seconds();
 
 	read_lock_bh(&client->ingress_lock);
 	while (entry != NULL) {
 		if (entry->entry_state == INGRESS_RESOLVED) {
 			if (!(entry->refresh_time))
 				entry->refresh_time = (2 * (entry->ctrl_info.holding_time))/3;
-			if ((now.tv_sec - entry->reply_wait.tv_sec) >
+			if ((now - entry->reply_wait) >
 			    entry->refresh_time) {
 				dprintk("refreshing an entry.\n");
 				entry->entry_state = INGRESS_REFRESHING;
@@ -480,7 +479,7 @@ static eg_cache_entry *eg_cache_add_entry(struct k_message *msg,
 
 	memcpy(entry->MPS_ctrl_ATM_addr, client->mps_ctrl_addr, ATM_ESA_LEN);
 	entry->ctrl_info = msg->content.eg_info;
-	do_gettimeofday(&(entry->tv));
+	entry->time = ktime_get_seconds();
 	entry->entry_state = EGRESS_RESOLVED;
 	dprintk("new_eg_cache_entry cache_id %u\n",
 		ntohl(entry->ctrl_info.cache_id));
@@ -495,7 +494,7 @@ static eg_cache_entry *eg_cache_add_entry(struct k_message *msg,
 
 static void update_eg_cache_entry(eg_cache_entry *entry, uint16_t holding_time)
 {
-	do_gettimeofday(&(entry->tv));
+	entry->time = ktime_get_seconds();
 	entry->entry_state = EGRESS_RESOLVED;
 	entry->ctrl_info.holding_time = holding_time;
 }
@@ -503,17 +502,16 @@ static void update_eg_cache_entry(eg_cache_entry *entry, uint16_t holding_time)
 static void clear_expired(struct mpoa_client *client)
 {
 	eg_cache_entry *entry, *next_entry;
-	struct timeval now;
+	time64_t now;
 	struct k_message msg;
 
-	do_gettimeofday(&now);
+	now = ktime_get_seconds();
 
 	write_lock_irq(&client->egress_lock);
 	entry = client->eg_cache;
 	while (entry != NULL) {
 		next_entry = entry->next;
-		if ((now.tv_sec - entry->tv.tv_sec)
-		   > entry->ctrl_info.holding_time) {
+		if ((now - entry->time) > entry->ctrl_info.holding_time) {
 			msg.type = SND_EGRESS_PURGE;
 			msg.content.eg_info = entry->ctrl_info;
 			dprintk("egress_cache: holding time expired, cache_id = %u.\n",
diff --git a/net/atm/mpoa_caches.h b/net/atm/mpoa_caches.h
index 6a26666..464c4c7 100644
--- a/net/atm/mpoa_caches.h
+++ b/net/atm/mpoa_caches.h
@@ -2,6 +2,7 @@
 #ifndef MPOA_CACHES_H
 #define MPOA_CACHES_H
 
+#include <linux/time64.h>
 #include <linux/netdevice.h>
 #include <linux/types.h>
 #include <linux/atm.h>
@@ -16,9 +17,9 @@ void atm_mpoa_init_cache(struct mpoa_client *mpc);
 typedef struct in_cache_entry {
 	struct in_cache_entry *next;
 	struct in_cache_entry *prev;
-	struct timeval  tv;
-	struct timeval  reply_wait;
-	struct timeval  hold_down;
+	time64_t  time;
+	time64_t  reply_wait;
+	time64_t  hold_down;
 	uint32_t  packets_fwded;
 	uint16_t  entry_state;
 	uint32_t retry_time;
@@ -53,7 +54,7 @@ struct in_cache_ops{
 typedef struct eg_cache_entry{
 	struct               eg_cache_entry *next;
 	struct               eg_cache_entry *prev;
-	struct               timeval  tv;
+	time64_t	     time;
 	uint8_t              MPS_ctrl_ATM_addr[ATM_ESA_LEN];
 	struct atm_vcc       *shortcut;
 	uint32_t             packets_rcvd;
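
The shape of the conversion throughout these net/atm hunks, as a sketch:
second-granular timestamps move from struct timeval filled by
do_gettimeofday() to time64_t filled by ktime_get_seconds(), which is 64-bit
on all architectures (no 2038 overflow of tv_sec) and monotonic, so expiry
checks are immune to settimeofday() jumps:

	/* at entry creation */
	entry->time = ktime_get_seconds();

	/* at expiry check: plain 64-bit arithmetic, no tv_sec access */
	if (ktime_get_seconds() - entry->time > entry->ctrl_info.holding_time)
		/* expire the entry */;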
diff --git a/net/atm/mpoa_proc.c b/net/atm/mpoa_proc.c
index 8a0c17e..2212da9 100644
--- a/net/atm/mpoa_proc.c
+++ b/net/atm/mpoa_proc.c
@@ -8,7 +8,7 @@
 #include <linux/mm.h>
 #include <linux/module.h>
 #include <linux/proc_fs.h>
-#include <linux/time.h>
+#include <linux/ktime.h>
 #include <linux/seq_file.h>
 #include <linux/uaccess.h>
 #include <linux/atmmpc.h>
@@ -138,7 +138,7 @@ static int mpc_show(struct seq_file *m, void *v)
 	int i;
 	in_cache_entry *in_entry;
 	eg_cache_entry *eg_entry;
-	struct timeval now;
+	time64_t now;
 	unsigned char ip_string[16];
 
 	if (v == SEQ_START_TOKEN) {
@@ -148,15 +148,17 @@ static int mpc_show(struct seq_file *m, void *v)
 
 	seq_printf(m, "\nInterface %d:\n\n", mpc->dev_num);
 	seq_printf(m, "Ingress Entries:\nIP address      State      Holding time  Packets fwded  VPI  VCI\n");
-	do_gettimeofday(&now);
+	now = ktime_get_seconds();
 
 	for (in_entry = mpc->in_cache; in_entry; in_entry = in_entry->next) {
+		unsigned long seconds_delta = now - in_entry->time;
+
 		sprintf(ip_string, "%pI4", &in_entry->ctrl_info.in_dst_ip);
 		seq_printf(m, "%-16s%s%-14lu%-12u",
 			   ip_string,
 			   ingress_state_string(in_entry->entry_state),
 			   in_entry->ctrl_info.holding_time -
-			   (now.tv_sec-in_entry->tv.tv_sec),
+			   seconds_delta,
 			   in_entry->packets_fwded);
 		if (in_entry->shortcut)
 			seq_printf(m, "   %-3d  %-3d",
@@ -169,13 +171,14 @@ static int mpc_show(struct seq_file *m, void *v)
 	seq_printf(m, "Egress Entries:\nIngress MPC ATM addr\nCache-id        State      Holding time  Packets recvd  Latest IP addr   VPI VCI\n");
 	for (eg_entry = mpc->eg_cache; eg_entry; eg_entry = eg_entry->next) {
 		unsigned char *p = eg_entry->ctrl_info.in_MPC_data_ATM_addr;
+		unsigned long seconds_delta = now - eg_entry->time;
+
 		for (i = 0; i < ATM_ESA_LEN; i++)
 			seq_printf(m, "%02x", p[i]);
 		seq_printf(m, "\n%-16lu%s%-14lu%-15u",
 			   (unsigned long)ntohl(eg_entry->ctrl_info.cache_id),
 			   egress_state_string(eg_entry->entry_state),
-			   (eg_entry->ctrl_info.holding_time -
-			    (now.tv_sec-eg_entry->tv.tv_sec)),
+			   (eg_entry->ctrl_info.holding_time - seconds_delta),
 			   eg_entry->packets_rcvd);
 
 		/* latest IP address */
diff --git a/net/batman-adv/Kconfig b/net/batman-adv/Kconfig
index b73b96a..c44f651 100644
--- a/net/batman-adv/Kconfig
+++ b/net/batman-adv/Kconfig
@@ -1,3 +1,20 @@
+# SPDX-License-Identifier: GPL-2.0
+# Copyright (C) 2007-2017  B.A.T.M.A.N. contributors:
+#
+# Marek Lindner, Simon Wunderlich
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of version 2 of the GNU General Public
+# License as published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, see <http://www.gnu.org/licenses/>.
+
 #
 # B.A.T.M.A.N meshing protocol
 #
diff --git a/net/batman-adv/Makefile b/net/batman-adv/Makefile
index 915987b..022f6e7 100644
--- a/net/batman-adv/Makefile
+++ b/net/batman-adv/Makefile
@@ -1,4 +1,4 @@
-#
+# SPDX-License-Identifier: GPL-2.0
 # Copyright (C) 2007-2017  B.A.T.M.A.N. contributors:
 #
 # Marek Lindner, Simon Wunderlich
diff --git a/net/batman-adv/bat_algo.c b/net/batman-adv/bat_algo.c
index 44fd073..80c72c7 100644
--- a/net/batman-adv/bat_algo.c
+++ b/net/batman-adv/bat_algo.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
 /* Copyright (C) 2007-2017  B.A.T.M.A.N. contributors:
  *
  * Marek Lindner, Simon Wunderlich
@@ -37,7 +38,8 @@ char batadv_routing_algo[20] = "BATMAN_IV";
 static struct hlist_head batadv_algo_list;
 
 /**
- * batadv_algo_init - Initialize batman-adv algorithm management data structures
+ * batadv_algo_init() - Initialize batman-adv algorithm management data
+ *  structures
  */
 void batadv_algo_init(void)
 {
@@ -59,6 +61,12 @@ static struct batadv_algo_ops *batadv_algo_get(char *name)
 	return bat_algo_ops;
 }
 
+/**
+ * batadv_algo_register() - Register callbacks for a mesh algorithm
+ * @bat_algo_ops: mesh algorithm callbacks to add
+ *
+ * Return: 0 on success or negative error number in case of failure
+ */
 int batadv_algo_register(struct batadv_algo_ops *bat_algo_ops)
 {
 	struct batadv_algo_ops *bat_algo_ops_tmp;
@@ -88,6 +96,19 @@ int batadv_algo_register(struct batadv_algo_ops *bat_algo_ops)
 	return 0;
 }
 
+/**
+ * batadv_algo_select() - Select algorithm of soft interface
+ * @bat_priv: the bat priv with all the soft interface information
+ * @name: name of the algorithm to select
+ *
+ * The algorithm callbacks for the soft interface will be set when the algorithm
+ * with the correct name is found. Any previously selected algorithm will not
+ * be deinitialized and the newly selected algorithm will also not be
+ * initialized.
+ * It is therefore not allowed to call batadv_algo_select outside the creation
+ * function of the soft interface.
+ *
+ * Return: 0 on success or negative error number in case of failure
+ */
 int batadv_algo_select(struct batadv_priv *bat_priv, char *name)
 {
 	struct batadv_algo_ops *bat_algo_ops;
@@ -102,6 +123,14 @@ int batadv_algo_select(struct batadv_priv *bat_priv, char *name)
 }
 
 #ifdef CONFIG_BATMAN_ADV_DEBUGFS
+
+/**
+ * batadv_algo_seq_print_text() - Print the supported algorithms in a seq file
+ * @seq: seq file to print on
+ * @offset: not used
+ *
+ * Return: always 0
+ */
 int batadv_algo_seq_print_text(struct seq_file *seq, void *offset)
 {
 	struct batadv_algo_ops *bat_algo_ops;
@@ -148,7 +177,7 @@ module_param_cb(routing_algo, &batadv_param_ops_ra, &batadv_param_string_ra,
 		0644);
 
 /**
- * batadv_algo_dump_entry - fill in information about one supported routing
+ * batadv_algo_dump_entry() - fill in information about one supported routing
  *  algorithm
  * @msg: netlink message to be sent back
  * @portid: Port to reply to
@@ -179,7 +208,7 @@ static int batadv_algo_dump_entry(struct sk_buff *msg, u32 portid, u32 seq,
 }
 
 /**
- * batadv_algo_dump - fill in information about supported routing
+ * batadv_algo_dump() - fill in information about supported routing
  *  algorithms
  * @msg: netlink message to be sent back
  * @cb: Parameters to the netlink request
diff --git a/net/batman-adv/bat_algo.h b/net/batman-adv/bat_algo.h
index 29f6312..0292216 100644
--- a/net/batman-adv/bat_algo.h
+++ b/net/batman-adv/bat_algo.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
 /* Copyright (C) 2011-2017  B.A.T.M.A.N. contributors:
  *
  * Marek Lindner, Linus Lüssing
diff --git a/net/batman-adv/bat_iv_ogm.c b/net/batman-adv/bat_iv_ogm.c
index bbe8414..79e3263 100644
--- a/net/batman-adv/bat_iv_ogm.c
+++ b/net/batman-adv/bat_iv_ogm.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
 /* Copyright (C) 2007-2017  B.A.T.M.A.N. contributors:
  *
  * Marek Lindner, Simon Wunderlich
@@ -26,7 +27,7 @@
 #include <linux/cache.h>
 #include <linux/errno.h>
 #include <linux/etherdevice.h>
-#include <linux/fs.h>
+#include <linux/gfp.h>
 #include <linux/if_ether.h>
 #include <linux/init.h>
 #include <linux/jiffies.h>
@@ -51,6 +52,7 @@
 #include <linux/workqueue.h>
 #include <net/genetlink.h>
 #include <net/netlink.h>
+#include <uapi/linux/batadv_packet.h>
 #include <uapi/linux/batman_adv.h>
 
 #include "bat_algo.h"
@@ -62,7 +64,6 @@
 #include "netlink.h"
 #include "network-coding.h"
 #include "originator.h"
-#include "packet.h"
 #include "routing.h"
 #include "send.h"
 #include "translation-table.h"
@@ -72,21 +73,28 @@ static void batadv_iv_send_outstanding_bat_ogm_packet(struct work_struct *work);
 
 /**
  * enum batadv_dup_status - duplicate status
- * @BATADV_NO_DUP: the packet is no duplicate
- * @BATADV_ORIG_DUP: OGM is a duplicate in the originator (but not for the
- *  neighbor)
- * @BATADV_NEIGH_DUP: OGM is a duplicate for the neighbor
- * @BATADV_PROTECTED: originator is currently protected (after reboot)
  */
 enum batadv_dup_status {
+	/** @BATADV_NO_DUP: the packet is no duplicate */
 	BATADV_NO_DUP = 0,
+
+	/**
+	 * @BATADV_ORIG_DUP: OGM is a duplicate in the originator (but not for
+	 *  the neighbor)
+	 */
 	BATADV_ORIG_DUP,
+
+	/** @BATADV_NEIGH_DUP: OGM is a duplicate for the neighbor */
 	BATADV_NEIGH_DUP,
+
+	/**
+	 * @BATADV_PROTECTED: originator is currently protected (after reboot)
+	 */
 	BATADV_PROTECTED,
 };
 
 /**
- * batadv_ring_buffer_set - update the ring buffer with the given value
+ * batadv_ring_buffer_set() - update the ring buffer with the given value
  * @lq_recv: pointer to the ring buffer
  * @lq_index: index to store the value at
  * @value: value to store in the ring buffer
@@ -98,7 +106,7 @@ static void batadv_ring_buffer_set(u8 lq_recv[], u8 *lq_index, u8 value)
 }
 
 /**
- * batadv_ring_buffer_avg - compute the average of all non-zero values stored
+ * batadv_ring_buffer_avg() - compute the average of all non-zero values stored
  * in the given ring buffer
  * @lq_recv: pointer to the ring buffer
  *
@@ -130,7 +138,7 @@ static u8 batadv_ring_buffer_avg(const u8 lq_recv[])
 }
 
 /**
- * batadv_iv_ogm_orig_free - free the private resources allocated for this
+ * batadv_iv_ogm_orig_free() - free the private resources allocated for this
  *  orig_node
  * @orig_node: the orig_node for which the resources have to be free'd
  */
@@ -141,8 +149,8 @@ static void batadv_iv_ogm_orig_free(struct batadv_orig_node *orig_node)
 }
 
 /**
- * batadv_iv_ogm_orig_add_if - change the private structures of the orig_node to
- *  include the new hard-interface
+ * batadv_iv_ogm_orig_add_if() - change the private structures of the orig_node
+ *  to include the new hard-interface
  * @orig_node: the orig_node that has to be changed
  * @max_if_num: the current amount of interfaces
  *
@@ -186,7 +194,7 @@ static int batadv_iv_ogm_orig_add_if(struct batadv_orig_node *orig_node,
 }
 
 /**
- * batadv_iv_ogm_drop_bcast_own_entry - drop section of bcast_own
+ * batadv_iv_ogm_drop_bcast_own_entry() - drop section of bcast_own
  * @orig_node: the orig_node that has to be changed
  * @max_if_num: the current amount of interfaces
  * @del_if_num: the index of the interface being removed
@@ -224,7 +232,7 @@ batadv_iv_ogm_drop_bcast_own_entry(struct batadv_orig_node *orig_node,
 }
 
 /**
- * batadv_iv_ogm_drop_bcast_own_sum_entry - drop section of bcast_own_sum
+ * batadv_iv_ogm_drop_bcast_own_sum_entry() - drop section of bcast_own_sum
  * @orig_node: the orig_node that has to be changed
  * @max_if_num: the current amount of interfaces
  * @del_if_num: the index of the interface being removed
@@ -259,8 +267,8 @@ batadv_iv_ogm_drop_bcast_own_sum_entry(struct batadv_orig_node *orig_node,
 }
 
 /**
- * batadv_iv_ogm_orig_del_if - change the private structures of the orig_node to
- *  exclude the removed interface
+ * batadv_iv_ogm_orig_del_if() - change the private structures of the orig_node
+ *  to exclude the removed interface
  * @orig_node: the orig_node that has to be changed
  * @max_if_num: the current amount of interfaces
  * @del_if_num: the index of the interface being removed
@@ -290,7 +298,8 @@ static int batadv_iv_ogm_orig_del_if(struct batadv_orig_node *orig_node,
 }
 
 /**
- * batadv_iv_ogm_orig_get - retrieve or create (if does not exist) an originator
+ * batadv_iv_ogm_orig_get() - retrieve or create (if does not exist) an
+ *  originator
  * @bat_priv: the bat priv with all the soft interface information
  * @addr: mac address of the originator
  *
@@ -447,7 +456,7 @@ static u8 batadv_hop_penalty(u8 tq, const struct batadv_priv *bat_priv)
 }
 
 /**
- * batadv_iv_ogm_aggr_packet - checks if there is another OGM attached
+ * batadv_iv_ogm_aggr_packet() - checks if there is another OGM attached
  * @buff_pos: current position in the skb
  * @packet_len: total length of the skb
  * @tvlv_len: tvlv length of the previously considered OGM
@@ -557,7 +566,7 @@ static void batadv_iv_ogm_emit(struct batadv_forw_packet *forw_packet)
 }
 
 /**
- * batadv_iv_ogm_can_aggregate - find out if an OGM can be aggregated on an
+ * batadv_iv_ogm_can_aggregate() - find out if an OGM can be aggregated on an
  *  existing forward packet
  * @new_bat_ogm_packet: OGM packet to be aggregated
  * @bat_priv: the bat priv with all the soft interface information
@@ -660,7 +669,7 @@ batadv_iv_ogm_can_aggregate(const struct batadv_ogm_packet *new_bat_ogm_packet,
 }
 
 /**
- * batadv_iv_ogm_aggregate_new - create a new aggregated packet and add this
+ * batadv_iv_ogm_aggregate_new() - create a new aggregated packet and add this
  *  packet to it.
  * @packet_buff: pointer to the OGM
  * @packet_len: (total) length of the OGM
@@ -743,7 +752,7 @@ static void batadv_iv_ogm_aggregate(struct batadv_forw_packet *forw_packet_aggr,
 }
 
 /**
- * batadv_iv_ogm_queue_add - queue up an OGM for transmission
+ * batadv_iv_ogm_queue_add() - queue up an OGM for transmission
  * @bat_priv: the bat priv with all the soft interface information
  * @packet_buff: pointer to the OGM
  * @packet_len: (total) length of the OGM
@@ -869,8 +878,8 @@ static void batadv_iv_ogm_forward(struct batadv_orig_node *orig_node,
 }
 
 /**
- * batadv_iv_ogm_slide_own_bcast_window - bitshift own OGM broadcast windows for
- * the given interface
+ * batadv_iv_ogm_slide_own_bcast_window() - bitshift own OGM broadcast windows
+ *  for the given interface
  * @hard_iface: the interface for which the windows have to be shifted
  */
 static void
@@ -987,7 +996,7 @@ static void batadv_iv_ogm_schedule(struct batadv_hard_iface *hard_iface)
 }
 
 /**
- * batadv_iv_ogm_orig_update - use OGM to update corresponding data in an
+ * batadv_iv_ogm_orig_update() - use OGM to update corresponding data in an
  *  originator
  * @bat_priv: the bat priv with all the soft interface information
  * @orig_node: the orig node who originally emitted the ogm packet
@@ -1152,7 +1161,7 @@ batadv_iv_ogm_orig_update(struct batadv_priv *bat_priv,
 }
 
 /**
- * batadv_iv_ogm_calc_tq - calculate tq for current received ogm packet
+ * batadv_iv_ogm_calc_tq() - calculate tq for current received ogm packet
  * @orig_node: the orig node who originally emitted the ogm packet
  * @orig_neigh_node: the orig node struct of the neighbor who sent the packet
  * @batadv_ogm_packet: the ogm packet
@@ -1298,7 +1307,7 @@ static bool batadv_iv_ogm_calc_tq(struct batadv_orig_node *orig_node,
 }
 
 /**
- * batadv_iv_ogm_update_seqnos -  process a batman packet for all interfaces,
+ * batadv_iv_ogm_update_seqnos() - process a batman packet for all interfaces,
  *  adjust the sequence number and find out whether it is a duplicate
  * @ethhdr: ethernet header of the packet
  * @batadv_ogm_packet: OGM packet to be considered
@@ -1401,7 +1410,8 @@ batadv_iv_ogm_update_seqnos(const struct ethhdr *ethhdr,
 }
 
 /**
- * batadv_iv_ogm_process_per_outif - process a batman iv OGM for an outgoing if
+ * batadv_iv_ogm_process_per_outif() - process a batman iv OGM for an outgoing
+ *  interface
  * @skb: the skb containing the OGM
  * @ogm_offset: offset from skb->data to start of ogm header
  * @orig_node: the (cached) orig node for the originator of this OGM
@@ -1608,7 +1618,7 @@ batadv_iv_ogm_process_per_outif(const struct sk_buff *skb, int ogm_offset,
 }
 
 /**
- * batadv_iv_ogm_process - process an incoming batman iv OGM
+ * batadv_iv_ogm_process() - process an incoming batman iv OGM
  * @skb: the skb containing the OGM
  * @ogm_offset: offset to the OGM which should be processed (for aggregates)
 * @if_incoming: the interface where this packet was received
@@ -1861,7 +1871,7 @@ static int batadv_iv_ogm_receive(struct sk_buff *skb,
 
 #ifdef CONFIG_BATMAN_ADV_DEBUGFS
 /**
- * batadv_iv_ogm_orig_print_neigh - print neighbors for the originator table
+ * batadv_iv_ogm_orig_print_neigh() - print neighbors for the originator table
  * @orig_node: the orig_node for which the neighbors are printed
  * @if_outgoing: outgoing interface for these entries
  * @seq: debugfs table seq_file struct
@@ -1890,7 +1900,7 @@ batadv_iv_ogm_orig_print_neigh(struct batadv_orig_node *orig_node,
 }
 
 /**
- * batadv_iv_ogm_orig_print - print the originator table
+ * batadv_iv_ogm_orig_print() - print the originator table
  * @bat_priv: the bat priv with all the soft interface information
  * @seq: debugfs table seq_file struct
  * @if_outgoing: the outgoing interface for which this should be printed
@@ -1960,7 +1970,7 @@ static void batadv_iv_ogm_orig_print(struct batadv_priv *bat_priv,
 #endif
 
 /**
- * batadv_iv_ogm_neigh_get_tq_avg - Get the TQ average for a neighbour on a
+ * batadv_iv_ogm_neigh_get_tq_avg() - Get the TQ average for a neighbour on a
  *  given outgoing interface.
  * @neigh_node: Neighbour of interest
  * @if_outgoing: Outgoing interface of interest
@@ -1986,7 +1996,7 @@ batadv_iv_ogm_neigh_get_tq_avg(struct batadv_neigh_node *neigh_node,
 }
 
 /**
- * batadv_iv_ogm_orig_dump_subentry - Dump an originator subentry into a
+ * batadv_iv_ogm_orig_dump_subentry() - Dump an originator subentry into a
  *  message
  * @msg: Netlink message to dump into
  * @portid: Port making netlink request
@@ -2048,7 +2058,7 @@ batadv_iv_ogm_orig_dump_subentry(struct sk_buff *msg, u32 portid, u32 seq,
 }
 
 /**
- * batadv_iv_ogm_orig_dump_entry - Dump an originator entry into a message
+ * batadv_iv_ogm_orig_dump_entry() - Dump an originator entry into a message
  * @msg: Netlink message to dump into
  * @portid: Port making netlink request
  * @seq: Sequence number of netlink message
@@ -2110,7 +2120,7 @@ batadv_iv_ogm_orig_dump_entry(struct sk_buff *msg, u32 portid, u32 seq,
 }
 
 /**
- * batadv_iv_ogm_orig_dump_bucket - Dump an originator bucket into a
+ * batadv_iv_ogm_orig_dump_bucket() - Dump an originator bucket into a
  *  message
  * @msg: Netlink message to dump into
  * @portid: Port making netlink request
@@ -2153,7 +2163,7 @@ batadv_iv_ogm_orig_dump_bucket(struct sk_buff *msg, u32 portid, u32 seq,
 }
 
 /**
- * batadv_iv_ogm_orig_dump - Dump the originators into a message
+ * batadv_iv_ogm_orig_dump() - Dump the originators into a message
  * @msg: Netlink message to dump into
  * @cb: Control block containing additional options
  * @bat_priv: The bat priv with all the soft interface information
@@ -2190,7 +2200,7 @@ batadv_iv_ogm_orig_dump(struct sk_buff *msg, struct netlink_callback *cb,
 
 #ifdef CONFIG_BATMAN_ADV_DEBUGFS
 /**
- * batadv_iv_hardif_neigh_print - print a single hop neighbour node
+ * batadv_iv_hardif_neigh_print() - print a single hop neighbour node
  * @seq: neighbour table seq_file struct
  * @hardif_neigh: hardif neighbour information
  */
@@ -2209,7 +2219,7 @@ batadv_iv_hardif_neigh_print(struct seq_file *seq,
 }
 
 /**
- * batadv_iv_ogm_neigh_print - print the single hop neighbour list
+ * batadv_iv_ogm_neigh_print() - print the single hop neighbour list
  * @bat_priv: the bat priv with all the soft interface information
  * @seq: neighbour table seq_file struct
  */
@@ -2242,7 +2252,7 @@ static void batadv_iv_neigh_print(struct batadv_priv *bat_priv,
 #endif
 
 /**
- * batadv_iv_ogm_neigh_diff - calculate tq difference of two neighbors
+ * batadv_iv_ogm_neigh_diff() - calculate tq difference of two neighbors
  * @neigh1: the first neighbor object of the comparison
  * @if_outgoing1: outgoing interface for the first neighbor
  * @neigh2: the second neighbor object of the comparison
@@ -2287,7 +2297,7 @@ static bool batadv_iv_ogm_neigh_diff(struct batadv_neigh_node *neigh1,
 }
 
 /**
- * batadv_iv_ogm_neigh_dump_neigh - Dump a neighbour into a netlink message
+ * batadv_iv_ogm_neigh_dump_neigh() - Dump a neighbour into a netlink message
  * @msg: Netlink message to dump into
  * @portid: Port making netlink request
  * @seq: Sequence number of netlink message
@@ -2326,7 +2336,7 @@ batadv_iv_ogm_neigh_dump_neigh(struct sk_buff *msg, u32 portid, u32 seq,
 }
 
 /**
- * batadv_iv_ogm_neigh_dump_hardif - Dump the neighbours of a hard interface
+ * batadv_iv_ogm_neigh_dump_hardif() - Dump the neighbours of a hard interface
  *  into a message
  * @msg: Netlink message to dump into
  * @portid: Port making netlink request
@@ -2365,7 +2375,7 @@ batadv_iv_ogm_neigh_dump_hardif(struct sk_buff *msg, u32 portid, u32 seq,
 }
 
 /**
- * batadv_iv_ogm_neigh_dump - Dump the neighbours into a message
+ * batadv_iv_ogm_neigh_dump() - Dump the neighbours into a message
  * @msg: Netlink message to dump into
  * @cb: Control block containing additional options
  * @bat_priv: The bat priv with all the soft interface information
@@ -2417,7 +2427,7 @@ batadv_iv_ogm_neigh_dump(struct sk_buff *msg, struct netlink_callback *cb,
 }
 
 /**
- * batadv_iv_ogm_neigh_cmp - compare the metrics of two neighbors
+ * batadv_iv_ogm_neigh_cmp() - compare the metrics of two neighbors
  * @neigh1: the first neighbor object of the comparison
  * @if_outgoing1: outgoing interface for the first neighbor
  * @neigh2: the second neighbor object of the comparison
@@ -2443,7 +2453,7 @@ static int batadv_iv_ogm_neigh_cmp(struct batadv_neigh_node *neigh1,
 }
 
 /**
- * batadv_iv_ogm_neigh_is_sob - check if neigh1 is similarly good or better
+ * batadv_iv_ogm_neigh_is_sob() - check if neigh1 is similarly good or better
 *  than neigh2 from the metric perspective
  * @neigh1: the first neighbor object of the comparison
  * @if_outgoing1: outgoing interface for the first neighbor
@@ -2478,7 +2488,7 @@ static void batadv_iv_iface_activate(struct batadv_hard_iface *hard_iface)
 }
 
 /**
- * batadv_iv_init_sel_class - initialize GW selection class
+ * batadv_iv_init_sel_class() - initialize GW selection class
  * @bat_priv: the bat priv with all the soft interface information
  */
 static void batadv_iv_init_sel_class(struct batadv_priv *bat_priv)
@@ -2703,7 +2713,7 @@ static void batadv_iv_gw_print(struct batadv_priv *bat_priv,
 #endif
 
 /**
- * batadv_iv_gw_dump_entry - Dump a gateway into a message
+ * batadv_iv_gw_dump_entry() - Dump a gateway into a message
  * @msg: Netlink message to dump into
  * @portid: Port making netlink request
  * @seq: Sequence number of netlink message
@@ -2774,7 +2784,7 @@ static int batadv_iv_gw_dump_entry(struct sk_buff *msg, u32 portid, u32 seq,
 }
 
 /**
- * batadv_iv_gw_dump - Dump gateways into a message
+ * batadv_iv_gw_dump() - Dump gateways into a message
  * @msg: Netlink message to dump into
  * @cb: Control block containing additional options
  * @bat_priv: The bat priv with all the soft interface information
@@ -2843,6 +2853,11 @@ static struct batadv_algo_ops batadv_batman_iv __read_mostly = {
 	},
 };
 
+/**
+ * batadv_iv_init() - B.A.T.M.A.N. IV initialization function
+ *
+ * Return: 0 on success or negative error number in case of failure
+ */
 int __init batadv_iv_init(void)
 {
 	int ret;
diff --git a/net/batman-adv/bat_iv_ogm.h b/net/batman-adv/bat_iv_ogm.h
index ae2ab52..9dc0dd5 100644
--- a/net/batman-adv/bat_iv_ogm.h
+++ b/net/batman-adv/bat_iv_ogm.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
 /* Copyright (C) 2007-2017  B.A.T.M.A.N. contributors:
  *
  * Marek Lindner, Simon Wunderlich
diff --git a/net/batman-adv/bat_v.c b/net/batman-adv/bat_v.c
index e0e2bfc..27e165a 100644
--- a/net/batman-adv/bat_v.c
+++ b/net/batman-adv/bat_v.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
 /* Copyright (C) 2013-2017  B.A.T.M.A.N. contributors:
  *
  * Linus Lüssing, Marek Lindner
@@ -36,6 +37,7 @@
 #include <linux/workqueue.h>
 #include <net/genetlink.h>
 #include <net/netlink.h>
+#include <uapi/linux/batadv_packet.h>
 #include <uapi/linux/batman_adv.h>
 
 #include "bat_algo.h"
@@ -48,7 +50,6 @@
 #include "log.h"
 #include "netlink.h"
 #include "originator.h"
-#include "packet.h"
 
 struct sk_buff;
 
@@ -99,7 +100,7 @@ static void batadv_v_primary_iface_set(struct batadv_hard_iface *hard_iface)
 }
 
 /**
- * batadv_v_iface_update_mac - react to hard-interface MAC address change
+ * batadv_v_iface_update_mac() - react to hard-interface MAC address change
  * @hard_iface: the modified interface
  *
  * If the modified interface is the primary one, update the originator
@@ -130,7 +131,7 @@ batadv_v_hardif_neigh_init(struct batadv_hardif_neigh_node *hardif_neigh)
 
 #ifdef CONFIG_BATMAN_ADV_DEBUGFS
 /**
- * batadv_v_orig_print_neigh - print neighbors for the originator table
+ * batadv_v_orig_print_neigh() - print neighbors for the originator table
  * @orig_node: the orig_node for which the neighbors are printed
  * @if_outgoing: outgoing interface for these entries
  * @seq: debugfs table seq_file struct
@@ -160,7 +161,7 @@ batadv_v_orig_print_neigh(struct batadv_orig_node *orig_node,
 }
 
 /**
- * batadv_v_hardif_neigh_print - print a single ELP neighbour node
+ * batadv_v_hardif_neigh_print() - print a single ELP neighbour node
  * @seq: neighbour table seq_file struct
  * @hardif_neigh: hardif neighbour information
  */
@@ -181,7 +182,7 @@ batadv_v_hardif_neigh_print(struct seq_file *seq,
 }
 
 /**
- * batadv_v_neigh_print - print the single hop neighbour list
+ * batadv_v_neigh_print() - print the single hop neighbour list
  * @bat_priv: the bat priv with all the soft interface information
  * @seq: neighbour table seq_file struct
  */
@@ -215,7 +216,7 @@ static void batadv_v_neigh_print(struct batadv_priv *bat_priv,
 #endif
 
 /**
- * batadv_v_neigh_dump_neigh - Dump a neighbour into a message
+ * batadv_v_neigh_dump_neigh() - Dump a neighbour into a message
  * @msg: Netlink message to dump into
  * @portid: Port making netlink request
  * @seq: Sequence number of netlink message
@@ -258,7 +259,7 @@ batadv_v_neigh_dump_neigh(struct sk_buff *msg, u32 portid, u32 seq,
 }
 
 /**
- * batadv_v_neigh_dump_hardif - Dump the  neighbours of a hard interface  into
+ * batadv_v_neigh_dump_hardif() - Dump the neighbours of a hard interface into
  *  a message
  * @msg: Netlink message to dump into
  * @portid: Port making netlink request
@@ -296,7 +297,7 @@ batadv_v_neigh_dump_hardif(struct sk_buff *msg, u32 portid, u32 seq,
 }
 
 /**
- * batadv_v_neigh_dump - Dump the neighbours of a hard interface  into a
+ * batadv_v_neigh_dump() - Dump the neighbours of a hard interface into a
  *  message
  * @msg: Netlink message to dump into
  * @cb: Control block containing additional options
@@ -348,7 +349,7 @@ batadv_v_neigh_dump(struct sk_buff *msg, struct netlink_callback *cb,
 
 #ifdef CONFIG_BATMAN_ADV_DEBUGFS
 /**
- * batadv_v_orig_print - print the originator table
+ * batadv_v_orig_print() - print the originator table
  * @bat_priv: the bat priv with all the soft interface information
  * @seq: debugfs table seq_file struct
  * @if_outgoing: the outgoing interface for which this should be printed
@@ -416,8 +417,7 @@ static void batadv_v_orig_print(struct batadv_priv *bat_priv,
 #endif
 
 /**
- * batadv_v_orig_dump_subentry - Dump an originator subentry into a
- *  message
+ * batadv_v_orig_dump_subentry() - Dump an originator subentry into a message
  * @msg: Netlink message to dump into
  * @portid: Port making netlink request
  * @seq: Sequence number of netlink message
@@ -483,7 +483,7 @@ batadv_v_orig_dump_subentry(struct sk_buff *msg, u32 portid, u32 seq,
 }
 
 /**
- * batadv_v_orig_dump_entry - Dump an originator entry into a message
+ * batadv_v_orig_dump_entry() - Dump an originator entry into a message
  * @msg: Netlink message to dump into
  * @portid: Port making netlink request
  * @seq: Sequence number of netlink message
@@ -536,8 +536,7 @@ batadv_v_orig_dump_entry(struct sk_buff *msg, u32 portid, u32 seq,
 }
 
 /**
- * batadv_v_orig_dump_bucket - Dump an originator bucket into a
- *  message
+ * batadv_v_orig_dump_bucket() - Dump an originator bucket into a message
  * @msg: Netlink message to dump into
  * @portid: Port making netlink request
  * @seq: Sequence number of netlink message
@@ -578,7 +577,7 @@ batadv_v_orig_dump_bucket(struct sk_buff *msg, u32 portid, u32 seq,
 }
 
 /**
- * batadv_v_orig_dump - Dump the originators into a message
+ * batadv_v_orig_dump() - Dump the originators into a message
  * @msg: Netlink message to dump into
  * @cb: Control block containing additional options
  * @bat_priv: The bat priv with all the soft interface information
@@ -668,7 +667,7 @@ static bool batadv_v_neigh_is_sob(struct batadv_neigh_node *neigh1,
 }
 
 /**
- * batadv_v_init_sel_class - initialize GW selection class
+ * batadv_v_init_sel_class() - initialize GW selection class
  * @bat_priv: the bat priv with all the soft interface information
  */
 static void batadv_v_init_sel_class(struct batadv_priv *bat_priv)
@@ -704,7 +703,7 @@ static ssize_t batadv_v_show_sel_class(struct batadv_priv *bat_priv, char *buff)
 }
 
 /**
- * batadv_v_gw_throughput_get - retrieve the GW-bandwidth for a given GW
+ * batadv_v_gw_throughput_get() - retrieve the GW-bandwidth for a given GW
  * @gw_node: the GW to retrieve the metric for
  * @bw: the pointer where the metric will be stored. The metric is computed as
  *  the minimum between the GW advertised throughput and the path throughput to
@@ -747,7 +746,7 @@ static int batadv_v_gw_throughput_get(struct batadv_gw_node *gw_node, u32 *bw)
 }
 
 /**
- * batadv_v_gw_get_best_gw_node - retrieve the best GW node
+ * batadv_v_gw_get_best_gw_node() - retrieve the best GW node
  * @bat_priv: the bat priv with all the soft interface information
  *
  * Return: the GW node having the best GW-metric, NULL if no GW is known
@@ -785,7 +784,7 @@ batadv_v_gw_get_best_gw_node(struct batadv_priv *bat_priv)
 }
 
 /**
- * batadv_v_gw_is_eligible - check if a originator would be selected as GW
+ * batadv_v_gw_is_eligible() - check if an originator would be selected as GW
  * @bat_priv: the bat priv with all the soft interface information
  * @curr_gw_orig: originator representing the currently selected GW
  * @orig_node: the originator representing the new candidate
@@ -884,7 +883,7 @@ static int batadv_v_gw_write_buffer_text(struct batadv_priv *bat_priv,
 }
 
 /**
- * batadv_v_gw_print - print the gateway list
+ * batadv_v_gw_print() - print the gateway list
  * @bat_priv: the bat priv with all the soft interface information
  * @seq: gateway table seq_file struct
  */
@@ -913,7 +912,7 @@ static void batadv_v_gw_print(struct batadv_priv *bat_priv,
 #endif
 
 /**
- * batadv_v_gw_dump_entry - Dump a gateway into a message
+ * batadv_v_gw_dump_entry() - Dump a gateway into a message
  * @msg: Netlink message to dump into
  * @portid: Port making netlink request
  * @seq: Sequence number of netlink message
@@ -1004,7 +1003,7 @@ static int batadv_v_gw_dump_entry(struct sk_buff *msg, u32 portid, u32 seq,
 }
 
 /**
- * batadv_v_gw_dump - Dump gateways into a message
+ * batadv_v_gw_dump() - Dump gateways into a message
  * @msg: Netlink message to dump into
  * @cb: Control block containing additional options
  * @bat_priv: The bat priv with all the soft interface information
@@ -1074,7 +1073,7 @@ static struct batadv_algo_ops batadv_batman_v __read_mostly = {
 };
 
 /**
- * batadv_v_hardif_init - initialize the algorithm specific fields in the
+ * batadv_v_hardif_init() - initialize the algorithm specific fields in the
  *  hard-interface object
  * @hard_iface: the hard-interface to initialize
  */
@@ -1088,7 +1087,7 @@ void batadv_v_hardif_init(struct batadv_hard_iface *hard_iface)
 }
 
 /**
- * batadv_v_mesh_init - initialize the B.A.T.M.A.N. V private resources for a
+ * batadv_v_mesh_init() - initialize the B.A.T.M.A.N. V private resources for a
  *  mesh
  * @bat_priv: the object representing the mesh interface to initialise
  *
@@ -1106,7 +1105,7 @@ int batadv_v_mesh_init(struct batadv_priv *bat_priv)
 }
 
 /**
- * batadv_v_mesh_free - free the B.A.T.M.A.N. V private resources for a mesh
+ * batadv_v_mesh_free() - free the B.A.T.M.A.N. V private resources for a mesh
  * @bat_priv: the object representing the mesh interface to free
  */
 void batadv_v_mesh_free(struct batadv_priv *bat_priv)
@@ -1115,7 +1114,7 @@ void batadv_v_mesh_free(struct batadv_priv *bat_priv)
 }
 
 /**
- * batadv_v_init - B.A.T.M.A.N. V initialization function
+ * batadv_v_init() - B.A.T.M.A.N. V initialization function
  *
  * Description: Takes care of initializing all the subcomponents.
  * It is invoked upon module load only.
diff --git a/net/batman-adv/bat_v.h b/net/batman-adv/bat_v.h
index dd7c4b6..a17ab68 100644
--- a/net/batman-adv/bat_v.h
+++ b/net/batman-adv/bat_v.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
 /* Copyright (C) 2011-2017  B.A.T.M.A.N. contributors:
  *
  * Marek Lindner, Linus Lüssing
diff --git a/net/batman-adv/bat_v_elp.c b/net/batman-adv/bat_v_elp.c
index 1de992c..a83478c 100644
--- a/net/batman-adv/bat_v_elp.c
+++ b/net/batman-adv/bat_v_elp.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
 /* Copyright (C) 2011-2017  B.A.T.M.A.N. contributors:
  *
  * Linus Lüssing, Marek Lindner
@@ -24,7 +25,7 @@
 #include <linux/errno.h>
 #include <linux/etherdevice.h>
 #include <linux/ethtool.h>
-#include <linux/fs.h>
+#include <linux/gfp.h>
 #include <linux/if_ether.h>
 #include <linux/jiffies.h>
 #include <linux/kernel.h>
@@ -41,18 +42,18 @@
 #include <linux/types.h>
 #include <linux/workqueue.h>
 #include <net/cfg80211.h>
+#include <uapi/linux/batadv_packet.h>
 
 #include "bat_algo.h"
 #include "bat_v_ogm.h"
 #include "hard-interface.h"
 #include "log.h"
 #include "originator.h"
-#include "packet.h"
 #include "routing.h"
 #include "send.h"
 
 /**
- * batadv_v_elp_start_timer - restart timer for ELP periodic work
+ * batadv_v_elp_start_timer() - restart timer for ELP periodic work
  * @hard_iface: the interface for which the timer has to be reset
  */
 static void batadv_v_elp_start_timer(struct batadv_hard_iface *hard_iface)
@@ -67,7 +68,7 @@ static void batadv_v_elp_start_timer(struct batadv_hard_iface *hard_iface)
 }
 
 /**
- * batadv_v_elp_get_throughput - get the throughput towards a neighbour
+ * batadv_v_elp_get_throughput() - get the throughput towards a neighbour
  * @neigh: the neighbour for which the throughput has to be obtained
  *
 * Return: The throughput towards the given neighbour in multiples of 100kbps
@@ -153,8 +154,8 @@ static u32 batadv_v_elp_get_throughput(struct batadv_hardif_neigh_node *neigh)
 }
 
 /**
- * batadv_v_elp_throughput_metric_update - worker updating the throughput metric
- *  of a single hop neighbour
+ * batadv_v_elp_throughput_metric_update() - worker updating the throughput
+ *  metric of a single hop neighbour
  * @work: the work queue item
  */
 void batadv_v_elp_throughput_metric_update(struct work_struct *work)
@@ -177,7 +178,7 @@ void batadv_v_elp_throughput_metric_update(struct work_struct *work)
 }
 
 /**
- * batadv_v_elp_wifi_neigh_probe - send link probing packets to a neighbour
+ * batadv_v_elp_wifi_neigh_probe() - send link probing packets to a neighbour
  * @neigh: the neighbour to probe
  *
  * Sends a predefined number of unicast wifi packets to a given neighbour in
@@ -240,7 +241,7 @@ batadv_v_elp_wifi_neigh_probe(struct batadv_hardif_neigh_node *neigh)
 }
 
 /**
- * batadv_v_elp_periodic_work - ELP periodic task per interface
+ * batadv_v_elp_periodic_work() - ELP periodic task per interface
  * @work: work queue item
  *
  * Emits broadcast ELP message in regular intervals.
@@ -327,7 +328,7 @@ static void batadv_v_elp_periodic_work(struct work_struct *work)
 }
 
 /**
- * batadv_v_elp_iface_enable - setup the ELP interface private resources
+ * batadv_v_elp_iface_enable() - setup the ELP interface private resources
  * @hard_iface: interface for which the data has to be prepared
  *
  * Return: 0 on success or a -ENOMEM in case of failure.
@@ -375,7 +376,7 @@ int batadv_v_elp_iface_enable(struct batadv_hard_iface *hard_iface)
 }
 
 /**
- * batadv_v_elp_iface_disable - release ELP interface private resources
+ * batadv_v_elp_iface_disable() - release ELP interface private resources
  * @hard_iface: interface for which the resources have to be released
  */
 void batadv_v_elp_iface_disable(struct batadv_hard_iface *hard_iface)
@@ -387,7 +388,7 @@ void batadv_v_elp_iface_disable(struct batadv_hard_iface *hard_iface)
 }
 
 /**
- * batadv_v_elp_iface_activate - update the ELP buffer belonging to the given
+ * batadv_v_elp_iface_activate() - update the ELP buffer belonging to the given
  *  hard-interface
  * @primary_iface: the new primary interface
  * @hard_iface: interface holding the to-be-updated buffer
@@ -408,7 +409,7 @@ void batadv_v_elp_iface_activate(struct batadv_hard_iface *primary_iface,
 }
 
 /**
- * batadv_v_elp_primary_iface_set - change internal data to reflect the new
+ * batadv_v_elp_primary_iface_set() - change internal data to reflect the new
  *  primary interface
  * @primary_iface: the new primary interface
  */
@@ -428,7 +429,7 @@ void batadv_v_elp_primary_iface_set(struct batadv_hard_iface *primary_iface)
 }
 
 /**
- * batadv_v_elp_neigh_update - update an ELP neighbour node
+ * batadv_v_elp_neigh_update() - update an ELP neighbour node
  * @bat_priv: the bat priv with all the soft interface information
  * @neigh_addr: the neighbour interface address
  * @if_incoming: the interface the packet was received through
@@ -488,7 +489,7 @@ static void batadv_v_elp_neigh_update(struct batadv_priv *bat_priv,
 }
 
 /**
- * batadv_v_elp_packet_recv - main ELP packet handler
+ * batadv_v_elp_packet_recv() - main ELP packet handler
  * @skb: the received packet
  * @if_incoming: the interface this packet was received through
  *
diff --git a/net/batman-adv/bat_v_elp.h b/net/batman-adv/bat_v_elp.h
index 376ead2..5e39d05 100644
--- a/net/batman-adv/bat_v_elp.h
+++ b/net/batman-adv/bat_v_elp.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
 /* Copyright (C) 2013-2017  B.A.T.M.A.N. contributors:
  *
  * Linus Lüssing, Marek Lindner
diff --git a/net/batman-adv/bat_v_ogm.c b/net/batman-adv/bat_v_ogm.c
index c251445..ba59b77 100644
--- a/net/batman-adv/bat_v_ogm.c
+++ b/net/batman-adv/bat_v_ogm.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
 /* Copyright (C) 2013-2017  B.A.T.M.A.N. contributors:
  *
  * Antonio Quartulli
@@ -22,7 +23,7 @@
 #include <linux/byteorder/generic.h>
 #include <linux/errno.h>
 #include <linux/etherdevice.h>
-#include <linux/fs.h>
+#include <linux/gfp.h>
 #include <linux/if_ether.h>
 #include <linux/jiffies.h>
 #include <linux/kernel.h>
@@ -38,20 +39,20 @@
 #include <linux/string.h>
 #include <linux/types.h>
 #include <linux/workqueue.h>
+#include <uapi/linux/batadv_packet.h>
 
 #include "bat_algo.h"
 #include "hard-interface.h"
 #include "hash.h"
 #include "log.h"
 #include "originator.h"
-#include "packet.h"
 #include "routing.h"
 #include "send.h"
 #include "translation-table.h"
 #include "tvlv.h"
 
 /**
- * batadv_v_ogm_orig_get - retrieve and possibly create an originator node
+ * batadv_v_ogm_orig_get() - retrieve and possibly create an originator node
  * @bat_priv: the bat priv with all the soft interface information
  * @addr: the address of the originator
  *
@@ -88,7 +89,7 @@ struct batadv_orig_node *batadv_v_ogm_orig_get(struct batadv_priv *bat_priv,
 }
 
 /**
- * batadv_v_ogm_start_timer - restart the OGM sending timer
+ * batadv_v_ogm_start_timer() - restart the OGM sending timer
  * @bat_priv: the bat priv with all the soft interface information
  */
 static void batadv_v_ogm_start_timer(struct batadv_priv *bat_priv)
@@ -107,7 +108,7 @@ static void batadv_v_ogm_start_timer(struct batadv_priv *bat_priv)
 }
 
 /**
- * batadv_v_ogm_send_to_if - send a batman ogm using a given interface
+ * batadv_v_ogm_send_to_if() - send a batman ogm using a given interface
  * @skb: the OGM to send
  * @hard_iface: the interface to use to send the OGM
  */
@@ -127,7 +128,7 @@ static void batadv_v_ogm_send_to_if(struct sk_buff *skb,
 }
 
 /**
- * batadv_v_ogm_send - periodic worker broadcasting the own OGM
+ * batadv_v_ogm_send() - periodic worker broadcasting the own OGM
  * @work: work queue item
  */
 static void batadv_v_ogm_send(struct work_struct *work)
@@ -235,7 +236,7 @@ static void batadv_v_ogm_send(struct work_struct *work)
 }
 
 /**
- * batadv_v_ogm_iface_enable - prepare an interface for B.A.T.M.A.N. V
+ * batadv_v_ogm_iface_enable() - prepare an interface for B.A.T.M.A.N. V
  * @hard_iface: the interface to prepare
  *
  * Takes care of scheduling own OGM sending routine for this interface.
@@ -252,7 +253,7 @@ int batadv_v_ogm_iface_enable(struct batadv_hard_iface *hard_iface)
 }
 
 /**
- * batadv_v_ogm_primary_iface_set - set a new primary interface
+ * batadv_v_ogm_primary_iface_set() - set a new primary interface
  * @primary_iface: the new primary interface
  */
 void batadv_v_ogm_primary_iface_set(struct batadv_hard_iface *primary_iface)
@@ -268,8 +269,8 @@ void batadv_v_ogm_primary_iface_set(struct batadv_hard_iface *primary_iface)
 }
 
 /**
- * batadv_v_forward_penalty - apply a penalty to the throughput metric forwarded
- *  with B.A.T.M.A.N. V OGMs
+ * batadv_v_forward_penalty() - apply a penalty to the throughput metric
+ *  forwarded with B.A.T.M.A.N. V OGMs
  * @bat_priv: the bat priv with all the soft interface information
  * @if_incoming: the interface where the OGM has been received
  * @if_outgoing: the interface where the OGM has to be forwarded to
@@ -314,7 +315,7 @@ static u32 batadv_v_forward_penalty(struct batadv_priv *bat_priv,
 }
 
 /**
- * batadv_v_ogm_forward - check conditions and forward an OGM to the given
+ * batadv_v_ogm_forward() - check conditions and forward an OGM to the given
  *  outgoing interface
  * @bat_priv: the bat priv with all the soft interface information
  * @ogm_received: previously received OGM to be forwarded
@@ -405,7 +406,7 @@ static void batadv_v_ogm_forward(struct batadv_priv *bat_priv,
 }
 
 /**
- * batadv_v_ogm_metric_update - update route metric based on OGM
+ * batadv_v_ogm_metric_update() - update route metric based on OGM
  * @bat_priv: the bat priv with all the soft interface information
  * @ogm2: OGM2 structure
  * @orig_node: Originator structure for which the OGM has been received
@@ -490,7 +491,7 @@ static int batadv_v_ogm_metric_update(struct batadv_priv *bat_priv,
 }
 
 /**
- * batadv_v_ogm_route_update - update routes based on OGM
+ * batadv_v_ogm_route_update() - update routes based on OGM
  * @bat_priv: the bat priv with all the soft interface information
  * @ethhdr: the Ethernet header of the OGM2
  * @ogm2: OGM2 structure
@@ -590,7 +591,7 @@ static bool batadv_v_ogm_route_update(struct batadv_priv *bat_priv,
 }
 
 /**
- * batadv_v_ogm_process_per_outif - process a batman v OGM for an outgoing if
+ * batadv_v_ogm_process_per_outif() - process a batman v OGM for an outgoing if
  * @bat_priv: the bat priv with all the soft interface information
  * @ethhdr: the Ethernet header of the OGM2
  * @ogm2: OGM2 structure
@@ -639,7 +640,7 @@ batadv_v_ogm_process_per_outif(struct batadv_priv *bat_priv,
 }
 
 /**
- * batadv_v_ogm_aggr_packet - checks if there is another OGM aggregated
+ * batadv_v_ogm_aggr_packet() - checks if there is another OGM aggregated
  * @buff_pos: current position in the skb
  * @packet_len: total length of the skb
  * @tvlv_len: tvlv length of the previously considered OGM
@@ -659,7 +660,7 @@ static bool batadv_v_ogm_aggr_packet(int buff_pos, int packet_len,
 }
 
 /**
- * batadv_v_ogm_process - process an incoming batman v OGM
+ * batadv_v_ogm_process() - process an incoming batman v OGM
  * @skb: the skb containing the OGM
  * @ogm_offset: offset to the OGM which should be processed (for aggregates)
 * @if_incoming: the interface where this packet was received
@@ -787,7 +788,7 @@ static void batadv_v_ogm_process(const struct sk_buff *skb, int ogm_offset,
 }
 
 /**
- * batadv_v_ogm_packet_recv - OGM2 receiving handler
+ * batadv_v_ogm_packet_recv() - OGM2 receiving handler
  * @skb: the received OGM
  * @if_incoming: the interface where this OGM has been received
  *
@@ -851,7 +852,7 @@ int batadv_v_ogm_packet_recv(struct sk_buff *skb,
 }
 
 /**
- * batadv_v_ogm_init - initialise the OGM2 engine
+ * batadv_v_ogm_init() - initialise the OGM2 engine
  * @bat_priv: the bat priv with all the soft interface information
  *
  * Return: 0 on success or a negative error code in case of failure
@@ -884,7 +885,7 @@ int batadv_v_ogm_init(struct batadv_priv *bat_priv)
 }
 
 /**
- * batadv_v_ogm_free - free OGM private resources
+ * batadv_v_ogm_free() - free OGM private resources
  * @bat_priv: the bat priv with all the soft interface information
  */
 void batadv_v_ogm_free(struct batadv_priv *bat_priv)
diff --git a/net/batman-adv/bat_v_ogm.h b/net/batman-adv/bat_v_ogm.h
index 2068770..6a4c14c 100644
--- a/net/batman-adv/bat_v_ogm.h
+++ b/net/batman-adv/bat_v_ogm.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
 /* Copyright (C) 2013-2017  B.A.T.M.A.N. contributors:
  *
  * Antonio Quartulli
diff --git a/net/batman-adv/bitarray.c b/net/batman-adv/bitarray.c
index 2b070c7..bdc1ef0 100644
--- a/net/batman-adv/bitarray.c
+++ b/net/batman-adv/bitarray.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
 /* Copyright (C) 2006-2017  B.A.T.M.A.N. contributors:
  *
  * Simon Wunderlich, Marek Lindner
@@ -32,7 +33,7 @@ static void batadv_bitmap_shift_left(unsigned long *seq_bits, s32 n)
 }
 
 /**
- * batadv_bit_get_packet - receive and process one packet within the sequence
+ * batadv_bit_get_packet() - receive and process one packet within the sequence
  *  number window
  * @priv: the bat priv with all the soft interface information
  * @seq_bits: pointer to the sequence number receive packet
diff --git a/net/batman-adv/bitarray.h b/net/batman-adv/bitarray.h
index cc262c9..ca9d075 100644
--- a/net/batman-adv/bitarray.h
+++ b/net/batman-adv/bitarray.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
 /* Copyright (C) 2006-2017  B.A.T.M.A.N. contributors:
  *
  * Simon Wunderlich, Marek Lindner
@@ -26,7 +27,7 @@
 #include <linux/types.h>
 
 /**
- * batadv_test_bit - check if bit is set in the current window
+ * batadv_test_bit() - check if bit is set in the current window
  *
  * @seq_bits: pointer to the sequence number receive packet
  * @last_seqno: latest sequence number in seq_bits
@@ -46,7 +47,12 @@ static inline bool batadv_test_bit(const unsigned long *seq_bits,
 	return test_bit(diff, seq_bits) != 0;
 }
 
-/* turn corresponding bit on, so we can remember that we got the packet */
+/**
+ * batadv_set_bit() - Turn corresponding bit on, so we can remember that we got
+ *  the packet
+ * @seq_bits: bitmap of the packet receive window
+ * @n: relative sequence number of newly received packet
+ */
 static inline void batadv_set_bit(unsigned long *seq_bits, s32 n)
 {
 	/* if too old, just drop it */
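
A sketch of how these two helpers pair up in duplicate detection, assuming a
per-neighbour window where the relative index is last_seqno - seqno as in the
kernel-doc above (seq_bits, last_seqno and seqno are hypothetical caller
state):

	s32 seq_diff = last_seqno - seqno;

	if (!batadv_test_bit(seq_bits, last_seqno, seqno)) {
		batadv_set_bit(seq_bits, seq_diff);	/* remember this packet */
		/* first reception inside the window: process it */
	}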
diff --git a/net/batman-adv/bridge_loop_avoidance.c b/net/batman-adv/bridge_loop_avoidance.c
index cdd8e8e..fad4785 100644
--- a/net/batman-adv/bridge_loop_avoidance.c
+++ b/net/batman-adv/bridge_loop_avoidance.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
 /* Copyright (C) 2011-2017  B.A.T.M.A.N. contributors:
  *
  * Simon Wunderlich
@@ -24,7 +25,7 @@
 #include <linux/crc16.h>
 #include <linux/errno.h>
 #include <linux/etherdevice.h>
-#include <linux/fs.h>
+#include <linux/gfp.h>
 #include <linux/if_arp.h>
 #include <linux/if_ether.h>
 #include <linux/if_vlan.h>
@@ -49,6 +50,7 @@
 #include <net/genetlink.h>
 #include <net/netlink.h>
 #include <net/sock.h>
+#include <uapi/linux/batadv_packet.h>
 #include <uapi/linux/batman_adv.h>
 
 #include "hard-interface.h"
@@ -56,7 +58,6 @@
 #include "log.h"
 #include "netlink.h"
 #include "originator.h"
-#include "packet.h"
 #include "soft-interface.h"
 #include "sysfs.h"
 #include "translation-table.h"
@@ -69,7 +70,7 @@ batadv_bla_send_announce(struct batadv_priv *bat_priv,
 			 struct batadv_bla_backbone_gw *backbone_gw);
 
 /**
- * batadv_choose_claim - choose the right bucket for a claim.
+ * batadv_choose_claim() - choose the right bucket for a claim.
  * @data: data to hash
  * @size: size of the hash table
  *
@@ -87,7 +88,7 @@ static inline u32 batadv_choose_claim(const void *data, u32 size)
 }
 
 /**
- * batadv_choose_backbone_gw - choose the right bucket for a backbone gateway.
+ * batadv_choose_backbone_gw() - choose the right bucket for a backbone gateway.
  * @data: data to hash
  * @size: size of the hash table
  *
@@ -105,7 +106,7 @@ static inline u32 batadv_choose_backbone_gw(const void *data, u32 size)
 }
 
 /**
- * batadv_compare_backbone_gw - compare address and vid of two backbone gws
+ * batadv_compare_backbone_gw() - compare address and vid of two backbone gws
  * @node: list node of the first entry to compare
  * @data2: pointer to the second backbone gateway
  *
@@ -129,7 +130,7 @@ static bool batadv_compare_backbone_gw(const struct hlist_node *node,
 }
 
 /**
- * batadv_compare_claim - compare address and vid of two claims
+ * batadv_compare_claim() - compare address and vid of two claims
  * @node: list node of the first entry to compare
  * @data2: pointer to the second claims
  *
@@ -153,7 +154,7 @@ static bool batadv_compare_claim(const struct hlist_node *node,
 }
 
 /**
- * batadv_backbone_gw_release - release backbone gw from lists and queue for
+ * batadv_backbone_gw_release() - release backbone gw from lists and queue for
  *  free after rcu grace period
  * @ref: kref pointer of the backbone gw
  */
@@ -168,7 +169,7 @@ static void batadv_backbone_gw_release(struct kref *ref)
 }
 
 /**
- * batadv_backbone_gw_put - decrement the backbone gw refcounter and possibly
+ * batadv_backbone_gw_put() - decrement the backbone gw refcounter and possibly
  *  release it
  * @backbone_gw: backbone gateway to be free'd
  */
@@ -178,8 +179,8 @@ static void batadv_backbone_gw_put(struct batadv_bla_backbone_gw *backbone_gw)
 }
 
 /**
- * batadv_claim_release - release claim from lists and queue for free after rcu
- *  grace period
+ * batadv_claim_release() - release claim from lists and queue for free after
+ *  rcu grace period
  * @ref: kref pointer of the claim
  */
 static void batadv_claim_release(struct kref *ref)
@@ -204,8 +205,7 @@ static void batadv_claim_release(struct kref *ref)
 }
 
 /**
- * batadv_claim_put - decrement the claim refcounter and possibly
- *  release it
+ * batadv_claim_put() - decrement the claim refcounter and possibly release it
  * @claim: claim to be free'd
  */
 static void batadv_claim_put(struct batadv_bla_claim *claim)
@@ -214,7 +214,7 @@ static void batadv_claim_put(struct batadv_bla_claim *claim)
 }
 
 /**
- * batadv_claim_hash_find - looks for a claim in the claim hash
+ * batadv_claim_hash_find() - looks for a claim in the claim hash
  * @bat_priv: the bat priv with all the soft interface information
  * @data: search data (may be local/static data)
  *
@@ -253,7 +253,7 @@ batadv_claim_hash_find(struct batadv_priv *bat_priv,
 }
 
 /**
- * batadv_backbone_hash_find - looks for a backbone gateway in the hash
+ * batadv_backbone_hash_find() - looks for a backbone gateway in the hash
  * @bat_priv: the bat priv with all the soft interface information
  * @addr: the address of the originator
  * @vid: the VLAN ID
@@ -297,7 +297,7 @@ batadv_backbone_hash_find(struct batadv_priv *bat_priv, u8 *addr,
 }
 
 /**
- * batadv_bla_del_backbone_claims - delete all claims for a backbone
+ * batadv_bla_del_backbone_claims() - delete all claims for a backbone
  * @backbone_gw: backbone gateway where the claims should be removed
  */
 static void
@@ -337,7 +337,7 @@ batadv_bla_del_backbone_claims(struct batadv_bla_backbone_gw *backbone_gw)
 }
 
 /**
- * batadv_bla_send_claim - sends a claim frame according to the provided info
+ * batadv_bla_send_claim() - sends a claim frame according to the provided info
  * @bat_priv: the bat priv with all the soft interface information
  * @mac: the mac address to be announced within the claim
  * @vid: the VLAN ID
@@ -457,7 +457,7 @@ static void batadv_bla_send_claim(struct batadv_priv *bat_priv, u8 *mac,
 }
 
 /**
- * batadv_bla_loopdetect_report - worker for reporting the loop
+ * batadv_bla_loopdetect_report() - worker for reporting the loop
  * @work: work queue item
  *
  * Throws an uevent, as the loopdetect check function can't do that itself
@@ -487,7 +487,7 @@ static void batadv_bla_loopdetect_report(struct work_struct *work)
 }
 
 /**
- * batadv_bla_get_backbone_gw - finds or creates a backbone gateway
+ * batadv_bla_get_backbone_gw() - finds or creates a backbone gateway
  * @bat_priv: the bat priv with all the soft interface information
  * @orig: the mac address of the originator
  * @vid: the VLAN ID
@@ -560,7 +560,7 @@ batadv_bla_get_backbone_gw(struct batadv_priv *bat_priv, u8 *orig,
 }
 
 /**
- * batadv_bla_update_own_backbone_gw - updates the own backbone gw for a VLAN
+ * batadv_bla_update_own_backbone_gw() - updates the own backbone gw for a VLAN
  * @bat_priv: the bat priv with all the soft interface information
  * @primary_if: the selected primary interface
  * @vid: VLAN identifier
@@ -586,7 +586,7 @@ batadv_bla_update_own_backbone_gw(struct batadv_priv *bat_priv,
 }
 
 /**
- * batadv_bla_answer_request - answer a bla request by sending own claims
+ * batadv_bla_answer_request() - answer a bla request by sending own claims
  * @bat_priv: the bat priv with all the soft interface information
  * @primary_if: interface where the request came on
  * @vid: the vid where the request came on
@@ -636,7 +636,7 @@ static void batadv_bla_answer_request(struct batadv_priv *bat_priv,
 }
 
 /**
- * batadv_bla_send_request - send a request to repeat claims
+ * batadv_bla_send_request() - send a request to repeat claims
  * @backbone_gw: the backbone gateway from whom we are out of sync
  *
  * When the crc is wrong, ask the backbone gateway for a full table update.
@@ -663,7 +663,7 @@ static void batadv_bla_send_request(struct batadv_bla_backbone_gw *backbone_gw)
 }
 
 /**
- * batadv_bla_send_announce - Send an announcement frame
+ * batadv_bla_send_announce() - Send an announcement frame
  * @bat_priv: the bat priv with all the soft interface information
  * @backbone_gw: our backbone gateway which should be announced
  */
@@ -684,7 +684,7 @@ static void batadv_bla_send_announce(struct batadv_priv *bat_priv,
 }
 
 /**
- * batadv_bla_add_claim - Adds a claim in the claim hash
+ * batadv_bla_add_claim() - Adds a claim in the claim hash
  * @bat_priv: the bat priv with all the soft interface information
  * @mac: the mac address of the claim
  * @vid: the VLAN ID of the frame
@@ -774,7 +774,7 @@ static void batadv_bla_add_claim(struct batadv_priv *bat_priv,
 }
 
 /**
- * batadv_bla_claim_get_backbone_gw - Get valid reference for backbone_gw of
+ * batadv_bla_claim_get_backbone_gw() - Get valid reference for backbone_gw of
  *  claim
  * @claim: claim whose backbone_gw should be returned
  *
@@ -794,7 +794,7 @@ batadv_bla_claim_get_backbone_gw(struct batadv_bla_claim *claim)
 }
 
 /**
- * batadv_bla_del_claim - delete a claim from the claim hash
+ * batadv_bla_del_claim() - delete a claim from the claim hash
  * @bat_priv: the bat priv with all the soft interface information
  * @mac: mac address of the claim to be removed
  * @vid: VLAN id for the claim to be removed
@@ -822,7 +822,7 @@ static void batadv_bla_del_claim(struct batadv_priv *bat_priv,
 }
 
 /**
- * batadv_handle_announce - check for ANNOUNCE frame
+ * batadv_handle_announce() - check for ANNOUNCE frame
  * @bat_priv: the bat priv with all the soft interface information
  * @an_addr: announcement mac address (ARP Sender HW address)
  * @backbone_addr: originator address of the sender (Ethernet source MAC)
@@ -880,7 +880,7 @@ static bool batadv_handle_announce(struct batadv_priv *bat_priv, u8 *an_addr,
 }
 
 /**
- * batadv_handle_request - check for REQUEST frame
+ * batadv_handle_request() - check for REQUEST frame
  * @bat_priv: the bat priv with all the soft interface information
  * @primary_if: the primary hard interface of this batman soft interface
  * @backbone_addr: backbone address to be requested (ARP sender HW MAC)
@@ -913,7 +913,7 @@ static bool batadv_handle_request(struct batadv_priv *bat_priv,
 }
 
 /**
- * batadv_handle_unclaim - check for UNCLAIM frame
+ * batadv_handle_unclaim() - check for UNCLAIM frame
  * @bat_priv: the bat priv with all the soft interface information
  * @primary_if: the primary hard interface of this batman soft interface
  * @backbone_addr: originator address of the backbone (Ethernet source)
@@ -951,7 +951,7 @@ static bool batadv_handle_unclaim(struct batadv_priv *bat_priv,
 }
 
 /**
- * batadv_handle_claim - check for CLAIM frame
+ * batadv_handle_claim() - check for CLAIM frame
  * @bat_priv: the bat priv with all the soft interface information
  * @primary_if: the primary hard interface of this batman soft interface
  * @backbone_addr: originator address of the backbone (Ethernet Source)
@@ -988,7 +988,7 @@ static bool batadv_handle_claim(struct batadv_priv *bat_priv,
 }
 
 /**
- * batadv_check_claim_group - check for claim group membership
+ * batadv_check_claim_group() - check for claim group membership
  * @bat_priv: the bat priv with all the soft interface information
  * @primary_if: the primary interface of this batman interface
  * @hw_src: the Hardware source in the ARP Header
@@ -1063,7 +1063,7 @@ static int batadv_check_claim_group(struct batadv_priv *bat_priv,
 }
 
 /**
- * batadv_bla_process_claim - Check if this is a claim frame, and process it
+ * batadv_bla_process_claim() - Check if this is a claim frame, and process it
  * @bat_priv: the bat priv with all the soft interface information
  * @primary_if: the primary hard interface of this batman soft interface
  * @skb: the frame to be checked
@@ -1205,7 +1205,7 @@ static bool batadv_bla_process_claim(struct batadv_priv *bat_priv,
 }
 
 /**
- * batadv_bla_purge_backbone_gw - Remove backbone gateways after a timeout or
+ * batadv_bla_purge_backbone_gw() - Remove backbone gateways after a timeout or
  *  immediately
  * @bat_priv: the bat priv with all the soft interface information
  * @now: whether the whole hash shall be wiped now
@@ -1258,7 +1258,7 @@ static void batadv_bla_purge_backbone_gw(struct batadv_priv *bat_priv, int now)
 }
 
 /**
- * batadv_bla_purge_claims - Remove claims after a timeout or immediately
+ * batadv_bla_purge_claims() - Remove claims after a timeout or immediately
  * @bat_priv: the bat priv with all the soft interface information
  * @primary_if: the selected primary interface, may be NULL if now is set
  * @now: whether the whole hash shall be wiped now
@@ -1316,7 +1316,7 @@ static void batadv_bla_purge_claims(struct batadv_priv *bat_priv,
 }
 
 /**
- * batadv_bla_update_orig_address - Update the backbone gateways when the own
+ * batadv_bla_update_orig_address() - Update the backbone gateways when the own
  *  originator address changes
  * @bat_priv: the bat priv with all the soft interface information
  * @primary_if: the new selected primary_if
@@ -1372,7 +1372,7 @@ void batadv_bla_update_orig_address(struct batadv_priv *bat_priv,
 }
 
 /**
- * batadv_bla_send_loopdetect - send a loopdetect frame
+ * batadv_bla_send_loopdetect() - send a loopdetect frame
  * @bat_priv: the bat priv with all the soft interface information
  * @backbone_gw: the backbone gateway for which a loop should be detected
  *
@@ -1392,7 +1392,7 @@ batadv_bla_send_loopdetect(struct batadv_priv *bat_priv,
 }
 
 /**
- * batadv_bla_status_update - purge bla interfaces if necessary
+ * batadv_bla_status_update() - purge bla interfaces if necessary
  * @net_dev: the soft interface net device
  */
 void batadv_bla_status_update(struct net_device *net_dev)
@@ -1412,7 +1412,7 @@ void batadv_bla_status_update(struct net_device *net_dev)
 }
 
 /**
- * batadv_bla_periodic_work - performs periodic bla work
+ * batadv_bla_periodic_work() - performs periodic bla work
  * @work: kernel work struct
  *
  * periodic work to do:
@@ -1517,7 +1517,7 @@ static struct lock_class_key batadv_claim_hash_lock_class_key;
 static struct lock_class_key batadv_backbone_hash_lock_class_key;
 
 /**
- * batadv_bla_init - initialize all bla structures
+ * batadv_bla_init() - initialize all bla structures
  * @bat_priv: the bat priv with all the soft interface information
  *
  * Return: 0 on success, < 0 on error.
@@ -1579,7 +1579,7 @@ int batadv_bla_init(struct batadv_priv *bat_priv)
 }
 
 /**
- * batadv_bla_check_bcast_duplist - Check if a frame is in the broadcast dup.
+ * batadv_bla_check_bcast_duplist() - Check if a frame is in the broadcast dup.
  * @bat_priv: the bat priv with all the soft interface information
  * @skb: contains the bcast_packet to be checked
  *
@@ -1652,7 +1652,7 @@ bool batadv_bla_check_bcast_duplist(struct batadv_priv *bat_priv,
 }
 
 /**
- * batadv_bla_is_backbone_gw_orig - Check if the originator is a gateway for
+ * batadv_bla_is_backbone_gw_orig() - Check if the originator is a gateway for
  *  the VLAN identified by vid.
  * @bat_priv: the bat priv with all the soft interface information
  * @orig: originator mac address
@@ -1692,7 +1692,7 @@ bool batadv_bla_is_backbone_gw_orig(struct batadv_priv *bat_priv, u8 *orig,
 }
 
 /**
- * batadv_bla_is_backbone_gw - check if originator is a backbone gw for a VLAN.
+ * batadv_bla_is_backbone_gw() - check if originator is a backbone gw for a VLAN
  * @skb: the frame to be checked
  * @orig_node: the orig_node of the frame
  * @hdr_size: maximum length of the frame
@@ -1726,7 +1726,7 @@ bool batadv_bla_is_backbone_gw(struct sk_buff *skb,
 }
 
 /**
- * batadv_bla_free - free all bla structures
+ * batadv_bla_free() - free all bla structures
  * @bat_priv: the bat priv with all the soft interface information
  *
  * for softinterface free or module unload
@@ -1753,7 +1753,7 @@ void batadv_bla_free(struct batadv_priv *bat_priv)
 }
 
 /**
- * batadv_bla_loopdetect_check - check and handle a detected loop
+ * batadv_bla_loopdetect_check() - check and handle a detected loop
  * @bat_priv: the bat priv with all the soft interface information
  * @skb: the packet to check
  * @primary_if: interface where the request came on
@@ -1802,7 +1802,7 @@ batadv_bla_loopdetect_check(struct batadv_priv *bat_priv, struct sk_buff *skb,
 }
 
 /**
- * batadv_bla_rx - check packets coming from the mesh.
+ * batadv_bla_rx() - check packets coming from the mesh.
  * @bat_priv: the bat priv with all the soft interface information
  * @skb: the frame to be checked
  * @vid: the VLAN ID of the frame
@@ -1914,7 +1914,7 @@ bool batadv_bla_rx(struct batadv_priv *bat_priv, struct sk_buff *skb,
 }
 
 /**
- * batadv_bla_tx - check packets going into the mesh
+ * batadv_bla_tx() - check packets going into the mesh
  * @bat_priv: the bat priv with all the soft interface information
  * @skb: the frame to be checked
  * @vid: the VLAN ID of the frame
@@ -2022,7 +2022,7 @@ bool batadv_bla_tx(struct batadv_priv *bat_priv, struct sk_buff *skb,
 
 #ifdef CONFIG_BATMAN_ADV_DEBUGFS
 /**
- * batadv_bla_claim_table_seq_print_text - print the claim table in a seq file
+ * batadv_bla_claim_table_seq_print_text() - print the claim table in a seq file
  * @seq: seq file to print on
  * @offset: not used
  *
@@ -2084,7 +2084,7 @@ int batadv_bla_claim_table_seq_print_text(struct seq_file *seq, void *offset)
 #endif
 
 /**
- * batadv_bla_claim_dump_entry - dump one entry of the claim table
+ * batadv_bla_claim_dump_entry() - dump one entry of the claim table
  * to a netlink socket
  * @msg: buffer for the message
  * @portid: netlink port
@@ -2143,7 +2143,7 @@ batadv_bla_claim_dump_entry(struct sk_buff *msg, u32 portid, u32 seq,
 }
 
 /**
- * batadv_bla_claim_dump_bucket - dump one bucket of the claim table
+ * batadv_bla_claim_dump_bucket() - dump one bucket of the claim table
  * to a netlink socket
  * @msg: buffer for the message
  * @portid: netlink port
@@ -2180,7 +2180,7 @@ batadv_bla_claim_dump_bucket(struct sk_buff *msg, u32 portid, u32 seq,
 }
 
 /**
- * batadv_bla_claim_dump - dump claim table to a netlink socket
+ * batadv_bla_claim_dump() - dump claim table to a netlink socket
  * @msg: buffer for the message
  * @cb: callback structure containing arguments
  *
@@ -2247,8 +2247,8 @@ int batadv_bla_claim_dump(struct sk_buff *msg, struct netlink_callback *cb)
 
 #ifdef CONFIG_BATMAN_ADV_DEBUGFS
 /**
- * batadv_bla_backbone_table_seq_print_text - print the backbone table in a seq
- *  file
+ * batadv_bla_backbone_table_seq_print_text() - print the backbone table in a
+ *  seq file
  * @seq: seq file to print on
  * @offset: not used
  *
@@ -2312,8 +2312,8 @@ int batadv_bla_backbone_table_seq_print_text(struct seq_file *seq, void *offset)
 #endif
 
 /**
- * batadv_bla_backbone_dump_entry - dump one entry of the backbone table
- * to a netlink socket
+ * batadv_bla_backbone_dump_entry() - dump one entry of the backbone table to a
+ *  netlink socket
  * @msg: buffer for the message
  * @portid: netlink port
  * @seq: Sequence number of netlink message
@@ -2373,8 +2373,8 @@ batadv_bla_backbone_dump_entry(struct sk_buff *msg, u32 portid, u32 seq,
 }
 
 /**
- * batadv_bla_backbone_dump_bucket - dump one bucket of the backbone table
- * to a netlink socket
+ * batadv_bla_backbone_dump_bucket() - dump one bucket of the backbone table to
+ *  a netlink socket
  * @msg: buffer for the message
  * @portid: netlink port
  * @seq: Sequence number of netlink message
@@ -2410,7 +2410,7 @@ batadv_bla_backbone_dump_bucket(struct sk_buff *msg, u32 portid, u32 seq,
 }
 
 /**
- * batadv_bla_backbone_dump - dump backbone table to a netlink socket
+ * batadv_bla_backbone_dump() - dump backbone table to a netlink socket
  * @msg: buffer for the message
  * @cb: callback structure containing arguments
  *
@@ -2477,7 +2477,7 @@ int batadv_bla_backbone_dump(struct sk_buff *msg, struct netlink_callback *cb)
 
 #ifdef CONFIG_BATMAN_ADV_DAT
 /**
- * batadv_bla_check_claim - check if address is claimed
+ * batadv_bla_check_claim() - check if address is claimed
  *
  * @bat_priv: the bat priv with all the soft interface information
  * @addr: mac address of which the claim status is checked
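
The *_release()/*_put() pairs whose comments are reworked above (backbone
gateways and claims here, dat_entry and gw_node later in the series) all
follow the standard kref idiom: put drops one reference, and the release
callback runs once the count reaches zero. A generic sketch, with an invented
object type:

    #include <linux/kref.h>
    #include <linux/slab.h>

    struct example_obj {
            struct kref refcount;
            /* ... payload ... */
    };

    static void example_obj_release(struct kref *ref)
    {
            struct example_obj *obj;

            obj = container_of(ref, struct example_obj, refcount);
            /* unlink from lists here; batman-adv frequently defers
             * the actual free with kfree_rcu() for RCU readers */
            kfree(obj);
    }

    static void example_obj_put(struct example_obj *obj)
    {
            kref_put(&obj->refcount, example_obj_release);
    }
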
diff --git a/net/batman-adv/bridge_loop_avoidance.h b/net/batman-adv/bridge_loop_avoidance.h
index 2347757..b27571a 100644
--- a/net/batman-adv/bridge_loop_avoidance.h
+++ b/net/batman-adv/bridge_loop_avoidance.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
 /* Copyright (C) 2011-2017  B.A.T.M.A.N. contributors:
  *
  * Simon Wunderlich
@@ -30,8 +31,8 @@ struct seq_file;
 struct sk_buff;
 
 /**
- * batadv_bla_is_loopdetect_mac - check if the mac address is from a loop detect
- *  frame sent by bridge loop avoidance
+ * batadv_bla_is_loopdetect_mac() - check if the mac address is from a loop
+ *  detect frame sent by bridge loop avoidance
  * @mac: mac address to check
  *
 * Return: true if it looks like a loop detect frame
diff --git a/net/batman-adv/debugfs.c b/net/batman-adv/debugfs.c
index e32ad47..21d1189 100644
--- a/net/batman-adv/debugfs.c
+++ b/net/batman-adv/debugfs.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
 /* Copyright (C) 2010-2017  B.A.T.M.A.N. contributors:
  *
  * Marek Lindner
@@ -25,7 +26,6 @@
 #include <linux/fs.h>
 #include <linux/netdevice.h>
 #include <linux/printk.h>
-#include <linux/sched.h> /* for linux/wait.h */
 #include <linux/seq_file.h>
 #include <linux/stat.h>
 #include <linux/stddef.h>
@@ -66,8 +66,8 @@ static int batadv_originators_open(struct inode *inode, struct file *file)
 }
 
 /**
- * batadv_originators_hardif_open - handles debugfs output for the
- *  originator table of an hard interface
+ * batadv_originators_hardif_open() - handles debugfs output for the originator
+ *  table of a hard interface
  * @inode: inode pointer to debugfs file
  * @file: pointer to the seq_file
  *
@@ -117,7 +117,7 @@ static int batadv_bla_backbone_table_open(struct inode *inode,
 
 #ifdef CONFIG_BATMAN_ADV_DAT
 /**
- * batadv_dat_cache_open - Prepare file handler for reads from dat_chache
+ * batadv_dat_cache_open() - Prepare file handler for reads from dat_cache
  * @inode: inode which was opened
  * @file: file handle to be initialized
  *
@@ -154,7 +154,7 @@ static int batadv_nc_nodes_open(struct inode *inode, struct file *file)
 
 #ifdef CONFIG_BATMAN_ADV_MCAST
 /**
- * batadv_mcast_flags_open - prepare file handler for reads from mcast_flags
+ * batadv_mcast_flags_open() - prepare file handler for reads from mcast_flags
  * @inode: inode which was opened
  * @file: file handle to be initialized
  *
@@ -259,6 +259,9 @@ static struct batadv_debuginfo *batadv_hardif_debuginfos[] = {
 	NULL,
 };
 
+/**
+ * batadv_debugfs_init() - Initialize soft interface independent debugfs entries
+ */
 void batadv_debugfs_init(void)
 {
 	struct batadv_debuginfo **bat_debug;
@@ -289,6 +292,9 @@ void batadv_debugfs_init(void)
 	batadv_debugfs = NULL;
 }
 
+/**
+ * batadv_debugfs_destroy() - Remove all debugfs entries
+ */
 void batadv_debugfs_destroy(void)
 {
 	debugfs_remove_recursive(batadv_debugfs);
@@ -296,7 +302,7 @@ void batadv_debugfs_destroy(void)
 }
 
 /**
- * batadv_debugfs_add_hardif - creates the base directory for a hard interface
+ * batadv_debugfs_add_hardif() - creates the base directory for a hard interface
  *  in debugfs.
  * @hard_iface: hard interface which should be added.
  *
@@ -338,7 +344,7 @@ int batadv_debugfs_add_hardif(struct batadv_hard_iface *hard_iface)
 }
 
 /**
- * batadv_debugfs_del_hardif - delete the base directory for a hard interface
+ * batadv_debugfs_del_hardif() - delete the base directory for a hard interface
  *  in debugfs.
  * @hard_iface: hard interface which is deleted.
  */
@@ -355,6 +361,12 @@ void batadv_debugfs_del_hardif(struct batadv_hard_iface *hard_iface)
 	}
 }
 
+/**
+ * batadv_debugfs_add_meshif() - Initialize interface dependent debugfs entries
+ * @dev: netdev struct of the soft interface
+ *
+ * Return: 0 on success or negative error number in case of failure
+ */
 int batadv_debugfs_add_meshif(struct net_device *dev)
 {
 	struct batadv_priv *bat_priv = netdev_priv(dev);
@@ -401,6 +413,10 @@ int batadv_debugfs_add_meshif(struct net_device *dev)
 	return -ENOMEM;
 }
 
+/**
+ * batadv_debugfs_del_meshif() - Remove interface dependent debugfs entries
+ * @dev: netdev struct of the soft interface
+ */
 void batadv_debugfs_del_meshif(struct net_device *dev)
 {
 	struct batadv_priv *bat_priv = netdev_priv(dev);
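
batadv_debugfs_init() and batadv_debugfs_destroy(), which gain kernel-doc
above, wrap the usual debugfs lifecycle: create a directory once at module
init and recursively remove it on exit. A minimal sketch of that pattern with
example names:

    #include <linux/debugfs.h>
    #include <linux/module.h>

    static struct dentry *example_dir;

    static int __init example_init(void)
    {
            example_dir = debugfs_create_dir("example", NULL);
            if (!example_dir)
                    return -ENOMEM;

            return 0;
    }

    static void __exit example_exit(void)
    {
            /* removes the directory and everything created below it */
            debugfs_remove_recursive(example_dir);
            example_dir = NULL;
    }

    module_init(example_init);
    module_exit(example_exit);
    MODULE_LICENSE("GPL");
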
diff --git a/net/batman-adv/debugfs.h b/net/batman-adv/debugfs.h
index 9c5d4a6..90a08d3 100644
--- a/net/batman-adv/debugfs.h
+++ b/net/batman-adv/debugfs.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
 /* Copyright (C) 2010-2017  B.A.T.M.A.N. contributors:
  *
  * Marek Lindner
diff --git a/net/batman-adv/distributed-arp-table.c b/net/batman-adv/distributed-arp-table.c
index 760c0de..9703c79 100644
--- a/net/batman-adv/distributed-arp-table.c
+++ b/net/batman-adv/distributed-arp-table.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
 /* Copyright (C) 2011-2017  B.A.T.M.A.N. contributors:
  *
  * Antonio Quartulli
@@ -23,7 +24,7 @@
 #include <linux/byteorder/generic.h>
 #include <linux/errno.h>
 #include <linux/etherdevice.h>
-#include <linux/fs.h>
+#include <linux/gfp.h>
 #include <linux/if_arp.h>
 #include <linux/if_ether.h>
 #include <linux/if_vlan.h>
@@ -55,7 +56,7 @@
 static void batadv_dat_purge(struct work_struct *work);
 
 /**
- * batadv_dat_start_timer - initialise the DAT periodic worker
+ * batadv_dat_start_timer() - initialise the DAT periodic worker
  * @bat_priv: the bat priv with all the soft interface information
  */
 static void batadv_dat_start_timer(struct batadv_priv *bat_priv)
@@ -66,7 +67,7 @@ static void batadv_dat_start_timer(struct batadv_priv *bat_priv)
 }
 
 /**
- * batadv_dat_entry_release - release dat_entry from lists and queue for free
+ * batadv_dat_entry_release() - release dat_entry from lists and queue for free
  *  after rcu grace period
  * @ref: kref pointer of the dat_entry
  */
@@ -80,7 +81,7 @@ static void batadv_dat_entry_release(struct kref *ref)
 }
 
 /**
- * batadv_dat_entry_put - decrement the dat_entry refcounter and possibly
+ * batadv_dat_entry_put() - decrement the dat_entry refcounter and possibly
  *  release it
  * @dat_entry: dat_entry to be free'd
  */
@@ -90,7 +91,7 @@ static void batadv_dat_entry_put(struct batadv_dat_entry *dat_entry)
 }
 
 /**
- * batadv_dat_to_purge - check whether a dat_entry has to be purged or not
+ * batadv_dat_to_purge() - check whether a dat_entry has to be purged or not
  * @dat_entry: the entry to check
  *
  * Return: true if the entry has to be purged now, false otherwise.
@@ -102,7 +103,7 @@ static bool batadv_dat_to_purge(struct batadv_dat_entry *dat_entry)
 }
 
 /**
- * __batadv_dat_purge - delete entries from the DAT local storage
+ * __batadv_dat_purge() - delete entries from the DAT local storage
  * @bat_priv: the bat priv with all the soft interface information
  * @to_purge: function in charge to decide whether an entry has to be purged or
  *	      not. This function takes the dat_entry as argument and has to
@@ -145,8 +146,8 @@ static void __batadv_dat_purge(struct batadv_priv *bat_priv,
 }
 
 /**
- * batadv_dat_purge - periodic task that deletes old entries from the local DAT
- * hash table
+ * batadv_dat_purge() - periodic task that deletes old entries from the local
+ *  DAT hash table
  * @work: kernel work struct
  */
 static void batadv_dat_purge(struct work_struct *work)
@@ -164,7 +165,7 @@ static void batadv_dat_purge(struct work_struct *work)
 }
 
 /**
- * batadv_compare_dat - comparing function used in the local DAT hash table
+ * batadv_compare_dat() - comparing function used in the local DAT hash table
  * @node: node in the local table
  * @data2: second object to compare the node to
  *
@@ -179,7 +180,7 @@ static bool batadv_compare_dat(const struct hlist_node *node, const void *data2)
 }
 
 /**
- * batadv_arp_hw_src - extract the hw_src field from an ARP packet
+ * batadv_arp_hw_src() - extract the hw_src field from an ARP packet
  * @skb: ARP packet
  * @hdr_size: size of the possible header before the ARP packet
  *
@@ -196,7 +197,7 @@ static u8 *batadv_arp_hw_src(struct sk_buff *skb, int hdr_size)
 }
 
 /**
- * batadv_arp_ip_src - extract the ip_src field from an ARP packet
+ * batadv_arp_ip_src() - extract the ip_src field from an ARP packet
  * @skb: ARP packet
  * @hdr_size: size of the possible header before the ARP packet
  *
@@ -208,7 +209,7 @@ static __be32 batadv_arp_ip_src(struct sk_buff *skb, int hdr_size)
 }
 
 /**
- * batadv_arp_hw_dst - extract the hw_dst field from an ARP packet
+ * batadv_arp_hw_dst() - extract the hw_dst field from an ARP packet
  * @skb: ARP packet
  * @hdr_size: size of the possible header before the ARP packet
  *
@@ -220,7 +221,7 @@ static u8 *batadv_arp_hw_dst(struct sk_buff *skb, int hdr_size)
 }
 
 /**
- * batadv_arp_ip_dst - extract the ip_dst field from an ARP packet
+ * batadv_arp_ip_dst() - extract the ip_dst field from an ARP packet
  * @skb: ARP packet
  * @hdr_size: size of the possible header before the ARP packet
  *
@@ -232,7 +233,7 @@ static __be32 batadv_arp_ip_dst(struct sk_buff *skb, int hdr_size)
 }
 
 /**
- * batadv_hash_dat - compute the hash value for an IP address
+ * batadv_hash_dat() - compute the hash value for an IP address
  * @data: data to hash
  * @size: size of the hash table
  *
@@ -267,7 +268,7 @@ static u32 batadv_hash_dat(const void *data, u32 size)
 }
 
 /**
- * batadv_dat_entry_hash_find - look for a given dat_entry in the local hash
+ * batadv_dat_entry_hash_find() - look for a given dat_entry in the local hash
  * table
  * @bat_priv: the bat priv with all the soft interface information
  * @ip: search key
@@ -310,7 +311,7 @@ batadv_dat_entry_hash_find(struct batadv_priv *bat_priv, __be32 ip,
 }
 
 /**
- * batadv_dat_entry_add - add a new dat entry or update it if already exists
+ * batadv_dat_entry_add() - add a new dat entry or update an existing one
  * @bat_priv: the bat priv with all the soft interface information
  * @ip: ipv4 to add/edit
  * @mac_addr: mac address to assign to the given ipv4
@@ -367,7 +368,8 @@ static void batadv_dat_entry_add(struct batadv_priv *bat_priv, __be32 ip,
 #ifdef CONFIG_BATMAN_ADV_DEBUG
 
 /**
- * batadv_dbg_arp - print a debug message containing all the ARP packet details
+ * batadv_dbg_arp() - print a debug message containing all the ARP packet
+ *  details
  * @bat_priv: the bat priv with all the soft interface information
  * @skb: ARP packet
  * @hdr_size: size of the possible header before the ARP packet
@@ -448,7 +450,7 @@ static void batadv_dbg_arp(struct batadv_priv *bat_priv, struct sk_buff *skb,
 #endif /* CONFIG_BATMAN_ADV_DEBUG */
 
 /**
- * batadv_is_orig_node_eligible - check whether a node can be a DHT candidate
+ * batadv_is_orig_node_eligible() - check whether a node can be a DHT candidate
  * @res: the array with the already selected candidates
  * @select: number of already selected candidates
  * @tmp_max: address of the currently evaluated node
@@ -502,7 +504,7 @@ static bool batadv_is_orig_node_eligible(struct batadv_dat_candidate *res,
 }
 
 /**
- * batadv_choose_next_candidate - select the next DHT candidate
+ * batadv_choose_next_candidate() - select the next DHT candidate
  * @bat_priv: the bat priv with all the soft interface information
  * @cands: candidates array
  * @select: number of candidates already present in the array
@@ -566,8 +568,8 @@ static void batadv_choose_next_candidate(struct batadv_priv *bat_priv,
 }
 
 /**
- * batadv_dat_select_candidates - select the nodes which the DHT message has to
- * be sent to
+ * batadv_dat_select_candidates() - select the nodes which the DHT message has
+ *  to be sent to
  * @bat_priv: the bat priv with all the soft interface information
  * @ip_dst: ipv4 to look up in the DHT
  * @vid: VLAN identifier
@@ -612,7 +614,7 @@ batadv_dat_select_candidates(struct batadv_priv *bat_priv, __be32 ip_dst,
 }
 
 /**
- * batadv_dat_send_data - send a payload to the selected candidates
+ * batadv_dat_send_data() - send a payload to the selected candidates
  * @bat_priv: the bat priv with all the soft interface information
  * @skb: payload to send
  * @ip: the DHT key
@@ -688,7 +690,7 @@ static bool batadv_dat_send_data(struct batadv_priv *bat_priv,
 }
 
 /**
- * batadv_dat_tvlv_container_update - update the dat tvlv container after dat
+ * batadv_dat_tvlv_container_update() - update the dat tvlv container after dat
  *  setting change
  * @bat_priv: the bat priv with all the soft interface information
  */
@@ -710,7 +712,7 @@ static void batadv_dat_tvlv_container_update(struct batadv_priv *bat_priv)
 }
 
 /**
- * batadv_dat_status_update - update the dat tvlv container after dat
+ * batadv_dat_status_update() - update the dat tvlv container after dat
  *  setting change
  * @net_dev: the soft interface net device
  */
@@ -722,7 +724,7 @@ void batadv_dat_status_update(struct net_device *net_dev)
 }
 
 /**
- * batadv_dat_tvlv_ogm_handler_v1 - process incoming dat tvlv container
+ * batadv_dat_tvlv_ogm_handler_v1() - process incoming dat tvlv container
  * @bat_priv: the bat priv with all the soft interface information
  * @orig: the orig_node of the ogm
  * @flags: flags indicating the tvlv state (see batadv_tvlv_handler_flags)
@@ -741,7 +743,7 @@ static void batadv_dat_tvlv_ogm_handler_v1(struct batadv_priv *bat_priv,
 }
 
 /**
- * batadv_dat_hash_free - free the local DAT hash table
+ * batadv_dat_hash_free() - free the local DAT hash table
  * @bat_priv: the bat priv with all the soft interface information
  */
 static void batadv_dat_hash_free(struct batadv_priv *bat_priv)
@@ -757,7 +759,7 @@ static void batadv_dat_hash_free(struct batadv_priv *bat_priv)
 }
 
 /**
- * batadv_dat_init - initialise the DAT internals
+ * batadv_dat_init() - initialise the DAT internals
  * @bat_priv: the bat priv with all the soft interface information
  *
  * Return: 0 in case of success, a negative error code otherwise
@@ -782,7 +784,7 @@ int batadv_dat_init(struct batadv_priv *bat_priv)
 }
 
 /**
- * batadv_dat_free - free the DAT internals
+ * batadv_dat_free() - free the DAT internals
  * @bat_priv: the bat priv with all the soft interface information
  */
 void batadv_dat_free(struct batadv_priv *bat_priv)
@@ -797,7 +799,7 @@ void batadv_dat_free(struct batadv_priv *bat_priv)
 
 #ifdef CONFIG_BATMAN_ADV_DEBUGFS
 /**
- * batadv_dat_cache_seq_print_text - print the local DAT hash table
+ * batadv_dat_cache_seq_print_text() - print the local DAT hash table
  * @seq: seq file to print on
  * @offset: not used
  *
@@ -850,7 +852,7 @@ int batadv_dat_cache_seq_print_text(struct seq_file *seq, void *offset)
 #endif
 
 /**
- * batadv_arp_get_type - parse an ARP packet and gets the type
+ * batadv_arp_get_type() - parse an ARP packet and get the type
  * @bat_priv: the bat priv with all the soft interface information
  * @skb: packet to analyse
  * @hdr_size: size of the possible header before the ARP packet in the skb
@@ -924,7 +926,7 @@ static u16 batadv_arp_get_type(struct batadv_priv *bat_priv,
 }
 
 /**
- * batadv_dat_get_vid - extract the VLAN identifier from skb if any
+ * batadv_dat_get_vid() - extract the VLAN identifier from skb if any
  * @skb: the buffer containing the packet to extract the VID from
  * @hdr_size: the size of the batman-adv header encapsulating the packet
  *
@@ -950,7 +952,7 @@ static unsigned short batadv_dat_get_vid(struct sk_buff *skb, int *hdr_size)
 }
 
 /**
- * batadv_dat_arp_create_reply - create an ARP Reply
+ * batadv_dat_arp_create_reply() - create an ARP Reply
  * @bat_priv: the bat priv with all the soft interface information
  * @ip_src: ARP sender IP
  * @ip_dst: ARP target IP
@@ -985,7 +987,7 @@ batadv_dat_arp_create_reply(struct batadv_priv *bat_priv, __be32 ip_src,
 }
 
 /**
- * batadv_dat_snoop_outgoing_arp_request - snoop the ARP request and try to
+ * batadv_dat_snoop_outgoing_arp_request() - snoop the ARP request and try to
  * answer using DAT
  * @bat_priv: the bat priv with all the soft interface information
  * @skb: packet to check
@@ -1083,7 +1085,7 @@ bool batadv_dat_snoop_outgoing_arp_request(struct batadv_priv *bat_priv,
 }
 
 /**
- * batadv_dat_snoop_incoming_arp_request - snoop the ARP request and try to
+ * batadv_dat_snoop_incoming_arp_request() - snoop the ARP request and try to
  * answer using the local DAT storage
  * @bat_priv: the bat priv with all the soft interface information
  * @skb: packet to check
@@ -1153,7 +1155,7 @@ bool batadv_dat_snoop_incoming_arp_request(struct batadv_priv *bat_priv,
 }
 
 /**
- * batadv_dat_snoop_outgoing_arp_reply - snoop the ARP reply and fill the DHT
+ * batadv_dat_snoop_outgoing_arp_reply() - snoop the ARP reply and fill the DHT
  * @bat_priv: the bat priv with all the soft interface information
  * @skb: packet to check
  */
@@ -1193,8 +1195,8 @@ void batadv_dat_snoop_outgoing_arp_reply(struct batadv_priv *bat_priv,
 }
 
 /**
- * batadv_dat_snoop_incoming_arp_reply - snoop the ARP reply and fill the local
- * DAT storage only
+ * batadv_dat_snoop_incoming_arp_reply() - snoop the ARP reply and fill the
+ *  local DAT storage only
  * @bat_priv: the bat priv with all the soft interface information
  * @skb: packet to check
  * @hdr_size: size of the encapsulation header
@@ -1282,8 +1284,8 @@ bool batadv_dat_snoop_incoming_arp_reply(struct batadv_priv *bat_priv,
 }
 
 /**
- * batadv_dat_drop_broadcast_packet - check if an ARP request has to be dropped
- * (because the node has already obtained the reply via DAT) or not
+ * batadv_dat_drop_broadcast_packet() - check if an ARP request has to be
+ *  dropped (because the node has already obtained the reply via DAT) or not
  * @bat_priv: the bat priv with all the soft interface information
  * @forw_packet: the broadcast packet
  *
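
The batadv_arp_hw_src()/batadv_arp_ip_src() accessors documented above exploit
the fixed layout of an Ethernet/IPv4 ARP payload: after struct arphdr come the
sender HW address (6 bytes), sender IP (4), target HW (6) and target IP (4).
In essence, assuming a linear skb and with all validation omitted:

    #include <linux/if_arp.h>
    #include <linux/if_ether.h>
    #include <linux/skbuff.h>

    static u8 *arp_hw_src_sketch(struct sk_buff *skb, int hdr_size)
    {
            /* skip the encapsulating batman-adv header, the Ethernet
             * header and the fixed part of the ARP header */
            return skb->data + hdr_size + ETH_HLEN + sizeof(struct arphdr);
    }

    static __be32 arp_ip_src_sketch(struct sk_buff *skb, int hdr_size)
    {
            u8 *hw_src = arp_hw_src_sketch(skb, hdr_size);

            return *(__be32 *)(hw_src + ETH_ALEN);
    }
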
diff --git a/net/batman-adv/distributed-arp-table.h b/net/batman-adv/distributed-arp-table.h
index ec364a3..12897eb 100644
--- a/net/batman-adv/distributed-arp-table.h
+++ b/net/batman-adv/distributed-arp-table.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
 /* Copyright (C) 2011-2017  B.A.T.M.A.N. contributors:
  *
  * Antonio Quartulli
@@ -23,9 +24,9 @@
 #include <linux/compiler.h>
 #include <linux/netdevice.h>
 #include <linux/types.h>
+#include <uapi/linux/batadv_packet.h>
 
 #include "originator.h"
-#include "packet.h"
 
 struct seq_file;
 struct sk_buff;
@@ -48,7 +49,7 @@ bool batadv_dat_drop_broadcast_packet(struct batadv_priv *bat_priv,
 				      struct batadv_forw_packet *forw_packet);
 
 /**
- * batadv_dat_init_orig_node_addr - assign a DAT address to the orig_node
+ * batadv_dat_init_orig_node_addr() - assign a DAT address to the orig_node
  * @orig_node: the node to assign the DAT address to
  */
 static inline void
@@ -61,7 +62,7 @@ batadv_dat_init_orig_node_addr(struct batadv_orig_node *orig_node)
 }
 
 /**
- * batadv_dat_init_own_addr - assign a DAT address to the node itself
+ * batadv_dat_init_own_addr() - assign a DAT address to the node itself
  * @bat_priv: the bat priv with all the soft interface information
  * @primary_if: a pointer to the primary interface
  */
@@ -82,7 +83,7 @@ void batadv_dat_free(struct batadv_priv *bat_priv);
 int batadv_dat_cache_seq_print_text(struct seq_file *seq, void *offset);
 
 /**
- * batadv_dat_inc_counter - increment the correct DAT packet counter
+ * batadv_dat_inc_counter() - increment the correct DAT packet counter
  * @bat_priv: the bat priv with all the soft interface information
  * @subtype: the 4addr subtype of the packet to be counted
  *
diff --git a/net/batman-adv/fragmentation.c b/net/batman-adv/fragmentation.c
index ebe6e38..22dde42 100644
--- a/net/batman-adv/fragmentation.c
+++ b/net/batman-adv/fragmentation.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
 /* Copyright (C) 2013-2017  B.A.T.M.A.N. contributors:
  *
  * Martin Hundebøll <martin@hundeboll.net>
@@ -22,7 +23,7 @@
 #include <linux/byteorder/generic.h>
 #include <linux/errno.h>
 #include <linux/etherdevice.h>
-#include <linux/fs.h>
+#include <linux/gfp.h>
 #include <linux/if_ether.h>
 #include <linux/jiffies.h>
 #include <linux/kernel.h>
@@ -32,16 +33,16 @@
 #include <linux/slab.h>
 #include <linux/spinlock.h>
 #include <linux/string.h>
+#include <uapi/linux/batadv_packet.h>
 
 #include "hard-interface.h"
 #include "originator.h"
-#include "packet.h"
 #include "routing.h"
 #include "send.h"
 #include "soft-interface.h"
 
 /**
- * batadv_frag_clear_chain - delete entries in the fragment buffer chain
+ * batadv_frag_clear_chain() - delete entries in the fragment buffer chain
  * @head: head of chain with entries.
  * @dropped: whether the chain is cleared because all fragments are dropped
  *
@@ -65,7 +66,7 @@ static void batadv_frag_clear_chain(struct hlist_head *head, bool dropped)
 }
 
 /**
- * batadv_frag_purge_orig - free fragments associated to an orig
+ * batadv_frag_purge_orig() - free fragments associated to an orig
  * @orig_node: originator to free fragments from
  * @check_cb: optional function to tell if an entry should be purged
  */
@@ -89,7 +90,7 @@ void batadv_frag_purge_orig(struct batadv_orig_node *orig_node,
 }
 
 /**
- * batadv_frag_size_limit - maximum possible size of packet to be fragmented
+ * batadv_frag_size_limit() - maximum possible size of packet to be fragmented
  *
  * Return: the maximum size of payload that can be fragmented.
  */
@@ -104,7 +105,7 @@ static int batadv_frag_size_limit(void)
 }
 
 /**
- * batadv_frag_init_chain - check and prepare fragment chain for new fragment
+ * batadv_frag_init_chain() - check and prepare fragment chain for new fragment
  * @chain: chain in fragments table to init
  * @seqno: sequence number of the received fragment
  *
@@ -134,7 +135,7 @@ static bool batadv_frag_init_chain(struct batadv_frag_table_entry *chain,
 }
 
 /**
- * batadv_frag_insert_packet - insert a fragment into a fragment chain
+ * batadv_frag_insert_packet() - insert a fragment into a fragment chain
  * @orig_node: originator that the fragment was received from
  * @skb: skb to insert
  * @chain_out: list head to attach complete chains of fragments to
@@ -248,7 +249,7 @@ static bool batadv_frag_insert_packet(struct batadv_orig_node *orig_node,
 }
 
 /**
- * batadv_frag_merge_packets - merge a chain of fragments
+ * batadv_frag_merge_packets() - merge a chain of fragments
  * @chain: head of chain with fragments
  *
  * Expand the first skb in the chain and copy the content of the remaining
@@ -306,7 +307,7 @@ batadv_frag_merge_packets(struct hlist_head *chain)
 }
 
 /**
- * batadv_frag_skb_buffer - buffer fragment for later merge
+ * batadv_frag_skb_buffer() - buffer fragment for later merge
  * @skb: skb to buffer
  * @orig_node_src: originator that the skb is received from
  *
@@ -346,7 +347,7 @@ bool batadv_frag_skb_buffer(struct sk_buff **skb,
 }
 
 /**
- * batadv_frag_skb_fwd - forward fragments that would exceed MTU when merged
+ * batadv_frag_skb_fwd() - forward fragments that would exceed MTU when merged
  * @skb: skb to forward
  * @recv_if: interface that the skb is received on
  * @orig_node_src: originator that the skb is received from
@@ -400,7 +401,7 @@ bool batadv_frag_skb_fwd(struct sk_buff *skb,
 }
 
 /**
- * batadv_frag_create - create a fragment from skb
+ * batadv_frag_create() - create a fragment from skb
  * @skb: skb to create fragment from
  * @frag_head: header to use in new fragment
  * @fragment_size: size of new fragment
@@ -438,7 +439,7 @@ static struct sk_buff *batadv_frag_create(struct sk_buff *skb,
 }
 
 /**
- * batadv_frag_send_packet - create up to 16 fragments from the passed skb
+ * batadv_frag_send_packet() - create up to 16 fragments from the passed skb
  * @skb: skb to create fragments from
  * @orig_node: final destination of the created fragments
  * @neigh_node: next-hop of the created fragments
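
As the comment above states, batadv_frag_send_packet() creates at most 16
fragments (the fragment number is a 4-bit field in the on-wire fragment
header), so batadv_frag_size_limit() bounds the largest payload that can still
be fragmented. The arithmetic, reduced to a user-space check with illustrative
names:

    #include <stdbool.h>
    #include <stddef.h>

    #define MAX_FRAGMENTS 16

    /* does the payload fit into at most MAX_FRAGMENTS pieces of
     * fragment_size bytes each? */
    static bool frag_fits_sketch(size_t payload, size_t fragment_size)
    {
            return payload <= (size_t)MAX_FRAGMENTS * fragment_size;
    }
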
diff --git a/net/batman-adv/fragmentation.h b/net/batman-adv/fragmentation.h
index 1a2d6c3..138b22a 100644
--- a/net/batman-adv/fragmentation.h
+++ b/net/batman-adv/fragmentation.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
 /* Copyright (C) 2013-2017  B.A.T.M.A.N. contributors:
  *
  * Martin Hundebøll <martin@hundeboll.net>
@@ -39,7 +40,7 @@ int batadv_frag_send_packet(struct sk_buff *skb,
 			    struct batadv_neigh_node *neigh_node);
 
 /**
- * batadv_frag_check_entry - check if a list of fragments has timed out
+ * batadv_frag_check_entry() - check if a list of fragments has timed out
  * @frags_entry: table entry to check
  *
  * Return: true if the frags entry has timed out, false otherwise.
diff --git a/net/batman-adv/gateway_client.c b/net/batman-adv/gateway_client.c
index 10d521f..37fe9a6 100644
--- a/net/batman-adv/gateway_client.c
+++ b/net/batman-adv/gateway_client.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
 /* Copyright (C) 2009-2017  B.A.T.M.A.N. contributors:
  *
  * Marek Lindner
@@ -22,7 +23,7 @@
 #include <linux/byteorder/generic.h>
 #include <linux/errno.h>
 #include <linux/etherdevice.h>
-#include <linux/fs.h>
+#include <linux/gfp.h>
 #include <linux/if_ether.h>
 #include <linux/if_vlan.h>
 #include <linux/in.h>
@@ -42,6 +43,7 @@
 #include <linux/stddef.h>
 #include <linux/udp.h>
 #include <net/sock.h>
+#include <uapi/linux/batadv_packet.h>
 #include <uapi/linux/batman_adv.h>
 
 #include "gateway_common.h"
@@ -49,7 +51,6 @@
 #include "log.h"
 #include "netlink.h"
 #include "originator.h"
-#include "packet.h"
 #include "routing.h"
 #include "soft-interface.h"
 #include "sysfs.h"
@@ -68,8 +69,8 @@
 #define BATADV_DHCP_CHADDR_OFFSET	28
 
 /**
- * batadv_gw_node_release - release gw_node from lists and queue for free after
- *  rcu grace period
+ * batadv_gw_node_release() - release gw_node from lists and queue for free
+ *  after rcu grace period
  * @ref: kref pointer of the gw_node
  */
 static void batadv_gw_node_release(struct kref *ref)
@@ -83,7 +84,8 @@ static void batadv_gw_node_release(struct kref *ref)
 }
 
 /**
- * batadv_gw_node_put - decrement the gw_node refcounter and possibly release it
+ * batadv_gw_node_put() - decrement the gw_node refcounter and possibly release
+ *  it
  * @gw_node: gateway node to free
  */
 void batadv_gw_node_put(struct batadv_gw_node *gw_node)
@@ -91,6 +93,12 @@ void batadv_gw_node_put(struct batadv_gw_node *gw_node)
 	kref_put(&gw_node->refcount, batadv_gw_node_release);
 }
 
+/**
+ * batadv_gw_get_selected_gw_node() - Get currently selected gateway
+ * @bat_priv: the bat priv with all the soft interface information
+ *
+ * Return: selected gateway (with increased refcnt), NULL on errors
+ */
 struct batadv_gw_node *
 batadv_gw_get_selected_gw_node(struct batadv_priv *bat_priv)
 {
@@ -109,6 +117,12 @@ batadv_gw_get_selected_gw_node(struct batadv_priv *bat_priv)
 	return gw_node;
 }
 
+/**
+ * batadv_gw_get_selected_orig() - Get originator of currently selected gateway
+ * @bat_priv: the bat priv with all the soft interface information
+ *
+ * Return: orig_node of selected gateway (with increased refcnt), NULL on errors
+ */
 struct batadv_orig_node *
 batadv_gw_get_selected_orig(struct batadv_priv *bat_priv)
 {
@@ -155,7 +169,7 @@ static void batadv_gw_select(struct batadv_priv *bat_priv,
 }
 
 /**
- * batadv_gw_reselect - force a gateway reselection
+ * batadv_gw_reselect() - force a gateway reselection
  * @bat_priv: the bat priv with all the soft interface information
  *
  * Set a flag to remind the GW component to perform a new gateway reselection.
@@ -171,7 +185,7 @@ void batadv_gw_reselect(struct batadv_priv *bat_priv)
 }
 
 /**
- * batadv_gw_check_client_stop - check if client mode has been switched off
+ * batadv_gw_check_client_stop() - check if client mode has been switched off
  * @bat_priv: the bat priv with all the soft interface information
  *
  * This function assumes the caller has checked that the gw state *is actually
@@ -202,6 +216,10 @@ void batadv_gw_check_client_stop(struct batadv_priv *bat_priv)
 	batadv_gw_node_put(curr_gw);
 }
 
+/**
+ * batadv_gw_election() - Elect the best gateway
+ * @bat_priv: the bat priv with all the soft interface information
+ */
 void batadv_gw_election(struct batadv_priv *bat_priv)
 {
 	struct batadv_gw_node *curr_gw = NULL;
@@ -290,6 +308,11 @@ void batadv_gw_election(struct batadv_priv *bat_priv)
 		batadv_neigh_ifinfo_put(router_ifinfo);
 }
 
+/**
+ * batadv_gw_check_election() - Elect orig node as best gateway when eligible
+ * @bat_priv: the bat priv with all the soft interface information
+ * @orig_node: orig node which is to be checked
+ */
 void batadv_gw_check_election(struct batadv_priv *bat_priv,
 			      struct batadv_orig_node *orig_node)
 {
@@ -321,7 +344,7 @@ void batadv_gw_check_election(struct batadv_priv *bat_priv,
 }
 
 /**
- * batadv_gw_node_add - add gateway node to list of available gateways
+ * batadv_gw_node_add() - add gateway node to list of available gateways
  * @bat_priv: the bat priv with all the soft interface information
  * @orig_node: originator announcing gateway capabilities
  * @gateway: announced bandwidth information
@@ -364,7 +387,7 @@ static void batadv_gw_node_add(struct batadv_priv *bat_priv,
 }
 
 /**
- * batadv_gw_node_get - retrieve gateway node from list of available gateways
+ * batadv_gw_node_get() - retrieve gateway node from list of available gateways
  * @bat_priv: the bat priv with all the soft interface information
  * @orig_node: originator announcing gateway capabilities
  *
@@ -393,7 +416,7 @@ struct batadv_gw_node *batadv_gw_node_get(struct batadv_priv *bat_priv,
 }
 
 /**
- * batadv_gw_node_update - update list of available gateways with changed
+ * batadv_gw_node_update() - update list of available gateways with changed
  *  bandwidth information
  * @bat_priv: the bat priv with all the soft interface information
  * @orig_node: originator announcing gateway capabilities
@@ -458,6 +481,11 @@ void batadv_gw_node_update(struct batadv_priv *bat_priv,
 		batadv_gw_node_put(gw_node);
 }
 
+/**
+ * batadv_gw_node_delete() - Remove orig_node from gateway list
+ * @bat_priv: the bat priv with all the soft interface information
+ * @orig_node: orig node which is currently in process of being removed
+ */
 void batadv_gw_node_delete(struct batadv_priv *bat_priv,
 			   struct batadv_orig_node *orig_node)
 {
@@ -469,6 +497,10 @@ void batadv_gw_node_delete(struct batadv_priv *bat_priv,
 	batadv_gw_node_update(bat_priv, orig_node, &gateway);
 }
 
+/**
+ * batadv_gw_node_free() - Free gateway information from soft interface
+ * @bat_priv: the bat priv with all the soft interface information
+ */
 void batadv_gw_node_free(struct batadv_priv *bat_priv)
 {
 	struct batadv_gw_node *gw_node;
@@ -484,6 +516,14 @@ void batadv_gw_node_free(struct batadv_priv *bat_priv)
 }
 
 #ifdef CONFIG_BATMAN_ADV_DEBUGFS
+
+/**
+ * batadv_gw_client_seq_print_text() - Print the gateway table in a seq file
+ * @seq: seq file to print on
+ * @offset: not used
+ *
+ * Return: always 0
+ */
 int batadv_gw_client_seq_print_text(struct seq_file *seq, void *offset)
 {
 	struct net_device *net_dev = (struct net_device *)seq->private;
@@ -514,7 +554,7 @@ int batadv_gw_client_seq_print_text(struct seq_file *seq, void *offset)
 #endif
 
 /**
- * batadv_gw_dump - Dump gateways into a message
+ * batadv_gw_dump() - Dump gateways into a message
  * @msg: Netlink message to dump into
  * @cb: Control block containing additional options
  *
@@ -567,7 +607,7 @@ int batadv_gw_dump(struct sk_buff *msg, struct netlink_callback *cb)
 }
 
 /**
- * batadv_gw_dhcp_recipient_get - check if a packet is a DHCP message
+ * batadv_gw_dhcp_recipient_get() - check if a packet is a DHCP message
  * @skb: the packet to check
  * @header_len: a pointer to the batman-adv header size
  * @chaddr: buffer where the client address will be stored. Valid
@@ -686,7 +726,8 @@ batadv_gw_dhcp_recipient_get(struct sk_buff *skb, unsigned int *header_len,
 }
 
 /**
- * batadv_gw_out_of_range - check if the dhcp request destination is the best gw
+ * batadv_gw_out_of_range() - check if the dhcp request destination is the best
+ *  gateway
  * @bat_priv: the bat priv with all the soft interface information
  * @skb: the outgoing packet
  *
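
batadv_gw_dhcp_recipient_get() classifies DHCP traffic by walking Ethernet ->
IP -> UDP and inspecting the well-known ports. Stripped of the VLAN/IPv6
handling and the pskb_may_pull() bounds checks, the IPv4 side amounts to
something like this sketch:

    #include <linux/if_ether.h>
    #include <linux/in.h>
    #include <linux/ip.h>
    #include <linux/skbuff.h>
    #include <linux/udp.h>

    static bool is_dhcpv4_sketch(struct sk_buff *skb)
    {
            struct ethhdr *eth = (struct ethhdr *)skb->data;
            struct iphdr *iph;
            struct udphdr *udph;

            if (eth->h_proto != htons(ETH_P_IP))
                    return false;

            iph = (struct iphdr *)(skb->data + ETH_HLEN);
            if (iph->protocol != IPPROTO_UDP)
                    return false;

            udph = (struct udphdr *)((u8 *)iph + iph->ihl * 4);
            /* 67 = DHCPv4 server port, 68 = DHCPv4 client port */
            return udph->dest == htons(67) || udph->dest == htons(68);
    }
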
diff --git a/net/batman-adv/gateway_client.h b/net/batman-adv/gateway_client.h
index 3baa3d4..981f584 100644
--- a/net/batman-adv/gateway_client.h
+++ b/net/batman-adv/gateway_client.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
 /* Copyright (C) 2009-2017  B.A.T.M.A.N. contributors:
  *
  * Marek Lindner
diff --git a/net/batman-adv/gateway_common.c b/net/batman-adv/gateway_common.c
index 2c26039..b3e156a 100644
--- a/net/batman-adv/gateway_common.c
+++ b/net/batman-adv/gateway_common.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
 /* Copyright (C) 2009-2017  B.A.T.M.A.N. contributors:
  *
  * Marek Lindner
@@ -26,15 +27,15 @@
 #include <linux/netdevice.h>
 #include <linux/stddef.h>
 #include <linux/string.h>
+#include <uapi/linux/batadv_packet.h>
 
 #include "gateway_client.h"
 #include "log.h"
-#include "packet.h"
 #include "tvlv.h"
 
 /**
- * batadv_parse_throughput - parse supplied string buffer to extract throughput
- *  information
+ * batadv_parse_throughput() - parse supplied string buffer to extract
+ *  throughput information
  * @net_dev: the soft interface net device
  * @buff: string buffer to parse
  * @description: text shown when throughput string cannot be parsed
@@ -100,8 +101,8 @@ bool batadv_parse_throughput(struct net_device *net_dev, char *buff,
 }
 
 /**
- * batadv_parse_gw_bandwidth - parse supplied string buffer to extract download
- *  and upload bandwidth information
+ * batadv_parse_gw_bandwidth() - parse supplied string buffer to extract
+ *  download and upload bandwidth information
  * @net_dev: the soft interface net device
  * @buff: string buffer to parse
  * @down: pointer holding the returned download bandwidth information
@@ -136,8 +137,8 @@ static bool batadv_parse_gw_bandwidth(struct net_device *net_dev, char *buff,
 }
 
 /**
- * batadv_gw_tvlv_container_update - update the gw tvlv container after gateway
- *  setting change
+ * batadv_gw_tvlv_container_update() - update the gw tvlv container after
+ *  gateway setting change
  * @bat_priv: the bat priv with all the soft interface information
  */
 void batadv_gw_tvlv_container_update(struct batadv_priv *bat_priv)
@@ -164,6 +165,15 @@ void batadv_gw_tvlv_container_update(struct batadv_priv *bat_priv)
 	}
 }
 
+/**
+ * batadv_gw_bandwidth_set() - Parse and set download/upload gateway bandwidth
+ *  from supplied string buffer
+ * @net_dev: netdev struct of the soft interface
+ * @buff: the buffer containing the user data
+ * @count: number of bytes in the buffer
+ *
+ * Return: 'count' on success or a negative error code in case of failure
+ */
 ssize_t batadv_gw_bandwidth_set(struct net_device *net_dev, char *buff,
 				size_t count)
 {
@@ -207,7 +217,7 @@ ssize_t batadv_gw_bandwidth_set(struct net_device *net_dev, char *buff,
 }
 
 /**
- * batadv_gw_tvlv_ogm_handler_v1 - process incoming gateway tvlv container
+ * batadv_gw_tvlv_ogm_handler_v1() - process incoming gateway tvlv container
  * @bat_priv: the bat priv with all the soft interface information
  * @orig: the orig_node of the ogm
  * @flags: flags indicating the tvlv state (see batadv_tvlv_handler_flags)
@@ -248,7 +258,7 @@ static void batadv_gw_tvlv_ogm_handler_v1(struct batadv_priv *bat_priv,
 }
 
 /**
- * batadv_gw_init - initialise the gateway handling internals
+ * batadv_gw_init() - initialise the gateway handling internals
  * @bat_priv: the bat priv with all the soft interface information
  */
 void batadv_gw_init(struct batadv_priv *bat_priv)
@@ -264,7 +274,7 @@ void batadv_gw_init(struct batadv_priv *bat_priv)
 }
 
 /**
- * batadv_gw_free - free the gateway handling internals
+ * batadv_gw_free() - free the gateway handling internals
  * @bat_priv: the bat priv with all the soft interface information
  */
 void batadv_gw_free(struct batadv_priv *bat_priv)
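
batadv_parse_throughput(), whose comment is rewrapped above, accepts a number
with an optional kbit/mbit suffix. A rough user-space model of that parsing
(the kernel version is built on kstrtou64 and logs parse errors against the
soft interface):

    #include <stdbool.h>
    #include <stdlib.h>
    #include <strings.h>

    /* parse "10", "10kbit" or "10mbit" into kbit/s */
    static bool parse_throughput_sketch(const char *buff,
                                        unsigned long long *tp_kbit)
    {
            char *end;
            unsigned long long val = strtoull(buff, &end, 10);

            if (end == buff)
                    return false;           /* no leading number */

            if (*end == '\0' || !strcasecmp(end, "kbit"))
                    *tp_kbit = val;
            else if (!strcasecmp(end, "mbit"))
                    *tp_kbit = val * 1000;
            else
                    return false;           /* unknown unit */

            return true;
    }
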
diff --git a/net/batman-adv/gateway_common.h b/net/batman-adv/gateway_common.h
index 0a6a97d..afebd9c 100644
--- a/net/batman-adv/gateway_common.h
+++ b/net/batman-adv/gateway_common.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
 /* Copyright (C) 2009-2017  B.A.T.M.A.N. contributors:
  *
  * Marek Lindner
@@ -32,11 +33,12 @@ enum batadv_gw_modes {
 
 /**
  * enum batadv_bandwidth_units - bandwidth unit types
- * @BATADV_BW_UNIT_KBIT: unit type kbit
- * @BATADV_BW_UNIT_MBIT: unit type mbit
  */
 enum batadv_bandwidth_units {
+	/** @BATADV_BW_UNIT_KBIT: unit type kbit */
 	BATADV_BW_UNIT_KBIT,
+
+	/** @BATADV_BW_UNIT_MBIT: unit type mbit */
 	BATADV_BW_UNIT_MBIT,
 };
 
diff --git a/net/batman-adv/hard-interface.c b/net/batman-adv/hard-interface.c
index 4e3d534..5f186bf 100644
--- a/net/batman-adv/hard-interface.c
+++ b/net/batman-adv/hard-interface.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
 /* Copyright (C) 2007-2017  B.A.T.M.A.N. contributors:
  *
  * Marek Lindner, Simon Wunderlich
@@ -22,7 +23,7 @@
 #include <linux/bug.h>
 #include <linux/byteorder/generic.h>
 #include <linux/errno.h>
-#include <linux/fs.h>
+#include <linux/gfp.h>
 #include <linux/if.h>
 #include <linux/if_arp.h>
 #include <linux/if_ether.h>
@@ -37,6 +38,7 @@
 #include <linux/spinlock.h>
 #include <net/net_namespace.h>
 #include <net/rtnetlink.h>
+#include <uapi/linux/batadv_packet.h>
 
 #include "bat_v.h"
 #include "bridge_loop_avoidance.h"
@@ -45,14 +47,13 @@
 #include "gateway_client.h"
 #include "log.h"
 #include "originator.h"
-#include "packet.h"
 #include "send.h"
 #include "soft-interface.h"
 #include "sysfs.h"
 #include "translation-table.h"
 
 /**
- * batadv_hardif_release - release hard interface from lists and queue for
+ * batadv_hardif_release() - release hard interface from lists and queue for
  *  free after rcu grace period
  * @ref: kref pointer of the hard interface
  */
@@ -66,6 +67,12 @@ void batadv_hardif_release(struct kref *ref)
 	kfree_rcu(hard_iface, rcu);
 }
 
+/**
+ * batadv_hardif_get_by_netdev() - Get hard interface object of a net_device
+ * @net_dev: net_device to search for
+ *
+ * Return: batadv_hard_iface of net_dev (with increased refcnt), NULL on errors
+ */
 struct batadv_hard_iface *
 batadv_hardif_get_by_netdev(const struct net_device *net_dev)
 {
@@ -86,7 +93,7 @@ batadv_hardif_get_by_netdev(const struct net_device *net_dev)
 }
 
 /**
- * batadv_getlink_net - return link net namespace (of use fallback)
+ * batadv_getlink_net() - return link net namespace (or use fallback)
  * @netdev: net_device to check
  * @fallback_net: return in case get_link_net is not available for @netdev
  *
@@ -105,7 +112,7 @@ static struct net *batadv_getlink_net(const struct net_device *netdev,
 }
 
 /**
- * batadv_mutual_parents - check if two devices are each others parent
+ * batadv_mutual_parents() - check if two devices are each other's parent
  * @dev1: 1st net dev
  * @net1: 1st devices netns
  * @dev2: 2nd net dev
@@ -138,7 +145,7 @@ static bool batadv_mutual_parents(const struct net_device *dev1,
 }
 
 /**
- * batadv_is_on_batman_iface - check if a device is a batman iface descendant
+ * batadv_is_on_batman_iface() - check if a device is a batman iface descendant
  * @net_dev: the device to check
  *
  * If the user creates any virtual device on top of a batman-adv interface, it
@@ -202,7 +209,7 @@ static bool batadv_is_valid_iface(const struct net_device *net_dev)
 }
 
 /**
- * batadv_get_real_netdevice - check if the given netdev struct is a virtual
+ * batadv_get_real_netdevice() - check if the given netdev struct is a virtual
  *  interface on top of another 'real' interface
  * @netdev: the device to check
  *
@@ -246,7 +253,7 @@ static struct net_device *batadv_get_real_netdevice(struct net_device *netdev)
 }
 
 /**
- * batadv_get_real_netdev - check if the given net_device struct is a virtual
+ * batadv_get_real_netdev() - check if the given net_device struct is a virtual
  *  interface on top of another 'real' interface
  * @net_device: the device to check
  *
@@ -265,7 +272,7 @@ struct net_device *batadv_get_real_netdev(struct net_device *net_device)
 }
 
 /**
- * batadv_is_wext_netdev - check if the given net_device struct is a
+ * batadv_is_wext_netdev() - check if the given net_device struct is a
  *  wext wifi interface
  * @net_device: the device to check
  *
@@ -289,7 +296,7 @@ static bool batadv_is_wext_netdev(struct net_device *net_device)
 }
 
 /**
- * batadv_is_cfg80211_netdev - check if the given net_device struct is a
+ * batadv_is_cfg80211_netdev() - check if the given net_device struct is a
  *  cfg80211 wifi interface
  * @net_device: the device to check
  *
@@ -309,7 +316,7 @@ static bool batadv_is_cfg80211_netdev(struct net_device *net_device)
 }
 
 /**
- * batadv_wifi_flags_evaluate - calculate wifi flags for net_device
+ * batadv_wifi_flags_evaluate() - calculate wifi flags for net_device
  * @net_device: the device to check
  *
  * Return: batadv_hard_iface_wifi_flags flags of the device
@@ -344,7 +351,7 @@ static u32 batadv_wifi_flags_evaluate(struct net_device *net_device)
 }
 
 /**
- * batadv_is_cfg80211_hardif - check if the given hardif is a cfg80211 wifi
+ * batadv_is_cfg80211_hardif() - check if the given hardif is a cfg80211 wifi
  *  interface
  * @hard_iface: the device to check
  *
@@ -362,7 +369,7 @@ bool batadv_is_cfg80211_hardif(struct batadv_hard_iface *hard_iface)
 }
 
 /**
- * batadv_is_wifi_hardif - check if the given hardif is a wifi interface
+ * batadv_is_wifi_hardif() - check if the given hardif is a wifi interface
  * @hard_iface: the device to check
  *
  * Return: true if the net device is a 802.11 wireless device, false otherwise.
@@ -376,7 +383,7 @@ bool batadv_is_wifi_hardif(struct batadv_hard_iface *hard_iface)
 }
 
 /**
- * batadv_hardif_no_broadcast - check whether (re)broadcast is necessary
+ * batadv_hardif_no_broadcast() - check whether (re)broadcast is necessary
  * @if_outgoing: the outgoing interface checked and considered for (re)broadcast
  * @orig_addr: the originator of this packet
  * @orig_neigh: originator address of the forwarder we just got the packet from
@@ -560,6 +567,13 @@ static void batadv_hardif_recalc_extra_skbroom(struct net_device *soft_iface)
 	soft_iface->needed_tailroom = lower_tailroom;
 }
 
+/**
+ * batadv_hardif_min_mtu() - Calculate maximum MTU for soft interface
+ * @soft_iface: netdev struct of the soft interface
+ *
+ * Return: MTU for the soft-interface (limited by the minimal MTU of all active
+ *  slave interfaces)
+ */
 int batadv_hardif_min_mtu(struct net_device *soft_iface)
 {
 	struct batadv_priv *bat_priv = netdev_priv(soft_iface);
@@ -606,7 +620,11 @@ int batadv_hardif_min_mtu(struct net_device *soft_iface)
 	return min_t(int, min_mtu - batadv_max_header_len(), ETH_DATA_LEN);
 }
 
-/* adjusts the MTU if a new interface with a smaller MTU appeared. */
+/**
+ * batadv_update_min_mtu() - Adjusts the MTU if a new interface with a smaller
+ *  MTU appeared
+ * @soft_iface: netdev struct of the soft interface
+ */
 void batadv_update_min_mtu(struct net_device *soft_iface)
 {
 	soft_iface->mtu = batadv_hardif_min_mtu(soft_iface);
@@ -667,7 +685,7 @@ batadv_hardif_deactivate_interface(struct batadv_hard_iface *hard_iface)
 }
 
 /**
- * batadv_master_del_slave - remove hard_iface from the current master interface
+ * batadv_master_del_slave() - remove hard_iface from the current master iface
  * @slave: the interface enslaved in another master
  * @master: the master from which slave has to be removed
  *
@@ -691,6 +709,14 @@ static int batadv_master_del_slave(struct batadv_hard_iface *slave,
 	return ret;
 }
 
+/**
+ * batadv_hardif_enable_interface() - Enslave hard interface to soft interface
+ * @hard_iface: hard interface to add to soft interface
+ * @net: the applicable net namespace
+ * @iface_name: name of the soft interface
+ *
+ * Return: 0 on success or negative error number in case of failure
+ */
 int batadv_hardif_enable_interface(struct batadv_hard_iface *hard_iface,
 				   struct net *net, const char *iface_name)
 {
@@ -802,6 +828,12 @@ int batadv_hardif_enable_interface(struct batadv_hard_iface *hard_iface,
 	return ret;
 }
 
+/**
+ * batadv_hardif_disable_interface() - Remove hard interface from soft interface
+ * @hard_iface: hard interface to be removed
+ * @autodel: whether to delete soft interface when it doesn't contain any other
+ *  slave interfaces
+ */
 void batadv_hardif_disable_interface(struct batadv_hard_iface *hard_iface,
 				     enum batadv_hard_if_cleanup autodel)
 {
@@ -936,6 +968,9 @@ static void batadv_hardif_remove_interface(struct batadv_hard_iface *hard_iface)
 	batadv_hardif_put(hard_iface);
 }
 
+/**
+ * batadv_hardif_remove_interfaces() - Remove all hard interfaces
+ */
 void batadv_hardif_remove_interfaces(void)
 {
 	struct batadv_hard_iface *hard_iface, *hard_iface_tmp;
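
A quick illustration of the MTU helpers documented above (the slave MTU
numbers are made up): with active slaves of MTU 1500 and 1280,
batadv_hardif_min_mtu() yields min(1280 - batadv_max_header_len(),
ETH_DATA_LEN), and batadv_update_min_mtu() applies it:

    /* recompute the soft interface MTU after a slave change */
    batadv_update_min_mtu(soft_iface); /* soft_iface->mtu updated */
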
diff --git a/net/batman-adv/hard-interface.h b/net/batman-adv/hard-interface.h
index 9f9890f..de5e9a3 100644
--- a/net/batman-adv/hard-interface.h
+++ b/net/batman-adv/hard-interface.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
 /* Copyright (C) 2007-2017  B.A.T.M.A.N. contributors:
  *
  * Marek Lindner, Simon Wunderlich
@@ -30,36 +31,74 @@
 struct net_device;
 struct net;
 
+/**
+ * enum batadv_hard_if_state - State of a hard interface
+ */
 enum batadv_hard_if_state {
+	/**
+	 * @BATADV_IF_NOT_IN_USE: interface is not used as slave interface of a
+	 * batman-adv soft interface
+	 */
 	BATADV_IF_NOT_IN_USE,
+
+	/**
+	 * @BATADV_IF_TO_BE_REMOVED: interface will be removed from soft
+	 * interface
+	 */
 	BATADV_IF_TO_BE_REMOVED,
+
+	/** @BATADV_IF_INACTIVE: interface is deactivated */
 	BATADV_IF_INACTIVE,
+
+	/** @BATADV_IF_ACTIVE: interface is used */
 	BATADV_IF_ACTIVE,
+
+	/** @BATADV_IF_TO_BE_ACTIVATED: interface is getting activated */
 	BATADV_IF_TO_BE_ACTIVATED,
+
+	/**
+	 * @BATADV_IF_I_WANT_YOU: interface is queued up (using sysfs) for being
+	 * added as slave interface of a batman-adv soft interface
+	 */
 	BATADV_IF_I_WANT_YOU,
 };
 
 /**
  * enum batadv_hard_if_bcast - broadcast avoidance options
- * @BATADV_HARDIF_BCAST_OK: Do broadcast on according hard interface
- * @BATADV_HARDIF_BCAST_NORECIPIENT: Broadcast not needed, there is no recipient
- * @BATADV_HARDIF_BCAST_DUPFWD: There is just the neighbor we got it from
- * @BATADV_HARDIF_BCAST_DUPORIG: There is just the originator
  */
 enum batadv_hard_if_bcast {
+	/** @BATADV_HARDIF_BCAST_OK: Do broadcast on according hard interface */
 	BATADV_HARDIF_BCAST_OK = 0,
+
+	/**
+	 * @BATADV_HARDIF_BCAST_NORECIPIENT: Broadcast not needed, there is no
+	 *  recipient
+	 */
 	BATADV_HARDIF_BCAST_NORECIPIENT,
+
+	/**
+	 * @BATADV_HARDIF_BCAST_DUPFWD: There is just the neighbor we got it
+	 *  from
+	 */
 	BATADV_HARDIF_BCAST_DUPFWD,
+
+	/** @BATADV_HARDIF_BCAST_DUPORIG: There is just the originator */
 	BATADV_HARDIF_BCAST_DUPORIG,
 };
 
 /**
  * enum batadv_hard_if_cleanup - Cleanup modi for soft_iface after slave removal
- * @BATADV_IF_CLEANUP_KEEP: Don't automatically delete soft-interface
- * @BATADV_IF_CLEANUP_AUTO: Delete soft-interface after last slave was removed
  */
 enum batadv_hard_if_cleanup {
+	/**
+	 * @BATADV_IF_CLEANUP_KEEP: Don't automatically delete soft-interface
+	 */
 	BATADV_IF_CLEANUP_KEEP,
+
+	/**
+	 * @BATADV_IF_CLEANUP_AUTO: Delete soft-interface after last slave was
+	 *  removed
+	 */
 	BATADV_IF_CLEANUP_AUTO,
 };
 
@@ -82,7 +121,7 @@ int batadv_hardif_no_broadcast(struct batadv_hard_iface *if_outgoing,
 			       u8 *orig_addr, u8 *orig_neigh);
 
 /**
- * batadv_hardif_put - decrement the hard interface refcounter and possibly
+ * batadv_hardif_put() - decrement the hard interface refcounter and possibly
  *  release it
  * @hard_iface: the hard interface to free
  */
@@ -91,6 +130,12 @@ static inline void batadv_hardif_put(struct batadv_hard_iface *hard_iface)
 	kref_put(&hard_iface->refcount, batadv_hardif_release);
 }
 
+/**
+ * batadv_primary_if_get_selected() - Get reference to primary interface
+ * @bat_priv: the bat priv with all the soft interface information
+ *
+ * Return: primary interface (with increased refcnt), otherwise NULL
+ */
 static inline struct batadv_hard_iface *
 batadv_primary_if_get_selected(struct batadv_priv *bat_priv)
 {
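
The kernel-doc added here spells out the refcount contract; a minimal
caller sketch honouring it (error handling elided) looks like:

    struct batadv_hard_iface *primary_if;

    primary_if = batadv_primary_if_get_selected(bat_priv);
    if (!primary_if)
        return;

    /* ... use primary_if ... */

    batadv_hardif_put(primary_if); /* drop the reference taken above */
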
diff --git a/net/batman-adv/hash.c b/net/batman-adv/hash.c
index b5f7e13..04d9643 100644
--- a/net/batman-adv/hash.c
+++ b/net/batman-adv/hash.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
 /* Copyright (C) 2006-2017  B.A.T.M.A.N. contributors:
  *
  * Simon Wunderlich, Marek Lindner
@@ -18,7 +19,7 @@
 #include "hash.h"
 #include "main.h"
 
-#include <linux/fs.h>
+#include <linux/gfp.h>
 #include <linux/lockdep.h>
 #include <linux/slab.h>
 
@@ -33,7 +34,10 @@ static void batadv_hash_init(struct batadv_hashtable *hash)
 	}
 }
 
-/* free only the hashtable and the hash itself. */
+/**
+ * batadv_hash_destroy() - Free only the hashtable and the hash itself
+ * @hash: hash object to destroy
+ */
 void batadv_hash_destroy(struct batadv_hashtable *hash)
 {
 	kfree(hash->list_locks);
@@ -41,7 +45,12 @@ void batadv_hash_destroy(struct batadv_hashtable *hash)
 	kfree(hash);
 }
 
-/* allocates and clears the hash */
+/**
+ * batadv_hash_new() - Allocates and clears the hashtable
+ * @size: number of hash buckets to allocate
+ *
+ * Return: newly allocated hashtable, NULL on errors
+ */
 struct batadv_hashtable *batadv_hash_new(u32 size)
 {
 	struct batadv_hashtable *hash;
@@ -70,6 +79,11 @@ struct batadv_hashtable *batadv_hash_new(u32 size)
 	return NULL;
 }
 
+/**
+ * batadv_hash_set_lock_class() - Set specific lockdep class for hash spinlocks
+ * @hash: hash object to modify
+ * @key: lockdep class key address
+ */
 void batadv_hash_set_lock_class(struct batadv_hashtable *hash,
 				struct lock_class_key *key)
 {
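
To illustrate the documented allocation contract, a hedged lifecycle
sketch (the bucket count and lock class key are made up):

    static struct lock_class_key my_hash_lock_key; /* hypothetical */
    struct batadv_hashtable *hash;

    hash = batadv_hash_new(64); /* 64 cleared buckets */
    if (!hash)
        return -ENOMEM;

    batadv_hash_set_lock_class(hash, &my_hash_lock_key);
    /* ... add and remove entries ... */
    batadv_hash_destroy(hash); /* frees list_locks, table and hash */
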
diff --git a/net/batman-adv/hash.h b/net/batman-adv/hash.h
index 0c905e9..4ce1b6d 100644
--- a/net/batman-adv/hash.h
+++ b/net/batman-adv/hash.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
 /* Copyright (C) 2006-2017  B.A.T.M.A.N. contributors:
  *
  * Simon Wunderlich, Marek Lindner
@@ -45,10 +46,18 @@ typedef bool (*batadv_hashdata_compare_cb)(const struct hlist_node *,
 typedef u32 (*batadv_hashdata_choose_cb)(const void *, u32);
 typedef void (*batadv_hashdata_free_cb)(struct hlist_node *, void *);
 
+/**
+ * struct batadv_hashtable - Wrapper of simple hlist based hashtable
+ */
 struct batadv_hashtable {
-	struct hlist_head *table;   /* the hashtable itself with the buckets */
-	spinlock_t *list_locks;     /* spinlock for each hash list entry */
-	u32 size;		    /* size of hashtable */
+	/** @table: the hashtable itself with the buckets */
+	struct hlist_head *table;
+
+	/** @list_locks: spinlock for each hash list entry */
+	spinlock_t *list_locks;
+
+	/** @size: size of hashtable */
+	u32 size;
 };
 
 /* allocates and clears the hash */
@@ -62,7 +71,7 @@ void batadv_hash_set_lock_class(struct batadv_hashtable *hash,
 void batadv_hash_destroy(struct batadv_hashtable *hash);
 
 /**
- *	batadv_hash_add - adds data to the hashtable
+ *	batadv_hash_add() - adds data to the hashtable
  *	@hash: storage hash table
  *	@compare: callback to determine if 2 hash elements are identical
  *	@choose: callback calculating the hash index
@@ -112,8 +121,15 @@ static inline int batadv_hash_add(struct batadv_hashtable *hash,
 	return ret;
 }
 
-/* removes data from hash, if found. data could be the structure you use with
- * just the key filled, we just need the key for comparing.
+/**
+ * batadv_hash_remove() - Removes data from hash, if found
+ * @hash: hash table
+ * @compare: callback to determine if 2 hash elements are identical
+ * @choose: callback calculating the hash index
+ * @data: data passed to the aforementioned callbacks as argument
+ *
+ * Data could be the structure you use with just the key filled; we just need
+ * the key for comparing.
  *
 * Return: returns pointer to data on success, so you can remove the used
  * structure yourself, or NULL on error
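
A hedged sketch of the callback pair batadv_hash_add() and
batadv_hash_remove() expect, following the typedefs above; struct
my_entry is illustrative and assumes the MAC key is its first member,
mirroring the usual batman-adv pattern:

    #include <linux/jhash.h>

    static bool my_compare(const struct hlist_node *node, const void *data2)
    {
        const void *data1 = container_of(node, struct my_entry,
                                         hash_entry);

        return batadv_compare_eth(data1, data2);
    }

    static u32 my_choose(const void *data, u32 size)
    {
        return jhash(data, ETH_ALEN, 0) % size; /* bucket index < size */
    }
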
diff --git a/net/batman-adv/icmp_socket.c b/net/batman-adv/icmp_socket.c
index bded311..8041cf1 100644
--- a/net/batman-adv/icmp_socket.c
+++ b/net/batman-adv/icmp_socket.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
 /* Copyright (C) 2007-2017  B.A.T.M.A.N. contributors:
  *
  * Marek Lindner
@@ -26,6 +27,7 @@
 #include <linux/export.h>
 #include <linux/fcntl.h>
 #include <linux/fs.h>
+#include <linux/gfp.h>
 #include <linux/if_ether.h>
 #include <linux/kernel.h>
 #include <linux/list.h>
@@ -42,11 +44,11 @@
 #include <linux/string.h>
 #include <linux/uaccess.h>
 #include <linux/wait.h>
+#include <uapi/linux/batadv_packet.h>
 
 #include "hard-interface.h"
 #include "log.h"
 #include "originator.h"
-#include "packet.h"
 #include "send.h"
 
 static struct batadv_socket_client *batadv_socket_client_hash[256];
@@ -55,6 +57,9 @@ static void batadv_socket_add_packet(struct batadv_socket_client *socket_client,
 				     struct batadv_icmp_header *icmph,
 				     size_t icmp_len);
 
+/**
+ * batadv_socket_init() - Initialize soft interface independent socket data
+ */
 void batadv_socket_init(void)
 {
 	memset(batadv_socket_client_hash, 0, sizeof(batadv_socket_client_hash));
@@ -314,6 +319,12 @@ static const struct file_operations batadv_fops = {
 	.llseek = no_llseek,
 };
 
+/**
+ * batadv_socket_setup() - Create debugfs "socket" file
+ * @bat_priv: the bat priv with all the soft interface information
+ *
+ * Return: 0 on success or negative error number in case of failure
+ */
 int batadv_socket_setup(struct batadv_priv *bat_priv)
 {
 	struct dentry *d;
@@ -333,7 +344,7 @@ int batadv_socket_setup(struct batadv_priv *bat_priv)
 }
 
 /**
- * batadv_socket_add_packet - schedule an icmp packet to be sent to
+ * batadv_socket_add_packet() - schedule an icmp packet to be sent to
  *  userspace on an icmp socket.
  * @socket_client: the socket this packet belongs to
  * @icmph: pointer to the header of the icmp packet
@@ -390,7 +401,7 @@ static void batadv_socket_add_packet(struct batadv_socket_client *socket_client,
 }
 
 /**
- * batadv_socket_receive_packet - schedule an icmp packet to be received
+ * batadv_socket_receive_packet() - schedule an icmp packet to be received
  *  locally and sent to userspace.
  * @icmph: pointer to the header of the icmp packet
  * @icmp_len: total length of the icmp packet
diff --git a/net/batman-adv/icmp_socket.h b/net/batman-adv/icmp_socket.h
index f3fec40..84cddd0 100644
--- a/net/batman-adv/icmp_socket.h
+++ b/net/batman-adv/icmp_socket.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
 /* Copyright (C) 2007-2017  B.A.T.M.A.N. contributors:
  *
  * Marek Lindner
diff --git a/net/batman-adv/log.c b/net/batman-adv/log.c
index 4ef4bde..da00498 100644
--- a/net/batman-adv/log.c
+++ b/net/batman-adv/log.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
 /* Copyright (C) 2010-2017  B.A.T.M.A.N. contributors:
  *
  * Marek Lindner
@@ -24,6 +25,7 @@
 #include <linux/export.h>
 #include <linux/fcntl.h>
 #include <linux/fs.h>
+#include <linux/gfp.h>
 #include <linux/jiffies.h>
 #include <linux/kernel.h>
 #include <linux/module.h>
@@ -86,6 +88,13 @@ static int batadv_fdebug_log(struct batadv_priv_debug_log *debug_log,
 	return 0;
 }
 
+/**
+ * batadv_debug_log() - Add debug log entry
+ * @bat_priv: the bat priv with all the soft interface information
+ * @fmt: format string
+ *
+ * Return: 0 on success or negative error number in case of failure
+ */
 int batadv_debug_log(struct batadv_priv *bat_priv, const char *fmt, ...)
 {
 	va_list args;
@@ -197,6 +206,12 @@ static const struct file_operations batadv_log_fops = {
 	.llseek         = no_llseek,
 };
 
+/**
+ * batadv_debug_log_setup() - Initialize debug log
+ * @bat_priv: the bat priv with all the soft interface information
+ *
+ * Return: 0 on success or negative error number in case of failure
+ */
 int batadv_debug_log_setup(struct batadv_priv *bat_priv)
 {
 	struct dentry *d;
@@ -222,6 +237,10 @@ int batadv_debug_log_setup(struct batadv_priv *bat_priv)
 	return -ENOMEM;
 }
 
+/**
+ * batadv_debug_log_cleanup() - Destroy debug log
+ * @bat_priv: the bat priv with all the soft interface information
+ */
 void batadv_debug_log_cleanup(struct batadv_priv *bat_priv)
 {
 	kfree(bat_priv->debug_log);
diff --git a/net/batman-adv/log.h b/net/batman-adv/log.h
index 65ce97e..35e02b2 100644
--- a/net/batman-adv/log.h
+++ b/net/batman-adv/log.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
 /* Copyright (C) 2007-2017  B.A.T.M.A.N. contributors:
  *
  * Marek Lindner, Simon Wunderlich
@@ -44,25 +45,33 @@ static inline void batadv_debug_log_cleanup(struct batadv_priv *bat_priv)
 
 /**
  * enum batadv_dbg_level - available log levels
- * @BATADV_DBG_BATMAN: OGM and TQ computations related messages
- * @BATADV_DBG_ROUTES: route added / changed / deleted
- * @BATADV_DBG_TT: translation table messages
- * @BATADV_DBG_BLA: bridge loop avoidance messages
- * @BATADV_DBG_DAT: ARP snooping and DAT related messages
- * @BATADV_DBG_NC: network coding related messages
- * @BATADV_DBG_MCAST: multicast related messages
- * @BATADV_DBG_TP_METER: throughput meter messages
- * @BATADV_DBG_ALL: the union of all the above log levels
  */
 enum batadv_dbg_level {
+	/** @BATADV_DBG_BATMAN: OGM and TQ computations related messages */
 	BATADV_DBG_BATMAN	= BIT(0),
+
+	/** @BATADV_DBG_ROUTES: route added / changed / deleted */
 	BATADV_DBG_ROUTES	= BIT(1),
+
+	/** @BATADV_DBG_TT: translation table messages */
 	BATADV_DBG_TT		= BIT(2),
+
+	/** @BATADV_DBG_BLA: bridge loop avoidance messages */
 	BATADV_DBG_BLA		= BIT(3),
+
+	/** @BATADV_DBG_DAT: ARP snooping and DAT related messages */
 	BATADV_DBG_DAT		= BIT(4),
+
+	/** @BATADV_DBG_NC: network coding related messages */
 	BATADV_DBG_NC		= BIT(5),
+
+	/** @BATADV_DBG_MCAST: multicast related messages */
 	BATADV_DBG_MCAST	= BIT(6),
+
+	/** @BATADV_DBG_TP_METER: throughput meter messages */
 	BATADV_DBG_TP_METER	= BIT(7),
+
+	/** @BATADV_DBG_ALL: the union of all the above log levels */
 	BATADV_DBG_ALL		= 255,
 };
 
@@ -70,7 +79,14 @@ enum batadv_dbg_level {
 int batadv_debug_log(struct batadv_priv *bat_priv, const char *fmt, ...)
 __printf(2, 3);
 
-/* possibly ratelimited debug output */
+/**
+ * _batadv_dbg() - Store debug output with(out) ratelimiting
+ * @type: type of debug message
+ * @bat_priv: the bat priv with all the soft interface information
+ * @ratelimited: whether output should be rate limited
+ * @fmt: format string
+ * @arg...: variable arguments
+ */
 #define _batadv_dbg(type, bat_priv, ratelimited, fmt, arg...)		\
 	do {								\
 		struct batadv_priv *__batpriv = (bat_priv);		\
@@ -89,11 +105,30 @@ static inline void _batadv_dbg(int type __always_unused,
 }
 #endif
 
+/**
+ * batadv_dbg() - Store debug output without ratelimiting
+ * @type: type of debug message
+ * @bat_priv: the bat priv with all the soft interface information
+ * @arg...: format string and variable arguments
+ */
 #define batadv_dbg(type, bat_priv, arg...) \
 	_batadv_dbg(type, bat_priv, 0, ## arg)
+
+/**
+ * batadv_dbg_ratelimited() - Store debug output with ratelimiting
+ * @type: type of debug message
+ * @bat_priv: the bat priv with all the soft interface information
+ * @arg...: format string and variable arguments
+ */
 #define batadv_dbg_ratelimited(type, bat_priv, arg...) \
 	_batadv_dbg(type, bat_priv, 1, ## arg)
 
+/**
+ * batadv_info() - Store message in debug buffer and print it to kmsg buffer
+ * @net_dev: the soft interface net device
+ * @fmt: format string
+ * @arg...: variable arguments
+ */
 #define batadv_info(net_dev, fmt, arg...)				\
 	do {								\
 		struct net_device *_netdev = (net_dev);                 \
@@ -101,6 +136,13 @@ static inline void _batadv_dbg(int type __always_unused,
 		batadv_dbg(BATADV_DBG_ALL, _batpriv, fmt, ## arg);	\
 		pr_info("%s: " fmt, _netdev->name, ## arg);		\
 	} while (0)
+
+/**
+ * batadv_err() - Store error in debug buffer and print it to kmsg buffer
+ * @net_dev: the soft interface net device
+ * @fmt: format string
+ * @arg...: variable arguments
+ */
 #define batadv_err(net_dev, fmt, arg...)				\
 	do {								\
 		struct net_device *_netdev = (net_dev);                 \
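
The logging macros documented above differ only in ratelimiting and in
whether they additionally hit the kmsg buffer; a hedged usage sketch
(addr is a made-up variable):

    batadv_dbg(BATADV_DBG_ROUTES, bat_priv,
               "route for %pM changed\n", addr);   /* debug log only */
    batadv_dbg_ratelimited(BATADV_DBG_TT, bat_priv,
                           "tt update\n");         /* ratelimited */
    batadv_info(soft_iface, "mesh activated\n");   /* debug log + kmsg */
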
diff --git a/net/batman-adv/main.c b/net/batman-adv/main.c
index 4daed7ad..d31c826 100644
--- a/net/batman-adv/main.c
+++ b/net/batman-adv/main.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
 /* Copyright (C) 2007-2017  B.A.T.M.A.N. contributors:
  *
  * Marek Lindner, Simon Wunderlich
@@ -18,12 +19,12 @@
 #include "main.h"
 
 #include <linux/atomic.h>
-#include <linux/bug.h>
+#include <linux/build_bug.h>
 #include <linux/byteorder/generic.h>
 #include <linux/crc32c.h>
 #include <linux/errno.h>
-#include <linux/fs.h>
 #include <linux/genetlink.h>
+#include <linux/gfp.h>
 #include <linux/if_ether.h>
 #include <linux/if_vlan.h>
 #include <linux/init.h>
@@ -45,6 +46,7 @@
 #include <linux/workqueue.h>
 #include <net/dsfield.h>
 #include <net/rtnetlink.h>
+#include <uapi/linux/batadv_packet.h>
 #include <uapi/linux/batman_adv.h>
 
 #include "bat_algo.h"
@@ -62,7 +64,6 @@
 #include "netlink.h"
 #include "network-coding.h"
 #include "originator.h"
-#include "packet.h"
 #include "routing.h"
 #include "send.h"
 #include "soft-interface.h"
@@ -139,6 +140,12 @@ static void __exit batadv_exit(void)
 	batadv_tt_cache_destroy();
 }
 
+/**
+ * batadv_mesh_init() - Initialize soft interface
+ * @soft_iface: netdev struct of the soft interface
+ *
+ * Return: 0 on success or negative error number in case of failure
+ */
 int batadv_mesh_init(struct net_device *soft_iface)
 {
 	struct batadv_priv *bat_priv = netdev_priv(soft_iface);
@@ -216,6 +223,10 @@ int batadv_mesh_init(struct net_device *soft_iface)
 	return ret;
 }
 
+/**
+ * batadv_mesh_free() - Deinitialize soft interface
+ * @soft_iface: netdev struct of the soft interface
+ */
 void batadv_mesh_free(struct net_device *soft_iface)
 {
 	struct batadv_priv *bat_priv = netdev_priv(soft_iface);
@@ -255,8 +266,8 @@ void batadv_mesh_free(struct net_device *soft_iface)
 }
 
 /**
- * batadv_is_my_mac - check if the given mac address belongs to any of the real
- * interfaces in the current mesh
+ * batadv_is_my_mac() - check if the given mac address belongs to any of the
+ *  real interfaces in the current mesh
  * @bat_priv: the bat priv with all the soft interface information
  * @addr: the address to check
  *
@@ -286,7 +297,7 @@ bool batadv_is_my_mac(struct batadv_priv *bat_priv, const u8 *addr)
 
 #ifdef CONFIG_BATMAN_ADV_DEBUGFS
 /**
- * batadv_seq_print_text_primary_if_get - called from debugfs table printing
+ * batadv_seq_print_text_primary_if_get() - called from debugfs table printing
  *  function that requires the primary interface
  * @seq: debugfs table seq_file struct
  *
@@ -323,7 +334,7 @@ batadv_seq_print_text_primary_if_get(struct seq_file *seq)
 #endif
 
 /**
- * batadv_max_header_len - calculate maximum encapsulation overhead for a
+ * batadv_max_header_len() - calculate maximum encapsulation overhead for a
  *  payload packet
  *
  * Return: the maximum encapsulation overhead in bytes.
@@ -348,7 +359,7 @@ int batadv_max_header_len(void)
 }
 
 /**
- * batadv_skb_set_priority - sets skb priority according to packet content
+ * batadv_skb_set_priority() - sets skb priority according to packet content
  * @skb: the packet to be sent
  * @offset: offset to the packet content
  *
@@ -412,6 +423,16 @@ static int batadv_recv_unhandled_packet(struct sk_buff *skb,
 /* incoming packets with the batman ethertype received on any active hard
  * interface
  */
+
+/**
+ * batadv_batman_skb_recv() - Handle incoming message from a hard interface
+ * @skb: the received packet
+ * @dev: the net device that the packet was received on
+ * @ptype: packet type of incoming packet (ETH_P_BATMAN)
+ * @orig_dev: the original receive net device (e.g. bonded device)
+ *
+ * Return: NET_RX_SUCCESS on success or NET_RX_DROP in case of failure
+ */
 int batadv_batman_skb_recv(struct sk_buff *skb, struct net_device *dev,
 			   struct packet_type *ptype,
 			   struct net_device *orig_dev)
@@ -535,6 +556,13 @@ static void batadv_recv_handler_init(void)
 	batadv_rx_handler[BATADV_UNICAST_FRAG] = batadv_recv_frag_packet;
 }
 
+/**
+ * batadv_recv_handler_register() - Register handler for batman-adv packet type
+ * @packet_type: batadv_packettype which should be handled
+ * @recv_handler: receive handler for the packet type
+ *
+ * Return: 0 on success or negative error number in case of failure
+ */
 int
 batadv_recv_handler_register(u8 packet_type,
 			     int (*recv_handler)(struct sk_buff *,
@@ -552,13 +580,17 @@ batadv_recv_handler_register(u8 packet_type,
 	return 0;
 }
 
+/**
+ * batadv_recv_handler_unregister() - Unregister handler for packet type
+ * @packet_type: batadv_packettype which should no longer be handled
+ */
 void batadv_recv_handler_unregister(u8 packet_type)
 {
 	batadv_rx_handler[packet_type] = batadv_recv_unhandled_packet;
 }
 
 /**
- * batadv_skb_crc32 - calculate CRC32 of the whole packet and skip bytes in
+ * batadv_skb_crc32() - calculate CRC32 of the whole packet and skip bytes in
  *  the header
  * @skb: skb pointing to fragmented socket buffers
  * @payload_ptr: Pointer to position inside the head buffer of the skb
@@ -591,7 +623,7 @@ __be32 batadv_skb_crc32(struct sk_buff *skb, u8 *payload_ptr)
 }
 
 /**
- * batadv_get_vid - extract the VLAN identifier from skb if any
+ * batadv_get_vid() - extract the VLAN identifier from skb if any
  * @skb: the buffer containing the packet
  * @header_len: length of the batman header preceding the ethernet header
  *
@@ -618,7 +650,7 @@ unsigned short batadv_get_vid(struct sk_buff *skb, size_t header_len)
 }
 
 /**
- * batadv_vlan_ap_isola_get - return the AP isolation status for the given vlan
+ * batadv_vlan_ap_isola_get() - return AP isolation status for the given vlan
  * @bat_priv: the bat priv with all the soft interface information
 * @vid: the VLAN identifier for which the AP isolation attribute has to be
 *  looked up
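
batadv_recv_handler_register() wires a packet type to a receive
callback; a hedged sketch, with BATADV_MY_TYPE standing in for a real
batadv_packettype value:

    static int my_recv_handler(struct sk_buff *skb,
                               struct batadv_hard_iface *recv_if)
    {
        /* consume the skb or hand it back to the stack */
        return NET_RX_SUCCESS;
    }

    if (batadv_recv_handler_register(BATADV_MY_TYPE, my_recv_handler) < 0)
        pr_err("packet type already has a handler\n");
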
diff --git a/net/batman-adv/main.h b/net/batman-adv/main.h
index edb2f23..f7ba3f9 100644
--- a/net/batman-adv/main.h
+++ b/net/batman-adv/main.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
 /* Copyright (C) 2007-2017  B.A.T.M.A.N. contributors:
  *
  * Marek Lindner, Simon Wunderlich
@@ -24,7 +25,7 @@
 #define BATADV_DRIVER_DEVICE "batman-adv"
 
 #ifndef BATADV_SOURCE_VERSION
-#define BATADV_SOURCE_VERSION "2017.4"
+#define BATADV_SOURCE_VERSION "2018.0"
 #endif
 
 /* B.A.T.M.A.N. parameters */
@@ -140,24 +141,56 @@
  */
 #define BATADV_TP_MAX_NUM 5
 
+/**
+ * enum batadv_mesh_state - State of a soft interface
+ */
 enum batadv_mesh_state {
+	/** @BATADV_MESH_INACTIVE: soft interface is not yet running */
 	BATADV_MESH_INACTIVE,
+
+	/** @BATADV_MESH_ACTIVE: interface is up and running */
 	BATADV_MESH_ACTIVE,
+
+	/** @BATADV_MESH_DEACTIVATING: interface is getting shut down */
 	BATADV_MESH_DEACTIVATING,
 };
 
 #define BATADV_BCAST_QUEUE_LEN		256
 #define BATADV_BATMAN_QUEUE_LEN	256
 
+/**
+ * enum batadv_uev_action - action type of uevent
+ */
 enum batadv_uev_action {
+	/** @BATADV_UEV_ADD: gateway was selected (after none was selected) */
 	BATADV_UEV_ADD = 0,
+
+	/**
+	 * @BATADV_UEV_DEL: selected gateway was removed and none is selected
+	 * anymore
+	 */
 	BATADV_UEV_DEL,
+
+	/**
+	 * @BATADV_UEV_CHANGE: a different gateway was selected as the new gateway
+	 */
 	BATADV_UEV_CHANGE,
+
+	/**
+	 * @BATADV_UEV_LOOPDETECT: loop was detected which cannot be handled by
+	 * bridge loop avoidance
+	 */
 	BATADV_UEV_LOOPDETECT,
 };
 
+/**
+ * enum batadv_uev_type - Type of uevent
+ */
 enum batadv_uev_type {
+	/** @BATADV_UEV_GW: selected gateway was modified */
 	BATADV_UEV_GW = 0,
+
+	/** @BATADV_UEV_BLA: bridge loop avoidance event */
 	BATADV_UEV_BLA,
 };
 
@@ -184,16 +217,14 @@ enum batadv_uev_type {
 
 /* Kernel headers */
 
-#include <linux/bitops.h> /* for packet.h */
 #include <linux/compiler.h>
 #include <linux/etherdevice.h>
-#include <linux/if_ether.h> /* for packet.h */
 #include <linux/if_vlan.h>
 #include <linux/jiffies.h>
 #include <linux/percpu.h>
 #include <linux/types.h>
+#include <uapi/linux/batadv_packet.h>
 
-#include "packet.h"
 #include "types.h"
 
 struct net_device;
@@ -202,7 +233,7 @@ struct seq_file;
 struct sk_buff;
 
 /**
- * batadv_print_vid - return printable version of vid information
+ * batadv_print_vid() - return printable version of vid information
  * @vid: the VLAN identifier
  *
  * Return: -1 when no VLAN is used, VLAN id otherwise
@@ -238,7 +269,7 @@ void batadv_recv_handler_unregister(u8 packet_type);
 __be32 batadv_skb_crc32(struct sk_buff *skb, u8 *payload_ptr);
 
 /**
- * batadv_compare_eth - Compare two not u16 aligned Ethernet addresses
+ * batadv_compare_eth() - Compare two not u16 aligned Ethernet addresses
  * @data1: Pointer to a six-byte array containing the Ethernet address
 * @data2: Pointer to another six-byte array containing the Ethernet address
  *
@@ -252,7 +283,7 @@ static inline bool batadv_compare_eth(const void *data1, const void *data2)
 }
 
 /**
- * batadv_has_timed_out - compares current time (jiffies) and timestamp +
+ * batadv_has_timed_out() - compares current time (jiffies) and timestamp +
  *  timeout
  * @timestamp:		base value to compare with (in jiffies)
  * @timeout:		added to base value before comparing (in milliseconds)
@@ -265,40 +296,96 @@ static inline bool batadv_has_timed_out(unsigned long timestamp,
 	return time_is_before_jiffies(timestamp + msecs_to_jiffies(timeout));
 }
 
+/**
+ * batadv_atomic_dec_not_zero() - Decrease unless the number is 0
+ * @v: pointer of type atomic_t
+ *
+ * Return: non-zero if v was not 0, and zero otherwise.
+ */
 #define batadv_atomic_dec_not_zero(v)	atomic_add_unless((v), -1, 0)
 
-/* Returns the smallest signed integer in two's complement with the sizeof x */
+/**
+ * batadv_smallest_signed_int() - Returns the smallest signed integer in two's
+ *  complement with the size of @x
+ * @x: integer whose size determines the type width
+ *
+ * Return: smallest signed integer of that type
+ */
 #define batadv_smallest_signed_int(x) (1u << (7u + 8u * (sizeof(x) - 1u)))
 
-/* Checks if a sequence number x is a predecessor/successor of y.
- * they handle overflows/underflows and can correctly check for a
- * predecessor/successor unless the variable sequence number has grown by
- * more then 2**(bitwidth(x)-1)-1.
+/**
+ * batadv_seq_before() - Checks if a sequence number x is a predecessor of y
+ * @x: potential predecessor of @y
+ * @y: value to compare @x against
+ *
+ * It handles overflows/underflows and can correctly check for a predecessor
+ * unless the variable sequence number has grown by more than
+ * 2**(bitwidth(x)-1)-1.
+ *
  * This means that for a u8 with the maximum value 255, it would think:
- *  - when adding nothing - it is neither a predecessor nor a successor
- *  - before adding more than 127 to the starting value - it is a predecessor,
- *  - when adding 128 - it is neither a predecessor nor a successor,
- *  - after adding more than 127 to the starting value - it is a successor
+ *
+ * * when adding nothing - it is neither a predecessor nor a successor
+ * * before adding more than 127 to the starting value - it is a predecessor,
+ * * when adding 128 - it is neither a predecessor nor a successor,
+ * * after adding more than 127 to the starting value - it is a successor
+ *
+ * Return: true when x is a predecessor of y, false otherwise
  */
 #define batadv_seq_before(x, y) ({typeof(x)_d1 = (x); \
 				 typeof(y)_d2 = (y); \
 				 typeof(x)_dummy = (_d1 - _d2); \
 				 (void)(&_d1 == &_d2); \
 				 _dummy > batadv_smallest_signed_int(_dummy); })
+
+/**
+ * batadv_seq_after() - Checks if a sequence number x is a successor of y
+ * @x: potential successor of @y
+ * @y: value to compare @x against
+ *
+ * It handles overflows/underflows and can correctly check for a successor
+ * unless the variable sequence number has grown by more than
+ * 2**(bitwidth(x)-1)-1.
+ *
+ * This means that for a u8 with the maximum value 255, it would think:
+ *
+ * * when adding nothing - it is neither a predecessor nor a successor
+ * * before adding more than 127 to the starting value - it is a predecessor,
+ * * when adding 128 - it is neither a predecessor nor a successor,
+ * * after adding more than 127 to the starting value - it is a successor
+ *
+ * Return: true when x is a successor of y, false otherwise
+ */
 #define batadv_seq_after(x, y) batadv_seq_before(y, x)
 
-/* Stop preemption on local cpu while incrementing the counter */
+/**
+ * batadv_add_counter() - Add to per cpu statistics counter of soft interface
+ * @bat_priv: the bat priv with all the soft interface information
+ * @idx: counter index which should be modified
+ * @count: value to increase counter by
+ *
+ * Stop preemption on local cpu while incrementing the counter
+ */
 static inline void batadv_add_counter(struct batadv_priv *bat_priv, size_t idx,
 				      size_t count)
 {
 	this_cpu_add(bat_priv->bat_counters[idx], count);
 }
 
+/**
+ * batadv_inc_counter() - Increase per cpu statistics counter of soft interface
+ * @b: the bat priv with all the soft interface information
+ * @i: counter index which should be modified
+ */
 #define batadv_inc_counter(b, i) batadv_add_counter(b, i, 1)
 
-/* Define a macro to reach the control buffer of the skb. The members of the
- * control buffer are defined in struct batadv_skb_cb in types.h.
- * The macro is inspired by the similar macro TCP_SKB_CB() in tcp.h.
+/**
+ * BATADV_SKB_CB() - Get batadv_skb_cb from skb control buffer
+ * @__skb: skb holding the control buffer
+ *
+ * The members of the control buffer are defined in struct batadv_skb_cb in
+ * types.h. The macro is inspired by the similar macro TCP_SKB_CB() in tcp.h.
+ *
+ * Return: pointer to the batadv_skb_cb of the skb
  */
 #define BATADV_SKB_CB(__skb)       ((struct batadv_skb_cb *)&((__skb)->cb[0]))
 
diff --git a/net/batman-adv/multicast.c b/net/batman-adv/multicast.c
index e553a87..cbdeb47 100644
--- a/net/batman-adv/multicast.c
+++ b/net/batman-adv/multicast.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
 /* Copyright (C) 2014-2017  B.A.T.M.A.N. contributors:
  *
  * Linus Lüssing
@@ -24,7 +25,7 @@
 #include <linux/byteorder/generic.h>
 #include <linux/errno.h>
 #include <linux/etherdevice.h>
-#include <linux/fs.h>
+#include <linux/gfp.h>
 #include <linux/icmpv6.h>
 #include <linux/if_bridge.h>
 #include <linux/if_ether.h>
@@ -54,18 +55,18 @@
 #include <net/if_inet6.h>
 #include <net/ip.h>
 #include <net/ipv6.h>
+#include <uapi/linux/batadv_packet.h>
 
 #include "hard-interface.h"
 #include "hash.h"
 #include "log.h"
-#include "packet.h"
 #include "translation-table.h"
 #include "tvlv.h"
 
 static void batadv_mcast_mla_update(struct work_struct *work);
 
 /**
- * batadv_mcast_start_timer - schedule the multicast periodic worker
+ * batadv_mcast_start_timer() - schedule the multicast periodic worker
  * @bat_priv: the bat priv with all the soft interface information
  */
 static void batadv_mcast_start_timer(struct batadv_priv *bat_priv)
@@ -75,7 +76,7 @@ static void batadv_mcast_start_timer(struct batadv_priv *bat_priv)
 }
 
 /**
- * batadv_mcast_get_bridge - get the bridge on top of the softif if it exists
+ * batadv_mcast_get_bridge() - get the bridge on top of the softif if it exists
  * @soft_iface: netdev struct of the mesh interface
  *
  * If the given soft interface has a bridge on top then the refcount
@@ -101,7 +102,7 @@ static struct net_device *batadv_mcast_get_bridge(struct net_device *soft_iface)
 }
 
 /**
- * batadv_mcast_mla_softif_get - get softif multicast listeners
+ * batadv_mcast_mla_softif_get() - get softif multicast listeners
  * @dev: the device to collect multicast addresses from
  * @mcast_list: a list to put found addresses into
  *
@@ -147,7 +148,7 @@ static int batadv_mcast_mla_softif_get(struct net_device *dev,
 }
 
 /**
- * batadv_mcast_mla_is_duplicate - check whether an address is in a list
+ * batadv_mcast_mla_is_duplicate() - check whether an address is in a list
  * @mcast_addr: the multicast address to check
  * @mcast_list: the list with multicast addresses to search in
  *
@@ -167,7 +168,7 @@ static bool batadv_mcast_mla_is_duplicate(u8 *mcast_addr,
 }
 
 /**
- * batadv_mcast_mla_br_addr_cpy - copy a bridge multicast address
+ * batadv_mcast_mla_br_addr_cpy() - copy a bridge multicast address
  * @dst: destination to write to - a multicast MAC address
  * @src: source to read from - a multicast IP address
  *
@@ -191,7 +192,7 @@ static void batadv_mcast_mla_br_addr_cpy(char *dst, const struct br_ip *src)
 }
 
 /**
- * batadv_mcast_mla_bridge_get - get bridged-in multicast listeners
+ * batadv_mcast_mla_bridge_get() - get bridged-in multicast listeners
  * @dev: a bridge slave whose bridge to collect multicast addresses from
  * @mcast_list: a list to put found addresses into
  *
@@ -244,7 +245,7 @@ static int batadv_mcast_mla_bridge_get(struct net_device *dev,
 }
 
 /**
- * batadv_mcast_mla_list_free - free a list of multicast addresses
+ * batadv_mcast_mla_list_free() - free a list of multicast addresses
  * @mcast_list: the list to free
  *
  * Removes and frees all items in the given mcast_list.
@@ -261,7 +262,7 @@ static void batadv_mcast_mla_list_free(struct hlist_head *mcast_list)
 }
 
 /**
- * batadv_mcast_mla_tt_retract - clean up multicast listener announcements
+ * batadv_mcast_mla_tt_retract() - clean up multicast listener announcements
  * @bat_priv: the bat priv with all the soft interface information
  * @mcast_list: a list of addresses which should _not_ be removed
  *
@@ -297,7 +298,7 @@ static void batadv_mcast_mla_tt_retract(struct batadv_priv *bat_priv,
 }
 
 /**
- * batadv_mcast_mla_tt_add - add multicast listener announcements
+ * batadv_mcast_mla_tt_add() - add multicast listener announcements
  * @bat_priv: the bat priv with all the soft interface information
  * @mcast_list: a list of addresses which are going to get added
  *
@@ -333,7 +334,7 @@ static void batadv_mcast_mla_tt_add(struct batadv_priv *bat_priv,
 }
 
 /**
- * batadv_mcast_has_bridge - check whether the soft-iface is bridged
+ * batadv_mcast_has_bridge() - check whether the soft-iface is bridged
  * @bat_priv: the bat priv with all the soft interface information
  *
  * Checks whether there is a bridge on top of our soft interface.
@@ -354,7 +355,8 @@ static bool batadv_mcast_has_bridge(struct batadv_priv *bat_priv)
 }
 
 /**
- * batadv_mcast_querier_log - debug output regarding the querier status on link
+ * batadv_mcast_querier_log() - debug output regarding the querier status on
+ *  link
  * @bat_priv: the bat priv with all the soft interface information
  * @str_proto: a string for the querier protocol (e.g. "IGMP" or "MLD")
  * @old_state: the previous querier state on our link
@@ -405,7 +407,8 @@ batadv_mcast_querier_log(struct batadv_priv *bat_priv, char *str_proto,
 }
 
 /**
- * batadv_mcast_bridge_log - debug output for topology changes in bridged setups
+ * batadv_mcast_bridge_log() - debug output for topology changes in bridged
+ *  setups
  * @bat_priv: the bat priv with all the soft interface information
  * @bridged: a flag about whether the soft interface is currently bridged or not
  * @querier_ipv4: (maybe) new status of a potential, selected IGMP querier
@@ -444,7 +447,7 @@ batadv_mcast_bridge_log(struct batadv_priv *bat_priv, bool bridged,
 }
 
 /**
- * batadv_mcast_flags_logs - output debug information about mcast flag changes
+ * batadv_mcast_flags_log() - output debug information about mcast flag changes
  * @bat_priv: the bat priv with all the soft interface information
  * @flags: flags indicating the new multicast state
  *
@@ -470,7 +473,7 @@ static void batadv_mcast_flags_log(struct batadv_priv *bat_priv, u8 flags)
 }
 
 /**
- * batadv_mcast_mla_tvlv_update - update multicast tvlv
+ * batadv_mcast_mla_tvlv_update() - update multicast tvlv
  * @bat_priv: the bat priv with all the soft interface information
  *
  * Updates the own multicast tvlv with our current multicast related settings,
@@ -545,7 +548,7 @@ static bool batadv_mcast_mla_tvlv_update(struct batadv_priv *bat_priv)
 }
 
 /**
- * __batadv_mcast_mla_update - update the own MLAs
+ * __batadv_mcast_mla_update() - update the own MLAs
  * @bat_priv: the bat priv with all the soft interface information
  *
  * Updates the own multicast listener announcements in the translation
@@ -582,7 +585,7 @@ static void __batadv_mcast_mla_update(struct batadv_priv *bat_priv)
 }
 
 /**
- * batadv_mcast_mla_update - update the own MLAs
+ * batadv_mcast_mla_update() - update the own MLAs
  * @work: kernel work struct
  *
  * Updates the own multicast listener announcements in the translation
@@ -605,7 +608,7 @@ static void batadv_mcast_mla_update(struct work_struct *work)
 }
 
 /**
- * batadv_mcast_is_report_ipv4 - check for IGMP reports
+ * batadv_mcast_is_report_ipv4() - check for IGMP reports
  * @skb: the ethernet frame destined for the mesh
  *
  * This call might reallocate skb data.
@@ -630,7 +633,8 @@ static bool batadv_mcast_is_report_ipv4(struct sk_buff *skb)
 }
 
 /**
- * batadv_mcast_forw_mode_check_ipv4 - check for optimized forwarding potential
+ * batadv_mcast_forw_mode_check_ipv4() - check for optimized forwarding
+ *  potential
  * @bat_priv: the bat priv with all the soft interface information
  * @skb: the IPv4 packet to check
  * @is_unsnoopable: stores whether the destination is snoopable
@@ -671,7 +675,7 @@ static int batadv_mcast_forw_mode_check_ipv4(struct batadv_priv *bat_priv,
 }
 
 /**
- * batadv_mcast_is_report_ipv6 - check for MLD reports
+ * batadv_mcast_is_report_ipv6() - check for MLD reports
  * @skb: the ethernet frame destined for the mesh
  *
  * This call might reallocate skb data.
@@ -695,7 +699,8 @@ static bool batadv_mcast_is_report_ipv6(struct sk_buff *skb)
 }
 
 /**
- * batadv_mcast_forw_mode_check_ipv6 - check for optimized forwarding potential
+ * batadv_mcast_forw_mode_check_ipv6() - check for optimized forwarding
+ *  potential
  * @bat_priv: the bat priv with all the soft interface information
  * @skb: the IPv6 packet to check
  * @is_unsnoopable: stores whether the destination is snoopable
@@ -736,7 +741,7 @@ static int batadv_mcast_forw_mode_check_ipv6(struct batadv_priv *bat_priv,
 }
 
 /**
- * batadv_mcast_forw_mode_check - check for optimized forwarding potential
+ * batadv_mcast_forw_mode_check() - check for optimized forwarding potential
  * @bat_priv: the bat priv with all the soft interface information
  * @skb: the multicast frame to check
  * @is_unsnoopable: stores whether the destination is snoopable
@@ -774,7 +779,7 @@ static int batadv_mcast_forw_mode_check(struct batadv_priv *bat_priv,
 }
 
 /**
- * batadv_mcast_forw_want_all_ip_count - count nodes with unspecific mcast
+ * batadv_mcast_forw_want_all_ip_count() - count nodes with unspecific mcast
  *  interest
  * @bat_priv: the bat priv with all the soft interface information
  * @ethhdr: ethernet header of a packet
@@ -798,7 +803,7 @@ static int batadv_mcast_forw_want_all_ip_count(struct batadv_priv *bat_priv,
 }
 
 /**
- * batadv_mcast_forw_tt_node_get - get a multicast tt node
+ * batadv_mcast_forw_tt_node_get() - get a multicast tt node
  * @bat_priv: the bat priv with all the soft interface information
  * @ethhdr: the ether header containing the multicast destination
  *
@@ -814,7 +819,7 @@ batadv_mcast_forw_tt_node_get(struct batadv_priv *bat_priv,
 }
 
 /**
- * batadv_mcast_forw_ipv4_node_get - get a node with an ipv4 flag
+ * batadv_mcast_forw_ipv4_node_get() - get a node with an ipv4 flag
  * @bat_priv: the bat priv with all the soft interface information
  *
  * Return: an orig_node which has the BATADV_MCAST_WANT_ALL_IPV4 flag set and
@@ -841,7 +846,7 @@ batadv_mcast_forw_ipv4_node_get(struct batadv_priv *bat_priv)
 }
 
 /**
- * batadv_mcast_forw_ipv6_node_get - get a node with an ipv6 flag
+ * batadv_mcast_forw_ipv6_node_get() - get a node with an ipv6 flag
  * @bat_priv: the bat priv with all the soft interface information
  *
  * Return: an orig_node which has the BATADV_MCAST_WANT_ALL_IPV6 flag set
@@ -868,7 +873,7 @@ batadv_mcast_forw_ipv6_node_get(struct batadv_priv *bat_priv)
 }
 
 /**
- * batadv_mcast_forw_ip_node_get - get a node with an ipv4/ipv6 flag
+ * batadv_mcast_forw_ip_node_get() - get a node with an ipv4/ipv6 flag
  * @bat_priv: the bat priv with all the soft interface information
  * @ethhdr: an ethernet header to determine the protocol family from
  *
@@ -892,7 +897,7 @@ batadv_mcast_forw_ip_node_get(struct batadv_priv *bat_priv,
 }
 
 /**
- * batadv_mcast_forw_unsnoop_node_get - get a node with an unsnoopable flag
+ * batadv_mcast_forw_unsnoop_node_get() - get a node with an unsnoopable flag
  * @bat_priv: the bat priv with all the soft interface information
  *
  * Return: an orig_node which has the BATADV_MCAST_WANT_ALL_UNSNOOPABLES flag
@@ -919,7 +924,7 @@ batadv_mcast_forw_unsnoop_node_get(struct batadv_priv *bat_priv)
 }
 
 /**
- * batadv_mcast_forw_mode - check on how to forward a multicast packet
+ * batadv_mcast_forw_mode() - check on how to forward a multicast packet
  * @bat_priv: the bat priv with all the soft interface information
  * @skb: The multicast packet to check
  * @orig: an originator to be set to forward the skb to
@@ -973,7 +978,7 @@ batadv_mcast_forw_mode(struct batadv_priv *bat_priv, struct sk_buff *skb,
 }
 
 /**
- * batadv_mcast_want_unsnoop_update - update unsnoop counter and list
+ * batadv_mcast_want_unsnoop_update() - update unsnoop counter and list
  * @bat_priv: the bat priv with all the soft interface information
  * @orig: the orig_node which multicast state might have changed of
  * @mcast_flags: flags indicating the new multicast state
@@ -1018,7 +1023,7 @@ static void batadv_mcast_want_unsnoop_update(struct batadv_priv *bat_priv,
 }
 
 /**
- * batadv_mcast_want_ipv4_update - update want-all-ipv4 counter and list
+ * batadv_mcast_want_ipv4_update() - update want-all-ipv4 counter and list
  * @bat_priv: the bat priv with all the soft interface information
  * @orig: the orig_node which multicast state might have changed of
  * @mcast_flags: flags indicating the new multicast state
@@ -1063,7 +1068,7 @@ static void batadv_mcast_want_ipv4_update(struct batadv_priv *bat_priv,
 }
 
 /**
- * batadv_mcast_want_ipv6_update - update want-all-ipv6 counter and list
+ * batadv_mcast_want_ipv6_update() - update want-all-ipv6 counter and list
  * @bat_priv: the bat priv with all the soft interface information
  * @orig: the orig_node which multicast state might have changed of
  * @mcast_flags: flags indicating the new multicast state
@@ -1108,7 +1113,7 @@ static void batadv_mcast_want_ipv6_update(struct batadv_priv *bat_priv,
 }
 
 /**
- * batadv_mcast_tvlv_ogm_handler - process incoming multicast tvlv container
+ * batadv_mcast_tvlv_ogm_handler() - process incoming multicast tvlv container
  * @bat_priv: the bat priv with all the soft interface information
  * @orig: the orig_node of the ogm
  * @flags: flags indicating the tvlv state (see batadv_tvlv_handler_flags)
@@ -1164,7 +1169,7 @@ static void batadv_mcast_tvlv_ogm_handler(struct batadv_priv *bat_priv,
 }
 
 /**
- * batadv_mcast_init - initialize the multicast optimizations structures
+ * batadv_mcast_init() - initialize the multicast optimizations structures
  * @bat_priv: the bat priv with all the soft interface information
  */
 void batadv_mcast_init(struct batadv_priv *bat_priv)
@@ -1179,7 +1184,7 @@ void batadv_mcast_init(struct batadv_priv *bat_priv)
 
 #ifdef CONFIG_BATMAN_ADV_DEBUGFS
 /**
- * batadv_mcast_flags_print_header - print own mcast flags to debugfs table
+ * batadv_mcast_flags_print_header() - print own mcast flags to debugfs table
  * @bat_priv: the bat priv with all the soft interface information
  * @seq: debugfs table seq_file struct
  *
@@ -1220,7 +1225,7 @@ static void batadv_mcast_flags_print_header(struct batadv_priv *bat_priv,
 }
 
 /**
- * batadv_mcast_flags_seq_print_text - print the mcast flags of other nodes
+ * batadv_mcast_flags_seq_print_text() - print the mcast flags of other nodes
  * @seq: seq file to print on
  * @offset: not used
  *
@@ -1281,7 +1286,7 @@ int batadv_mcast_flags_seq_print_text(struct seq_file *seq, void *offset)
 #endif
 
 /**
- * batadv_mcast_free - free the multicast optimizations structures
+ * batadv_mcast_free() - free the multicast optimizations structures
  * @bat_priv: the bat priv with all the soft interface information
  */
 void batadv_mcast_free(struct batadv_priv *bat_priv)
@@ -1296,7 +1301,7 @@ void batadv_mcast_free(struct batadv_priv *bat_priv)
 }
 
 /**
- * batadv_mcast_purge_orig - reset originator global mcast state modifications
+ * batadv_mcast_purge_orig() - reset originator global mcast state modifications
  * @orig: the originator which is going to get purged
  */
 void batadv_mcast_purge_orig(struct batadv_orig_node *orig)
diff --git a/net/batman-adv/multicast.h b/net/batman-adv/multicast.h
index 2a78cdd..3ac0633 100644
--- a/net/batman-adv/multicast.h
+++ b/net/batman-adv/multicast.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
 /* Copyright (C) 2014-2017  B.A.T.M.A.N. contributors:
  *
  * Linus Lüssing
@@ -25,15 +26,21 @@ struct sk_buff;
 
 /**
  * enum batadv_forw_mode - the way a packet should be forwarded as
- * @BATADV_FORW_ALL: forward the packet to all nodes (currently via classic
- *  flooding)
- * @BATADV_FORW_SINGLE: forward the packet to a single node (currently via the
- *  BATMAN unicast routing protocol)
- * @BATADV_FORW_NONE: don't forward, drop it
  */
 enum batadv_forw_mode {
+	/**
+	 * @BATADV_FORW_ALL: forward the packet to all nodes (currently via
+	 *  classic flooding)
+	 */
 	BATADV_FORW_ALL,
+
+	/**
+	 * @BATADV_FORW_SINGLE: forward the packet to a single node (currently
+	 *  via the BATMAN unicast routing protocol)
+	 */
 	BATADV_FORW_SINGLE,
+
+	/** @BATADV_FORW_NONE: don't forward, drop it */
 	BATADV_FORW_NONE,
 };
 
diff --git a/net/batman-adv/netlink.c b/net/batman-adv/netlink.c
index ab13b4d..a823d38 100644
--- a/net/batman-adv/netlink.c
+++ b/net/batman-adv/netlink.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
 /* Copyright (C) 2016-2017  B.A.T.M.A.N. contributors:
  *
  * Matthias Schiffer
@@ -23,8 +24,8 @@
 #include <linux/cache.h>
 #include <linux/errno.h>
 #include <linux/export.h>
-#include <linux/fs.h>
 #include <linux/genetlink.h>
+#include <linux/gfp.h>
 #include <linux/if_ether.h>
 #include <linux/init.h>
 #include <linux/kernel.h>
@@ -39,6 +40,7 @@
 #include <net/genetlink.h>
 #include <net/netlink.h>
 #include <net/sock.h>
+#include <uapi/linux/batadv_packet.h>
 #include <uapi/linux/batman_adv.h>
 
 #include "bat_algo.h"
@@ -46,7 +48,6 @@
 #include "gateway_client.h"
 #include "hard-interface.h"
 #include "originator.h"
-#include "packet.h"
 #include "soft-interface.h"
 #include "tp_meter.h"
 #include "translation-table.h"
@@ -99,7 +100,7 @@ static const struct nla_policy batadv_netlink_policy[NUM_BATADV_ATTR] = {
 };
 
 /**
- * batadv_netlink_get_ifindex - Extract an interface index from a message
+ * batadv_netlink_get_ifindex() - Extract an interface index from a message
  * @nlh: Message header
  * @attrtype: Attribute which holds an interface index
  *
@@ -114,7 +115,7 @@ batadv_netlink_get_ifindex(const struct nlmsghdr *nlh, int attrtype)
 }
 
 /**
- * batadv_netlink_mesh_info_put - fill in generic information about mesh
+ * batadv_netlink_mesh_info_put() - fill in generic information about mesh
  *  interface
  * @msg: netlink message to be sent back
  * @soft_iface: interface for which the data should be taken
@@ -169,7 +170,7 @@ batadv_netlink_mesh_info_put(struct sk_buff *msg, struct net_device *soft_iface)
 }
 
 /**
- * batadv_netlink_get_mesh_info - handle incoming BATADV_CMD_GET_MESH_INFO
+ * batadv_netlink_get_mesh_info() - handle incoming BATADV_CMD_GET_MESH_INFO
  *  netlink request
  * @skb: received netlink message
  * @info: receiver information
@@ -230,7 +231,7 @@ batadv_netlink_get_mesh_info(struct sk_buff *skb, struct genl_info *info)
 }
 
 /**
- * batadv_netlink_tp_meter_put - Fill information of started tp_meter session
+ * batadv_netlink_tp_meter_put() - Fill information of started tp_meter session
  * @msg: netlink message to be sent back
  * @cookie: tp meter session cookie
  *
@@ -246,7 +247,7 @@ batadv_netlink_tp_meter_put(struct sk_buff *msg, u32 cookie)
 }
 
 /**
- * batadv_netlink_tpmeter_notify - send tp_meter result via netlink to client
+ * batadv_netlink_tpmeter_notify() - send tp_meter result via netlink to client
  * @bat_priv: the bat priv with all the soft interface information
  * @dst: destination of tp_meter session
  * @result: reason for tp meter session stop
@@ -309,7 +310,7 @@ int batadv_netlink_tpmeter_notify(struct batadv_priv *bat_priv, const u8 *dst,
 }
 
 /**
- * batadv_netlink_tp_meter_start - Start a new tp_meter session
+ * batadv_netlink_tp_meter_start() - Start a new tp_meter session
  * @skb: received netlink message
  * @info: receiver information
  *
@@ -386,7 +387,7 @@ batadv_netlink_tp_meter_start(struct sk_buff *skb, struct genl_info *info)
 }
 
 /**
- * batadv_netlink_tp_meter_start - Cancel a running tp_meter session
+ * batadv_netlink_tp_meter_cancel() - Cancel a running tp_meter session
  * @skb: received netlink message
  * @info: receiver information
  *
@@ -431,7 +432,7 @@ batadv_netlink_tp_meter_cancel(struct sk_buff *skb, struct genl_info *info)
 }
 
 /**
- * batadv_netlink_dump_hardif_entry - Dump one hard interface into a message
+ * batadv_netlink_dump_hardif_entry() - Dump one hard interface into a message
  * @msg: Netlink message to dump into
  * @portid: Port making netlink request
  * @seq: Sequence number of netlink message
@@ -473,7 +474,7 @@ batadv_netlink_dump_hardif_entry(struct sk_buff *msg, u32 portid, u32 seq,
 }
 
 /**
- * batadv_netlink_dump_hardifs - Dump all hard interface into a messages
+ * batadv_netlink_dump_hardifs() - Dump all hard interfaces into a message
  * @msg: Netlink message to dump into
  * @cb: Parameters from query
  *
@@ -620,7 +621,7 @@ struct genl_family batadv_netlink_family __ro_after_init = {
 };
 
 /**
- * batadv_netlink_register - register batadv genl netlink family
+ * batadv_netlink_register() - register batadv genl netlink family
  */
 void __init batadv_netlink_register(void)
 {
@@ -632,7 +633,7 @@ void __init batadv_netlink_register(void)
 }
 
 /**
- * batadv_netlink_unregister - unregister batadv genl netlink family
+ * batadv_netlink_unregister() - unregister batadv genl netlink family
  */
 void batadv_netlink_unregister(void)
 {
diff --git a/net/batman-adv/netlink.h b/net/batman-adv/netlink.h
index f1cd8c5d..0e7e57b 100644
--- a/net/batman-adv/netlink.h
+++ b/net/batman-adv/netlink.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
 /* Copyright (C) 2016-2017  B.A.T.M.A.N. contributors:
  *
  * Matthias Schiffer
diff --git a/net/batman-adv/network-coding.c b/net/batman-adv/network-coding.c
index 3604d78..b48116b 100644
--- a/net/batman-adv/network-coding.c
+++ b/net/batman-adv/network-coding.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
 /* Copyright (C) 2012-2017  B.A.T.M.A.N. contributors:
  *
  * Martin Hundebøll, Jeppe Ledet-Pedersen
@@ -25,7 +26,7 @@
 #include <linux/debugfs.h>
 #include <linux/errno.h>
 #include <linux/etherdevice.h>
-#include <linux/fs.h>
+#include <linux/gfp.h>
 #include <linux/if_ether.h>
 #include <linux/if_packet.h>
 #include <linux/init.h>
@@ -35,6 +36,7 @@
 #include <linux/kref.h>
 #include <linux/list.h>
 #include <linux/lockdep.h>
+#include <linux/net.h>
 #include <linux/netdevice.h>
 #include <linux/printk.h>
 #include <linux/random.h>
@@ -47,12 +49,12 @@
 #include <linux/stddef.h>
 #include <linux/string.h>
 #include <linux/workqueue.h>
+#include <uapi/linux/batadv_packet.h>
 
 #include "hard-interface.h"
 #include "hash.h"
 #include "log.h"
 #include "originator.h"
-#include "packet.h"
 #include "routing.h"
 #include "send.h"
 #include "tvlv.h"
@@ -65,7 +67,7 @@ static int batadv_nc_recv_coded_packet(struct sk_buff *skb,
 				       struct batadv_hard_iface *recv_if);
 
 /**
- * batadv_nc_init - one-time initialization for network coding
+ * batadv_nc_init() - one-time initialization for network coding
  *
  * Return: 0 on success or negative error number in case of failure
  */
@@ -81,7 +83,7 @@ int __init batadv_nc_init(void)
 }
 
 /**
- * batadv_nc_start_timer - initialise the nc periodic worker
+ * batadv_nc_start_timer() - initialise the nc periodic worker
  * @bat_priv: the bat priv with all the soft interface information
  */
 static void batadv_nc_start_timer(struct batadv_priv *bat_priv)
@@ -91,7 +93,7 @@ static void batadv_nc_start_timer(struct batadv_priv *bat_priv)
 }
 
 /**
- * batadv_nc_tvlv_container_update - update the network coding tvlv container
+ * batadv_nc_tvlv_container_update() - update the network coding tvlv container
  *  after network coding setting change
  * @bat_priv: the bat priv with all the soft interface information
  */
@@ -113,7 +115,7 @@ static void batadv_nc_tvlv_container_update(struct batadv_priv *bat_priv)
 }
 
 /**
- * batadv_nc_status_update - update the network coding tvlv container after
+ * batadv_nc_status_update() - update the network coding tvlv container after
  *  network coding setting change
  * @net_dev: the soft interface net device
  */
@@ -125,7 +127,7 @@ void batadv_nc_status_update(struct net_device *net_dev)
 }
 
 /**
- * batadv_nc_tvlv_ogm_handler_v1 - process incoming nc tvlv container
+ * batadv_nc_tvlv_ogm_handler_v1() - process incoming nc tvlv container
  * @bat_priv: the bat priv with all the soft interface information
  * @orig: the orig_node of the ogm
  * @flags: flags indicating the tvlv state (see batadv_tvlv_handler_flags)
@@ -144,7 +146,7 @@ static void batadv_nc_tvlv_ogm_handler_v1(struct batadv_priv *bat_priv,
 }
 
 /**
- * batadv_nc_mesh_init - initialise coding hash table and start house keeping
+ * batadv_nc_mesh_init() - initialise coding hash table and start housekeeping
  * @bat_priv: the bat priv with all the soft interface information
  *
  * Return: 0 on success or negative error number in case of failure
@@ -185,7 +187,7 @@ int batadv_nc_mesh_init(struct batadv_priv *bat_priv)
 }
 
 /**
- * batadv_nc_init_bat_priv - initialise the nc specific bat_priv variables
+ * batadv_nc_init_bat_priv() - initialise the nc specific bat_priv variables
  * @bat_priv: the bat priv with all the soft interface information
  */
 void batadv_nc_init_bat_priv(struct batadv_priv *bat_priv)
@@ -197,7 +199,7 @@ void batadv_nc_init_bat_priv(struct batadv_priv *bat_priv)
 }
 
 /**
- * batadv_nc_init_orig - initialise the nc fields of an orig_node
+ * batadv_nc_init_orig() - initialise the nc fields of an orig_node
  * @orig_node: the orig_node which is going to be initialised
  */
 void batadv_nc_init_orig(struct batadv_orig_node *orig_node)
@@ -209,8 +211,8 @@ void batadv_nc_init_orig(struct batadv_orig_node *orig_node)
 }
 
 /**
- * batadv_nc_node_release - release nc_node from lists and queue for free after
- *  rcu grace period
+ * batadv_nc_node_release() - release nc_node from lists and queue for free
+ *  after rcu grace period
  * @ref: kref pointer of the nc_node
  */
 static void batadv_nc_node_release(struct kref *ref)
@@ -224,7 +226,7 @@ static void batadv_nc_node_release(struct kref *ref)
 }
 
 /**
- * batadv_nc_node_put - decrement the nc_node refcounter and possibly
+ * batadv_nc_node_put() - decrement the nc_node refcounter and possibly
  *  release it
  * @nc_node: nc_node to be free'd
  */
@@ -234,8 +236,8 @@ static void batadv_nc_node_put(struct batadv_nc_node *nc_node)
 }
 
 /**
- * batadv_nc_path_release - release nc_path from lists and queue for free after
- *  rcu grace period
+ * batadv_nc_path_release() - release nc_path from lists and queue for free
+ *  after rcu grace period
  * @ref: kref pointer of the nc_path
  */
 static void batadv_nc_path_release(struct kref *ref)
@@ -248,7 +250,7 @@ static void batadv_nc_path_release(struct kref *ref)
 }
 
 /**
- * batadv_nc_path_put - decrement the nc_path refcounter and possibly
+ * batadv_nc_path_put() - decrement the nc_path refcounter and possibly
  *  release it
  * @nc_path: nc_path to be free'd
  */
@@ -258,7 +260,7 @@ static void batadv_nc_path_put(struct batadv_nc_path *nc_path)
 }
 
 /**
- * batadv_nc_packet_free - frees nc packet
+ * batadv_nc_packet_free() - frees nc packet
  * @nc_packet: the nc packet to free
  * @dropped: whether the packet is freed because it is dropped
  */
@@ -275,7 +277,7 @@ static void batadv_nc_packet_free(struct batadv_nc_packet *nc_packet,
 }
 
 /**
- * batadv_nc_to_purge_nc_node - checks whether an nc node has to be purged
+ * batadv_nc_to_purge_nc_node() - checks whether an nc node has to be purged
  * @bat_priv: the bat priv with all the soft interface information
  * @nc_node: the nc node to check
  *
@@ -291,7 +293,7 @@ static bool batadv_nc_to_purge_nc_node(struct batadv_priv *bat_priv,
 }
 
 /**
- * batadv_nc_to_purge_nc_path_coding - checks whether an nc path has timed out
+ * batadv_nc_to_purge_nc_path_coding() - checks whether an nc path has timed out
  * @bat_priv: the bat priv with all the soft interface information
  * @nc_path: the nc path to check
  *
@@ -311,7 +313,8 @@ static bool batadv_nc_to_purge_nc_path_coding(struct batadv_priv *bat_priv,
 }
 
 /**
- * batadv_nc_to_purge_nc_path_decoding - checks whether an nc path has timed out
+ * batadv_nc_to_purge_nc_path_decoding() - checks whether an nc path has timed
+ *  out
  * @bat_priv: the bat priv with all the soft interface information
  * @nc_path: the nc path to check
  *
@@ -331,7 +334,7 @@ static bool batadv_nc_to_purge_nc_path_decoding(struct batadv_priv *bat_priv,
 }
 
 /**
- * batadv_nc_purge_orig_nc_nodes - go through list of nc nodes and purge stale
+ * batadv_nc_purge_orig_nc_nodes() - go through list of nc nodes and purge stale
  *  entries
  * @bat_priv: the bat priv with all the soft interface information
  * @list: list of nc nodes
@@ -369,7 +372,7 @@ batadv_nc_purge_orig_nc_nodes(struct batadv_priv *bat_priv,
 }
 
 /**
- * batadv_nc_purge_orig - purges all nc node data attached of the given
+ * batadv_nc_purge_orig() - purges all nc node data attached to the given
  *  originator
  * @bat_priv: the bat priv with all the soft interface information
  * @orig_node: orig_node with the nc node entries to be purged
@@ -395,8 +398,8 @@ void batadv_nc_purge_orig(struct batadv_priv *bat_priv,
 }
 
 /**
- * batadv_nc_purge_orig_hash - traverse entire originator hash to check if they
- *  have timed out nc nodes
+ * batadv_nc_purge_orig_hash() - traverse entire originator hash to check if
+ *  they have timed out nc nodes
  * @bat_priv: the bat priv with all the soft interface information
  */
 static void batadv_nc_purge_orig_hash(struct batadv_priv *bat_priv)
@@ -422,7 +425,7 @@ static void batadv_nc_purge_orig_hash(struct batadv_priv *bat_priv)
 }
 
 /**
- * batadv_nc_purge_paths - traverse all nc paths part of the hash and remove
+ * batadv_nc_purge_paths() - traverse all nc paths part of the hash and remove
  *  unused ones
  * @bat_priv: the bat priv with all the soft interface information
  * @hash: hash table containing the nc paths to check
@@ -481,7 +484,7 @@ static void batadv_nc_purge_paths(struct batadv_priv *bat_priv,
 }
 
 /**
- * batadv_nc_hash_key_gen - computes the nc_path hash key
+ * batadv_nc_hash_key_gen() - computes the nc_path hash key
  * @key: buffer to hold the final hash key
  * @src: source ethernet mac address going into the hash key
  * @dst: destination ethernet mac address going into the hash key
@@ -494,7 +497,7 @@ static void batadv_nc_hash_key_gen(struct batadv_nc_path *key, const char *src,
 }
 
 /**
- * batadv_nc_hash_choose - compute the hash value for an nc path
+ * batadv_nc_hash_choose() - compute the hash value for an nc path
  * @data: data to hash
  * @size: size of the hash table
  *
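
Taken together, the two helpers above reduce a (src, dst) MAC pair to a table
index: the pair is packed into a key, which is then hashed and folded into the
table size. A hedged sketch of the idea (struct layout and helper names are
illustrative, not the in-tree code):

	struct nc_key_sketch {
		u8 prev_hop[ETH_ALEN];	/* source mac, first half of key */
		u8 next_hop[ETH_ALEN];	/* destination mac, second half */
	};

	static u32 nc_index_sketch(const u8 *src, const u8 *dst, u32 table_size)
	{
		struct nc_key_sketch key;

		memcpy(key.prev_hop, src, ETH_ALEN);
		memcpy(key.next_hop, dst, ETH_ALEN);

		return jhash(&key, sizeof(key), 0) % table_size;
	}
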
@@ -512,7 +515,7 @@ static u32 batadv_nc_hash_choose(const void *data, u32 size)
 }
 
 /**
- * batadv_nc_hash_compare - comparing function used in the network coding hash
+ * batadv_nc_hash_compare() - comparing function used in the network coding hash
  *  tables
  * @node: node in the local table
  * @data2: second object to compare the node to
@@ -538,7 +541,7 @@ static bool batadv_nc_hash_compare(const struct hlist_node *node,
 }
 
 /**
- * batadv_nc_hash_find - search for an existing nc path and return it
+ * batadv_nc_hash_find() - search for an existing nc path and return it
  * @hash: hash table containing the nc path
  * @data: search key
  *
@@ -575,7 +578,7 @@ batadv_nc_hash_find(struct batadv_hashtable *hash,
 }
 
 /**
- * batadv_nc_send_packet - send non-coded packet and free nc_packet struct
+ * batadv_nc_send_packet() - send non-coded packet and free nc_packet struct
  * @nc_packet: the nc packet to send
  */
 static void batadv_nc_send_packet(struct batadv_nc_packet *nc_packet)
@@ -586,7 +589,7 @@ static void batadv_nc_send_packet(struct batadv_nc_packet *nc_packet)
 }
 
 /**
- * batadv_nc_sniffed_purge - Checks timestamp of given sniffed nc_packet.
+ * batadv_nc_sniffed_purge() - Checks timestamp of given sniffed nc_packet.
  * @bat_priv: the bat priv with all the soft interface information
  * @nc_path: the nc path the packet belongs to
  * @nc_packet: the nc packet to be checked
@@ -625,7 +628,7 @@ static bool batadv_nc_sniffed_purge(struct batadv_priv *bat_priv,
 }
 
 /**
- * batadv_nc_fwd_flush - Checks the timestamp of the given nc packet.
+ * batadv_nc_fwd_flush() - Checks the timestamp of the given nc packet.
  * @bat_priv: the bat priv with all the soft interface information
  * @nc_path: the nc path the packet belongs to
  * @nc_packet: the nc packet to be checked
@@ -663,8 +666,8 @@ static bool batadv_nc_fwd_flush(struct batadv_priv *bat_priv,
 }
 
 /**
- * batadv_nc_process_nc_paths - traverse given nc packet pool and free timed out
- *  nc packets
+ * batadv_nc_process_nc_paths() - traverse given nc packet pool and free timed
+ *  out nc packets
  * @bat_priv: the bat priv with all the soft interface information
  * @hash: to be processed hash table
  * @process_fn: Function called to process given nc packet. Should return true
@@ -709,7 +712,8 @@ batadv_nc_process_nc_paths(struct batadv_priv *bat_priv,
 }
 
 /**
- * batadv_nc_worker - periodic task for house keeping related to network coding
+ * batadv_nc_worker() - periodic task for housekeeping related to network
+ *  coding
  * @work: kernel work struct
  */
 static void batadv_nc_worker(struct work_struct *work)
@@ -749,8 +753,8 @@ static void batadv_nc_worker(struct work_struct *work)
 }
 
 /**
- * batadv_can_nc_with_orig - checks whether the given orig node is suitable for
- *  coding or not
+ * batadv_can_nc_with_orig() - checks whether the given orig node is suitable
+ *  for coding or not
  * @bat_priv: the bat priv with all the soft interface information
  * @orig_node: neighboring orig node which may be used as nc candidate
  * @ogm_packet: incoming ogm packet also used for the checks
@@ -790,7 +794,7 @@ static bool batadv_can_nc_with_orig(struct batadv_priv *bat_priv,
 }
 
 /**
- * batadv_nc_find_nc_node - search for an existing nc node and return it
+ * batadv_nc_find_nc_node() - search for an existing nc node and return it
  * @orig_node: orig node originating the ogm packet
  * @orig_neigh_node: neighboring orig node from which we received the ogm packet
  *  (can be equal to orig_node)
@@ -830,7 +834,7 @@ batadv_nc_find_nc_node(struct batadv_orig_node *orig_node,
 }
 
 /**
- * batadv_nc_get_nc_node - retrieves an nc node or creates the entry if it was
+ * batadv_nc_get_nc_node() - retrieves an nc node or creates the entry if it was
  *  not found
  * @bat_priv: the bat priv with all the soft interface information
  * @orig_node: orig node originating the ogm packet
@@ -890,7 +894,7 @@ batadv_nc_get_nc_node(struct batadv_priv *bat_priv,
 }
 
 /**
- * batadv_nc_update_nc_node - updates stored incoming and outgoing nc node
+ * batadv_nc_update_nc_node() - updates stored incoming and outgoing nc node
  *  structs (best called on incoming OGMs)
  * @bat_priv: the bat priv with all the soft interface information
  * @orig_node: orig node originating the ogm packet
@@ -945,7 +949,7 @@ void batadv_nc_update_nc_node(struct batadv_priv *bat_priv,
 }
 
 /**
- * batadv_nc_get_path - get existing nc_path or allocate a new one
+ * batadv_nc_get_path() - get existing nc_path or allocate a new one
  * @bat_priv: the bat priv with all the soft interface information
  * @hash: hash table containing the nc path
  * @src: ethernet source address - first half of the nc path search key
@@ -1006,7 +1010,7 @@ static struct batadv_nc_path *batadv_nc_get_path(struct batadv_priv *bat_priv,
 }
 
 /**
- * batadv_nc_random_weight_tq - scale the receivers TQ-value to avoid unfair
+ * batadv_nc_random_weight_tq() - scale the receiver's TQ-value to avoid unfair
  *  selection of a receiver with slightly lower TQ than the other
  * @tq: to be weighted tq value
  *
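
The scaling mentioned above randomizes the estimated packet loss
(BATADV_TQ_MAX_VALUE - tq) rather than the TQ itself, so receivers with nearly
equal TQ are still picked with comparable probability. A sketch of the
arithmetic (the in-tree version may differ in detail):

	static u8 nc_weight_tq_sketch(u8 tq)
	{
		u8 rand_val;
		unsigned int rand_loss;

		get_random_bytes(&rand_val, sizeof(rand_val));

		/* randomize the estimated loss, then convert back to a tq */
		rand_loss = rand_val * (BATADV_TQ_MAX_VALUE - tq);
		rand_loss /= BATADV_TQ_MAX_VALUE;

		return BATADV_TQ_MAX_VALUE - rand_loss;
	}
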
@@ -1029,7 +1033,7 @@ static u8 batadv_nc_random_weight_tq(u8 tq)
 }
 
 /**
- * batadv_nc_memxor - XOR destination with source
+ * batadv_nc_memxor() - XOR destination with source
  * @dst: byte array to XOR into
  * @src: byte array to XOR from
  * @len: length of destination array
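
batadv_nc_memxor() is the coding primitive itself and is conceptually just an
in-place byte-wise XOR; since XOR is its own inverse, applying the same source
again during decoding restores the original payload. A minimal sketch:

	static void memxor_sketch(char *dst, const char *src, unsigned int len)
	{
		unsigned int i;

		for (i = 0; i < len; i++)
			dst[i] ^= src[i];	/* dst ^ src ^ src == dst */
	}
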
@@ -1043,7 +1047,7 @@ static void batadv_nc_memxor(char *dst, const char *src, unsigned int len)
 }
 
 /**
- * batadv_nc_code_packets - code a received unicast_packet with an nc packet
+ * batadv_nc_code_packets() - code a received unicast_packet with an nc packet
  *  into a coded_packet and send it
  * @bat_priv: the bat priv with all the soft interface information
  * @skb: data skb to forward
@@ -1236,7 +1240,7 @@ static bool batadv_nc_code_packets(struct batadv_priv *bat_priv,
 }
 
 /**
- * batadv_nc_skb_coding_possible - true if a decoded skb is available at dst.
+ * batadv_nc_skb_coding_possible() - true if a decoded skb is available at dst.
  * @skb: data skb to forward
  * @dst: destination mac address of the other skb to code with
  * @src: source mac address of skb
@@ -1260,7 +1264,7 @@ static bool batadv_nc_skb_coding_possible(struct sk_buff *skb, u8 *dst, u8 *src)
 }
 
 /**
- * batadv_nc_path_search - Find the coding path matching in_nc_node and
+ * batadv_nc_path_search() - Find the coding path matching in_nc_node and
  *  out_nc_node to retrieve a buffered packet that can be used for coding.
  * @bat_priv: the bat priv with all the soft interface information
  * @in_nc_node: pointer to skb next hop's neighbor nc node
@@ -1328,8 +1332,8 @@ batadv_nc_path_search(struct batadv_priv *bat_priv,
 }
 
 /**
- * batadv_nc_skb_src_search - Loops through the list of neighoring nodes of the
- *  skb's sender (may be equal to the originator).
+ * batadv_nc_skb_src_search() - Loops through the list of neighboring nodes of
+ *  the skb's sender (may be equal to the originator).
  * @bat_priv: the bat priv with all the soft interface information
  * @skb: data skb to forward
  * @eth_dst: next hop mac address of skb
@@ -1374,7 +1378,7 @@ batadv_nc_skb_src_search(struct batadv_priv *bat_priv,
 }
 
 /**
- * batadv_nc_skb_store_before_coding - set the ethernet src and dst of the
+ * batadv_nc_skb_store_before_coding() - set the ethernet src and dst of the
  *  unicast skb before it is stored for use in later decoding
  * @bat_priv: the bat priv with all the soft interface information
  * @skb: data skb to store
@@ -1409,7 +1413,7 @@ static void batadv_nc_skb_store_before_coding(struct batadv_priv *bat_priv,
 }
 
 /**
- * batadv_nc_skb_dst_search - Loops through list of neighboring nodes to dst.
+ * batadv_nc_skb_dst_search() - Loops through list of neighboring nodes to dst.
  * @skb: data skb to forward
  * @neigh_node: next hop to forward packet to
  * @ethhdr: pointer to the ethernet header inside the skb
@@ -1467,7 +1471,7 @@ static bool batadv_nc_skb_dst_search(struct sk_buff *skb,
 }
 
 /**
- * batadv_nc_skb_add_to_path - buffer skb for later encoding / decoding
+ * batadv_nc_skb_add_to_path() - buffer skb for later encoding / decoding
  * @skb: skb to add to path
  * @nc_path: path to add skb to
  * @neigh_node: next hop to forward packet to
@@ -1502,7 +1506,7 @@ static bool batadv_nc_skb_add_to_path(struct sk_buff *skb,
 }
 
 /**
- * batadv_nc_skb_forward - try to code a packet or add it to the coding packet
+ * batadv_nc_skb_forward() - try to code a packet or add it to the coding packet
  *  buffer
  * @skb: data skb to forward
  * @neigh_node: next hop to forward packet to
@@ -1559,8 +1563,8 @@ bool batadv_nc_skb_forward(struct sk_buff *skb,
 }
 
 /**
- * batadv_nc_skb_store_for_decoding - save a clone of the skb which can be used
- *  when decoding coded packets
+ * batadv_nc_skb_store_for_decoding() - save a clone of the skb which can be
+ *  used when decoding coded packets
  * @bat_priv: the bat priv with all the soft interface information
  * @skb: data skb to store
  */
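
Storing "a clone" means the original skb continues down the forwarding path
untouched while the copy waits in the decoding buffer. A hedged caller-side
sketch of the pattern (buffer insertion elided):

	struct sk_buff *clone;

	/* atomic context: packets are stored from the receive path */
	clone = skb_clone(skb, GFP_ATOMIC);
	if (!clone)
		return;

	/* ... timestamp the clone and add it to the decoding buffer ... */
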
@@ -1620,7 +1624,7 @@ void batadv_nc_skb_store_for_decoding(struct batadv_priv *bat_priv,
 }
 
 /**
- * batadv_nc_skb_store_sniffed_unicast - check if a received unicast packet
+ * batadv_nc_skb_store_sniffed_unicast() - check if a received unicast packet
  *  should be saved in the decoding buffer and, if so, store it there
  * @bat_priv: the bat priv with all the soft interface information
  * @skb: unicast skb to store
@@ -1640,7 +1644,7 @@ void batadv_nc_skb_store_sniffed_unicast(struct batadv_priv *bat_priv,
 }
 
 /**
- * batadv_nc_skb_decode_packet - decode given skb using the decode data stored
+ * batadv_nc_skb_decode_packet() - decode given skb using the decode data stored
  *  in nc_packet
  * @bat_priv: the bat priv with all the soft interface information
  * @skb: unicast skb to decode
@@ -1734,7 +1738,7 @@ batadv_nc_skb_decode_packet(struct batadv_priv *bat_priv, struct sk_buff *skb,
 }
 
 /**
- * batadv_nc_find_decoding_packet - search through buffered decoding data to
+ * batadv_nc_find_decoding_packet() - search through buffered decoding data to
  *  find the data needed to decode the coded packet
  * @bat_priv: the bat priv with all the soft interface information
  * @ethhdr: pointer to the ethernet header inside the coded packet
@@ -1799,7 +1803,7 @@ batadv_nc_find_decoding_packet(struct batadv_priv *bat_priv,
 }
 
 /**
- * batadv_nc_recv_coded_packet - try to decode coded packet and enqueue the
+ * batadv_nc_recv_coded_packet() - try to decode coded packet and enqueue the
  *  resulting unicast packet
  * @skb: incoming coded packet
  * @recv_if: pointer to interface this packet was received on
@@ -1874,7 +1878,7 @@ static int batadv_nc_recv_coded_packet(struct sk_buff *skb,
 }
 
 /**
- * batadv_nc_mesh_free - clean up network coding memory
+ * batadv_nc_mesh_free() - clean up network coding memory
  * @bat_priv: the bat priv with all the soft interface information
  */
 void batadv_nc_mesh_free(struct batadv_priv *bat_priv)
@@ -1891,7 +1895,7 @@ void batadv_nc_mesh_free(struct batadv_priv *bat_priv)
 
 #ifdef CONFIG_BATMAN_ADV_DEBUGFS
 /**
- * batadv_nc_nodes_seq_print_text - print the nc node information
+ * batadv_nc_nodes_seq_print_text() - print the nc node information
  * @seq: seq file to print on
  * @offset: not used
  *
@@ -1954,7 +1958,7 @@ int batadv_nc_nodes_seq_print_text(struct seq_file *seq, void *offset)
 }
 
 /**
- * batadv_nc_init_debugfs - create nc folder and related files in debugfs
+ * batadv_nc_init_debugfs() - create nc folder and related files in debugfs
  * @bat_priv: the bat priv with all the soft interface information
  *
  * Return: 0 on success or negative error number in case of failure
diff --git a/net/batman-adv/network-coding.h b/net/batman-adv/network-coding.h
index c66efb8..adaeafa 100644
--- a/net/batman-adv/network-coding.h
+++ b/net/batman-adv/network-coding.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
 /* Copyright (C) 2012-2017  B.A.T.M.A.N. contributors:
  *
  * Martin Hundebøll, Jeppe Ledet-Pedersen
diff --git a/net/batman-adv/originator.c b/net/batman-adv/originator.c
index 2967b86..58a7d92 100644
--- a/net/batman-adv/originator.c
+++ b/net/batman-adv/originator.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
 /* Copyright (C) 2009-2017  B.A.T.M.A.N. contributors:
  *
  * Marek Lindner, Simon Wunderlich
@@ -21,7 +22,7 @@
 #include <linux/atomic.h>
 #include <linux/errno.h>
 #include <linux/etherdevice.h>
-#include <linux/fs.h>
+#include <linux/gfp.h>
 #include <linux/jiffies.h>
 #include <linux/kernel.h>
 #include <linux/kref.h>
@@ -30,10 +31,12 @@
 #include <linux/netdevice.h>
 #include <linux/netlink.h>
 #include <linux/rculist.h>
+#include <linux/rcupdate.h>
 #include <linux/seq_file.h>
 #include <linux/skbuff.h>
 #include <linux/slab.h>
 #include <linux/spinlock.h>
+#include <linux/stddef.h>
 #include <linux/workqueue.h>
 #include <net/sock.h>
 #include <uapi/linux/batman_adv.h>
@@ -55,10 +58,47 @@
 /* hash class keys */
 static struct lock_class_key batadv_orig_hash_lock_class_key;
 
+/**
+ * batadv_orig_hash_find() - Find and return originator from orig_hash
+ * @bat_priv: the bat priv with all the soft interface information
+ * @data: mac address of the originator
+ *
+ * Return: orig_node (with increased refcnt), NULL on errors
+ */
+struct batadv_orig_node *
+batadv_orig_hash_find(struct batadv_priv *bat_priv, const void *data)
+{
+	struct batadv_hashtable *hash = bat_priv->orig_hash;
+	struct hlist_head *head;
+	struct batadv_orig_node *orig_node, *orig_node_tmp = NULL;
+	int index;
+
+	if (!hash)
+		return NULL;
+
+	index = batadv_choose_orig(data, hash->size);
+	head = &hash->table[index];
+
+	rcu_read_lock();
+	hlist_for_each_entry_rcu(orig_node, head, hash_entry) {
+		if (!batadv_compare_eth(orig_node, data))
+			continue;
+
+		if (!kref_get_unless_zero(&orig_node->refcount))
+			continue;
+
+		orig_node_tmp = orig_node;
+		break;
+	}
+	rcu_read_unlock();
+
+	return orig_node_tmp;
+}
+
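
Because the lookup above returns the orig_node with its refcount already
raised via kref_get_unless_zero(), every successful call owes a matching put.
A typical (hypothetical) call site:

	struct batadv_orig_node *orig_node;

	orig_node = batadv_orig_hash_find(bat_priv, ethhdr->h_source);
	if (!orig_node)
		return;		/* unknown originator */

	/* ... use orig_node under the held reference ... */

	batadv_orig_node_put(orig_node);	/* release the find's kref */
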
 static void batadv_purge_orig(struct work_struct *work);
 
 /**
- * batadv_compare_orig - comparing function used in the originator hash table
+ * batadv_compare_orig() - comparing function used in the originator hash table
  * @node: node in the local table
  * @data2: second object to compare the node to
  *
@@ -73,7 +113,7 @@ bool batadv_compare_orig(const struct hlist_node *node, const void *data2)
 }
 
 /**
- * batadv_orig_node_vlan_get - get an orig_node_vlan object
+ * batadv_orig_node_vlan_get() - get an orig_node_vlan object
  * @orig_node: the originator serving the VLAN
  * @vid: the VLAN identifier
  *
@@ -104,7 +144,7 @@ batadv_orig_node_vlan_get(struct batadv_orig_node *orig_node,
 }
 
 /**
- * batadv_orig_node_vlan_new - search and possibly create an orig_node_vlan
+ * batadv_orig_node_vlan_new() - search and possibly create an orig_node_vlan
  *  object
  * @orig_node: the originator serving the VLAN
  * @vid: the VLAN identifier
@@ -145,7 +185,7 @@ batadv_orig_node_vlan_new(struct batadv_orig_node *orig_node,
 }
 
 /**
- * batadv_orig_node_vlan_release - release originator-vlan object from lists
+ * batadv_orig_node_vlan_release() - release originator-vlan object from lists
  *  and queue for free after rcu grace period
  * @ref: kref pointer of the originator-vlan object
  */
@@ -159,7 +199,7 @@ static void batadv_orig_node_vlan_release(struct kref *ref)
 }
 
 /**
- * batadv_orig_node_vlan_put - decrement the refcounter and possibly release
+ * batadv_orig_node_vlan_put() - decrement the refcounter and possibly release
  *  the originator-vlan object
  * @orig_vlan: the originator-vlan object to release
  */
@@ -168,6 +208,12 @@ void batadv_orig_node_vlan_put(struct batadv_orig_node_vlan *orig_vlan)
 	kref_put(&orig_vlan->refcount, batadv_orig_node_vlan_release);
 }
 
+/**
+ * batadv_originator_init() - Initialize all originator structures
+ * @bat_priv: the bat priv with all the soft interface information
+ *
+ * Return: 0 on success or negative error number in case of failure
+ */
 int batadv_originator_init(struct batadv_priv *bat_priv)
 {
 	if (bat_priv->orig_hash)
@@ -193,7 +239,7 @@ int batadv_originator_init(struct batadv_priv *bat_priv)
 }
 
 /**
- * batadv_neigh_ifinfo_release - release neigh_ifinfo from lists and queue for
+ * batadv_neigh_ifinfo_release() - release neigh_ifinfo from lists and queue for
  *  free after rcu grace period
  * @ref: kref pointer of the neigh_ifinfo
  */
@@ -210,7 +256,7 @@ static void batadv_neigh_ifinfo_release(struct kref *ref)
 }
 
 /**
- * batadv_neigh_ifinfo_put - decrement the refcounter and possibly release
+ * batadv_neigh_ifinfo_put() - decrement the refcounter and possibly release
  *  the neigh_ifinfo
  * @neigh_ifinfo: the neigh_ifinfo object to release
  */
@@ -220,7 +266,7 @@ void batadv_neigh_ifinfo_put(struct batadv_neigh_ifinfo *neigh_ifinfo)
 }
 
 /**
- * batadv_hardif_neigh_release - release hardif neigh node from lists and
+ * batadv_hardif_neigh_release() - release hardif neigh node from lists and
  *  queue for free after rcu grace period
  * @ref: kref pointer of the neigh_node
  */
@@ -240,7 +286,7 @@ static void batadv_hardif_neigh_release(struct kref *ref)
 }
 
 /**
- * batadv_hardif_neigh_put - decrement the hardif neighbors refcounter
+ * batadv_hardif_neigh_put() - decrement the hardif neighbors refcounter
  *  and possibly release it
  * @hardif_neigh: hardif neigh neighbor to free
  */
@@ -250,7 +296,7 @@ void batadv_hardif_neigh_put(struct batadv_hardif_neigh_node *hardif_neigh)
 }
 
 /**
- * batadv_neigh_node_release - release neigh_node from lists and queue for
+ * batadv_neigh_node_release() - release neigh_node from lists and queue for
  *  free after rcu grace period
  * @ref: kref pointer of the neigh_node
  */
@@ -275,7 +321,7 @@ static void batadv_neigh_node_release(struct kref *ref)
 }
 
 /**
- * batadv_neigh_node_put - decrement the neighbors refcounter and possibly
+ * batadv_neigh_node_put() - decrement the neighbors refcounter and possibly
  *  release it
  * @neigh_node: neigh neighbor to free
  */
@@ -285,7 +331,7 @@ void batadv_neigh_node_put(struct batadv_neigh_node *neigh_node)
 }
 
 /**
- * batadv_orig_router_get - router to the originator depending on iface
+ * batadv_orig_router_get() - router to the originator depending on iface
  * @orig_node: the orig node for the router
  * @if_outgoing: the interface where the payload packet has been received or
  *  the OGM should be sent to
@@ -318,7 +364,7 @@ batadv_orig_router_get(struct batadv_orig_node *orig_node,
 }
 
 /**
- * batadv_orig_ifinfo_get - find the ifinfo from an orig_node
+ * batadv_orig_ifinfo_get() - find the ifinfo from an orig_node
  * @orig_node: the orig node to be queried
  * @if_outgoing: the interface for which the ifinfo should be acquired
  *
@@ -350,7 +396,7 @@ batadv_orig_ifinfo_get(struct batadv_orig_node *orig_node,
 }
 
 /**
- * batadv_orig_ifinfo_new - search and possibly create an orig_ifinfo object
+ * batadv_orig_ifinfo_new() - search and possibly create an orig_ifinfo object
  * @orig_node: the orig node to be queried
  * @if_outgoing: the interface for which the ifinfo should be acquired
  *
@@ -396,7 +442,7 @@ batadv_orig_ifinfo_new(struct batadv_orig_node *orig_node,
 }
 
 /**
- * batadv_neigh_ifinfo_get - find the ifinfo from an neigh_node
+ * batadv_neigh_ifinfo_get() - find the ifinfo from a neigh_node
  * @neigh: the neigh node to be queried
  * @if_outgoing: the interface for which the ifinfo should be acquired
  *
@@ -429,7 +475,7 @@ batadv_neigh_ifinfo_get(struct batadv_neigh_node *neigh,
 }
 
 /**
- * batadv_neigh_ifinfo_new - search and possibly create an neigh_ifinfo object
+ * batadv_neigh_ifinfo_new() - search and possibly create a neigh_ifinfo object
  * @neigh: the neigh node to be queried
  * @if_outgoing: the interface for which the ifinfo should be acquired
  *
@@ -472,7 +518,7 @@ batadv_neigh_ifinfo_new(struct batadv_neigh_node *neigh,
 }
 
 /**
- * batadv_neigh_node_get - retrieve a neighbour from the list
+ * batadv_neigh_node_get() - retrieve a neighbour from the list
  * @orig_node: originator which the neighbour belongs to
  * @hard_iface: the interface where this neighbour is connected to
  * @addr: the address of the neighbour
@@ -509,7 +555,7 @@ batadv_neigh_node_get(const struct batadv_orig_node *orig_node,
 }
 
 /**
- * batadv_hardif_neigh_create - create a hardif neighbour node
+ * batadv_hardif_neigh_create() - create a hardif neighbour node
  * @hard_iface: the interface this neighbour is connected to
  * @neigh_addr: the interface address of the neighbour to retrieve
  * @orig_node: originator object representing the neighbour
@@ -555,7 +601,7 @@ batadv_hardif_neigh_create(struct batadv_hard_iface *hard_iface,
 }
 
 /**
- * batadv_hardif_neigh_get_or_create - retrieve or create a hardif neighbour
+ * batadv_hardif_neigh_get_or_create() - retrieve or create a hardif neighbour
  *  node
  * @hard_iface: the interface this neighbour is connected to
  * @neigh_addr: the interface address of the neighbour to retrieve
@@ -579,7 +625,7 @@ batadv_hardif_neigh_get_or_create(struct batadv_hard_iface *hard_iface,
 }
 
 /**
- * batadv_hardif_neigh_get - retrieve a hardif neighbour from the list
+ * batadv_hardif_neigh_get() - retrieve a hardif neighbour from the list
  * @hard_iface: the interface where this neighbour is connected to
  * @neigh_addr: the address of the neighbour
  *
@@ -611,7 +657,7 @@ batadv_hardif_neigh_get(const struct batadv_hard_iface *hard_iface,
 }
 
 /**
- * batadv_neigh_node_create - create a neigh node object
+ * batadv_neigh_node_create() - create a neigh node object
  * @orig_node: originator object representing the neighbour
  * @hard_iface: the interface where the neighbour is connected to
  * @neigh_addr: the mac address of the neighbour interface
@@ -676,7 +722,7 @@ batadv_neigh_node_create(struct batadv_orig_node *orig_node,
 }
 
 /**
- * batadv_neigh_node_get_or_create - retrieve or create a neigh node object
+ * batadv_neigh_node_get_or_create() - retrieve or create a neigh node object
  * @orig_node: originator object representing the neighbour
  * @hard_iface: the interface where the neighbour is connected to
  * @neigh_addr: the mac address of the neighbour interface
@@ -700,7 +746,7 @@ batadv_neigh_node_get_or_create(struct batadv_orig_node *orig_node,
 
 #ifdef CONFIG_BATMAN_ADV_DEBUGFS
 /**
- * batadv_hardif_neigh_seq_print_text - print the single hop neighbour list
+ * batadv_hardif_neigh_seq_print_text() - print the single hop neighbour list
  * @seq: neighbour table seq_file struct
  * @offset: not used
  *
@@ -735,8 +781,8 @@ int batadv_hardif_neigh_seq_print_text(struct seq_file *seq, void *offset)
 #endif
 
 /**
- * batadv_hardif_neigh_dump - Dump to netlink the neighbor infos for a specific
- *  outgoing interface
+ * batadv_hardif_neigh_dump() - Dump to netlink the neighbor infos for a
+ *  specific outgoing interface
  * @msg: message to dump into
  * @cb: parameters for the dump
  *
@@ -812,7 +858,7 @@ int batadv_hardif_neigh_dump(struct sk_buff *msg, struct netlink_callback *cb)
 }
 
 /**
- * batadv_orig_ifinfo_release - release orig_ifinfo from lists and queue for
+ * batadv_orig_ifinfo_release() - release orig_ifinfo from lists and queue for
  *  free after rcu grace period
  * @ref: kref pointer of the orig_ifinfo
  */
@@ -835,7 +881,7 @@ static void batadv_orig_ifinfo_release(struct kref *ref)
 }
 
 /**
- * batadv_orig_ifinfo_put - decrement the refcounter and possibly release
+ * batadv_orig_ifinfo_put() - decrement the refcounter and possibly release
  *  the orig_ifinfo
  * @orig_ifinfo: the orig_ifinfo object to release
  */
@@ -845,7 +891,7 @@ void batadv_orig_ifinfo_put(struct batadv_orig_ifinfo *orig_ifinfo)
 }
 
 /**
- * batadv_orig_node_free_rcu - free the orig_node
+ * batadv_orig_node_free_rcu() - free the orig_node
  * @rcu: rcu pointer of the orig_node
  */
 static void batadv_orig_node_free_rcu(struct rcu_head *rcu)
@@ -866,7 +912,7 @@ static void batadv_orig_node_free_rcu(struct rcu_head *rcu)
 }
 
 /**
- * batadv_orig_node_release - release orig_node from lists and queue for
+ * batadv_orig_node_release() - release orig_node from lists and queue for
  *  free after rcu grace period
  * @ref: kref pointer of the orig_node
  */
@@ -917,7 +963,7 @@ static void batadv_orig_node_release(struct kref *ref)
 }
 
 /**
- * batadv_orig_node_put - decrement the orig node refcounter and possibly
+ * batadv_orig_node_put() - decrement the orig node refcounter and possibly
  *  release it
  * @orig_node: the orig node to free
  */
@@ -926,6 +972,10 @@ void batadv_orig_node_put(struct batadv_orig_node *orig_node)
 	kref_put(&orig_node->refcount, batadv_orig_node_release);
 }
 
+/**
+ * batadv_originator_free() - Free all originator structures
+ * @bat_priv: the bat priv with all the soft interface information
+ */
 void batadv_originator_free(struct batadv_priv *bat_priv)
 {
 	struct batadv_hashtable *hash = bat_priv->orig_hash;
@@ -959,7 +1009,7 @@ void batadv_originator_free(struct batadv_priv *bat_priv)
 }
 
 /**
- * batadv_orig_node_new - creates a new orig_node
+ * batadv_orig_node_new() - creates a new orig_node
  * @bat_priv: the bat priv with all the soft interface information
  * @addr: the mac address of the originator
  *
@@ -1038,7 +1088,7 @@ struct batadv_orig_node *batadv_orig_node_new(struct batadv_priv *bat_priv,
 }
 
 /**
- * batadv_purge_neigh_ifinfo - purge obsolete ifinfo entries from neighbor
+ * batadv_purge_neigh_ifinfo() - purge obsolete ifinfo entries from neighbor
  * @bat_priv: the bat priv with all the soft interface information
  * @neigh: orig node which is to be checked
  */
@@ -1079,7 +1129,7 @@ batadv_purge_neigh_ifinfo(struct batadv_priv *bat_priv,
 }
 
 /**
- * batadv_purge_orig_ifinfo - purge obsolete ifinfo entries from originator
+ * batadv_purge_orig_ifinfo() - purge obsolete ifinfo entries from originator
  * @bat_priv: the bat priv with all the soft interface information
  * @orig_node: orig node which is to be checked
  *
@@ -1131,7 +1181,7 @@ batadv_purge_orig_ifinfo(struct batadv_priv *bat_priv,
 }
 
 /**
- * batadv_purge_orig_neighbors - purges neighbors from originator
+ * batadv_purge_orig_neighbors() - purges neighbors from originator
  * @bat_priv: the bat priv with all the soft interface information
  * @orig_node: orig node which is to be checked
  *
@@ -1189,7 +1239,7 @@ batadv_purge_orig_neighbors(struct batadv_priv *bat_priv,
 }
 
 /**
- * batadv_find_best_neighbor - finds the best neighbor after purging
+ * batadv_find_best_neighbor() - finds the best neighbor after purging
  * @bat_priv: the bat priv with all the soft interface information
  * @orig_node: orig node which is to be checked
  * @if_outgoing: the interface for which the metric should be compared
@@ -1224,7 +1274,7 @@ batadv_find_best_neighbor(struct batadv_priv *bat_priv,
 }
 
 /**
- * batadv_purge_orig_node - purges obsolete information from an orig_node
+ * batadv_purge_orig_node() - purges obsolete information from an orig_node
  * @bat_priv: the bat priv with all the soft interface information
  * @orig_node: orig node which is to be checked
  *
@@ -1341,12 +1391,24 @@ static void batadv_purge_orig(struct work_struct *work)
 			   msecs_to_jiffies(BATADV_ORIG_WORK_PERIOD));
 }
 
+/**
+ * batadv_purge_orig_ref() - Purge all outdated originators
+ * @bat_priv: the bat priv with all the soft interface information
+ */
 void batadv_purge_orig_ref(struct batadv_priv *bat_priv)
 {
 	_batadv_purge_orig(bat_priv);
 }
 
 #ifdef CONFIG_BATMAN_ADV_DEBUGFS
+
+/**
+ * batadv_orig_seq_print_text() - Print the originator table in a seq file
+ * @seq: seq file to print on
+ * @offset: not used
+ *
+ * Return: always 0
+ */
 int batadv_orig_seq_print_text(struct seq_file *seq, void *offset)
 {
 	struct net_device *net_dev = (struct net_device *)seq->private;
@@ -1376,7 +1438,7 @@ int batadv_orig_seq_print_text(struct seq_file *seq, void *offset)
 }
 
 /**
- * batadv_orig_hardif_seq_print_text - writes originator infos for a specific
+ * batadv_orig_hardif_seq_print_text() - writes originator infos for a specific
  *  outgoing interface
  * @seq: debugfs table seq_file struct
  * @offset: not used
@@ -1423,7 +1485,7 @@ int batadv_orig_hardif_seq_print_text(struct seq_file *seq, void *offset)
 #endif
 
 /**
- * batadv_orig_dump - Dump to netlink the originator infos for a specific
+ * batadv_orig_dump() - Dump to netlink the originator infos for a specific
  *  outgoing interface
  * @msg: message to dump into
  * @cb: parameters for the dump
@@ -1499,6 +1561,13 @@ int batadv_orig_dump(struct sk_buff *msg, struct netlink_callback *cb)
 	return ret;
 }
 
+/**
+ * batadv_orig_hash_add_if() - Add interface to originators in orig_hash
+ * @hard_iface: hard interface to add (already slave of the soft interface)
+ * @max_if_num: new number of interfaces
+ *
+ * Return: 0 on success or negative error number in case of failure
+ */
 int batadv_orig_hash_add_if(struct batadv_hard_iface *hard_iface,
 			    int max_if_num)
 {
@@ -1534,6 +1603,13 @@ int batadv_orig_hash_add_if(struct batadv_hard_iface *hard_iface,
 	return -ENOMEM;
 }
 
+/**
+ * batadv_orig_hash_del_if() - Remove interface from originators in orig_hash
+ * @hard_iface: hard interface to remove (still slave of the soft interface)
+ * @max_if_num: new number of interfaces
+ *
+ * Return: 0 on success or negative error number in case of failure
+ */
 int batadv_orig_hash_del_if(struct batadv_hard_iface *hard_iface,
 			    int max_if_num)
 {
diff --git a/net/batman-adv/originator.h b/net/batman-adv/originator.h
index d94220a..8e543a3 100644
--- a/net/batman-adv/originator.h
+++ b/net/batman-adv/originator.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
 /* Copyright (C) 2007-2017  B.A.T.M.A.N. contributors:
  *
  * Marek Lindner, Simon Wunderlich
@@ -23,14 +24,8 @@
 #include <linux/compiler.h>
 #include <linux/if_ether.h>
 #include <linux/jhash.h>
-#include <linux/kref.h>
-#include <linux/rculist.h>
-#include <linux/rcupdate.h>
-#include <linux/stddef.h>
 #include <linux/types.h>
 
-#include "hash.h"
-
 struct netlink_callback;
 struct seq_file;
 struct sk_buff;
@@ -89,8 +84,13 @@ batadv_orig_node_vlan_get(struct batadv_orig_node *orig_node,
 			  unsigned short vid);
 void batadv_orig_node_vlan_put(struct batadv_orig_node_vlan *orig_vlan);
 
-/* hashfunction to choose an entry in a hash table of given size
- * hash algorithm from http://en.wikipedia.org/wiki/Hash_table
+/**
+ * batadv_choose_orig() - Return the index of the orig entry in the hash table
+ * @data: mac address of the originator node
+ * @size: the size of the hash table
+ *
+ * Return: the hash index where the object represented by @data should be
+ * stored.
  */
 static inline u32 batadv_choose_orig(const void *data, u32 size)
 {
@@ -100,34 +100,7 @@ static inline u32 batadv_choose_orig(const void *data, u32 size)
 	return hash % size;
 }
 
-static inline struct batadv_orig_node *
-batadv_orig_hash_find(struct batadv_priv *bat_priv, const void *data)
-{
-	struct batadv_hashtable *hash = bat_priv->orig_hash;
-	struct hlist_head *head;
-	struct batadv_orig_node *orig_node, *orig_node_tmp = NULL;
-	int index;
-
-	if (!hash)
-		return NULL;
-
-	index = batadv_choose_orig(data, hash->size);
-	head = &hash->table[index];
-
-	rcu_read_lock();
-	hlist_for_each_entry_rcu(orig_node, head, hash_entry) {
-		if (!batadv_compare_eth(orig_node, data))
-			continue;
-
-		if (!kref_get_unless_zero(&orig_node->refcount))
-			continue;
-
-		orig_node_tmp = orig_node;
-		break;
-	}
-	rcu_read_unlock();
-
-	return orig_node_tmp;
-}
+struct batadv_orig_node *
+batadv_orig_hash_find(struct batadv_priv *bat_priv, const void *data);
 
 #endif /* _NET_BATMAN_ADV_ORIGINATOR_H_ */
diff --git a/net/batman-adv/routing.c b/net/batman-adv/routing.c
index 40d9bf3..b6891e8 100644
--- a/net/batman-adv/routing.c
+++ b/net/batman-adv/routing.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
 /* Copyright (C) 2007-2017  B.A.T.M.A.N. contributors:
  *
  * Marek Lindner, Simon Wunderlich
@@ -33,6 +34,7 @@
 #include <linux/skbuff.h>
 #include <linux/spinlock.h>
 #include <linux/stddef.h>
+#include <uapi/linux/batadv_packet.h>
 
 #include "bitarray.h"
 #include "bridge_loop_avoidance.h"
@@ -43,7 +45,6 @@
 #include "log.h"
 #include "network-coding.h"
 #include "originator.h"
-#include "packet.h"
 #include "send.h"
 #include "soft-interface.h"
 #include "tp_meter.h"
@@ -54,7 +55,7 @@ static int batadv_route_unicast_packet(struct sk_buff *skb,
 				       struct batadv_hard_iface *recv_if);
 
 /**
- * _batadv_update_route - set the router for this originator
+ * _batadv_update_route() - set the router for this originator
  * @bat_priv: the bat priv with all the soft interface information
  * @orig_node: orig node which is to be configured
  * @recv_if: the receive interface for which this route is set
@@ -118,7 +119,7 @@ static void _batadv_update_route(struct batadv_priv *bat_priv,
 }
 
 /**
- * batadv_update_route - set the router for this originator
+ * batadv_update_route() - set the router for this originator
  * @bat_priv: the bat priv with all the soft interface information
  * @orig_node: orig node which is to be configured
  * @recv_if: the receive interface for which this route is set
@@ -145,7 +146,7 @@ void batadv_update_route(struct batadv_priv *bat_priv,
 }
 
 /**
- * batadv_window_protected - checks whether the host restarted and is in the
+ * batadv_window_protected() - checks whether the host restarted and is in the
  *  protection time.
  * @bat_priv: the bat priv with all the soft interface information
  * @seq_num_diff: difference between the current/received sequence number and
@@ -180,6 +181,14 @@ bool batadv_window_protected(struct batadv_priv *bat_priv, s32 seq_num_diff,
 	return false;
 }
 
+/**
+ * batadv_check_management_packet() - Check preconditions for management packets
+ * @skb: incoming packet buffer
+ * @hard_iface: incoming hard interface
+ * @header_len: minimal header length of packet type
+ *
+ * Return: true when management preconditions are met, false otherwise
+ */
 bool batadv_check_management_packet(struct sk_buff *skb,
 				    struct batadv_hard_iface *hard_iface,
 				    int header_len)
@@ -212,7 +221,7 @@ bool batadv_check_management_packet(struct sk_buff *skb,
 }
 
 /**
- * batadv_recv_my_icmp_packet - receive an icmp packet locally
+ * batadv_recv_my_icmp_packet() - receive an icmp packet locally
  * @bat_priv: the bat priv with all the soft interface information
  * @skb: icmp packet to process
  *
@@ -347,6 +356,13 @@ static int batadv_recv_icmp_ttl_exceeded(struct batadv_priv *bat_priv,
 	return ret;
 }
 
+/**
+ * batadv_recv_icmp_packet() - Process incoming icmp packet
+ * @skb: incoming packet buffer
+ * @recv_if: incoming hard interface
+ *
+ * Return: NET_RX_SUCCESS on success or NET_RX_DROP in case of failure
+ */
 int batadv_recv_icmp_packet(struct sk_buff *skb,
 			    struct batadv_hard_iface *recv_if)
 {
@@ -440,7 +456,7 @@ int batadv_recv_icmp_packet(struct sk_buff *skb,
 }
 
 /**
- * batadv_check_unicast_packet - Check for malformed unicast packets
+ * batadv_check_unicast_packet() - Check for malformed unicast packets
  * @bat_priv: the bat priv with all the soft interface information
  * @skb: packet to check
  * @hdr_size: size of header to pull
@@ -478,7 +494,7 @@ static int batadv_check_unicast_packet(struct batadv_priv *bat_priv,
 }
 
 /**
- * batadv_last_bonding_get - Get last_bonding_candidate of orig_node
+ * batadv_last_bonding_get() - Get last_bonding_candidate of orig_node
  * @orig_node: originator node whose last bonding candidate should be retrieved
  *
  * Return: last bonding candidate of router or NULL if not found
@@ -501,7 +517,7 @@ batadv_last_bonding_get(struct batadv_orig_node *orig_node)
 }
 
 /**
- * batadv_last_bonding_replace - Replace last_bonding_candidate of orig_node
+ * batadv_last_bonding_replace() - Replace last_bonding_candidate of orig_node
  * @orig_node: originator node whose bonding candidates should be replaced
  * @new_candidate: new bonding candidate or NULL
  */
@@ -524,7 +540,7 @@ batadv_last_bonding_replace(struct batadv_orig_node *orig_node,
 }
 
 /**
- * batadv_find_router - find a suitable router for this originator
+ * batadv_find_router() - find a suitable router for this originator
  * @bat_priv: the bat priv with all the soft interface information
  * @orig_node: the destination node
  * @recv_if: pointer to interface this packet was received on
@@ -741,7 +757,7 @@ static int batadv_route_unicast_packet(struct sk_buff *skb,
 }
 
 /**
- * batadv_reroute_unicast_packet - update the unicast header for re-routing
+ * batadv_reroute_unicast_packet() - update the unicast header for re-routing
  * @bat_priv: the bat priv with all the soft interface information
  * @unicast_packet: the unicast header to be updated
  * @dst_addr: the payload destination
@@ -904,7 +920,7 @@ static bool batadv_check_unicast_ttvn(struct batadv_priv *bat_priv,
 }
 
 /**
- * batadv_recv_unhandled_unicast_packet - receive and process packets which
+ * batadv_recv_unhandled_unicast_packet() - receive and process packets which
  *	are in the unicast number space but not yet known to the implementation
  * @skb: unicast tvlv packet to process
  * @recv_if: pointer to interface this packet was received on
@@ -935,6 +951,13 @@ int batadv_recv_unhandled_unicast_packet(struct sk_buff *skb,
 	return NET_RX_DROP;
 }
 
+/**
+ * batadv_recv_unicast_packet() - Process incoming unicast packet
+ * @skb: incoming packet buffer
+ * @recv_if: incoming hard interface
+ *
+ * Return: NET_RX_SUCCESS on success or NET_RX_DROP in case of failure
+ */
 int batadv_recv_unicast_packet(struct sk_buff *skb,
 			       struct batadv_hard_iface *recv_if)
 {
@@ -1036,7 +1059,7 @@ int batadv_recv_unicast_packet(struct sk_buff *skb,
 }
 
 /**
- * batadv_recv_unicast_tvlv - receive and process unicast tvlv packets
+ * batadv_recv_unicast_tvlv() - receive and process unicast tvlv packets
  * @skb: unicast tvlv packet to process
  * @recv_if: pointer to interface this packet was received on
  *
@@ -1090,7 +1113,7 @@ int batadv_recv_unicast_tvlv(struct sk_buff *skb,
 }
 
 /**
- * batadv_recv_frag_packet - process received fragment
+ * batadv_recv_frag_packet() - process received fragment
  * @skb: the received fragment
  * @recv_if: interface that the skb is received on
  *
@@ -1155,6 +1178,13 @@ int batadv_recv_frag_packet(struct sk_buff *skb,
 	return ret;
 }
 
+/**
+ * batadv_recv_bcast_packet() - Process incoming broadcast packet
+ * @skb: incoming packet buffer
+ * @recv_if: incoming hard interface
+ *
+ * Return: NET_RX_SUCCESS on success or NET_RX_DROP in case of failure
+ */
 int batadv_recv_bcast_packet(struct sk_buff *skb,
 			     struct batadv_hard_iface *recv_if)
 {
diff --git a/net/batman-adv/routing.h b/net/batman-adv/routing.h
index 5ede16c..a1289bc 100644
--- a/net/batman-adv/routing.h
+++ b/net/batman-adv/routing.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
 /* Copyright (C) 2007-2017  B.A.T.M.A.N. contributors:
  *
  * Marek Lindner, Simon Wunderlich
diff --git a/net/batman-adv/send.c b/net/batman-adv/send.c
index 7895323..2a5ab6f 100644
--- a/net/batman-adv/send.c
+++ b/net/batman-adv/send.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
 /* Copyright (C) 2007-2017  B.A.T.M.A.N. contributors:
  *
  * Marek Lindner, Simon Wunderlich
@@ -23,7 +24,7 @@
 #include <linux/byteorder/generic.h>
 #include <linux/errno.h>
 #include <linux/etherdevice.h>
-#include <linux/fs.h>
+#include <linux/gfp.h>
 #include <linux/if.h>
 #include <linux/if_ether.h>
 #include <linux/jiffies.h>
@@ -54,7 +55,7 @@
 static void batadv_send_outstanding_bcast_packet(struct work_struct *work);
 
 /**
- * batadv_send_skb_packet - send an already prepared packet
+ * batadv_send_skb_packet() - send an already prepared packet
  * @skb: the packet to send
  * @hard_iface: the interface to use to send the broadcast packet
  * @dst_addr: the payload destination
@@ -123,12 +124,30 @@ int batadv_send_skb_packet(struct sk_buff *skb,
 	return NET_XMIT_DROP;
 }
 
+/**
+ * batadv_send_broadcast_skb() - Send broadcast packet via hard interface
+ * @skb: packet to be transmitted (with batadv header and no outer eth header)
+ * @hard_iface: outgoing interface
+ *
+ * Return: A negative errno code is returned on a failure. A success does not
+ * guarantee the frame will be transmitted as it may be dropped due
+ * to congestion or traffic shaping.
+ */
 int batadv_send_broadcast_skb(struct sk_buff *skb,
 			      struct batadv_hard_iface *hard_iface)
 {
 	return batadv_send_skb_packet(skb, hard_iface, batadv_broadcast_addr);
 }
 
+/**
+ * batadv_send_unicast_skb() - Send unicast packet to neighbor
+ * @skb: packet to be transmitted (with batadv header and no outer eth header)
+ * @neigh: neighbor which is used as next hop to destination
+ *
+ * Return: A negative errno code is returned on a failure. A success does not
+ * guarantee the frame will be transmitted as it may be dropped due
+ * to congestion or traffic shaping.
+ */
 int batadv_send_unicast_skb(struct sk_buff *skb,
 			    struct batadv_neigh_node *neigh)
 {
@@ -153,7 +172,7 @@ int batadv_send_unicast_skb(struct sk_buff *skb,
 }
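
Given the return convention documented above, callers should treat only
negative values as hard failures; a non-negative return merely means the frame
was handed to the lower layer. A hedged caller-side sketch:

	int ret;

	ret = batadv_send_unicast_skb(skb, neigh);
	if (ret < 0) {
		/* the skb could not be handed to the driver at all */
		return ret;
	}

	/* queued for transmission; delivery is still not guaranteed */
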
 
 /**
- * batadv_send_skb_to_orig - Lookup next-hop and transmit skb.
+ * batadv_send_skb_to_orig() - Lookup next-hop and transmit skb.
  * @skb: Packet to be transmitted.
  * @orig_node: Final destination of the packet.
  * @recv_if: Interface used when receiving the packet (can be NULL).
@@ -216,7 +235,7 @@ int batadv_send_skb_to_orig(struct sk_buff *skb,
 }
 
 /**
- * batadv_send_skb_push_fill_unicast - extend the buffer and initialize the
+ * batadv_send_skb_push_fill_unicast() - extend the buffer and initialize the
  *  common fields for unicast packets
  * @skb: the skb carrying the unicast header to initialize
  * @hdr_size: amount of bytes to push at the beginning of the skb
@@ -249,7 +268,7 @@ batadv_send_skb_push_fill_unicast(struct sk_buff *skb, int hdr_size,
 }
 
 /**
- * batadv_send_skb_prepare_unicast - encapsulate an skb with a unicast header
+ * batadv_send_skb_prepare_unicast() - encapsulate an skb with a unicast header
  * @skb: the skb containing the payload to encapsulate
  * @orig_node: the destination node
  *
@@ -264,7 +283,7 @@ static bool batadv_send_skb_prepare_unicast(struct sk_buff *skb,
 }
 
 /**
- * batadv_send_skb_prepare_unicast_4addr - encapsulate an skb with a
+ * batadv_send_skb_prepare_unicast_4addr() - encapsulate an skb with a
  *  unicast 4addr header
  * @bat_priv: the bat priv with all the soft interface information
  * @skb: the skb containing the payload to encapsulate
@@ -308,7 +327,7 @@ bool batadv_send_skb_prepare_unicast_4addr(struct batadv_priv *bat_priv,
 }
 
 /**
- * batadv_send_skb_unicast - encapsulate and send an skb via unicast
+ * batadv_send_skb_unicast() - encapsulate and send an skb via unicast
  * @bat_priv: the bat priv with all the soft interface information
  * @skb: payload to send
  * @packet_type: the batman unicast packet type to use
@@ -378,7 +397,7 @@ int batadv_send_skb_unicast(struct batadv_priv *bat_priv,
 }
 
 /**
- * batadv_send_skb_via_tt_generic - send an skb via TT lookup
+ * batadv_send_skb_via_tt_generic() - send an skb via TT lookup
  * @bat_priv: the bat priv with all the soft interface information
  * @skb: payload to send
  * @packet_type: the batman unicast packet type to use
@@ -425,7 +444,7 @@ int batadv_send_skb_via_tt_generic(struct batadv_priv *bat_priv,
 }
 
 /**
- * batadv_send_skb_via_gw - send an skb via gateway lookup
+ * batadv_send_skb_via_gw() - send an skb via gateway lookup
  * @bat_priv: the bat priv with all the soft interface information
  * @skb: payload to send
  * @vid: the vid to be used to search the translation table
@@ -452,7 +471,7 @@ int batadv_send_skb_via_gw(struct batadv_priv *bat_priv, struct sk_buff *skb,
 }
 
 /**
- * batadv_forw_packet_free - free a forwarding packet
+ * batadv_forw_packet_free() - free a forwarding packet
  * @forw_packet: The packet to free
  * @dropped: whether the packet is freed because it is dropped
  *
@@ -477,7 +496,7 @@ void batadv_forw_packet_free(struct batadv_forw_packet *forw_packet,
 }
 
 /**
- * batadv_forw_packet_alloc - allocate a forwarding packet
+ * batadv_forw_packet_alloc() - allocate a forwarding packet
  * @if_incoming: The (optional) if_incoming to be grabbed
  * @if_outgoing: The (optional) if_outgoing to be grabbed
  * @queue_left: The (optional) queue counter to decrease
@@ -543,7 +562,7 @@ batadv_forw_packet_alloc(struct batadv_hard_iface *if_incoming,
 }
 
 /**
- * batadv_forw_packet_was_stolen - check whether someone stole this packet
+ * batadv_forw_packet_was_stolen() - check whether someone stole this packet
  * @forw_packet: the forwarding packet to check
  *
  * This function checks whether the given forwarding packet was claimed by
@@ -558,7 +577,7 @@ batadv_forw_packet_was_stolen(struct batadv_forw_packet *forw_packet)
 }
 
 /**
- * batadv_forw_packet_steal - claim a forw_packet for free()
+ * batadv_forw_packet_steal() - claim a forw_packet for free()
  * @forw_packet: the forwarding packet to steal
  * @lock: a key to the store to steal from (e.g. forw_{bat,bcast}_list_lock)
  *
@@ -589,7 +608,7 @@ bool batadv_forw_packet_steal(struct batadv_forw_packet *forw_packet,
 }
 
 /**
- * batadv_forw_packet_list_steal - claim a list of forward packets for free()
+ * batadv_forw_packet_list_steal() - claim a list of forward packets for free()
  * @forw_list: the to be stolen forward packets
  * @cleanup_list: a backup pointer, to be able to dispose the packet later
  * @hard_iface: the interface to steal forward packets from
@@ -625,7 +644,7 @@ batadv_forw_packet_list_steal(struct hlist_head *forw_list,
 }
 
 /**
- * batadv_forw_packet_list_free - free a list of forward packets
+ * batadv_forw_packet_list_free() - free a list of forward packets
  * @head: a list of to be freed forw_packets
  *
  * This function cancels the scheduling of any packet in the provided list,
@@ -649,7 +668,7 @@ static void batadv_forw_packet_list_free(struct hlist_head *head)
 }
 
 /**
- * batadv_forw_packet_queue - try to queue a forwarding packet
+ * batadv_forw_packet_queue() - try to queue a forwarding packet
  * @forw_packet: the forwarding packet to queue
  * @lock: a key to the store (e.g. forw_{bat,bcast}_list_lock)
  * @head: the shelve to queue it on (e.g. forw_{bat,bcast}_list)
@@ -693,7 +712,7 @@ static void batadv_forw_packet_queue(struct batadv_forw_packet *forw_packet,
 }
 
 /**
- * batadv_forw_packet_bcast_queue - try to queue a broadcast packet
+ * batadv_forw_packet_bcast_queue() - try to queue a broadcast packet
  * @bat_priv: the bat priv with all the soft interface information
  * @forw_packet: the forwarding packet to queue
  * @send_time: timestamp (jiffies) when the packet is to be sent
@@ -712,7 +731,7 @@ batadv_forw_packet_bcast_queue(struct batadv_priv *bat_priv,
 }
 
 /**
- * batadv_forw_packet_ogmv1_queue - try to queue an OGMv1 packet
+ * batadv_forw_packet_ogmv1_queue() - try to queue an OGMv1 packet
  * @bat_priv: the bat priv with all the soft interface information
  * @forw_packet: the forwarding packet to queue
  * @send_time: timestamp (jiffies) when the packet is to be sent
@@ -730,7 +749,7 @@ void batadv_forw_packet_ogmv1_queue(struct batadv_priv *bat_priv,
 }
 
 /**
- * batadv_add_bcast_packet_to_list - queue broadcast packet for multiple sends
+ * batadv_add_bcast_packet_to_list() - queue broadcast packet for multiple sends
  * @bat_priv: the bat priv with all the soft interface information
  * @skb: broadcast packet to add
  * @delay: number of jiffies to wait before sending
@@ -790,7 +809,7 @@ int batadv_add_bcast_packet_to_list(struct batadv_priv *bat_priv,
 }
 
 /**
- * batadv_forw_packet_bcasts_left - check if a retransmission is necessary
+ * batadv_forw_packet_bcasts_left() - check if a retransmission is necessary
  * @forw_packet: the forwarding packet to check
  * @hard_iface: the interface to check on
  *
@@ -818,7 +837,8 @@ batadv_forw_packet_bcasts_left(struct batadv_forw_packet *forw_packet,
 }
 
 /**
- * batadv_forw_packet_bcasts_inc - increment retransmission counter of a packet
+ * batadv_forw_packet_bcasts_inc() - increment retransmission counter of a
+ *  packet
  * @forw_packet: the packet to increase the counter for
  */
 static void
@@ -828,7 +848,7 @@ batadv_forw_packet_bcasts_inc(struct batadv_forw_packet *forw_packet)
 }
 
 /**
- * batadv_forw_packet_is_rebroadcast - check packet for previous transmissions
+ * batadv_forw_packet_is_rebroadcast() - check packet for previous transmissions
  * @forw_packet: the packet to check
  *
  * Return: True if this packet was transmitted before, false otherwise.
@@ -953,7 +973,7 @@ static void batadv_send_outstanding_bcast_packet(struct work_struct *work)
 }
 
 /**
- * batadv_purge_outstanding_packets - stop/purge scheduled bcast/OGMv1 packets
+ * batadv_purge_outstanding_packets() - stop/purge scheduled bcast/OGMv1 packets
  * @bat_priv: the bat priv with all the soft interface information
  * @hard_iface: the hard interface to cancel and purge bcast/ogm packets on
  *
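
The hunks above all make one mechanical change: kernel-doc comments now name
functions as "name()", which is what scripts/kernel-doc expects in order to
recognise a comment as function documentation. As a minimal sketch of the
resulting layout, using an invented function that is not part of batman-adv:

/**
 * example_queue_delayed() - queue a packet for delayed transmission
 * @pkt: the packet to queue
 * @delay: number of jiffies to wait before sending
 *
 * Return: 0 on success or a negative error number in case of failure
 */
static int example_queue_delayed(struct sk_buff *pkt, unsigned long delay)
{
        if (!pkt)
                return -EINVAL;
        /* hand @pkt to a delayed worker here */
        return 0;
}

Summaries that no longer fit on one line wrap with a two-space continuation
indent, as in the batadv_forw_packet_bcasts_inc() hunk above.
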
diff --git a/net/batman-adv/send.h b/net/batman-adv/send.h
index a16b34f..1e8c790 100644
--- a/net/batman-adv/send.h
+++ b/net/batman-adv/send.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
 /* Copyright (C) 2007-2017  B.A.T.M.A.N. contributors:
  *
  * Marek Lindner, Simon Wunderlich
@@ -23,8 +24,7 @@
 #include <linux/compiler.h>
 #include <linux/spinlock.h>
 #include <linux/types.h>
-
-#include "packet.h"
+#include <uapi/linux/batadv_packet.h>
 
 struct sk_buff;
 
@@ -76,7 +76,7 @@ int batadv_send_skb_via_gw(struct batadv_priv *bat_priv, struct sk_buff *skb,
 			   unsigned short vid);
 
 /**
- * batadv_send_skb_via_tt - send an skb via TT lookup
+ * batadv_send_skb_via_tt() - send an skb via TT lookup
  * @bat_priv: the bat priv with all the soft interface information
  * @skb: the payload to send
  * @dst_hint: can be used to override the destination contained in the skb
@@ -97,7 +97,7 @@ static inline int batadv_send_skb_via_tt(struct batadv_priv *bat_priv,
 }
 
 /**
- * batadv_send_skb_via_tt_4addr - send an skb via TT lookup
+ * batadv_send_skb_via_tt_4addr() - send an skb via TT lookup
  * @bat_priv: the bat priv with all the soft interface information
  * @skb: the payload to send
  * @packet_subtype: the unicast 4addr packet subtype to use
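
send.h now takes its packet definitions from <uapi/linux/batadv_packet.h>
instead of the private "packet.h". Headers exported under uapi describe the
on-wire format for userspace as well, so they stick to fixed-width types and
userspace-safe attribute spellings. A hypothetical sketch of that shape (the
struct and file name are invented, not the contents of batadv_packet.h):

#ifndef _UAPI_LINUX_EXAMPLE_PACKET_H_
#define _UAPI_LINUX_EXAMPLE_PACKET_H_

#include <linux/types.h>

/* fixed-width, endian-annotated types keep the layout identical for the
 * kernel and for userspace tools parsing the same wire format
 */
struct example_packet {
        __u8   packet_type;
        __u8   version;
        __be16 seqno;
} __attribute__((__packed__));

#endif /* _UAPI_LINUX_EXAMPLE_PACKET_H_ */

Note the spelled-out __attribute__((__packed__)): the kernel-only __packed
shorthand is not defined for userspace builds.
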
diff --git a/net/batman-adv/soft-interface.c b/net/batman-adv/soft-interface.c
index 9f673cd..900c5ce 100644
--- a/net/batman-adv/soft-interface.c
+++ b/net/batman-adv/soft-interface.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
 /* Copyright (C) 2007-2017  B.A.T.M.A.N. contributors:
  *
  * Marek Lindner, Simon Wunderlich
@@ -26,7 +27,7 @@
 #include <linux/errno.h>
 #include <linux/etherdevice.h>
 #include <linux/ethtool.h>
-#include <linux/fs.h>
+#include <linux/gfp.h>
 #include <linux/if_ether.h>
 #include <linux/if_vlan.h>
 #include <linux/jiffies.h>
@@ -48,6 +49,7 @@
 #include <linux/stddef.h>
 #include <linux/string.h>
 #include <linux/types.h>
+#include <uapi/linux/batadv_packet.h>
 
 #include "bat_algo.h"
 #include "bridge_loop_avoidance.h"
@@ -59,11 +61,17 @@
 #include "multicast.h"
 #include "network-coding.h"
 #include "originator.h"
-#include "packet.h"
 #include "send.h"
 #include "sysfs.h"
 #include "translation-table.h"
 
+/**
+ * batadv_skb_head_push() - Increase header size and move (push) head pointer
+ * @skb: packet buffer which should be modified
+ * @len: number of bytes to add
+ *
+ * Return: 0 on success or negative error number in case of failure
+ */
 int batadv_skb_head_push(struct sk_buff *skb, unsigned int len)
 {
 	int result;
@@ -96,7 +104,7 @@ static int batadv_interface_release(struct net_device *dev)
 }
 
 /**
- * batadv_sum_counter - Sum the cpu-local counters for index 'idx'
+ * batadv_sum_counter() - Sum the cpu-local counters for index 'idx'
  * @bat_priv: the bat priv with all the soft interface information
  * @idx: index of counter to sum up
  *
@@ -169,7 +177,7 @@ static int batadv_interface_change_mtu(struct net_device *dev, int new_mtu)
 }
 
 /**
- * batadv_interface_set_rx_mode - set the rx mode of a device
+ * batadv_interface_set_rx_mode() - set the rx mode of a device
  * @dev: registered network device to modify
  *
  * We do not actually need to set any rx filters for the virtual batman
@@ -389,7 +397,7 @@ static int batadv_interface_tx(struct sk_buff *skb,
 }
 
 /**
- * batadv_interface_rx - receive ethernet frame on local batman-adv interface
+ * batadv_interface_rx() - receive ethernet frame on local batman-adv interface
  * @soft_iface: local interface which will receive the ethernet frame
  * @skb: ethernet frame for @soft_iface
  * @hdr_size: size of already parsed batman-adv header
@@ -501,8 +509,8 @@ void batadv_interface_rx(struct net_device *soft_iface,
 }
 
 /**
- * batadv_softif_vlan_release - release vlan from lists and queue for free after
- *  rcu grace period
+ * batadv_softif_vlan_release() - release vlan from lists and queue for free
+ *  after rcu grace period
  * @ref: kref pointer of the vlan object
  */
 static void batadv_softif_vlan_release(struct kref *ref)
@@ -519,7 +527,7 @@ static void batadv_softif_vlan_release(struct kref *ref)
 }
 
 /**
- * batadv_softif_vlan_put - decrease the vlan object refcounter and
+ * batadv_softif_vlan_put() - decrease the vlan object refcounter and
  *  possibly release it
  * @vlan: the vlan object to release
  */
@@ -532,7 +540,7 @@ void batadv_softif_vlan_put(struct batadv_softif_vlan *vlan)
 }
 
 /**
- * batadv_softif_vlan_get - get the vlan object for a specific vid
+ * batadv_softif_vlan_get() - get the vlan object for a specific vid
  * @bat_priv: the bat priv with all the soft interface information
  * @vid: the identifier of the vlan object to retrieve
  *
@@ -561,7 +569,7 @@ struct batadv_softif_vlan *batadv_softif_vlan_get(struct batadv_priv *bat_priv,
 }
 
 /**
- * batadv_softif_create_vlan - allocate the needed resources for a new vlan
+ * batadv_softif_create_vlan() - allocate the needed resources for a new vlan
  * @bat_priv: the bat priv with all the soft interface information
  * @vid: the VLAN identifier
  *
@@ -613,7 +621,7 @@ int batadv_softif_create_vlan(struct batadv_priv *bat_priv, unsigned short vid)
 }
 
 /**
- * batadv_softif_destroy_vlan - remove and destroy a softif_vlan object
+ * batadv_softif_destroy_vlan() - remove and destroy a softif_vlan object
  * @bat_priv: the bat priv with all the soft interface information
  * @vlan: the object to remove
  */
@@ -631,7 +639,7 @@ static void batadv_softif_destroy_vlan(struct batadv_priv *bat_priv,
 }
 
 /**
- * batadv_interface_add_vid - ndo_add_vid API implementation
+ * batadv_interface_add_vid() - ndo_add_vid API implementation
  * @dev: the netdev of the mesh interface
  * @proto: protocol of the vlan id
  * @vid: identifier of the new vlan
@@ -689,7 +697,7 @@ static int batadv_interface_add_vid(struct net_device *dev, __be16 proto,
 }
 
 /**
- * batadv_interface_kill_vid - ndo_kill_vid API implementation
+ * batadv_interface_kill_vid() - ndo_kill_vid API implementation
  * @dev: the netdev of the mesh interface
  * @proto: protocol of the vlan id
  * @vid: identifier of the deleted vlan
@@ -732,7 +740,7 @@ static struct lock_class_key batadv_netdev_xmit_lock_key;
 static struct lock_class_key batadv_netdev_addr_lock_key;
 
 /**
- * batadv_set_lockdep_class_one - Set lockdep class for a single tx queue
+ * batadv_set_lockdep_class_one() - Set lockdep class for a single tx queue
  * @dev: device which owns the tx queue
  * @txq: tx queue to modify
  * @_unused: always NULL
@@ -745,7 +753,7 @@ static void batadv_set_lockdep_class_one(struct net_device *dev,
 }
 
 /**
- * batadv_set_lockdep_class - Set txq and addr_list lockdep class
+ * batadv_set_lockdep_class() - Set txq and addr_list lockdep class
  * @dev: network device to modify
  */
 static void batadv_set_lockdep_class(struct net_device *dev)
@@ -755,7 +763,7 @@ static void batadv_set_lockdep_class(struct net_device *dev)
 }
 
 /**
- * batadv_softif_init_late - late stage initialization of soft interface
+ * batadv_softif_init_late() - late stage initialization of soft interface
  * @dev: registered network device to modify
  *
  * Return: error code on failures
@@ -860,7 +868,7 @@ static int batadv_softif_init_late(struct net_device *dev)
 }
 
 /**
- * batadv_softif_slave_add - Add a slave interface to a batadv_soft_interface
+ * batadv_softif_slave_add() - Add a slave interface to a batadv_soft_interface
  * @dev: batadv_soft_interface used as master interface
  * @slave_dev: net_device which should become the slave interface
  * @extack: extended ACK report struct
@@ -888,7 +896,7 @@ static int batadv_softif_slave_add(struct net_device *dev,
 }
 
 /**
- * batadv_softif_slave_del - Delete a slave iface from a batadv_soft_interface
+ * batadv_softif_slave_del() - Delete a slave iface from a batadv_soft_interface
  * @dev: batadv_soft_interface used as master interface
  * @slave_dev: net_device which should be removed from the master interface
  *
@@ -1023,7 +1031,7 @@ static const struct ethtool_ops batadv_ethtool_ops = {
 };
 
 /**
- * batadv_softif_free - Deconstructor of batadv_soft_interface
+ * batadv_softif_free() - Deconstructor of batadv_soft_interface
  * @dev: Device to cleanup and remove
  */
 static void batadv_softif_free(struct net_device *dev)
@@ -1039,7 +1047,7 @@ static void batadv_softif_free(struct net_device *dev)
 }
 
 /**
- * batadv_softif_init_early - early stage initialization of soft interface
+ * batadv_softif_init_early() - early stage initialization of soft interface
  * @dev: registered network device to modify
  */
 static void batadv_softif_init_early(struct net_device *dev)
@@ -1063,6 +1071,13 @@ static void batadv_softif_init_early(struct net_device *dev)
 	dev->ethtool_ops = &batadv_ethtool_ops;
 }
 
+/**
+ * batadv_softif_create() - Create and register soft interface
+ * @net: the applicable net namespace
+ * @name: name of the new soft interface
+ *
+ * Return: newly allocated soft_interface, NULL on errors
+ */
 struct net_device *batadv_softif_create(struct net *net, const char *name)
 {
 	struct net_device *soft_iface;
@@ -1089,7 +1104,7 @@ struct net_device *batadv_softif_create(struct net *net, const char *name)
 }
 
 /**
- * batadv_softif_destroy_sysfs - deletion of batadv_soft_interface via sysfs
+ * batadv_softif_destroy_sysfs() - deletion of batadv_soft_interface via sysfs
  * @soft_iface: the to-be-removed batman-adv interface
  */
 void batadv_softif_destroy_sysfs(struct net_device *soft_iface)
@@ -1111,7 +1126,8 @@ void batadv_softif_destroy_sysfs(struct net_device *soft_iface)
 }
 
 /**
- * batadv_softif_destroy_netlink - deletion of batadv_soft_interface via netlink
+ * batadv_softif_destroy_netlink() - deletion of batadv_soft_interface via
+ *  netlink
  * @soft_iface: the to-be-removed batman-adv interface
  * @head: list pointer
  */
@@ -1139,6 +1155,12 @@ static void batadv_softif_destroy_netlink(struct net_device *soft_iface,
 	unregister_netdevice_queue(soft_iface, head);
 }
 
+/**
+ * batadv_softif_is_valid() - Check whether device is a batadv soft interface
+ * @net_dev: device which should be checked
+ *
+ * Return: true when net_dev is a batman-adv interface, false otherwise
+ */
 bool batadv_softif_is_valid(const struct net_device *net_dev)
 {
 	if (net_dev->netdev_ops->ndo_start_xmit == batadv_interface_tx)
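
batadv_sum_counter(), documented in a hunk above, folds cpu-local counters
into a single value. A minimal sketch of that per-CPU pattern, with invented
names (this is not the batman-adv implementation):

#include <linux/cpumask.h>
#include <linux/percpu.h>

static u64 example_sum_counter(u64 __percpu *counters)
{
        u64 sum = 0;
        int cpu;

        /* each CPU writes only its own slot, so the reader can walk
         * all possible CPUs without taking a lock
         */
        for_each_possible_cpu(cpu)
                sum += *per_cpu_ptr(counters, cpu);

        return sum;
}
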
diff --git a/net/batman-adv/soft-interface.h b/net/batman-adv/soft-interface.h
index 639c3abb..075c5b5 100644
--- a/net/batman-adv/soft-interface.h
+++ b/net/batman-adv/soft-interface.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
 /* Copyright (C) 2007-2017  B.A.T.M.A.N. contributors:
  *
  * Marek Lindner
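
The SPDX tags added throughout this series follow the kernel's license-rules
convention: .c sources carry the tag in a C++-style comment on line 1, while
headers keep a C-style comment, since headers can be pulled into contexts
(e.g. assembly) where "//" comments are not accepted.

In a .c source file:

// SPDX-License-Identifier: GPL-2.0

In a .h header file:

/* SPDX-License-Identifier: GPL-2.0 */
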
diff --git a/net/batman-adv/sysfs.c b/net/batman-adv/sysfs.c
index aa187fd..c1578fa 100644
--- a/net/batman-adv/sysfs.c
+++ b/net/batman-adv/sysfs.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
 /* Copyright (C) 2010-2017  B.A.T.M.A.N. contributors:
  *
  * Marek Lindner
@@ -22,10 +23,11 @@
 #include <linux/compiler.h>
 #include <linux/device.h>
 #include <linux/errno.h>
-#include <linux/fs.h>
+#include <linux/gfp.h>
 #include <linux/if.h>
 #include <linux/if_vlan.h>
 #include <linux/kernel.h>
+#include <linux/kobject.h>
 #include <linux/kref.h>
 #include <linux/netdevice.h>
 #include <linux/printk.h>
@@ -37,6 +39,7 @@
 #include <linux/string.h>
 #include <linux/stringify.h>
 #include <linux/workqueue.h>
+#include <uapi/linux/batadv_packet.h>
 
 #include "bridge_loop_avoidance.h"
 #include "distributed-arp-table.h"
@@ -45,7 +48,6 @@
 #include "hard-interface.h"
 #include "log.h"
 #include "network-coding.h"
-#include "packet.h"
 #include "soft-interface.h"
 
 static struct net_device *batadv_kobj_to_netdev(struct kobject *obj)
@@ -63,7 +65,7 @@ static struct batadv_priv *batadv_kobj_to_batpriv(struct kobject *obj)
 }
 
 /**
- * batadv_vlan_kobj_to_batpriv - convert a vlan kobj in the associated batpriv
+ * batadv_vlan_kobj_to_batpriv() - convert a vlan kobj to the associated batpriv
  * @obj: kobject to convert
  *
  * Return: the associated batadv_priv struct.
@@ -83,7 +85,7 @@ static struct batadv_priv *batadv_vlan_kobj_to_batpriv(struct kobject *obj)
 }
 
 /**
- * batadv_kobj_to_vlan - convert a kobj in the associated softif_vlan struct
+ * batadv_kobj_to_vlan() - convert a kobj to the associated softif_vlan struct
  * @bat_priv: the bat priv with all the soft interface information
  * @obj: kobject to convert
  *
@@ -598,7 +600,7 @@ static ssize_t batadv_store_gw_bwidth(struct kobject *kobj,
 }
 
 /**
- * batadv_show_isolation_mark - print the current isolation mark/mask
+ * batadv_show_isolation_mark() - print the current isolation mark/mask
  * @kobj: kobject representing the private mesh sysfs directory
  * @attr: the batman-adv attribute the user is interacting with
  * @buff: the buffer that will contain the data to send back to the user
@@ -616,8 +618,8 @@ static ssize_t batadv_show_isolation_mark(struct kobject *kobj,
 }
 
 /**
- * batadv_store_isolation_mark - parse and store the isolation mark/mask entered
- *  by the user
+ * batadv_store_isolation_mark() - parse and store the isolation mark/mask
+ *  entered by the user
  * @kobj: kobject representing the private mesh sysfs directory
  * @attr: the batman-adv attribute the user is interacting with
  * @buff: the buffer containing the user data
@@ -733,6 +735,12 @@ static struct batadv_attribute *batadv_vlan_attrs[] = {
 	NULL,
 };
 
+/**
+ * batadv_sysfs_add_meshif() - Add soft interface specific sysfs entries
+ * @dev: netdev struct of the soft interface
+ *
+ * Return: 0 on success or negative error number in case of failure
+ */
 int batadv_sysfs_add_meshif(struct net_device *dev)
 {
 	struct kobject *batif_kobject = &dev->dev.kobj;
@@ -773,6 +781,10 @@ int batadv_sysfs_add_meshif(struct net_device *dev)
 	return -ENOMEM;
 }
 
+/**
+ * batadv_sysfs_del_meshif() - Remove soft interface specific sysfs entries
+ * @dev: netdev struct of the soft interface
+ */
 void batadv_sysfs_del_meshif(struct net_device *dev)
 {
 	struct batadv_priv *bat_priv = netdev_priv(dev);
@@ -788,7 +800,7 @@ void batadv_sysfs_del_meshif(struct net_device *dev)
 }
 
 /**
- * batadv_sysfs_add_vlan - add all the needed sysfs objects for the new vlan
+ * batadv_sysfs_add_vlan() - add all the needed sysfs objects for the new vlan
  * @dev: netdev of the mesh interface
  * @vlan: private data of the newly added VLAN interface
  *
@@ -849,7 +861,7 @@ int batadv_sysfs_add_vlan(struct net_device *dev,
 }
 
 /**
- * batadv_sysfs_del_vlan - remove all the sysfs objects for a given VLAN
+ * batadv_sysfs_del_vlan() - remove all the sysfs objects for a given VLAN
  * @bat_priv: the bat priv with all the soft interface information
  * @vlan: the private data of the VLAN to destroy
  */
@@ -894,7 +906,7 @@ static ssize_t batadv_show_mesh_iface(struct kobject *kobj,
 }
 
 /**
- * batadv_store_mesh_iface_finish - store new hardif mesh_iface state
+ * batadv_store_mesh_iface_finish() - store new hardif mesh_iface state
  * @net_dev: netdevice to add/remove to/from batman-adv soft-interface
  * @ifname: name of soft-interface to modify
  *
@@ -947,7 +959,7 @@ static int batadv_store_mesh_iface_finish(struct net_device *net_dev,
 }
 
 /**
- * batadv_store_mesh_iface_work - store new hardif mesh_iface state
+ * batadv_store_mesh_iface_work() - store new hardif mesh_iface state
  * @work: work queue item
  *
  * Changes the parts of the hard+soft interface which cannot be modified under
@@ -1043,7 +1055,7 @@ static ssize_t batadv_show_iface_status(struct kobject *kobj,
 #ifdef CONFIG_BATMAN_ADV_BATMAN_V
 
 /**
- * batadv_store_throughput_override - parse and store throughput override
+ * batadv_store_throughput_override() - parse and store throughput override
  *  entered by the user
  * @kobj: kobject representing the private mesh sysfs directory
  * @attr: the batman-adv attribute the user is interacting with
@@ -1130,6 +1142,13 @@ static struct batadv_attribute *batadv_batman_attrs[] = {
 	NULL,
 };
 
+/**
+ * batadv_sysfs_add_hardif() - Add hard interface specific sysfs entries
+ * @hardif_obj: address where to store the pointer to new sysfs folder
+ * @dev: netdev struct of the hard interface
+ *
+ * Return: 0 on success or negative error number in case of failure
+ */
 int batadv_sysfs_add_hardif(struct kobject **hardif_obj, struct net_device *dev)
 {
 	struct kobject *hardif_kobject = &dev->dev.kobj;
@@ -1164,6 +1183,11 @@ int batadv_sysfs_add_hardif(struct kobject **hardif_obj, struct net_device *dev)
 	return -ENOMEM;
 }
 
+/**
+ * batadv_sysfs_del_hardif() - Remove hard interface specific sysfs entries
+ * @hardif_obj: address of the pointer which stores the batman-adv sysfs
+ *  folder of the hard interface
+ */
 void batadv_sysfs_del_hardif(struct kobject **hardif_obj)
 {
 	kobject_uevent(*hardif_obj, KOBJ_REMOVE);
@@ -1172,6 +1196,16 @@ void batadv_sysfs_del_hardif(struct kobject **hardif_obj)
 	*hardif_obj = NULL;
 }
 
+/**
+ * batadv_throw_uevent() - Send an uevent with batman-adv specific env data
+ * @bat_priv: the bat priv with all the soft interface information
+ * @type: subsystem type of event. Stored in uevent's BATTYPE
+ * @action: action type of event. Stored in uevent's BATACTION
+ * @data: string with additional information about the event (ignored for
+ *  BATADV_UEV_DEL). Stored in uevent's BATDATA
+ *
+ * Return: 0 on success or negative error number in case of failure
+ */
 int batadv_throw_uevent(struct batadv_priv *bat_priv, enum batadv_uev_type type,
 			enum batadv_uev_action action, const char *data)
 {
diff --git a/net/batman-adv/sysfs.h b/net/batman-adv/sysfs.h
index e487412..bbeee61 100644
--- a/net/batman-adv/sysfs.h
+++ b/net/batman-adv/sysfs.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
 /* Copyright (C) 2010-2017  B.A.T.M.A.N. contributors:
  *
  * Marek Lindner
@@ -35,10 +36,23 @@ struct net_device;
  */
 #define BATADV_SYSFS_VLAN_SUBDIR_PREFIX "vlan"
 
+/**
+ * struct batadv_attribute - sysfs export helper for batman-adv attributes
+ */
 struct batadv_attribute {
+	/** @attr: sysfs attribute file */
 	struct attribute attr;
+
+	/**
+	 * @show: function to export the current attribute's content to sysfs
+	 */
 	ssize_t (*show)(struct kobject *kobj, struct attribute *attr,
 			char *buf);
+
+	/**
+	 * @store: function to load new value from character buffer and save it
+	 * in batman-adv attribute
+	 */
 	ssize_t (*store)(struct kobject *kobj, struct attribute *attr,
 			 char *buf, size_t count);
 };
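
The batadv_attribute hunk above shows the struct side of the kernel-doc
rework: members move from a single "@member:" list in the struct's header
comment to inline "/** @member: ... */" comments next to each declaration.
Reduced to a toy example:

/**
 * struct example_attribute - a structure with inline member docs
 */
struct example_attribute {
        /** @id: short members fit the single-line form */
        int id;

        /**
         * @name: longer descriptions use the block form, as the
         * @show and @store members do above
         */
        const char *name;
};
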
diff --git a/net/batman-adv/tp_meter.c b/net/batman-adv/tp_meter.c
index ebc4e22..8b57671 100644
--- a/net/batman-adv/tp_meter.c
+++ b/net/batman-adv/tp_meter.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
 /* Copyright (C) 2012-2017  B.A.T.M.A.N. contributors:
  *
  * Edo Monticelli, Antonio Quartulli
@@ -19,13 +20,13 @@
 #include "main.h"
 
 #include <linux/atomic.h>
-#include <linux/bug.h>
+#include <linux/build_bug.h>
 #include <linux/byteorder/generic.h>
 #include <linux/cache.h>
 #include <linux/compiler.h>
 #include <linux/err.h>
 #include <linux/etherdevice.h>
-#include <linux/fs.h>
+#include <linux/gfp.h>
 #include <linux/if_ether.h>
 #include <linux/init.h>
 #include <linux/jiffies.h>
@@ -48,13 +49,13 @@
 #include <linux/timer.h>
 #include <linux/wait.h>
 #include <linux/workqueue.h>
+#include <uapi/linux/batadv_packet.h>
 #include <uapi/linux/batman_adv.h>
 
 #include "hard-interface.h"
 #include "log.h"
 #include "netlink.h"
 #include "originator.h"
-#include "packet.h"
 #include "send.h"
 
 /**
@@ -97,7 +98,7 @@
 static u8 batadv_tp_prerandom[4096] __read_mostly;
 
 /**
- * batadv_tp_session_cookie - generate session cookie based on session ids
+ * batadv_tp_session_cookie() - generate session cookie based on session ids
  * @session: TP session identifier
  * @icmp_uid: icmp pseudo uid of the tp session
  *
@@ -115,7 +116,7 @@ static u32 batadv_tp_session_cookie(const u8 session[2], u8 icmp_uid)
 }
 
 /**
- * batadv_tp_cwnd - compute the new cwnd size
+ * batadv_tp_cwnd() - compute the new cwnd size
  * @base: base cwnd size value
  * @increment: the value to add to base to get the new size
  * @min: minimum cwnd value (usually MSS)
@@ -140,7 +141,7 @@ static u32 batadv_tp_cwnd(u32 base, u32 increment, u32 min)
 }
 
 /**
- * batadv_tp_updated_cwnd - update the Congestion Windows
+ * batadv_tp_update_cwnd() - update the congestion window
  * @tp_vars: the private data of the current TP meter session
  * @mss: maximum segment size of transmission
  *
@@ -176,7 +177,7 @@ static void batadv_tp_update_cwnd(struct batadv_tp_vars *tp_vars, u32 mss)
 }
 
 /**
- * batadv_tp_update_rto - calculate new retransmission timeout
+ * batadv_tp_update_rto() - calculate new retransmission timeout
  * @tp_vars: the private data of the current TP meter session
  * @new_rtt: new roundtrip time in msec
  */
@@ -212,7 +213,7 @@ static void batadv_tp_update_rto(struct batadv_tp_vars *tp_vars,
 }
 
 /**
- * batadv_tp_batctl_notify - send client status result to client
+ * batadv_tp_batctl_notify() - send client status result to client
  * @reason: reason for tp meter session stop
  * @dst: destination of tp_meter session
  * @bat_priv: the bat priv with all the soft interface information
@@ -244,7 +245,7 @@ static void batadv_tp_batctl_notify(enum batadv_tp_meter_reason reason,
 }
 
 /**
- * batadv_tp_batctl_error_notify - send client error result to client
+ * batadv_tp_batctl_error_notify() - send client error result to client
  * @reason: reason for tp meter session stop
  * @dst: destination of tp_meter session
  * @bat_priv: the bat priv with all the soft interface information
@@ -259,7 +260,7 @@ static void batadv_tp_batctl_error_notify(enum batadv_tp_meter_reason reason,
 }
 
 /**
- * batadv_tp_list_find - find a tp_vars object in the global list
+ * batadv_tp_list_find() - find a tp_vars object in the global list
  * @bat_priv: the bat priv with all the soft interface information
  * @dst: the other endpoint MAC address to look for
  *
@@ -294,7 +295,8 @@ static struct batadv_tp_vars *batadv_tp_list_find(struct batadv_priv *bat_priv,
 }
 
 /**
- * batadv_tp_list_find_session - find tp_vars session object in the global list
+ * batadv_tp_list_find_session() - find tp_vars session object in the global
+ *  list
  * @bat_priv: the bat priv with all the soft interface information
  * @dst: the other endpoint MAC address to look for
  * @session: session identifier
@@ -335,7 +337,7 @@ batadv_tp_list_find_session(struct batadv_priv *bat_priv, const u8 *dst,
 }
 
 /**
- * batadv_tp_vars_release - release batadv_tp_vars from lists and queue for
+ * batadv_tp_vars_release() - release batadv_tp_vars from lists and queue for
  *  free after rcu grace period
  * @ref: kref pointer of the batadv_tp_vars
  */
@@ -360,7 +362,7 @@ static void batadv_tp_vars_release(struct kref *ref)
 }
 
 /**
- * batadv_tp_vars_put - decrement the batadv_tp_vars refcounter and possibly
+ * batadv_tp_vars_put() - decrement the batadv_tp_vars refcounter and possibly
  *  release it
  * @tp_vars: the private data of the current TP meter session to be free'd
  */
@@ -370,7 +372,7 @@ static void batadv_tp_vars_put(struct batadv_tp_vars *tp_vars)
 }
 
 /**
- * batadv_tp_sender_cleanup - cleanup sender data and drop and timer
+ * batadv_tp_sender_cleanup() - cleanup sender data and drop the timer
  * @bat_priv: the bat priv with all the soft interface information
  * @tp_vars: the private data of the current TP meter session to cleanup
  */
@@ -400,7 +402,7 @@ static void batadv_tp_sender_cleanup(struct batadv_priv *bat_priv,
 }
 
 /**
- * batadv_tp_sender_end - print info about ended session and inform client
+ * batadv_tp_sender_end() - print info about ended session and inform client
  * @bat_priv: the bat priv with all the soft interface information
  * @tp_vars: the private data of the current TP meter session
  */
@@ -433,7 +435,7 @@ static void batadv_tp_sender_end(struct batadv_priv *bat_priv,
 }
 
 /**
- * batadv_tp_sender_shutdown - let sender thread/timer stop gracefully
+ * batadv_tp_sender_shutdown() - let sender thread/timer stop gracefully
  * @tp_vars: the private data of the current TP meter session
  * @reason: reason for tp meter session stop
  */
@@ -447,7 +449,7 @@ static void batadv_tp_sender_shutdown(struct batadv_tp_vars *tp_vars,
 }
 
 /**
- * batadv_tp_sender_finish - stop sender session after test_length was reached
+ * batadv_tp_sender_finish() - stop sender session after test_length was reached
  * @work: delayed work reference of the related tp_vars
  */
 static void batadv_tp_sender_finish(struct work_struct *work)
@@ -463,7 +465,7 @@ static void batadv_tp_sender_finish(struct work_struct *work)
 }
 
 /**
- * batadv_tp_reset_sender_timer - reschedule the sender timer
+ * batadv_tp_reset_sender_timer() - reschedule the sender timer
  * @tp_vars: the private TP meter data for this session
  *
  * Reschedule the timer using tp_vars->rto as delay
@@ -481,7 +483,7 @@ static void batadv_tp_reset_sender_timer(struct batadv_tp_vars *tp_vars)
 }
 
 /**
- * batadv_tp_sender_timeout - timer that fires in case of packet loss
+ * batadv_tp_sender_timeout() - timer that fires in case of packet loss
  * @t: address to timer_list inside tp_vars
  *
  * If fired it means that there was packet loss.
@@ -531,7 +533,7 @@ static void batadv_tp_sender_timeout(struct timer_list *t)
 }
 
 /**
- * batadv_tp_fill_prerandom - Fill buffer with prefetched random bytes
+ * batadv_tp_fill_prerandom() - Fill buffer with prefetched random bytes
  * @tp_vars: the private TP meter data for this session
  * @buf: Buffer to fill with bytes
  * @nbytes: amount of pseudorandom bytes
@@ -563,7 +565,7 @@ static void batadv_tp_fill_prerandom(struct batadv_tp_vars *tp_vars,
 }
 
 /**
- * batadv_tp_send_msg - send a single message
+ * batadv_tp_send_msg() - send a single message
  * @tp_vars: the private TP meter data for this session
  * @src: source mac address
  * @orig_node: the originator of the destination
@@ -623,7 +625,7 @@ static int batadv_tp_send_msg(struct batadv_tp_vars *tp_vars, const u8 *src,
 }
 
 /**
- * batadv_tp_recv_ack - ACK receiving function
+ * batadv_tp_recv_ack() - ACK receiving function
  * @bat_priv: the bat priv with all the soft interface information
  * @skb: the buffer containing the received packet
  *
@@ -765,7 +767,7 @@ static void batadv_tp_recv_ack(struct batadv_priv *bat_priv,
 }
 
 /**
- * batadv_tp_avail - check if congestion window is not full
+ * batadv_tp_avail() - check if congestion window is not full
  * @tp_vars: the private data of the current TP meter session
  * @payload_len: size of the payload of a single message
  *
@@ -783,7 +785,7 @@ static bool batadv_tp_avail(struct batadv_tp_vars *tp_vars,
 }
 
 /**
- * batadv_tp_wait_available - wait until congestion window becomes free or
+ * batadv_tp_wait_available() - wait until congestion window becomes free or
  *  timeout is reached
  * @tp_vars: the private data of the current TP meter session
  * @plen: size of the payload of a single message
@@ -805,7 +807,7 @@ static int batadv_tp_wait_available(struct batadv_tp_vars *tp_vars, size_t plen)
 }
 
 /**
- * batadv_tp_send - main sending thread of a tp meter session
+ * batadv_tp_send() - main sending thread of a tp meter session
  * @arg: address of the related tp_vars
  *
  * Return: nothing, this function never returns
@@ -904,7 +906,8 @@ static int batadv_tp_send(void *arg)
 }
 
 /**
- * batadv_tp_start_kthread - start new thread which manages the tp meter sender
+ * batadv_tp_start_kthread() - start new thread which manages the tp meter
+ *  sender
  * @tp_vars: the private data of the current TP meter session
  */
 static void batadv_tp_start_kthread(struct batadv_tp_vars *tp_vars)
@@ -935,7 +938,7 @@ static void batadv_tp_start_kthread(struct batadv_tp_vars *tp_vars)
 }
 
 /**
- * batadv_tp_start - start a new tp meter session
+ * batadv_tp_start() - start a new tp meter session
  * @bat_priv: the bat priv with all the soft interface information
  * @dst: the receiver MAC address
  * @test_length: test length in milliseconds
@@ -1060,7 +1063,7 @@ void batadv_tp_start(struct batadv_priv *bat_priv, const u8 *dst,
 }
 
 /**
- * batadv_tp_stop - stop currently running tp meter session
+ * batadv_tp_stop() - stop currently running tp meter session
  * @bat_priv: the bat priv with all the soft interface information
  * @dst: the receiver MAC address
  * @return_value: reason for tp meter session stop
@@ -1092,7 +1095,7 @@ void batadv_tp_stop(struct batadv_priv *bat_priv, const u8 *dst,
 }
 
 /**
- * batadv_tp_reset_receiver_timer - reset the receiver shutdown timer
+ * batadv_tp_reset_receiver_timer() - reset the receiver shutdown timer
  * @tp_vars: the private data of the current TP meter session
  *
  * start the receiver shutdown timer or reset it if already started
@@ -1104,7 +1107,7 @@ static void batadv_tp_reset_receiver_timer(struct batadv_tp_vars *tp_vars)
 }
 
 /**
- * batadv_tp_receiver_shutdown - stop a tp meter receiver when timeout is
+ * batadv_tp_receiver_shutdown() - stop a tp meter receiver when timeout is
  *  reached without received ack
  * @t: address to timer_list inside tp_vars
  */
@@ -1149,7 +1152,7 @@ static void batadv_tp_receiver_shutdown(struct timer_list *t)
 }
 
 /**
- * batadv_tp_send_ack - send an ACK packet
+ * batadv_tp_send_ack() - send an ACK packet
  * @bat_priv: the bat priv with all the soft interface information
  * @dst: the mac address of the destination originator
  * @seq: the sequence number to ACK
@@ -1221,7 +1224,7 @@ static int batadv_tp_send_ack(struct batadv_priv *bat_priv, const u8 *dst,
 }
 
 /**
- * batadv_tp_handle_out_of_order - store an out of order packet
+ * batadv_tp_handle_out_of_order() - store an out of order packet
  * @tp_vars: the private data of the current TP meter session
  * @skb: the buffer containing the received packet
  *
@@ -1297,7 +1300,7 @@ static bool batadv_tp_handle_out_of_order(struct batadv_tp_vars *tp_vars,
 }
 
 /**
- * batadv_tp_ack_unordered - update number received bytes in current stream
+ * batadv_tp_ack_unordered() - update number of received bytes in current stream
  *  without gaps
  * @tp_vars: the private data of the current TP meter session
  */
@@ -1330,7 +1333,7 @@ static void batadv_tp_ack_unordered(struct batadv_tp_vars *tp_vars)
 }
 
 /**
- * batadv_tp_init_recv - return matching or create new receiver tp_vars
+ * batadv_tp_init_recv() - return matching or create new receiver tp_vars
  * @bat_priv: the bat priv with all the soft interface information
  * @icmp: received icmp tp msg
  *
@@ -1383,7 +1386,7 @@ batadv_tp_init_recv(struct batadv_priv *bat_priv,
 }
 
 /**
- * batadv_tp_recv_msg - process a single data message
+ * batadv_tp_recv_msg() - process a single data message
  * @bat_priv: the bat priv with all the soft interface information
  * @skb: the buffer containing the received packet
  *
@@ -1468,7 +1471,7 @@ static void batadv_tp_recv_msg(struct batadv_priv *bat_priv,
 }
 
 /**
- * batadv_tp_meter_recv - main TP Meter receiving function
+ * batadv_tp_meter_recv() - main TP Meter receiving function
  * @bat_priv: the bat priv with all the soft interface information
  * @skb: the buffer containing the received packet
  */
@@ -1494,7 +1497,7 @@ void batadv_tp_meter_recv(struct batadv_priv *bat_priv, struct sk_buff *skb)
 }
 
 /**
- * batadv_tp_meter_init - initialize global tp_meter structures
+ * batadv_tp_meter_init() - initialize global tp_meter structures
  */
 void __init batadv_tp_meter_init(void)
 {
diff --git a/net/batman-adv/tp_meter.h b/net/batman-adv/tp_meter.h
index a8ada5c..c8b8f2c 100644
--- a/net/batman-adv/tp_meter.h
+++ b/net/batman-adv/tp_meter.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
 /* Copyright (C) 2012-2017  B.A.T.M.A.N. contributors:
  *
  * Edo Monticelli, Antonio Quartulli
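
Several tp_meter callbacks above (batadv_tp_sender_timeout(),
batadv_tp_receiver_shutdown()) receive the timer_list embedded in tp_vars
and recover the session object from it. A minimal sketch of that timer
pattern, with invented names:

#include <linux/jiffies.h>
#include <linux/timer.h>

struct example_session {
        struct timer_list timer;
        unsigned int retries;
};

static void example_timeout(struct timer_list *t)
{
        /* from_timer() maps the timer_list pointer back to the
         * structure that embeds it
         */
        struct example_session *s = from_timer(s, t, timer);

        s->retries++;
}

static void example_arm(struct example_session *s)
{
        timer_setup(&s->timer, example_timeout, 0);
        mod_timer(&s->timer, jiffies + HZ);
}
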
diff --git a/net/batman-adv/translation-table.c b/net/batman-adv/translation-table.c
index 8a3ce79..7550a9c 100644
--- a/net/batman-adv/translation-table.c
+++ b/net/batman-adv/translation-table.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
 /* Copyright (C) 2007-2017  B.A.T.M.A.N. contributors:
  *
  * Marek Lindner, Simon Wunderlich, Antonio Quartulli
@@ -20,14 +21,14 @@
 
 #include <linux/atomic.h>
 #include <linux/bitops.h>
-#include <linux/bug.h>
+#include <linux/build_bug.h>
 #include <linux/byteorder/generic.h>
 #include <linux/cache.h>
 #include <linux/compiler.h>
 #include <linux/crc32c.h>
 #include <linux/errno.h>
 #include <linux/etherdevice.h>
-#include <linux/fs.h>
+#include <linux/gfp.h>
 #include <linux/if_ether.h>
 #include <linux/init.h>
 #include <linux/jhash.h>
@@ -36,6 +37,7 @@
 #include <linux/kref.h>
 #include <linux/list.h>
 #include <linux/lockdep.h>
+#include <linux/net.h>
 #include <linux/netdevice.h>
 #include <linux/netlink.h>
 #include <linux/rculist.h>
@@ -50,6 +52,7 @@
 #include <net/genetlink.h>
 #include <net/netlink.h>
 #include <net/sock.h>
+#include <uapi/linux/batadv_packet.h>
 #include <uapi/linux/batman_adv.h>
 
 #include "bridge_loop_avoidance.h"
@@ -58,7 +61,6 @@
 #include "log.h"
 #include "netlink.h"
 #include "originator.h"
-#include "packet.h"
 #include "soft-interface.h"
 #include "tvlv.h"
 
@@ -86,7 +88,7 @@ static void batadv_tt_global_del(struct batadv_priv *bat_priv,
 				 bool roaming);
 
 /**
- * batadv_compare_tt - check if two TT entries are the same
+ * batadv_compare_tt() - check if two TT entries are the same
  * @node: the list element pointer of the first TT entry
  * @data2: pointer to the tt_common_entry of the second TT entry
  *
@@ -105,7 +107,7 @@ static bool batadv_compare_tt(const struct hlist_node *node, const void *data2)
 }
 
 /**
- * batadv_choose_tt - return the index of the tt entry in the hash table
+ * batadv_choose_tt() - return the index of the tt entry in the hash table
  * @data: pointer to the tt_common_entry object to map
  * @size: the size of the hash table
  *
@@ -125,7 +127,7 @@ static inline u32 batadv_choose_tt(const void *data, u32 size)
 }
 
 /**
- * batadv_tt_hash_find - look for a client in the given hash table
+ * batadv_tt_hash_find() - look for a client in the given hash table
  * @hash: the hash table to search
  * @addr: the mac address of the client to look for
  * @vid: VLAN identifier
@@ -170,7 +172,7 @@ batadv_tt_hash_find(struct batadv_hashtable *hash, const u8 *addr,
 }
 
 /**
- * batadv_tt_local_hash_find - search the local table for a given client
+ * batadv_tt_local_hash_find() - search the local table for a given client
  * @bat_priv: the bat priv with all the soft interface information
  * @addr: the mac address of the client to look for
  * @vid: VLAN identifier
@@ -195,7 +197,7 @@ batadv_tt_local_hash_find(struct batadv_priv *bat_priv, const u8 *addr,
 }
 
 /**
- * batadv_tt_global_hash_find - search the global table for a given client
+ * batadv_tt_global_hash_find() - search the global table for a given client
  * @bat_priv: the bat priv with all the soft interface information
  * @addr: the mac address of the client to look for
  * @vid: VLAN identifier
@@ -220,7 +222,7 @@ batadv_tt_global_hash_find(struct batadv_priv *bat_priv, const u8 *addr,
 }
 
 /**
- * batadv_tt_local_entry_free_rcu - free the tt_local_entry
+ * batadv_tt_local_entry_free_rcu() - free the tt_local_entry
  * @rcu: rcu pointer of the tt_local_entry
  */
 static void batadv_tt_local_entry_free_rcu(struct rcu_head *rcu)
@@ -234,7 +236,7 @@ static void batadv_tt_local_entry_free_rcu(struct rcu_head *rcu)
 }
 
 /**
- * batadv_tt_local_entry_release - release tt_local_entry from lists and queue
+ * batadv_tt_local_entry_release() - release tt_local_entry from lists and queue
  *  for free after rcu grace period
  * @ref: kref pointer of the tt_local_entry
  */
@@ -251,7 +253,7 @@ static void batadv_tt_local_entry_release(struct kref *ref)
 }
 
 /**
- * batadv_tt_local_entry_put - decrement the tt_local_entry refcounter and
+ * batadv_tt_local_entry_put() - decrement the tt_local_entry refcounter and
  *  possibly release it
  * @tt_local_entry: tt_local_entry to be free'd
  */
@@ -263,7 +265,7 @@ batadv_tt_local_entry_put(struct batadv_tt_local_entry *tt_local_entry)
 }
 
 /**
- * batadv_tt_global_entry_free_rcu - free the tt_global_entry
+ * batadv_tt_global_entry_free_rcu() - free the tt_global_entry
  * @rcu: rcu pointer of the tt_global_entry
  */
 static void batadv_tt_global_entry_free_rcu(struct rcu_head *rcu)
@@ -277,8 +279,8 @@ static void batadv_tt_global_entry_free_rcu(struct rcu_head *rcu)
 }
 
 /**
- * batadv_tt_global_entry_release - release tt_global_entry from lists and queue
- *  for free after rcu grace period
+ * batadv_tt_global_entry_release() - release tt_global_entry from lists and
+ *  queue for free after rcu grace period
  * @ref: kref pointer of the tt_global_entry
  */
 static void batadv_tt_global_entry_release(struct kref *ref)
@@ -294,7 +296,7 @@ static void batadv_tt_global_entry_release(struct kref *ref)
 }
 
 /**
- * batadv_tt_global_entry_put - decrement the tt_global_entry refcounter and
+ * batadv_tt_global_entry_put() - decrement the tt_global_entry refcounter and
  *  possibly release it
  * @tt_global_entry: tt_global_entry to be free'd
  */
@@ -306,7 +308,7 @@ batadv_tt_global_entry_put(struct batadv_tt_global_entry *tt_global_entry)
 }
 
 /**
- * batadv_tt_global_hash_count - count the number of orig entries
+ * batadv_tt_global_hash_count() - count the number of orig entries
  * @bat_priv: the bat priv with all the soft interface information
  * @addr: the mac address of the client to count entries for
  * @vid: VLAN identifier
@@ -331,8 +333,8 @@ int batadv_tt_global_hash_count(struct batadv_priv *bat_priv,
 }
 
 /**
- * batadv_tt_local_size_mod - change the size by v of the local table identified
- *  by vid
+ * batadv_tt_local_size_mod() - change the size by v of the local table
+ *  identified by vid
  * @bat_priv: the bat priv with all the soft interface information
  * @vid: the VLAN identifier of the sub-table to change
  * @v: the amount to sum to the local table size
@@ -352,8 +354,8 @@ static void batadv_tt_local_size_mod(struct batadv_priv *bat_priv,
 }
 
 /**
- * batadv_tt_local_size_inc - increase by one the local table size for the given
- *  vid
+ * batadv_tt_local_size_inc() - increase by one the local table size for the
+ *  given vid
  * @bat_priv: the bat priv with all the soft interface information
  * @vid: the VLAN identifier
  */
@@ -364,8 +366,8 @@ static void batadv_tt_local_size_inc(struct batadv_priv *bat_priv,
 }
 
 /**
- * batadv_tt_local_size_dec - decrease by one the local table size for the given
- *  vid
+ * batadv_tt_local_size_dec() - decrease by one the local table size for the
+ *  given vid
  * @bat_priv: the bat priv with all the soft interface information
  * @vid: the VLAN identifier
  */
@@ -376,7 +378,7 @@ static void batadv_tt_local_size_dec(struct batadv_priv *bat_priv,
 }
 
 /**
- * batadv_tt_global_size_mod - change the size by v of the global table
+ * batadv_tt_global_size_mod() - change the size by v of the global table
  *  for orig_node identified by vid
  * @orig_node: the originator for which the table has to be modified
  * @vid: the VLAN identifier
@@ -404,7 +406,7 @@ static void batadv_tt_global_size_mod(struct batadv_orig_node *orig_node,
 }
 
 /**
- * batadv_tt_global_size_inc - increase by one the global table size for the
+ * batadv_tt_global_size_inc() - increase by one the global table size for the
  *  given vid
  * @orig_node: the originator whose global table size has to be increased
  * @vid: the vlan identifier
@@ -416,7 +418,7 @@ static void batadv_tt_global_size_inc(struct batadv_orig_node *orig_node,
 }
 
 /**
- * batadv_tt_global_size_dec - decrease by one the global table size for the
+ * batadv_tt_global_size_dec() - decrease by one the global table size for the
  *  given vid
  * @orig_node: the originator whose global table size has to be decreased
  * @vid: the vlan identifier
@@ -428,7 +430,7 @@ static void batadv_tt_global_size_dec(struct batadv_orig_node *orig_node,
 }
 
 /**
- * batadv_tt_orig_list_entry_free_rcu - free the orig_entry
+ * batadv_tt_orig_list_entry_free_rcu() - free the orig_entry
  * @rcu: rcu pointer of the orig_entry
  */
 static void batadv_tt_orig_list_entry_free_rcu(struct rcu_head *rcu)
@@ -441,7 +443,7 @@ static void batadv_tt_orig_list_entry_free_rcu(struct rcu_head *rcu)
 }
 
 /**
- * batadv_tt_orig_list_entry_release - release tt orig entry from lists and
+ * batadv_tt_orig_list_entry_release() - release tt orig entry from lists and
  *  queue for free after rcu grace period
  * @ref: kref pointer of the tt orig entry
  */
@@ -457,7 +459,7 @@ static void batadv_tt_orig_list_entry_release(struct kref *ref)
 }
 
 /**
- * batadv_tt_orig_list_entry_put - decrement the tt orig entry refcounter and
+ * batadv_tt_orig_list_entry_put() - decrement the tt orig entry refcounter and
  *  possibly release it
  * @orig_entry: tt orig entry to be free'd
  */
@@ -468,7 +470,7 @@ batadv_tt_orig_list_entry_put(struct batadv_tt_orig_list_entry *orig_entry)
 }
 
 /**
- * batadv_tt_local_event - store a local TT event (ADD/DEL)
+ * batadv_tt_local_event() - store a local TT event (ADD/DEL)
  * @bat_priv: the bat priv with all the soft interface information
  * @tt_local_entry: the TT entry involved in the event
  * @event_flags: flags to store in the event structure
@@ -543,7 +545,7 @@ static void batadv_tt_local_event(struct batadv_priv *bat_priv,
 }
 
 /**
- * batadv_tt_len - compute length in bytes of given number of tt changes
+ * batadv_tt_len() - compute length in bytes of given number of tt changes
  * @changes_num: number of tt changes
  *
  * Return: computed length in bytes.
@@ -554,7 +556,7 @@ static int batadv_tt_len(int changes_num)
 }
 
 /**
- * batadv_tt_entries - compute the number of entries fitting in tt_len bytes
+ * batadv_tt_entries() - compute the number of entries fitting in tt_len bytes
  * @tt_len: available space
  *
  * Return: the number of entries.
@@ -565,8 +567,8 @@ static u16 batadv_tt_entries(u16 tt_len)
 }
 
 /**
- * batadv_tt_local_table_transmit_size - calculates the local translation table
- *  size when transmitted over the air
+ * batadv_tt_local_table_transmit_size() - calculates the local translation
+ *  table size when transmitted over the air
  * @bat_priv: the bat priv with all the soft interface information
  *
  * Return: local translation table size in bytes.
@@ -625,7 +627,7 @@ static void batadv_tt_global_free(struct batadv_priv *bat_priv,
 }
 
 /**
- * batadv_tt_local_add - add a new client to the local table or update an
+ * batadv_tt_local_add() - add a new client to the local table or update an
  *  existing client
  * @soft_iface: netdev struct of the mesh interface
  * @addr: the mac address of the client to add
@@ -830,7 +832,7 @@ bool batadv_tt_local_add(struct net_device *soft_iface, const u8 *addr,
 }
 
 /**
- * batadv_tt_prepare_tvlv_global_data - prepare the TVLV TT header to send
+ * batadv_tt_prepare_tvlv_global_data() - prepare the TVLV TT header to send
  *  within a TT Response directed to another node
  * @orig_node: originator for which the TT data has to be prepared
  * @tt_data: uninitialised pointer to the address of the TVLV buffer
@@ -903,8 +905,8 @@ batadv_tt_prepare_tvlv_global_data(struct batadv_orig_node *orig_node,
 }
 
 /**
- * batadv_tt_prepare_tvlv_local_data - allocate and prepare the TT TVLV for this
- *  node
+ * batadv_tt_prepare_tvlv_local_data() - allocate and prepare the TT TVLV for
+ *  this node
  * @bat_priv: the bat priv with all the soft interface information
  * @tt_data: uninitialised pointer to the address of the TVLV buffer
  * @tt_change: uninitialised pointer to the address of the area where the TT
@@ -977,8 +979,8 @@ batadv_tt_prepare_tvlv_local_data(struct batadv_priv *bat_priv,
 }
 
 /**
- * batadv_tt_tvlv_container_update - update the translation table tvlv container
- *  after local tt changes have been committed
+ * batadv_tt_tvlv_container_update() - update the translation table tvlv
+ *  container after local tt changes have been committed
  * @bat_priv: the bat priv with all the soft interface information
  */
 static void batadv_tt_tvlv_container_update(struct batadv_priv *bat_priv)
@@ -1053,6 +1055,14 @@ static void batadv_tt_tvlv_container_update(struct batadv_priv *bat_priv)
 }
 
 #ifdef CONFIG_BATMAN_ADV_DEBUGFS
+
+/**
+ * batadv_tt_local_seq_print_text() - Print the local tt table in a seq file
+ * @seq: seq file to print on
+ * @offset: not used
+ *
+ * Return: always 0
+ */
 int batadv_tt_local_seq_print_text(struct seq_file *seq, void *offset)
 {
 	struct net_device *net_dev = (struct net_device *)seq->private;
@@ -1123,7 +1133,7 @@ int batadv_tt_local_seq_print_text(struct seq_file *seq, void *offset)
 #endif
 
 /**
- * batadv_tt_local_dump_entry - Dump one TT local entry into a message
+ * batadv_tt_local_dump_entry() - Dump one TT local entry into a message
  * @msg: Netlink message to dump into
  * @portid: Port making netlink request
  * @seq: Sequence number of netlink message
@@ -1179,7 +1189,7 @@ batadv_tt_local_dump_entry(struct sk_buff *msg, u32 portid, u32 seq,
 }
 
 /**
- * batadv_tt_local_dump_bucket - Dump one TT local bucket into a message
+ * batadv_tt_local_dump_bucket() - Dump one TT local bucket into a message
  * @msg: Netlink message to dump into
  * @portid: Port making netlink request
  * @seq: Sequence number of netlink message
@@ -1216,7 +1226,7 @@ batadv_tt_local_dump_bucket(struct sk_buff *msg, u32 portid, u32 seq,
 }
 
 /**
- * batadv_tt_local_dump - Dump TT local entries into a message
+ * batadv_tt_local_dump() - Dump TT local entries into a message
  * @msg: Netlink message to dump into
  * @cb: Parameters from query
  *
@@ -1300,7 +1310,7 @@ batadv_tt_local_set_pending(struct batadv_priv *bat_priv,
 }
 
 /**
- * batadv_tt_local_remove - logically remove an entry from the local table
+ * batadv_tt_local_remove() - logically remove an entry from the local table
  * @bat_priv: the bat priv with all the soft interface information
  * @addr: the MAC address of the client to remove
  * @vid: VLAN identifier
@@ -1362,7 +1372,7 @@ u16 batadv_tt_local_remove(struct batadv_priv *bat_priv, const u8 *addr,
 }
 
 /**
- * batadv_tt_local_purge_list - purge inactive tt local entries
+ * batadv_tt_local_purge_list() - purge inactive tt local entries
  * @bat_priv: the bat priv with all the soft interface information
  * @head: pointer to the list containing the local tt entries
  * @timeout: parameter deciding whether a given tt local entry is considered
@@ -1397,7 +1407,7 @@ static void batadv_tt_local_purge_list(struct batadv_priv *bat_priv,
 }
 
 /**
- * batadv_tt_local_purge - purge inactive tt local entries
+ * batadv_tt_local_purge() - purge inactive tt local entries
  * @bat_priv: the bat priv with all the soft interface information
  * @timeout: parameter deciding whether a given tt local entry is considered
  *  inactive or not
@@ -1490,7 +1500,7 @@ static void batadv_tt_changes_list_free(struct batadv_priv *bat_priv)
 }
 
 /**
- * batadv_tt_global_orig_entry_find - find a TT orig_list_entry
+ * batadv_tt_global_orig_entry_find() - find a TT orig_list_entry
  * @entry: the TT global entry where the orig_list_entry has to be
  *  extracted from
  * @orig_node: the originator for which the orig_list_entry has to be found
@@ -1524,8 +1534,8 @@ batadv_tt_global_orig_entry_find(const struct batadv_tt_global_entry *entry,
 }
 
 /**
- * batadv_tt_global_entry_has_orig - check if a TT global entry is also handled
- *  by a given originator
+ * batadv_tt_global_entry_has_orig() - check if a TT global entry is also
+ *  handled by a given originator
  * @entry: the TT global entry to check
  * @orig_node: the originator to search in the list
  *
@@ -1550,7 +1560,7 @@ batadv_tt_global_entry_has_orig(const struct batadv_tt_global_entry *entry,
 }
 
 /**
- * batadv_tt_global_sync_flags - update TT sync flags
+ * batadv_tt_global_sync_flags() - update TT sync flags
  * @tt_global: the TT global entry to update sync flags in
  *
  * Updates the sync flag bits in the tt_global flag attribute with a logical
@@ -1574,7 +1584,7 @@ batadv_tt_global_sync_flags(struct batadv_tt_global_entry *tt_global)
 }
 
 /**
- * batadv_tt_global_orig_entry_add - add or update a TT orig entry
+ * batadv_tt_global_orig_entry_add() - add or update a TT orig entry
  * @tt_global: the TT global entry to add an orig entry in
  * @orig_node: the originator to add an orig entry for
  * @ttvn: translation table version number of this changeset
@@ -1624,7 +1634,7 @@ batadv_tt_global_orig_entry_add(struct batadv_tt_global_entry *tt_global,
 }
 
 /**
- * batadv_tt_global_add - add a new TT global entry or update an existing one
+ * batadv_tt_global_add() - add a new TT global entry or update an existing one
  * @bat_priv: the bat priv with all the soft interface information
  * @orig_node: the originator announcing the client
  * @tt_addr: the mac address of the non-mesh client
@@ -1796,7 +1806,7 @@ static bool batadv_tt_global_add(struct batadv_priv *bat_priv,
 }
 
 /**
- * batadv_transtable_best_orig - Get best originator list entry from tt entry
+ * batadv_transtable_best_orig() - Get best originator list entry from tt entry
  * @bat_priv: the bat priv with all the soft interface information
  * @tt_global_entry: global translation table entry to be analyzed
  *
@@ -1842,8 +1852,8 @@ batadv_transtable_best_orig(struct batadv_priv *bat_priv,
 
 #ifdef CONFIG_BATMAN_ADV_DEBUGFS
 /**
- * batadv_tt_global_print_entry - print all orig nodes who announce the address
- *  for this global entry
+ * batadv_tt_global_print_entry() - print all orig nodes who announce the
+ *  address for this global entry
  * @bat_priv: the bat priv with all the soft interface information
  * @tt_global_entry: global translation table entry to be printed
  * @seq: debugfs table seq_file struct
@@ -1925,6 +1935,13 @@ batadv_tt_global_print_entry(struct batadv_priv *bat_priv,
 	}
 }
 
+/**
+ * batadv_tt_global_seq_print_text() - Print the global tt table in a seq file
+ * @seq: seq file to print on
+ * @offset: not used
+ *
+ * Return: always 0
+ */
 int batadv_tt_global_seq_print_text(struct seq_file *seq, void *offset)
 {
 	struct net_device *net_dev = (struct net_device *)seq->private;
@@ -1967,7 +1984,7 @@ int batadv_tt_global_seq_print_text(struct seq_file *seq, void *offset)
 #endif
 
 /**
- * batadv_tt_global_dump_subentry - Dump all TT local entries into a message
+ * batadv_tt_global_dump_subentry() - Dump one TT global subentry into a message
  * @msg: Netlink message to dump into
  * @portid: Port making netlink request
  * @seq: Sequence number of netlink message
@@ -2028,7 +2045,7 @@ batadv_tt_global_dump_subentry(struct sk_buff *msg, u32 portid, u32 seq,
 }
 
 /**
- * batadv_tt_global_dump_entry - Dump one TT global entry into a message
+ * batadv_tt_global_dump_entry() - Dump one TT global entry into a message
  * @msg: Netlink message to dump into
  * @portid: Port making netlink request
  * @seq: Sequence number of netlink message
@@ -2073,7 +2090,7 @@ batadv_tt_global_dump_entry(struct sk_buff *msg, u32 portid, u32 seq,
 }
 
 /**
- * batadv_tt_global_dump_bucket - Dump one TT local bucket into a message
+ * batadv_tt_global_dump_bucket() - Dump one TT global bucket into a message
  * @msg: Netlink message to dump into
  * @portid: Port making netlink request
  * @seq: Sequence number of netlink message
@@ -2112,7 +2129,7 @@ batadv_tt_global_dump_bucket(struct sk_buff *msg, u32 portid, u32 seq,
 }
 
 /**
- * batadv_tt_global_dump -  Dump TT global entries into a message
+ * batadv_tt_global_dump() - Dump TT global entries into a message
  * @msg: Netlink message to dump into
  * @cb: Parameters from query
  *
@@ -2180,7 +2197,7 @@ int batadv_tt_global_dump(struct sk_buff *msg, struct netlink_callback *cb)
 }
 
 /**
- * _batadv_tt_global_del_orig_entry - remove and free an orig_entry
+ * _batadv_tt_global_del_orig_entry() - remove and free an orig_entry
  * @tt_global_entry: the global entry to remove the orig_entry from
  * @orig_entry: the orig entry to remove and free
  *
@@ -2222,7 +2239,7 @@ batadv_tt_global_del_orig_list(struct batadv_tt_global_entry *tt_global_entry)
 }
 
 /**
- * batadv_tt_global_del_orig_node - remove orig_node from a global tt entry
+ * batadv_tt_global_del_orig_node() - remove orig_node from a global tt entry
  * @bat_priv: the bat priv with all the soft interface information
  * @tt_global_entry: the global entry to remove the orig_node from
  * @orig_node: the originator announcing the client
@@ -2301,7 +2318,7 @@ batadv_tt_global_del_roaming(struct batadv_priv *bat_priv,
 }
 
 /**
- * batadv_tt_global_del - remove a client from the global table
+ * batadv_tt_global_del() - remove a client from the global table
  * @bat_priv: the bat priv with all the soft interface information
  * @orig_node: an originator serving this client
  * @addr: the mac address of the client
@@ -2367,8 +2384,8 @@ static void batadv_tt_global_del(struct batadv_priv *bat_priv,
 }
 
 /**
- * batadv_tt_global_del_orig - remove all the TT global entries belonging to the
- *  given originator matching the provided vid
+ * batadv_tt_global_del_orig() - remove all the TT global entries belonging to
+ *  the given originator matching the provided vid
  * @bat_priv: the bat priv with all the soft interface information
  * @orig_node: the originator owning the entries to remove
  * @match_vid: the VLAN identifier to match. If negative all the entries will be
@@ -2539,7 +2556,7 @@ _batadv_is_ap_isolated(struct batadv_tt_local_entry *tt_local_entry,
 }
 
 /**
- * batadv_transtable_search - get the mesh destination for a given client
+ * batadv_transtable_search() - get the mesh destination for a given client
  * @bat_priv: the bat priv with all the soft interface information
  * @src: mac address of the source client
  * @addr: mac address of the destination client
@@ -2599,7 +2616,7 @@ struct batadv_orig_node *batadv_transtable_search(struct batadv_priv *bat_priv,
 }
 
 /**
- * batadv_tt_global_crc - calculates the checksum of the local table belonging
+ * batadv_tt_global_crc() - calculates the checksum of the local table belonging
  *  to the given orig_node
  * @bat_priv: the bat priv with all the soft interface information
  * @orig_node: originator for which the CRC should be computed
@@ -2694,7 +2711,7 @@ static u32 batadv_tt_global_crc(struct batadv_priv *bat_priv,
 }
 
 /**
- * batadv_tt_local_crc - calculates the checksum of the local table
+ * batadv_tt_local_crc() - calculates the checksum of the local table
  * @bat_priv: the bat priv with all the soft interface information
  * @vid: VLAN identifier for which the CRC32 has to be computed
  *
@@ -2751,7 +2768,7 @@ static u32 batadv_tt_local_crc(struct batadv_priv *bat_priv,
 }
 
 /**
- * batadv_tt_req_node_release - free tt_req node entry
+ * batadv_tt_req_node_release() - free tt_req node entry
  * @ref: kref pointer of the tt req_node entry
  */
 static void batadv_tt_req_node_release(struct kref *ref)
@@ -2764,7 +2781,7 @@ static void batadv_tt_req_node_release(struct kref *ref)
 }
 
 /**
- * batadv_tt_req_node_put - decrement the tt_req_node refcounter and
+ * batadv_tt_req_node_put() - decrement the tt_req_node refcounter and
  *  possibly release it
  * @tt_req_node: tt_req_node to be free'd
  */
@@ -2826,7 +2843,7 @@ static void batadv_tt_req_purge(struct batadv_priv *bat_priv)
 }
 
 /**
- * batadv_tt_req_node_new - search and possibly create a tt_req_node object
+ * batadv_tt_req_node_new() - search and possibly create a tt_req_node object
  * @bat_priv: the bat priv with all the soft interface information
  * @orig_node: orig node this request is being issued for
  *
@@ -2863,7 +2880,7 @@ batadv_tt_req_node_new(struct batadv_priv *bat_priv,
 }
 
 /**
- * batadv_tt_local_valid - verify that given tt entry is a valid one
+ * batadv_tt_local_valid() - verify that given tt entry is a valid one
  * @entry_ptr: to be checked local tt entry
  * @data_ptr: not used but definition required to satisfy the callback prototype
  *
@@ -2897,7 +2914,7 @@ static bool batadv_tt_global_valid(const void *entry_ptr,
 }
 
 /**
- * batadv_tt_tvlv_generate - fill the tvlv buff with the tt entries from the
+ * batadv_tt_tvlv_generate() - fill the tvlv buff with the tt entries from the
  *  specified tt hash
  * @bat_priv: the bat priv with all the soft interface information
  * @hash: hash table containing the tt entries
@@ -2948,7 +2965,7 @@ static void batadv_tt_tvlv_generate(struct batadv_priv *bat_priv,
 }
 
 /**
- * batadv_tt_global_check_crc - check if all the CRCs are correct
+ * batadv_tt_global_check_crc() - check if all the CRCs are correct
  * @orig_node: originator for which the CRCs have to be checked
  * @tt_vlan: pointer to the first tvlv VLAN entry
  * @num_vlan: number of tvlv VLAN entries
@@ -3005,7 +3022,7 @@ static bool batadv_tt_global_check_crc(struct batadv_orig_node *orig_node,
 }
 
 /**
- * batadv_tt_local_update_crc - update all the local CRCs
+ * batadv_tt_local_update_crc() - update all the local CRCs
  * @bat_priv: the bat priv with all the soft interface information
  */
 static void batadv_tt_local_update_crc(struct batadv_priv *bat_priv)
@@ -3021,7 +3038,7 @@ static void batadv_tt_local_update_crc(struct batadv_priv *bat_priv)
 }
 
 /**
- * batadv_tt_global_update_crc - update all the global CRCs for this orig_node
+ * batadv_tt_global_update_crc() - update all the global CRCs for this orig_node
  * @bat_priv: the bat priv with all the soft interface information
  * @orig_node: the orig_node for which the CRCs have to be updated
  */
@@ -3048,7 +3065,7 @@ static void batadv_tt_global_update_crc(struct batadv_priv *bat_priv,
 }
 
 /**
- * batadv_send_tt_request - send a TT Request message to a given node
+ * batadv_send_tt_request() - send a TT Request message to a given node
  * @bat_priv: the bat priv with all the soft interface information
  * @dst_orig_node: the destination of the message
  * @ttvn: the version number that the source of the message is looking for
@@ -3137,7 +3154,7 @@ static bool batadv_send_tt_request(struct batadv_priv *bat_priv,
 }
 
 /**
- * batadv_send_other_tt_response - send reply to tt request concerning another
+ * batadv_send_other_tt_response() - send reply to tt request concerning another
  *  node's translation table
  * @bat_priv: the bat priv with all the soft interface information
  * @tt_data: tt data containing the tt request information
@@ -3270,8 +3287,8 @@ static bool batadv_send_other_tt_response(struct batadv_priv *bat_priv,
 }
 
 /**
- * batadv_send_my_tt_response - send reply to tt request concerning this node's
- *  translation table
+ * batadv_send_my_tt_response() - send reply to tt request concerning this
+ *  node's translation table
  * @bat_priv: the bat priv with all the soft interface information
  * @tt_data: tt data containing the tt request information
  * @req_src: mac address of tt request sender
@@ -3388,7 +3405,7 @@ static bool batadv_send_my_tt_response(struct batadv_priv *bat_priv,
 }
 
 /**
- * batadv_send_tt_response - send reply to tt request
+ * batadv_send_tt_response() - send reply to tt request
  * @bat_priv: the bat priv with all the soft interface information
  * @tt_data: tt data containing the tt request information
  * @req_src: mac address of tt request sender
@@ -3484,7 +3501,7 @@ static void batadv_tt_update_changes(struct batadv_priv *bat_priv,
 }
 
 /**
- * batadv_is_my_client - check if a client is served by the local node
+ * batadv_is_my_client() - check if a client is served by the local node
  * @bat_priv: the bat priv with all the soft interface information
  * @addr: the mac address of the client to check
  * @vid: VLAN identifier
@@ -3514,7 +3531,7 @@ bool batadv_is_my_client(struct batadv_priv *bat_priv, const u8 *addr,
 }
 
 /**
- * batadv_handle_tt_response - process incoming tt reply
+ * batadv_handle_tt_response() - process incoming tt reply
  * @bat_priv: the bat priv with all the soft interface information
  * @tt_data: tt data containing the tt request information
  * @resp_src: mac address of tt reply sender
@@ -3607,7 +3624,7 @@ static void batadv_tt_roam_purge(struct batadv_priv *bat_priv)
 }
 
 /**
- * batadv_tt_check_roam_count - check if a client has roamed too frequently
+ * batadv_tt_check_roam_count() - check if a client has roamed too frequently
  * @bat_priv: the bat priv with all the soft interface information
  * @client: mac address of the roaming client
  *
@@ -3662,7 +3679,7 @@ static bool batadv_tt_check_roam_count(struct batadv_priv *bat_priv, u8 *client)
 }
 
 /**
- * batadv_send_roam_adv - send a roaming advertisement message
+ * batadv_send_roam_adv() - send a roaming advertisement message
  * @bat_priv: the bat priv with all the soft interface information
  * @client: mac address of the roaming client
  * @vid: VLAN identifier
@@ -3727,6 +3744,10 @@ static void batadv_tt_purge(struct work_struct *work)
 			   msecs_to_jiffies(BATADV_TT_WORK_PERIOD));
 }
 
+/**
+ * batadv_tt_free() - Free translation table of soft interface
+ * @bat_priv: the bat priv with all the soft interface information
+ */
 void batadv_tt_free(struct batadv_priv *bat_priv)
 {
 	batadv_tvlv_container_unregister(bat_priv, BATADV_TVLV_TT, 1);
@@ -3744,7 +3765,7 @@ void batadv_tt_free(struct batadv_priv *bat_priv)
 }
 
 /**
- * batadv_tt_local_set_flags - set or unset the specified flags on the local
+ * batadv_tt_local_set_flags() - set or unset the specified flags on the local
  *  table and possibly count them in the TT size
  * @bat_priv: the bat priv with all the soft interface information
  * @flags: the flag to switch
@@ -3830,7 +3851,7 @@ static void batadv_tt_local_purge_pending_clients(struct batadv_priv *bat_priv)
 }
 
 /**
- * batadv_tt_local_commit_changes_nolock - commit all pending local tt changes
+ * batadv_tt_local_commit_changes_nolock() - commit all pending local tt changes
  *  which have been queued in the time since the last commit
  * @bat_priv: the bat priv with all the soft interface information
  *
@@ -3863,7 +3884,7 @@ static void batadv_tt_local_commit_changes_nolock(struct batadv_priv *bat_priv)
 }
 
 /**
- * batadv_tt_local_commit_changes - commit all pending local tt changes which
+ * batadv_tt_local_commit_changes() - commit all pending local tt changes which
  *  have been queued in the time since the last commit
  * @bat_priv: the bat priv with all the soft interface information
  */
@@ -3874,6 +3895,15 @@ void batadv_tt_local_commit_changes(struct batadv_priv *bat_priv)
 	spin_unlock_bh(&bat_priv->tt.commit_lock);
 }
 
+/**
+ * batadv_is_ap_isolated() - Check if packet from upper layer should be dropped
+ * @bat_priv: the bat priv with all the soft interface information
+ * @src: source mac address of packet
+ * @dst: destination mac address of packet
+ * @vid: vlan id of packet
+ *
+ * Return: true when src+dst(+vid) pair should be isolated, false otherwise
+ */
 bool batadv_is_ap_isolated(struct batadv_priv *bat_priv, u8 *src, u8 *dst,
 			   unsigned short vid)
 {
@@ -3909,7 +3939,7 @@ bool batadv_is_ap_isolated(struct batadv_priv *bat_priv, u8 *src, u8 *dst,
 }
 
 /**
- * batadv_tt_update_orig - update global translation table with new tt
+ * batadv_tt_update_orig() - update global translation table with new tt
  *  information received via ogms
  * @bat_priv: the bat priv with all the soft interface information
  * @orig_node: the orig_node of the ogm
@@ -3994,7 +4024,7 @@ static void batadv_tt_update_orig(struct batadv_priv *bat_priv,
 }
 
 /**
- * batadv_tt_global_client_is_roaming - check if a client is marked as roaming
+ * batadv_tt_global_client_is_roaming() - check if a client is marked as roaming
  * @bat_priv: the bat priv with all the soft interface information
  * @addr: the mac address of the client to check
  * @vid: VLAN identifier
@@ -4020,7 +4050,7 @@ bool batadv_tt_global_client_is_roaming(struct batadv_priv *bat_priv,
 }
 
 /**
- * batadv_tt_local_client_is_roaming - tells whether the client is roaming
+ * batadv_tt_local_client_is_roaming() - tells whether the client is roaming
  * @bat_priv: the bat priv with all the soft interface information
  * @addr: the mac address of the local client to query
  * @vid: VLAN identifier
@@ -4045,6 +4075,15 @@ bool batadv_tt_local_client_is_roaming(struct batadv_priv *bat_priv,
 	return ret;
 }
 
+/**
+ * batadv_tt_add_temporary_global_entry() - Add temporary entry to global TT
+ * @bat_priv: the bat priv with all the soft interface information
+ * @orig_node: orig node which the temporary entry should be associated with
+ * @addr: mac address of the client
+ * @vid: VLAN id of the new temporary global translation table entry
+ *
+ * Return: true when temporary tt entry could be added, false otherwise
+ */
 bool batadv_tt_add_temporary_global_entry(struct batadv_priv *bat_priv,
 					  struct batadv_orig_node *orig_node,
 					  const unsigned char *addr,
@@ -4069,7 +4108,7 @@ bool batadv_tt_add_temporary_global_entry(struct batadv_priv *bat_priv,
 }
 
 /**
- * batadv_tt_local_resize_to_mtu - resize the local translation table fit the
+ * batadv_tt_local_resize_to_mtu() - resize the local translation table to fit the
  *  maximum packet size that can be transported through the mesh
  * @soft_iface: netdev struct of the mesh interface
  *
@@ -4110,7 +4149,7 @@ void batadv_tt_local_resize_to_mtu(struct net_device *soft_iface)
 }
 
 /**
- * batadv_tt_tvlv_ogm_handler_v1 - process incoming tt tvlv container
+ * batadv_tt_tvlv_ogm_handler_v1() - process incoming tt tvlv container
  * @bat_priv: the bat priv with all the soft interface information
  * @orig: the orig_node of the ogm
  * @flags: flags indicating the tvlv state (see batadv_tvlv_handler_flags)
@@ -4149,7 +4188,7 @@ static void batadv_tt_tvlv_ogm_handler_v1(struct batadv_priv *bat_priv,
 }
 
 /**
- * batadv_tt_tvlv_unicast_handler_v1 - process incoming (unicast) tt tvlv
+ * batadv_tt_tvlv_unicast_handler_v1() - process incoming (unicast) tt tvlv
  *  container
  * @bat_priv: the bat priv with all the soft interface information
  * @src: mac address of tt tvlv sender
@@ -4231,7 +4270,8 @@ static int batadv_tt_tvlv_unicast_handler_v1(struct batadv_priv *bat_priv,
 }
 
 /**
- * batadv_roam_tvlv_unicast_handler_v1 - process incoming tt roam tvlv container
+ * batadv_roam_tvlv_unicast_handler_v1() - process incoming tt roam tvlv
+ *  container
  * @bat_priv: the bat priv with all the soft interface information
  * @src: mac address of tt tvlv sender
  * @dst: mac address of tt tvlv recipient
@@ -4281,7 +4321,7 @@ static int batadv_roam_tvlv_unicast_handler_v1(struct batadv_priv *bat_priv,
 }
 
 /**
- * batadv_tt_init - initialise the translation table internals
+ * batadv_tt_init() - initialise the translation table internals
  * @bat_priv: the bat priv with all the soft interface information
  *
  * Return: 0 on success or negative error number in case of failure.
@@ -4317,7 +4357,7 @@ int batadv_tt_init(struct batadv_priv *bat_priv)
 }
 
 /**
- * batadv_tt_global_is_isolated - check if a client is marked as isolated
+ * batadv_tt_global_is_isolated() - check if a client is marked as isolated
  * @bat_priv: the bat priv with all the soft interface information
  * @addr: the mac address of the client
  * @vid: the identifier of the VLAN where this client is connected
@@ -4343,7 +4383,7 @@ bool batadv_tt_global_is_isolated(struct batadv_priv *bat_priv,
 }
 
 /**
- * batadv_tt_cache_init - Initialize tt memory object cache
+ * batadv_tt_cache_init() - Initialize tt memory object cache
  *
  * Return: 0 on success or negative error number in case of failure.
  */
@@ -4412,7 +4452,7 @@ int __init batadv_tt_cache_init(void)
 }
 
 /**
- * batadv_tt_cache_destroy - Destroy tt memory object cache
+ * batadv_tt_cache_destroy() - Destroy tt memory object cache
  */
 void batadv_tt_cache_destroy(void)
 {
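
The bulk of the translation-table.c changes above are mechanical: every
kernel-doc function header gains parentheses after the function name
("name() - description") so the documentation toolchain recognizes it as a
function reference, and a few previously undocumented exported functions
(batadv_tt_free(), batadv_is_ap_isolated(),
batadv_tt_add_temporary_global_entry()) receive kernel-doc blocks. A minimal
sketch of the resulting convention, using a hypothetical function:

  /* Illustration only: example_func is hypothetical, not part of the patch. */
  struct batadv_priv;

  /**
   * example_func() - do something with the soft interface data
   * @bat_priv: the bat priv with all the soft interface information
   *
   * Before this series the first line would have read "example_func - ...";
   * the added "()" lets kernel-doc identify the name as a function.
   */
  static void example_func(struct batadv_priv *bat_priv);
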
diff --git a/net/batman-adv/translation-table.h b/net/batman-adv/translation-table.h
index 411d586..8d9e3ab 100644
--- a/net/batman-adv/translation-table.h
+++ b/net/batman-adv/translation-table.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
 /* Copyright (C) 2007-2017  B.A.T.M.A.N. contributors:
  *
  * Marek Lindner, Simon Wunderlich, Antonio Quartulli
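
Note the two comment forms used for the new SPDX tags in this series: C source
files take the C++-style comment while headers keep the block form, the usual
rationale being that headers may be included from non-C contexts where "//"
comments are not accepted:

  /* first line of a .c file: */
  // SPDX-License-Identifier: GPL-2.0

  /* first line of a .h file: */
  /* SPDX-License-Identifier: GPL-2.0 */
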
diff --git a/net/batman-adv/tvlv.c b/net/batman-adv/tvlv.c
index 1d9e267..5ffcb45 100644
--- a/net/batman-adv/tvlv.c
+++ b/net/batman-adv/tvlv.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
 /* Copyright (C) 2007-2017  B.A.T.M.A.N. contributors:
  *
  * Marek Lindner, Simon Wunderlich
@@ -19,7 +20,7 @@
 
 #include <linux/byteorder/generic.h>
 #include <linux/etherdevice.h>
-#include <linux/fs.h>
+#include <linux/gfp.h>
 #include <linux/if_ether.h>
 #include <linux/kernel.h>
 #include <linux/kref.h>
@@ -35,14 +36,14 @@
 #include <linux/stddef.h>
 #include <linux/string.h>
 #include <linux/types.h>
+#include <uapi/linux/batadv_packet.h>
 
 #include "originator.h"
-#include "packet.h"
 #include "send.h"
 #include "tvlv.h"
 
 /**
- * batadv_tvlv_handler_release - release tvlv handler from lists and queue for
+ * batadv_tvlv_handler_release() - release tvlv handler from lists and queue for
  *  free after rcu grace period
  * @ref: kref pointer of the tvlv
  */
@@ -55,7 +56,7 @@ static void batadv_tvlv_handler_release(struct kref *ref)
 }
 
 /**
- * batadv_tvlv_handler_put - decrement the tvlv container refcounter and
+ * batadv_tvlv_handler_put() - decrement the tvlv container refcounter and
  *  possibly release it
  * @tvlv_handler: the tvlv handler to free
  */
@@ -65,7 +66,7 @@ static void batadv_tvlv_handler_put(struct batadv_tvlv_handler *tvlv_handler)
 }
 
 /**
- * batadv_tvlv_handler_get - retrieve tvlv handler from the tvlv handler list
+ * batadv_tvlv_handler_get() - retrieve tvlv handler from the tvlv handler list
  *  based on the provided type and version (both need to match)
  * @bat_priv: the bat priv with all the soft interface information
  * @type: tvlv handler type to look for
@@ -99,7 +100,7 @@ batadv_tvlv_handler_get(struct batadv_priv *bat_priv, u8 type, u8 version)
 }
 
 /**
- * batadv_tvlv_container_release - release tvlv from lists and free
+ * batadv_tvlv_container_release() - release tvlv from lists and free
  * @ref: kref pointer of the tvlv
  */
 static void batadv_tvlv_container_release(struct kref *ref)
@@ -111,7 +112,7 @@ static void batadv_tvlv_container_release(struct kref *ref)
 }
 
 /**
- * batadv_tvlv_container_put - decrement the tvlv container refcounter and
+ * batadv_tvlv_container_put() - decrement the tvlv container refcounter and
  *  possibly release it
  * @tvlv: the tvlv container to free
  */
@@ -121,7 +122,7 @@ static void batadv_tvlv_container_put(struct batadv_tvlv_container *tvlv)
 }
 
 /**
- * batadv_tvlv_container_get - retrieve tvlv container from the tvlv container
+ * batadv_tvlv_container_get() - retrieve tvlv container from the tvlv container
  *  list based on the provided type and version (both need to match)
  * @bat_priv: the bat priv with all the soft interface information
  * @type: tvlv container type to look for
@@ -155,7 +156,7 @@ batadv_tvlv_container_get(struct batadv_priv *bat_priv, u8 type, u8 version)
 }
 
 /**
- * batadv_tvlv_container_list_size - calculate the size of the tvlv container
+ * batadv_tvlv_container_list_size() - calculate the size of the tvlv container
  *  list entries
  * @bat_priv: the bat priv with all the soft interface information
  *
@@ -180,8 +181,8 @@ static u16 batadv_tvlv_container_list_size(struct batadv_priv *bat_priv)
 }
 
 /**
- * batadv_tvlv_container_remove - remove tvlv container from the tvlv container
- *  list
+ * batadv_tvlv_container_remove() - remove tvlv container from the tvlv
+ *  container list
  * @bat_priv: the bat priv with all the soft interface information
  * @tvlv: the to be removed tvlv container
  *
@@ -204,7 +205,7 @@ static void batadv_tvlv_container_remove(struct batadv_priv *bat_priv,
 }
 
 /**
- * batadv_tvlv_container_unregister - unregister tvlv container based on the
+ * batadv_tvlv_container_unregister() - unregister tvlv container based on the
  *  provided type and version (both need to match)
  * @bat_priv: the bat priv with all the soft interface information
  * @type: tvlv container type to unregister
@@ -222,7 +223,7 @@ void batadv_tvlv_container_unregister(struct batadv_priv *bat_priv,
 }
 
 /**
- * batadv_tvlv_container_register - register tvlv type, version and content
+ * batadv_tvlv_container_register() - register tvlv type, version and content
  *  to be propagated with each (primary interface) OGM
  * @bat_priv: the bat priv with all the soft interface information
  * @type: tvlv container type
@@ -267,7 +268,7 @@ void batadv_tvlv_container_register(struct batadv_priv *bat_priv,
 }
 
 /**
- * batadv_tvlv_realloc_packet_buff - reallocate packet buffer to accommodate
+ * batadv_tvlv_realloc_packet_buff() - reallocate packet buffer to accommodate
  *  requested packet size
  * @packet_buff: packet buffer
  * @packet_buff_len: packet buffer size
@@ -300,7 +301,7 @@ static bool batadv_tvlv_realloc_packet_buff(unsigned char **packet_buff,
 }
 
 /**
- * batadv_tvlv_container_ogm_append - append tvlv container content to given
+ * batadv_tvlv_container_ogm_append() - append tvlv container content to given
  *  OGM packet buffer
  * @bat_priv: the bat priv with all the soft interface information
  * @packet_buff: ogm packet buffer
@@ -353,7 +354,7 @@ u16 batadv_tvlv_container_ogm_append(struct batadv_priv *bat_priv,
 }
 
 /**
- * batadv_tvlv_call_handler - parse the given tvlv buffer to call the
+ * batadv_tvlv_call_handler() - parse the given tvlv buffer to call the
  *  appropriate handlers
  * @bat_priv: the bat priv with all the soft interface information
  * @tvlv_handler: tvlv callback function handling the tvlv content
@@ -407,7 +408,7 @@ static int batadv_tvlv_call_handler(struct batadv_priv *bat_priv,
 }
 
 /**
- * batadv_tvlv_containers_process - parse the given tvlv buffer to call the
+ * batadv_tvlv_containers_process() - parse the given tvlv buffer to call the
  *  appropriate handlers
  * @bat_priv: the bat priv with all the soft interface information
  * @ogm_source: flag indicating whether the tvlv is an ogm or a unicast packet
@@ -474,7 +475,7 @@ int batadv_tvlv_containers_process(struct batadv_priv *bat_priv,
 }
 
 /**
- * batadv_tvlv_ogm_receive - process an incoming ogm and call the appropriate
+ * batadv_tvlv_ogm_receive() - process an incoming ogm and call the appropriate
  *  handlers
  * @bat_priv: the bat priv with all the soft interface information
  * @batadv_ogm_packet: ogm packet containing the tvlv containers
@@ -501,7 +502,7 @@ void batadv_tvlv_ogm_receive(struct batadv_priv *bat_priv,
 }
 
 /**
- * batadv_tvlv_handler_register - register tvlv handler based on the provided
+ * batadv_tvlv_handler_register() - register tvlv handler based on the provided
  *  type and version (both need to match) for ogm tvlv payload and/or unicast
  *  payload
  * @bat_priv: the bat priv with all the soft interface information
@@ -556,7 +557,7 @@ void batadv_tvlv_handler_register(struct batadv_priv *bat_priv,
 }
 
 /**
- * batadv_tvlv_handler_unregister - unregister tvlv handler based on the
+ * batadv_tvlv_handler_unregister() - unregister tvlv handler based on the
  *  provided type and version (both need to match)
  * @bat_priv: the bat priv with all the soft interface information
  * @type: tvlv handler type to be unregistered
@@ -579,7 +580,7 @@ void batadv_tvlv_handler_unregister(struct batadv_priv *bat_priv,
 }
 
 /**
- * batadv_tvlv_unicast_send - send a unicast packet with tvlv payload to the
+ * batadv_tvlv_unicast_send() - send a unicast packet with tvlv payload to the
  *  specified host
  * @bat_priv: the bat priv with all the soft interface information
  * @src: source mac address of the unicast packet
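
The release/put pairs touched above (batadv_tvlv_handler_release()/_put(),
batadv_tvlv_container_release()/_put()) follow the common kref pattern: the
put helper drops a reference, and the release callback runs only once the
last reference goes away (for the handler, the free is additionally deferred
past an RCU grace period). A minimal self-contained sketch of the
RCU-deferred variant, with hypothetical names:

  #include <linux/kref.h>
  #include <linux/rcupdate.h>
  #include <linux/slab.h>

  struct example_obj {
  	struct kref refcount;	/* number of contexts the object is used */
  	struct rcu_head rcu;	/* for freeing after an RCU grace period */
  };

  /* invoked by kref_put() once the last reference has been dropped */
  static void example_obj_release(struct kref *ref)
  {
  	struct example_obj *obj;

  	obj = container_of(ref, struct example_obj, refcount);
  	kfree_rcu(obj, rcu);	/* free only after the RCU grace period */
  }

  /* decrement the refcounter and possibly release the object */
  static void example_obj_put(struct example_obj *obj)
  {
  	kref_put(&obj->refcount, example_obj_release);
  }
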
diff --git a/net/batman-adv/tvlv.h b/net/batman-adv/tvlv.h
index 4d01400..a74df33 100644
--- a/net/batman-adv/tvlv.h
+++ b/net/batman-adv/tvlv.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
 /* Copyright (C) 2007-2017  B.A.T.M.A.N. contributors:
  *
  * Marek Lindner, Simon Wunderlich
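
The types.h hunks below perform the second half of the kernel-doc
conversion: struct members and enum values move out of the single @-list in
the header comment into inline "/** @name: ... */" comments next to each
definition, keeping the documentation adjacent to what it describes and
letting the former free-form lock comments double as kernel-doc. A minimal
sketch of the resulting layout, using a hypothetical struct:

  #include <linux/list.h>
  #include <linux/spinlock.h>

  /**
   * struct example_entry - hypothetical entry, for illustration only
   */
  struct example_entry {
  	/** @list: list node information */
  	struct hlist_node list;

  	/**
  	 * @lock: protects @list; longer member descriptions use a
  	 *  multi-line block like this one
  	 */
  	spinlock_t lock;
  };
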
diff --git a/net/batman-adv/types.h b/net/batman-adv/types.h
index a627958..bb15784 100644
--- a/net/batman-adv/types.h
+++ b/net/batman-adv/types.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
 /* Copyright (C) 2007-2017  B.A.T.M.A.N. contributors:
  *
  * Marek Lindner, Simon Wunderlich
@@ -34,10 +35,9 @@
 #include <linux/types.h>
 #include <linux/wait.h>
 #include <linux/workqueue.h>
+#include <uapi/linux/batadv_packet.h>
 #include <uapi/linux/batman_adv.h>
 
-#include "packet.h"
-
 struct seq_file;
 
 #ifdef CONFIG_BATMAN_ADV_DAT
@@ -54,13 +54,15 @@ struct seq_file;
 
 /**
  * enum batadv_dhcp_recipient - dhcp destination
- * @BATADV_DHCP_NO: packet is not a dhcp message
- * @BATADV_DHCP_TO_SERVER: dhcp message is directed to a server
- * @BATADV_DHCP_TO_CLIENT: dhcp message is directed to a client
  */
 enum batadv_dhcp_recipient {
+	/** @BATADV_DHCP_NO: packet is not a dhcp message */
 	BATADV_DHCP_NO = 0,
+
+	/** @BATADV_DHCP_TO_SERVER: dhcp message is directed to a server */
 	BATADV_DHCP_TO_SERVER,
+
+	/** @BATADV_DHCP_TO_CLIENT: dhcp message is directed to a client */
 	BATADV_DHCP_TO_CLIENT,
 };
 
@@ -78,196 +80,274 @@ enum batadv_dhcp_recipient {
 
 /**
  * struct batadv_hard_iface_bat_iv - per hard-interface B.A.T.M.A.N. IV data
- * @ogm_buff: buffer holding the OGM packet
- * @ogm_buff_len: length of the OGM packet buffer
- * @ogm_seqno: OGM sequence number - used to identify each OGM
  */
 struct batadv_hard_iface_bat_iv {
+	/** @ogm_buff: buffer holding the OGM packet */
 	unsigned char *ogm_buff;
+
+	/** @ogm_buff_len: length of the OGM packet buffer */
 	int ogm_buff_len;
+
+	/** @ogm_seqno: OGM sequence number - used to identify each OGM */
 	atomic_t ogm_seqno;
 };
 
 /**
  * enum batadv_v_hard_iface_flags - interface flags useful to B.A.T.M.A.N. V
- * @BATADV_FULL_DUPLEX: tells if the connection over this link is full-duplex
- * @BATADV_WARNING_DEFAULT: tells whether we have warned the user that no
- *  throughput data is available for this interface and that default values are
- *  assumed.
  */
 enum batadv_v_hard_iface_flags {
+	/**
+	 * @BATADV_FULL_DUPLEX: tells if the connection over this link is
+	 *  full-duplex
+	 */
 	BATADV_FULL_DUPLEX	= BIT(0),
+
+	/**
+	 * @BATADV_WARNING_DEFAULT: tells whether we have warned the user that
+	 *  no throughput data is available for this interface and that default
+	 *  values are assumed.
+	 */
 	BATADV_WARNING_DEFAULT	= BIT(1),
 };
 
 /**
  * struct batadv_hard_iface_bat_v - per hard-interface B.A.T.M.A.N. V data
- * @elp_interval: time interval between two ELP transmissions
- * @elp_seqno: current ELP sequence number
- * @elp_skb: base skb containing the ELP message to send
- * @elp_wq: workqueue used to schedule ELP transmissions
- * @throughput_override: throughput override to disable link auto-detection
- * @flags: interface specific flags
  */
 struct batadv_hard_iface_bat_v {
+	/** @elp_interval: time interval between two ELP transmissions */
 	atomic_t elp_interval;
+
+	/** @elp_seqno: current ELP sequence number */
 	atomic_t elp_seqno;
+
+	/** @elp_skb: base skb containing the ELP message to send */
 	struct sk_buff *elp_skb;
+
+	/** @elp_wq: workqueue used to schedule ELP transmissions */
 	struct delayed_work elp_wq;
+
+	/**
+	 * @throughput_override: throughput override to disable link
+	 *  auto-detection
+	 */
 	atomic_t throughput_override;
+
+	/** @flags: interface specific flags */
 	u8 flags;
 };
 
 /**
  * enum batadv_hard_iface_wifi_flags - Flags describing the wifi configuration
  *  of a batadv_hard_iface
- * @BATADV_HARDIF_WIFI_WEXT_DIRECT: it is a wext wifi device
- * @BATADV_HARDIF_WIFI_CFG80211_DIRECT: it is a cfg80211 wifi device
- * @BATADV_HARDIF_WIFI_WEXT_INDIRECT: link device is a wext wifi device
- * @BATADV_HARDIF_WIFI_CFG80211_INDIRECT: link device is a cfg80211 wifi device
  */
 enum batadv_hard_iface_wifi_flags {
+	/** @BATADV_HARDIF_WIFI_WEXT_DIRECT: it is a wext wifi device */
 	BATADV_HARDIF_WIFI_WEXT_DIRECT = BIT(0),
+
+	/** @BATADV_HARDIF_WIFI_CFG80211_DIRECT: it is a cfg80211 wifi device */
 	BATADV_HARDIF_WIFI_CFG80211_DIRECT = BIT(1),
+
+	/**
+	 * @BATADV_HARDIF_WIFI_WEXT_INDIRECT: link device is a wext wifi device
+	 */
 	BATADV_HARDIF_WIFI_WEXT_INDIRECT = BIT(2),
+
+	/**
+	 * @BATADV_HARDIF_WIFI_CFG80211_INDIRECT: link device is a cfg80211 wifi
+	 * device
+	 */
 	BATADV_HARDIF_WIFI_CFG80211_INDIRECT = BIT(3),
 };
 
 /**
  * struct batadv_hard_iface - network device known to batman-adv
- * @list: list node for batadv_hardif_list
- * @if_num: identificator of the interface
- * @if_status: status of the interface for batman-adv
- * @num_bcasts: number of payload re-broadcasts on this interface (ARQ)
- * @wifi_flags: flags whether this is (directly or indirectly) a wifi interface
- * @net_dev: pointer to the net_device
- * @hardif_obj: kobject of the per interface sysfs "mesh" directory
- * @refcount: number of contexts the object is used
- * @batman_adv_ptype: packet type describing packets that should be processed by
- *  batman-adv for this interface
- * @soft_iface: the batman-adv interface which uses this network interface
- * @rcu: struct used for freeing in an RCU-safe manner
- * @bat_iv: per hard-interface B.A.T.M.A.N. IV data
- * @bat_v: per hard-interface B.A.T.M.A.N. V data
- * @debug_dir: dentry for nc subdir in batman-adv directory in debugfs
- * @neigh_list: list of unique single hop neighbors via this interface
- * @neigh_list_lock: lock protecting neigh_list
  */
 struct batadv_hard_iface {
+	/** @list: list node for batadv_hardif_list */
 	struct list_head list;
+
+	/** @if_num: identifier of the interface */
 	s16 if_num;
+
+	/** @if_status: status of the interface for batman-adv */
 	char if_status;
+
+	/**
+	 * @num_bcasts: number of payload re-broadcasts on this interface (ARQ)
+	 */
 	u8 num_bcasts;
+
+	/**
+	 * @wifi_flags: flags whether this is (directly or indirectly) a wifi
+	 *  interface
+	 */
 	u32 wifi_flags;
+
+	/** @net_dev: pointer to the net_device */
 	struct net_device *net_dev;
+
+	/** @hardif_obj: kobject of the per interface sysfs "mesh" directory */
 	struct kobject *hardif_obj;
+
+	/** @refcount: number of contexts the object is used */
 	struct kref refcount;
+
+	/**
+	 * @batman_adv_ptype: packet type describing packets that should be
+	 * processed by batman-adv for this interface
+	 */
 	struct packet_type batman_adv_ptype;
+
+	/**
+	 * @soft_iface: the batman-adv interface which uses this network
+	 *  interface
+	 */
 	struct net_device *soft_iface;
+
+	/** @rcu: struct used for freeing in an RCU-safe manner */
 	struct rcu_head rcu;
+
+	/** @bat_iv: per hard-interface B.A.T.M.A.N. IV data */
 	struct batadv_hard_iface_bat_iv bat_iv;
+
 #ifdef CONFIG_BATMAN_ADV_BATMAN_V
+	/** @bat_v: per hard-interface B.A.T.M.A.N. V data */
 	struct batadv_hard_iface_bat_v bat_v;
 #endif
+
+	/**
+	 * @debug_dir: dentry for nc subdir in batman-adv directory in debugfs
+	 */
 	struct dentry *debug_dir;
+
+	/**
+	 * @neigh_list: list of unique single hop neighbors via this interface
+	 */
 	struct hlist_head neigh_list;
-	/* neigh_list_lock protects: neigh_list */
+
+	/** @neigh_list_lock: lock protecting neigh_list */
 	spinlock_t neigh_list_lock;
 };
 
 /**
  * struct batadv_orig_ifinfo - originator info per outgoing interface
- * @list: list node for orig_node::ifinfo_list
- * @if_outgoing: pointer to outgoing hard-interface
- * @router: router that should be used to reach this originator
- * @last_real_seqno: last and best known sequence number
- * @last_ttl: ttl of last received packet
- * @last_seqno_forwarded: seqno of the OGM which was forwarded last
- * @batman_seqno_reset: time when the batman seqno window was reset
- * @refcount: number of contexts the object is used
- * @rcu: struct used for freeing in an RCU-safe manner
  */
 struct batadv_orig_ifinfo {
+	/** @list: list node for &batadv_orig_node.ifinfo_list */
 	struct hlist_node list;
+
+	/** @if_outgoing: pointer to outgoing hard-interface */
 	struct batadv_hard_iface *if_outgoing;
-	struct batadv_neigh_node __rcu *router; /* rcu protected pointer */
+
+	/** @router: router that should be used to reach this originator */
+	struct batadv_neigh_node __rcu *router;
+
+	/** @last_real_seqno: last and best known sequence number */
 	u32 last_real_seqno;
+
+	/** @last_ttl: ttl of last received packet */
 	u8 last_ttl;
+
+	/** @last_seqno_forwarded: seqno of the OGM which was forwarded last */
 	u32 last_seqno_forwarded;
+
+	/** @batman_seqno_reset: time when the batman seqno window was reset */
 	unsigned long batman_seqno_reset;
+
+	/** @refcount: number of contexts the object is used */
 	struct kref refcount;
+
+	/** @rcu: struct used for freeing in an RCU-safe manner */
 	struct rcu_head rcu;
 };
 
 /**
  * struct batadv_frag_table_entry - head in the fragment buffer table
- * @fragment_list: head of list with fragments
- * @lock: lock to protect the list of fragments
- * @timestamp: time (jiffie) of last received fragment
- * @seqno: sequence number of the fragments in the list
- * @size: accumulated size of packets in list
- * @total_size: expected size of the assembled packet
  */
 struct batadv_frag_table_entry {
+	/** @fragment_list: head of list with fragments */
 	struct hlist_head fragment_list;
-	spinlock_t lock; /* protects fragment_list */
+
+	/** @lock: lock to protect the list of fragments */
+	spinlock_t lock;
+
+	/** @timestamp: time (jiffie) of last received fragment */
 	unsigned long timestamp;
+
+	/** @seqno: sequence number of the fragments in the list */
 	u16 seqno;
+
+	/** @size: accumulated size of packets in list */
 	u16 size;
+
+	/** @total_size: expected size of the assembled packet */
 	u16 total_size;
 };
 
 /**
  * struct batadv_frag_list_entry - entry in a list of fragments
- * @list: list node information
- * @skb: fragment
- * @no: fragment number in the set
  */
 struct batadv_frag_list_entry {
+	/** @list: list node information */
 	struct hlist_node list;
+
+	/** @skb: fragment */
 	struct sk_buff *skb;
+
+	/** @no: fragment number in the set */
 	u8 no;
 };
 
 /**
  * struct batadv_vlan_tt - VLAN specific TT attributes
- * @crc: CRC32 checksum of the entries belonging to this vlan
- * @num_entries: number of TT entries for this VLAN
  */
 struct batadv_vlan_tt {
+	/** @crc: CRC32 checksum of the entries belonging to this vlan */
 	u32 crc;
+
+	/** @num_entries: number of TT entries for this VLAN */
 	atomic_t num_entries;
 };
 
 /**
  * struct batadv_orig_node_vlan - VLAN specific data per orig_node
- * @vid: the VLAN identifier
- * @tt: VLAN specific TT attributes
- * @list: list node for orig_node::vlan_list
- * @refcount: number of context where this object is currently in use
- * @rcu: struct used for freeing in a RCU-safe manner
  */
 struct batadv_orig_node_vlan {
+	/** @vid: the VLAN identifier */
 	unsigned short vid;
+
+	/** @tt: VLAN specific TT attributes */
 	struct batadv_vlan_tt tt;
+
+	/** @list: list node for &batadv_orig_node.vlan_list */
 	struct hlist_node list;
+
+	/**
+	 * @refcount: number of contexts where this object is currently in use
+	 */
 	struct kref refcount;
+
+	/** @rcu: struct used for freeing in a RCU-safe manner */
 	struct rcu_head rcu;
 };
 
 /**
  * struct batadv_orig_bat_iv - B.A.T.M.A.N. IV private orig_node members
- * @bcast_own: set of bitfields (one per hard-interface) where each one counts
- * the number of our OGMs this orig_node rebroadcasted "back" to us  (relative
- * to last_real_seqno). Every bitfield is BATADV_TQ_LOCAL_WINDOW_SIZE bits long.
- * @bcast_own_sum: sum of bcast_own
- * @ogm_cnt_lock: lock protecting bcast_own, bcast_own_sum,
- *  neigh_node->bat_iv.real_bits & neigh_node->bat_iv.real_packet_count
  */
 struct batadv_orig_bat_iv {
+	/**
+	 * @bcast_own: set of bitfields (one per hard-interface) where each one
+	 * counts the number of our OGMs this orig_node rebroadcasted "back" to
+	 * us  (relative to last_real_seqno). Every bitfield is
+	 * BATADV_TQ_LOCAL_WINDOW_SIZE bits long.
+	 */
 	unsigned long *bcast_own;
+
+	/** @bcast_own_sum: sum of bcast_own */
 	u8 *bcast_own_sum;
-	/* ogm_cnt_lock protects: bcast_own, bcast_own_sum,
+
+	/**
+	 * @ogm_cnt_lock: lock protecting bcast_own, bcast_own_sum,
 	 * neigh_node->bat_iv.real_bits & neigh_node->bat_iv.real_packet_count
 	 */
 	spinlock_t ogm_cnt_lock;
@@ -275,130 +355,205 @@ struct batadv_orig_bat_iv {
 
 /**
  * struct batadv_orig_node - structure for orig_list maintaining nodes of mesh
- * @orig: originator ethernet address
- * @ifinfo_list: list for routers per outgoing interface
- * @last_bonding_candidate: pointer to last ifinfo of last used router
- * @dat_addr: address of the orig node in the distributed hash
- * @last_seen: time when last packet from this node was received
- * @bcast_seqno_reset: time when the broadcast seqno window was reset
- * @mcast_handler_lock: synchronizes mcast-capability and -flag changes
- * @mcast_flags: multicast flags announced by the orig node
- * @mcast_want_all_unsnoopables_node: a list node for the
- *  mcast.want_all_unsnoopables list
- * @mcast_want_all_ipv4_node: a list node for the mcast.want_all_ipv4 list
- * @mcast_want_all_ipv6_node: a list node for the mcast.want_all_ipv6 list
- * @capabilities: announced capabilities of this originator
- * @capa_initialized: bitfield to remember whether a capability was initialized
- * @last_ttvn: last seen translation table version number
- * @tt_buff: last tt changeset this node received from the orig node
- * @tt_buff_len: length of the last tt changeset this node received from the
- *  orig node
- * @tt_buff_lock: lock that protects tt_buff and tt_buff_len
- * @tt_lock: prevents from updating the table while reading it. Table update is
- *  made up by two operations (data structure update and metdata -CRC/TTVN-
- *  recalculation) and they have to be executed atomically in order to avoid
- *  another thread to read the table/metadata between those.
- * @bcast_bits: bitfield containing the info which payload broadcast originated
- *  from this orig node this host already has seen (relative to
- *  last_bcast_seqno)
- * @last_bcast_seqno: last broadcast sequence number received by this host
- * @neigh_list: list of potential next hop neighbor towards this orig node
- * @neigh_list_lock: lock protecting neigh_list and router
- * @hash_entry: hlist node for batadv_priv::orig_hash
- * @bat_priv: pointer to soft_iface this orig node belongs to
- * @bcast_seqno_lock: lock protecting bcast_bits & last_bcast_seqno
- * @refcount: number of contexts the object is used
- * @rcu: struct used for freeing in an RCU-safe manner
- * @in_coding_list: list of nodes this orig can hear
- * @out_coding_list: list of nodes that can hear this orig
- * @in_coding_list_lock: protects in_coding_list
- * @out_coding_list_lock: protects out_coding_list
- * @fragments: array with heads for fragment chains
- * @vlan_list: a list of orig_node_vlan structs, one per VLAN served by the
- *  originator represented by this object
- * @vlan_list_lock: lock protecting vlan_list
- * @bat_iv: B.A.T.M.A.N. IV private structure
  */
 struct batadv_orig_node {
+	/** @orig: originator ethernet address */
 	u8 orig[ETH_ALEN];
+
+	/** @ifinfo_list: list for routers per outgoing interface */
 	struct hlist_head ifinfo_list;
+
+	/**
+	 * @last_bonding_candidate: pointer to last ifinfo of last used router
+	 */
 	struct batadv_orig_ifinfo *last_bonding_candidate;
+
 #ifdef CONFIG_BATMAN_ADV_DAT
+	/** @dat_addr: address of the orig node in the distributed hash */
 	batadv_dat_addr_t dat_addr;
 #endif
+
+	/** @last_seen: time when last packet from this node was received */
 	unsigned long last_seen;
+
+	/**
+	 * @bcast_seqno_reset: time when the broadcast seqno window was reset
+	 */
 	unsigned long bcast_seqno_reset;
+
 #ifdef CONFIG_BATMAN_ADV_MCAST
-	/* synchronizes mcast tvlv specific orig changes */
+	/**
+	 * @mcast_handler_lock: synchronizes mcast-capability and -flag changes
+	 */
 	spinlock_t mcast_handler_lock;
+
+	/** @mcast_flags: multicast flags announced by the orig node */
 	u8 mcast_flags;
+
+	/**
+	 * @mcast_want_all_unsnoopables_node: a list node for the
+	 *  mcast.want_all_unsnoopables list
+	 */
 	struct hlist_node mcast_want_all_unsnoopables_node;
+
+	/**
+	 * @mcast_want_all_ipv4_node: a list node for the mcast.want_all_ipv4
+	 *  list
+	 */
 	struct hlist_node mcast_want_all_ipv4_node;
+	/**
+	 * @mcast_want_all_ipv6_node: a list node for the mcast.want_all_ipv6
+	 *  list
+	 */
 	struct hlist_node mcast_want_all_ipv6_node;
 #endif
+
+	/** @capabilities: announced capabilities of this originator */
 	unsigned long capabilities;
+
+	/**
+	 * @capa_initialized: bitfield to remember whether a capability was
+	 *  initialized
+	 */
 	unsigned long capa_initialized;
+
+	/** @last_ttvn: last seen translation table version number */
 	atomic_t last_ttvn;
+
+	/** @tt_buff: last tt changeset this node received from the orig node */
 	unsigned char *tt_buff;
+
+	/**
+	 * @tt_buff_len: length of the last tt changeset this node received
+	 *  from the orig node
+	 */
 	s16 tt_buff_len;
-	spinlock_t tt_buff_lock; /* protects tt_buff & tt_buff_len */
-	/* prevents from changing the table while reading it */
+
+	/** @tt_buff_lock: lock that protects tt_buff and tt_buff_len */
+	spinlock_t tt_buff_lock;
+
+	/**
+	 * @tt_lock: prevents the table from being updated while it is read.
+	 *  A table update is made up of two operations (data structure
+	 *  update and metadata -CRC/TTVN- recalculation) and they have to be
+	 *  executed atomically in order to avoid another thread reading the
+	 *  table/metadata between those.
+	 */
 	spinlock_t tt_lock;
+
+	/**
+	 * @bcast_bits: bitfield containing the info which payload broadcast
+	 *  originated from this orig node this host already has seen (relative
+	 *  to last_bcast_seqno)
+	 */
 	DECLARE_BITMAP(bcast_bits, BATADV_TQ_LOCAL_WINDOW_SIZE);
+
+	/**
+	 * @last_bcast_seqno: last broadcast sequence number received by this
+	 *  host
+	 */
 	u32 last_bcast_seqno;
+
+	/**
+	 * @neigh_list: list of potential next hop neighbor towards this orig
+	 *  node
+	 */
 	struct hlist_head neigh_list;
-	/* neigh_list_lock protects: neigh_list, ifinfo_list,
-	 * last_bonding_candidate and router
+
+	/**
+	 * @neigh_list_lock: lock protecting neigh_list, ifinfo_list,
+	 *  last_bonding_candidate and router
 	 */
 	spinlock_t neigh_list_lock;
+
+	/** @hash_entry: hlist node for &batadv_priv.orig_hash */
 	struct hlist_node hash_entry;
+
+	/** @bat_priv: pointer to soft_iface this orig node belongs to */
 	struct batadv_priv *bat_priv;
-	/* bcast_seqno_lock protects: bcast_bits & last_bcast_seqno */
+
+	/** @bcast_seqno_lock: lock protecting bcast_bits & last_bcast_seqno */
 	spinlock_t bcast_seqno_lock;
+
+	/** @refcount: number of contexts the object is used */
 	struct kref refcount;
+
+	/** @rcu: struct used for freeing in an RCU-safe manner */
 	struct rcu_head rcu;
+
 #ifdef CONFIG_BATMAN_ADV_NC
+	/** @in_coding_list: list of nodes this orig can hear */
 	struct list_head in_coding_list;
+
+	/** @out_coding_list: list of nodes that can hear this orig */
 	struct list_head out_coding_list;
-	spinlock_t in_coding_list_lock; /* Protects in_coding_list */
-	spinlock_t out_coding_list_lock; /* Protects out_coding_list */
+
+	/** @in_coding_list_lock: protects in_coding_list */
+	spinlock_t in_coding_list_lock;
+
+	/** @out_coding_list_lock: protects out_coding_list */
+	spinlock_t out_coding_list_lock;
 #endif
+
+	/** @fragments: array with heads for fragment chains */
 	struct batadv_frag_table_entry fragments[BATADV_FRAG_BUFFER_COUNT];
+
+	/**
+	 * @vlan_list: a list of orig_node_vlan structs, one per VLAN served by
+	 *  the originator represented by this object
+	 */
 	struct hlist_head vlan_list;
-	spinlock_t vlan_list_lock; /* protects vlan_list */
+
+	/** @vlan_list_lock: lock protecting vlan_list */
+	spinlock_t vlan_list_lock;
+
+	/** @bat_iv: B.A.T.M.A.N. IV private structure */
 	struct batadv_orig_bat_iv bat_iv;
 };
 
 /**
  * enum batadv_orig_capabilities - orig node capabilities
- * @BATADV_ORIG_CAPA_HAS_DAT: orig node has distributed arp table enabled
- * @BATADV_ORIG_CAPA_HAS_NC: orig node has network coding enabled
- * @BATADV_ORIG_CAPA_HAS_TT: orig node has tt capability
- * @BATADV_ORIG_CAPA_HAS_MCAST: orig node has some multicast capability
- *  (= orig node announces a tvlv of type BATADV_TVLV_MCAST)
  */
 enum batadv_orig_capabilities {
+	/**
+	 * @BATADV_ORIG_CAPA_HAS_DAT: orig node has distributed arp table
+	 *  enabled
+	 */
 	BATADV_ORIG_CAPA_HAS_DAT,
+
+	/** @BATADV_ORIG_CAPA_HAS_NC: orig node has network coding enabled */
 	BATADV_ORIG_CAPA_HAS_NC,
+
+	/** @BATADV_ORIG_CAPA_HAS_TT: orig node has tt capability */
 	BATADV_ORIG_CAPA_HAS_TT,
+
+	/**
+	 * @BATADV_ORIG_CAPA_HAS_MCAST: orig node has some multicast capability
+	 *  (= orig node announces a tvlv of type BATADV_TVLV_MCAST)
+	 */
 	BATADV_ORIG_CAPA_HAS_MCAST,
 };
 
 /**
  * struct batadv_gw_node - structure for orig nodes announcing gw capabilities
- * @list: list node for batadv_priv_gw::list
- * @orig_node: pointer to corresponding orig node
- * @bandwidth_down: advertised uplink download bandwidth
- * @bandwidth_up: advertised uplink upload bandwidth
- * @refcount: number of contexts the object is used
- * @rcu: struct used for freeing in an RCU-safe manner
  */
 struct batadv_gw_node {
+	/** @list: list node for &batadv_priv_gw.list */
 	struct hlist_node list;
+
+	/** @orig_node: pointer to corresponding orig node */
 	struct batadv_orig_node *orig_node;
+
+	/** @bandwidth_down: advertised uplink download bandwidth */
 	u32 bandwidth_down;
+
+	/** @bandwidth_up: advertised uplink upload bandwidth */
 	u32 bandwidth_up;
+
+	/** @refcount: number of contexts the object is used */
 	struct kref refcount;
+
+	/** @rcu: struct used for freeing in an RCU-safe manner */
 	struct rcu_head rcu;
 };
 
@@ -407,118 +562,161 @@ DECLARE_EWMA(throughput, 10, 8)
 /**
  * struct batadv_hardif_neigh_node_bat_v - B.A.T.M.A.N. V private neighbor
  *  information
- * @throughput: ewma link throughput towards this neighbor
- * @elp_interval: time interval between two ELP transmissions
- * @elp_latest_seqno: latest and best known ELP sequence number
- * @last_unicast_tx: when the last unicast packet has been sent to this neighbor
- * @metric_work: work queue callback item for metric update
  */
 struct batadv_hardif_neigh_node_bat_v {
+	/** @throughput: ewma link throughput towards this neighbor */
 	struct ewma_throughput throughput;
+
+	/** @elp_interval: time interval between two ELP transmissions */
 	u32 elp_interval;
+
+	/** @elp_latest_seqno: latest and best known ELP sequence number */
 	u32 elp_latest_seqno;
+
+	/**
+	 * @last_unicast_tx: when the last unicast packet has been sent to this
+	 *  neighbor
+	 */
 	unsigned long last_unicast_tx;
+
+	/** @metric_work: work queue callback item for metric update */
 	struct work_struct metric_work;
 };
 
 /**
  * struct batadv_hardif_neigh_node - unique neighbor per hard-interface
- * @list: list node for batadv_hard_iface::neigh_list
- * @addr: the MAC address of the neighboring interface
- * @orig: the address of the originator this neighbor node belongs to
- * @if_incoming: pointer to incoming hard-interface
- * @last_seen: when last packet via this neighbor was received
- * @bat_v: B.A.T.M.A.N. V private data
- * @refcount: number of contexts the object is used
- * @rcu: struct used for freeing in a RCU-safe manner
  */
 struct batadv_hardif_neigh_node {
+	/** @list: list node for &batadv_hard_iface.neigh_list */
 	struct hlist_node list;
+
+	/** @addr: the MAC address of the neighboring interface */
 	u8 addr[ETH_ALEN];
+
+	/**
+	 * @orig: the address of the originator this neighbor node belongs to
+	 */
 	u8 orig[ETH_ALEN];
+
+	/** @if_incoming: pointer to incoming hard-interface */
 	struct batadv_hard_iface *if_incoming;
+
+	/** @last_seen: when last packet via this neighbor was received */
 	unsigned long last_seen;
+
 #ifdef CONFIG_BATMAN_ADV_BATMAN_V
+	/** @bat_v: B.A.T.M.A.N. V private data */
 	struct batadv_hardif_neigh_node_bat_v bat_v;
 #endif
+
+	/** @refcount: number of contexts the object is used */
 	struct kref refcount;
+
+	/** @rcu: struct used for freeing in a RCU-safe manner */
 	struct rcu_head rcu;
 };
 
 /**
  * struct batadv_neigh_node - structure for single hops neighbors
- * @list: list node for batadv_orig_node::neigh_list
- * @orig_node: pointer to corresponding orig_node
- * @addr: the MAC address of the neighboring interface
- * @ifinfo_list: list for routing metrics per outgoing interface
- * @ifinfo_lock: lock protecting private ifinfo members and list
- * @if_incoming: pointer to incoming hard-interface
- * @last_seen: when last packet via this neighbor was received
- * @hardif_neigh: hardif_neigh of this neighbor
- * @refcount: number of contexts the object is used
- * @rcu: struct used for freeing in an RCU-safe manner
  */
 struct batadv_neigh_node {
+	/** @list: list node for &batadv_orig_node.neigh_list */
 	struct hlist_node list;
+
+	/** @orig_node: pointer to corresponding orig_node */
 	struct batadv_orig_node *orig_node;
+
+	/** @addr: the MAC address of the neighboring interface */
 	u8 addr[ETH_ALEN];
+
+	/** @ifinfo_list: list for routing metrics per outgoing interface */
 	struct hlist_head ifinfo_list;
-	spinlock_t ifinfo_lock;	/* protects ifinfo_list and its members */
+
+	/** @ifinfo_lock: lock protecting ifinfo_list and its members */
+	spinlock_t ifinfo_lock;
+
+	/** @if_incoming: pointer to incoming hard-interface */
 	struct batadv_hard_iface *if_incoming;
+
+	/** @last_seen: when last packet via this neighbor was received */
 	unsigned long last_seen;
+
+	/** @hardif_neigh: hardif_neigh of this neighbor */
 	struct batadv_hardif_neigh_node *hardif_neigh;
+
+	/** @refcount: number of contexts the object is used */
 	struct kref refcount;
+
+	/** @rcu: struct used for freeing in an RCU-safe manner */
 	struct rcu_head rcu;
 };
 
 /**
  * struct batadv_neigh_ifinfo_bat_iv - neighbor information per outgoing
  *  interface for B.A.T.M.A.N. IV
- * @tq_recv: ring buffer of received TQ values from this neigh node
- * @tq_index: ring buffer index
- * @tq_avg: averaged tq of all tq values in the ring buffer (tq_recv)
- * @real_bits: bitfield containing the number of OGMs received from this neigh
- *  node (relative to orig_node->last_real_seqno)
- * @real_packet_count: counted result of real_bits
  */
 struct batadv_neigh_ifinfo_bat_iv {
+	/** @tq_recv: ring buffer of received TQ values from this neigh node */
 	u8 tq_recv[BATADV_TQ_GLOBAL_WINDOW_SIZE];
+
+	/** @tq_index: ring buffer index */
 	u8 tq_index;
+
+	/**
+	 * @tq_avg: averaged tq of all tq values in the ring buffer (tq_recv)
+	 */
 	u8 tq_avg;
+
+	/**
+	 * @real_bits: bitfield containing the number of OGMs received from this
+	 *  neigh node (relative to orig_node->last_real_seqno)
+	 */
 	DECLARE_BITMAP(real_bits, BATADV_TQ_LOCAL_WINDOW_SIZE);
+
+	/** @real_packet_count: counted result of real_bits */
 	u8 real_packet_count;
 };
 
 /**
  * struct batadv_neigh_ifinfo_bat_v - neighbor information per outgoing
  *  interface for B.A.T.M.A.N. V
- * @throughput: last throughput metric received from originator via this neigh
- * @last_seqno: last sequence number known for this neighbor
  */
 struct batadv_neigh_ifinfo_bat_v {
+	/**
+	 * @throughput: last throughput metric received from originator via this
+	 *  neigh
+	 */
 	u32 throughput;
+
+	/** @last_seqno: last sequence number known for this neighbor */
 	u32 last_seqno;
 };
 
 /**
  * struct batadv_neigh_ifinfo - neighbor information per outgoing interface
- * @list: list node for batadv_neigh_node::ifinfo_list
- * @if_outgoing: pointer to outgoing hard-interface
- * @bat_iv: B.A.T.M.A.N. IV private structure
- * @bat_v: B.A.T.M.A.N. V private data
- * @last_ttl: last received ttl from this neigh node
- * @refcount: number of contexts the object is used
- * @rcu: struct used for freeing in a RCU-safe manner
  */
 struct batadv_neigh_ifinfo {
+	/** @list: list node for &batadv_neigh_node.ifinfo_list */
 	struct hlist_node list;
+
+	/** @if_outgoing: pointer to outgoing hard-interface */
 	struct batadv_hard_iface *if_outgoing;
+
+	/** @bat_iv: B.A.T.M.A.N. IV private structure */
 	struct batadv_neigh_ifinfo_bat_iv bat_iv;
+
 #ifdef CONFIG_BATMAN_ADV_BATMAN_V
+	/** @bat_v: B.A.T.M.A.N. V private data */
 	struct batadv_neigh_ifinfo_bat_v bat_v;
 #endif
+
+	/** @last_ttl: last received ttl from this neigh node */
 	u8 last_ttl;
+
+	/** @refcount: number of contexts the object is used */
 	struct kref refcount;
+
+	/** @rcu: struct used for freeing in a RCU-safe manner */
 	struct rcu_head rcu;
 };
 
@@ -526,148 +724,278 @@ struct batadv_neigh_ifinfo {
 
 /**
  * struct batadv_bcast_duplist_entry - structure for LAN broadcast suppression
- * @orig: mac address of orig node orginating the broadcast
- * @crc: crc32 checksum of broadcast payload
- * @entrytime: time when the broadcast packet was received
  */
 struct batadv_bcast_duplist_entry {
+	/** @orig: mac address of orig node originating the broadcast */
 	u8 orig[ETH_ALEN];
+
+	/** @crc: crc32 checksum of broadcast payload */
 	__be32 crc;
+
+	/** @entrytime: time when the broadcast packet was received */
 	unsigned long entrytime;
 };
 #endif
 
 /**
  * enum batadv_counters - indices for traffic counters
- * @BATADV_CNT_TX: transmitted payload traffic packet counter
- * @BATADV_CNT_TX_BYTES: transmitted payload traffic bytes counter
- * @BATADV_CNT_TX_DROPPED: dropped transmission payload traffic packet counter
- * @BATADV_CNT_RX: received payload traffic packet counter
- * @BATADV_CNT_RX_BYTES: received payload traffic bytes counter
- * @BATADV_CNT_FORWARD: forwarded payload traffic packet counter
- * @BATADV_CNT_FORWARD_BYTES: forwarded payload traffic bytes counter
- * @BATADV_CNT_MGMT_TX: transmitted routing protocol traffic packet counter
- * @BATADV_CNT_MGMT_TX_BYTES: transmitted routing protocol traffic bytes counter
- * @BATADV_CNT_MGMT_RX: received routing protocol traffic packet counter
- * @BATADV_CNT_MGMT_RX_BYTES: received routing protocol traffic bytes counter
- * @BATADV_CNT_FRAG_TX: transmitted fragment traffic packet counter
- * @BATADV_CNT_FRAG_TX_BYTES: transmitted fragment traffic bytes counter
- * @BATADV_CNT_FRAG_RX: received fragment traffic packet counter
- * @BATADV_CNT_FRAG_RX_BYTES: received fragment traffic bytes counter
- * @BATADV_CNT_FRAG_FWD: forwarded fragment traffic packet counter
- * @BATADV_CNT_FRAG_FWD_BYTES: forwarded fragment traffic bytes counter
- * @BATADV_CNT_TT_REQUEST_TX: transmitted tt req traffic packet counter
- * @BATADV_CNT_TT_REQUEST_RX: received tt req traffic packet counter
- * @BATADV_CNT_TT_RESPONSE_TX: transmitted tt resp traffic packet counter
- * @BATADV_CNT_TT_RESPONSE_RX: received tt resp traffic packet counter
- * @BATADV_CNT_TT_ROAM_ADV_TX: transmitted tt roam traffic packet counter
- * @BATADV_CNT_TT_ROAM_ADV_RX: received tt roam traffic packet counter
- * @BATADV_CNT_DAT_GET_TX: transmitted dht GET traffic packet counter
- * @BATADV_CNT_DAT_GET_RX: received dht GET traffic packet counter
- * @BATADV_CNT_DAT_PUT_TX: transmitted dht PUT traffic packet counter
- * @BATADV_CNT_DAT_PUT_RX: received dht PUT traffic packet counter
- * @BATADV_CNT_DAT_CACHED_REPLY_TX: transmitted dat cache reply traffic packet
- *  counter
- * @BATADV_CNT_NC_CODE: transmitted nc-combined traffic packet counter
- * @BATADV_CNT_NC_CODE_BYTES: transmitted nc-combined traffic bytes counter
- * @BATADV_CNT_NC_RECODE: transmitted nc-recombined traffic packet counter
- * @BATADV_CNT_NC_RECODE_BYTES: transmitted nc-recombined traffic bytes counter
- * @BATADV_CNT_NC_BUFFER: counter for packets buffered for later nc decoding
- * @BATADV_CNT_NC_DECODE: received and nc-decoded traffic packet counter
- * @BATADV_CNT_NC_DECODE_BYTES: received and nc-decoded traffic bytes counter
- * @BATADV_CNT_NC_DECODE_FAILED: received and decode-failed traffic packet
- *  counter
- * @BATADV_CNT_NC_SNIFFED: counter for nc-decoded packets received in promisc
- *  mode.
- * @BATADV_CNT_NUM: number of traffic counters
  */
 enum batadv_counters {
+	/** @BATADV_CNT_TX: transmitted payload traffic packet counter */
 	BATADV_CNT_TX,
+
+	/** @BATADV_CNT_TX_BYTES: transmitted payload traffic bytes counter */
 	BATADV_CNT_TX_BYTES,
+
+	/**
+	 * @BATADV_CNT_TX_DROPPED: dropped transmission payload traffic packet
+	 *  counter
+	 */
 	BATADV_CNT_TX_DROPPED,
+
+	/** @BATADV_CNT_RX: received payload traffic packet counter */
 	BATADV_CNT_RX,
+
+	/** @BATADV_CNT_RX_BYTES: received payload traffic bytes counter */
 	BATADV_CNT_RX_BYTES,
+
+	/** @BATADV_CNT_FORWARD: forwarded payload traffic packet counter */
 	BATADV_CNT_FORWARD,
+
+	/**
+	 * @BATADV_CNT_FORWARD_BYTES: forwarded payload traffic bytes counter
+	 */
 	BATADV_CNT_FORWARD_BYTES,
+
+	/**
+	 * @BATADV_CNT_MGMT_TX: transmitted routing protocol traffic packet
+	 *  counter
+	 */
 	BATADV_CNT_MGMT_TX,
+
+	/**
+	 * @BATADV_CNT_MGMT_TX_BYTES: transmitted routing protocol traffic bytes
+	 *  counter
+	 */
 	BATADV_CNT_MGMT_TX_BYTES,
+
+	/**
+	 * @BATADV_CNT_MGMT_RX: received routing protocol traffic packet counter
+	 */
 	BATADV_CNT_MGMT_RX,
+
+	/**
+	 * @BATADV_CNT_MGMT_RX_BYTES: received routing protocol traffic bytes
+	 *  counter
+	 */
 	BATADV_CNT_MGMT_RX_BYTES,
+
+	/** @BATADV_CNT_FRAG_TX: transmitted fragment traffic packet counter */
 	BATADV_CNT_FRAG_TX,
+
+	/**
+	 * @BATADV_CNT_FRAG_TX_BYTES: transmitted fragment traffic bytes counter
+	 */
 	BATADV_CNT_FRAG_TX_BYTES,
+
+	/** @BATADV_CNT_FRAG_RX: received fragment traffic packet counter */
 	BATADV_CNT_FRAG_RX,
+
+	/**
+	 * @BATADV_CNT_FRAG_RX_BYTES: received fragment traffic bytes counter
+	 */
 	BATADV_CNT_FRAG_RX_BYTES,
+
+	/** @BATADV_CNT_FRAG_FWD: forwarded fragment traffic packet counter */
 	BATADV_CNT_FRAG_FWD,
+
+	/**
+	 * @BATADV_CNT_FRAG_FWD_BYTES: forwarded fragment traffic bytes counter
+	 */
 	BATADV_CNT_FRAG_FWD_BYTES,
+
+	/**
+	 * @BATADV_CNT_TT_REQUEST_TX: transmitted tt req traffic packet counter
+	 */
 	BATADV_CNT_TT_REQUEST_TX,
+
+	/** @BATADV_CNT_TT_REQUEST_RX: received tt req traffic packet counter */
 	BATADV_CNT_TT_REQUEST_RX,
+
+	/**
+	 * @BATADV_CNT_TT_RESPONSE_TX: transmitted tt resp traffic packet
+	 *  counter
+	 */
 	BATADV_CNT_TT_RESPONSE_TX,
+
+	/**
+	 * @BATADV_CNT_TT_RESPONSE_RX: received tt resp traffic packet counter
+	 */
 	BATADV_CNT_TT_RESPONSE_RX,
+
+	/**
+	 * @BATADV_CNT_TT_ROAM_ADV_TX: transmitted tt roam traffic packet
+	 *  counter
+	 */
 	BATADV_CNT_TT_ROAM_ADV_TX,
+
+	/**
+	 * @BATADV_CNT_TT_ROAM_ADV_RX: received tt roam traffic packet counter
+	 */
 	BATADV_CNT_TT_ROAM_ADV_RX,
+
 #ifdef CONFIG_BATMAN_ADV_DAT
+	/**
+	 * @BATADV_CNT_DAT_GET_TX: transmitted dht GET traffic packet counter
+	 */
 	BATADV_CNT_DAT_GET_TX,
+
+	/** @BATADV_CNT_DAT_GET_RX: received dht GET traffic packet counter */
 	BATADV_CNT_DAT_GET_RX,
+
+	/**
+	 * @BATADV_CNT_DAT_PUT_TX: transmitted dht PUT traffic packet counter
+	 */
 	BATADV_CNT_DAT_PUT_TX,
+
+	/** @BATADV_CNT_DAT_PUT_RX: received dht PUT traffic packet counter */
 	BATADV_CNT_DAT_PUT_RX,
+
+	/**
+	 * @BATADV_CNT_DAT_CACHED_REPLY_TX: transmitted dat cache reply traffic
+	 *  packet counter
+	 */
 	BATADV_CNT_DAT_CACHED_REPLY_TX,
 #endif
+
 #ifdef CONFIG_BATMAN_ADV_NC
+	/**
+	 * @BATADV_CNT_NC_CODE: transmitted nc-combined traffic packet counter
+	 */
 	BATADV_CNT_NC_CODE,
+
+	/**
+	 * @BATADV_CNT_NC_CODE_BYTES: transmitted nc-combined traffic bytes
+	 *  counter
+	 */
 	BATADV_CNT_NC_CODE_BYTES,
+
+	/**
+	 * @BATADV_CNT_NC_RECODE: transmitted nc-recombined traffic packet
+	 *  counter
+	 */
 	BATADV_CNT_NC_RECODE,
+
+	/**
+	 * @BATADV_CNT_NC_RECODE_BYTES: transmitted nc-recombined traffic bytes
+	 *  counter
+	 */
 	BATADV_CNT_NC_RECODE_BYTES,
+
+	/**
+	 * @BATADV_CNT_NC_BUFFER: counter for packets buffered for later nc
+	 *  decoding
+	 */
 	BATADV_CNT_NC_BUFFER,
+
+	/**
+	 * @BATADV_CNT_NC_DECODE: received and nc-decoded traffic packet counter
+	 */
 	BATADV_CNT_NC_DECODE,
+
+	/**
+	 * @BATADV_CNT_NC_DECODE_BYTES: received and nc-decoded traffic bytes
+	 *  counter
+	 */
 	BATADV_CNT_NC_DECODE_BYTES,
+
+	/**
+	 * @BATADV_CNT_NC_DECODE_FAILED: received and decode-failed traffic
+	 *  packet counter
+	 */
 	BATADV_CNT_NC_DECODE_FAILED,
+
+	/**
+	 * @BATADV_CNT_NC_SNIFFED: counter for nc-decoded packets received in
+	 *  promisc mode.
+	 */
 	BATADV_CNT_NC_SNIFFED,
 #endif
+
+	/** @BATADV_CNT_NUM: number of traffic counters */
 	BATADV_CNT_NUM,
 };
 
 /**
  * struct batadv_priv_tt - per mesh interface translation table data
- * @vn: translation table version number
- * @ogm_append_cnt: counter of number of OGMs containing the local tt diff
- * @local_changes: changes registered in an originator interval
- * @changes_list: tracks tt local changes within an originator interval
- * @local_hash: local translation table hash table
- * @global_hash: global translation table hash table
- * @req_list: list of pending & unanswered tt_requests
- * @roam_list: list of the last roaming events of each client limiting the
- *  number of roaming events to avoid route flapping
- * @changes_list_lock: lock protecting changes_list
- * @req_list_lock: lock protecting req_list
- * @roam_list_lock: lock protecting roam_list
- * @last_changeset: last tt changeset this host has generated
- * @last_changeset_len: length of last tt changeset this host has generated
- * @last_changeset_lock: lock protecting last_changeset & last_changeset_len
- * @commit_lock: prevents from executing a local TT commit while reading the
- *  local table. The local TT commit is made up by two operations (data
- *  structure update and metdata -CRC/TTVN- recalculation) and they have to be
- *  executed atomically in order to avoid another thread to read the
- *  table/metadata between those.
- * @work: work queue callback item for translation table purging
  */
 struct batadv_priv_tt {
+	/** @vn: translation table version number */
 	atomic_t vn;
+
+	/**
+	 * @ogm_append_cnt: counter of number of OGMs containing the local tt
+	 *  diff
+	 */
 	atomic_t ogm_append_cnt;
+
+	/** @local_changes: changes registered in an originator interval */
 	atomic_t local_changes;
+
+	/**
+	 * @changes_list: tracks tt local changes within an originator interval
+	 */
 	struct list_head changes_list;
+
+	/** @local_hash: local translation table hash table */
 	struct batadv_hashtable *local_hash;
+
+	/** @global_hash: global translation table hash table */
 	struct batadv_hashtable *global_hash;
+
+	/** @req_list: list of pending & unanswered tt_requests */
 	struct hlist_head req_list;
+
+	/**
+	 * @roam_list: list of the last roaming events of each client limiting
+	 *  the number of roaming events to avoid route flapping
+	 */
 	struct list_head roam_list;
-	spinlock_t changes_list_lock; /* protects changes */
-	spinlock_t req_list_lock; /* protects req_list */
-	spinlock_t roam_list_lock; /* protects roam_list */
+
+	/** @changes_list_lock: lock protecting changes_list */
+	spinlock_t changes_list_lock;
+
+	/** @req_list_lock: lock protecting req_list */
+	spinlock_t req_list_lock;
+
+	/** @roam_list_lock: lock protecting roam_list */
+	spinlock_t roam_list_lock;
+
+	/** @last_changeset: last tt changeset this host has generated */
 	unsigned char *last_changeset;
+
+	/**
+	 * @last_changeset_len: length of last tt changeset this host has
+	 *  generated
+	 */
 	s16 last_changeset_len;
-	/* protects last_changeset & last_changeset_len */
+
+	/**
+	 * @last_changeset_lock: lock protecting last_changeset &
+	 *  last_changeset_len
+	 */
 	spinlock_t last_changeset_lock;
-	/* prevents from executing a commit while reading the table */
+
+	/**
+	 * @commit_lock: prevents executing a local TT commit while reading the
+	 *  local table. The local TT commit is made up of two operations (data
+	 *  structure update and metadata -CRC/TTVN- recalculation) which have
+	 *  to be executed atomically in order to prevent another thread from
+	 *  reading the table/metadata between them.
+	 */
 	spinlock_t commit_lock;
+
+	/** @work: work queue callback item for translation table purging */
 	struct delayed_work work;
 };
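
The @commit_lock rule above is worth spelling out: both the two-step commit
and any coherent read of table plus CRC/TTVN must run under the same lock,
otherwise a reader could observe the new table with the old metadata. A
minimal sketch of that discipline, where update_local_table() and
recalc_crc_and_ttvn() are hypothetical placeholders for the two operations
named in the comment:

static void tt_local_commit(struct batadv_priv *bat_priv)
{
	spin_lock_bh(&bat_priv->tt.commit_lock);
	update_local_table(bat_priv);	/* 1) data structure update */
	recalc_crc_and_ttvn(bat_priv);	/* 2) metadata recalculation */
	spin_unlock_bh(&bat_priv->tt.commit_lock);
}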
 
@@ -675,31 +1003,57 @@ struct batadv_priv_tt {
 
 /**
 * struct batadv_priv_bla - per mesh interface bridge loop avoidance data
- * @num_requests: number of bla requests in flight
- * @claim_hash: hash table containing mesh nodes this host has claimed
- * @backbone_hash: hash table containing all detected backbone gateways
- * @loopdetect_addr: MAC address used for own loopdetection frames
- * @loopdetect_lasttime: time when the loopdetection frames were sent
- * @loopdetect_next: how many periods to wait for the next loopdetect process
- * @bcast_duplist: recently received broadcast packets array (for broadcast
- *  duplicate suppression)
- * @bcast_duplist_curr: index of last broadcast packet added to bcast_duplist
- * @bcast_duplist_lock: lock protecting bcast_duplist & bcast_duplist_curr
- * @claim_dest: local claim data (e.g. claim group)
- * @work: work queue callback item for cleanups & bla announcements
  */
 struct batadv_priv_bla {
+	/** @num_requests: number of bla requests in flight */
 	atomic_t num_requests;
+
+	/**
+	 * @claim_hash: hash table containing mesh nodes this host has claimed
+	 */
 	struct batadv_hashtable *claim_hash;
+
+	/**
+	 * @backbone_hash: hash table containing all detected backbone gateways
+	 */
 	struct batadv_hashtable *backbone_hash;
+
+	/** @loopdetect_addr: MAC address used for own loopdetection frames */
 	u8 loopdetect_addr[ETH_ALEN];
+
+	/**
+	 * @loopdetect_lasttime: time when the loopdetection frames were sent
+	 */
 	unsigned long loopdetect_lasttime;
+
+	/**
+	 * @loopdetect_next: how many periods to wait for the next loopdetect
+	 *  process
+	 */
 	atomic_t loopdetect_next;
+
+	/**
+	 * @bcast_duplist: recently received broadcast packets array (for
+	 *  broadcast duplicate suppression)
+	 */
 	struct batadv_bcast_duplist_entry bcast_duplist[BATADV_DUPLIST_SIZE];
+
+	/**
+	 * @bcast_duplist_curr: index of last broadcast packet added to
+	 *  bcast_duplist
+	 */
 	int bcast_duplist_curr;
-	/* protects bcast_duplist & bcast_duplist_curr */
+
+	/**
+	 * @bcast_duplist_lock: lock protecting bcast_duplist &
+	 *  bcast_duplist_curr
+	 */
 	spinlock_t bcast_duplist_lock;
+
+	/** @claim_dest: local claim data (e.g. claim group) */
 	struct batadv_bla_claim_dst claim_dest;
+
+	/** @work: work queue callback item for cleanups & bla announcements */
 	struct delayed_work work;
 };
 #endif
@@ -708,68 +1062,94 @@ struct batadv_priv_bla {
 
 /**
  * struct batadv_priv_debug_log - debug logging data
- * @log_buff: buffer holding the logs (ring bufer)
- * @log_start: index of next character to read
- * @log_end: index of next character to write
- * @lock: lock protecting log_buff, log_start & log_end
- * @queue_wait: log reader's wait queue
  */
 struct batadv_priv_debug_log {
+	/** @log_buff: buffer holding the logs (ring buffer) */
 	char log_buff[BATADV_LOG_BUF_LEN];
+
+	/** @log_start: index of next character to read */
 	unsigned long log_start;
+
+	/** @log_end: index of next character to write */
 	unsigned long log_end;
-	spinlock_t lock; /* protects log_buff, log_start and log_end */
+
+	/** @lock: lock protecting log_buff, log_start & log_end */
+	spinlock_t lock;
+
+	/** @queue_wait: log reader's wait queue */
 	wait_queue_head_t queue_wait;
 };
 #endif
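
@log_start and @log_end above form the classic two-index ring buffer: both
indices only grow, their difference is the number of unread bytes, and
wrap-around is handled by masking on access (which requires BATADV_LOG_BUF_LEN
to be a power of two). A sketch of the write side, assuming the caller already
holds @lock; the helper name is illustrative:

static void log_putc(struct batadv_priv_debug_log *dl, char c)
{
	dl->log_buff[dl->log_end & (BATADV_LOG_BUF_LEN - 1)] = c;
	dl->log_end++;

	/* buffer full: advance the read index, dropping the oldest
	 * unread byte
	 */
	if (dl->log_end - dl->log_start > BATADV_LOG_BUF_LEN)
		dl->log_start = dl->log_end - BATADV_LOG_BUF_LEN;
}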
 
 /**
  * struct batadv_priv_gw - per mesh interface gateway data
- * @gateway_list: list of available gateway nodes
- * @list_lock: lock protecting gateway_list & curr_gw
- * @curr_gw: pointer to currently selected gateway node
- * @mode: gateway operation: off, client or server (see batadv_gw_modes)
- * @sel_class: gateway selection class (applies if gw_mode client)
- * @bandwidth_down: advertised uplink download bandwidth (if gw_mode server)
- * @bandwidth_up: advertised uplink upload bandwidth (if gw_mode server)
- * @reselect: bool indicating a gateway re-selection is in progress
  */
 struct batadv_priv_gw {
+	/** @gateway_list: list of available gateway nodes */
 	struct hlist_head gateway_list;
-	spinlock_t list_lock; /* protects gateway_list & curr_gw */
-	struct batadv_gw_node __rcu *curr_gw;  /* rcu protected pointer */
+
+	/** @list_lock: lock protecting gateway_list & curr_gw */
+	spinlock_t list_lock;
+
+	/** @curr_gw: pointer to currently selected gateway node */
+	struct batadv_gw_node __rcu *curr_gw;
+
+	/**
+	 * @mode: gateway operation: off, client or server (see batadv_gw_modes)
+	 */
 	atomic_t mode;
+
+	/** @sel_class: gateway selection class (applies if gw_mode client) */
 	atomic_t sel_class;
+
+	/**
+	 * @bandwidth_down: advertised uplink download bandwidth (if gw_mode
+	 *  server)
+	 */
 	atomic_t bandwidth_down;
+
+	/**
+	 * @bandwidth_up: advertised uplink upload bandwidth (if gw_mode server)
+	 */
 	atomic_t bandwidth_up;
+
+	/** @reselect: bool indicating a gateway re-selection is in progress */
 	atomic_t reselect;
 };
 
 /**
  * struct batadv_priv_tvlv - per mesh interface tvlv data
- * @container_list: list of registered tvlv containers to be sent with each OGM
- * @handler_list: list of the various tvlv content handlers
- * @container_list_lock: protects tvlv container list access
- * @handler_list_lock: protects handler list access
  */
 struct batadv_priv_tvlv {
+	/**
+	 * @container_list: list of registered tvlv containers to be sent with
+	 *  each OGM
+	 */
 	struct hlist_head container_list;
+
+	/** @handler_list: list of the various tvlv content handlers */
 	struct hlist_head handler_list;
-	spinlock_t container_list_lock; /* protects container_list */
-	spinlock_t handler_list_lock; /* protects handler_list */
+
+	/** @container_list_lock: protects tvlv container list access */
+	spinlock_t container_list_lock;
+
+	/** @handler_list_lock: protects handler list access */
+	spinlock_t handler_list_lock;
 };
 
 #ifdef CONFIG_BATMAN_ADV_DAT
 
 /**
  * struct batadv_priv_dat - per mesh interface DAT private data
- * @addr: node DAT address
- * @hash: hashtable representing the local ARP cache
- * @work: work queue callback item for cache purging
  */
 struct batadv_priv_dat {
+	/** @addr: node DAT address */
 	batadv_dat_addr_t addr;
+
+	/** @hash: hashtable representing the local ARP cache */
 	struct batadv_hashtable *hash;
+
+	/** @work: work queue callback item for cache purging */
 	struct delayed_work work;
 };
 #endif
@@ -777,375 +1157,582 @@ struct batadv_priv_dat {
 #ifdef CONFIG_BATMAN_ADV_MCAST
 /**
  * struct batadv_mcast_querier_state - IGMP/MLD querier state when bridged
- * @exists: whether a querier exists in the mesh
- * @shadowing: if a querier exists, whether it is potentially shadowing
- *  multicast listeners (i.e. querier is behind our own bridge segment)
  */
 struct batadv_mcast_querier_state {
+	/** @exists: whether a querier exists in the mesh */
 	bool exists;
+
+	/**
+	 * @shadowing: if a querier exists, whether it is potentially shadowing
+	 *  multicast listeners (i.e. querier is behind our own bridge segment)
+	 */
 	bool shadowing;
 };
 
 /**
  * struct batadv_priv_mcast - per mesh interface mcast data
- * @mla_list: list of multicast addresses we are currently announcing via TT
- * @want_all_unsnoopables_list: a list of orig_nodes wanting all unsnoopable
- *  multicast traffic
- * @want_all_ipv4_list: a list of orig_nodes wanting all IPv4 multicast traffic
- * @want_all_ipv6_list: a list of orig_nodes wanting all IPv6 multicast traffic
- * @querier_ipv4: the current state of an IGMP querier in the mesh
- * @querier_ipv6: the current state of an MLD querier in the mesh
- * @flags: the flags we have last sent in our mcast tvlv
- * @enabled: whether the multicast tvlv is currently enabled
- * @bridged: whether the soft interface has a bridge on top
- * @num_disabled: number of nodes that have no mcast tvlv
- * @num_want_all_unsnoopables: number of nodes wanting unsnoopable IP traffic
- * @num_want_all_ipv4: counter for items in want_all_ipv4_list
- * @num_want_all_ipv6: counter for items in want_all_ipv6_list
- * @want_lists_lock: lock for protecting modifications to mcast want lists
- *  (traversals are rcu-locked)
- * @work: work queue callback item for multicast TT and TVLV updates
  */
 struct batadv_priv_mcast {
+	/**
+	 * @mla_list: list of multicast addresses we are currently announcing
+	 *  via TT
+	 */
 	struct hlist_head mla_list; /* see __batadv_mcast_mla_update() */
+
+	/**
+	 * @want_all_unsnoopables_list: a list of orig_nodes wanting all
+	 *  unsnoopable multicast traffic
+	 */
 	struct hlist_head want_all_unsnoopables_list;
+
+	/**
+	 * @want_all_ipv4_list: a list of orig_nodes wanting all IPv4 multicast
+	 *  traffic
+	 */
 	struct hlist_head want_all_ipv4_list;
+
+	/**
+	 * @want_all_ipv6_list: a list of orig_nodes wanting all IPv6 multicast
+	 *  traffic
+	 */
 	struct hlist_head want_all_ipv6_list;
+
+	/** @querier_ipv4: the current state of an IGMP querier in the mesh */
 	struct batadv_mcast_querier_state querier_ipv4;
+
+	/** @querier_ipv6: the current state of an MLD querier in the mesh */
 	struct batadv_mcast_querier_state querier_ipv6;
+
+	/** @flags: the flags we have last sent in our mcast tvlv */
 	u8 flags;
+
+	/** @enabled: whether the multicast tvlv is currently enabled */
 	bool enabled;
+
+	/** @bridged: whether the soft interface has a bridge on top */
 	bool bridged;
+
+	/** @num_disabled: number of nodes that have no mcast tvlv */
 	atomic_t num_disabled;
+
+	/**
+	 * @num_want_all_unsnoopables: number of nodes wanting unsnoopable IP
+	 *  traffic
+	 */
 	atomic_t num_want_all_unsnoopables;
+
+	/** @num_want_all_ipv4: counter for items in want_all_ipv4_list */
 	atomic_t num_want_all_ipv4;
+
+	/** @num_want_all_ipv6: counter for items in want_all_ipv6_list */
 	atomic_t num_want_all_ipv6;
-	/* protects want_all_{unsnoopables,ipv4,ipv6}_list */
+
+	/**
+	 * @want_lists_lock: lock for protecting modifications to the mcast
+	 *  want_all_{unsnoopables,ipv4,ipv6}_list (traversals are rcu-locked)
+	 */
 	spinlock_t want_lists_lock;
+
+	/** @work: work queue callback item for multicast TT and TVLV updates */
 	struct delayed_work work;
 };
 #endif
 
 /**
  * struct batadv_priv_nc - per mesh interface network coding private data
- * @work: work queue callback item for cleanup
- * @debug_dir: dentry for nc subdir in batman-adv directory in debugfs
- * @min_tq: only consider neighbors for encoding if neigh_tq > min_tq
- * @max_fwd_delay: maximum packet forward delay to allow coding of packets
- * @max_buffer_time: buffer time for sniffed packets used to decoding
- * @timestamp_fwd_flush: timestamp of last forward packet queue flush
- * @timestamp_sniffed_purge: timestamp of last sniffed packet queue purge
- * @coding_hash: Hash table used to buffer skbs while waiting for another
- *  incoming skb to code it with. Skbs are added to the buffer just before being
- *  forwarded in routing.c
- * @decoding_hash: Hash table used to buffer skbs that might be needed to decode
- *  a received coded skb. The buffer is used for 1) skbs arriving on the
- *  soft-interface; 2) skbs overheard on the hard-interface; and 3) skbs
- *  forwarded by batman-adv.
  */
 struct batadv_priv_nc {
+	/** @work: work queue callback item for cleanup */
 	struct delayed_work work;
+
+	/**
+	 * @debug_dir: dentry for nc subdir in batman-adv directory in debugfs
+	 */
 	struct dentry *debug_dir;
+
+	/**
+	 * @min_tq: only consider neighbors for encoding if neigh_tq > min_tq
+	 */
 	u8 min_tq;
+
+	/**
+	 * @max_fwd_delay: maximum packet forward delay to allow coding of
+	 *  packets
+	 */
 	u32 max_fwd_delay;
+
+	/**
+	 * @max_buffer_time: buffer time for sniffed packets used for decoding
+	 */
 	u32 max_buffer_time;
+
+	/**
+	 * @timestamp_fwd_flush: timestamp of last forward packet queue flush
+	 */
 	unsigned long timestamp_fwd_flush;
+
+	/**
+	 * @timestamp_sniffed_purge: timestamp of last sniffed packet queue
+	 *  purge
+	 */
 	unsigned long timestamp_sniffed_purge;
+
+	/**
+	 * @coding_hash: Hash table used to buffer skbs while waiting for
+	 *  another incoming skb to code it with. Skbs are added to the buffer
+	 *  just before being forwarded in routing.c
+	 */
 	struct batadv_hashtable *coding_hash;
+
+	/**
+	 * @decoding_hash: Hash table used to buffer skbs that might be needed
+	 *  to decode a received coded skb. The buffer is used for 1) skbs
+	 *  arriving on the soft-interface; 2) skbs overheard on the
+	 *  hard-interface; and 3) skbs forwarded by batman-adv.
+	 */
 	struct batadv_hashtable *decoding_hash;
 };
 
 /**
  * struct batadv_tp_unacked - unacked packet meta-information
- * @seqno: seqno of the unacked packet
- * @len: length of the packet
- * @list: list node for batadv_tp_vars::unacked_list
  *
 * This struct is supposed to represent a buffered unacked packet. However,
 * since the purpose of the TP meter is only to count the traffic, there is no
 * need to store the entire sk_buff: the starting offset and the length are
 * enough.
  */
 struct batadv_tp_unacked {
+	/** @seqno: seqno of the unacked packet */
 	u32 seqno;
+
+	/** @len: length of the packet */
 	u16 len;
+
+	/** @list: list node for &batadv_tp_vars.unacked_list */
 	struct list_head list;
 };
 
 /**
  * enum batadv_tp_meter_role - Modus in tp meter session
- * @BATADV_TP_RECEIVER: Initialized as receiver
- * @BATADV_TP_SENDER: Initialized as sender
  */
 enum batadv_tp_meter_role {
+	/** @BATADV_TP_RECEIVER: Initialized as receiver */
 	BATADV_TP_RECEIVER,
+
+	/** @BATADV_TP_SENDER: Initialized as sender */
 	BATADV_TP_SENDER
 };
 
 /**
  * struct batadv_tp_vars - tp meter private variables per session
- * @list: list node for bat_priv::tp_list
- * @timer: timer for ack (receiver) and retry (sender)
- * @bat_priv: pointer to the mesh object
- * @start_time: start time in jiffies
- * @other_end: mac address of remote
- * @role: receiver/sender modi
- * @sending: sending binary semaphore: 1 if sending, 0 is not
- * @reason: reason for a stopped session
- * @finish_work: work item for the finishing procedure
- * @test_length: test length in milliseconds
- * @session: TP session identifier
- * @icmp_uid: local ICMP "socket" index
- * @dec_cwnd: decimal part of the cwnd used during linear growth
- * @cwnd: current size of the congestion window
- * @cwnd_lock: lock do protect @cwnd & @dec_cwnd
- * @ss_threshold: Slow Start threshold. Once cwnd exceeds this value the
- *  connection switches to the Congestion Avoidance state
- * @last_acked: last acked byte
- * @last_sent: last sent byte, not yet acked
- * @tot_sent: amount of data sent/ACKed so far
- * @dup_acks: duplicate ACKs counter
- * @fast_recovery: true if in Fast Recovery mode
- * @recover: last sent seqno when entering Fast Recovery
- * @rto: sender timeout
- * @srtt: smoothed RTT scaled by 2^3
- * @rttvar: RTT variation scaled by 2^2
- * @more_bytes: waiting queue anchor when waiting for more ack/retry timeout
- * @prerandom_offset: offset inside the prerandom buffer
- * @prerandom_lock: spinlock protecting access to prerandom_offset
- * @last_recv: last in-order received packet
- * @unacked_list: list of unacked packets (meta-info only)
- * @unacked_lock: protect unacked_list
- * @last_recv_time: time time (jiffies) a msg was received
- * @refcount: number of context where the object is used
- * @rcu: struct used for freeing in an RCU-safe manner
  */
 struct batadv_tp_vars {
+	/** @list: list node for &batadv_priv.tp_list */
 	struct hlist_node list;
+
+	/** @timer: timer for ack (receiver) and retry (sender) */
 	struct timer_list timer;
+
+	/** @bat_priv: pointer to the mesh object */
 	struct batadv_priv *bat_priv;
+
+	/** @start_time: start time in jiffies */
 	unsigned long start_time;
+
+	/** @other_end: mac address of remote */
 	u8 other_end[ETH_ALEN];
+
+	/** @role: receiver/sender mode */
 	enum batadv_tp_meter_role role;
+
+	/** @sending: sending binary semaphore: 1 if sending, 0 if not */
 	atomic_t sending;
+
+	/** @reason: reason for a stopped session */
 	enum batadv_tp_meter_reason reason;
+
+	/** @finish_work: work item for the finishing procedure */
 	struct delayed_work finish_work;
+
+	/** @test_length: test length in milliseconds */
 	u32 test_length;
+
+	/** @session: TP session identifier */
 	u8 session[2];
+
+	/** @icmp_uid: local ICMP "socket" index */
 	u8 icmp_uid;
 
 	/* sender variables */
+
+	/** @dec_cwnd: decimal part of the cwnd used during linear growth */
 	u16 dec_cwnd;
+
+	/** @cwnd: current size of the congestion window */
 	u32 cwnd;
-	spinlock_t cwnd_lock; /* Protects cwnd & dec_cwnd */
+
+	/** @cwnd_lock: lock to protect @cwnd & @dec_cwnd */
+	spinlock_t cwnd_lock;
+
+	/**
+	 * @ss_threshold: Slow Start threshold. Once cwnd exceeds this value the
+	 *  connection switches to the Congestion Avoidance state
+	 */
 	u32 ss_threshold;
+
+	/** @last_acked: last acked byte */
 	atomic_t last_acked;
+
+	/** @last_sent: last sent byte, not yet acked */
 	u32 last_sent;
+
+	/** @tot_sent: amount of data sent/ACKed so far */
 	atomic64_t tot_sent;
+
+	/** @dup_acks: duplicate ACKs counter */
 	atomic_t dup_acks;
+
+	/** @fast_recovery: true if in Fast Recovery mode */
 	bool fast_recovery;
+
+	/** @recover: last sent seqno when entering Fast Recovery */
 	u32 recover;
+
+	/** @rto: sender timeout */
 	u32 rto;
+
+	/** @srtt: smoothed RTT scaled by 2^3 */
 	u32 srtt;
+
+	/** @rttvar: RTT variation scaled by 2^2 */
 	u32 rttvar;
+
+	/**
+	 * @more_bytes: waiting queue anchor when waiting for more ack/retry
+	 *  timeout
+	 */
 	wait_queue_head_t more_bytes;
+
+	/** @prerandom_offset: offset inside the prerandom buffer */
 	u32 prerandom_offset;
-	spinlock_t prerandom_lock; /* Protects prerandom_offset */
+
+	/** @prerandom_lock: spinlock protecting access to prerandom_offset */
+	spinlock_t prerandom_lock;
 
 	/* receiver variables */
+
+	/** @last_recv: last in-order received packet */
 	u32 last_recv;
+
+	/** @unacked_list: list of unacked packets (meta-info only) */
 	struct list_head unacked_list;
-	spinlock_t unacked_lock; /* Protects unacked_list */
+
+	/** @unacked_lock: protect unacked_list */
+	spinlock_t unacked_lock;
+
+	/** @last_recv_time: last time (jiffies) a msg was received */
 	unsigned long last_recv_time;
+
+	/** @refcount: number of contexts where the object is used */
 	struct kref refcount;
+
+	/** @rcu: struct used for freeing in an RCU-safe manner */
 	struct rcu_head rcu;
 };
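
The scaling noted at @srtt (2^3) and @rttvar (2^2) is the standard fixed-point
form of TCP's RTO estimator (RFC 6298): keeping the averages pre-shifted turns
the 1/8 and 1/4 gains into plain shifts. A sketch of the usual update, not
necessarily the exact batman-adv code:

static void tp_update_rtt(struct batadv_tp_vars *tp, u32 rtt)
{
	long m;

	if (!tp->srtt) {
		/* first sample: SRTT = R, RTTVAR = R / 2 */
		tp->srtt = rtt << 3;
		tp->rttvar = rtt << 1;
	} else {
		m = (long)rtt - (long)(tp->srtt >> 3);	/* err = R' - SRTT */
		tp->srtt += m;				/* SRTT += err / 8 */
		if (m < 0)
			m = -m;
		/* RTTVAR += (|err| - RTTVAR) / 4 */
		tp->rttvar += m - (tp->rttvar >> 2);
	}

	/* RTO = SRTT + 4 * RTTVAR (rttvar already holds 4x) */
	tp->rto = (tp->srtt >> 3) + tp->rttvar;
}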
 
 /**
  * struct batadv_softif_vlan - per VLAN attributes set
- * @bat_priv: pointer to the mesh object
- * @vid: VLAN identifier
- * @kobj: kobject for sysfs vlan subdirectory
- * @ap_isolation: AP isolation state
- * @tt: TT private attributes (VLAN specific)
- * @list: list node for bat_priv::softif_vlan_list
- * @refcount: number of context where this object is currently in use
- * @rcu: struct used for freeing in a RCU-safe manner
  */
 struct batadv_softif_vlan {
+	/** @bat_priv: pointer to the mesh object */
 	struct batadv_priv *bat_priv;
+
+	/** @vid: VLAN identifier */
 	unsigned short vid;
+
+	/** @kobj: kobject for sysfs vlan subdirectory */
 	struct kobject *kobj;
+
+	/** @ap_isolation: AP isolation state */
 	atomic_t ap_isolation;		/* boolean */
+
+	/** @tt: TT private attributes (VLAN specific) */
 	struct batadv_vlan_tt tt;
+
+	/** @list: list node for &batadv_priv.softif_vlan_list */
 	struct hlist_node list;
+
+	/**
+	 * @refcount: number of contexts where this object is currently in use
+	 */
 	struct kref refcount;
+
+	/** @rcu: struct used for freeing in a RCU-safe manner */
 	struct rcu_head rcu;
 };
 
 /**
  * struct batadv_priv_bat_v - B.A.T.M.A.N. V per soft-interface private data
- * @ogm_buff: buffer holding the OGM packet
- * @ogm_buff_len: length of the OGM packet buffer
- * @ogm_seqno: OGM sequence number - used to identify each OGM
- * @ogm_wq: workqueue used to schedule OGM transmissions
  */
 struct batadv_priv_bat_v {
+	/** @ogm_buff: buffer holding the OGM packet */
 	unsigned char *ogm_buff;
+
+	/** @ogm_buff_len: length of the OGM packet buffer */
 	int ogm_buff_len;
+
+	/** @ogm_seqno: OGM sequence number - used to identify each OGM */
 	atomic_t ogm_seqno;
+
+	/** @ogm_wq: workqueue used to schedule OGM transmissions */
 	struct delayed_work ogm_wq;
 };
 
 /**
  * struct batadv_priv - per mesh interface data
- * @mesh_state: current status of the mesh (inactive/active/deactivating)
- * @soft_iface: net device which holds this struct as private data
- * @bat_counters: mesh internal traffic statistic counters (see batadv_counters)
- * @aggregated_ogms: bool indicating whether OGM aggregation is enabled
- * @bonding: bool indicating whether traffic bonding is enabled
- * @fragmentation: bool indicating whether traffic fragmentation is enabled
- * @packet_size_max: max packet size that can be transmitted via
- *  multiple fragmented skbs or a single frame if fragmentation is disabled
- * @frag_seqno: incremental counter to identify chains of egress fragments
- * @bridge_loop_avoidance: bool indicating whether bridge loop avoidance is
- *  enabled
- * @distributed_arp_table: bool indicating whether distributed ARP table is
- *  enabled
- * @multicast_mode: Enable or disable multicast optimizations on this node's
- *  sender/originating side
- * @orig_interval: OGM broadcast interval in milliseconds
- * @hop_penalty: penalty which will be applied to an OGM's tq-field on every hop
- * @log_level: configured log level (see batadv_dbg_level)
- * @isolation_mark: the skb->mark value used to match packets for AP isolation
- * @isolation_mark_mask: bitmask identifying the bits in skb->mark to be used
- *  for the isolation mark
- * @bcast_seqno: last sent broadcast packet sequence number
- * @bcast_queue_left: number of remaining buffered broadcast packet slots
- * @batman_queue_left: number of remaining OGM packet slots
- * @num_ifaces: number of interfaces assigned to this mesh interface
- * @mesh_obj: kobject for sysfs mesh subdirectory
- * @debug_dir: dentry for debugfs batman-adv subdirectory
- * @forw_bat_list: list of aggregated OGMs that will be forwarded
- * @forw_bcast_list: list of broadcast packets that will be rebroadcasted
- * @tp_list: list of tp sessions
- * @tp_num: number of currently active tp sessions
- * @orig_hash: hash table containing mesh participants (orig nodes)
- * @forw_bat_list_lock: lock protecting forw_bat_list
- * @forw_bcast_list_lock: lock protecting forw_bcast_list
- * @tp_list_lock: spinlock protecting @tp_list
- * @orig_work: work queue callback item for orig node purging
- * @primary_if: one of the hard-interfaces assigned to this mesh interface
- *  becomes the primary interface
- * @algo_ops: routing algorithm used by this mesh interface
- * @softif_vlan_list: a list of softif_vlan structs, one per VLAN created on top
- *  of the mesh interface represented by this object
- * @softif_vlan_list_lock: lock protecting softif_vlan_list
- * @bla: bridge loope avoidance data
- * @debug_log: holding debug logging relevant data
- * @gw: gateway data
- * @tt: translation table data
- * @tvlv: type-version-length-value data
- * @dat: distributed arp table data
- * @mcast: multicast data
- * @network_coding: bool indicating whether network coding is enabled
- * @nc: network coding data
- * @bat_v: B.A.T.M.A.N. V per soft-interface private data
  */
 struct batadv_priv {
+	/**
+	 * @mesh_state: current status of the mesh
+	 *  (inactive/active/deactivating)
+	 */
 	atomic_t mesh_state;
+
+	/** @soft_iface: net device which holds this struct as private data */
 	struct net_device *soft_iface;
+
+	/**
+	 * @bat_counters: mesh internal traffic statistic counters (see
+	 *  batadv_counters)
+	 */
 	u64 __percpu *bat_counters; /* Per cpu counters */
+
+	/**
+	 * @aggregated_ogms: bool indicating whether OGM aggregation is enabled
+	 */
 	atomic_t aggregated_ogms;
+
+	/** @bonding: bool indicating whether traffic bonding is enabled */
 	atomic_t bonding;
+
+	/**
+	 * @fragmentation: bool indicating whether traffic fragmentation is
+	 *  enabled
+	 */
 	atomic_t fragmentation;
+
+	/**
+	 * @packet_size_max: max packet size that can be transmitted via
+	 *  multiple fragmented skbs or a single frame if fragmentation is
+	 *  disabled
+	 */
 	atomic_t packet_size_max;
+
+	/**
+	 * @frag_seqno: incremental counter to identify chains of egress
+	 *  fragments
+	 */
 	atomic_t frag_seqno;
+
 #ifdef CONFIG_BATMAN_ADV_BLA
+	/**
+	 * @bridge_loop_avoidance: bool indicating whether bridge loop
+	 *  avoidance is enabled
+	 */
 	atomic_t bridge_loop_avoidance;
 #endif
+
 #ifdef CONFIG_BATMAN_ADV_DAT
+	/**
+	 * @distributed_arp_table: bool indicating whether distributed ARP table
+	 *  is enabled
+	 */
 	atomic_t distributed_arp_table;
 #endif
+
 #ifdef CONFIG_BATMAN_ADV_MCAST
+	/**
+	 * @multicast_mode: Enable or disable multicast optimizations on this
+	 *  node's sender/originating side
+	 */
 	atomic_t multicast_mode;
 #endif
+
+	/** @orig_interval: OGM broadcast interval in milliseconds */
 	atomic_t orig_interval;
+
+	/**
+	 * @hop_penalty: penalty which will be applied to an OGM's tq-field on
+	 *  every hop
+	 */
 	atomic_t hop_penalty;
+
 #ifdef CONFIG_BATMAN_ADV_DEBUG
+	/** @log_level: configured log level (see batadv_dbg_level) */
 	atomic_t log_level;
 #endif
+
+	/**
+	 * @isolation_mark: the skb->mark value used to match packets for AP
+	 *  isolation
+	 */
 	u32 isolation_mark;
+
+	/**
+	 * @isolation_mark_mask: bitmask identifying the bits in skb->mark to be
+	 *  used for the isolation mark
+	 */
 	u32 isolation_mark_mask;
+
+	/** @bcast_seqno: last sent broadcast packet sequence number */
 	atomic_t bcast_seqno;
+
+	/**
+	 * @bcast_queue_left: number of remaining buffered broadcast packet
+	 *  slots
+	 */
 	atomic_t bcast_queue_left;
+
+	/** @batman_queue_left: number of remaining OGM packet slots */
 	atomic_t batman_queue_left;
+
+	/** @num_ifaces: number of interfaces assigned to this mesh interface */
 	char num_ifaces;
+
+	/** @mesh_obj: kobject for sysfs mesh subdirectory */
 	struct kobject *mesh_obj;
+
+	/** @debug_dir: dentry for debugfs batman-adv subdirectory */
 	struct dentry *debug_dir;
+
+	/** @forw_bat_list: list of aggregated OGMs that will be forwarded */
 	struct hlist_head forw_bat_list;
+
+	/**
+	 * @forw_bcast_list: list of broadcast packets that will be
+	 *  rebroadcasted
+	 */
 	struct hlist_head forw_bcast_list;
+
+	/** @tp_list: list of tp sessions */
 	struct hlist_head tp_list;
+
+	/** @orig_hash: hash table containing mesh participants (orig nodes) */
 	struct batadv_hashtable *orig_hash;
-	spinlock_t forw_bat_list_lock; /* protects forw_bat_list */
-	spinlock_t forw_bcast_list_lock; /* protects forw_bcast_list */
-	spinlock_t tp_list_lock; /* protects tp_list */
+
+	/** @forw_bat_list_lock: lock protecting forw_bat_list */
+	spinlock_t forw_bat_list_lock;
+
+	/** @forw_bcast_list_lock: lock protecting forw_bcast_list */
+	spinlock_t forw_bcast_list_lock;
+
+	/** @tp_list_lock: spinlock protecting @tp_list */
+	spinlock_t tp_list_lock;
+
+	/** @tp_num: number of currently active tp sessions */
 	atomic_t tp_num;
+
+	/** @orig_work: work queue callback item for orig node purging */
 	struct delayed_work orig_work;
+
+	/**
+	 * @primary_if: one of the hard-interfaces assigned to this mesh
+	 *  interface becomes the primary interface
+	 */
 	struct batadv_hard_iface __rcu *primary_if;  /* rcu protected pointer */
+
+	/** @algo_ops: routing algorithm used by this mesh interface */
 	struct batadv_algo_ops *algo_ops;
+
+	/**
+	 * @softif_vlan_list: a list of softif_vlan structs, one per VLAN
+	 *  created on top of the mesh interface represented by this object
+	 */
 	struct hlist_head softif_vlan_list;
-	spinlock_t softif_vlan_list_lock; /* protects softif_vlan_list */
+
+	/** @softif_vlan_list_lock: lock protecting softif_vlan_list */
+	spinlock_t softif_vlan_list_lock;
+
 #ifdef CONFIG_BATMAN_ADV_BLA
+	/** @bla: bridge loop avoidance data */
 	struct batadv_priv_bla bla;
 #endif
+
 #ifdef CONFIG_BATMAN_ADV_DEBUG
+	/** @debug_log: holding debug logging relevant data */
 	struct batadv_priv_debug_log *debug_log;
 #endif
+
+	/** @gw: gateway data */
 	struct batadv_priv_gw gw;
+
+	/** @tt: translation table data */
 	struct batadv_priv_tt tt;
+
+	/** @tvlv: type-version-length-value data */
 	struct batadv_priv_tvlv tvlv;
+
 #ifdef CONFIG_BATMAN_ADV_DAT
+	/** @dat: distributed arp table data */
 	struct batadv_priv_dat dat;
 #endif
+
 #ifdef CONFIG_BATMAN_ADV_MCAST
+	/** @mcast: multicast data */
 	struct batadv_priv_mcast mcast;
 #endif
+
 #ifdef CONFIG_BATMAN_ADV_NC
+	/**
+	 * @network_coding: bool indicating whether network coding is enabled
+	 */
 	atomic_t network_coding;
+
+	/** @nc: network coding data */
 	struct batadv_priv_nc nc;
 #endif /* CONFIG_BATMAN_ADV_NC */
+
 #ifdef CONFIG_BATMAN_ADV_BATMAN_V
+	/** @bat_v: B.A.T.M.A.N. V per soft-interface private data */
 	struct batadv_priv_bat_v bat_v;
 #endif
 };
 
 /**
  * struct batadv_socket_client - layer2 icmp socket client data
- * @queue_list: packet queue for packets destined for this socket client
- * @queue_len: number of packets in the packet queue (queue_list)
- * @index: socket client's index in the batadv_socket_client_hash
- * @lock: lock protecting queue_list, queue_len & index
- * @queue_wait: socket client's wait queue
- * @bat_priv: pointer to soft_iface this client belongs to
  */
 struct batadv_socket_client {
+	/**
+	 * @queue_list: packet queue for packets destined for this socket client
+	 */
 	struct list_head queue_list;
+
+	/** @queue_len: number of packets in the packet queue (queue_list) */
 	unsigned int queue_len;
+
+	/** @index: socket client's index in the batadv_socket_client_hash */
 	unsigned char index;
-	spinlock_t lock; /* protects queue_list, queue_len & index */
+
+	/** @lock: lock protecting queue_list, queue_len & index */
+	spinlock_t lock;
+
+	/** @queue_wait: socket client's wait queue */
 	wait_queue_head_t queue_wait;
+
+	/** @bat_priv: pointer to soft_iface this client belongs to */
 	struct batadv_priv *bat_priv;
 };
 
 /**
  * struct batadv_socket_packet - layer2 icmp packet for socket client
- * @list: list node for batadv_socket_client::queue_list
- * @icmp_len: size of the layer2 icmp packet
- * @icmp_packet: layer2 icmp packet
  */
 struct batadv_socket_packet {
+	/** @list: list node for &batadv_socket_client.queue_list */
 	struct list_head list;
+
+	/** @icmp_len: size of the layer2 icmp packet */
 	size_t icmp_len;
+
+	/** @icmp_packet: layer2 icmp packet */
 	u8 icmp_packet[BATADV_ICMP_MAX_PACKET_SIZE];
 };
 
@@ -1153,312 +1740,432 @@ struct batadv_socket_packet {
 
 /**
  * struct batadv_bla_backbone_gw - batman-adv gateway bridged into the LAN
- * @orig: originator address of backbone node (mac address of primary iface)
- * @vid: vlan id this gateway was detected on
- * @hash_entry: hlist node for batadv_priv_bla::backbone_hash
- * @bat_priv: pointer to soft_iface this backbone gateway belongs to
- * @lasttime: last time we heard of this backbone gw
- * @wait_periods: grace time for bridge forward delays and bla group forming at
- *  bootup phase - no bcast traffic is formwared until it has elapsed
- * @request_sent: if this bool is set to true we are out of sync with this
- *  backbone gateway - no bcast traffic is formwared until the situation was
- *  resolved
- * @crc: crc16 checksum over all claims
- * @crc_lock: lock protecting crc
- * @report_work: work struct for reporting detected loops
- * @refcount: number of contexts the object is used
- * @rcu: struct used for freeing in an RCU-safe manner
  */
 struct batadv_bla_backbone_gw {
+	/**
+	 * @orig: originator address of backbone node (mac address of primary
+	 *  iface)
+	 */
 	u8 orig[ETH_ALEN];
+
+	/** @vid: vlan id this gateway was detected on */
 	unsigned short vid;
+
+	/** @hash_entry: hlist node for &batadv_priv_bla.backbone_hash */
 	struct hlist_node hash_entry;
+
+	/** @bat_priv: pointer to soft_iface this backbone gateway belongs to */
 	struct batadv_priv *bat_priv;
+
+	/** @lasttime: last time we heard of this backbone gw */
 	unsigned long lasttime;
+
+	/**
+	 * @wait_periods: grace time for bridge forward delays and bla group
+	 *  forming at bootup phase - no bcast traffic is forwarded until it has
+	 *  elapsed
+	 */
 	atomic_t wait_periods;
+
+	/**
+	 * @request_sent: if this bool is set to true we are out of sync with
+	 *  this backbone gateway - no bcast traffic is forwarded until the
+	 *  situation is resolved
+	 */
 	atomic_t request_sent;
+
+	/** @crc: crc16 checksum over all claims */
 	u16 crc;
-	spinlock_t crc_lock; /* protects crc */
+
+	/** @crc_lock: lock protecting crc */
+	spinlock_t crc_lock;
+
+	/** @report_work: work struct for reporting detected loops */
 	struct work_struct report_work;
+
+	/** @refcount: number of contexts the object is used */
 	struct kref refcount;
+
+	/** @rcu: struct used for freeing in an RCU-safe manner */
 	struct rcu_head rcu;
 };
 
 /**
  * struct batadv_bla_claim - claimed non-mesh client structure
- * @addr: mac address of claimed non-mesh client
- * @vid: vlan id this client was detected on
- * @backbone_gw: pointer to backbone gw claiming this client
- * @backbone_lock: lock protecting backbone_gw pointer
- * @lasttime: last time we heard of claim (locals only)
- * @hash_entry: hlist node for batadv_priv_bla::claim_hash
- * @refcount: number of contexts the object is used
- * @rcu: struct used for freeing in an RCU-safe manner
  */
 struct batadv_bla_claim {
+	/** @addr: mac address of claimed non-mesh client */
 	u8 addr[ETH_ALEN];
+
+	/** @vid: vlan id this client was detected on */
 	unsigned short vid;
+
+	/** @backbone_gw: pointer to backbone gw claiming this client */
 	struct batadv_bla_backbone_gw *backbone_gw;
-	spinlock_t backbone_lock; /* protects backbone_gw */
+
+	/** @backbone_lock: lock protecting backbone_gw pointer */
+	spinlock_t backbone_lock;
+
+	/** @lasttime: last time we heard of claim (locals only) */
 	unsigned long lasttime;
+
+	/** @hash_entry: hlist node for &batadv_priv_bla.claim_hash */
 	struct hlist_node hash_entry;
+
+	/** @rcu: struct used for freeing in an RCU-safe manner */
 	struct rcu_head rcu;
+
+	/** @refcount: number of contexts the object is used */
 	struct kref refcount;
 };
 #endif
 
 /**
  * struct batadv_tt_common_entry - tt local & tt global common data
- * @addr: mac address of non-mesh client
- * @vid: VLAN identifier
- * @hash_entry: hlist node for batadv_priv_tt::local_hash or for
- *  batadv_priv_tt::global_hash
- * @flags: various state handling flags (see batadv_tt_client_flags)
- * @added_at: timestamp used for purging stale tt common entries
- * @refcount: number of contexts the object is used
- * @rcu: struct used for freeing in an RCU-safe manner
  */
 struct batadv_tt_common_entry {
+	/** @addr: mac address of non-mesh client */
 	u8 addr[ETH_ALEN];
+
+	/** @vid: VLAN identifier */
 	unsigned short vid;
+
+	/**
+	 * @hash_entry: hlist node for &batadv_priv_tt.local_hash or for
+	 *  &batadv_priv_tt.global_hash
+	 */
 	struct hlist_node hash_entry;
+
+	/** @flags: various state handling flags (see batadv_tt_client_flags) */
 	u16 flags;
+
+	/** @added_at: timestamp used for purging stale tt common entries */
 	unsigned long added_at;
+
+	/** @refcount: number of contexts the object is used */
 	struct kref refcount;
+
+	/** @rcu: struct used for freeing in an RCU-safe manner */
 	struct rcu_head rcu;
 };
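
The @refcount/@rcu pair that recurs in nearly every structure here is the
standard kref-plus-RCU lifetime pattern: the final kref_put() schedules the
actual free for after an RCU grace period, so lockless readers traversing the
hash tables never dereference freed memory. A generic sketch; the release/put
helper names are illustrative:

static void tt_common_release(struct kref *ref)
{
	struct batadv_tt_common_entry *entry;

	entry = container_of(ref, struct batadv_tt_common_entry, refcount);
	kfree_rcu(entry, rcu);	/* freed only after the grace period */
}

static void tt_common_put(struct batadv_tt_common_entry *entry)
{
	kref_put(&entry->refcount, tt_common_release);
}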
 
 /**
  * struct batadv_tt_local_entry - translation table local entry data
- * @common: general translation table data
- * @last_seen: timestamp used for purging stale tt local entries
- * @vlan: soft-interface vlan of the entry
  */
 struct batadv_tt_local_entry {
+	/** @common: general translation table data */
 	struct batadv_tt_common_entry common;
+
+	/** @last_seen: timestamp used for purging stale tt local entries */
 	unsigned long last_seen;
+
+	/** @vlan: soft-interface vlan of the entry */
 	struct batadv_softif_vlan *vlan;
 };
 
 /**
  * struct batadv_tt_global_entry - translation table global entry data
- * @common: general translation table data
- * @orig_list: list of orig nodes announcing this non-mesh client
- * @orig_list_count: number of items in the orig_list
- * @list_lock: lock protecting orig_list
- * @roam_at: time at which TT_GLOBAL_ROAM was set
  */
 struct batadv_tt_global_entry {
+	/** @common: general translation table data */
 	struct batadv_tt_common_entry common;
+
+	/** @orig_list: list of orig nodes announcing this non-mesh client */
 	struct hlist_head orig_list;
+
+	/** @orig_list_count: number of items in the orig_list */
 	atomic_t orig_list_count;
-	spinlock_t list_lock;	/* protects orig_list */
+
+	/** @list_lock: lock protecting orig_list */
+	spinlock_t list_lock;
+
+	/** @roam_at: time at which TT_GLOBAL_ROAM was set */
 	unsigned long roam_at;
 };
 
 /**
  * struct batadv_tt_orig_list_entry - orig node announcing a non-mesh client
- * @orig_node: pointer to orig node announcing this non-mesh client
- * @ttvn: translation table version number which added the non-mesh client
- * @flags: per orig entry TT sync flags
- * @list: list node for batadv_tt_global_entry::orig_list
- * @refcount: number of contexts the object is used
- * @rcu: struct used for freeing in an RCU-safe manner
  */
 struct batadv_tt_orig_list_entry {
+	/** @orig_node: pointer to orig node announcing this non-mesh client */
 	struct batadv_orig_node *orig_node;
+
+	/**
+	 * @ttvn: translation table version number which added the non-mesh
+	 *  client
+	 */
 	u8 ttvn;
+
+	/** @flags: per orig entry TT sync flags */
 	u8 flags;
+
+	/** @list: list node for &batadv_tt_global_entry.orig_list */
 	struct hlist_node list;
+
+	/** @refcount: number of contexts the object is used */
 	struct kref refcount;
+
+	/** @rcu: struct used for freeing in an RCU-safe manner */
 	struct rcu_head rcu;
 };
 
 /**
 * struct batadv_tt_change_node - structure for tt changes that occurred
- * @list: list node for batadv_priv_tt::changes_list
- * @change: holds the actual translation table diff data
  */
 struct batadv_tt_change_node {
+	/** @list: list node for &batadv_priv_tt.changes_list */
 	struct list_head list;
+
+	/** @change: holds the actual translation table diff data */
 	struct batadv_tvlv_tt_change change;
 };
 
 /**
  * struct batadv_tt_req_node - data to keep track of the tt requests in flight
- * @addr: mac address address of the originator this request was sent to
- * @issued_at: timestamp used for purging stale tt requests
- * @refcount: number of contexts the object is used by
- * @list: list node for batadv_priv_tt::req_list
  */
 struct batadv_tt_req_node {
+	/**
+	 * @addr: mac address of the originator this request was sent to
+	 */
 	u8 addr[ETH_ALEN];
+
+	/** @issued_at: timestamp used for purging stale tt requests */
 	unsigned long issued_at;
+
+	/** @refcount: number of contexts the object is used by */
 	struct kref refcount;
+
+	/** @list: list node for &batadv_priv_tt.req_list */
 	struct hlist_node list;
 };
 
 /**
  * struct batadv_tt_roam_node - roaming client data
- * @addr: mac address of the client in the roaming phase
- * @counter: number of allowed roaming events per client within a single
- *  OGM interval (changes are committed with each OGM)
- * @first_time: timestamp used for purging stale roaming node entries
- * @list: list node for batadv_priv_tt::roam_list
  */
 struct batadv_tt_roam_node {
+	/** @addr: mac address of the client in the roaming phase */
 	u8 addr[ETH_ALEN];
+
+	/**
+	 * @counter: number of allowed roaming events per client within a single
+	 *  OGM interval (changes are committed with each OGM)
+	 */
 	atomic_t counter;
+
+	/**
+	 * @first_time: timestamp used for purging stale roaming node entries
+	 */
 	unsigned long first_time;
+
+	/** @list: list node for &batadv_priv_tt.roam_list */
 	struct list_head list;
 };
 
 /**
  * struct batadv_nc_node - network coding node
- * @list: next and prev pointer for the list handling
- * @addr: the node's mac address
- * @refcount: number of contexts the object is used by
- * @rcu: struct used for freeing in an RCU-safe manner
- * @orig_node: pointer to corresponding orig node struct
- * @last_seen: timestamp of last ogm received from this node
  */
 struct batadv_nc_node {
+	/** @list: next and prev pointer for the list handling */
 	struct list_head list;
+
+	/** @addr: the node's mac address */
 	u8 addr[ETH_ALEN];
+
+	/** @refcount: number of contexts the object is used by */
 	struct kref refcount;
+
+	/** @rcu: struct used for freeing in an RCU-safe manner */
 	struct rcu_head rcu;
+
+	/** @orig_node: pointer to corresponding orig node struct */
 	struct batadv_orig_node *orig_node;
+
+	/** @last_seen: timestamp of last ogm received from this node */
 	unsigned long last_seen;
 };
 
 /**
  * struct batadv_nc_path - network coding path
- * @hash_entry: next and prev pointer for the list handling
- * @rcu: struct used for freeing in an RCU-safe manner
- * @refcount: number of contexts the object is used by
- * @packet_list: list of buffered packets for this path
- * @packet_list_lock: access lock for packet list
- * @next_hop: next hop (destination) of path
- * @prev_hop: previous hop (source) of path
- * @last_valid: timestamp for last validation of path
  */
 struct batadv_nc_path {
+	/** @hash_entry: next and prev pointer for the list handling */
 	struct hlist_node hash_entry;
+
+	/** @rcu: struct used for freeing in an RCU-safe manner */
 	struct rcu_head rcu;
+
+	/** @refcount: number of contexts the object is used by */
 	struct kref refcount;
+
+	/** @packet_list: list of buffered packets for this path */
 	struct list_head packet_list;
-	spinlock_t packet_list_lock; /* Protects packet_list */
+
+	/** @packet_list_lock: access lock for packet list */
+	spinlock_t packet_list_lock;
+
+	/** @next_hop: next hop (destination) of path */
 	u8 next_hop[ETH_ALEN];
+
+	/** @prev_hop: previous hop (source) of path */
 	u8 prev_hop[ETH_ALEN];
+
+	/** @last_valid: timestamp for last validation of path */
 	unsigned long last_valid;
 };
 
 /**
  * struct batadv_nc_packet - network coding packet used when coding and
  *  decoding packets
- * @list: next and prev pointer for the list handling
- * @packet_id: crc32 checksum of skb data
- * @timestamp: field containing the info when the packet was added to path
- * @neigh_node: pointer to original next hop neighbor of skb
- * @skb: skb which can be encoded or used for decoding
- * @nc_path: pointer to path this nc packet is attached to
  */
 struct batadv_nc_packet {
+	/** @list: next and prev pointer for the list handling */
 	struct list_head list;
+
+	/** @packet_id: crc32 checksum of skb data */
 	__be32 packet_id;
+
+	/**
+	 * @timestamp: field containing the info when the packet was added to
+	 *  path
+	 */
 	unsigned long timestamp;
+
+	/** @neigh_node: pointer to original next hop neighbor of skb */
 	struct batadv_neigh_node *neigh_node;
+
+	/** @skb: skb which can be encoded or used for decoding */
 	struct sk_buff *skb;
+
+	/** @nc_path: pointer to path this nc packet is attached to */
 	struct batadv_nc_path *nc_path;
 };
 
 /**
  * struct batadv_skb_cb - control buffer structure used to store private data
  *  relevant to batman-adv in the skb->cb buffer in skbs.
- * @decoded: Marks a skb as decoded, which is checked when searching for coding
- *  opportunities in network-coding.c
- * @num_bcasts: Counter for broadcast packet retransmissions
  */
 struct batadv_skb_cb {
+	/**
+	 * @decoded: Marks a skb as decoded, which is checked when searching for
+	 *  coding opportunities in network-coding.c
+	 */
 	bool decoded;
+
+	/** @num_bcasts: Counter for broadcast packet retransmissions */
 	unsigned int num_bcasts;
 };
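
Since skb->cb is only a fixed-size scratch area (48 bytes), this struct is
overlaid on it with a cast; the usual guard is a BUILD_BUG_ON so the private
data can never outgrow the control buffer. A sketch of that accessor pattern
with illustrative names (the in-tree code uses an equivalent macro):

#define EXAMPLE_SKB_CB(skb) ((struct batadv_skb_cb *)&((skb)->cb[0]))

static void mark_decoded(struct sk_buff *skb)
{
	/* refuse to build if the private data no longer fits in cb */
	BUILD_BUG_ON(sizeof(struct batadv_skb_cb) > sizeof(skb->cb));

	EXAMPLE_SKB_CB(skb)->decoded = true;
}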
 
 /**
  * struct batadv_forw_packet - structure for bcast packets to be sent/forwarded
- * @list: list node for batadv_priv::forw_{bat,bcast}_list
- * @cleanup_list: list node for purging functions
- * @send_time: execution time for delayed_work (packet sending)
- * @own: bool for locally generated packets (local OGMs are re-scheduled after
- *  sending)
- * @skb: bcast packet's skb buffer
- * @packet_len: size of aggregated OGM packet inside the skb buffer
- * @direct_link_flags: direct link flags for aggregated OGM packets
- * @num_packets: counter for aggregated OGMv1 packets
- * @delayed_work: work queue callback item for packet sending
- * @if_incoming: pointer to incoming hard-iface or primary iface if
- *  locally generated packet
- * @if_outgoing: packet where the packet should be sent to, or NULL if
- *  unspecified
- * @queue_left: The queue (counter) this packet was applied to
  */
 struct batadv_forw_packet {
+	/**
+	 * @list: list node for &batadv_priv.forw_bcast_list and
+	 *  &batadv_priv.forw_bat_list
+	 */
 	struct hlist_node list;
+
+	/** @cleanup_list: list node for purging functions */
 	struct hlist_node cleanup_list;
+
+	/** @send_time: execution time for delayed_work (packet sending) */
 	unsigned long send_time;
+
+	/**
+	 * @own: bool for locally generated packets (local OGMs are re-scheduled
+	 *  after sending)
+	 */
 	u8 own;
+
+	/** @skb: bcast packet's skb buffer */
 	struct sk_buff *skb;
+
+	/** @packet_len: size of aggregated OGM packet inside the skb buffer */
 	u16 packet_len;
+
+	/** @direct_link_flags: direct link flags for aggregated OGM packets */
 	u32 direct_link_flags;
+
+	/** @num_packets: counter for aggregated OGMv1 packets */
 	u8 num_packets;
+
+	/** @delayed_work: work queue callback item for packet sending */
 	struct delayed_work delayed_work;
+
+	/**
+	 * @if_incoming: pointer to incoming hard-iface or primary iface if
+	 *  locally generated packet
+	 */
 	struct batadv_hard_iface *if_incoming;
+
+	/**
+	 * @if_outgoing: interface where the packet should be sent to, or NULL
+	 *  unspecified
+	 */
 	struct batadv_hard_iface *if_outgoing;
+
+	/** @queue_left: The queue (counter) this packet was applied to */
 	atomic_t *queue_left;
 };
 
 /**
  * struct batadv_algo_iface_ops - mesh algorithm callbacks (interface specific)
- * @activate: start routing mechanisms when hard-interface is brought up
- *  (optional)
- * @enable: init routing info when hard-interface is enabled
- * @disable: de-init routing info when hard-interface is disabled
- * @update_mac: (re-)init mac addresses of the protocol information
- *  belonging to this hard-interface
- * @primary_set: called when primary interface is selected / changed
  */
 struct batadv_algo_iface_ops {
+	/**
+	 * @activate: start routing mechanisms when hard-interface is brought up
+	 *  (optional)
+	 */
 	void (*activate)(struct batadv_hard_iface *hard_iface);
+
+	/** @enable: init routing info when hard-interface is enabled */
 	int (*enable)(struct batadv_hard_iface *hard_iface);
+
+	/** @disable: de-init routing info when hard-interface is disabled */
 	void (*disable)(struct batadv_hard_iface *hard_iface);
+
+	/**
+	 * @update_mac: (re-)init mac addresses of the protocol information
+	 *  belonging to this hard-interface
+	 */
 	void (*update_mac)(struct batadv_hard_iface *hard_iface);
+
+	/** @primary_set: called when primary interface is selected / changed */
 	void (*primary_set)(struct batadv_hard_iface *hard_iface);
 };
 
 /**
  * struct batadv_algo_neigh_ops - mesh algorithm callbacks (neighbour specific)
- * @hardif_init: called on creation of single hop entry
- *  (optional)
- * @cmp: compare the metrics of two neighbors for their respective outgoing
- *  interfaces
- * @is_similar_or_better: check if neigh1 is equally similar or better than
- *  neigh2 for their respective outgoing interface from the metric prospective
- * @print: print the single hop neighbor list (optional)
- * @dump: dump neighbors to a netlink socket (optional)
  */
 struct batadv_algo_neigh_ops {
+	/** @hardif_init: called on creation of single hop entry (optional) */
 	void (*hardif_init)(struct batadv_hardif_neigh_node *neigh);
+
+	/**
+	 * @cmp: compare the metrics of two neighbors for their respective
+	 *  outgoing interfaces
+	 */
 	int (*cmp)(struct batadv_neigh_node *neigh1,
 		   struct batadv_hard_iface *if_outgoing1,
 		   struct batadv_neigh_node *neigh2,
 		   struct batadv_hard_iface *if_outgoing2);
+
+	/**
+	 * @is_similar_or_better: check if neigh1 is equally similar or better
+	 *  than neigh2 for their respective outgoing interface from the metric
+	 *  perspective
+	 */
 	bool (*is_similar_or_better)(struct batadv_neigh_node *neigh1,
 				     struct batadv_hard_iface *if_outgoing1,
 				     struct batadv_neigh_node *neigh2,
 				     struct batadv_hard_iface *if_outgoing2);
+
 #ifdef CONFIG_BATMAN_ADV_DEBUGFS
+	/** @print: print the single hop neighbor list (optional) */
 	void (*print)(struct batadv_priv *priv, struct seq_file *seq);
 #endif
+
+	/** @dump: dump neighbors to a netlink socket (optional) */
 	void (*dump)(struct sk_buff *msg, struct netlink_callback *cb,
 		     struct batadv_priv *priv,
 		     struct batadv_hard_iface *hard_iface);
@@ -1466,24 +2173,36 @@ struct batadv_algo_neigh_ops {
 
 /**
  * struct batadv_algo_orig_ops - mesh algorithm callbacks (originator specific)
- * @free: free the resources allocated by the routing algorithm for an orig_node
- *  object (optional)
- * @add_if: ask the routing algorithm to apply the needed changes to the
- *  orig_node due to a new hard-interface being added into the mesh (optional)
- * @del_if: ask the routing algorithm to apply the needed changes to the
- *  orig_node due to an hard-interface being removed from the mesh (optional)
- * @print: print the originator table (optional)
- * @dump: dump originators to a netlink socket (optional)
  */
 struct batadv_algo_orig_ops {
+	/**
+	 * @free: free the resources allocated by the routing algorithm for an
+	 *  orig_node object (optional)
+	 */
 	void (*free)(struct batadv_orig_node *orig_node);
+
+	/**
+	 * @add_if: ask the routing algorithm to apply the needed changes to the
+	 *  orig_node due to a new hard-interface being added into the mesh
+	 *  (optional)
+	 */
 	int (*add_if)(struct batadv_orig_node *orig_node, int max_if_num);
+
+	/**
+	 * @del_if: ask the routing algorithm to apply the needed changes to the
+	 *  orig_node due to an hard-interface being removed from the mesh
+	 *  (optional)
+	 */
 	int (*del_if)(struct batadv_orig_node *orig_node, int max_if_num,
 		      int del_if_num);
+
 #ifdef CONFIG_BATMAN_ADV_DEBUGFS
+	/** @print: print the originator table (optional) */
 	void (*print)(struct batadv_priv *priv, struct seq_file *seq,
 		      struct batadv_hard_iface *hard_iface);
 #endif
+
+	/** @dump: dump originators to a netlink socket (optional) */
 	void (*dump)(struct sk_buff *msg, struct netlink_callback *cb,
 		     struct batadv_priv *priv,
 		     struct batadv_hard_iface *hard_iface);
@@ -1491,158 +2210,213 @@ struct batadv_algo_orig_ops {
 
 /**
  * struct batadv_algo_gw_ops - mesh algorithm callbacks (GW specific)
- * @init_sel_class: initialize GW selection class (optional)
- * @store_sel_class: parse and stores a new GW selection class (optional)
- * @show_sel_class: prints the current GW selection class (optional)
- * @get_best_gw_node: select the best GW from the list of available nodes
- *  (optional)
- * @is_eligible: check if a newly discovered GW is a potential candidate for
- *  the election as best GW (optional)
- * @print: print the gateway table (optional)
- * @dump: dump gateways to a netlink socket (optional)
  */
 struct batadv_algo_gw_ops {
+	/** @init_sel_class: initialize GW selection class (optional) */
 	void (*init_sel_class)(struct batadv_priv *bat_priv);
+
+	/**
+	 * @store_sel_class: parses and stores a new GW selection class
+	 *  (optional)
+	 */
 	ssize_t (*store_sel_class)(struct batadv_priv *bat_priv, char *buff,
 				   size_t count);
+
+	/** @show_sel_class: prints the current GW selection class (optional) */
 	ssize_t (*show_sel_class)(struct batadv_priv *bat_priv, char *buff);
+
+	/**
+	 * @get_best_gw_node: select the best GW from the list of available
+	 *  nodes (optional)
+	 */
 	struct batadv_gw_node *(*get_best_gw_node)
 		(struct batadv_priv *bat_priv);
+
+	/**
+	 * @is_eligible: check if a newly discovered GW is a potential candidate
+	 *  for the election as best GW (optional)
+	 */
 	bool (*is_eligible)(struct batadv_priv *bat_priv,
 			    struct batadv_orig_node *curr_gw_orig,
 			    struct batadv_orig_node *orig_node);
+
 #ifdef CONFIG_BATMAN_ADV_DEBUGFS
+	/** @print: print the gateway table (optional) */
 	void (*print)(struct batadv_priv *bat_priv, struct seq_file *seq);
 #endif
+
+	/** @dump: dump gateways to a netlink socket (optional) */
 	void (*dump)(struct sk_buff *msg, struct netlink_callback *cb,
 		     struct batadv_priv *priv);
 };
 
 /**
  * struct batadv_algo_ops - mesh algorithm callbacks
- * @list: list node for the batadv_algo_list
- * @name: name of the algorithm
- * @iface: callbacks related to interface handling
- * @neigh: callbacks related to neighbors handling
- * @orig: callbacks related to originators handling
- * @gw: callbacks related to GW mode
  */
 struct batadv_algo_ops {
+	/** @list: list node for the batadv_algo_list */
 	struct hlist_node list;
+
+	/** @name: name of the algorithm */
 	char *name;
+
+	/** @iface: callbacks related to interface handling */
 	struct batadv_algo_iface_ops iface;
+
+	/** @neigh: callbacks related to neighbors handling */
 	struct batadv_algo_neigh_ops neigh;
+
+	/** @orig: callbacks related to originators handling */
 	struct batadv_algo_orig_ops orig;
+
+	/** @gw: callbacks related to GW mode */
 	struct batadv_algo_gw_ops gw;
 };
 
 /**
  * struct batadv_dat_entry - it is a single entry of batman-adv ARP backend. It
 * is used to store ARP entries needed for the global DAT cache
- * @ip: the IPv4 corresponding to this DAT/ARP entry
- * @mac_addr: the MAC address associated to the stored IPv4
- * @vid: the vlan ID associated to this entry
- * @last_update: time in jiffies when this entry was refreshed last time
- * @hash_entry: hlist node for batadv_priv_dat::hash
- * @refcount: number of contexts the object is used
- * @rcu: struct used for freeing in an RCU-safe manner
  */
 struct batadv_dat_entry {
+	/** @ip: the IPv4 corresponding to this DAT/ARP entry */
 	__be32 ip;
+
+	/** @mac_addr: the MAC address associated to the stored IPv4 */
 	u8 mac_addr[ETH_ALEN];
+
+	/** @vid: the vlan ID associated to this entry */
 	unsigned short vid;
+
+	/**
+	 * @last_update: time in jiffies when this entry was last refreshed
+	 */
 	unsigned long last_update;
+
+	/** @hash_entry: hlist node for &batadv_priv_dat.hash */
 	struct hlist_node hash_entry;
+
+	/** @refcount: number of contexts the object is used in */
 	struct kref refcount;
+
+	/** @rcu: struct used for freeing in an RCU-safe manner */
 	struct rcu_head rcu;
 };
 
 /**
  * struct batadv_hw_addr - a list entry for a MAC address
- * @list: list node for the linking of entries
- * @addr: the MAC address of this list entry
  */
 struct batadv_hw_addr {
+	/** @list: list node for the linking of entries */
 	struct hlist_node list;
+
+	/** @addr: the MAC address of this list entry */
 	unsigned char addr[ETH_ALEN];
 };
 
 /**
  * struct batadv_dat_candidate - candidate destination for DAT operations
- * @type: the type of the selected candidate. It can one of the following:
- *	  - BATADV_DAT_CANDIDATE_NOT_FOUND
- *	  - BATADV_DAT_CANDIDATE_ORIG
- * @orig_node: if type is BATADV_DAT_CANDIDATE_ORIG this field points to the
- *	       corresponding originator node structure
  */
 struct batadv_dat_candidate {
+	/**
+	 * @type: the type of the selected candidate. It can be one of the
+	 *  following:
+	 *	  - BATADV_DAT_CANDIDATE_NOT_FOUND
+	 *	  - BATADV_DAT_CANDIDATE_ORIG
+	 */
 	int type;
+
+	/**
+	 * @orig_node: if type is BATADV_DAT_CANDIDATE_ORIG this field points to
+	 * the corresponding originator node structure
+	 */
 	struct batadv_orig_node *orig_node;
 };
 
 /**
  * struct batadv_tvlv_container - container for tvlv appended to OGMs
- * @list: hlist node for batadv_priv_tvlv::container_list
- * @tvlv_hdr: tvlv header information needed to construct the tvlv
- * @refcount: number of contexts the object is used
  */
 struct batadv_tvlv_container {
+	/** @list: hlist node for &batadv_priv_tvlv.container_list */
 	struct hlist_node list;
+
+	/** @tvlv_hdr: tvlv header information needed to construct the tvlv */
 	struct batadv_tvlv_hdr tvlv_hdr;
+
+	/** @refcount: number of contexts the object is used in */
 	struct kref refcount;
 };
 
 /**
  * struct batadv_tvlv_handler - handler for specific tvlv type and version
- * @list: hlist node for batadv_priv_tvlv::handler_list
- * @ogm_handler: handler callback which is given the tvlv payload to process on
- *  incoming OGM packets
- * @unicast_handler: handler callback which is given the tvlv payload to process
- *  on incoming unicast tvlv packets
- * @type: tvlv type this handler feels responsible for
- * @version: tvlv version this handler feels responsible for
- * @flags: tvlv handler flags
- * @refcount: number of contexts the object is used
- * @rcu: struct used for freeing in an RCU-safe manner
  */
 struct batadv_tvlv_handler {
+	/** @list: hlist node for &batadv_priv_tvlv.handler_list */
 	struct hlist_node list;
+
+	/**
+	 * @ogm_handler: handler callback which is given the tvlv payload to
+	 *  process on incoming OGM packets
+	 */
 	void (*ogm_handler)(struct batadv_priv *bat_priv,
 			    struct batadv_orig_node *orig,
 			    u8 flags, void *tvlv_value, u16 tvlv_value_len);
+
+	/**
+	 * @unicast_handler: handler callback which is given the tvlv payload to
+	 *  process on incoming unicast tvlv packets
+	 */
 	int (*unicast_handler)(struct batadv_priv *bat_priv,
 			       u8 *src, u8 *dst,
 			       void *tvlv_value, u16 tvlv_value_len);
+
+	/** @type: tvlv type this handler feels responsible for */
 	u8 type;
+
+	/** @version: tvlv version this handler feels responsible for */
 	u8 version;
+
+	/** @flags: tvlv handler flags */
 	u8 flags;
+
+	/** @refcount: number of contexts the object is used in */
 	struct kref refcount;
+
+	/** @rcu: struct used for freeing in an RCU-safe manner */
 	struct rcu_head rcu;
 };
 
 /**
  * enum batadv_tvlv_handler_flags - tvlv handler flags definitions
- * @BATADV_TVLV_HANDLER_OGM_CIFNOTFND: tvlv ogm processing function will call
- *  this handler even if its type was not found (with no data)
- * @BATADV_TVLV_HANDLER_OGM_CALLED: interval tvlv handling flag - the API marks
- *  a handler as being called, so it won't be called if the
- *  BATADV_TVLV_HANDLER_OGM_CIFNOTFND flag was set
  */
 enum batadv_tvlv_handler_flags {
+	/**
+	 * @BATADV_TVLV_HANDLER_OGM_CIFNOTFND: tvlv ogm processing function
+	 *  will call this handler even if its type was not found (with no data)
+	 */
 	BATADV_TVLV_HANDLER_OGM_CIFNOTFND = BIT(1),
+
+	/**
+	 * @BATADV_TVLV_HANDLER_OGM_CALLED: internal tvlv handling flag - the
+	 *  API marks a handler as being called, so it won't be called if the
+	 *  BATADV_TVLV_HANDLER_OGM_CIFNOTFND flag was set
+	 */
 	BATADV_TVLV_HANDLER_OGM_CALLED = BIT(2),
 };
 
 /**
  * struct batadv_store_mesh_work - Work queue item to detach add/del interface
  *  from sysfs locks
- * @net_dev: netdevice to add/remove to/from batman-adv soft-interface
- * @soft_iface_name: name of soft-interface to modify
- * @work: work queue item
  */
 struct batadv_store_mesh_work {
+	/**
+	 * @net_dev: netdevice to add/remove to/from batman-adv soft-interface
+	 */
 	struct net_device *net_dev;
+
+	/** @soft_iface_name: name of soft-interface to modify */
 	char soft_iface_name[IFNAMSIZ];
+
+	/** @work: work queue item */
 	struct work_struct work;
 };
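
For reference, every batadv hunk above performs the same mechanical
conversion: the per-struct kernel-doc member list is replaced by inline
"/** @member: ... */" comments next to the fields themselves. A minimal
sketch of the target style, using a made-up struct for illustration:

    /**
     * struct example_entry - a structure documented inline
     */
    struct example_entry {
            /** @id: identifier of this entry */
            u32 id;

            /**
             * @refcount: number of contexts the object is used in
             *  (longer member descriptions become multi-line comments)
             */
            struct kref refcount;
    };

scripts/kernel-doc accepts both placements, so the generated documentation
is unchanged; the inline form simply keeps each description next to the
field it documents.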
 
diff --git a/net/bluetooth/af_bluetooth.c b/net/bluetooth/af_bluetooth.c
index 91e3ba2..f044202 100644
--- a/net/bluetooth/af_bluetooth.c
+++ b/net/bluetooth/af_bluetooth.c
@@ -766,43 +766,39 @@ static int __init bt_init(void)
 		return err;
 
 	err = sock_register(&bt_sock_family_ops);
-	if (err < 0) {
-		bt_sysfs_cleanup();
-		return err;
-	}
+	if (err)
+		goto cleanup_sysfs;
 
 	BT_INFO("HCI device and connection manager initialized");
 
 	err = hci_sock_init();
-	if (err < 0)
-		goto error;
+	if (err)
+		goto unregister_socket;
 
 	err = l2cap_init();
-	if (err < 0)
-		goto sock_err;
+	if (err)
+		goto cleanup_socket;
 
 	err = sco_init();
-	if (err < 0) {
-		l2cap_exit();
-		goto sock_err;
-	}
+	if (err)
+		goto cleanup_cap;
 
 	err = mgmt_init();
-	if (err < 0) {
-		sco_exit();
-		l2cap_exit();
-		goto sock_err;
-	}
+	if (err)
+		goto cleanup_sco;
 
 	return 0;
 
-sock_err:
+cleanup_sco:
+	sco_exit();
+cleanup_cap:
+	l2cap_exit();
+cleanup_socket:
 	hci_sock_cleanup();
-
-error:
+unregister_socket:
 	sock_unregister(PF_BLUETOOTH);
+cleanup_sysfs:
 	bt_sysfs_cleanup();
-
 	return err;
 }
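
The bt_init() rework above is the conventional kernel unwind ladder: one
label per successfully completed step, each label undoing exactly that
step, so any failure jumps to the teardown of everything initialized so
far and nothing else. The shape, as a self-contained sketch with
hypothetical step_a/step_b/step_c helpers:

    static int __init example_init(void)
    {
            int err;

            err = step_a_init();
            if (err)
                    return err;

            err = step_b_init();
            if (err)
                    goto cleanup_a;

            err = step_c_init();
            if (err)
                    goto cleanup_b;

            return 0;

    cleanup_b:
            step_b_exit();
    cleanup_a:
            step_a_exit();
            return err;
    }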
 
diff --git a/net/bluetooth/hci_debugfs.c b/net/bluetooth/hci_debugfs.c
index 63df63e..57403bd 100644
--- a/net/bluetooth/hci_debugfs.c
+++ b/net/bluetooth/hci_debugfs.c
@@ -88,6 +88,9 @@ static int __name ## _show(struct seq_file *f, void *ptr)		      \
 	return 0;							      \
 }									      \
 									      \
+DEFINE_SHOW_ATTRIBUTE(__name)
+
+#define DEFINE_SHOW_ATTRIBUTE(__name)					      \
 static int __name ## _open(struct inode *inode, struct file *file)	      \
 {									      \
 	return single_open(file, __name ## _show, inode->i_private);	      \
@@ -106,37 +109,16 @@ static int features_show(struct seq_file *f, void *ptr)
 	u8 p;
 
 	hci_dev_lock(hdev);
-	for (p = 0; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
-		seq_printf(f, "%2u: 0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x "
-			   "0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x\n", p,
-			   hdev->features[p][0], hdev->features[p][1],
-			   hdev->features[p][2], hdev->features[p][3],
-			   hdev->features[p][4], hdev->features[p][5],
-			   hdev->features[p][6], hdev->features[p][7]);
-	}
+	for (p = 0; p < HCI_MAX_PAGES && p <= hdev->max_page; p++)
+		seq_printf(f, "%2u: %8ph\n", p, hdev->features[p]);
 	if (lmp_le_capable(hdev))
-		seq_printf(f, "LE: 0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x "
-			   "0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x\n",
-			   hdev->le_features[0], hdev->le_features[1],
-			   hdev->le_features[2], hdev->le_features[3],
-			   hdev->le_features[4], hdev->le_features[5],
-			   hdev->le_features[6], hdev->le_features[7]);
+		seq_printf(f, "LE: %8ph\n", hdev->le_features);
 	hci_dev_unlock(hdev);
 
 	return 0;
 }
 
-static int features_open(struct inode *inode, struct file *file)
-{
-	return single_open(file, features_show, inode->i_private);
-}
-
-static const struct file_operations features_fops = {
-	.open		= features_open,
-	.read		= seq_read,
-	.llseek		= seq_lseek,
-	.release	= single_release,
-};
+DEFINE_SHOW_ATTRIBUTE(features);
 
 static int device_id_show(struct seq_file *f, void *ptr)
 {
@@ -150,17 +132,7 @@ static int device_id_show(struct seq_file *f, void *ptr)
 	return 0;
 }
 
-static int device_id_open(struct inode *inode, struct file *file)
-{
-	return single_open(file, device_id_show, inode->i_private);
-}
-
-static const struct file_operations device_id_fops = {
-	.open		= device_id_open,
-	.read		= seq_read,
-	.llseek		= seq_lseek,
-	.release	= single_release,
-};
+DEFINE_SHOW_ATTRIBUTE(device_id);
 
 static int device_list_show(struct seq_file *f, void *ptr)
 {
@@ -180,17 +152,7 @@ static int device_list_show(struct seq_file *f, void *ptr)
 	return 0;
 }
 
-static int device_list_open(struct inode *inode, struct file *file)
-{
-	return single_open(file, device_list_show, inode->i_private);
-}
-
-static const struct file_operations device_list_fops = {
-	.open		= device_list_open,
-	.read		= seq_read,
-	.llseek		= seq_lseek,
-	.release	= single_release,
-};
+DEFINE_SHOW_ATTRIBUTE(device_list);
 
 static int blacklist_show(struct seq_file *f, void *p)
 {
@@ -205,17 +167,7 @@ static int blacklist_show(struct seq_file *f, void *p)
 	return 0;
 }
 
-static int blacklist_open(struct inode *inode, struct file *file)
-{
-	return single_open(file, blacklist_show, inode->i_private);
-}
-
-static const struct file_operations blacklist_fops = {
-	.open		= blacklist_open,
-	.read		= seq_read,
-	.llseek		= seq_lseek,
-	.release	= single_release,
-};
+DEFINE_SHOW_ATTRIBUTE(blacklist);
 
 static int uuids_show(struct seq_file *f, void *p)
 {
@@ -240,17 +192,7 @@ static int uuids_show(struct seq_file *f, void *p)
        return 0;
 }
 
-static int uuids_open(struct inode *inode, struct file *file)
-{
-	return single_open(file, uuids_show, inode->i_private);
-}
-
-static const struct file_operations uuids_fops = {
-	.open		= uuids_open,
-	.read		= seq_read,
-	.llseek		= seq_lseek,
-	.release	= single_release,
-};
+DEFINE_SHOW_ATTRIBUTE(uuids);
 
 static int remote_oob_show(struct seq_file *f, void *ptr)
 {
@@ -269,17 +211,7 @@ static int remote_oob_show(struct seq_file *f, void *ptr)
 	return 0;
 }
 
-static int remote_oob_open(struct inode *inode, struct file *file)
-{
-	return single_open(file, remote_oob_show, inode->i_private);
-}
-
-static const struct file_operations remote_oob_fops = {
-	.open		= remote_oob_open,
-	.read		= seq_read,
-	.llseek		= seq_lseek,
-	.release	= single_release,
-};
+DEFINE_SHOW_ATTRIBUTE(remote_oob);
 
 static int conn_info_min_age_set(void *data, u64 val)
 {
@@ -443,17 +375,7 @@ static int inquiry_cache_show(struct seq_file *f, void *p)
 	return 0;
 }
 
-static int inquiry_cache_open(struct inode *inode, struct file *file)
-{
-	return single_open(file, inquiry_cache_show, inode->i_private);
-}
-
-static const struct file_operations inquiry_cache_fops = {
-	.open		= inquiry_cache_open,
-	.read		= seq_read,
-	.llseek		= seq_lseek,
-	.release	= single_release,
-};
+DEFINE_SHOW_ATTRIBUTE(inquiry_cache);
 
 static int link_keys_show(struct seq_file *f, void *ptr)
 {
@@ -469,17 +391,7 @@ static int link_keys_show(struct seq_file *f, void *ptr)
 	return 0;
 }
 
-static int link_keys_open(struct inode *inode, struct file *file)
-{
-	return single_open(file, link_keys_show, inode->i_private);
-}
-
-static const struct file_operations link_keys_fops = {
-	.open		= link_keys_open,
-	.read		= seq_read,
-	.llseek		= seq_lseek,
-	.release	= single_release,
-};
+DEFINE_SHOW_ATTRIBUTE(link_keys);
 
 static int dev_class_show(struct seq_file *f, void *ptr)
 {
@@ -493,17 +405,7 @@ static int dev_class_show(struct seq_file *f, void *ptr)
 	return 0;
 }
 
-static int dev_class_open(struct inode *inode, struct file *file)
-{
-	return single_open(file, dev_class_show, inode->i_private);
-}
-
-static const struct file_operations dev_class_fops = {
-	.open		= dev_class_open,
-	.read		= seq_read,
-	.llseek		= seq_lseek,
-	.release	= single_release,
-};
+DEFINE_SHOW_ATTRIBUTE(dev_class);
 
 static int voice_setting_get(void *data, u64 *val)
 {
@@ -692,17 +594,7 @@ static int identity_show(struct seq_file *f, void *p)
 	return 0;
 }
 
-static int identity_open(struct inode *inode, struct file *file)
-{
-	return single_open(file, identity_show, inode->i_private);
-}
-
-static const struct file_operations identity_fops = {
-	.open		= identity_open,
-	.read		= seq_read,
-	.llseek		= seq_lseek,
-	.release	= single_release,
-};
+DEFINE_SHOW_ATTRIBUTE(identity);
 
 static int rpa_timeout_set(void *data, u64 val)
 {
@@ -746,17 +638,7 @@ static int random_address_show(struct seq_file *f, void *p)
 	return 0;
 }
 
-static int random_address_open(struct inode *inode, struct file *file)
-{
-	return single_open(file, random_address_show, inode->i_private);
-}
-
-static const struct file_operations random_address_fops = {
-	.open		= random_address_open,
-	.read		= seq_read,
-	.llseek		= seq_lseek,
-	.release	= single_release,
-};
+DEFINE_SHOW_ATTRIBUTE(random_address);
 
 static int static_address_show(struct seq_file *f, void *p)
 {
@@ -769,17 +651,7 @@ static int static_address_show(struct seq_file *f, void *p)
 	return 0;
 }
 
-static int static_address_open(struct inode *inode, struct file *file)
-{
-	return single_open(file, static_address_show, inode->i_private);
-}
-
-static const struct file_operations static_address_fops = {
-	.open		= static_address_open,
-	.read		= seq_read,
-	.llseek		= seq_lseek,
-	.release	= single_release,
-};
+DEFINE_SHOW_ATTRIBUTE(static_address);
 
 static ssize_t force_static_address_read(struct file *file,
 					 char __user *user_buf,
@@ -841,17 +713,7 @@ static int white_list_show(struct seq_file *f, void *ptr)
 	return 0;
 }
 
-static int white_list_open(struct inode *inode, struct file *file)
-{
-	return single_open(file, white_list_show, inode->i_private);
-}
-
-static const struct file_operations white_list_fops = {
-	.open		= white_list_open,
-	.read		= seq_read,
-	.llseek		= seq_lseek,
-	.release	= single_release,
-};
+DEFINE_SHOW_ATTRIBUTE(white_list);
 
 static int identity_resolving_keys_show(struct seq_file *f, void *ptr)
 {
@@ -869,18 +731,7 @@ static int identity_resolving_keys_show(struct seq_file *f, void *ptr)
 	return 0;
 }
 
-static int identity_resolving_keys_open(struct inode *inode, struct file *file)
-{
-	return single_open(file, identity_resolving_keys_show,
-			   inode->i_private);
-}
-
-static const struct file_operations identity_resolving_keys_fops = {
-	.open		= identity_resolving_keys_open,
-	.read		= seq_read,
-	.llseek		= seq_lseek,
-	.release	= single_release,
-};
+DEFINE_SHOW_ATTRIBUTE(identity_resolving_keys);
 
 static int long_term_keys_show(struct seq_file *f, void *ptr)
 {
@@ -898,17 +749,7 @@ static int long_term_keys_show(struct seq_file *f, void *ptr)
 	return 0;
 }
 
-static int long_term_keys_open(struct inode *inode, struct file *file)
-{
-	return single_open(file, long_term_keys_show, inode->i_private);
-}
-
-static const struct file_operations long_term_keys_fops = {
-	.open		= long_term_keys_open,
-	.read		= seq_read,
-	.llseek		= seq_lseek,
-	.release	= single_release,
-};
+DEFINE_SHOW_ATTRIBUTE(long_term_keys);
 
 static int conn_min_interval_set(void *data, u64 val)
 {
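
Each DEFINE_SHOW_ATTRIBUTE(name) above stands in for one of the removed
_open()/_fops pairs, so the deleted blocks also show what the new local
macro has to expand to, roughly (sketch reconstructed from the removed
code):

    #define DEFINE_SHOW_ATTRIBUTE(__name)                                 \
    static int __name ## _open(struct inode *inode, struct file *file)   \
    {                                                                     \
            return single_open(file, __name ## _show, inode->i_private); \
    }                                                                     \
                                                                          \
    static const struct file_operations __name ## _fops = {              \
            .open           = __name ## _open,                            \
            .read           = seq_read,                                   \
            .llseek         = seq_lseek,                                  \
            .release        = single_release,                             \
    }

The features_show() hunk additionally switches to the %*ph extension:
"%8ph" prints eight bytes as hex pairs in one call, replacing the eight
hand-written "0x%2.2x" conversions (the 0x prefixes are dropped in the
new output).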
diff --git a/net/bluetooth/hci_request.c b/net/bluetooth/hci_request.c
index abc0f32..3394e6791 100644
--- a/net/bluetooth/hci_request.c
+++ b/net/bluetooth/hci_request.c
@@ -919,6 +919,43 @@ static bool adv_use_rpa(struct hci_dev *hdev, uint32_t flags)
 	return true;
 }
 
+static bool is_advertising_allowed(struct hci_dev *hdev, bool connectable)
+{
+	/* If there is no connection we are OK to advertise. */
+	if (hci_conn_num(hdev, LE_LINK) == 0)
+		return true;
+
+	/* Check le_states if there is any connection in slave role. */
+	if (hdev->conn_hash.le_num_slave > 0) {
+		/* Slave connection state and non connectable mode bit 20. */
+		if (!connectable && !(hdev->le_states[2] & 0x10))
+			return false;
+
+		/* Slave connection state and connectable mode bit 38
+		 * and scannable bit 21.
+		 */
+		if (connectable && (!(hdev->le_states[4] & 0x01) ||
+				    !(hdev->le_states[2] & 0x40)))
+			return false;
+	}
+
+	/* Check le_states if there is any connection in master role. */
+	if (hci_conn_num(hdev, LE_LINK) != hdev->conn_hash.le_num_slave) {
+		/* Master connection state and non connectable mode bit 18. */
+		if (!connectable && !(hdev->le_states[2] & 0x02))
+			return false;
+
+		/* Master connection state and connectable mode bit 35 and
+		 * scannable 19.
+		 */
+		if (connectable && (!(hdev->le_states[4] & 0x10) ||
+				    !(hdev->le_states[2] & 0x08)))
+			return false;
+	}
+
+	return true;
+}
+
 void __hci_req_enable_advertising(struct hci_request *req)
 {
 	struct hci_dev *hdev = req->hdev;
@@ -927,7 +964,15 @@ void __hci_req_enable_advertising(struct hci_request *req)
 	bool connectable;
 	u32 flags;
 
-	if (hci_conn_num(hdev, LE_LINK) > 0)
+	flags = get_adv_instance_flags(hdev, hdev->cur_adv_instance);
+
+	/* If the "connectable" instance flag was not set, then choose between
+	 * ADV_IND and ADV_NONCONN_IND based on the global connectable setting.
+	 */
+	connectable = (flags & MGMT_ADV_FLAG_CONNECTABLE) ||
+		      mgmt_get_connectable(hdev);
+
+	if (!is_advertising_allowed(hdev, connectable))
 		return;
 
 	if (hci_dev_test_flag(hdev, HCI_LE_ADV))
@@ -940,14 +985,6 @@ void __hci_req_enable_advertising(struct hci_request *req)
 	 */
 	hci_dev_clear_flag(hdev, HCI_LE_ADV);
 
-	flags = get_adv_instance_flags(hdev, hdev->cur_adv_instance);
-
-	/* If the "connectable" instance flag was not set, then choose between
-	 * ADV_IND and ADV_NONCONN_IND based on the global connectable setting.
-	 */
-	connectable = (flags & MGMT_ADV_FLAG_CONNECTABLE) ||
-		      mgmt_get_connectable(hdev);
-
 	/* Set require_privacy to true only when non-connectable
 	 * advertising is used. In that case it is fine to use a
 	 * non-resolvable private address.
@@ -1985,13 +2022,6 @@ static void le_scan_restart_work(struct work_struct *work)
 	hci_dev_unlock(hdev);
 }
 
-static void disable_advertising(struct hci_request *req)
-{
-	u8 enable = 0x00;
-
-	hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
-}
-
 static int active_scan(struct hci_request *req, unsigned long opt)
 {
 	uint16_t interval = opt;
@@ -2017,7 +2047,7 @@ static int active_scan(struct hci_request *req, unsigned long opt)
 		cancel_adv_timeout(hdev);
 		hci_dev_unlock(hdev);
 
-		disable_advertising(req);
+		__hci_req_disable_advertising(req);
 	}
 
 	/* If controller is scanning, it means the background scanning is
diff --git a/net/bridge/br_device.c b/net/bridge/br_device.c
index af5b8c8..1285ca3 100644
--- a/net/bridge/br_device.c
+++ b/net/bridge/br_device.c
@@ -125,9 +125,16 @@ static int br_dev_init(struct net_device *dev)
 	if (!br->stats)
 		return -ENOMEM;
 
+	err = br_fdb_hash_init(br);
+	if (err) {
+		free_percpu(br->stats);
+		return err;
+	}
+
 	err = br_vlan_init(br);
 	if (err) {
 		free_percpu(br->stats);
+		br_fdb_hash_fini(br);
 		return err;
 	}
 
@@ -135,6 +142,7 @@ static int br_dev_init(struct net_device *dev)
 	if (err) {
 		free_percpu(br->stats);
 		br_vlan_flush(br);
+		br_fdb_hash_fini(br);
 	}
 	br_set_lockdep_class(dev);
 
@@ -148,6 +156,7 @@ static void br_dev_uninit(struct net_device *dev)
 	br_multicast_dev_del(br);
 	br_multicast_uninit_stats(br);
 	br_vlan_flush(br);
+	br_fdb_hash_fini(br);
 	free_percpu(br->stats);
 }
 
@@ -416,6 +425,7 @@ void br_dev_setup(struct net_device *dev)
 	br->dev = dev;
 	spin_lock_init(&br->lock);
 	INIT_LIST_HEAD(&br->port_list);
+	INIT_HLIST_HEAD(&br->fdb_list);
 	spin_lock_init(&br->hash_lock);
 
 	br->bridge_id.prio[0] = 0x80;
diff --git a/net/bridge/br_fdb.c b/net/bridge/br_fdb.c
index 4ea5c8b..dc87fbc 100644
--- a/net/bridge/br_fdb.c
+++ b/net/bridge/br_fdb.c
@@ -28,14 +28,20 @@
 #include <trace/events/bridge.h>
 #include "br_private.h"
 
+static const struct rhashtable_params br_fdb_rht_params = {
+	.head_offset = offsetof(struct net_bridge_fdb_entry, rhnode),
+	.key_offset = offsetof(struct net_bridge_fdb_entry, key),
+	.key_len = sizeof(struct net_bridge_fdb_key),
+	.automatic_shrinking = true,
+	.locks_mul = 1,
+};
+
 static struct kmem_cache *br_fdb_cache __read_mostly;
 static int fdb_insert(struct net_bridge *br, struct net_bridge_port *source,
 		      const unsigned char *addr, u16 vid);
 static void fdb_notify(struct net_bridge *br,
 		       const struct net_bridge_fdb_entry *, int);
 
-static u32 fdb_salt __read_mostly;
-
 int __init br_fdb_init(void)
 {
 	br_fdb_cache = kmem_cache_create("bridge_fdb_cache",
@@ -45,7 +51,6 @@ int __init br_fdb_init(void)
 	if (!br_fdb_cache)
 		return -ENOMEM;
 
-	get_random_bytes(&fdb_salt, sizeof(fdb_salt));
 	return 0;
 }
 
@@ -54,6 +59,15 @@ void br_fdb_fini(void)
 	kmem_cache_destroy(br_fdb_cache);
 }
 
+int br_fdb_hash_init(struct net_bridge *br)
+{
+	return rhashtable_init(&br->fdb_hash_tbl, &br_fdb_rht_params);
+}
+
+void br_fdb_hash_fini(struct net_bridge *br)
+{
+	rhashtable_destroy(&br->fdb_hash_tbl);
+}
 
 /* if topology_changing then use forward_delay (default 15 sec)
  * otherwise keep longer (default 5 minutes)
@@ -70,13 +84,6 @@ static inline int has_expired(const struct net_bridge *br,
 		time_before_eq(fdb->updated + hold_time(br), jiffies);
 }
 
-static inline int br_mac_hash(const unsigned char *mac, __u16 vid)
-{
-	/* use 1 byte of OUI and 3 bytes of NIC */
-	u32 key = get_unaligned((u32 *)(mac + 2));
-	return jhash_2words(key, vid, fdb_salt) & (BR_HASH_SIZE - 1);
-}
-
 static void fdb_rcu_free(struct rcu_head *head)
 {
 	struct net_bridge_fdb_entry *ent
@@ -84,19 +91,18 @@ static void fdb_rcu_free(struct rcu_head *head)
 	kmem_cache_free(br_fdb_cache, ent);
 }
 
-static struct net_bridge_fdb_entry *fdb_find_rcu(struct hlist_head *head,
+static struct net_bridge_fdb_entry *fdb_find_rcu(struct rhashtable *tbl,
 						 const unsigned char *addr,
 						 __u16 vid)
 {
-	struct net_bridge_fdb_entry *f;
+	struct net_bridge_fdb_key key;
 
 	WARN_ON_ONCE(!rcu_read_lock_held());
 
-	hlist_for_each_entry_rcu(f, head, hlist)
-		if (ether_addr_equal(f->addr.addr, addr) && f->vlan_id == vid)
-			break;
+	key.vlan_id = vid;
+	memcpy(key.addr.addr, addr, sizeof(key.addr.addr));
 
-	return f;
+	return rhashtable_lookup(tbl, &key, br_fdb_rht_params);
 }
 
 /* requires bridge hash_lock */
@@ -104,13 +110,12 @@ static struct net_bridge_fdb_entry *br_fdb_find(struct net_bridge *br,
 						const unsigned char *addr,
 						__u16 vid)
 {
-	struct hlist_head *head = &br->hash[br_mac_hash(addr, vid)];
 	struct net_bridge_fdb_entry *fdb;
 
 	lockdep_assert_held_once(&br->hash_lock);
 
 	rcu_read_lock();
-	fdb = fdb_find_rcu(head, addr, vid);
+	fdb = fdb_find_rcu(&br->fdb_hash_tbl, addr, vid);
 	rcu_read_unlock();
 
 	return fdb;
@@ -120,9 +125,7 @@ struct net_bridge_fdb_entry *br_fdb_find_rcu(struct net_bridge *br,
 					     const unsigned char *addr,
 					     __u16 vid)
 {
-	struct hlist_head *head = &br->hash[br_mac_hash(addr, vid)];
-
-	return fdb_find_rcu(head, addr, vid);
+	return fdb_find_rcu(&br->fdb_hash_tbl, addr, vid);
 }
 
 /* When a static FDB entry is added, the mac address from the entry is
@@ -175,9 +178,11 @@ static void fdb_delete(struct net_bridge *br, struct net_bridge_fdb_entry *f)
 	trace_fdb_delete(br, f);
 
 	if (f->is_static)
-		fdb_del_hw_addr(br, f->addr.addr);
+		fdb_del_hw_addr(br, f->key.addr.addr);
 
-	hlist_del_init_rcu(&f->hlist);
+	hlist_del_init_rcu(&f->fdb_node);
+	rhashtable_remove_fast(&br->fdb_hash_tbl, &f->rhnode,
+			       br_fdb_rht_params);
 	fdb_notify(br, f, RTM_DELNEIGH);
 	call_rcu(&f->rcu, fdb_rcu_free);
 }
@@ -187,11 +192,11 @@ static void fdb_delete_local(struct net_bridge *br,
 			     const struct net_bridge_port *p,
 			     struct net_bridge_fdb_entry *f)
 {
-	const unsigned char *addr = f->addr.addr;
+	const unsigned char *addr = f->key.addr.addr;
 	struct net_bridge_vlan_group *vg;
 	const struct net_bridge_vlan *v;
 	struct net_bridge_port *op;
-	u16 vid = f->vlan_id;
+	u16 vid = f->key.vlan_id;
 
 	/* Maybe another port has same hw addr? */
 	list_for_each_entry(op, &br->port_list, list) {
@@ -233,31 +238,23 @@ void br_fdb_find_delete_local(struct net_bridge *br,
 void br_fdb_changeaddr(struct net_bridge_port *p, const unsigned char *newaddr)
 {
 	struct net_bridge_vlan_group *vg;
+	struct net_bridge_fdb_entry *f;
 	struct net_bridge *br = p->br;
 	struct net_bridge_vlan *v;
-	int i;
 
 	spin_lock_bh(&br->hash_lock);
-
 	vg = nbp_vlan_group(p);
-	/* Search all chains since old address/hash is unknown */
-	for (i = 0; i < BR_HASH_SIZE; i++) {
-		struct hlist_node *h;
-		hlist_for_each(h, &br->hash[i]) {
-			struct net_bridge_fdb_entry *f;
+	hlist_for_each_entry(f, &br->fdb_list, fdb_node) {
+		if (f->dst == p && f->is_local && !f->added_by_user) {
+			/* delete old one */
+			fdb_delete_local(br, p, f);
 
-			f = hlist_entry(h, struct net_bridge_fdb_entry, hlist);
-			if (f->dst == p && f->is_local && !f->added_by_user) {
-				/* delete old one */
-				fdb_delete_local(br, p, f);
-
-				/* if this port has no vlan information
-				 * configured, we can safely be done at
-				 * this point.
-				 */
-				if (!vg || !vg->num_vlans)
-					goto insert;
-			}
+			/* if this port has no vlan information
+			 * configured, we can safely be done at
+			 * this point.
+			 */
+			if (!vg || !vg->num_vlans)
+				goto insert;
 		}
 	}
 
@@ -316,35 +313,32 @@ void br_fdb_cleanup(struct work_struct *work)
 {
 	struct net_bridge *br = container_of(work, struct net_bridge,
 					     gc_work.work);
+	struct net_bridge_fdb_entry *f = NULL;
 	unsigned long delay = hold_time(br);
 	unsigned long work_delay = delay;
 	unsigned long now = jiffies;
-	int i;
 
-	for (i = 0; i < BR_HASH_SIZE; i++) {
-		struct net_bridge_fdb_entry *f;
-		struct hlist_node *n;
+	/* this part is tricky: in order to avoid blocking learning and,
+	 * consequently, forwarding, we rely on RCU to delete objects with
+	 * delayed freeing, allowing us to continue traversing
+	 */
+	rcu_read_lock();
+	hlist_for_each_entry_rcu(f, &br->fdb_list, fdb_node) {
+		unsigned long this_timer;
 
-		if (!br->hash[i].first)
+		if (f->is_static || f->added_by_external_learn)
 			continue;
-
-		spin_lock_bh(&br->hash_lock);
-		hlist_for_each_entry_safe(f, n, &br->hash[i], hlist) {
-			unsigned long this_timer;
-
-			if (f->is_static)
-				continue;
-			if (f->added_by_external_learn)
-				continue;
-			this_timer = f->updated + delay;
-			if (time_after(this_timer, now))
-				work_delay = min(work_delay, this_timer - now);
-			else
+		this_timer = f->updated + delay;
+		if (time_after(this_timer, now)) {
+			work_delay = min(work_delay, this_timer - now);
+		} else {
+			spin_lock_bh(&br->hash_lock);
+			if (!hlist_unhashed(&f->fdb_node))
 				fdb_delete(br, f);
+			spin_unlock_bh(&br->hash_lock);
 		}
-		spin_unlock_bh(&br->hash_lock);
-		cond_resched();
 	}
+	rcu_read_unlock();
 
 	/* Cleanup minimum 10 milliseconds apart */
 	work_delay = max_t(unsigned long, work_delay, msecs_to_jiffies(10));
@@ -354,16 +348,13 @@ void br_fdb_cleanup(struct work_struct *work)
 /* Completely flush all dynamic entries in forwarding database.*/
 void br_fdb_flush(struct net_bridge *br)
 {
-	int i;
+	struct net_bridge_fdb_entry *f;
+	struct hlist_node *tmp;
 
 	spin_lock_bh(&br->hash_lock);
-	for (i = 0; i < BR_HASH_SIZE; i++) {
-		struct net_bridge_fdb_entry *f;
-		struct hlist_node *n;
-		hlist_for_each_entry_safe(f, n, &br->hash[i], hlist) {
-			if (!f->is_static)
-				fdb_delete(br, f);
-		}
+	hlist_for_each_entry_safe(f, tmp, &br->fdb_list, fdb_node) {
+		if (!f->is_static)
+			fdb_delete(br, f);
 	}
 	spin_unlock_bh(&br->hash_lock);
 }
@@ -377,27 +368,22 @@ void br_fdb_delete_by_port(struct net_bridge *br,
 			   u16 vid,
 			   int do_all)
 {
-	int i;
+	struct net_bridge_fdb_entry *f;
+	struct hlist_node *tmp;
 
 	spin_lock_bh(&br->hash_lock);
-	for (i = 0; i < BR_HASH_SIZE; i++) {
-		struct hlist_node *h, *g;
+	hlist_for_each_entry_safe(f, tmp, &br->fdb_list, fdb_node) {
+		if (f->dst != p)
+			continue;
 
-		hlist_for_each_safe(h, g, &br->hash[i]) {
-			struct net_bridge_fdb_entry *f
-				= hlist_entry(h, struct net_bridge_fdb_entry, hlist);
-			if (f->dst != p)
+		if (!do_all)
+			if (f->is_static || (vid && f->key.vlan_id != vid))
 				continue;
 
-			if (!do_all)
-				if (f->is_static || (vid && f->vlan_id != vid))
-					continue;
-
-			if (f->is_local)
-				fdb_delete_local(br, p, f);
-			else
-				fdb_delete(br, f);
-		}
+		if (f->is_local)
+			fdb_delete_local(br, p, f);
+		else
+			fdb_delete(br, f);
 	}
 	spin_unlock_bh(&br->hash_lock);
 }
@@ -433,52 +419,48 @@ int br_fdb_test_addr(struct net_device *dev, unsigned char *addr)
 int br_fdb_fillbuf(struct net_bridge *br, void *buf,
 		   unsigned long maxnum, unsigned long skip)
 {
-	struct __fdb_entry *fe = buf;
-	int i, num = 0;
 	struct net_bridge_fdb_entry *f;
+	struct __fdb_entry *fe = buf;
+	int num = 0;
 
 	memset(buf, 0, maxnum*sizeof(struct __fdb_entry));
 
 	rcu_read_lock();
-	for (i = 0; i < BR_HASH_SIZE; i++) {
-		hlist_for_each_entry_rcu(f, &br->hash[i], hlist) {
-			if (num >= maxnum)
-				goto out;
+	hlist_for_each_entry_rcu(f, &br->fdb_list, fdb_node) {
+		if (num >= maxnum)
+			break;
 
-			if (has_expired(br, f))
-				continue;
+		if (has_expired(br, f))
+			continue;
 
-			/* ignore pseudo entry for local MAC address */
-			if (!f->dst)
-				continue;
+		/* ignore pseudo entry for local MAC address */
+		if (!f->dst)
+			continue;
 
-			if (skip) {
-				--skip;
-				continue;
-			}
-
-			/* convert from internal format to API */
-			memcpy(fe->mac_addr, f->addr.addr, ETH_ALEN);
-
-			/* due to ABI compat need to split into hi/lo */
-			fe->port_no = f->dst->port_no;
-			fe->port_hi = f->dst->port_no >> 8;
-
-			fe->is_local = f->is_local;
-			if (!f->is_static)
-				fe->ageing_timer_value = jiffies_delta_to_clock_t(jiffies - f->updated);
-			++fe;
-			++num;
+		if (skip) {
+			--skip;
+			continue;
 		}
-	}
 
- out:
+		/* convert from internal format to API */
+		memcpy(fe->mac_addr, f->key.addr.addr, ETH_ALEN);
+
+		/* due to ABI compat need to split into hi/lo */
+		fe->port_no = f->dst->port_no;
+		fe->port_hi = f->dst->port_no >> 8;
+
+		fe->is_local = f->is_local;
+		if (!f->is_static)
+			fe->ageing_timer_value = jiffies_delta_to_clock_t(jiffies - f->updated);
+		++fe;
+		++num;
+	}
 	rcu_read_unlock();
 
 	return num;
 }
 
-static struct net_bridge_fdb_entry *fdb_create(struct hlist_head *head,
+static struct net_bridge_fdb_entry *fdb_create(struct net_bridge *br,
 					       struct net_bridge_port *source,
 					       const unsigned char *addr,
 					       __u16 vid,
@@ -489,16 +471,23 @@ static struct net_bridge_fdb_entry *fdb_create(struct hlist_head *head,
 
 	fdb = kmem_cache_alloc(br_fdb_cache, GFP_ATOMIC);
 	if (fdb) {
-		memcpy(fdb->addr.addr, addr, ETH_ALEN);
+		memcpy(fdb->key.addr.addr, addr, ETH_ALEN);
 		fdb->dst = source;
-		fdb->vlan_id = vid;
+		fdb->key.vlan_id = vid;
 		fdb->is_local = is_local;
 		fdb->is_static = is_static;
 		fdb->added_by_user = 0;
 		fdb->added_by_external_learn = 0;
 		fdb->offloaded = 0;
 		fdb->updated = fdb->used = jiffies;
-		hlist_add_head_rcu(&fdb->hlist, head);
+		if (rhashtable_lookup_insert_fast(&br->fdb_hash_tbl,
+						  &fdb->rhnode,
+						  br_fdb_rht_params)) {
+			kmem_cache_free(br_fdb_cache, fdb);
+			fdb = NULL;
+		} else {
+			hlist_add_head_rcu(&fdb->fdb_node, &br->fdb_list);
+		}
 	}
 	return fdb;
 }
@@ -506,7 +495,6 @@ static struct net_bridge_fdb_entry *fdb_create(struct hlist_head *head,
 static int fdb_insert(struct net_bridge *br, struct net_bridge_port *source,
 		  const unsigned char *addr, u16 vid)
 {
-	struct hlist_head *head = &br->hash[br_mac_hash(addr, vid)];
 	struct net_bridge_fdb_entry *fdb;
 
 	if (!is_valid_ether_addr(addr))
@@ -524,7 +512,7 @@ static int fdb_insert(struct net_bridge *br, struct net_bridge_port *source,
 		fdb_delete(br, fdb);
 	}
 
-	fdb = fdb_create(head, source, addr, vid, 1, 1);
+	fdb = fdb_create(br, source, addr, vid, 1, 1);
 	if (!fdb)
 		return -ENOMEM;
 
@@ -548,7 +536,6 @@ int br_fdb_insert(struct net_bridge *br, struct net_bridge_port *source,
 void br_fdb_update(struct net_bridge *br, struct net_bridge_port *source,
 		   const unsigned char *addr, u16 vid, bool added_by_user)
 {
-	struct hlist_head *head = &br->hash[br_mac_hash(addr, vid)];
 	struct net_bridge_fdb_entry *fdb;
 	bool fdb_modified = false;
 
@@ -561,7 +548,7 @@ void br_fdb_update(struct net_bridge *br, struct net_bridge_port *source,
 	      source->state == BR_STATE_FORWARDING))
 		return;
 
-	fdb = fdb_find_rcu(head, addr, vid);
+	fdb = fdb_find_rcu(&br->fdb_hash_tbl, addr, vid);
 	if (likely(fdb)) {
 		/* attempt to update an entry for a local interface */
 		if (unlikely(fdb->is_local)) {
@@ -590,14 +577,13 @@ void br_fdb_update(struct net_bridge *br, struct net_bridge_port *source,
 		}
 	} else {
 		spin_lock(&br->hash_lock);
-		if (likely(!fdb_find_rcu(head, addr, vid))) {
-			fdb = fdb_create(head, source, addr, vid, 0, 0);
-			if (fdb) {
-				if (unlikely(added_by_user))
-					fdb->added_by_user = 1;
-				trace_br_fdb_update(br, source, addr, vid, added_by_user);
-				fdb_notify(br, fdb, RTM_NEWNEIGH);
-			}
+		fdb = fdb_create(br, source, addr, vid, 0, 0);
+		if (fdb) {
+			if (unlikely(added_by_user))
+				fdb->added_by_user = 1;
+			trace_br_fdb_update(br, source, addr, vid,
+					    added_by_user);
+			fdb_notify(br, fdb, RTM_NEWNEIGH);
 		}
 		/* else  we lose race and someone else inserts
 		 * it first, don't bother updating
@@ -646,7 +632,7 @@ static int fdb_fill_info(struct sk_buff *skb, const struct net_bridge *br,
 	if (fdb->added_by_external_learn)
 		ndm->ndm_flags |= NTF_EXT_LEARNED;
 
-	if (nla_put(skb, NDA_LLADDR, ETH_ALEN, &fdb->addr))
+	if (nla_put(skb, NDA_LLADDR, ETH_ALEN, &fdb->key.addr))
 		goto nla_put_failure;
 	if (nla_put_u32(skb, NDA_MASTER, br->dev->ifindex))
 		goto nla_put_failure;
@@ -657,7 +643,8 @@ static int fdb_fill_info(struct sk_buff *skb, const struct net_bridge *br,
 	if (nla_put(skb, NDA_CACHEINFO, sizeof(ci), &ci))
 		goto nla_put_failure;
 
-	if (fdb->vlan_id && nla_put(skb, NDA_VLAN, sizeof(u16), &fdb->vlan_id))
+	if (fdb->key.vlan_id && nla_put(skb, NDA_VLAN, sizeof(u16),
+					&fdb->key.vlan_id))
 		goto nla_put_failure;
 
 	nlmsg_end(skb, nlh);
@@ -711,54 +698,48 @@ int br_fdb_dump(struct sk_buff *skb,
 		int *idx)
 {
 	struct net_bridge *br = netdev_priv(dev);
+	struct net_bridge_fdb_entry *f;
 	int err = 0;
-	int i;
 
 	if (!(dev->priv_flags & IFF_EBRIDGE))
-		goto out;
+		return err;
 
 	if (!filter_dev) {
 		err = ndo_dflt_fdb_dump(skb, cb, dev, NULL, idx);
 		if (err < 0)
-			goto out;
+			return err;
 	}
 
-	for (i = 0; i < BR_HASH_SIZE; i++) {
-		struct net_bridge_fdb_entry *f;
-
-		hlist_for_each_entry_rcu(f, &br->hash[i], hlist) {
-
-			if (*idx < cb->args[2])
+	rcu_read_lock();
+	hlist_for_each_entry_rcu(f, &br->fdb_list, fdb_node) {
+		if (*idx < cb->args[2])
+			goto skip;
+		if (filter_dev && (!f->dst || f->dst->dev != filter_dev)) {
+			if (filter_dev != dev)
 				goto skip;
-
-			if (filter_dev &&
-			    (!f->dst || f->dst->dev != filter_dev)) {
-				if (filter_dev != dev)
-					goto skip;
-				/* !f->dst is a special case for bridge
-				 * It means the MAC belongs to the bridge
-				 * Therefore need a little more filtering
-				 * we only want to dump the !f->dst case
-				 */
-				if (f->dst)
-					goto skip;
-			}
-			if (!filter_dev && f->dst)
+			/* !f->dst is a special case for bridge
+			 * It means the MAC belongs to the bridge
+			 * Therefore need a little more filtering
+			 * we only want to dump the !f->dst case
+			 */
+			if (f->dst)
 				goto skip;
-
-			err = fdb_fill_info(skb, br, f,
-					    NETLINK_CB(cb->skb).portid,
-					    cb->nlh->nlmsg_seq,
-					    RTM_NEWNEIGH,
-					    NLM_F_MULTI);
-			if (err < 0)
-				goto out;
-skip:
-			*idx += 1;
 		}
-	}
+		if (!filter_dev && f->dst)
+			goto skip;
 
-out:
+		err = fdb_fill_info(skb, br, f,
+				    NETLINK_CB(cb->skb).portid,
+				    cb->nlh->nlmsg_seq,
+				    RTM_NEWNEIGH,
+				    NLM_F_MULTI);
+		if (err < 0)
+			break;
+skip:
+		*idx += 1;
+	}
+	rcu_read_unlock();
+
 	return err;
 }
 
@@ -766,7 +747,6 @@ int br_fdb_dump(struct sk_buff *skb,
 static int fdb_add_entry(struct net_bridge *br, struct net_bridge_port *source,
 			 const __u8 *addr, __u16 state, __u16 flags, __u16 vid)
 {
-	struct hlist_head *head = &br->hash[br_mac_hash(addr, vid)];
 	struct net_bridge_fdb_entry *fdb;
 	bool modified = false;
 
@@ -787,7 +767,7 @@ static int fdb_add_entry(struct net_bridge *br, struct net_bridge_port *source,
 		if (!(flags & NLM_F_CREATE))
 			return -ENOENT;
 
-		fdb = fdb_create(head, source, addr, vid, 0, 0);
+		fdb = fdb_create(br, source, addr, vid, 0, 0);
 		if (!fdb)
 			return -ENOMEM;
 
@@ -1012,65 +992,60 @@ int br_fdb_delete(struct ndmsg *ndm, struct nlattr *tb[],
 
 int br_fdb_sync_static(struct net_bridge *br, struct net_bridge_port *p)
 {
-	struct net_bridge_fdb_entry *fdb, *tmp;
-	int i;
-	int err;
+	struct net_bridge_fdb_entry *f, *tmp;
+	int err = 0;
 
 	ASSERT_RTNL();
 
-	for (i = 0; i < BR_HASH_SIZE; i++) {
-		hlist_for_each_entry(fdb, &br->hash[i], hlist) {
-			/* We only care for static entries */
-			if (!fdb->is_static)
-				continue;
-
-			err = dev_uc_add(p->dev, fdb->addr.addr);
-			if (err)
-				goto rollback;
-		}
+	/* the key here is that static entries change only under rtnl */
+	rcu_read_lock();
+	hlist_for_each_entry_rcu(f, &br->fdb_list, fdb_node) {
+		/* We only care for static entries */
+		if (!f->is_static)
+			continue;
+		err = dev_uc_add(p->dev, f->key.addr.addr);
+		if (err)
+			goto rollback;
 	}
-	return 0;
+done:
+	rcu_read_unlock();
+
+	return err;
 
 rollback:
-	for (i = 0; i < BR_HASH_SIZE; i++) {
-		hlist_for_each_entry(tmp, &br->hash[i], hlist) {
-			/* If we reached the fdb that failed, we can stop */
-			if (tmp == fdb)
-				break;
-
-			/* We only care for static entries */
-			if (!tmp->is_static)
-				continue;
-
-			dev_uc_del(p->dev, tmp->addr.addr);
-		}
+	hlist_for_each_entry_rcu(tmp, &br->fdb_list, fdb_node) {
+		/* We only care for static entries */
+		if (!tmp->is_static)
+			continue;
+		if (tmp == f)
+			break;
+		dev_uc_del(p->dev, tmp->key.addr.addr);
 	}
-	return err;
+
+	goto done;
 }
 
 void br_fdb_unsync_static(struct net_bridge *br, struct net_bridge_port *p)
 {
-	struct net_bridge_fdb_entry *fdb;
-	int i;
+	struct net_bridge_fdb_entry *f;
 
 	ASSERT_RTNL();
 
-	for (i = 0; i < BR_HASH_SIZE; i++) {
-		hlist_for_each_entry_rcu(fdb, &br->hash[i], hlist) {
-			/* We only care for static entries */
-			if (!fdb->is_static)
-				continue;
+	rcu_read_lock();
+	hlist_for_each_entry_rcu(f, &br->fdb_list, fdb_node) {
+		/* We only care for static entries */
+		if (!f->is_static)
+			continue;
 
-			dev_uc_del(p->dev, fdb->addr.addr);
-		}
+		dev_uc_del(p->dev, f->key.addr.addr);
 	}
+	rcu_read_unlock();
 }
 
 int br_fdb_external_learn_add(struct net_bridge *br, struct net_bridge_port *p,
 			      const unsigned char *addr, u16 vid)
 {
 	struct net_bridge_fdb_entry *fdb;
-	struct hlist_head *head;
 	bool modified = false;
 	int err = 0;
 
@@ -1078,10 +1053,9 @@ int br_fdb_external_learn_add(struct net_bridge *br, struct net_bridge_port *p,
 
 	spin_lock_bh(&br->hash_lock);
 
-	head = &br->hash[br_mac_hash(addr, vid)];
 	fdb = br_fdb_find(br, addr, vid);
 	if (!fdb) {
-		fdb = fdb_create(head, p, addr, vid, 0, 0);
+		fdb = fdb_create(br, p, addr, vid, 0, 0);
 		if (!fdb) {
 			err = -ENOMEM;
 			goto err_unlock;
diff --git a/net/bridge/br_mdb.c b/net/bridge/br_mdb.c
index b0f4c73..6d9f48b 100644
--- a/net/bridge/br_mdb.c
+++ b/net/bridge/br_mdb.c
@@ -760,9 +760,9 @@ static int br_mdb_del(struct sk_buff *skb, struct nlmsghdr *nlh,
 
 void br_mdb_init(void)
 {
-	rtnl_register(PF_BRIDGE, RTM_GETMDB, NULL, br_mdb_dump, 0);
-	rtnl_register(PF_BRIDGE, RTM_NEWMDB, br_mdb_add, NULL, 0);
-	rtnl_register(PF_BRIDGE, RTM_DELMDB, br_mdb_del, NULL, 0);
+	rtnl_register_module(THIS_MODULE, PF_BRIDGE, RTM_GETMDB, NULL, br_mdb_dump, 0);
+	rtnl_register_module(THIS_MODULE, PF_BRIDGE, RTM_NEWMDB, br_mdb_add, NULL, 0);
+	rtnl_register_module(THIS_MODULE, PF_BRIDGE, RTM_DELMDB, br_mdb_del, NULL, 0);
 }
 
 void br_mdb_uninit(void)
diff --git a/net/bridge/br_netfilter_hooks.c b/net/bridge/br_netfilter_hooks.c
index c2eea1b..27f1d4f 100644
--- a/net/bridge/br_netfilter_hooks.c
+++ b/net/bridge/br_netfilter_hooks.c
@@ -991,7 +991,7 @@ int br_nf_hook_thresh(unsigned int hook, struct net *net,
 	unsigned int i;
 	int ret;
 
-	e = rcu_dereference(net->nf.hooks[NFPROTO_BRIDGE][hook]);
+	e = rcu_dereference(net->nf.hooks_bridge[hook]);
 	if (!e)
 		return okfn(net, sk, skb);
 
diff --git a/net/bridge/br_nf_core.c b/net/bridge/br_nf_core.c
index 20cbb72..8e2d7cf 100644
--- a/net/bridge/br_nf_core.c
+++ b/net/bridge/br_nf_core.c
@@ -78,7 +78,6 @@ void br_netfilter_rtable_init(struct net_bridge *br)
 
 	atomic_set(&rt->dst.__refcnt, 1);
 	rt->dst.dev = br->dev;
-	rt->dst.path = &rt->dst;
 	dst_init_metrics(&rt->dst, br_dst_default_metrics, true);
 	rt->dst.flags	= DST_NOXFRM | DST_FAKE_RTABLE;
 	rt->dst.ops = &fake_dst_ops;
diff --git a/net/bridge/br_private.h b/net/bridge/br_private.h
index 1312b8d..80559fd 100644
--- a/net/bridge/br_private.h
+++ b/net/bridge/br_private.h
@@ -168,12 +168,17 @@ struct net_bridge_vlan_group {
 	u16				pvid;
 };
 
+struct net_bridge_fdb_key {
+	mac_addr addr;
+	u16 vlan_id;
+};
+
 struct net_bridge_fdb_entry {
-	struct hlist_node		hlist;
+	struct rhash_head		rhnode;
 	struct net_bridge_port		*dst;
 
-	mac_addr			addr;
-	__u16				vlan_id;
+	struct net_bridge_fdb_key	key;
+	struct hlist_node		fdb_node;
 	unsigned char			is_local:1,
 					is_static:1,
 					added_by_user:1,
@@ -315,7 +320,7 @@ struct net_bridge {
 	struct net_bridge_vlan_group	__rcu *vlgrp;
 #endif
 
-	struct hlist_head		hash[BR_HASH_SIZE];
+	struct rhashtable		fdb_hash_tbl;
 #if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
 	union {
 		struct rtable		fake_rtable;
@@ -405,6 +410,7 @@ struct net_bridge {
 	int offload_fwd_mark;
 #endif
 	bool				neigh_suppress_enabled;
+	struct hlist_head		fdb_list;
 };
 
 struct br_input_skb_cb {
@@ -515,6 +521,8 @@ static inline void br_netpoll_disable(struct net_bridge_port *p)
 /* br_fdb.c */
 int br_fdb_init(void);
 void br_fdb_fini(void);
+int br_fdb_hash_init(struct net_bridge *br);
+void br_fdb_hash_fini(struct net_bridge *br);
 void br_fdb_flush(struct net_bridge *br);
 void br_fdb_find_delete_local(struct net_bridge *br,
 			      const struct net_bridge_port *p,
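
The br_private.h hunk shows why every entry keeps two linkages: the
rhash_head serves keyed lookup on (MAC, VLAN), while the separate
fdb_node/fdb_list hlist gives the dump, flush, and cleanup paths a plain
list they can walk under RCU or hash_lock without an rhashtable walker.
The general shape, sketched:

    struct dual_indexed_entry {
            struct rhash_head rhnode;       /* O(1) lookup by key */
            struct hlist_node list_node;    /* stable full traversal */
            /* ... payload ... */
    };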
diff --git a/net/bridge/br_switchdev.c b/net/bridge/br_switchdev.c
index 9700e0f..ee775f4 100644
--- a/net/bridge/br_switchdev.c
+++ b/net/bridge/br_switchdev.c
@@ -121,13 +121,13 @@ br_switchdev_fdb_notify(const struct net_bridge_fdb_entry *fdb, int type)
 
 	switch (type) {
 	case RTM_DELNEIGH:
-		br_switchdev_fdb_call_notifiers(false, fdb->addr.addr,
-						fdb->vlan_id,
+		br_switchdev_fdb_call_notifiers(false, fdb->key.addr.addr,
+						fdb->key.vlan_id,
 						fdb->dst->dev);
 		break;
 	case RTM_NEWNEIGH:
-		br_switchdev_fdb_call_notifiers(true, fdb->addr.addr,
-						fdb->vlan_id,
+		br_switchdev_fdb_call_notifiers(true, fdb->key.addr.addr,
+						fdb->key.vlan_id,
 						fdb->dst->dev);
 		break;
 	}
diff --git a/net/bridge/br_sysfs_br.c b/net/bridge/br_sysfs_br.c
index 723f25e..b1be0dc 100644
--- a/net/bridge/br_sysfs_br.c
+++ b/net/bridge/br_sysfs_br.c
@@ -272,10 +272,7 @@ static ssize_t group_addr_show(struct device *d,
 			       struct device_attribute *attr, char *buf)
 {
 	struct net_bridge *br = to_bridge(d);
-	return sprintf(buf, "%x:%x:%x:%x:%x:%x\n",
-		       br->group_addr[0], br->group_addr[1],
-		       br->group_addr[2], br->group_addr[3],
-		       br->group_addr[4], br->group_addr[5]);
+	return sprintf(buf, "%pM\n", br->group_addr);
 }
 
 static ssize_t group_addr_store(struct device *d,
@@ -284,14 +281,11 @@ static ssize_t group_addr_store(struct device *d,
 {
 	struct net_bridge *br = to_bridge(d);
 	u8 new_addr[6];
-	int i;
 
 	if (!ns_capable(dev_net(br->dev)->user_ns, CAP_NET_ADMIN))
 		return -EPERM;
 
-	if (sscanf(buf, "%hhx:%hhx:%hhx:%hhx:%hhx:%hhx",
-		   &new_addr[0], &new_addr[1], &new_addr[2],
-		   &new_addr[3], &new_addr[4], &new_addr[5]) != 6)
+	if (!mac_pton(buf, new_addr))
 		return -EINVAL;
 
 	if (!is_link_local_ether_addr(new_addr))
@@ -306,8 +300,7 @@ static ssize_t group_addr_store(struct device *d,
 		return restart_syscall();
 
 	spin_lock_bh(&br->lock);
-	for (i = 0; i < 6; i++)
-		br->group_addr[i] = new_addr[i];
+	ether_addr_copy(br->group_addr, new_addr);
 	spin_unlock_bh(&br->lock);
 
 	br->group_addr_set = true;
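
The sysfs hunks swap hand-rolled MAC handling for the standard helpers:
"%pM" prints six bytes in aa:bb:cc:dd:ee:ff form, mac_pton() parses that
form and returns false on malformed input, and ether_addr_copy() replaces
the byte-copy loop. A round-trip sketch:

    u8 mac[ETH_ALEN];
    char buf[32];

    if (!mac_pton("00:11:22:33:44:55", mac))
            return -EINVAL;                 /* reject malformed input */

    snprintf(buf, sizeof(buf), "%pM\n", mac); /* "00:11:22:33:44:55\n" */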
diff --git a/net/bridge/netfilter/Kconfig b/net/bridge/netfilter/Kconfig
index e7ef1a1..225d166 100644
--- a/net/bridge/netfilter/Kconfig
+++ b/net/bridge/netfilter/Kconfig
@@ -4,6 +4,7 @@
 #
 menuconfig NF_TABLES_BRIDGE
 	depends on BRIDGE && NETFILTER && NF_TABLES
+	select NETFILTER_FAMILY_BRIDGE
 	tristate "Ethernet Bridge nf_tables support"
 
 if NF_TABLES_BRIDGE
@@ -29,6 +30,7 @@
 menuconfig BRIDGE_NF_EBTABLES
 	tristate "Ethernet Bridge tables (ebtables) support"
 	depends on BRIDGE && NETFILTER && NETFILTER_XTABLES
+	select NETFILTER_FAMILY_BRIDGE
 	help
 	  ebtables is a general, extensible frame/packet identification
 	  framework. Say 'Y' or 'M' here if you want to do Ethernet
diff --git a/net/bridge/netfilter/nf_tables_bridge.c b/net/bridge/netfilter/nf_tables_bridge.c
index 97afdc0..86774b5 100644
--- a/net/bridge/netfilter/nf_tables_bridge.c
+++ b/net/bridge/netfilter/nf_tables_bridge.c
@@ -25,15 +25,17 @@ nft_do_chain_bridge(void *priv,
 {
 	struct nft_pktinfo pkt;
 
+	nft_set_pktinfo(&pkt, skb, state);
+
 	switch (eth_hdr(skb)->h_proto) {
 	case htons(ETH_P_IP):
-		nft_set_pktinfo_ipv4_validate(&pkt, skb, state);
+		nft_set_pktinfo_ipv4_validate(&pkt, skb);
 		break;
 	case htons(ETH_P_IPV6):
-		nft_set_pktinfo_ipv6_validate(&pkt, skb, state);
+		nft_set_pktinfo_ipv6_validate(&pkt, skb);
 		break;
 	default:
-		nft_set_pktinfo_unspec(&pkt, skb, state);
+		nft_set_pktinfo_unspec(&pkt, skb);
 		break;
 	}
 
@@ -44,14 +46,6 @@ static struct nft_af_info nft_af_bridge __read_mostly = {
 	.family		= NFPROTO_BRIDGE,
 	.nhooks		= NF_BR_NUMHOOKS,
 	.owner		= THIS_MODULE,
-	.nops		= 1,
-	.hooks		= {
-		[NF_BR_PRE_ROUTING]	= nft_do_chain_bridge,
-		[NF_BR_LOCAL_IN]	= nft_do_chain_bridge,
-		[NF_BR_FORWARD]		= nft_do_chain_bridge,
-		[NF_BR_LOCAL_OUT]	= nft_do_chain_bridge,
-		[NF_BR_POST_ROUTING]	= nft_do_chain_bridge,
-	},
 };
 
 static int nf_tables_bridge_init_net(struct net *net)
@@ -92,67 +86,32 @@ static const struct nf_chain_type filter_bridge = {
 			  (1 << NF_BR_FORWARD) |
 			  (1 << NF_BR_LOCAL_OUT) |
 			  (1 << NF_BR_POST_ROUTING),
-};
-
-static void nf_br_saveroute(const struct sk_buff *skb,
-			    struct nf_queue_entry *entry)
-{
-}
-
-static int nf_br_reroute(struct net *net, struct sk_buff *skb,
-			 const struct nf_queue_entry *entry)
-{
-	return 0;
-}
-
-static __sum16 nf_br_checksum(struct sk_buff *skb, unsigned int hook,
-			      unsigned int dataoff, u_int8_t protocol)
-{
-	return 0;
-}
-
-static __sum16 nf_br_checksum_partial(struct sk_buff *skb, unsigned int hook,
-				      unsigned int dataoff, unsigned int len,
-				      u_int8_t protocol)
-{
-	return 0;
-}
-
-static int nf_br_route(struct net *net, struct dst_entry **dst,
-		       struct flowi *fl, bool strict __always_unused)
-{
-	return 0;
-}
-
-static const struct nf_afinfo nf_br_afinfo = {
-	.family                 = AF_BRIDGE,
-	.checksum               = nf_br_checksum,
-	.checksum_partial       = nf_br_checksum_partial,
-	.route                  = nf_br_route,
-	.saveroute              = nf_br_saveroute,
-	.reroute                = nf_br_reroute,
-	.route_key_size         = 0,
+	.hooks		= {
+		[NF_BR_PRE_ROUTING]	= nft_do_chain_bridge,
+		[NF_BR_LOCAL_IN]	= nft_do_chain_bridge,
+		[NF_BR_FORWARD]		= nft_do_chain_bridge,
+		[NF_BR_LOCAL_OUT]	= nft_do_chain_bridge,
+		[NF_BR_POST_ROUTING]	= nft_do_chain_bridge,
+	},
 };
 
 static int __init nf_tables_bridge_init(void)
 {
 	int ret;
 
-	nf_register_afinfo(&nf_br_afinfo);
 	ret = nft_register_chain_type(&filter_bridge);
 	if (ret < 0)
-		goto err1;
+		return ret;
 
 	ret = register_pernet_subsys(&nf_tables_bridge_net_ops);
 	if (ret < 0)
-		goto err2;
+		goto err_register_subsys;
 
 	return ret;
 
-err2:
+err_register_subsys:
 	nft_unregister_chain_type(&filter_bridge);
-err1:
-	nf_unregister_afinfo(&nf_br_afinfo);
+
 	return ret;
 }
 
@@ -160,7 +119,6 @@ static void __exit nf_tables_bridge_exit(void)
 {
 	unregister_pernet_subsys(&nf_tables_bridge_net_ops);
 	nft_unregister_chain_type(&filter_bridge);
-	nf_unregister_afinfo(&nf_br_afinfo);
 }
 
 module_init(nf_tables_bridge_init);
diff --git a/net/caif/cfpkt_skbuff.c b/net/caif/cfpkt_skbuff.c
index 71b6ab2..38c2b7a 100644
--- a/net/caif/cfpkt_skbuff.c
+++ b/net/caif/cfpkt_skbuff.c
@@ -8,7 +8,6 @@
 
 #include <linux/string.h>
 #include <linux/skbuff.h>
-#include <linux/hardirq.h>
 #include <linux/export.h>
 #include <net/caif/cfpkt.h>
 
diff --git a/net/caif/chnl_net.c b/net/caif/chnl_net.c
index 922ac1d6..53ecda1 100644
--- a/net/caif/chnl_net.c
+++ b/net/caif/chnl_net.c
@@ -8,7 +8,6 @@
 #define pr_fmt(fmt) KBUILD_MODNAME ":%s(): " fmt, __func__
 
 #include <linux/fs.h>
-#include <linux/hardirq.h>
 #include <linux/init.h>
 #include <linux/module.h>
 #include <linux/netdevice.h>
diff --git a/net/can/gw.c b/net/can/gw.c
index 73a02af..398dd03 100644
--- a/net/can/gw.c
+++ b/net/can/gw.c
@@ -1014,6 +1014,8 @@ static struct pernet_operations cangw_pernet_ops = {
 
 static __init int cgw_module_init(void)
 {
+	int ret;
+
 	/* sanitize given module parameter */
 	max_hops = clamp_t(unsigned int, max_hops, CGW_MIN_HOPS, CGW_MAX_HOPS);
 
@@ -1031,15 +1033,19 @@ static __init int cgw_module_init(void)
 	notifier.notifier_call = cgw_notifier;
 	register_netdevice_notifier(&notifier);
 
-	if (__rtnl_register(PF_CAN, RTM_GETROUTE, NULL, cgw_dump_jobs, 0)) {
+	ret = rtnl_register_module(THIS_MODULE, PF_CAN, RTM_GETROUTE,
+				   NULL, cgw_dump_jobs, 0);
+	if (ret) {
 		unregister_netdevice_notifier(&notifier);
 		kmem_cache_destroy(cgw_cache);
 		return -ENOBUFS;
 	}
 
-	/* Only the first call to __rtnl_register can fail */
-	__rtnl_register(PF_CAN, RTM_NEWROUTE, cgw_create_job, NULL, 0);
-	__rtnl_register(PF_CAN, RTM_DELROUTE, cgw_remove_job, NULL, 0);
+	/* Only the first call to rtnl_register_module can fail */
+	rtnl_register_module(THIS_MODULE, PF_CAN, RTM_NEWROUTE,
+			     cgw_create_job, NULL, 0);
+	rtnl_register_module(THIS_MODULE, PF_CAN, RTM_DELROUTE,
+			     cgw_remove_job, NULL, 0);
 
 	return 0;
 }
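
rtnl_register_module() differs from the __rtnl_register() calls it
replaces in the two ways this hunk relies on: it records the owning module
so rtnetlink can hold a reference while a handler runs, and its return
value is meant to be checked - although, per the comment above, only the
first call for a protocol family can fail, since that call allocates the
per-family handler table. The resulting init shape, sketched with
hypothetical my_doit/my_dumpit handlers:

    static int __init my_init(void)
    {
            int ret;

            ret = rtnl_register_module(THIS_MODULE, PF_CAN, RTM_GETROUTE,
                                       NULL, my_dumpit, 0);
            if (ret)
                    return ret;     /* handler table allocation failed */

            /* same family: these can no longer fail */
            rtnl_register_module(THIS_MODULE, PF_CAN, RTM_NEWROUTE,
                                 my_doit, NULL, 0);
            return 0;
    }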
diff --git a/net/core/Makefile b/net/core/Makefile
index 1fd0a9c..6dbbba8 100644
--- a/net/core/Makefile
+++ b/net/core/Makefile
@@ -11,7 +11,7 @@
 obj-y		     += dev.o ethtool.o dev_addr_lists.o dst.o netevent.o \
 			neighbour.o rtnetlink.o utils.o link_watch.o filter.o \
 			sock_diag.o dev_ioctl.o tso.o sock_reuseport.o \
-			fib_notifier.o
+			fib_notifier.o xdp.o
 
 obj-y += net-sysfs.o
 obj-$(CONFIG_PROC_FS) += net-procfs.o
diff --git a/net/core/dev.c b/net/core/dev.c
index 0e0ba36..3d24d9a 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -1554,6 +1554,23 @@ void dev_disable_lro(struct net_device *dev)
 }
 EXPORT_SYMBOL(dev_disable_lro);
 
+/**
+ *	dev_disable_gro_hw - disable HW Generic Receive Offload on a device
+ *	@dev: device
+ *
+ *	Disable HW Generic Receive Offload (GRO_HW) on a net device.  Must be
+ *	called under RTNL.  This is needed if Generic XDP is installed on
+ *	the device.
+ */
+static void dev_disable_gro_hw(struct net_device *dev)
+{
+	dev->wanted_features &= ~NETIF_F_GRO_HW;
+	netdev_update_features(dev);
+
+	if (unlikely(dev->features & NETIF_F_GRO_HW))
+		netdev_WARN(dev, "failed to disable GRO_HW!\n");
+}
+
 static int call_netdevice_notifier(struct notifier_block *nb, unsigned long val,
 				   struct net_device *dev)
 {
@@ -2815,7 +2832,7 @@ struct sk_buff *__skb_gso_segment(struct sk_buff *skb,
 
 	segs = skb_mac_gso_segment(skb, features);
 
-	if (unlikely(skb_needs_check(skb, tx_path)))
+	if (unlikely(skb_needs_check(skb, tx_path) && !IS_ERR(segs)))
 		skb_warn_bad_offload(skb);
 
 	return segs;
@@ -3054,7 +3071,7 @@ int skb_csum_hwoffload_help(struct sk_buff *skb,
 }
 EXPORT_SYMBOL(skb_csum_hwoffload_help);
 
-static struct sk_buff *validate_xmit_skb(struct sk_buff *skb, struct net_device *dev)
+static struct sk_buff *validate_xmit_skb(struct sk_buff *skb, struct net_device *dev, bool *again)
 {
 	netdev_features_t features;
 
@@ -3078,9 +3095,6 @@ static struct sk_buff *validate_xmit_skb(struct sk_buff *skb, struct net_device
 		    __skb_linearize(skb))
 			goto out_kfree_skb;
 
-		if (validate_xmit_xfrm(skb, features))
-			goto out_kfree_skb;
-
 		/* If packet is not checksummed and device does not
 		 * support checksumming for this protocol, complete
 		 * checksumming here.
@@ -3097,6 +3111,8 @@ static struct sk_buff *validate_xmit_skb(struct sk_buff *skb, struct net_device
 		}
 	}
 
+	skb = validate_xmit_xfrm(skb, features, again);
+
 	return skb;
 
 out_kfree_skb:
@@ -3106,7 +3122,7 @@ static struct sk_buff *validate_xmit_skb(struct sk_buff *skb, struct net_device
 	return NULL;
 }
 
-struct sk_buff *validate_xmit_skb_list(struct sk_buff *skb, struct net_device *dev)
+struct sk_buff *validate_xmit_skb_list(struct sk_buff *skb, struct net_device *dev, bool *again)
 {
 	struct sk_buff *next, *head = NULL, *tail;
 
@@ -3117,7 +3133,7 @@ struct sk_buff *validate_xmit_skb_list(struct sk_buff *skb, struct net_device *d
 		/* in case skb won't be segmented, point to itself */
 		skb->prev = skb;
 
-		skb = validate_xmit_skb(skb, dev);
+		skb = validate_xmit_skb(skb, dev, again);
 		if (!skb)
 			continue;
 
@@ -3174,6 +3190,21 @@ static inline int __dev_xmit_skb(struct sk_buff *skb, struct Qdisc *q,
 	int rc;
 
 	qdisc_calculate_pkt_len(skb, q);
+
+	if (q->flags & TCQ_F_NOLOCK) {
+		if (unlikely(test_bit(__QDISC_STATE_DEACTIVATED, &q->state))) {
+			__qdisc_drop(skb, &to_free);
+			rc = NET_XMIT_DROP;
+		} else {
+			rc = q->enqueue(skb, q, &to_free) & NET_XMIT_MASK;
+			__qdisc_run(q);
+		}
+
+		if (unlikely(to_free))
+			kfree_skb_list(to_free);
+		return rc;
+	}
+
 	/*
 	 * Heuristic to force contended enqueues to serialize on a
 	 * separate lock before trying to get qdisc main lock.
@@ -3204,9 +3235,9 @@ static inline int __dev_xmit_skb(struct sk_buff *skb, struct Qdisc *q,
 				contended = false;
 			}
 			__qdisc_run(q);
-		} else
-			qdisc_run_end(q);
+		}
 
+		qdisc_run_end(q);
 		rc = NET_XMIT_SUCCESS;
 	} else {
 		rc = q->enqueue(skb, q, &to_free) & NET_XMIT_MASK;
@@ -3216,6 +3247,7 @@ static inline int __dev_xmit_skb(struct sk_buff *skb, struct Qdisc *q,
 				contended = false;
 			}
 			__qdisc_run(q);
+			qdisc_run_end(q);
 		}
 	}
 	spin_unlock(root_lock);
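
The TCQ_F_NOLOCK branch added above gives lockless qdiscs their own fast
path in __dev_xmit_skb(): the qdisc root lock is never taken, the packet
is either dropped (qdisc being deactivated) or enqueued and run directly,
and the to_free batch is flushed outside any lock. Condensed, with the
key points annotated:

    if (q->flags & TCQ_F_NOLOCK) {
            if (unlikely(test_bit(__QDISC_STATE_DEACTIVATED, &q->state))) {
                    __qdisc_drop(skb, &to_free);    /* qdisc going away */
                    rc = NET_XMIT_DROP;
            } else {
                    rc = q->enqueue(skb, q, &to_free) & NET_XMIT_MASK;
                    __qdisc_run(q);                 /* no root_lock held */
            }
            if (unlikely(to_free))
                    kfree_skb_list(to_free);        /* batched frees */
            return rc;
    }

The locked path also changes shape: qdisc_run_end() now runs after both
the direct-transmit and the enqueue branches, instead of only when
__qdisc_run() was skipped.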
@@ -3428,6 +3460,7 @@ static int __dev_queue_xmit(struct sk_buff *skb, void *accel_priv)
 	struct netdev_queue *txq;
 	struct Qdisc *q;
 	int rc = -ENOMEM;
+	bool again = false;
 
 	skb_reset_mac_header(skb);
 
@@ -3489,7 +3522,7 @@ static int __dev_queue_xmit(struct sk_buff *skb, void *accel_priv)
 				     XMIT_RECURSION_LIMIT))
 				goto recursion_alert;
 
-			skb = validate_xmit_skb(skb, dev);
+			skb = validate_xmit_skb(skb, dev, &again);
 			if (!skb)
 				goto out;
 
@@ -3885,9 +3918,33 @@ static int enqueue_to_backlog(struct sk_buff *skb, int cpu,
 	return NET_RX_DROP;
 }
 
+static struct netdev_rx_queue *netif_get_rxqueue(struct sk_buff *skb)
+{
+	struct net_device *dev = skb->dev;
+	struct netdev_rx_queue *rxqueue;
+
+	rxqueue = dev->_rx;
+
+	if (skb_rx_queue_recorded(skb)) {
+		u16 index = skb_get_rx_queue(skb);
+
+		if (unlikely(index >= dev->real_num_rx_queues)) {
+			WARN_ONCE(dev->real_num_rx_queues > 1,
+				  "%s received packet on queue %u, but number "
+				  "of RX queues is %u\n",
+				  dev->name, index, dev->real_num_rx_queues);
+
+			return rxqueue; /* Return first rxqueue */
+		}
+		rxqueue += index;
+	}
+	return rxqueue;
+}
+
 static u32 netif_receive_generic_xdp(struct sk_buff *skb,
 				     struct bpf_prog *xdp_prog)
 {
+	struct netdev_rx_queue *rxqueue;
 	u32 metalen, act = XDP_DROP;
 	struct xdp_buff xdp;
 	void *orig_data;
@@ -3931,6 +3988,9 @@ static u32 netif_receive_generic_xdp(struct sk_buff *skb,
 	xdp.data_hard_start = skb->data - skb_headroom(skb);
 	orig_data = xdp.data;
 
+	rxqueue = netif_get_rxqueue(skb);
+	xdp.rxq = &rxqueue->xdp_rxq;
+
 	act = bpf_prog_run_xdp(xdp_prog, &xdp);
 
 	off = xdp.data - orig_data;
@@ -4155,21 +4215,26 @@ static __latent_entropy void net_tx_action(struct softirq_action *h)
 
 		while (head) {
 			struct Qdisc *q = head;
-			spinlock_t *root_lock;
+			spinlock_t *root_lock = NULL;
 
 			head = head->next_sched;
 
-			root_lock = qdisc_lock(q);
-			spin_lock(root_lock);
+			if (!(q->flags & TCQ_F_NOLOCK)) {
+				root_lock = qdisc_lock(q);
+				spin_lock(root_lock);
+			}
 			/* We need to make sure head->next_sched is read
 			 * before clearing __QDISC_STATE_SCHED
 			 */
 			smp_mb__before_atomic();
 			clear_bit(__QDISC_STATE_SCHED, &q->state);
 			qdisc_run(q);
-			spin_unlock(root_lock);
+			if (root_lock)
+				spin_unlock(root_lock);
 		}
 	}
+
+	xfrm_dev_backlog(sd);
 }
 
 #if IS_ENABLED(CONFIG_BRIDGE) && IS_ENABLED(CONFIG_ATM_LANE)
@@ -4557,6 +4622,7 @@ static int generic_xdp_install(struct net_device *dev, struct netdev_bpf *xdp)
 		} else if (new && !old) {
 			static_key_slow_inc(&generic_xdp_needed);
 			dev_disable_lro(dev);
+			dev_disable_gro_hw(dev);
 		}
 		break;
 
@@ -7085,17 +7151,21 @@ int dev_change_proto_down(struct net_device *dev, bool proto_down)
 }
 EXPORT_SYMBOL(dev_change_proto_down);
 
-u8 __dev_xdp_attached(struct net_device *dev, bpf_op_t bpf_op, u32 *prog_id)
+void __dev_xdp_query(struct net_device *dev, bpf_op_t bpf_op,
+		     struct netdev_bpf *xdp)
+{
+	memset(xdp, 0, sizeof(*xdp));
+	xdp->command = XDP_QUERY_PROG;
+
+	/* Query must always succeed. */
+	WARN_ON(bpf_op(dev, xdp) < 0);
+}
+
+static u8 __dev_xdp_attached(struct net_device *dev, bpf_op_t bpf_op)
 {
 	struct netdev_bpf xdp;
 
-	memset(&xdp, 0, sizeof(xdp));
-	xdp.command = XDP_QUERY_PROG;
-
-	/* Query must always succeed. */
-	WARN_ON(bpf_op(dev, &xdp) < 0);
-	if (prog_id)
-		*prog_id = xdp.prog_id;
+	__dev_xdp_query(dev, bpf_op, &xdp);
 
 	return xdp.prog_attached;
 }
@@ -7118,6 +7188,27 @@ static int dev_xdp_install(struct net_device *dev, bpf_op_t bpf_op,
 	return bpf_op(dev, &xdp);
 }
 
+static void dev_xdp_uninstall(struct net_device *dev)
+{
+	struct netdev_bpf xdp;
+	bpf_op_t ndo_bpf;
+
+	/* Remove generic XDP */
+	WARN_ON(dev_xdp_install(dev, generic_xdp_install, NULL, 0, NULL));
+
+	/* Remove from the driver */
+	ndo_bpf = dev->netdev_ops->ndo_bpf;
+	if (!ndo_bpf)
+		return;
+
+	__dev_xdp_query(dev, ndo_bpf, &xdp);
+	if (xdp.prog_attached == XDP_ATTACHED_NONE)
+		return;
+
+	/* Program removal should always succeed */
+	WARN_ON(dev_xdp_install(dev, ndo_bpf, NULL, xdp.prog_flags, NULL));
+}
+
 /**
  *	dev_change_xdp_fd - set or clear a bpf program for a device rx path
  *	@dev: device
@@ -7146,10 +7237,10 @@ int dev_change_xdp_fd(struct net_device *dev, struct netlink_ext_ack *extack,
 		bpf_chk = generic_xdp_install;
 
 	if (fd >= 0) {
-		if (bpf_chk && __dev_xdp_attached(dev, bpf_chk, NULL))
+		if (bpf_chk && __dev_xdp_attached(dev, bpf_chk))
 			return -EEXIST;
 		if ((flags & XDP_FLAGS_UPDATE_IF_NOEXIST) &&
-		    __dev_xdp_attached(dev, bpf_op, NULL))
+		    __dev_xdp_attached(dev, bpf_op))
 			return -EBUSY;
 
 		prog = bpf_prog_get_type_dev(fd, BPF_PROG_TYPE_XDP,
@@ -7248,6 +7339,7 @@ static void rollback_registered_many(struct list_head *head)
 		/* Shutdown queueing discipline. */
 		dev_shutdown(dev);
 
+		dev_xdp_uninstall(dev);
 
 		/* Notify protocols, that we are about to destroy
 		 * this device. They should clean all the things.
@@ -7391,6 +7483,18 @@ static netdev_features_t netdev_fix_features(struct net_device *dev,
 		features &= ~dev->gso_partial_features;
 	}
 
+	if (!(features & NETIF_F_RXCSUM)) {
+		/* NETIF_F_GRO_HW implies doing RXCSUM since every packet
+		 * successfully merged by hardware must also have the
+		 * checksum verified by hardware.  If the user does not
+		 * want to enable RXCSUM, logically, we should disable GRO_HW.
+		 */
+		if (features & NETIF_F_GRO_HW) {
+			netdev_dbg(dev, "Dropping NETIF_F_GRO_HW since no RXCSUM feature.\n");
+			features &= ~NETIF_F_GRO_HW;
+		}
+	}
+
 	return features;
 }
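
Since hardware can only merge packets whose checksums it has verified, the
hunk above makes NETIF_F_GRO_HW depend on NETIF_F_RXCSUM: turning RXCSUM off
now also clears GRO_HW. A hedged driver-side sketch of advertising the pair
together (hypothetical probe code, not from any specific driver):

    /* GRO_HW is only meaningful with RXCSUM; netdev_fix_features()
     * clears it automatically whenever RXCSUM is disabled
     */
    dev->hw_features |= NETIF_F_RXCSUM | NETIF_F_GRO_HW;
    dev->features    |= NETIF_F_RXCSUM | NETIF_F_GRO_HW;

The matching "rx-gro-hw" feature string is registered in net/core/ethtool.c
below.
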
 
@@ -7524,12 +7628,12 @@ void netif_stacked_transfer_operstate(const struct net_device *rootdev,
 }
 EXPORT_SYMBOL(netif_stacked_transfer_operstate);
 
-#ifdef CONFIG_SYSFS
 static int netif_alloc_rx_queues(struct net_device *dev)
 {
 	unsigned int i, count = dev->num_rx_queues;
 	struct netdev_rx_queue *rx;
 	size_t sz = count * sizeof(*rx);
+	int err = 0;
 
 	BUG_ON(count < 1);
 
@@ -7539,11 +7643,38 @@ static int netif_alloc_rx_queues(struct net_device *dev)
 
 	dev->_rx = rx;
 
-	for (i = 0; i < count; i++)
+	for (i = 0; i < count; i++) {
 		rx[i].dev = dev;
+
+		/* XDP RX-queue setup */
+		err = xdp_rxq_info_reg(&rx[i].xdp_rxq, dev, i);
+		if (err < 0)
+			goto err_rxq_info;
+	}
 	return 0;
+
+err_rxq_info:
+	/* Roll back successful registrations and free other resources */
+	while (i--)
+		xdp_rxq_info_unreg(&rx[i].xdp_rxq);
+	kvfree(dev->_rx);
+	dev->_rx = NULL;
+	return err;
 }
-#endif
+
+static void netif_free_rx_queues(struct net_device *dev)
+{
+	unsigned int i, count = dev->num_rx_queues;
+
+	/* netif_alloc_rx_queues() failed: resources were already unregistered */
+	if (!dev->_rx)
+		return;
+
+	for (i = 0; i < count; i++)
+		xdp_rxq_info_unreg(&dev->_rx[i].xdp_rxq);
+
+	kvfree(dev->_rx);
+}
 
 static void netdev_init_one_queue(struct net_device *dev,
 				  struct netdev_queue *queue, void *_unused)
@@ -8104,12 +8235,10 @@ struct net_device *alloc_netdev_mqs(int sizeof_priv, const char *name,
 		return NULL;
 	}
 
-#ifdef CONFIG_SYSFS
 	if (rxqs < 1) {
 		pr_err("alloc_netdev: Unable to allocate device with zero RX queues\n");
 		return NULL;
 	}
-#endif
 
 	alloc_size = sizeof(struct net_device);
 	if (sizeof_priv) {
@@ -8166,12 +8295,10 @@ struct net_device *alloc_netdev_mqs(int sizeof_priv, const char *name,
 	if (netif_alloc_netdev_queues(dev))
 		goto free_all;
 
-#ifdef CONFIG_SYSFS
 	dev->num_rx_queues = rxqs;
 	dev->real_num_rx_queues = rxqs;
 	if (netif_alloc_rx_queues(dev))
 		goto free_all;
-#endif
 
 	strcpy(dev->name, name);
 	dev->name_assign_type = name_assign_type;
@@ -8207,13 +8334,10 @@ EXPORT_SYMBOL(alloc_netdev_mqs);
 void free_netdev(struct net_device *dev)
 {
 	struct napi_struct *p, *n;
-	struct bpf_prog *prog;
 
 	might_sleep();
 	netif_free_tx_queues(dev);
-#ifdef CONFIG_SYSFS
-	kvfree(dev->_rx);
-#endif
+	netif_free_rx_queues(dev);
 
 	kfree(rcu_dereference_protected(dev->ingress_queue, 1));
 
@@ -8226,12 +8350,6 @@ void free_netdev(struct net_device *dev)
 	free_percpu(dev->pcpu_refcnt);
 	dev->pcpu_refcnt = NULL;
 
-	prog = rcu_dereference_protected(dev->xdp_prog, 1);
-	if (prog) {
-		bpf_prog_put(prog);
-		static_key_slow_dec(&generic_xdp_needed);
-	}
-
 	/*  Compatibility with error handling in drivers */
 	if (dev->reg_state == NETREG_UNINITIALIZED) {
 		netdev_freemem(dev);
@@ -8819,6 +8937,9 @@ static int __init net_dev_init(void)
 
 		skb_queue_head_init(&sd->input_pkt_queue);
 		skb_queue_head_init(&sd->process_queue);
+#ifdef CONFIG_XFRM_OFFLOAD
+		skb_queue_head_init(&sd->xfrm_backlog);
+#endif
 		INIT_LIST_HEAD(&sd->poll_list);
 		sd->output_queue_tailp = &sd->output_queue;
 #ifdef CONFIG_RPS
diff --git a/net/core/dst.c b/net/core/dst.c
index 662a2d4..007aa0b 100644
--- a/net/core/dst.c
+++ b/net/core/dst.c
@@ -21,6 +21,7 @@
 #include <linux/sched.h>
 #include <linux/prefetch.h>
 #include <net/lwtunnel.h>
+#include <net/xfrm.h>
 
 #include <net/dst.h>
 #include <net/dst_metadata.h>
@@ -62,15 +63,12 @@ void dst_init(struct dst_entry *dst, struct dst_ops *ops,
 	      struct net_device *dev, int initial_ref, int initial_obsolete,
 	      unsigned short flags)
 {
-	dst->child = NULL;
 	dst->dev = dev;
 	if (dev)
 		dev_hold(dev);
 	dst->ops = ops;
 	dst_init_metrics(dst, dst_default_metrics.metrics, true);
 	dst->expires = 0UL;
-	dst->path = dst;
-	dst->from = NULL;
 #ifdef CONFIG_XFRM
 	dst->xfrm = NULL;
 #endif
@@ -88,7 +86,6 @@ void dst_init(struct dst_entry *dst, struct dst_ops *ops,
 	dst->__use = 0;
 	dst->lastuse = jiffies;
 	dst->flags = flags;
-	dst->next = NULL;
 	if (!(flags & DST_NOCOUNT))
 		dst_entries_add(ops, 1);
 }
@@ -116,12 +113,17 @@ EXPORT_SYMBOL(dst_alloc);
 
 struct dst_entry *dst_destroy(struct dst_entry * dst)
 {
-	struct dst_entry *child;
+	struct dst_entry *child = NULL;
 
 	smp_rmb();
 
-	child = dst->child;
+#ifdef CONFIG_XFRM
+	if (dst->xfrm) {
+		struct xfrm_dst *xdst = (struct xfrm_dst *) dst;
 
+		child = xdst->child;
+	}
+#endif
 	if (!(dst->flags & DST_NOCOUNT))
 		dst_entries_add(dst->ops, -1);
 
diff --git a/net/core/ethtool.c b/net/core/ethtool.c
index 8225416..107b122 100644
--- a/net/core/ethtool.c
+++ b/net/core/ethtool.c
@@ -73,6 +73,7 @@ static const char netdev_features_strings[NETDEV_FEATURE_COUNT][ETH_GSTRING_LEN]
 	[NETIF_F_LLTX_BIT] =             "tx-lockless",
 	[NETIF_F_NETNS_LOCAL_BIT] =      "netns-local",
 	[NETIF_F_GRO_BIT] =              "rx-gro",
+	[NETIF_F_GRO_HW_BIT] =           "rx-gro-hw",
 	[NETIF_F_LRO_BIT] =              "rx-lro",
 
 	[NETIF_F_TSO_BIT] =              "tx-tcp-segmentation",
@@ -1692,14 +1693,23 @@ static int ethtool_get_ringparam(struct net_device *dev, void __user *useraddr)
 
 static int ethtool_set_ringparam(struct net_device *dev, void __user *useraddr)
 {
-	struct ethtool_ringparam ringparam;
+	struct ethtool_ringparam ringparam, max = { .cmd = ETHTOOL_GRINGPARAM };
 
-	if (!dev->ethtool_ops->set_ringparam)
+	if (!dev->ethtool_ops->set_ringparam || !dev->ethtool_ops->get_ringparam)
 		return -EOPNOTSUPP;
 
 	if (copy_from_user(&ringparam, useraddr, sizeof(ringparam)))
 		return -EFAULT;
 
+	dev->ethtool_ops->get_ringparam(dev, &max);
+
+	/* ensure new ring parameters are within the maximums */
+	if (ringparam.rx_pending > max.rx_max_pending ||
+	    ringparam.rx_mini_pending > max.rx_mini_max_pending ||
+	    ringparam.rx_jumbo_pending > max.rx_jumbo_max_pending ||
+	    ringparam.tx_pending > max.tx_max_pending)
+		return -EINVAL;
+
 	return dev->ethtool_ops->set_ringparam(dev, &ringparam);
 }
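
With the check above, a request that exceeds the driver-reported maxima now
fails with -EINVAL in the core instead of being passed to the driver's
set_ringparam op. A hedged user-space sketch of the read-modify-write this
implies, using the standard SIOCETHTOOL ioctl (fd is assumed to be an
ordinary AF_INET datagram socket; error handling is minimal):

    #include <string.h>
    #include <sys/ioctl.h>
    #include <linux/ethtool.h>
    #include <linux/sockios.h>
    #include <net/if.h>

    static int set_rx_ring(int fd, const char *ifname, __u32 rx_pending)
    {
            struct ethtool_ringparam erp = { .cmd = ETHTOOL_GRINGPARAM };
            struct ifreq ifr;

            memset(&ifr, 0, sizeof(ifr));
            strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);
            ifr.ifr_data = (char *)&erp;

            if (ioctl(fd, SIOCETHTOOL, &ifr) < 0)   /* read current values */
                    return -1;

            erp.cmd = ETHTOOL_SRINGPARAM;
            erp.rx_pending = rx_pending;    /* > rx_max_pending => EINVAL */
            return ioctl(fd, SIOCETHTOOL, &ifr);
    }
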
 
diff --git a/net/core/filter.c b/net/core/filter.c
index d339ef17..d4b190e 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -2682,8 +2682,9 @@ static int __xdp_generic_ok_fwd_dev(struct sk_buff *skb, struct net_device *fwd)
 	return 0;
 }
 
-int xdp_do_generic_redirect_map(struct net_device *dev, struct sk_buff *skb,
-				struct bpf_prog *xdp_prog)
+static int xdp_do_generic_redirect_map(struct net_device *dev,
+				       struct sk_buff *skb,
+				       struct bpf_prog *xdp_prog)
 {
 	struct redirect_info *ri = this_cpu_ptr(&redirect_info);
 	unsigned long map_owner = ri->map_owner;
@@ -3011,6 +3012,8 @@ BPF_CALL_4(bpf_skb_set_tunnel_key, struct sk_buff *, skb,
 	info->key.tun_flags = TUNNEL_KEY | TUNNEL_CSUM | TUNNEL_NOCACHE;
 	if (flags & BPF_F_DONT_FRAGMENT)
 		info->key.tun_flags |= TUNNEL_DONT_FRAGMENT;
+	if (flags & BPF_F_ZERO_CSUM_TX)
+		info->key.tun_flags &= ~TUNNEL_CSUM;
 
 	info->key.tun_id = cpu_to_be64(from->tunnel_id);
 	info->key.tos = from->tunnel_tos;
@@ -3024,8 +3027,6 @@ BPF_CALL_4(bpf_skb_set_tunnel_key, struct sk_buff *, skb,
 				  IPV6_FLOWLABEL_MASK;
 	} else {
 		info->key.u.ipv4.dst = cpu_to_be32(from->remote_ipv4);
-		if (flags & BPF_F_ZERO_CSUM_TX)
-			info->key.tun_flags &= ~TUNNEL_CSUM;
 	}
 
 	return 0;
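
Hoisting the BPF_F_ZERO_CSUM_TX test out of the IPv4-only else branch means
the flag now clears TUNNEL_CSUM for IPv6 tunnel keys as well. A hedged
BPF-side sketch (libbpf-style SEC()/helper declarations assumed; tunnel id
and address are illustrative):

    #include <linux/bpf.h>
    #include <linux/pkt_cls.h>
    #include <bpf/bpf_helpers.h>
    #include <bpf/bpf_endian.h>

    SEC("tc")
    int set_v6_tunnel(struct __sk_buff *skb)
    {
            struct bpf_tunnel_key key = {};

            key.tunnel_id = 42;                /* illustrative */
            key.remote_ipv6[3] = bpf_htonl(1); /* ::1, illustrative */
            /* ZERO_CSUM_TX now also applies to this IPv6 key */
            bpf_skb_set_tunnel_key(skb, &key, sizeof(key),
                                   BPF_F_TUNINFO_IPV6 | BPF_F_ZERO_CSUM_TX);
            return TC_ACT_OK;
    }
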
@@ -4301,6 +4302,25 @@ static u32 xdp_convert_ctx_access(enum bpf_access_type type,
 				      si->dst_reg, si->src_reg,
 				      offsetof(struct xdp_buff, data_end));
 		break;
+	case offsetof(struct xdp_md, ingress_ifindex):
+		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct xdp_buff, rxq),
+				      si->dst_reg, si->src_reg,
+				      offsetof(struct xdp_buff, rxq));
+		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct xdp_rxq_info, dev),
+				      si->dst_reg, si->dst_reg,
+				      offsetof(struct xdp_rxq_info, dev));
+		*insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->dst_reg,
+				      bpf_target_off(struct net_device,
+						     ifindex, 4, target_size));
+		break;
+	case offsetof(struct xdp_md, rx_queue_index):
+		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct xdp_buff, rxq),
+				      si->dst_reg, si->src_reg,
+				      offsetof(struct xdp_buff, rxq));
+		*insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->dst_reg,
+				      bpf_target_off(struct xdp_rxq_info,
+						queue_index, 4, target_size));
+		break;
 	}
 
 	return insn - insn_buf;
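
Together with the xdp.rxq assignment added to netif_receive_generic_xdp()
above, these conversions let XDP programs read the ingress device and RX
queue straight from the context. A hedged sketch (the queue-0 policy is
illustrative):

    #include <linux/bpf.h>
    #include <bpf/bpf_helpers.h>

    SEC("xdp")
    int xdp_rxq_filter(struct xdp_md *ctx)
    {
            /* both loads are rewritten by xdp_convert_ctx_access() */
            if (ctx->ingress_ifindex == 0)
                    return XDP_ABORTED;
            /* accept traffic only on queue 0, drop the rest */
            return ctx->rx_queue_index == 0 ? XDP_PASS : XDP_DROP;
    }
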
@@ -4435,6 +4455,42 @@ static u32 sock_ops_convert_ctx_access(enum bpf_access_type type,
 		*insn++ = BPF_LDX_MEM(BPF_H, si->dst_reg, si->dst_reg,
 				      offsetof(struct sock_common, skc_num));
 		break;
+
+	case offsetof(struct bpf_sock_ops, is_fullsock):
+		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(
+						struct bpf_sock_ops_kern,
+						is_fullsock),
+				      si->dst_reg, si->src_reg,
+				      offsetof(struct bpf_sock_ops_kern,
+					       is_fullsock));
+		break;
+
+/* Helper macro for adding read access to tcp_sock fields. */
+#define SOCK_OPS_GET_TCP32(FIELD_NAME)					      \
+	do {								      \
+		BUILD_BUG_ON(FIELD_SIZEOF(struct tcp_sock, FIELD_NAME) != 4); \
+		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(			      \
+						struct bpf_sock_ops_kern,     \
+						is_fullsock),		      \
+				      si->dst_reg, si->src_reg,		      \
+				      offsetof(struct bpf_sock_ops_kern,      \
+					       is_fullsock));		      \
+		*insn++ = BPF_JMP_IMM(BPF_JEQ, si->dst_reg, 0, 2);	      \
+		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(			      \
+						struct bpf_sock_ops_kern, sk),\
+				      si->dst_reg, si->src_reg,		      \
+				      offsetof(struct bpf_sock_ops_kern, sk));\
+		*insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->dst_reg,        \
+				      offsetof(struct tcp_sock, FIELD_NAME)); \
+	} while (0)
+
+	case offsetof(struct bpf_sock_ops, snd_cwnd):
+		SOCK_OPS_GET_TCP32(snd_cwnd);
+		break;
+
+	case offsetof(struct bpf_sock_ops, srtt_us):
+		SOCK_OPS_GET_TCP32(srtt_us);
+		break;
 	}
 	return insn - insn_buf;
 }
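
The SOCK_OPS_GET_TCP32() expansion only dereferences the tcp_sock when
is_fullsock is set; the BPF_JEQ jumps over both loads otherwise, so the
program reads 0 for request and timewait sockets. A hedged sockops sketch
using the two new fields (libbpf-style helpers assumed):

    #include <linux/bpf.h>
    #include <bpf/bpf_helpers.h>

    SEC("sockops")
    int log_cwnd(struct bpf_sock_ops *skops)
    {
            /* snd_cwnd/srtt_us read as 0 unless this is a full socket */
            if (skops->is_fullsock)
                    bpf_printk("cwnd=%u srtt_us=%u",
                               skops->snd_cwnd, skops->srtt_us);
            return 1;
    }
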
diff --git a/net/core/flow_dissector.c b/net/core/flow_dissector.c
index 15ce300..02db7b1 100644
--- a/net/core/flow_dissector.c
+++ b/net/core/flow_dissector.c
@@ -24,6 +24,7 @@
 #include <linux/tcp.h>
 #include <net/flow_dissector.h>
 #include <scsi/fc/fc_fcoe.h>
+#include <uapi/linux/batadv_packet.h>
 
 static void dissector_set_key(struct flow_dissector *flow_dissector,
 			      enum flow_dissector_key_id key_id)
@@ -133,10 +134,10 @@ skb_flow_dissect_set_enc_addr_type(enum flow_dissector_key_id type,
 	ctrl->addr_type = type;
 }
 
-static void
-__skb_flow_dissect_tunnel_info(const struct sk_buff *skb,
-			       struct flow_dissector *flow_dissector,
-			       void *target_container)
+void
+skb_flow_dissect_tunnel_info(const struct sk_buff *skb,
+			     struct flow_dissector *flow_dissector,
+			     void *target_container)
 {
 	struct ip_tunnel_info *info;
 	struct ip_tunnel_key *key;
@@ -212,6 +213,7 @@ __skb_flow_dissect_tunnel_info(const struct sk_buff *skb,
 		tp->dst = key->tp_dst;
 	}
 }
+EXPORT_SYMBOL(skb_flow_dissect_tunnel_info);
 
 static enum flow_dissect_ret
 __skb_flow_dissect_mpls(const struct sk_buff *skb,
@@ -436,6 +438,57 @@ __skb_flow_dissect_gre(const struct sk_buff *skb,
 	return FLOW_DISSECT_RET_PROTO_AGAIN;
 }
 
+/**
+ * __skb_flow_dissect_batadv() - dissect batman-adv header
+ * @skb: sk_buff with the batman-adv header
+ * @key_control: flow dissectors control key
+ * @data: raw buffer pointer to the packet, if NULL use skb->data
+ * @p_proto: pointer used to update the protocol to process next
+ * @p_nhoff: pointer used to update inner network header offset
+ * @hlen: packet header length
+ * @flags: any combination of FLOW_DISSECTOR_F_*
+ *
+ * An attempt is made to dissect ETH_P_BATMAN packets. Only packets of type
+ * &struct batadv_unicast_packet are actually processed, because they contain
+ * an inner ethernet header and are usually followed by the actual network
+ * header. This allows the flow dissector to continue processing the packet.
+ *
+ * Return: FLOW_DISSECT_RET_PROTO_AGAIN when &struct batadv_unicast_packet was found,
+ *  FLOW_DISSECT_RET_OUT_GOOD when dissector should stop after encapsulation,
+ *  otherwise FLOW_DISSECT_RET_OUT_BAD
+ */
+static enum flow_dissect_ret
+__skb_flow_dissect_batadv(const struct sk_buff *skb,
+			  struct flow_dissector_key_control *key_control,
+			  void *data, __be16 *p_proto, int *p_nhoff, int hlen,
+			  unsigned int flags)
+{
+	struct {
+		struct batadv_unicast_packet batadv_unicast;
+		struct ethhdr eth;
+	} *hdr, _hdr;
+
+	hdr = __skb_header_pointer(skb, *p_nhoff, sizeof(_hdr), data, hlen,
+				   &_hdr);
+	if (!hdr)
+		return FLOW_DISSECT_RET_OUT_BAD;
+
+	if (hdr->batadv_unicast.version != BATADV_COMPAT_VERSION)
+		return FLOW_DISSECT_RET_OUT_BAD;
+
+	if (hdr->batadv_unicast.packet_type != BATADV_UNICAST)
+		return FLOW_DISSECT_RET_OUT_BAD;
+
+	*p_proto = hdr->eth.h_proto;
+	*p_nhoff += sizeof(*hdr);
+
+	key_control->flags |= FLOW_DIS_ENCAPSULATION;
+	if (flags & FLOW_DISSECTOR_F_STOP_AT_ENCAP)
+		return FLOW_DISSECT_RET_OUT_GOOD;
+
+	return FLOW_DISSECT_RET_PROTO_AGAIN;
+}
+
 static void
 __skb_flow_dissect_tcp(const struct sk_buff *skb,
 		       struct flow_dissector *flow_dissector,
@@ -576,9 +629,6 @@ bool __skb_flow_dissect(const struct sk_buff *skb,
 					      FLOW_DISSECTOR_KEY_BASIC,
 					      target_container);
 
-	__skb_flow_dissect_tunnel_info(skb, flow_dissector,
-				       target_container);
-
 	if (dissector_uses_key(flow_dissector,
 			       FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
 		struct ethhdr *eth = eth_hdr(skb);
@@ -817,6 +867,11 @@ bool __skb_flow_dissect(const struct sk_buff *skb,
 					       nhoff, hlen);
 		break;
 
+	case htons(ETH_P_BATMAN):
+		fdret = __skb_flow_dissect_batadv(skb, key_control, data,
+						  &proto, &nhoff, hlen, flags);
+		break;
+
 	default:
 		fdret = FLOW_DISSECT_RET_OUT_BAD;
 		break;
diff --git a/net/core/gen_stats.c b/net/core/gen_stats.c
index 87f2855..b2b2323b 100644
--- a/net/core/gen_stats.c
+++ b/net/core/gen_stats.c
@@ -252,10 +252,10 @@ __gnet_stats_copy_queue_cpu(struct gnet_stats_queue *qstats,
 	}
 }
 
-static void __gnet_stats_copy_queue(struct gnet_stats_queue *qstats,
-				    const struct gnet_stats_queue __percpu *cpu,
-				    const struct gnet_stats_queue *q,
-				    __u32 qlen)
+void __gnet_stats_copy_queue(struct gnet_stats_queue *qstats,
+			     const struct gnet_stats_queue __percpu *cpu,
+			     const struct gnet_stats_queue *q,
+			     __u32 qlen)
 {
 	if (cpu) {
 		__gnet_stats_copy_queue_cpu(qstats, cpu);
@@ -269,6 +269,7 @@ static void __gnet_stats_copy_queue(struct gnet_stats_queue *qstats,
 
 	qstats->qlen = qlen;
 }
+EXPORT_SYMBOL(__gnet_stats_copy_queue);
 
 /**
  * gnet_stats_copy_queue - copy queue statistics into statistics TLV
diff --git a/net/core/pktgen.c b/net/core/pktgen.c
index f95a150..b9ce241c 100644
--- a/net/core/pktgen.c
+++ b/net/core/pktgen.c
@@ -399,7 +399,7 @@ struct pktgen_dev {
 	__u8	ipsmode;		/* IPSEC mode (config) */
 	__u8	ipsproto;		/* IPSEC type (config) */
 	__u32	spi;
-	struct dst_entry dst;
+	struct xfrm_dst xdst;
 	struct dst_ops dstops;
 #endif
 	char result[512];
@@ -2609,7 +2609,7 @@ static int pktgen_output_ipsec(struct sk_buff *skb, struct pktgen_dev *pkt_dev)
 	 * supports both transport/tunnel mode + ESP/AH type.
 	 */
 	if ((x->props.mode == XFRM_MODE_TUNNEL) && (pkt_dev->spi != 0))
-		skb->_skb_refdst = (unsigned long)&pkt_dev->dst | SKB_DST_NOREF;
+		skb->_skb_refdst = (unsigned long)&pkt_dev->xdst.u.dst | SKB_DST_NOREF;
 
 	rcu_read_lock_bh();
 	err = x->outer_mode->output(x, skb);
@@ -3742,10 +3742,10 @@ static int pktgen_add_device(struct pktgen_thread *t, const char *ifname)
 	 * performance under such circumstance.
 	 */
 	pkt_dev->dstops.family = AF_INET;
-	pkt_dev->dst.dev = pkt_dev->odev;
-	dst_init_metrics(&pkt_dev->dst, pktgen_dst_metrics, false);
-	pkt_dev->dst.child = &pkt_dev->dst;
-	pkt_dev->dst.ops = &pkt_dev->dstops;
+	pkt_dev->xdst.u.dst.dev = pkt_dev->odev;
+	dst_init_metrics(&pkt_dev->xdst.u.dst, pktgen_dst_metrics, false);
+	pkt_dev->xdst.child = &pkt_dev->xdst.u.dst;
+	pkt_dev->xdst.u.dst.ops = &pkt_dev->dstops;
 #endif
 
 	return add_dev_to_thread(t, pkt_dev);
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
index 778d7f0..16d644a 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -62,7 +62,9 @@
 struct rtnl_link {
 	rtnl_doit_func		doit;
 	rtnl_dumpit_func	dumpit;
+	struct module		*owner;
 	unsigned int		flags;
+	struct rcu_head		rcu;
 };
 
 static DEFINE_MUTEX(rtnl_mutex);
@@ -127,8 +129,7 @@ bool lockdep_rtnl_is_held(void)
 EXPORT_SYMBOL(lockdep_rtnl_is_held);
 #endif /* #ifdef CONFIG_PROVE_LOCKING */
 
-static struct rtnl_link __rcu *rtnl_msg_handlers[RTNL_FAMILY_MAX + 1];
-static refcount_t rtnl_msg_handlers_ref[RTNL_FAMILY_MAX + 1];
+static struct rtnl_link *__rcu *rtnl_msg_handlers[RTNL_FAMILY_MAX + 1];
 
 static inline int rtm_msgindex(int msgtype)
 {
@@ -144,8 +145,101 @@ static inline int rtm_msgindex(int msgtype)
 	return msgindex;
 }
 
+static struct rtnl_link *rtnl_get_link(int protocol, int msgtype)
+{
+	struct rtnl_link **tab;
+
+	if (protocol >= ARRAY_SIZE(rtnl_msg_handlers))
+		protocol = PF_UNSPEC;
+
+	tab = rcu_dereference_rtnl(rtnl_msg_handlers[protocol]);
+	if (!tab)
+		tab = rcu_dereference_rtnl(rtnl_msg_handlers[PF_UNSPEC]);
+
+	return tab[msgtype];
+}
+
+static int rtnl_register_internal(struct module *owner,
+				  int protocol, int msgtype,
+				  rtnl_doit_func doit, rtnl_dumpit_func dumpit,
+				  unsigned int flags)
+{
+	struct rtnl_link *link, *old;
+	struct rtnl_link __rcu **tab;
+	int msgindex;
+	int ret = -ENOBUFS;
+
+	BUG_ON(protocol < 0 || protocol > RTNL_FAMILY_MAX);
+	msgindex = rtm_msgindex(msgtype);
+
+	rtnl_lock();
+	tab = rtnl_msg_handlers[protocol];
+	if (tab == NULL) {
+		tab = kcalloc(RTM_NR_MSGTYPES, sizeof(void *), GFP_KERNEL);
+		if (!tab)
+			goto unlock;
+
+		/* ensures we see the 0 stores */
+		rcu_assign_pointer(rtnl_msg_handlers[protocol], tab);
+	}
+
+	old = rtnl_dereference(tab[msgindex]);
+	if (old) {
+		link = kmemdup(old, sizeof(*old), GFP_KERNEL);
+		if (!link)
+			goto unlock;
+	} else {
+		link = kzalloc(sizeof(*link), GFP_KERNEL);
+		if (!link)
+			goto unlock;
+	}
+
+	WARN_ON(link->owner && link->owner != owner);
+	link->owner = owner;
+
+	WARN_ON(doit && link->doit && link->doit != doit);
+	if (doit)
+		link->doit = doit;
+	WARN_ON(dumpit && link->dumpit && link->dumpit != dumpit);
+	if (dumpit)
+		link->dumpit = dumpit;
+
+	link->flags |= flags;
+
+	/* publish protocol:msgtype */
+	rcu_assign_pointer(tab[msgindex], link);
+	ret = 0;
+	if (old)
+		kfree_rcu(old, rcu);
+unlock:
+	rtnl_unlock();
+	return ret;
+}
+
 /**
- * __rtnl_register - Register a rtnetlink message type
+ * rtnl_register_module - Register a rtnetlink message type
+ *
+ * @owner: module registering the hook (THIS_MODULE)
+ * @protocol: Protocol family or PF_UNSPEC
+ * @msgtype: rtnetlink message type
+ * @doit: Function pointer called for each request message
+ * @dumpit: Function pointer called for each dump request (NLM_F_DUMP) message
+ * @flags: rtnl_link_flags to modify behaviour of doit/dumpit functions
+ *
+ * Like rtnl_register, but for use by removable modules.
+ */
+int rtnl_register_module(struct module *owner,
+			 int protocol, int msgtype,
+			 rtnl_doit_func doit, rtnl_dumpit_func dumpit,
+			 unsigned int flags)
+{
+	return rtnl_register_internal(owner, protocol, msgtype,
+				      doit, dumpit, flags);
+}
+EXPORT_SYMBOL_GPL(rtnl_register_module);
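
Unlike rtnl_register(), the owner-aware variant lets rtnetlink_rcv_msg()
pin the module with try_module_get() while a doit or dump runs, so handlers
from removable modules can no longer disappear mid-call. A minimal sketch
mirroring the DECnet conversion later in this patch (foo_doit/foo_dumpit
are hypothetical):

    static int __init foo_init(void)
    {
            return rtnl_register_module(THIS_MODULE, PF_DECnet, RTM_GETROUTE,
                                        foo_doit, foo_dumpit, 0);
    }

    static void __exit foo_exit(void)
    {
            rtnl_unregister(PF_DECnet, RTM_GETROUTE);
    }
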
+
+/**
+ * rtnl_register - Register a rtnetlink message type
  * @protocol: Protocol family or PF_UNSPEC
  * @msgtype: rtnetlink message type
  * @doit: Function pointer called for each request message
@@ -159,57 +253,19 @@ static inline int rtm_msgindex(int msgtype)
  * The special protocol family PF_UNSPEC may be used to define fallback
  * function pointers for the case when no entry for the specific protocol
  * family exists.
- *
- * Returns 0 on success or a negative error code.
- */
-int __rtnl_register(int protocol, int msgtype,
-		    rtnl_doit_func doit, rtnl_dumpit_func dumpit,
-		    unsigned int flags)
-{
-	struct rtnl_link *tab;
-	int msgindex;
-
-	BUG_ON(protocol < 0 || protocol > RTNL_FAMILY_MAX);
-	msgindex = rtm_msgindex(msgtype);
-
-	tab = rcu_dereference_raw(rtnl_msg_handlers[protocol]);
-	if (tab == NULL) {
-		tab = kcalloc(RTM_NR_MSGTYPES, sizeof(*tab), GFP_KERNEL);
-		if (tab == NULL)
-			return -ENOBUFS;
-
-		rcu_assign_pointer(rtnl_msg_handlers[protocol], tab);
-	}
-
-	if (doit)
-		tab[msgindex].doit = doit;
-	if (dumpit)
-		tab[msgindex].dumpit = dumpit;
-	tab[msgindex].flags |= flags;
-
-	return 0;
-}
-EXPORT_SYMBOL_GPL(__rtnl_register);
-
-/**
- * rtnl_register - Register a rtnetlink message type
- *
- * Identical to __rtnl_register() but panics on failure. This is useful
- * as failure of this function is very unlikely, it can only happen due
- * to lack of memory when allocating the chain to store all message
- * handlers for a protocol. Meant for use in init functions where lack
- * of memory implies no sense in continuing.
  */
 void rtnl_register(int protocol, int msgtype,
 		   rtnl_doit_func doit, rtnl_dumpit_func dumpit,
 		   unsigned int flags)
 {
-	if (__rtnl_register(protocol, msgtype, doit, dumpit, flags) < 0)
-		panic("Unable to register rtnetlink message handler, "
-		      "protocol = %d, message type = %d\n",
-		      protocol, msgtype);
+	int err;
+
+	err = rtnl_register_internal(NULL, protocol, msgtype, doit, dumpit,
+				     flags);
+	if (err)
+		pr_err("Unable to register rtnetlink message handler, "
+		       "protocol = %d, message type = %d\n", protocol, msgtype);
 }
-EXPORT_SYMBOL_GPL(rtnl_register);
 
 /**
  * rtnl_unregister - Unregister a rtnetlink message type
@@ -220,24 +276,25 @@ EXPORT_SYMBOL_GPL(rtnl_register);
  */
 int rtnl_unregister(int protocol, int msgtype)
 {
-	struct rtnl_link *handlers;
+	struct rtnl_link **tab, *link;
 	int msgindex;
 
 	BUG_ON(protocol < 0 || protocol > RTNL_FAMILY_MAX);
 	msgindex = rtm_msgindex(msgtype);
 
 	rtnl_lock();
-	handlers = rtnl_dereference(rtnl_msg_handlers[protocol]);
-	if (!handlers) {
+	tab = rtnl_dereference(rtnl_msg_handlers[protocol]);
+	if (!tab) {
 		rtnl_unlock();
 		return -ENOENT;
 	}
 
-	handlers[msgindex].doit = NULL;
-	handlers[msgindex].dumpit = NULL;
-	handlers[msgindex].flags = 0;
+	link = tab[msgindex];
+	rcu_assign_pointer(tab[msgindex], NULL);
 	rtnl_unlock();
 
+	kfree_rcu(link, rcu);
+
 	return 0;
 }
 EXPORT_SYMBOL_GPL(rtnl_unregister);
@@ -251,20 +308,27 @@ EXPORT_SYMBOL_GPL(rtnl_unregister);
  */
 void rtnl_unregister_all(int protocol)
 {
-	struct rtnl_link *handlers;
+	struct rtnl_link **tab, *link;
+	int msgindex;
 
 	BUG_ON(protocol < 0 || protocol > RTNL_FAMILY_MAX);
 
 	rtnl_lock();
-	handlers = rtnl_dereference(rtnl_msg_handlers[protocol]);
+	tab = rtnl_msg_handlers[protocol];
 	RCU_INIT_POINTER(rtnl_msg_handlers[protocol], NULL);
+	for (msgindex = 0; msgindex < RTM_NR_MSGTYPES; msgindex++) {
+		link = tab[msgindex];
+		if (!link)
+			continue;
+
+		rcu_assign_pointer(tab[msgindex], NULL);
+		kfree_rcu(link, rcu);
+	}
 	rtnl_unlock();
 
 	synchronize_net();
 
-	while (refcount_read(&rtnl_msg_handlers_ref[protocol]) > 1)
-		schedule();
-	kfree(handlers);
+	kfree(tab);
 }
 EXPORT_SYMBOL_GPL(rtnl_unregister_all);
 
@@ -840,6 +904,10 @@ static inline int rtnl_vfinfo_size(const struct net_device *dev,
 			 nla_total_size_64bit(sizeof(__u64)) +
 			 /* IFLA_VF_STATS_MULTICAST */
 			 nla_total_size_64bit(sizeof(__u64)) +
+			 /* IFLA_VF_STATS_RX_DROPPED */
+			 nla_total_size_64bit(sizeof(__u64)) +
+			 /* IFLA_VF_STATS_TX_DROPPED */
+			 nla_total_size_64bit(sizeof(__u64)) +
 			 nla_total_size(sizeof(struct ifla_vf_trust)));
 		return size;
 	} else
@@ -1194,7 +1262,11 @@ static noinline_for_stack int rtnl_fill_vfinfo(struct sk_buff *skb,
 	    nla_put_u64_64bit(skb, IFLA_VF_STATS_BROADCAST,
 			      vf_stats.broadcast, IFLA_VF_STATS_PAD) ||
 	    nla_put_u64_64bit(skb, IFLA_VF_STATS_MULTICAST,
-			      vf_stats.multicast, IFLA_VF_STATS_PAD)) {
+			      vf_stats.multicast, IFLA_VF_STATS_PAD) ||
+	    nla_put_u64_64bit(skb, IFLA_VF_STATS_RX_DROPPED,
+			      vf_stats.rx_dropped, IFLA_VF_STATS_PAD) ||
+	    nla_put_u64_64bit(skb, IFLA_VF_STATS_TX_DROPPED,
+			      vf_stats.tx_dropped, IFLA_VF_STATS_PAD)) {
 		nla_nest_cancel(skb, vfstats);
 		goto nla_put_vf_failure;
 	}
@@ -1261,6 +1333,7 @@ static u8 rtnl_xdp_attached_mode(struct net_device *dev, u32 *prog_id)
 {
 	const struct net_device_ops *ops = dev->netdev_ops;
 	const struct bpf_prog *generic_xdp_prog;
+	struct netdev_bpf xdp;
 
 	ASSERT_RTNL();
 
@@ -1273,7 +1346,10 @@ static u8 rtnl_xdp_attached_mode(struct net_device *dev, u32 *prog_id)
 	if (!ops->ndo_bpf)
 		return XDP_ATTACHED_NONE;
 
-	return __dev_xdp_attached(dev, ops->ndo_bpf, prog_id);
+	__dev_xdp_query(dev, ops->ndo_bpf, &xdp);
+	*prog_id = xdp.prog_id;
+
+	return xdp.prog_attached;
 }
 
 static int rtnl_xdp_fill(struct sk_buff *skb, struct net_device *dev)
@@ -1569,6 +1645,8 @@ static const struct nla_policy ifla_policy[IFLA_MAX+1] = {
 	[IFLA_PROMISCUITY]	= { .type = NLA_U32 },
 	[IFLA_NUM_TX_QUEUES]	= { .type = NLA_U32 },
 	[IFLA_NUM_RX_QUEUES]	= { .type = NLA_U32 },
+	[IFLA_GSO_MAX_SEGS]	= { .type = NLA_U32 },
+	[IFLA_GSO_MAX_SIZE]	= { .type = NLA_U32 },
 	[IFLA_PHYS_PORT_ID]	= { .type = NLA_BINARY, .len = MAX_PHYS_ITEM_ID_LEN },
 	[IFLA_CARRIER_CHANGES]	= { .type = NLA_U32 },  /* ignored */
 	[IFLA_PHYS_SWITCH_ID]	= { .type = NLA_BINARY, .len = MAX_PHYS_ITEM_ID_LEN },
@@ -2219,6 +2297,34 @@ static int do_setlink(const struct sk_buff *skb,
 		}
 	}
 
+	if (tb[IFLA_GSO_MAX_SIZE]) {
+		u32 max_size = nla_get_u32(tb[IFLA_GSO_MAX_SIZE]);
+
+		if (max_size > GSO_MAX_SIZE) {
+			err = -EINVAL;
+			goto errout;
+		}
+
+		if (dev->gso_max_size ^ max_size) {
+			netif_set_gso_max_size(dev, max_size);
+			status |= DO_SETLINK_MODIFIED;
+		}
+	}
+
+	if (tb[IFLA_GSO_MAX_SEGS]) {
+		u32 max_segs = nla_get_u32(tb[IFLA_GSO_MAX_SEGS]);
+
+		if (max_segs > GSO_MAX_SEGS) {
+			err = -EINVAL;
+			goto errout;
+		}
+
+		if (dev->gso_max_segs ^ max_segs) {
+			dev->gso_max_segs = max_segs;
+			status |= DO_SETLINK_MODIFIED;
+		}
+	}
+
 	if (tb[IFLA_OPERSTATE])
 		set_operstate(dev, nla_get_u8(tb[IFLA_OPERSTATE]));
 
@@ -2583,6 +2689,10 @@ struct net_device *rtnl_create_link(struct net *net,
 		dev->link_mode = nla_get_u8(tb[IFLA_LINKMODE]);
 	if (tb[IFLA_GROUP])
 		dev_set_group(dev, nla_get_u32(tb[IFLA_GROUP]));
+	if (tb[IFLA_GSO_MAX_SIZE])
+		netif_set_gso_max_size(dev, nla_get_u32(tb[IFLA_GSO_MAX_SIZE]));
+	if (tb[IFLA_GSO_MAX_SEGS])
+		dev->gso_max_segs = nla_get_u32(tb[IFLA_GSO_MAX_SEGS]);
 
 	return dev;
 }
@@ -2973,18 +3083,26 @@ static int rtnl_dump_all(struct sk_buff *skb, struct netlink_callback *cb)
 		s_idx = 1;
 
 	for (idx = 1; idx <= RTNL_FAMILY_MAX; idx++) {
+		struct rtnl_link **tab;
 		int type = cb->nlh->nlmsg_type-RTM_BASE;
-		struct rtnl_link *handlers;
+		struct rtnl_link *link;
 		rtnl_dumpit_func dumpit;
 
 		if (idx < s_idx || idx == PF_PACKET)
 			continue;
 
-		handlers = rtnl_dereference(rtnl_msg_handlers[idx]);
-		if (!handlers)
+		if (type < 0 || type >= RTM_NR_MSGTYPES)
 			continue;
 
-		dumpit = READ_ONCE(handlers[type].dumpit);
+		tab = rcu_dereference_rtnl(rtnl_msg_handlers[idx]);
+		if (!tab)
+			continue;
+
+		link = tab[type];
+		if (!link)
+			continue;
+
+		dumpit = link->dumpit;
 		if (!dumpit)
 			continue;
 
@@ -4314,7 +4432,8 @@ static int rtnetlink_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh,
 			     struct netlink_ext_ack *extack)
 {
 	struct net *net = sock_net(skb->sk);
-	struct rtnl_link *handlers;
+	struct rtnl_link *link;
+	struct module *owner;
 	int err = -EOPNOTSUPP;
 	rtnl_doit_func doit;
 	unsigned int flags;
@@ -4338,79 +4457,85 @@ static int rtnetlink_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh,
 	if (kind != 2 && !netlink_net_capable(skb, CAP_NET_ADMIN))
 		return -EPERM;
 
-	if (family >= ARRAY_SIZE(rtnl_msg_handlers))
-		family = PF_UNSPEC;
-
 	rcu_read_lock();
-	handlers = rcu_dereference(rtnl_msg_handlers[family]);
-	if (!handlers) {
-		family = PF_UNSPEC;
-		handlers = rcu_dereference(rtnl_msg_handlers[family]);
-	}
-
 	if (kind == 2 && nlh->nlmsg_flags&NLM_F_DUMP) {
 		struct sock *rtnl;
 		rtnl_dumpit_func dumpit;
 		u16 min_dump_alloc = 0;
 
-		dumpit = READ_ONCE(handlers[type].dumpit);
-		if (!dumpit) {
+		link = rtnl_get_link(family, type);
+		if (!link || !link->dumpit) {
 			family = PF_UNSPEC;
-			handlers = rcu_dereference(rtnl_msg_handlers[PF_UNSPEC]);
-			if (!handlers)
-				goto err_unlock;
-
-			dumpit = READ_ONCE(handlers[type].dumpit);
-			if (!dumpit)
+			link = rtnl_get_link(family, type);
+			if (!link || !link->dumpit)
 				goto err_unlock;
 		}
-
-		refcount_inc(&rtnl_msg_handlers_ref[family]);
+		owner = link->owner;
+		dumpit = link->dumpit;
 
 		if (type == RTM_GETLINK - RTM_BASE)
 			min_dump_alloc = rtnl_calcit(skb, nlh);
 
+		err = 0;
+		/* need to do this before rcu_read_unlock() */
+		if (!try_module_get(owner))
+			err = -EPROTONOSUPPORT;
+
 		rcu_read_unlock();
 
 		rtnl = net->rtnl;
-		{
+		if (err == 0) {
 			struct netlink_dump_control c = {
 				.dump		= dumpit,
 				.min_dump_alloc	= min_dump_alloc,
+				.module		= owner,
 			};
 			err = netlink_dump_start(rtnl, skb, nlh, &c);
+			/* netlink_dump_start() will keep a reference on
+			 * the module while the dump is still in progress.
+			 */
+			module_put(owner);
 		}
-		refcount_dec(&rtnl_msg_handlers_ref[family]);
 		return err;
 	}
 
-	doit = READ_ONCE(handlers[type].doit);
-	if (!doit) {
+	link = rtnl_get_link(family, type);
+	if (!link || !link->doit) {
 		family = PF_UNSPEC;
-		handlers = rcu_dereference(rtnl_msg_handlers[family]);
+		link = rtnl_get_link(PF_UNSPEC, type);
+		if (!link || !link->doit)
+			goto out_unlock;
 	}
 
-	flags = READ_ONCE(handlers[type].flags);
+	owner = link->owner;
+	if (!try_module_get(owner)) {
+		err = -EPROTONOSUPPORT;
+		goto out_unlock;
+	}
+
+	flags = link->flags;
 	if (flags & RTNL_FLAG_DOIT_UNLOCKED) {
-		refcount_inc(&rtnl_msg_handlers_ref[family]);
-		doit = READ_ONCE(handlers[type].doit);
+		doit = link->doit;
 		rcu_read_unlock();
 		if (doit)
 			err = doit(skb, nlh, extack);
-		refcount_dec(&rtnl_msg_handlers_ref[family]);
+		module_put(owner);
 		return err;
 	}
-
 	rcu_read_unlock();
 
 	rtnl_lock();
-	handlers = rtnl_dereference(rtnl_msg_handlers[family]);
-	if (handlers) {
-		doit = READ_ONCE(handlers[type].doit);
-		if (doit)
-			err = doit(skb, nlh, extack);
-	}
+	link = rtnl_get_link(family, type);
+	if (link && link->doit)
+		err = link->doit(skb, nlh, extack);
 	rtnl_unlock();
+
+	module_put(owner);
+
+	return err;
+
+out_unlock:
+	rcu_read_unlock();
 	return err;
 
 err_unlock:
@@ -4498,11 +4623,6 @@ static struct pernet_operations rtnetlink_net_ops = {
 
 void __init rtnetlink_init(void)
 {
-	int i;
-
-	for (i = 0; i < ARRAY_SIZE(rtnl_msg_handlers_ref); i++)
-		refcount_set(&rtnl_msg_handlers_ref[i], 1);
-
 	if (register_pernet_subsys(&rtnetlink_net_ops))
 		panic("rtnetlink_init: cannot initialize rtnetlink\n");
 
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 08f5740..01e8285 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -3656,6 +3656,10 @@ struct sk_buff *skb_segment(struct sk_buff *head_skb,
 		skb_shinfo(nskb)->tx_flags |= skb_shinfo(head_skb)->tx_flags &
 					      SKBTX_SHARED_FRAG;
 
+		if (skb_orphan_frags(frag_skb, GFP_ATOMIC) ||
+		    skb_zerocopy_clone(nskb, frag_skb, GFP_ATOMIC))
+			goto err;
+
 		while (pos < offset + len) {
 			if (i >= nfrags) {
 				BUG_ON(skb_headlen(list_skb));
@@ -3667,6 +3671,11 @@ struct sk_buff *skb_segment(struct sk_buff *head_skb,
 
 				BUG_ON(!nfrags);
 
+				if (skb_orphan_frags(frag_skb, GFP_ATOMIC) ||
+				    skb_zerocopy_clone(nskb, frag_skb,
+						       GFP_ATOMIC))
+					goto err;
+
 				list_skb = list_skb->next;
 			}
 
@@ -3678,11 +3687,6 @@ struct sk_buff *skb_segment(struct sk_buff *head_skb,
 				goto err;
 			}
 
-			if (unlikely(skb_orphan_frags(frag_skb, GFP_ATOMIC)))
-				goto err;
-			if (skb_zerocopy_clone(nskb, frag_skb, GFP_ATOMIC))
-				goto err;
-
 			*nskb_frag = *frag;
 			__skb_frag_ref(nskb_frag);
 			size = skb_frag_size(nskb_frag);
diff --git a/net/core/sock.c b/net/core/sock.c
index c0b5b2f..72d14b2 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -145,6 +145,8 @@
 static DEFINE_MUTEX(proto_list_mutex);
 static LIST_HEAD(proto_list);
 
+static void sock_inuse_add(struct net *net, int val);
+
 /**
  * sk_ns_capable - General socket capability test
  * @sk: Socket to use a capability on or through
@@ -1531,8 +1533,11 @@ struct sock *sk_alloc(struct net *net, int family, gfp_t priority,
 		sk->sk_kern_sock = kern;
 		sock_lock_init(sk);
 		sk->sk_net_refcnt = kern ? 0 : 1;
-		if (likely(sk->sk_net_refcnt))
+		if (likely(sk->sk_net_refcnt)) {
 			get_net(net);
+			sock_inuse_add(net, 1);
+		}
+
 		sock_net_set(sk, net);
 		refcount_set(&sk->sk_wmem_alloc, 1);
 
@@ -1595,6 +1600,9 @@ void sk_destruct(struct sock *sk)
 
 static void __sk_free(struct sock *sk)
 {
+	if (likely(sk->sk_net_refcnt))
+		sock_inuse_add(sock_net(sk), -1);
+
 	if (unlikely(sock_diag_has_destroy_listeners(sk) && sk->sk_net_refcnt))
 		sock_diag_broadcast_destroy(sk);
 	else
@@ -1716,6 +1724,8 @@ struct sock *sk_clone_lock(const struct sock *sk, const gfp_t priority)
 		newsk->sk_priority = 0;
 		newsk->sk_incoming_cpu = raw_smp_processor_id();
 		atomic64_set(&newsk->sk_cookie, 0);
+		if (likely(newsk->sk_net_refcnt))
+			sock_inuse_add(sock_net(newsk), 1);
 
 		/*
 		 * Before updating sk_refcnt, we must commit prior changes to memory
@@ -3045,7 +3055,7 @@ static DECLARE_BITMAP(proto_inuse_idx, PROTO_INUSE_NR);
 
 void sock_prot_inuse_add(struct net *net, struct proto *prot, int val)
 {
-	__this_cpu_add(net->core.inuse->val[prot->inuse_idx], val);
+	__this_cpu_add(net->core.prot_inuse->val[prot->inuse_idx], val);
 }
 EXPORT_SYMBOL_GPL(sock_prot_inuse_add);
 
@@ -3055,21 +3065,50 @@ int sock_prot_inuse_get(struct net *net, struct proto *prot)
 	int res = 0;
 
 	for_each_possible_cpu(cpu)
-		res += per_cpu_ptr(net->core.inuse, cpu)->val[idx];
+		res += per_cpu_ptr(net->core.prot_inuse, cpu)->val[idx];
 
 	return res >= 0 ? res : 0;
 }
 EXPORT_SYMBOL_GPL(sock_prot_inuse_get);
 
+static void sock_inuse_add(struct net *net, int val)
+{
+	this_cpu_add(*net->core.sock_inuse, val);
+}
+
+int sock_inuse_get(struct net *net)
+{
+	int cpu, res = 0;
+
+	for_each_possible_cpu(cpu)
+		res += *per_cpu_ptr(net->core.sock_inuse, cpu);
+
+	return res;
+}
+EXPORT_SYMBOL_GPL(sock_inuse_get);
+
 static int __net_init sock_inuse_init_net(struct net *net)
 {
-	net->core.inuse = alloc_percpu(struct prot_inuse);
-	return net->core.inuse ? 0 : -ENOMEM;
+	net->core.prot_inuse = alloc_percpu(struct prot_inuse);
+	if (net->core.prot_inuse == NULL)
+		return -ENOMEM;
+
+	net->core.sock_inuse = alloc_percpu(int);
+	if (net->core.sock_inuse == NULL)
+		goto out;
+
+	return 0;
+
+out:
+	free_percpu(net->core.prot_inuse);
+	return -ENOMEM;
 }
 
 static void __net_exit sock_inuse_exit_net(struct net *net)
 {
-	free_percpu(net->core.inuse);
+	free_percpu(net->core.prot_inuse);
+	free_percpu(net->core.sock_inuse);
 }
 
 static struct pernet_operations net_inuse_ops = {
@@ -3112,6 +3151,10 @@ static inline void assign_proto_idx(struct proto *prot)
 static inline void release_proto_idx(struct proto *prot)
 {
 }
+
+static void sock_inuse_add(struct net *net, int val)
+{
+}
 #endif
 
 static void req_prot_cleanup(struct request_sock_ops *rsk_prot)
diff --git a/net/core/sock_reuseport.c b/net/core/sock_reuseport.c
index 5eeb1d2..c5bb52b 100644
--- a/net/core/sock_reuseport.c
+++ b/net/core/sock_reuseport.c
@@ -235,7 +235,9 @@ struct sock *reuseport_select_sock(struct sock *sk,
 
 		if (prog && skb)
 			sk2 = run_bpf(reuse, socks, prog, skb, hdr_len);
-		else
+
+		/* no bpf or invalid bpf result: fall back to hash usage */
+		if (!sk2)
 			sk2 = reuse->socks[reciprocal_scale(hash, socks)];
 	}
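
Previously the hash pick only ran when no BPF program was attached; with
this change a NULL result from run_bpf() (for example when the program
returned an out-of-range index) also falls back to the hash selection. A
hedged user-space sketch of attaching such a selector (prog_fd is assumed
to be a loaded BPF_PROG_TYPE_SOCKET_FILTER program fd):

    #include <sys/socket.h>

    static int attach_selector(int sk, int prog_fd)
    {
            /* from here on, an invalid index from the program means
             * "fall back to hash" rather than a lost packet
             */
            return setsockopt(sk, SOL_SOCKET, SO_ATTACH_REUSEPORT_EBPF,
                              &prog_fd, sizeof(prog_fd));
    }
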
 
diff --git a/net/core/xdp.c b/net/core/xdp.c
new file mode 100644
index 0000000..097a0f7
--- /dev/null
+++ b/net/core/xdp.c
@@ -0,0 +1,73 @@
+/* net/core/xdp.c
+ *
+ * Copyright (c) 2017 Jesper Dangaard Brouer, Red Hat Inc.
+ * Released under terms in GPL version 2.  See COPYING.
+ */
+#include <linux/types.h>
+#include <linux/mm.h>
+
+#include <net/xdp.h>
+
+#define REG_STATE_NEW		0x0
+#define REG_STATE_REGISTERED	0x1
+#define REG_STATE_UNREGISTERED	0x2
+#define REG_STATE_UNUSED	0x3
+
+void xdp_rxq_info_unreg(struct xdp_rxq_info *xdp_rxq)
+{
+	/* Simplify driver cleanup code paths, allow unreg "unused" */
+	if (xdp_rxq->reg_state == REG_STATE_UNUSED)
+		return;
+
+	WARN(xdp_rxq->reg_state != REG_STATE_REGISTERED, "Driver BUG");
+
+	xdp_rxq->reg_state = REG_STATE_UNREGISTERED;
+	xdp_rxq->dev = NULL;
+}
+EXPORT_SYMBOL_GPL(xdp_rxq_info_unreg);
+
+static void xdp_rxq_info_init(struct xdp_rxq_info *xdp_rxq)
+{
+	memset(xdp_rxq, 0, sizeof(*xdp_rxq));
+}
+
+/* Returns 0 on success, negative on failure */
+int xdp_rxq_info_reg(struct xdp_rxq_info *xdp_rxq,
+		     struct net_device *dev, u32 queue_index)
+{
+	if (xdp_rxq->reg_state == REG_STATE_UNUSED) {
+		WARN(1, "Driver promised not to register this");
+		return -EINVAL;
+	}
+
+	if (xdp_rxq->reg_state == REG_STATE_REGISTERED) {
+		WARN(1, "Missing unregister, handled but fix driver");
+		xdp_rxq_info_unreg(xdp_rxq);
+	}
+
+	if (!dev) {
+		WARN(1, "Missing net_device from driver");
+		return -ENODEV;
+	}
+
+	/* State either UNREGISTERED or NEW */
+	xdp_rxq_info_init(xdp_rxq);
+	xdp_rxq->dev = dev;
+	xdp_rxq->queue_index = queue_index;
+
+	xdp_rxq->reg_state = REG_STATE_REGISTERED;
+	return 0;
+}
+EXPORT_SYMBOL_GPL(xdp_rxq_info_reg);
+
+void xdp_rxq_info_unused(struct xdp_rxq_info *xdp_rxq)
+{
+	xdp_rxq->reg_state = REG_STATE_UNUSED;
+}
+EXPORT_SYMBOL_GPL(xdp_rxq_info_unused);
+
+bool xdp_rxq_info_is_reg(struct xdp_rxq_info *xdp_rxq)
+{
+	return (xdp_rxq->reg_state == REG_STATE_REGISTERED);
+}
+EXPORT_SYMBOL_GPL(xdp_rxq_info_is_reg);
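
Drivers are expected to register one xdp_rxq_info per RX queue before the
queue can deliver packets to XDP, and to unregister it on teardown; the
netif_alloc_rx_queues()/netif_free_rx_queues() changes above do exactly
this for the generic-XDP path. A minimal driver-side sketch (struct my_ring
and the function names are hypothetical):

    #include <linux/netdevice.h>
    #include <net/xdp.h>

    struct my_ring {
            struct xdp_rxq_info xdp_rxq;    /* per-RX-queue XDP state */
    };

    static int my_setup_rx_ring(struct my_ring *ring,
                                struct net_device *dev, u32 qidx)
    {
            /* must succeed before any packet from this queue reaches XDP */
            return xdp_rxq_info_reg(&ring->xdp_rxq, dev, qidx);
    }

    static void my_free_rx_ring(struct my_ring *ring)
    {
            /* WARNs unless the queue was registered or was explicitly
             * marked with xdp_rxq_info_unused()
             */
            xdp_rxq_info_unreg(&ring->xdp_rxq);
    }
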
diff --git a/net/dccp/Kconfig b/net/dccp/Kconfig
index 8c0ef71..b270e84d 100644
--- a/net/dccp/Kconfig
+++ b/net/dccp/Kconfig
@@ -39,23 +39,6 @@
 
 	  Just say N.
 
-config NET_DCCPPROBE
-	tristate "DCCP connection probing"
-	depends on PROC_FS && KPROBES
-	---help---
-	This module allows for capturing the changes to DCCP connection
-	state in response to incoming packets. It is used for debugging
-	DCCP congestion avoidance modules. If you don't understand
-	what was just said, you don't need it: say N.
-
-	Documentation on how to use DCCP connection probing can be found
-	at:
-	
-	  http://www.linuxfoundation.org/collaborate/workgroups/networking/dccpprobe
-
-	To compile this code as a module, choose M here: the
-	module will be called dccp_probe.
-
 
 endmenu
 
diff --git a/net/dccp/Makefile b/net/dccp/Makefile
index 2e7b560..5b4ff37 100644
--- a/net/dccp/Makefile
+++ b/net/dccp/Makefile
@@ -21,9 +21,10 @@
 dccp_ipv6-y := ipv6.o
 
 obj-$(CONFIG_INET_DCCP_DIAG) += dccp_diag.o
-obj-$(CONFIG_NET_DCCPPROBE) += dccp_probe.o
 
 dccp-$(CONFIG_SYSCTL) += sysctl.o
 
 dccp_diag-y := diag.o
-dccp_probe-y := probe.o
+
+# build with local directory for trace.h
+CFLAGS_proto.o := -I$(src)
diff --git a/net/dccp/ackvec.c b/net/dccp/ackvec.c
index 3de0d03..2a24f7d 100644
--- a/net/dccp/ackvec.c
+++ b/net/dccp/ackvec.c
@@ -228,7 +228,7 @@ static void dccp_ackvec_add_new(struct dccp_ackvec *av, u32 num_packets,
 	}
 
 	if (num_cells + dccp_ackvec_buflen(av) >= DCCPAV_MAX_ACKVEC_LEN) {
-		DCCP_CRIT("Ack Vector buffer overflow: dropping old entries\n");
+		DCCP_CRIT("Ack Vector buffer overflow: dropping old entries");
 		av->av_overflow = true;
 	}
 
diff --git a/net/dccp/minisocks.c b/net/dccp/minisocks.c
index 178bb98..37ccbe6 100644
--- a/net/dccp/minisocks.c
+++ b/net/dccp/minisocks.c
@@ -63,9 +63,10 @@ void dccp_time_wait(struct sock *sk, int state, int timeo)
 		 */
 		local_bh_disable();
 		inet_twsk_schedule(tw, timeo);
-		/* Linkage updates. */
-		__inet_twsk_hashdance(tw, sk, &dccp_hashinfo);
-		inet_twsk_put(tw);
+		/* Linkage updates.
+		 * Note that access to tw after this point is illegal.
+		 */
+		inet_twsk_hashdance(tw, sk, &dccp_hashinfo);
 		local_bh_enable();
 	} else {
 		/* Sorry, if we're out of memory, just CLOSE this
diff --git a/net/dccp/probe.c b/net/dccp/probe.c
deleted file mode 100644
index 3d3fda0..0000000
--- a/net/dccp/probe.c
+++ /dev/null
@@ -1,203 +0,0 @@
-/*
- * dccp_probe - Observe the DCCP flow with kprobes.
- *
- * The idea for this came from Werner Almesberger's umlsim
- * Copyright (C) 2004, Stephen Hemminger <shemminger@osdl.org>
- *
- * Modified for DCCP from Stephen Hemminger's code
- * Copyright (C) 2006, Ian McDonald <ian.mcdonald@jandi.co.nz>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- */
-
-#include <linux/kernel.h>
-#include <linux/kprobes.h>
-#include <linux/socket.h>
-#include <linux/dccp.h>
-#include <linux/proc_fs.h>
-#include <linux/module.h>
-#include <linux/kfifo.h>
-#include <linux/vmalloc.h>
-#include <linux/time64.h>
-#include <linux/gfp.h>
-#include <net/net_namespace.h>
-
-#include "dccp.h"
-#include "ccid.h"
-#include "ccids/ccid3.h"
-
-static int port;
-
-static int bufsize = 64 * 1024;
-
-static const char procname[] = "dccpprobe";
-
-static struct {
-	struct kfifo	  fifo;
-	spinlock_t	  lock;
-	wait_queue_head_t wait;
-	struct timespec64 tstart;
-} dccpw;
-
-static void printl(const char *fmt, ...)
-{
-	va_list args;
-	int len;
-	struct timespec64 now;
-	char tbuf[256];
-
-	va_start(args, fmt);
-	getnstimeofday64(&now);
-
-	now = timespec64_sub(now, dccpw.tstart);
-
-	len = sprintf(tbuf, "%lu.%06lu ",
-		      (unsigned long) now.tv_sec,
-		      (unsigned long) now.tv_nsec / NSEC_PER_USEC);
-	len += vscnprintf(tbuf+len, sizeof(tbuf)-len, fmt, args);
-	va_end(args);
-
-	kfifo_in_locked(&dccpw.fifo, tbuf, len, &dccpw.lock);
-	wake_up(&dccpw.wait);
-}
-
-static int jdccp_sendmsg(struct sock *sk, struct msghdr *msg, size_t size)
-{
-	const struct inet_sock *inet = inet_sk(sk);
-	struct ccid3_hc_tx_sock *hc = NULL;
-
-	if (ccid_get_current_tx_ccid(dccp_sk(sk)) == DCCPC_CCID3)
-		hc = ccid3_hc_tx_sk(sk);
-
-	if (port == 0 || ntohs(inet->inet_dport) == port ||
-	    ntohs(inet->inet_sport) == port) {
-		if (hc)
-			printl("%pI4:%u %pI4:%u %d %d %d %d %u %llu %llu %d\n",
-			       &inet->inet_saddr, ntohs(inet->inet_sport),
-			       &inet->inet_daddr, ntohs(inet->inet_dport), size,
-			       hc->tx_s, hc->tx_rtt, hc->tx_p,
-			       hc->tx_x_calc, hc->tx_x_recv >> 6,
-			       hc->tx_x >> 6, hc->tx_t_ipi);
-		else
-			printl("%pI4:%u %pI4:%u %d\n",
-			       &inet->inet_saddr, ntohs(inet->inet_sport),
-			       &inet->inet_daddr, ntohs(inet->inet_dport),
-			       size);
-	}
-
-	jprobe_return();
-	return 0;
-}
-
-static struct jprobe dccp_send_probe = {
-	.kp	= {
-		.symbol_name = "dccp_sendmsg",
-	},
-	.entry	= jdccp_sendmsg,
-};
-
-static int dccpprobe_open(struct inode *inode, struct file *file)
-{
-	kfifo_reset(&dccpw.fifo);
-	getnstimeofday64(&dccpw.tstart);
-	return 0;
-}
-
-static ssize_t dccpprobe_read(struct file *file, char __user *buf,
-			      size_t len, loff_t *ppos)
-{
-	int error = 0, cnt = 0;
-	unsigned char *tbuf;
-
-	if (!buf)
-		return -EINVAL;
-
-	if (len == 0)
-		return 0;
-
-	tbuf = vmalloc(len);
-	if (!tbuf)
-		return -ENOMEM;
-
-	error = wait_event_interruptible(dccpw.wait,
-					 kfifo_len(&dccpw.fifo) != 0);
-	if (error)
-		goto out_free;
-
-	cnt = kfifo_out_locked(&dccpw.fifo, tbuf, len, &dccpw.lock);
-	error = copy_to_user(buf, tbuf, cnt) ? -EFAULT : 0;
-
-out_free:
-	vfree(tbuf);
-
-	return error ? error : cnt;
-}
-
-static const struct file_operations dccpprobe_fops = {
-	.owner	 = THIS_MODULE,
-	.open	 = dccpprobe_open,
-	.read    = dccpprobe_read,
-	.llseek  = noop_llseek,
-};
-
-static __init int dccpprobe_init(void)
-{
-	int ret = -ENOMEM;
-
-	init_waitqueue_head(&dccpw.wait);
-	spin_lock_init(&dccpw.lock);
-	if (kfifo_alloc(&dccpw.fifo, bufsize, GFP_KERNEL))
-		return ret;
-	if (!proc_create(procname, S_IRUSR, init_net.proc_net, &dccpprobe_fops))
-		goto err0;
-
-	ret = register_jprobe(&dccp_send_probe);
-	if (ret) {
-		ret = request_module("dccp");
-		if (!ret)
-			ret = register_jprobe(&dccp_send_probe);
-	}
-
-	if (ret)
-		goto err1;
-
-	pr_info("DCCP watch registered (port=%d)\n", port);
-	return 0;
-err1:
-	remove_proc_entry(procname, init_net.proc_net);
-err0:
-	kfifo_free(&dccpw.fifo);
-	return ret;
-}
-module_init(dccpprobe_init);
-
-static __exit void dccpprobe_exit(void)
-{
-	kfifo_free(&dccpw.fifo);
-	remove_proc_entry(procname, init_net.proc_net);
-	unregister_jprobe(&dccp_send_probe);
-
-}
-module_exit(dccpprobe_exit);
-
-MODULE_PARM_DESC(port, "Port to match (0=all)");
-module_param(port, int, 0);
-
-MODULE_PARM_DESC(bufsize, "Log buffer size (default 64k)");
-module_param(bufsize, int, 0);
-
-MODULE_AUTHOR("Ian McDonald <ian.mcdonald@jandi.co.nz>");
-MODULE_DESCRIPTION("DCCP snooper");
-MODULE_LICENSE("GPL");
diff --git a/net/dccp/proto.c b/net/dccp/proto.c
index 9d43c1f..fa7e92e 100644
--- a/net/dccp/proto.c
+++ b/net/dccp/proto.c
@@ -38,6 +38,9 @@
 #include "dccp.h"
 #include "feat.h"
 
+#define CREATE_TRACE_POINTS
+#include "trace.h"
+
 DEFINE_SNMP_STAT(struct dccp_mib, dccp_statistics) __read_mostly;
 
 EXPORT_SYMBOL_GPL(dccp_statistics);
@@ -110,7 +113,7 @@ void dccp_set_state(struct sock *sk, const int state)
 	/* Change state AFTER socket is unhashed to avoid closed
 	 * socket sitting in hash tables.
 	 */
-	sk->sk_state = state;
+	inet_sk_set_state(sk, state);
 }
 
 EXPORT_SYMBOL_GPL(dccp_set_state);
@@ -761,6 +764,8 @@ int dccp_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
 	int rc, size;
 	long timeo;
 
+	trace_dccp_probe(sk, len);
+
 	if (len > dp->dccps_mss_cache)
 		return -EMSGSIZE;
 
diff --git a/net/dccp/trace.h b/net/dccp/trace.h
new file mode 100644
index 0000000..5062421
--- /dev/null
+++ b/net/dccp/trace.h
@@ -0,0 +1,84 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM dccp
+
+#if !defined(_TRACE_DCCP_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_DCCP_H
+
+#include <net/sock.h>
+#include "dccp.h"
+#include "ccids/ccid3.h"
+#include <linux/tracepoint.h>
+#include <trace/events/net_probe_common.h>
+
+TRACE_EVENT(dccp_probe,
+
+	TP_PROTO(struct sock *sk, size_t size),
+
+	TP_ARGS(sk, size),
+
+	TP_STRUCT__entry(
+		/* sockaddr_in6 is always bigger than sockaddr_in */
+		__array(__u8, saddr, sizeof(struct sockaddr_in6))
+		__array(__u8, daddr, sizeof(struct sockaddr_in6))
+		__field(__u16, sport)
+		__field(__u16, dport)
+		__field(__u16, size)
+		__field(__u16, tx_s)
+		__field(__u32, tx_rtt)
+		__field(__u32, tx_p)
+		__field(__u32, tx_x_calc)
+		__field(__u64, tx_x_recv)
+		__field(__u64, tx_x)
+		__field(__u32, tx_t_ipi)
+	),
+
+	TP_fast_assign(
+		const struct inet_sock *inet = inet_sk(sk);
+		struct ccid3_hc_tx_sock *hc = NULL;
+
+		if (ccid_get_current_tx_ccid(dccp_sk(sk)) == DCCPC_CCID3)
+			hc = ccid3_hc_tx_sk(sk);
+
+		memset(__entry->saddr, 0, sizeof(struct sockaddr_in6));
+		memset(__entry->daddr, 0, sizeof(struct sockaddr_in6));
+
+		TP_STORE_ADDR_PORTS(__entry, inet, sk);
+
+		/* For filtering use */
+		__entry->sport = ntohs(inet->inet_sport);
+		__entry->dport = ntohs(inet->inet_dport);
+
+		__entry->size = size;
+		if (hc) {
+			__entry->tx_s = hc->tx_s;
+			__entry->tx_rtt = hc->tx_rtt;
+			__entry->tx_p = hc->tx_p;
+			__entry->tx_x_calc = hc->tx_x_calc;
+			__entry->tx_x_recv = hc->tx_x_recv >> 6;
+			__entry->tx_x = hc->tx_x >> 6;
+			__entry->tx_t_ipi = hc->tx_t_ipi;
+		} else {
+			__entry->tx_s = 0;
+			memset(&__entry->tx_rtt, 0, (void *)&__entry->tx_t_ipi -
+			       (void *)&__entry->tx_rtt +
+			       sizeof(__entry->tx_t_ipi));
+		}
+	),
+
+	TP_printk("src=%pISpc dest=%pISpc size=%d tx_s=%d tx_rtt=%d "
+		  "tx_p=%d tx_x_calc=%u tx_x_recv=%llu tx_x=%llu tx_t_ipi=%d",
+		  __entry->saddr, __entry->daddr, __entry->size,
+		  __entry->tx_s, __entry->tx_rtt, __entry->tx_p,
+		  __entry->tx_x_calc, __entry->tx_x_recv, __entry->tx_x,
+		  __entry->tx_t_ipi)
+);
+
+#endif /* _TRACE_DCCP_H */
+
+/* This part must be outside protection */
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH .
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_FILE trace
+#include <trace/define_trace.h>
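
This tracepoint replaces the dccp_probe module removed above: the same
CCID3 state is now captured via TRACE_EVENT instead of a kprobe plus a
procfs kfifo. A hedged C sketch of enabling it through tracefs (the path
assumes tracefs is mounted under /sys/kernel/debug):

    #include <fcntl.h>
    #include <unistd.h>

    static int enable_dccp_probe(void)
    {
            int fd = open("/sys/kernel/debug/tracing/events/dccp/dccp_probe/enable",
                          O_WRONLY);

            if (fd < 0)
                    return -1;
            if (write(fd, "1", 1) != 1) {
                    close(fd);
                    return -1;
            }
            return close(fd);
    }
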
diff --git a/net/decnet/dn_dev.c b/net/decnet/dn_dev.c
index 9153247..d1885cf 100644
--- a/net/decnet/dn_dev.c
+++ b/net/decnet/dn_dev.c
@@ -1418,9 +1418,12 @@ void __init dn_dev_init(void)
 
 	dn_dev_devices_on();
 
-	rtnl_register(PF_DECnet, RTM_NEWADDR, dn_nl_newaddr, NULL, 0);
-	rtnl_register(PF_DECnet, RTM_DELADDR, dn_nl_deladdr, NULL, 0);
-	rtnl_register(PF_DECnet, RTM_GETADDR, NULL, dn_nl_dump_ifaddr, 0);
+	rtnl_register_module(THIS_MODULE, PF_DECnet, RTM_NEWADDR,
+			     dn_nl_newaddr, NULL, 0);
+	rtnl_register_module(THIS_MODULE, PF_DECnet, RTM_DELADDR,
+			     dn_nl_deladdr, NULL, 0);
+	rtnl_register_module(THIS_MODULE, PF_DECnet, RTM_GETADDR,
+			     NULL, dn_nl_dump_ifaddr, 0);
 
 	proc_create("decnet_dev", S_IRUGO, init_net.proc_net, &dn_dev_seq_fops);
 
diff --git a/net/decnet/dn_fib.c b/net/decnet/dn_fib.c
index b37a1b8..fce94cb 100644
--- a/net/decnet/dn_fib.c
+++ b/net/decnet/dn_fib.c
@@ -792,8 +792,10 @@ void __init dn_fib_init(void)
 
 	register_dnaddr_notifier(&dn_fib_dnaddr_notifier);
 
-	rtnl_register(PF_DECnet, RTM_NEWROUTE, dn_fib_rtm_newroute, NULL, 0);
-	rtnl_register(PF_DECnet, RTM_DELROUTE, dn_fib_rtm_delroute, NULL, 0);
+	rtnl_register_module(THIS_MODULE, PF_DECnet, RTM_NEWROUTE,
+			     dn_fib_rtm_newroute, NULL, 0);
+	rtnl_register_module(THIS_MODULE, PF_DECnet, RTM_DELROUTE,
+			     dn_fib_rtm_delroute, NULL, 0);
 }
 
 
diff --git a/net/decnet/dn_route.c b/net/decnet/dn_route.c
index 324cb9f..73160d4 100644
--- a/net/decnet/dn_route.c
+++ b/net/decnet/dn_route.c
@@ -199,11 +199,11 @@ static void dn_dst_check_expire(struct timer_list *unused)
 						lockdep_is_held(&dn_rt_hash_table[i].lock))) != NULL) {
 			if (atomic_read(&rt->dst.__refcnt) > 1 ||
 			    (now - rt->dst.lastuse) < expire) {
-				rtp = &rt->dst.dn_next;
+				rtp = &rt->dn_next;
 				continue;
 			}
-			*rtp = rt->dst.dn_next;
-			rt->dst.dn_next = NULL;
+			*rtp = rt->dn_next;
+			rt->dn_next = NULL;
 			dst_dev_put(&rt->dst);
 			dst_release(&rt->dst);
 		}
@@ -233,11 +233,11 @@ static int dn_dst_gc(struct dst_ops *ops)
 						lockdep_is_held(&dn_rt_hash_table[i].lock))) != NULL) {
 			if (atomic_read(&rt->dst.__refcnt) > 1 ||
 			    (now - rt->dst.lastuse) < expire) {
-				rtp = &rt->dst.dn_next;
+				rtp = &rt->dn_next;
 				continue;
 			}
-			*rtp = rt->dst.dn_next;
-			rt->dst.dn_next = NULL;
+			*rtp = rt->dn_next;
+			rt->dn_next = NULL;
 			dst_dev_put(&rt->dst);
 			dst_release(&rt->dst);
 			break;
@@ -333,8 +333,8 @@ static int dn_insert_route(struct dn_route *rt, unsigned int hash, struct dn_rou
 						lockdep_is_held(&dn_rt_hash_table[hash].lock))) != NULL) {
 		if (compare_keys(&rth->fld, &rt->fld)) {
 			/* Put it first */
-			*rthp = rth->dst.dn_next;
-			rcu_assign_pointer(rth->dst.dn_next,
+			*rthp = rth->dn_next;
+			rcu_assign_pointer(rth->dn_next,
 					   dn_rt_hash_table[hash].chain);
 			rcu_assign_pointer(dn_rt_hash_table[hash].chain, rth);
 
@@ -345,10 +345,10 @@ static int dn_insert_route(struct dn_route *rt, unsigned int hash, struct dn_rou
 			*rp = rth;
 			return 0;
 		}
-		rthp = &rth->dst.dn_next;
+		rthp = &rth->dn_next;
 	}
 
-	rcu_assign_pointer(rt->dst.dn_next, dn_rt_hash_table[hash].chain);
+	rcu_assign_pointer(rt->dn_next, dn_rt_hash_table[hash].chain);
 	rcu_assign_pointer(dn_rt_hash_table[hash].chain, rt);
 
 	dst_hold_and_use(&rt->dst, now);
@@ -369,8 +369,8 @@ static void dn_run_flush(struct timer_list *unused)
 			goto nothing_to_declare;
 
 		for(; rt; rt = next) {
-			next = rcu_dereference_raw(rt->dst.dn_next);
-			RCU_INIT_POINTER(rt->dst.dn_next, NULL);
+			next = rcu_dereference_raw(rt->dn_next);
+			RCU_INIT_POINTER(rt->dn_next, NULL);
 			dst_dev_put(&rt->dst);
 			dst_release(&rt->dst);
 		}
@@ -1183,6 +1183,7 @@ static int dn_route_output_slow(struct dst_entry **pprt, const struct flowidn *o
 	if (rt == NULL)
 		goto e_nobufs;
 
+	rt->dn_next = NULL;
 	memset(&rt->fld, 0, sizeof(rt->fld));
 	rt->fld.saddr        = oldflp->saddr;
 	rt->fld.daddr        = oldflp->daddr;
@@ -1252,7 +1253,7 @@ static int __dn_route_output_key(struct dst_entry **pprt, const struct flowidn *
 	if (!(flags & MSG_TRYHARD)) {
 		rcu_read_lock_bh();
 		for (rt = rcu_dereference_bh(dn_rt_hash_table[hash].chain); rt;
-			rt = rcu_dereference_bh(rt->dst.dn_next)) {
+			rt = rcu_dereference_bh(rt->dn_next)) {
 			if ((flp->daddr == rt->fld.daddr) &&
 			    (flp->saddr == rt->fld.saddr) &&
 			    (flp->flowidn_mark == rt->fld.flowidn_mark) &&
@@ -1448,6 +1449,7 @@ static int dn_route_input_slow(struct sk_buff *skb)
 	if (rt == NULL)
 		goto e_nobufs;
 
+	rt->dn_next = NULL;
 	memset(&rt->fld, 0, sizeof(rt->fld));
 	rt->rt_saddr      = fld.saddr;
 	rt->rt_daddr      = fld.daddr;
@@ -1529,7 +1531,7 @@ static int dn_route_input(struct sk_buff *skb)
 
 	rcu_read_lock();
 	for(rt = rcu_dereference(dn_rt_hash_table[hash].chain); rt != NULL;
-	    rt = rcu_dereference(rt->dst.dn_next)) {
+	    rt = rcu_dereference(rt->dn_next)) {
 		if ((rt->fld.saddr == cb->src) &&
 		    (rt->fld.daddr == cb->dst) &&
 		    (rt->fld.flowidn_oif == 0) &&
@@ -1749,7 +1751,7 @@ int dn_cache_dump(struct sk_buff *skb, struct netlink_callback *cb)
 		rcu_read_lock_bh();
 		for(rt = rcu_dereference_bh(dn_rt_hash_table[h].chain), idx = 0;
 			rt;
-			rt = rcu_dereference_bh(rt->dst.dn_next), idx++) {
+			rt = rcu_dereference_bh(rt->dn_next), idx++) {
 			if (idx < s_idx)
 				continue;
 			skb_dst_set(skb, dst_clone(&rt->dst));
@@ -1795,7 +1797,7 @@ static struct dn_route *dn_rt_cache_get_next(struct seq_file *seq, struct dn_rou
 {
 	struct dn_rt_cache_iter_state *s = seq->private;
 
-	rt = rcu_dereference_bh(rt->dst.dn_next);
+	rt = rcu_dereference_bh(rt->dn_next);
 	while (!rt) {
 		rcu_read_unlock_bh();
 		if (--s->bucket < 0)
@@ -1921,11 +1923,11 @@ void __init dn_route_init(void)
 		    &dn_rt_cache_seq_fops);
 
 #ifdef CONFIG_DECNET_ROUTER
-	rtnl_register(PF_DECnet, RTM_GETROUTE, dn_cache_getroute,
-		      dn_fib_dump, 0);
+	rtnl_register_module(THIS_MODULE, PF_DECnet, RTM_GETROUTE,
+			     dn_cache_getroute, dn_fib_dump, 0);
 #else
-	rtnl_register(PF_DECnet, RTM_GETROUTE, dn_cache_getroute,
-		      dn_cache_dump, 0);
+	rtnl_register_module(THIS_MODULE, PF_DECnet, RTM_GETROUTE,
+			     dn_cache_getroute, dn_cache_dump, 0);
 #endif
 }
 
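With dn_next moved out of struct dst_entry and into struct dn_route
itself, the routes allocated in the slow paths no longer get the field
zeroed for free, since dst_alloc() only initializes the embedded
dst_entry; hence the two explicit 'rt->dn_next = NULL' lines above. The
reader side keeps the usual RCU pattern, now one indirection shorter. A
condensed sketch of that traversal, assuming dn_next is an __rcu pointer
inside struct dn_route as the conversions above imply (the callback is
hypothetical):

    static void dn_walk_chain(struct dn_route __rcu *chain,
                              void (*cb)(struct dn_route *rt))
    {
            struct dn_route *rt;

            /* Callers hold rcu_read_lock_bh(), as in the lookup paths
             * above; dn_next is RCU-published by dn_insert_route().
             */
            for (rt = rcu_dereference_bh(chain); rt;
                 rt = rcu_dereference_bh(rt->dn_next))
                    cb(rt);
    }
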
diff --git a/net/dsa/Kconfig b/net/dsa/Kconfig
index 03c3bdf..bbf2c82 100644
--- a/net/dsa/Kconfig
+++ b/net/dsa/Kconfig
@@ -16,6 +16,15 @@
 
 if NET_DSA
 
+config NET_DSA_LEGACY
+	bool "Support for older platform device and Device Tree registration"
+	default y
+	---help---
+	  Say Y if you want to enable support for the older platform device and
+	  deprecated Device Tree binding registration.
+
+	  This feature is scheduled for removal in 4.17.
+
 # tagging formats
 config NET_DSA_TAG_BRCM
 	bool
diff --git a/net/dsa/Makefile b/net/dsa/Makefile
index 0e13c1f..9e4d353 100644
--- a/net/dsa/Makefile
+++ b/net/dsa/Makefile
@@ -1,7 +1,8 @@
 # SPDX-License-Identifier: GPL-2.0
 # the core
 obj-$(CONFIG_NET_DSA) += dsa_core.o
-dsa_core-y += dsa.o dsa2.o legacy.o master.o port.o slave.o switch.o
+dsa_core-y += dsa.o dsa2.o master.o port.o slave.o switch.o
+dsa_core-$(CONFIG_NET_DSA_LEGACY) += legacy.o
 
 # tagging formats
 dsa_core-$(CONFIG_NET_DSA_TAG_BRCM) += tag_brcm.o
diff --git a/net/dsa/dsa2.c b/net/dsa/dsa2.c
index 1e28742..21f9bed 100644
--- a/net/dsa/dsa2.c
+++ b/net/dsa/dsa2.c
@@ -241,7 +241,7 @@ static int dsa_tree_setup_default_cpu(struct dsa_switch_tree *dst)
 		for (port = 0; port < ds->num_ports; port++) {
 			dp = &ds->ports[port];
 
-			if (dsa_port_is_user(dp))
+			if (dsa_port_is_user(dp) || dsa_port_is_dsa(dp))
 				dp->cpu_dp = dst->cpu_dp;
 		}
 	}
diff --git a/net/dsa/dsa_priv.h b/net/dsa/dsa_priv.h
index 7d03669..cefb0c3 100644
--- a/net/dsa/dsa_priv.h
+++ b/net/dsa/dsa_priv.h
@@ -97,8 +97,17 @@ const struct dsa_device_ops *dsa_resolve_tag_protocol(int tag_protocol);
 bool dsa_schedule_work(struct work_struct *work);
 
 /* legacy.c */
+#if IS_ENABLED(CONFIG_NET_DSA_LEGACY)
 int dsa_legacy_register(void);
 void dsa_legacy_unregister(void);
+#else
+static inline int dsa_legacy_register(void)
+{
+	return 0;
+}
+
+static inline void dsa_legacy_unregister(void) { }
+#endif
 int dsa_legacy_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
 		       struct net_device *dev,
 		       const unsigned char *addr, u16 vid,
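The IS_ENABLED() stubs above keep every call site free of #ifdef: when
CONFIG_NET_DSA_LEGACY is off, dsa_legacy_register() collapses to an
inline that returns 0 and the error check becomes trivially false. A
sketch of the intended call-site shape (function name hypothetical):

    static int __init demo_dsa_init(void)
    {
            int err;

            /* Either the real registration from legacy.c or the inline
             * no-op stub, picked at build time by the Kconfig symbol.
             */
            err = dsa_legacy_register();
            if (err)
                    return err;

            return 0;
    }
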
diff --git a/net/dsa/legacy.c b/net/dsa/legacy.c
index 84611d7..aa56d3f 100644
--- a/net/dsa/legacy.c
+++ b/net/dsa/legacy.c
@@ -718,26 +718,6 @@ static int dsa_resume(struct device *d)
 }
 #endif
 
-/* legacy way, bypassing the bridge *****************************************/
-int dsa_legacy_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
-		       struct net_device *dev,
-		       const unsigned char *addr, u16 vid,
-		       u16 flags)
-{
-	struct dsa_port *dp = dsa_slave_to_port(dev);
-
-	return dsa_port_fdb_add(dp, addr, vid);
-}
-
-int dsa_legacy_fdb_del(struct ndmsg *ndm, struct nlattr *tb[],
-		       struct net_device *dev,
-		       const unsigned char *addr, u16 vid)
-{
-	struct dsa_port *dp = dsa_slave_to_port(dev);
-
-	return dsa_port_fdb_del(dp, addr, vid);
-}
-
 static SIMPLE_DEV_PM_OPS(dsa_pm_ops, dsa_suspend, dsa_resume);
 
 static const struct of_device_id dsa_of_match_table[] = {
diff --git a/net/dsa/slave.c b/net/dsa/slave.c
index a95a55f..f523072 100644
--- a/net/dsa/slave.c
+++ b/net/dsa/slave.c
@@ -708,14 +708,12 @@ static int dsa_slave_add_cls_matchall(struct net_device *dev,
 	struct dsa_slave_priv *p = netdev_priv(dev);
 	struct dsa_mall_tc_entry *mall_tc_entry;
 	__be16 protocol = cls->common.protocol;
-	struct net *net = dev_net(dev);
 	struct dsa_switch *ds = dp->ds;
 	struct net_device *to_dev;
 	const struct tc_action *a;
 	struct dsa_port *to_dp;
 	int err = -EOPNOTSUPP;
 	LIST_HEAD(actions);
-	int ifindex;
 
 	if (!ds->ops->port_mirror_add)
 		return err;
@@ -729,8 +727,7 @@ static int dsa_slave_add_cls_matchall(struct net_device *dev,
 	if (is_tcf_mirred_egress_mirror(a) && protocol == htons(ETH_P_ALL)) {
 		struct dsa_mall_mirror_tc_entry *mirror;
 
-		ifindex = tcf_mirred_ifindex(a);
-		to_dev = __dev_get_by_index(net, ifindex);
+		to_dev = tcf_mirred_dev(a);
 		if (!to_dev)
 			return -EINVAL;
 
@@ -943,6 +940,26 @@ static const struct ethtool_ops dsa_slave_ethtool_ops = {
 	.set_rxnfc		= dsa_slave_set_rxnfc,
 };
 
+/* legacy way, bypassing the bridge *****************************************/
+int dsa_legacy_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
+		       struct net_device *dev,
+		       const unsigned char *addr, u16 vid,
+		       u16 flags)
+{
+	struct dsa_port *dp = dsa_slave_to_port(dev);
+
+	return dsa_port_fdb_add(dp, addr, vid);
+}
+
+int dsa_legacy_fdb_del(struct ndmsg *ndm, struct nlattr *tb[],
+		       struct net_device *dev,
+		       const unsigned char *addr, u16 vid)
+{
+	struct dsa_port *dp = dsa_slave_to_port(dev);
+
+	return dsa_port_fdb_del(dp, addr, vid);
+}
+
 static const struct net_device_ops dsa_slave_netdev_ops = {
 	.ndo_open	 	= dsa_slave_open,
 	.ndo_stop		= dsa_slave_close,
diff --git a/net/dsa/switch.c b/net/dsa/switch.c
index 29608d0..b935117 100644
--- a/net/dsa/switch.c
+++ b/net/dsa/switch.c
@@ -83,29 +83,52 @@ static int dsa_switch_bridge_leave(struct dsa_switch *ds,
 static int dsa_switch_fdb_add(struct dsa_switch *ds,
 			      struct dsa_notifier_fdb_info *info)
 {
-	/* Do not care yet about other switch chips of the fabric */
-	if (ds->index != info->sw_index)
-		return 0;
+	int port = dsa_towards_port(ds, info->sw_index, info->port);
 
 	if (!ds->ops->port_fdb_add)
 		return -EOPNOTSUPP;
 
-	return ds->ops->port_fdb_add(ds, info->port, info->addr,
-				     info->vid);
+	return ds->ops->port_fdb_add(ds, port, info->addr, info->vid);
 }
 
 static int dsa_switch_fdb_del(struct dsa_switch *ds,
 			      struct dsa_notifier_fdb_info *info)
 {
-	/* Do not care yet about other switch chips of the fabric */
-	if (ds->index != info->sw_index)
-		return 0;
+	int port = dsa_towards_port(ds, info->sw_index, info->port);
 
 	if (!ds->ops->port_fdb_del)
 		return -EOPNOTSUPP;
 
-	return ds->ops->port_fdb_del(ds, info->port, info->addr,
-				     info->vid);
+	return ds->ops->port_fdb_del(ds, port, info->addr, info->vid);
+}
+
+static int
+dsa_switch_mdb_prepare_bitmap(struct dsa_switch *ds,
+			      const struct switchdev_obj_port_mdb *mdb,
+			      const unsigned long *bitmap)
+{
+	int port, err;
+
+	if (!ds->ops->port_mdb_prepare || !ds->ops->port_mdb_add)
+		return -EOPNOTSUPP;
+
+	for_each_set_bit(port, bitmap, ds->num_ports) {
+		err = ds->ops->port_mdb_prepare(ds, port, mdb);
+		if (err)
+			return err;
+	}
+
+	return 0;
+}
+
+static void dsa_switch_mdb_add_bitmap(struct dsa_switch *ds,
+				      const struct switchdev_obj_port_mdb *mdb,
+				      const unsigned long *bitmap)
+{
+	int port;
+
+	for_each_set_bit(port, bitmap, ds->num_ports)
+		ds->ops->port_mdb_add(ds, port, mdb);
 }
 
 static int dsa_switch_mdb_add(struct dsa_switch *ds,
@@ -114,7 +137,7 @@ static int dsa_switch_mdb_add(struct dsa_switch *ds,
 	const struct switchdev_obj_port_mdb *mdb = info->mdb;
 	struct switchdev_trans *trans = info->trans;
 	DECLARE_BITMAP(group, ds->num_ports);
-	int port, err;
+	int port;
 
 	/* Build a mask of Multicast group members */
 	bitmap_zero(group, ds->num_ports);
@@ -124,21 +147,10 @@ static int dsa_switch_mdb_add(struct dsa_switch *ds,
 		if (dsa_is_dsa_port(ds, port))
 			set_bit(port, group);
 
-	if (switchdev_trans_ph_prepare(trans)) {
-		if (!ds->ops->port_mdb_prepare || !ds->ops->port_mdb_add)
-			return -EOPNOTSUPP;
+	if (switchdev_trans_ph_prepare(trans))
+		return dsa_switch_mdb_prepare_bitmap(ds, mdb, group);
 
-		for_each_set_bit(port, group, ds->num_ports) {
-			err = ds->ops->port_mdb_prepare(ds, port, mdb, trans);
-			if (err)
-				return err;
-		}
-
-		return 0;
-	}
-
-	for_each_set_bit(port, group, ds->num_ports)
-		ds->ops->port_mdb_add(ds, port, mdb, trans);
+	dsa_switch_mdb_add_bitmap(ds, mdb, group);
 
 	return 0;
 }
@@ -157,13 +169,43 @@ static int dsa_switch_mdb_del(struct dsa_switch *ds,
 	return 0;
 }
 
+static int
+dsa_switch_vlan_prepare_bitmap(struct dsa_switch *ds,
+			       const struct switchdev_obj_port_vlan *vlan,
+			       const unsigned long *bitmap)
+{
+	int port, err;
+
+	if (!ds->ops->port_vlan_prepare || !ds->ops->port_vlan_add)
+		return -EOPNOTSUPP;
+
+	for_each_set_bit(port, bitmap, ds->num_ports) {
+		err = ds->ops->port_vlan_prepare(ds, port, vlan);
+		if (err)
+			return err;
+	}
+
+	return 0;
+}
+
+static void
+dsa_switch_vlan_add_bitmap(struct dsa_switch *ds,
+			   const struct switchdev_obj_port_vlan *vlan,
+			   const unsigned long *bitmap)
+{
+	int port;
+
+	for_each_set_bit(port, bitmap, ds->num_ports)
+		ds->ops->port_vlan_add(ds, port, vlan);
+}
+
 static int dsa_switch_vlan_add(struct dsa_switch *ds,
 			       struct dsa_notifier_vlan_info *info)
 {
 	const struct switchdev_obj_port_vlan *vlan = info->vlan;
 	struct switchdev_trans *trans = info->trans;
 	DECLARE_BITMAP(members, ds->num_ports);
-	int port, err;
+	int port;
 
 	/* Build a mask of VLAN members */
 	bitmap_zero(members, ds->num_ports);
@@ -173,21 +215,10 @@ static int dsa_switch_vlan_add(struct dsa_switch *ds,
 		if (dsa_is_cpu_port(ds, port) || dsa_is_dsa_port(ds, port))
 			set_bit(port, members);
 
-	if (switchdev_trans_ph_prepare(trans)) {
-		if (!ds->ops->port_vlan_prepare || !ds->ops->port_vlan_add)
-			return -EOPNOTSUPP;
+	if (switchdev_trans_ph_prepare(trans))
+		return dsa_switch_vlan_prepare_bitmap(ds, vlan, members);
 
-		for_each_set_bit(port, members, ds->num_ports) {
-			err = ds->ops->port_vlan_prepare(ds, port, vlan, trans);
-			if (err)
-				return err;
-		}
-
-		return 0;
-	}
-
-	for_each_set_bit(port, members, ds->num_ports)
-		ds->ops->port_vlan_add(ds, port, vlan, trans);
+	dsa_switch_vlan_add_bitmap(ds, vlan, members);
 
 	return 0;
 }
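dsa_towards_port() is what makes the FDB notifiers above cross-chip: on
the target switch it returns the user port itself, while on every other
switch in the fabric it returns the local port leading towards the
target. A worked example as a comment (the fabric layout is
hypothetical):

    /* Fabric: switch 0 (has the CPU port) <-- dsa link --> switch 1.
     * FDB notifier for {sw_index = 1, port = 2}:
     *   on switch 1: dsa_towards_port() == 2 (the user port itself)
     *   on switch 0: dsa_towards_port() == its dsa link towards switch 1
     * so the address is installed along the whole path, not only on the
     * switch that owns the user port.
     */
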
diff --git a/net/dsa/tag_brcm.c b/net/dsa/tag_brcm.c
index e6e0b7b..2b06bb9 100644
--- a/net/dsa/tag_brcm.c
+++ b/net/dsa/tag_brcm.c
@@ -70,6 +70,18 @@ static struct sk_buff *brcm_tag_xmit_ll(struct sk_buff *skb,
 	if (skb_cow_head(skb, BRCM_TAG_LEN) < 0)
 		return NULL;
 
+	/* The Ethernet switch we are interfaced with needs packets to be at
+	 * least 64 bytes (including FCS), otherwise they will be discarded when
+	 * they enter the switch port logic. When Broadcom tags are enabled, we
+	 * need to make sure that packets are at least 68 bytes
+	 * (including FCS and tag) because the length verification is done after
+	 * the Broadcom tag is stripped off the ingress packet.
+	 *
+	 * On error, let dsa_slave_xmit() free the SKB.
+	 */
+	if (__skb_put_padto(skb, ETH_ZLEN + BRCM_TAG_LEN, false))
+		return NULL;
+
 	skb_push(skb, BRCM_TAG_LEN);
 
 	if (offset)
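__skb_put_padto() zero-pads the frame to the requested length and
returns 0 on success; with free_on_error set to false it leaves the skb
alive on failure, which is why the xmit path above can simply return
NULL and rely on dsa_slave_xmit() to free it. A hedged sketch of the
same pattern in isolation (function name hypothetical):

    static struct sk_buff *demo_pad_before_tag(struct sk_buff *skb)
    {
            /* Pad so that the frame still meets the 64-byte minimum
             * after the switch strips the 4-byte Broadcom tag.
             * false: do not free the skb on error; the caller owns it.
             */
            if (__skb_put_padto(skb, ETH_ZLEN + BRCM_TAG_LEN, false))
                    return NULL;

            return skb;
    }
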
diff --git a/net/dsa/tag_mtk.c b/net/dsa/tag_mtk.c
index 8475434..11535bc 100644
--- a/net/dsa/tag_mtk.c
+++ b/net/dsa/tag_mtk.c
@@ -13,10 +13,13 @@
  */
 
 #include <linux/etherdevice.h>
+#include <linux/if_vlan.h>
 
 #include "dsa_priv.h"
 
 #define MTK_HDR_LEN		4
+#define MTK_HDR_XMIT_UNTAGGED		0
+#define MTK_HDR_XMIT_TAGGED_TPID_8100	1
 #define MTK_HDR_RECV_SOURCE_PORT_MASK	GENMASK(2, 0)
 #define MTK_HDR_XMIT_DP_BIT_MASK	GENMASK(5, 0)
 
@@ -25,20 +28,37 @@ static struct sk_buff *mtk_tag_xmit(struct sk_buff *skb,
 {
 	struct dsa_port *dp = dsa_slave_to_port(dev);
 	u8 *mtk_tag;
+	bool is_vlan_skb = true;
 
-	if (skb_cow_head(skb, MTK_HDR_LEN) < 0)
-		return NULL;
+	/* Build the special tag after the MAC Source Address. If a VLAN
+	 * header is present, it must be combined with the special tag.
+	 * Only then can the switch parse both the special tag and the
+	 * VLAN tag at the same time and then look up the VLAN table
+	 * with the VID.
+	 */
+	if (!skb_vlan_tagged(skb)) {
+		if (skb_cow_head(skb, MTK_HDR_LEN) < 0)
+			return NULL;
 
-	skb_push(skb, MTK_HDR_LEN);
+		skb_push(skb, MTK_HDR_LEN);
+		memmove(skb->data, skb->data + MTK_HDR_LEN, 2 * ETH_ALEN);
+		is_vlan_skb = false;
+	}
 
-	memmove(skb->data, skb->data + MTK_HDR_LEN, 2 * ETH_ALEN);
-
-	/* Build the tag after the MAC Source Address */
 	mtk_tag = skb->data + 2 * ETH_ALEN;
-	mtk_tag[0] = 0;
+
+	/* Mark the tag attribute in the special tag to tell the hardware
+	 * whether the special tag is combined with an 802.1Q header.
+	 */
+	mtk_tag[0] = is_vlan_skb ? MTK_HDR_XMIT_TAGGED_TPID_8100 :
+		     MTK_HDR_XMIT_UNTAGGED;
 	mtk_tag[1] = (1 << dp->index) & MTK_HDR_XMIT_DP_BIT_MASK;
-	mtk_tag[2] = 0;
-	mtk_tag[3] = 0;
+
+	/* Tag control information is kept for 802.1Q */
+	if (!is_vlan_skb) {
+		mtk_tag[2] = 0;
+		mtk_tag[3] = 0;
+	}
 
 	return skb;
 }
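The resulting on-wire layouts, as far as they can be read from the code
above (a sketch, not taken from a datasheet):

    /* Untagged frame:
     *   [DA][SA][mtk_tag0..3][EtherType]...
     *   mtk_tag[0] = MTK_HDR_XMIT_UNTAGGED
     *   mtk_tag[1] = BIT(port) & MTK_HDR_XMIT_DP_BIT_MASK
     *   mtk_tag[2] = mtk_tag[3] = 0
     *
     * VLAN-tagged frame (tag written over the 802.1Q TPID in place):
     *   [DA][SA][mtk_tag0 mtk_tag1][TCI][EtherType]...
     *   mtk_tag[0] = MTK_HDR_XMIT_TAGGED_TPID_8100
     *   mtk_tag[1] = BIT(port) & MTK_HDR_XMIT_DP_BIT_MASK
     *   the original TCI bytes are left in place for the switch to parse
     */
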
diff --git a/net/ipv4/Makefile b/net/ipv4/Makefile
index c6c8ad1..47a0a66 100644
--- a/net/ipv4/Makefile
+++ b/net/ipv4/Makefile
@@ -43,7 +43,6 @@
 obj-$(CONFIG_INET_TCP_DIAG) += tcp_diag.o
 obj-$(CONFIG_INET_UDP_DIAG) += udp_diag.o
 obj-$(CONFIG_INET_RAW_DIAG) += raw_diag.o
-obj-$(CONFIG_NET_TCPPROBE) += tcp_probe.o
 obj-$(CONFIG_TCP_CONG_BBR) += tcp_bbr.o
 obj-$(CONFIG_TCP_CONG_BIC) += tcp_bic.o
 obj-$(CONFIG_TCP_CONG_CDG) += tcp_cdg.o
diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
index f00499a..54cccdd 100644
--- a/net/ipv4/af_inet.c
+++ b/net/ipv4/af_inet.c
@@ -121,6 +121,7 @@
 #endif
 #include <net/l3mdev.h>
 
+#include <trace/events/sock.h>
 
 /* The inetsw table contains everything that inet_create needs to
  * build a new socket.
@@ -789,7 +790,8 @@ int inet_recvmsg(struct socket *sock, struct msghdr *msg, size_t size,
 	int addr_len = 0;
 	int err;
 
-	sock_rps_record_flow(sk);
+	if (likely(!(flags & MSG_ERRQUEUE)))
+		sock_rps_record_flow(sk);
 
 	err = sk->sk_prot->recvmsg(sk, msg, size, flags & MSG_DONTWAIT,
 				   flags & ~MSG_DONTWAIT, &addr_len);
@@ -1220,6 +1222,19 @@ int inet_sk_rebuild_header(struct sock *sk)
 }
 EXPORT_SYMBOL(inet_sk_rebuild_header);
 
+void inet_sk_set_state(struct sock *sk, int state)
+{
+	trace_inet_sock_set_state(sk, sk->sk_state, state);
+	sk->sk_state = state;
+}
+EXPORT_SYMBOL(inet_sk_set_state);
+
+void inet_sk_state_store(struct sock *sk, int newstate)
+{
+	trace_inet_sock_set_state(sk, sk->sk_state, newstate);
+	smp_store_release(&sk->sk_state, newstate);
+}
+
 struct sk_buff *inet_gso_segment(struct sk_buff *skb,
 				 netdev_features_t features)
 {
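inet_sk_set_state() and inet_sk_state_store() both fire the new
inet_sock_set_state tracepoint; they differ only in the memory ordering
of the store. A sketch of when each variant applies, assuming pairing
with the inet_sk_state_load() acquire helper used later in this merge
(function names hypothetical):

    static void demo_enter_listen(struct sock *sk)
    {
            /* sk_state is read locklessly (e.g. by reqsk_timer_handler()),
             * so publish with release semantics.
             */
            inet_sk_state_store(sk, TCP_LISTEN);
    }

    static void demo_enter_close(struct sock *sk)
    {
            /* Only lock-protected readers: a plain store is enough,
             * and the tracepoint still records the transition.
             */
            inet_sk_set_state(sk, TCP_CLOSE);
    }
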
diff --git a/net/ipv4/esp4.c b/net/ipv4/esp4.c
index d57aa64..6f00e43 100644
--- a/net/ipv4/esp4.c
+++ b/net/ipv4/esp4.c
@@ -121,14 +121,32 @@ static void esp_ssg_unref(struct xfrm_state *x, void *tmp)
 static void esp_output_done(struct crypto_async_request *base, int err)
 {
 	struct sk_buff *skb = base->data;
+	struct xfrm_offload *xo = xfrm_offload(skb);
 	void *tmp;
-	struct dst_entry *dst = skb_dst(skb);
-	struct xfrm_state *x = dst->xfrm;
+	struct xfrm_state *x;
+
+	if (xo && (xo->flags & XFRM_DEV_RESUME))
+		x = skb->sp->xvec[skb->sp->len - 1];
+	else
+		x = skb_dst(skb)->xfrm;
 
 	tmp = ESP_SKB_CB(skb)->tmp;
 	esp_ssg_unref(x, tmp);
 	kfree(tmp);
-	xfrm_output_resume(skb, err);
+
+	if (xo && (xo->flags & XFRM_DEV_RESUME)) {
+		if (err) {
+			XFRM_INC_STATS(xs_net(x), LINUX_MIB_XFRMOUTSTATEPROTOERROR);
+			kfree_skb(skb);
+			return;
+		}
+
+		skb_push(skb, skb->data - skb_mac_header(skb));
+		secpath_reset(skb);
+		xfrm_dev_resume(skb);
+	} else {
+		xfrm_output_resume(skb, err);
+	}
 }
 
 /* Move ESP header back into place. */
@@ -825,17 +843,13 @@ static int esp_init_aead(struct xfrm_state *x)
 	char aead_name[CRYPTO_MAX_ALG_NAME];
 	struct crypto_aead *aead;
 	int err;
-	u32 mask = 0;
 
 	err = -ENAMETOOLONG;
 	if (snprintf(aead_name, CRYPTO_MAX_ALG_NAME, "%s(%s)",
 		     x->geniv, x->aead->alg_name) >= CRYPTO_MAX_ALG_NAME)
 		goto error;
 
-	if (x->xso.offload_handle)
-		mask |= CRYPTO_ALG_ASYNC;
-
-	aead = crypto_alloc_aead(aead_name, 0, mask);
+	aead = crypto_alloc_aead(aead_name, 0, 0);
 	err = PTR_ERR(aead);
 	if (IS_ERR(aead))
 		goto error;
@@ -865,7 +879,6 @@ static int esp_init_authenc(struct xfrm_state *x)
 	char authenc_name[CRYPTO_MAX_ALG_NAME];
 	unsigned int keylen;
 	int err;
-	u32 mask = 0;
 
 	err = -EINVAL;
 	if (!x->ealg)
@@ -891,10 +904,7 @@ static int esp_init_authenc(struct xfrm_state *x)
 			goto error;
 	}
 
-	if (x->xso.offload_handle)
-		mask |= CRYPTO_ALG_ASYNC;
-
-	aead = crypto_alloc_aead(authenc_name, 0, mask);
+	aead = crypto_alloc_aead(authenc_name, 0, 0);
 	err = PTR_ERR(aead);
 	if (IS_ERR(aead))
 		goto error;
diff --git a/net/ipv4/esp4_offload.c b/net/ipv4/esp4_offload.c
index f8b918c..c359f3c 100644
--- a/net/ipv4/esp4_offload.c
+++ b/net/ipv4/esp4_offload.c
@@ -108,75 +108,36 @@ static void esp4_gso_encap(struct xfrm_state *x, struct sk_buff *skb)
 static struct sk_buff *esp4_gso_segment(struct sk_buff *skb,
 				        netdev_features_t features)
 {
-	__u32 seq;
-	int err = 0;
-	struct sk_buff *skb2;
 	struct xfrm_state *x;
 	struct ip_esp_hdr *esph;
 	struct crypto_aead *aead;
-	struct sk_buff *segs = ERR_PTR(-EINVAL);
 	netdev_features_t esp_features = features;
 	struct xfrm_offload *xo = xfrm_offload(skb);
 
 	if (!xo)
-		goto out;
-
-	seq = xo->seq.low;
+		return ERR_PTR(-EINVAL);
 
 	x = skb->sp->xvec[skb->sp->len - 1];
 	aead = x->data;
 	esph = ip_esp_hdr(skb);
 
 	if (esph->spi != x->id.spi)
-		goto out;
+		return ERR_PTR(-EINVAL);
 
 	if (!pskb_may_pull(skb, sizeof(*esph) + crypto_aead_ivsize(aead)))
-		goto out;
+		return ERR_PTR(-EINVAL);
 
 	__skb_pull(skb, sizeof(*esph) + crypto_aead_ivsize(aead));
 
 	skb->encap_hdr_csum = 1;
 
-	if (!(features & NETIF_F_HW_ESP))
+	if (!(features & NETIF_F_HW_ESP) || !x->xso.offload_handle ||
+	    (x->xso.dev != skb->dev))
 		esp_features = features & ~(NETIF_F_SG | NETIF_F_CSUM_MASK);
 
-	segs = x->outer_mode->gso_segment(x, skb, esp_features);
-	if (IS_ERR_OR_NULL(segs))
-		goto out;
+	xo->flags |= XFRM_GSO_SEGMENT;
 
-	__skb_pull(skb, skb->data - skb_mac_header(skb));
-
-	skb2 = segs;
-	do {
-		struct sk_buff *nskb = skb2->next;
-
-		xo = xfrm_offload(skb2);
-		xo->flags |= XFRM_GSO_SEGMENT;
-		xo->seq.low = seq;
-		xo->seq.hi = xfrm_replay_seqhi(x, seq);
-
-		if(!(features & NETIF_F_HW_ESP))
-			xo->flags |= CRYPTO_FALLBACK;
-
-		x->outer_mode->xmit(x, skb2);
-
-		err = x->type_offload->xmit(x, skb2, esp_features);
-		if (err) {
-			kfree_skb_list(segs);
-			return ERR_PTR(err);
-		}
-
-		if (!skb_is_gso(skb2))
-			seq++;
-		else
-			seq += skb_shinfo(skb2)->gso_segs;
-
-		skb_push(skb2, skb2->mac_len);
-		skb2 = nskb;
-	} while (skb2);
-
-out:
-	return segs;
+	return x->outer_mode->gso_segment(x, skb, esp_features);
 }
 
 static int esp_input_tail(struct xfrm_state *x, struct sk_buff *skb)
@@ -203,6 +164,7 @@ static int esp_xmit(struct xfrm_state *x, struct sk_buff *skb,  netdev_features_
 	struct crypto_aead *aead;
 	struct esp_info esp;
 	bool hw_offload = true;
+	__u32 seq;
 
 	esp.inplace = true;
 
@@ -241,23 +203,30 @@ static int esp_xmit(struct xfrm_state *x, struct sk_buff *skb,  netdev_features_
 			return esp.nfrags;
 	}
 
+	seq = xo->seq.low;
+
 	esph = esp.esph;
 	esph->spi = x->id.spi;
 
 	skb_push(skb, -skb_network_offset(skb));
 
 	if (xo->flags & XFRM_GSO_SEGMENT) {
-		esph->seq_no = htonl(xo->seq.low);
-	} else {
-		ip_hdr(skb)->tot_len = htons(skb->len);
-		ip_send_check(ip_hdr(skb));
+		esph->seq_no = htonl(seq);
+
+		if (!skb_is_gso(skb))
+			xo->seq.low++;
+		else
+			xo->seq.low += skb_shinfo(skb)->gso_segs;
 	}
 
+	esp.seqno = cpu_to_be64(seq + ((u64)xo->seq.hi << 32));
+
+	ip_hdr(skb)->tot_len = htons(skb->len);
+	ip_send_check(ip_hdr(skb));
+
 	if (hw_offload)
 		return 0;
 
-	esp.seqno = cpu_to_be64(xo->seq.low + ((u64)xo->seq.hi << 32));
-
 	err = esp_output_tail(x, skb, &esp);
 	if (err)
 		return err;
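After the refactor each GSO chunk carries its own sequence bookkeeping:
the ESP header gets the chunk's first sequence number and xo->seq.low is
advanced by the number of segments the chunk will produce. A worked
example as comments (the numbers are illustrative):

    /* xo->seq.low == 100, chunk with skb_shinfo(skb)->gso_segs == 3:
     *   esph->seq_no = htonl(100)  -> segments use 100, 101, 102
     *   xo->seq.low += 3           -> next chunk starts at 103
     * A non-GSO skb consumes exactly one sequence number instead.
     */
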
diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c
index 4ca46dc..12410ec 100644
--- a/net/ipv4/inet_connection_sock.c
+++ b/net/ipv4/inet_connection_sock.c
@@ -685,7 +685,7 @@ static void reqsk_timer_handler(struct timer_list *t)
 	int max_retries, thresh;
 	u8 defer_accept;
 
-	if (sk_state_load(sk_listener) != TCP_LISTEN)
+	if (inet_sk_state_load(sk_listener) != TCP_LISTEN)
 		goto drop;
 
 	max_retries = icsk->icsk_syn_retries ? : net->ipv4.sysctl_tcp_synack_retries;
@@ -783,7 +783,7 @@ struct sock *inet_csk_clone_lock(const struct sock *sk,
 	if (newsk) {
 		struct inet_connection_sock *newicsk = inet_csk(newsk);
 
-		newsk->sk_state = TCP_SYN_RECV;
+		inet_sk_set_state(newsk, TCP_SYN_RECV);
 		newicsk->icsk_bind_hash = NULL;
 
 		inet_sk(newsk)->inet_dport = inet_rsk(req)->ir_rmt_port;
@@ -877,7 +877,7 @@ int inet_csk_listen_start(struct sock *sk, int backlog)
 	 * It is OK, because this socket enters to hash table only
 	 * after validation is complete.
 	 */
-	sk_state_store(sk, TCP_LISTEN);
+	inet_sk_state_store(sk, TCP_LISTEN);
 	if (!sk->sk_prot->get_port(sk, inet->inet_num)) {
 		inet->inet_sport = htons(inet->inet_num);
 
@@ -888,7 +888,7 @@ int inet_csk_listen_start(struct sock *sk, int backlog)
 			return 0;
 	}
 
-	sk->sk_state = TCP_CLOSE;
+	inet_sk_set_state(sk, TCP_CLOSE);
 	return err;
 }
 EXPORT_SYMBOL_GPL(inet_csk_listen_start);
diff --git a/net/ipv4/inet_diag.c b/net/ipv4/inet_diag.c
index c9c35b6..a383f29 100644
--- a/net/ipv4/inet_diag.c
+++ b/net/ipv4/inet_diag.c
@@ -564,12 +564,18 @@ static int inet_diag_bc_run(const struct nlattr *_bc,
 		case INET_DIAG_BC_JMP:
 			yes = 0;
 			break;
+		case INET_DIAG_BC_S_EQ:
+			yes = entry->sport == op[1].no;
+			break;
 		case INET_DIAG_BC_S_GE:
 			yes = entry->sport >= op[1].no;
 			break;
 		case INET_DIAG_BC_S_LE:
 			yes = entry->sport <= op[1].no;
 			break;
+		case INET_DIAG_BC_D_EQ:
+			yes = entry->dport == op[1].no;
+			break;
 		case INET_DIAG_BC_D_GE:
 			yes = entry->dport >= op[1].no;
 			break;
@@ -802,8 +808,10 @@ static int inet_diag_bc_audit(const struct nlattr *attr,
 			if (!valid_devcond(bc, len, &min_len))
 				return -EINVAL;
 			break;
+		case INET_DIAG_BC_S_EQ:
 		case INET_DIAG_BC_S_GE:
 		case INET_DIAG_BC_S_LE:
+		case INET_DIAG_BC_D_EQ:
 		case INET_DIAG_BC_D_GE:
 		case INET_DIAG_BC_D_LE:
 			if (!valid_port_comparison(bc, len, &min_len))
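The new EQ operations reuse the GE/LE encoding: a two-op pair where the
second op's 'no' field carries the port to compare. A hedged userspace
sketch of a bytecode program matching source port 80 (the jump offsets
assume this pair is the entire program):

    #include <linux/inet_diag.h>

    static const struct inet_diag_bc_op sport_eq_80[] = {
            /* On match, fall off the end (accept); on mismatch, jump
             * 4 bytes past the end, which inet_diag treats as reject.
             */
            { .code = INET_DIAG_BC_S_EQ, .yes = 8, .no = 12 },
            { .no = 80 },   /* port compared against entry->sport */
    };
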
diff --git a/net/ipv4/inet_hashtables.c b/net/ipv4/inet_hashtables.c
index e7d15fb0..37b7da0 100644
--- a/net/ipv4/inet_hashtables.c
+++ b/net/ipv4/inet_hashtables.c
@@ -19,6 +19,7 @@
 #include <linux/slab.h>
 #include <linux/wait.h>
 #include <linux/vmalloc.h>
+#include <linux/bootmem.h>
 
 #include <net/addrconf.h>
 #include <net/inet_connection_sock.h>
@@ -168,6 +169,60 @@ int __inet_inherit_port(const struct sock *sk, struct sock *child)
 }
 EXPORT_SYMBOL_GPL(__inet_inherit_port);
 
+static struct inet_listen_hashbucket *
+inet_lhash2_bucket_sk(struct inet_hashinfo *h, struct sock *sk)
+{
+	u32 hash;
+
+#if IS_ENABLED(CONFIG_IPV6)
+	if (sk->sk_family == AF_INET6)
+		hash = ipv6_portaddr_hash(sock_net(sk),
+					  &sk->sk_v6_rcv_saddr,
+					  inet_sk(sk)->inet_num);
+	else
+#endif
+		hash = ipv4_portaddr_hash(sock_net(sk),
+					  inet_sk(sk)->inet_rcv_saddr,
+					  inet_sk(sk)->inet_num);
+	return inet_lhash2_bucket(h, hash);
+}
+
+static void inet_hash2(struct inet_hashinfo *h, struct sock *sk)
+{
+	struct inet_listen_hashbucket *ilb2;
+
+	if (!h->lhash2)
+		return;
+
+	ilb2 = inet_lhash2_bucket_sk(h, sk);
+
+	spin_lock(&ilb2->lock);
+	if (sk->sk_reuseport && sk->sk_family == AF_INET6)
+		hlist_add_tail_rcu(&inet_csk(sk)->icsk_listen_portaddr_node,
+				   &ilb2->head);
+	else
+		hlist_add_head_rcu(&inet_csk(sk)->icsk_listen_portaddr_node,
+				   &ilb2->head);
+	ilb2->count++;
+	spin_unlock(&ilb2->lock);
+}
+
+static void inet_unhash2(struct inet_hashinfo *h, struct sock *sk)
+{
+	struct inet_listen_hashbucket *ilb2;
+
+	if (!h->lhash2 ||
+	    WARN_ON_ONCE(hlist_unhashed(&inet_csk(sk)->icsk_listen_portaddr_node)))
+		return;
+
+	ilb2 = inet_lhash2_bucket_sk(h, sk);
+
+	spin_lock(&ilb2->lock);
+	hlist_del_init_rcu(&inet_csk(sk)->icsk_listen_portaddr_node);
+	ilb2->count--;
+	spin_unlock(&ilb2->lock);
+}
+
 static inline int compute_score(struct sock *sk, struct net *net,
 				const unsigned short hnum, const __be32 daddr,
 				const int dif, const int sdif, bool exact_dif)
@@ -207,6 +262,40 @@ static inline int compute_score(struct sock *sk, struct net *net,
  */
 
 /* called with rcu_read_lock() : No refcount taken on the socket */
+static struct sock *inet_lhash2_lookup(struct net *net,
+				struct inet_listen_hashbucket *ilb2,
+				struct sk_buff *skb, int doff,
+				const __be32 saddr, __be16 sport,
+				const __be32 daddr, const unsigned short hnum,
+				const int dif, const int sdif)
+{
+	bool exact_dif = inet_exact_dif_match(net, skb);
+	struct inet_connection_sock *icsk;
+	struct sock *sk, *result = NULL;
+	int score, hiscore = 0;
+	u32 phash = 0;
+
+	inet_lhash2_for_each_icsk_rcu(icsk, &ilb2->head) {
+		sk = (struct sock *)icsk;
+		score = compute_score(sk, net, hnum, daddr,
+				      dif, sdif, exact_dif);
+		if (score > hiscore) {
+			if (sk->sk_reuseport) {
+				phash = inet_ehashfn(net, daddr, hnum,
+						     saddr, sport);
+				result = reuseport_select_sock(sk, phash,
+							       skb, doff);
+				if (result)
+					return result;
+			}
+			result = sk;
+			hiscore = score;
+		}
+	}
+
+	return result;
+}
+
 struct sock *__inet_lookup_listener(struct net *net,
 				    struct inet_hashinfo *hashinfo,
 				    struct sk_buff *skb, int doff,
@@ -216,32 +305,57 @@ struct sock *__inet_lookup_listener(struct net *net,
 {
 	unsigned int hash = inet_lhashfn(net, hnum);
 	struct inet_listen_hashbucket *ilb = &hashinfo->listening_hash[hash];
-	int score, hiscore = 0, matches = 0, reuseport = 0;
 	bool exact_dif = inet_exact_dif_match(net, skb);
+	struct inet_listen_hashbucket *ilb2;
 	struct sock *sk, *result = NULL;
+	int score, hiscore = 0;
+	unsigned int hash2;
 	u32 phash = 0;
 
+	if (ilb->count <= 10 || !hashinfo->lhash2)
+		goto port_lookup;
+
+	/* Too many sockets in this ilb bucket (which is hashed by port
+	 * alone). Try lhash2 (hashed by port and addr) instead.
+	 */
+
+	hash2 = ipv4_portaddr_hash(net, daddr, hnum);
+	ilb2 = inet_lhash2_bucket(hashinfo, hash2);
+	if (ilb2->count > ilb->count)
+		goto port_lookup;
+
+	result = inet_lhash2_lookup(net, ilb2, skb, doff,
+				    saddr, sport, daddr, hnum,
+				    dif, sdif);
+	if (result)
+		return result;
+
+	/* Lookup lhash2 with INADDR_ANY */
+
+	hash2 = ipv4_portaddr_hash(net, htonl(INADDR_ANY), hnum);
+	ilb2 = inet_lhash2_bucket(hashinfo, hash2);
+	if (ilb2->count > ilb->count)
+		goto port_lookup;
+
+	return inet_lhash2_lookup(net, ilb2, skb, doff,
+				  saddr, sport, daddr, hnum,
+				  dif, sdif);
+
+port_lookup:
 	sk_for_each_rcu(sk, &ilb->head) {
 		score = compute_score(sk, net, hnum, daddr,
 				      dif, sdif, exact_dif);
 		if (score > hiscore) {
-			reuseport = sk->sk_reuseport;
-			if (reuseport) {
+			if (sk->sk_reuseport) {
 				phash = inet_ehashfn(net, daddr, hnum,
 						     saddr, sport);
 				result = reuseport_select_sock(sk, phash,
 							       skb, doff);
 				if (result)
 					return result;
-				matches = 1;
 			}
 			result = sk;
 			hiscore = score;
-		} else if (score == hiscore && reuseport) {
-			matches++;
-			if (reciprocal_scale(phash, matches) == 0)
-				result = sk;
-			phash = next_pseudo_random32(phash);
 		}
 	}
 	return result;
@@ -430,7 +544,7 @@ bool inet_ehash_nolisten(struct sock *sk, struct sock *osk)
 		sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);
 	} else {
 		percpu_counter_inc(sk->sk_prot->orphan_count);
-		sk->sk_state = TCP_CLOSE;
+		inet_sk_set_state(sk, TCP_CLOSE);
 		sock_set_flag(sk, SOCK_DEAD);
 		inet_csk_destroy_sock(sk);
 	}
@@ -483,6 +597,8 @@ int __inet_hash(struct sock *sk, struct sock *osk)
 		hlist_add_tail_rcu(&sk->sk_node, &ilb->head);
 	else
 		hlist_add_head_rcu(&sk->sk_node, &ilb->head);
+	inet_hash2(hashinfo, sk);
+	ilb->count++;
 	sock_set_flag(sk, SOCK_RCU_FREE);
 	sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);
 unlock:
@@ -509,28 +625,35 @@ EXPORT_SYMBOL_GPL(inet_hash);
 void inet_unhash(struct sock *sk)
 {
 	struct inet_hashinfo *hashinfo = sk->sk_prot->h.hashinfo;
+	struct inet_listen_hashbucket *ilb;
 	spinlock_t *lock;
 	bool listener = false;
-	int done;
 
 	if (sk_unhashed(sk))
 		return;
 
 	if (sk->sk_state == TCP_LISTEN) {
-		lock = &hashinfo->listening_hash[inet_sk_listen_hashfn(sk)].lock;
+		ilb = &hashinfo->listening_hash[inet_sk_listen_hashfn(sk)];
+		lock = &ilb->lock;
 		listener = true;
 	} else {
 		lock = inet_ehash_lockp(hashinfo, sk->sk_hash);
 	}
 	spin_lock_bh(lock);
+	if (sk_unhashed(sk))
+		goto unlock;
+
 	if (rcu_access_pointer(sk->sk_reuseport_cb))
 		reuseport_detach_sock(sk);
-	if (listener)
-		done = __sk_del_node_init(sk);
-	else
-		done = __sk_nulls_del_node_init_rcu(sk);
-	if (done)
-		sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1);
+	if (listener) {
+		inet_unhash2(hashinfo, sk);
+		__sk_del_node_init(sk);
+		ilb->count--;
+	} else {
+		__sk_nulls_del_node_init_rcu(sk);
+	}
+	sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1);
+unlock:
 	spin_unlock_bh(lock);
 }
 EXPORT_SYMBOL_GPL(inet_unhash);
@@ -665,10 +788,37 @@ void inet_hashinfo_init(struct inet_hashinfo *h)
 	for (i = 0; i < INET_LHTABLE_SIZE; i++) {
 		spin_lock_init(&h->listening_hash[i].lock);
 		INIT_HLIST_HEAD(&h->listening_hash[i].head);
+		h->listening_hash[i].count = 0;
 	}
+
+	h->lhash2 = NULL;
 }
 EXPORT_SYMBOL_GPL(inet_hashinfo_init);
 
+void __init inet_hashinfo2_init(struct inet_hashinfo *h, const char *name,
+				unsigned long numentries, int scale,
+				unsigned long low_limit,
+				unsigned long high_limit)
+{
+	unsigned int i;
+
+	h->lhash2 = alloc_large_system_hash(name,
+					    sizeof(*h->lhash2),
+					    numentries,
+					    scale,
+					    0,
+					    NULL,
+					    &h->lhash2_mask,
+					    low_limit,
+					    high_limit);
+
+	for (i = 0; i <= h->lhash2_mask; i++) {
+		spin_lock_init(&h->lhash2[i].lock);
+		INIT_HLIST_HEAD(&h->lhash2[i].head);
+		h->lhash2[i].count = 0;
+	}
+}
+
 int inet_ehash_locks_alloc(struct inet_hashinfo *hashinfo)
 {
 	unsigned int locksz = sizeof(spinlock_t);
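inet_hashinfo2_init() sizes the second-level table with
alloc_large_system_hash(), so it must run at boot. A hypothetical
caller matching the signature above (the table name and sizing
parameters here are illustrative, not values used by this patch):

    static void __init demo_hashinfo_setup(struct inet_hashinfo *h)
    {
            inet_hashinfo_init(h);

            /* numentries == 0 lets alloc_large_system_hash() pick a size
             * from available memory; scale and the limits bound the result.
             */
            inet_hashinfo2_init(h, "demo_listen_portaddr_hash",
                                0, 17, 32, 4096);
    }
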
diff --git a/net/ipv4/inet_timewait_sock.c b/net/ipv4/inet_timewait_sock.c
index b563e0c..277ff69 100644
--- a/net/ipv4/inet_timewait_sock.c
+++ b/net/ipv4/inet_timewait_sock.c
@@ -97,7 +97,7 @@ static void inet_twsk_add_bind_node(struct inet_timewait_sock *tw,
  * Essentially we whip up a timewait bucket, copy the relevant info into it
  * from the SK, and mess with hash chains and list linkage.
  */
-void __inet_twsk_hashdance(struct inet_timewait_sock *tw, struct sock *sk,
+void inet_twsk_hashdance(struct inet_timewait_sock *tw, struct sock *sk,
 			   struct inet_hashinfo *hashinfo)
 {
 	const struct inet_sock *inet = inet_sk(sk);
@@ -119,18 +119,6 @@ void __inet_twsk_hashdance(struct inet_timewait_sock *tw, struct sock *sk,
 
 	spin_lock(lock);
 
-	/*
-	 * Step 2: Hash TW into tcp ehash chain.
-	 * Notes :
-	 * - tw_refcnt is set to 4 because :
-	 * - We have one reference from bhash chain.
-	 * - We have one reference from ehash chain.
-	 * - We have one reference from timer.
-	 * - One reference for ourself (our caller will release it).
-	 * We can use atomic_set() because prior spin_lock()/spin_unlock()
-	 * committed into memory all tw fields.
-	 */
-	refcount_set(&tw->tw_refcnt, 4);
 	inet_twsk_add_node_rcu(tw, &ehead->chain);
 
 	/* Step 3: Remove SK from hash chain */
@@ -138,8 +126,19 @@ void __inet_twsk_hashdance(struct inet_timewait_sock *tw, struct sock *sk,
 		sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1);
 
 	spin_unlock(lock);
+
+	/* tw_refcnt is set to 3 because we have:
+	 * - one reference for the bhash chain.
+	 * - one reference for the ehash chain.
+	 * - one reference for the timer.
+	 * We can use refcount_set() because the prior spin_lock()/spin_unlock()
+	 * committed all tw fields to memory.
+	 * Also note that after this point we have lost our implicit reference,
+	 * so we are not allowed to use tw anymore.
+	 */
+	refcount_set(&tw->tw_refcnt, 3);
 }
-EXPORT_SYMBOL_GPL(__inet_twsk_hashdance);
+EXPORT_SYMBOL_GPL(inet_twsk_hashdance);
 
 static void tw_timer_handler(struct timer_list *t)
 {
diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c
index 45ffd3d..b61f228 100644
--- a/net/ipv4/ip_gre.c
+++ b/net/ipv4/ip_gre.c
@@ -114,7 +114,8 @@ MODULE_PARM_DESC(log_ecn_error, "Log packets received with corrupted ECN");
 static struct rtnl_link_ops ipgre_link_ops __read_mostly;
 static int ipgre_tunnel_init(struct net_device *dev);
 static void erspan_build_header(struct sk_buff *skb,
-				__be32 id, u32 index, bool truncate);
+				__be32 id, u32 index,
+				bool truncate, bool is_ipv4);
 
 static unsigned int ipgre_net_id __read_mostly;
 static unsigned int gre_tap_net_id __read_mostly;
@@ -255,34 +256,43 @@ static int erspan_rcv(struct sk_buff *skb, struct tnl_ptk_info *tpi,
 {
 	struct net *net = dev_net(skb->dev);
 	struct metadata_dst *tun_dst = NULL;
+	struct erspan_base_hdr *ershdr;
+	struct erspan_metadata *pkt_md;
 	struct ip_tunnel_net *itn;
 	struct ip_tunnel *tunnel;
-	struct erspanhdr *ershdr;
 	const struct iphdr *iph;
-	__be32 index;
+	int ver;
 	int len;
 
 	itn = net_generic(net, erspan_net_id);
 	len = gre_hdr_len + sizeof(*ershdr);
 
+	/* Check the base header length */
 	if (unlikely(!pskb_may_pull(skb, len)))
 		return PACKET_REJECT;
 
 	iph = ip_hdr(skb);
-	ershdr = (struct erspanhdr *)(skb->data + gre_hdr_len);
+	ershdr = (struct erspan_base_hdr *)(skb->data + gre_hdr_len);
+	ver = (ntohs(ershdr->ver_vlan) & VER_MASK) >> VER_OFFSET;
 
 	/* The original GRE header does not have key field,
 	 * Use ERSPAN 10-bit session ID as key.
 	 */
 	tpi->key = cpu_to_be32(ntohs(ershdr->session_id) & ID_MASK);
-	index = ershdr->md.index;
 	tunnel = ip_tunnel_lookup(itn, skb->dev->ifindex,
 				  tpi->flags | TUNNEL_KEY,
 				  iph->saddr, iph->daddr, tpi->key);
 
 	if (tunnel) {
+		len = gre_hdr_len + erspan_hdr_len(ver);
+		if (unlikely(!pskb_may_pull(skb, len)))
+			return PACKET_REJECT;
+
+		ershdr = (struct erspan_base_hdr *)(skb->data + gre_hdr_len);
+		pkt_md = (struct erspan_metadata *)(ershdr + 1);
+
 		if (__iptunnel_pull_header(skb,
-					   gre_hdr_len + sizeof(*ershdr),
+					   len,
 					   htons(ETH_P_TEB),
 					   false, false) < 0)
 			goto drop;
@@ -303,15 +313,27 @@ static int erspan_rcv(struct sk_buff *skb, struct tnl_ptk_info *tpi,
 				return PACKET_REJECT;
 
 			md = ip_tunnel_info_opts(&tun_dst->u.tun_info);
-			if (!md)
-				return PACKET_REJECT;
+			memcpy(md, pkt_md, sizeof(*md));
+			md->version = ver;
 
-			md->index = index;
 			info = &tun_dst->u.tun_info;
 			info->key.tun_flags |= TUNNEL_ERSPAN_OPT;
 			info->options_len = sizeof(*md);
 		} else {
-			tunnel->index = ntohl(index);
+			tunnel->erspan_ver = ver;
+			if (ver == 1) {
+				tunnel->index = ntohl(pkt_md->u.index);
+			} else {
+				u16 md2_flags;
+				u16 dir, hwid;
+
+				md2_flags = ntohs(pkt_md->u.md2.flags);
+				dir = (md2_flags & DIR_MASK) >> DIR_OFFSET;
+				hwid = (md2_flags & HWID_MASK) >> HWID_OFFSET;
+				tunnel->dir = dir;
+				tunnel->hwid = hwid;
+			}
+
 		}
 
 		skb_reset_mac_header(skb);
@@ -405,14 +427,17 @@ static int gre_rcv(struct sk_buff *skb)
 	if (hdr_len < 0)
 		goto drop;
 
-	if (unlikely(tpi.proto == htons(ETH_P_ERSPAN))) {
+	if (unlikely(tpi.proto == htons(ETH_P_ERSPAN) ||
+		     tpi.proto == htons(ETH_P_ERSPAN2))) {
 		if (erspan_rcv(skb, &tpi, hdr_len) == PACKET_RCVD)
 			return 0;
+		goto out;
 	}
 
 	if (ipgre_rcv(skb, &tpi, hdr_len) == PACKET_RCVD)
 		return 0;
 
+out:
 	icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0);
 drop:
 	kfree_skb(skb);
@@ -560,6 +585,7 @@ static void erspan_fb_xmit(struct sk_buff *skb, struct net_device *dev,
 	bool truncate = false;
 	struct flowi4 fl;
 	int tunnel_hlen;
+	int version;
 	__be16 df;
 
 	tun_info = skb_tunnel_info(skb);
@@ -568,9 +594,13 @@ static void erspan_fb_xmit(struct sk_buff *skb, struct net_device *dev,
 		goto err_free_skb;
 
 	key = &tun_info->key;
+	md = ip_tunnel_info_opts(tun_info);
+	if (!md)
+		goto err_free_rt;
 
 	/* ERSPAN has fixed 8 byte GRE header */
-	tunnel_hlen = 8 + sizeof(struct erspanhdr);
+	version = md->version;
+	tunnel_hlen = 8 + erspan_hdr_len(version);
 
 	rt = prepare_fb_xmit(skb, dev, &fl, tunnel_hlen);
 	if (!rt)
@@ -584,12 +614,23 @@ static void erspan_fb_xmit(struct sk_buff *skb, struct net_device *dev,
 		truncate = true;
 	}
 
-	md = ip_tunnel_info_opts(tun_info);
-	if (!md)
-		goto err_free_rt;
+	if (version == 1) {
+		erspan_build_header(skb, tunnel_id_to_key32(key->tun_id),
+				    ntohl(md->u.index), truncate, true);
+	} else if (version == 2) {
+		u16 md2_flags;
+		u8 direction;
+		u16 hwid;
 
-	erspan_build_header(skb, tunnel_id_to_key32(key->tun_id),
-			    ntohl(md->index), truncate);
+		md2_flags = ntohs(md->u.md2.flags);
+		direction = (md2_flags & DIR_MASK) >> DIR_OFFSET;
+		hwid = (md2_flags & HWID_MASK) >> HWID_OFFSET;
+
+		erspan_build_header_v2(skb, tunnel_id_to_key32(key->tun_id),
+				       direction, hwid, truncate, true);
+	} else {
+		goto err_free_rt;
+	}
 
 	gre_build_header(skb, 8, TUNNEL_SEQ,
 			 htons(ETH_P_ERSPAN), 0, htonl(tunnel->o_seqno++));
@@ -668,52 +709,6 @@ static netdev_tx_t ipgre_xmit(struct sk_buff *skb,
 	return NETDEV_TX_OK;
 }
 
-static inline u8 tos_to_cos(u8 tos)
-{
-	u8 dscp, cos;
-
-	dscp = tos >> 2;
-	cos = dscp >> 3;
-	return cos;
-}
-
-static void erspan_build_header(struct sk_buff *skb,
-				__be32 id, u32 index, bool truncate)
-{
-	struct iphdr *iphdr = ip_hdr(skb);
-	struct ethhdr *eth = eth_hdr(skb);
-	enum erspan_encap_type enc_type;
-	struct erspanhdr *ershdr;
-	struct qtag_prefix {
-		__be16 eth_type;
-		__be16 tci;
-	} *qp;
-	u16 vlan_tci = 0;
-
-	enc_type = ERSPAN_ENCAP_NOVLAN;
-
-	/* If mirrored packet has vlan tag, extract tci and
-	 *  perserve vlan header in the mirrored frame.
-	 */
-	if (eth->h_proto == htons(ETH_P_8021Q)) {
-		qp = (struct qtag_prefix *)(skb->data + 2 * ETH_ALEN);
-		vlan_tci = ntohs(qp->tci);
-		enc_type = ERSPAN_ENCAP_INFRAME;
-	}
-
-	skb_push(skb, sizeof(*ershdr));
-	ershdr = (struct erspanhdr *)skb->data;
-	memset(ershdr, 0, sizeof(*ershdr));
-
-	ershdr->ver_vlan = htons((vlan_tci & VLAN_MASK) |
-				 (ERSPAN_VERSION << VER_OFFSET));
-	ershdr->session_id = htons((u16)(ntohl(id) & ID_MASK) |
-			   ((tos_to_cos(iphdr->tos) << COS_OFFSET) & COS_MASK) |
-			   (enc_type << EN_OFFSET & EN_MASK) |
-			   ((truncate << T_OFFSET) & T_MASK));
-	ershdr->md.index = htonl(index & INDEX_MASK);
-}
-
 static netdev_tx_t erspan_xmit(struct sk_buff *skb,
 			       struct net_device *dev)
 {
@@ -737,7 +732,14 @@ static netdev_tx_t erspan_xmit(struct sk_buff *skb,
 	}
 
 	/* Push ERSPAN header */
-	erspan_build_header(skb, tunnel->parms.o_key, tunnel->index, truncate);
+	if (tunnel->erspan_ver == 1)
+		erspan_build_header(skb, tunnel->parms.o_key, tunnel->index,
+				    truncate, true);
+	else
+		erspan_build_header_v2(skb, tunnel->parms.o_key,
+				       tunnel->dir, tunnel->hwid,
+				       truncate, true);
+
 	tunnel->parms.o_flags &= ~TUNNEL_KEY;
 	__gre_xmit(skb, dev, &tunnel->parms.iph, htons(ETH_P_ERSPAN));
 	return NETDEV_TX_OK;
@@ -1209,13 +1211,32 @@ static int ipgre_netlink_parms(struct net_device *dev,
 	if (data[IFLA_GRE_FWMARK])
 		*fwmark = nla_get_u32(data[IFLA_GRE_FWMARK]);
 
-	if (data[IFLA_GRE_ERSPAN_INDEX]) {
-		t->index = nla_get_u32(data[IFLA_GRE_ERSPAN_INDEX]);
+	if (data[IFLA_GRE_ERSPAN_VER]) {
+		t->erspan_ver = nla_get_u8(data[IFLA_GRE_ERSPAN_VER]);
 
-		if (t->index & ~INDEX_MASK)
+		if (t->erspan_ver != 1 && t->erspan_ver != 2)
 			return -EINVAL;
 	}
 
+	if (t->erspan_ver == 1) {
+		if (data[IFLA_GRE_ERSPAN_INDEX]) {
+			t->index = nla_get_u32(data[IFLA_GRE_ERSPAN_INDEX]);
+			if (t->index & ~INDEX_MASK)
+				return -EINVAL;
+		}
+	} else if (t->erspan_ver == 2) {
+		if (data[IFLA_GRE_ERSPAN_DIR]) {
+			t->dir = nla_get_u8(data[IFLA_GRE_ERSPAN_DIR]);
+			if (t->dir & ~(DIR_MASK >> DIR_OFFSET))
+				return -EINVAL;
+		}
+		if (data[IFLA_GRE_ERSPAN_HWID]) {
+			t->hwid = nla_get_u16(data[IFLA_GRE_ERSPAN_HWID]);
+			if (t->hwid & ~(HWID_MASK >> HWID_OFFSET))
+				return -EINVAL;
+		}
+	}
+
 	return 0;
 }
 
@@ -1282,7 +1303,7 @@ static int erspan_tunnel_init(struct net_device *dev)
 	tunnel->tun_hlen = 8;
 	tunnel->parms.iph.protocol = IPPROTO_GRE;
 	tunnel->hlen = tunnel->tun_hlen + tunnel->encap_hlen +
-		       sizeof(struct erspanhdr);
+		       erspan_hdr_len(tunnel->erspan_ver);
 	t_hlen = tunnel->hlen + sizeof(struct iphdr);
 
 	dev->needed_headroom = LL_MAX_HEADER + t_hlen + 4;
@@ -1413,6 +1434,12 @@ static size_t ipgre_get_size(const struct net_device *dev)
 		nla_total_size(4) +
 		/* IFLA_GRE_ERSPAN_INDEX */
 		nla_total_size(4) +
+		/* IFLA_GRE_ERSPAN_VER */
+		nla_total_size(1) +
+		/* IFLA_GRE_ERSPAN_DIR */
+		nla_total_size(1) +
+		/* IFLA_GRE_ERSPAN_HWID */
+		nla_total_size(2) +
 		0;
 }
 
@@ -1455,9 +1482,18 @@ static int ipgre_fill_info(struct sk_buff *skb, const struct net_device *dev)
 			goto nla_put_failure;
 	}
 
-	if (t->index)
+	if (nla_put_u8(skb, IFLA_GRE_ERSPAN_VER, t->erspan_ver))
+		goto nla_put_failure;
+
+	if (t->erspan_ver == 1) {
 		if (nla_put_u32(skb, IFLA_GRE_ERSPAN_INDEX, t->index))
 			goto nla_put_failure;
+	} else if (t->erspan_ver == 2) {
+		if (nla_put_u8(skb, IFLA_GRE_ERSPAN_DIR, t->dir))
+			goto nla_put_failure;
+		if (nla_put_u16(skb, IFLA_GRE_ERSPAN_HWID, t->hwid))
+			goto nla_put_failure;
+	}
 
 	return 0;
 
@@ -1493,6 +1529,9 @@ static const struct nla_policy ipgre_policy[IFLA_GRE_MAX + 1] = {
 	[IFLA_GRE_IGNORE_DF]	= { .type = NLA_U8 },
 	[IFLA_GRE_FWMARK]	= { .type = NLA_U32 },
 	[IFLA_GRE_ERSPAN_INDEX]	= { .type = NLA_U32 },
+	[IFLA_GRE_ERSPAN_VER]	= { .type = NLA_U8 },
+	[IFLA_GRE_ERSPAN_DIR]	= { .type = NLA_U8 },
+	[IFLA_GRE_ERSPAN_HWID]	= { .type = NLA_U16 },
 };
 
 static struct rtnl_link_ops ipgre_link_ops __read_mostly = {
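Both the receive and transmit paths unpack the v2 metadata the same way;
factored out, the shift-and-mask pattern looks like this (a sketch using
the mask/offset names from the code above, helper name hypothetical):

    static void demo_unpack_md2(const struct erspan_metadata *md,
                                u8 *dir, u16 *hwid)
    {
            u16 flags = ntohs(md->u.md2.flags);

            *dir  = (flags & DIR_MASK) >> DIR_OFFSET;    /* mirror direction */
            *hwid = (flags & HWID_MASK) >> HWID_OFFSET;  /* ERSPAN engine id */
    }
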
diff --git a/net/ipv4/netfilter.c b/net/ipv4/netfilter.c
index c0cc6aa..e6774cc 100644
--- a/net/ipv4/netfilter.c
+++ b/net/ipv4/netfilter.c
@@ -80,35 +80,7 @@ int ip_route_me_harder(struct net *net, struct sk_buff *skb, unsigned int addr_t
 }
 EXPORT_SYMBOL(ip_route_me_harder);
 
-/*
- * Extra routing may needed on local out, as the QUEUE target never
- * returns control to the table.
- */
-
-struct ip_rt_info {
-	__be32 daddr;
-	__be32 saddr;
-	u_int8_t tos;
-	u_int32_t mark;
-};
-
-static void nf_ip_saveroute(const struct sk_buff *skb,
-			    struct nf_queue_entry *entry)
-{
-	struct ip_rt_info *rt_info = nf_queue_entry_reroute(entry);
-
-	if (entry->state.hook == NF_INET_LOCAL_OUT) {
-		const struct iphdr *iph = ip_hdr(skb);
-
-		rt_info->tos = iph->tos;
-		rt_info->daddr = iph->daddr;
-		rt_info->saddr = iph->saddr;
-		rt_info->mark = skb->mark;
-	}
-}
-
-static int nf_ip_reroute(struct net *net, struct sk_buff *skb,
-			 const struct nf_queue_entry *entry)
+int nf_ip_reroute(struct sk_buff *skb, const struct nf_queue_entry *entry)
 {
 	const struct ip_rt_info *rt_info = nf_queue_entry_reroute(entry);
 
@@ -119,10 +91,12 @@ static int nf_ip_reroute(struct net *net, struct sk_buff *skb,
 		      skb->mark == rt_info->mark &&
 		      iph->daddr == rt_info->daddr &&
 		      iph->saddr == rt_info->saddr))
-			return ip_route_me_harder(net, skb, RTN_UNSPEC);
+			return ip_route_me_harder(entry->state.net, skb,
+						  RTN_UNSPEC);
 	}
 	return 0;
 }
+EXPORT_SYMBOL_GPL(nf_ip_reroute);
 
 __sum16 nf_ip_checksum(struct sk_buff *skb, unsigned int hook,
 			    unsigned int dataoff, u_int8_t protocol)
@@ -155,9 +129,9 @@ __sum16 nf_ip_checksum(struct sk_buff *skb, unsigned int hook,
 }
 EXPORT_SYMBOL(nf_ip_checksum);
 
-static __sum16 nf_ip_checksum_partial(struct sk_buff *skb, unsigned int hook,
-				      unsigned int dataoff, unsigned int len,
-				      u_int8_t protocol)
+__sum16 nf_ip_checksum_partial(struct sk_buff *skb, unsigned int hook,
+			       unsigned int dataoff, unsigned int len,
+			       u_int8_t protocol)
 {
 	const struct iphdr *iph = ip_hdr(skb);
 	__sum16 csum = 0;
@@ -175,9 +149,10 @@ static __sum16 nf_ip_checksum_partial(struct sk_buff *skb, unsigned int hook,
 	}
 	return csum;
 }
+EXPORT_SYMBOL_GPL(nf_ip_checksum_partial);
 
-static int nf_ip_route(struct net *net, struct dst_entry **dst,
-		       struct flowi *fl, bool strict __always_unused)
+int nf_ip_route(struct net *net, struct dst_entry **dst, struct flowi *fl,
+		bool strict __always_unused)
 {
 	struct rtable *rt = ip_route_output_key(net, &fl->u.ip4);
 	if (IS_ERR(rt))
@@ -185,19 +160,4 @@ static int nf_ip_route(struct net *net, struct dst_entry **dst,
 	*dst = &rt->dst;
 	return 0;
 }
-
-static const struct nf_afinfo nf_ip_afinfo = {
-	.family			= AF_INET,
-	.checksum		= nf_ip_checksum,
-	.checksum_partial	= nf_ip_checksum_partial,
-	.route			= nf_ip_route,
-	.saveroute		= nf_ip_saveroute,
-	.reroute		= nf_ip_reroute,
-	.route_key_size		= sizeof(struct ip_rt_info),
-};
-
-static int __init ipv4_netfilter_init(void)
-{
-	return nf_register_afinfo(&nf_ip_afinfo);
-}
-subsys_initcall(ipv4_netfilter_init);
+EXPORT_SYMBOL_GPL(nf_ip_route);
diff --git a/net/ipv4/netfilter/Kconfig b/net/ipv4/netfilter/Kconfig
index c11eb17..7d5d444 100644
--- a/net/ipv4/netfilter/Kconfig
+++ b/net/ipv4/netfilter/Kconfig
@@ -72,11 +72,20 @@
 
 config NF_TABLES_ARP
 	tristate "ARP nf_tables support"
+	select NETFILTER_FAMILY_ARP
 	help
 	  This option enables the ARP support for nf_tables.
 
 endif # NF_TABLES
 
+config NF_FLOW_TABLE_IPV4
+	select NF_FLOW_TABLE
+	tristate "Netfilter flow table IPv4 module"
+	help
+	  This option adds the flow table IPv4 support.
+
+	  To compile it as a module, choose M here.
+
 config NF_DUP_IPV4
 	tristate "Netfilter IPv4 packet duplication to alternate destination"
 	depends on !NF_CONNTRACK || NF_CONNTRACK
@@ -392,6 +401,7 @@
 config IP_NF_ARPTABLES
 	tristate "ARP tables support"
 	select NETFILTER_XTABLES
+	select NETFILTER_FAMILY_ARP
 	depends on NETFILTER_ADVANCED
 	help
 	  arptables is a general, extensible packet identification framework.
diff --git a/net/ipv4/netfilter/Makefile b/net/ipv4/netfilter/Makefile
index adcdae3..8bb1f0c 100644
--- a/net/ipv4/netfilter/Makefile
+++ b/net/ipv4/netfilter/Makefile
@@ -43,6 +43,9 @@
 obj-$(CONFIG_NFT_DUP_IPV4) += nft_dup_ipv4.o
 obj-$(CONFIG_NF_TABLES_ARP) += nf_tables_arp.o
 
+# flow table support
+obj-$(CONFIG_NF_FLOW_TABLE_IPV4) += nf_flow_table_ipv4.o
+
 # generic IP tables 
 obj-$(CONFIG_IP_NF_IPTABLES) += ip_tables.o
 
diff --git a/net/ipv4/netfilter/arp_tables.c b/net/ipv4/netfilter/arp_tables.c
index 0c3c944..bf8a534 100644
--- a/net/ipv4/netfilter/arp_tables.c
+++ b/net/ipv4/netfilter/arp_tables.c
@@ -810,9 +810,8 @@ static int get_info(struct net *net, void __user *user,
 	if (compat)
 		xt_compat_lock(NFPROTO_ARP);
 #endif
-	t = try_then_request_module(xt_find_table_lock(net, NFPROTO_ARP, name),
-				    "arptable_%s", name);
-	if (t) {
+	t = xt_request_find_table_lock(net, NFPROTO_ARP, name);
+	if (!IS_ERR(t)) {
 		struct arpt_getinfo info;
 		const struct xt_table_info *private = t->private;
 #ifdef CONFIG_COMPAT
@@ -841,7 +840,7 @@ static int get_info(struct net *net, void __user *user,
 		xt_table_unlock(t);
 		module_put(t->me);
 	} else
-		ret = -ENOENT;
+		ret = PTR_ERR(t);
 #ifdef CONFIG_COMPAT
 	if (compat)
 		xt_compat_unlock(NFPROTO_ARP);
@@ -866,7 +865,7 @@ static int get_entries(struct net *net, struct arpt_get_entries __user *uptr,
 	get.name[sizeof(get.name) - 1] = '\0';
 
 	t = xt_find_table_lock(net, NFPROTO_ARP, get.name);
-	if (t) {
+	if (!IS_ERR(t)) {
 		const struct xt_table_info *private = t->private;
 
 		if (get.size == private->size)
@@ -878,7 +877,7 @@ static int get_entries(struct net *net, struct arpt_get_entries __user *uptr,
 		module_put(t->me);
 		xt_table_unlock(t);
 	} else
-		ret = -ENOENT;
+		ret = PTR_ERR(t);
 
 	return ret;
 }
@@ -903,10 +902,9 @@ static int __do_replace(struct net *net, const char *name,
 		goto out;
 	}
 
-	t = try_then_request_module(xt_find_table_lock(net, NFPROTO_ARP, name),
-				    "arptable_%s", name);
-	if (!t) {
-		ret = -ENOENT;
+	t = xt_request_find_table_lock(net, NFPROTO_ARP, name);
+	if (IS_ERR(t)) {
+		ret = PTR_ERR(t);
 		goto free_newinfo_counters_untrans;
 	}
 
@@ -1020,8 +1018,8 @@ static int do_add_counters(struct net *net, const void __user *user,
 		return PTR_ERR(paddc);
 
 	t = xt_find_table_lock(net, NFPROTO_ARP, tmp.name);
-	if (!t) {
-		ret = -ENOENT;
+	if (IS_ERR(t)) {
+		ret = PTR_ERR(t);
 		goto free;
 	}
 
@@ -1408,7 +1406,7 @@ static int compat_get_entries(struct net *net,
 
 	xt_compat_lock(NFPROTO_ARP);
 	t = xt_find_table_lock(net, NFPROTO_ARP, get.name);
-	if (t) {
+	if (!IS_ERR(t)) {
 		const struct xt_table_info *private = t->private;
 		struct xt_table_info info;
 
@@ -1423,7 +1421,7 @@ static int compat_get_entries(struct net *net,
 		module_put(t->me);
 		xt_table_unlock(t);
 	} else
-		ret = -ENOENT;
+		ret = PTR_ERR(t);
 
 	xt_compat_unlock(NFPROTO_ARP);
 	return ret;
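xt_request_find_table_lock() folds the try_then_request_module() dance
into the xt core and reports failure as an ERR_PTR() instead of NULL, so
callers can propagate the exact error rather than hardcoding -ENOENT.
The resulting caller shape, sketched with a hypothetical function:

    static int demo_with_table(struct net *net, const char *name)
    {
            struct xt_table *t;

            t = xt_request_find_table_lock(net, NFPROTO_ARP, name);
            if (IS_ERR(t))
                    return PTR_ERR(t);  /* precise error from the xt core */

            /* ... inspect t->private while the table is locked ... */

            xt_table_unlock(t);
            module_put(t->me);
            return 0;
    }
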
diff --git a/net/ipv4/netfilter/ip_tables.c b/net/ipv4/netfilter/ip_tables.c
index 2e0d339..0b975aa 100644
--- a/net/ipv4/netfilter/ip_tables.c
+++ b/net/ipv4/netfilter/ip_tables.c
@@ -973,9 +973,8 @@ static int get_info(struct net *net, void __user *user,
 	if (compat)
 		xt_compat_lock(AF_INET);
 #endif
-	t = try_then_request_module(xt_find_table_lock(net, AF_INET, name),
-				    "iptable_%s", name);
-	if (t) {
+	t = xt_request_find_table_lock(net, AF_INET, name);
+	if (!IS_ERR(t)) {
 		struct ipt_getinfo info;
 		const struct xt_table_info *private = t->private;
 #ifdef CONFIG_COMPAT
@@ -1005,7 +1004,7 @@ static int get_info(struct net *net, void __user *user,
 		xt_table_unlock(t);
 		module_put(t->me);
 	} else
-		ret = -ENOENT;
+		ret = PTR_ERR(t);
 #ifdef CONFIG_COMPAT
 	if (compat)
 		xt_compat_unlock(AF_INET);
@@ -1030,7 +1029,7 @@ get_entries(struct net *net, struct ipt_get_entries __user *uptr,
 	get.name[sizeof(get.name) - 1] = '\0';
 
 	t = xt_find_table_lock(net, AF_INET, get.name);
-	if (t) {
+	if (!IS_ERR(t)) {
 		const struct xt_table_info *private = t->private;
 		if (get.size == private->size)
 			ret = copy_entries_to_user(private->size,
@@ -1041,7 +1040,7 @@ get_entries(struct net *net, struct ipt_get_entries __user *uptr,
 		module_put(t->me);
 		xt_table_unlock(t);
 	} else
-		ret = -ENOENT;
+		ret = PTR_ERR(t);
 
 	return ret;
 }
@@ -1064,10 +1063,9 @@ __do_replace(struct net *net, const char *name, unsigned int valid_hooks,
 		goto out;
 	}
 
-	t = try_then_request_module(xt_find_table_lock(net, AF_INET, name),
-				    "iptable_%s", name);
-	if (!t) {
-		ret = -ENOENT;
+	t = xt_request_find_table_lock(net, AF_INET, name);
+	if (IS_ERR(t)) {
+		ret = PTR_ERR(t);
 		goto free_newinfo_counters_untrans;
 	}
 
@@ -1181,8 +1179,8 @@ do_add_counters(struct net *net, const void __user *user,
 		return PTR_ERR(paddc);
 
 	t = xt_find_table_lock(net, AF_INET, tmp.name);
-	if (!t) {
-		ret = -ENOENT;
+	if (IS_ERR(t)) {
+		ret = PTR_ERR(t);
 		goto free;
 	}
 
@@ -1625,7 +1623,7 @@ compat_get_entries(struct net *net, struct compat_ipt_get_entries __user *uptr,
 
 	xt_compat_lock(AF_INET);
 	t = xt_find_table_lock(net, AF_INET, get.name);
-	if (t) {
+	if (!IS_ERR(t)) {
 		const struct xt_table_info *private = t->private;
 		struct xt_table_info info;
 		ret = compat_table_info(private, &info);
@@ -1639,7 +1637,7 @@ compat_get_entries(struct net *net, struct compat_ipt_get_entries __user *uptr,
 		module_put(t->me);
 		xt_table_unlock(t);
 	} else
-		ret = -ENOENT;
+		ret = PTR_ERR(t);
 
 	xt_compat_unlock(AF_INET);
 	return ret;
diff --git a/net/ipv4/netfilter/iptable_filter.c b/net/ipv4/netfilter/iptable_filter.c
index 7667f22..9ac92ea 100644
--- a/net/ipv4/netfilter/iptable_filter.c
+++ b/net/ipv4/netfilter/iptable_filter.c
@@ -38,12 +38,6 @@ static unsigned int
 iptable_filter_hook(void *priv, struct sk_buff *skb,
 		    const struct nf_hook_state *state)
 {
-	if (state->hook == NF_INET_LOCAL_OUT &&
-	    (skb->len < sizeof(struct iphdr) ||
-	     ip_hdrlen(skb) < sizeof(struct iphdr)))
-		/* root is playing with raw sockets. */
-		return NF_ACCEPT;
-
 	return ipt_do_table(skb, state, state->net->ipv4.iptable_filter);
 }
 
diff --git a/net/ipv4/netfilter/iptable_mangle.c b/net/ipv4/netfilter/iptable_mangle.c
index aebdb33..dea138ca 100644
--- a/net/ipv4/netfilter/iptable_mangle.c
+++ b/net/ipv4/netfilter/iptable_mangle.c
@@ -49,11 +49,6 @@ ipt_mangle_out(struct sk_buff *skb, const struct nf_hook_state *state)
 	u_int32_t mark;
 	int err;
 
-	/* root is playing with raw sockets. */
-	if (skb->len < sizeof(struct iphdr) ||
-	    ip_hdrlen(skb) < sizeof(struct iphdr))
-		return NF_ACCEPT;
-
 	/* Save things which could affect route */
 	mark = skb->mark;
 	iph = ip_hdr(skb);
diff --git a/net/ipv4/netfilter/iptable_nat.c b/net/ipv4/netfilter/iptable_nat.c
index a1a07b3..0f7255c 100644
--- a/net/ipv4/netfilter/iptable_nat.c
+++ b/net/ipv4/netfilter/iptable_nat.c
@@ -72,6 +72,7 @@ static const struct nf_hook_ops nf_nat_ipv4_ops[] = {
 	{
 		.hook		= iptable_nat_ipv4_in,
 		.pf		= NFPROTO_IPV4,
+		.nat_hook	= true,
 		.hooknum	= NF_INET_PRE_ROUTING,
 		.priority	= NF_IP_PRI_NAT_DST,
 	},
@@ -79,6 +80,7 @@ static const struct nf_hook_ops nf_nat_ipv4_ops[] = {
 	{
 		.hook		= iptable_nat_ipv4_out,
 		.pf		= NFPROTO_IPV4,
+		.nat_hook	= true,
 		.hooknum	= NF_INET_POST_ROUTING,
 		.priority	= NF_IP_PRI_NAT_SRC,
 	},
@@ -86,6 +88,7 @@ static const struct nf_hook_ops nf_nat_ipv4_ops[] = {
 	{
 		.hook		= iptable_nat_ipv4_local_fn,
 		.pf		= NFPROTO_IPV4,
+		.nat_hook	= true,
 		.hooknum	= NF_INET_LOCAL_OUT,
 		.priority	= NF_IP_PRI_NAT_DST,
 	},
@@ -93,6 +96,7 @@ static const struct nf_hook_ops nf_nat_ipv4_ops[] = {
 	{
 		.hook		= iptable_nat_ipv4_fn,
 		.pf		= NFPROTO_IPV4,
+		.nat_hook	= true,
 		.hooknum	= NF_INET_LOCAL_IN,
 		.priority	= NF_IP_PRI_NAT_SRC,
 	},
diff --git a/net/ipv4/netfilter/iptable_raw.c b/net/ipv4/netfilter/iptable_raw.c
index 2642ecd..a869d1f 100644
--- a/net/ipv4/netfilter/iptable_raw.c
+++ b/net/ipv4/netfilter/iptable_raw.c
@@ -26,12 +26,6 @@ static unsigned int
 iptable_raw_hook(void *priv, struct sk_buff *skb,
 		 const struct nf_hook_state *state)
 {
-	if (state->hook == NF_INET_LOCAL_OUT &&
-	    (skb->len < sizeof(struct iphdr) ||
-	     ip_hdrlen(skb) < sizeof(struct iphdr)))
-		/* root is playing with raw sockets. */
-		return NF_ACCEPT;
-
 	return ipt_do_table(skb, state, state->net->ipv4.iptable_raw);
 }
 
diff --git a/net/ipv4/netfilter/iptable_security.c b/net/ipv4/netfilter/iptable_security.c
index ff22659..e5379fe 100644
--- a/net/ipv4/netfilter/iptable_security.c
+++ b/net/ipv4/netfilter/iptable_security.c
@@ -43,12 +43,6 @@ static unsigned int
 iptable_security_hook(void *priv, struct sk_buff *skb,
 		      const struct nf_hook_state *state)
 {
-	if (state->hook == NF_INET_LOCAL_OUT &&
-	    (skb->len < sizeof(struct iphdr) ||
-	     ip_hdrlen(skb) < sizeof(struct iphdr)))
-		/* Somebody is playing with raw sockets. */
-		return NF_ACCEPT;
-
 	return ipt_do_table(skb, state, state->net->ipv4.iptable_security);
 }
 
diff --git a/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c b/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c
index 89af9d8..de213a3 100644
--- a/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c
+++ b/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c
@@ -154,11 +154,6 @@ static unsigned int ipv4_conntrack_local(void *priv,
 					 struct sk_buff *skb,
 					 const struct nf_hook_state *state)
 {
-	/* root is playing with raw sockets. */
-	if (skb->len < sizeof(struct iphdr) ||
-	    ip_hdrlen(skb) < sizeof(struct iphdr))
-		return NF_ACCEPT;
-
 	if (ip_is_fragment(ip_hdr(skb))) /* IP_NODEFRAG setsockopt set */
 		return NF_ACCEPT;
 
@@ -368,7 +363,7 @@ MODULE_ALIAS("nf_conntrack-" __stringify(AF_INET));
 MODULE_ALIAS("ip_conntrack");
 MODULE_LICENSE("GPL");
 
-static struct nf_conntrack_l4proto *builtin_l4proto4[] = {
+static const struct nf_conntrack_l4proto * const builtin_l4proto4[] = {
 	&nf_conntrack_l4proto_tcp4,
 	&nf_conntrack_l4proto_udp4,
 	&nf_conntrack_l4proto_icmp,
diff --git a/net/ipv4/netfilter/nf_conntrack_proto_icmp.c b/net/ipv4/netfilter/nf_conntrack_proto_icmp.c
index 1849fed..5c15bea 100644
--- a/net/ipv4/netfilter/nf_conntrack_proto_icmp.c
+++ b/net/ipv4/netfilter/nf_conntrack_proto_icmp.c
@@ -22,7 +22,7 @@
 #include <net/netfilter/nf_conntrack_zones.h>
 #include <net/netfilter/nf_log.h>
 
-static unsigned int nf_ct_icmp_timeout __read_mostly = 30*HZ;
+static const unsigned int nf_ct_icmp_timeout = 30*HZ;
 
 static inline struct nf_icmp_net *icmp_pernet(struct net *net)
 {
@@ -351,7 +351,7 @@ static struct nf_proto_net *icmp_get_net_proto(struct net *net)
 	return &net->ct.nf_ct_proto.icmp.pn;
 }
 
-struct nf_conntrack_l4proto nf_conntrack_l4proto_icmp __read_mostly =
+const struct nf_conntrack_l4proto nf_conntrack_l4proto_icmp =
 {
 	.l3proto		= PF_INET,
 	.l4proto		= IPPROTO_ICMP,
diff --git a/net/ipv4/netfilter/nf_flow_table_ipv4.c b/net/ipv4/netfilter/nf_flow_table_ipv4.c
new file mode 100644
index 0000000..b2d01eb
--- /dev/null
+++ b/net/ipv4/netfilter/nf_flow_table_ipv4.c
@@ -0,0 +1,284 @@
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/netfilter.h>
+#include <linux/rhashtable.h>
+#include <linux/ip.h>
+#include <linux/netdevice.h>
+#include <net/ip.h>
+#include <net/neighbour.h>
+#include <net/netfilter/nf_flow_table.h>
+#include <net/netfilter/nf_tables.h>
+/* For layer 4 checksum field offset. */
+#include <linux/tcp.h>
+#include <linux/udp.h>
+
+static int nf_flow_nat_ip_tcp(struct sk_buff *skb, unsigned int thoff,
+			      __be32 addr, __be32 new_addr)
+{
+	struct tcphdr *tcph;
+
+	if (!pskb_may_pull(skb, thoff + sizeof(*tcph)) ||
+	    skb_try_make_writable(skb, thoff + sizeof(*tcph)))
+		return -1;
+
+	tcph = (void *)(skb_network_header(skb) + thoff);
+	inet_proto_csum_replace4(&tcph->check, skb, addr, new_addr, true);
+
+	return 0;
+}
+
+static int nf_flow_nat_ip_udp(struct sk_buff *skb, unsigned int thoff,
+			      __be32 addr, __be32 new_addr)
+{
+	struct udphdr *udph;
+
+	if (!pskb_may_pull(skb, thoff + sizeof(*udph)) ||
+	    skb_try_make_writable(skb, thoff + sizeof(*udph)))
+		return -1;
+
+	udph = (void *)(skb_network_header(skb) + thoff);
+	if (udph->check || skb->ip_summed == CHECKSUM_PARTIAL) {
+		inet_proto_csum_replace4(&udph->check, skb, addr,
+					 new_addr, true);
+		if (!udph->check)
+			udph->check = CSUM_MANGLED_0;
+	}
+
+	return 0;
+}
+
+static int nf_flow_nat_ip_l4proto(struct sk_buff *skb, struct iphdr *iph,
+				  unsigned int thoff, __be32 addr,
+				  __be32 new_addr)
+{
+	switch (iph->protocol) {
+	case IPPROTO_TCP:
+		if (nf_flow_nat_ip_tcp(skb, thoff, addr, new_addr) < 0)
+			return NF_DROP;
+		break;
+	case IPPROTO_UDP:
+		if (nf_flow_nat_ip_udp(skb, thoff, addr, new_addr) < 0)
+			return NF_DROP;
+		break;
+	}
+
+	return 0;
+}
+
+static int nf_flow_snat_ip(const struct flow_offload *flow, struct sk_buff *skb,
+			   struct iphdr *iph, unsigned int thoff,
+			   enum flow_offload_tuple_dir dir)
+{
+	__be32 addr, new_addr;
+
+	switch (dir) {
+	case FLOW_OFFLOAD_DIR_ORIGINAL:
+		addr = iph->saddr;
+		new_addr = flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].tuple.dst_v4.s_addr;
+		iph->saddr = new_addr;
+		break;
+	case FLOW_OFFLOAD_DIR_REPLY:
+		addr = iph->daddr;
+		new_addr = flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple.src_v4.s_addr;
+		iph->daddr = new_addr;
+		break;
+	default:
+		return -1;
+	}
+	csum_replace4(&iph->check, addr, new_addr);
+
+	return nf_flow_nat_ip_l4proto(skb, iph, thoff, addr, new_addr);
+}
+
+static int nf_flow_dnat_ip(const struct flow_offload *flow, struct sk_buff *skb,
+			   struct iphdr *iph, unsigned int thoff,
+			   enum flow_offload_tuple_dir dir)
+{
+	__be32 addr, new_addr;
+
+	switch (dir) {
+	case FLOW_OFFLOAD_DIR_ORIGINAL:
+		addr = iph->daddr;
+		new_addr = flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].tuple.src_v4.s_addr;
+		iph->daddr = new_addr;
+		break;
+	case FLOW_OFFLOAD_DIR_REPLY:
+		addr = iph->saddr;
+		new_addr = flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple.dst_v4.s_addr;
+		iph->saddr = new_addr;
+		break;
+	default:
+		return -1;
+	}
+
+	return nf_flow_nat_ip_l4proto(skb, iph, thoff, addr, new_addr);
+}
+
+static int nf_flow_nat_ip(const struct flow_offload *flow, struct sk_buff *skb,
+			  enum flow_offload_tuple_dir dir)
+{
+	struct iphdr *iph = ip_hdr(skb);
+	unsigned int thoff = iph->ihl * 4;
+
+	if (flow->flags & FLOW_OFFLOAD_SNAT &&
+	    (nf_flow_snat_port(flow, skb, thoff, iph->protocol, dir) < 0 ||
+	     nf_flow_snat_ip(flow, skb, iph, thoff, dir) < 0))
+		return -1;
+	if (flow->flags & FLOW_OFFLOAD_DNAT &&
+	    (nf_flow_dnat_port(flow, skb, thoff, iph->protocol, dir) < 0 ||
+	     nf_flow_dnat_ip(flow, skb, iph, thoff, dir) < 0))
+		return -1;
+
+	return 0;
+}
+
+static bool ip_has_options(unsigned int thoff)
+{
+	return thoff != sizeof(struct iphdr);
+}
+
+static int nf_flow_tuple_ip(struct sk_buff *skb, const struct net_device *dev,
+			    struct flow_offload_tuple *tuple)
+{
+	struct flow_ports *ports;
+	unsigned int thoff;
+	struct iphdr *iph;
+
+	if (!pskb_may_pull(skb, sizeof(*iph)))
+		return -1;
+
+	iph = ip_hdr(skb);
+	thoff = iph->ihl * 4;
+
+	if (ip_is_fragment(iph) ||
+	    unlikely(ip_has_options(thoff)))
+		return -1;
+
+	if (iph->protocol != IPPROTO_TCP &&
+	    iph->protocol != IPPROTO_UDP)
+		return -1;
+
+	thoff = iph->ihl * 4;
+	if (!pskb_may_pull(skb, thoff + sizeof(*ports)))
+		return -1;
+
+	ports = (struct flow_ports *)(skb_network_header(skb) + thoff);
+
+	tuple->src_v4.s_addr	= iph->saddr;
+	tuple->dst_v4.s_addr	= iph->daddr;
+	tuple->src_port		= ports->source;
+	tuple->dst_port		= ports->dest;
+	tuple->l3proto		= AF_INET;
+	tuple->l4proto		= iph->protocol;
+	tuple->iifidx		= dev->ifindex;
+
+	return 0;
+}
+
+/* Based on ip_exceeds_mtu(). */
+static bool __nf_flow_exceeds_mtu(const struct sk_buff *skb, unsigned int mtu)
+{
+	if (skb->len <= mtu)
+		return false;
+
+	if ((ip_hdr(skb)->frag_off & htons(IP_DF)) == 0)
+		return false;
+
+	if (skb_is_gso(skb) && skb_gso_validate_mtu(skb, mtu))
+		return false;
+
+	return true;
+}
+
+static bool nf_flow_exceeds_mtu(struct sk_buff *skb, const struct rtable *rt)
+{
+	u32 mtu;
+
+	mtu = ip_dst_mtu_maybe_forward(&rt->dst, true);
+	if (__nf_flow_exceeds_mtu(skb, mtu))
+		return true;
+
+	return false;
+}
+
+unsigned int
+nf_flow_offload_ip_hook(void *priv, struct sk_buff *skb,
+			const struct nf_hook_state *state)
+{
+	struct flow_offload_tuple_rhash *tuplehash;
+	struct nf_flowtable *flow_table = priv;
+	struct flow_offload_tuple tuple = {};
+	enum flow_offload_tuple_dir dir;
+	struct flow_offload *flow;
+	struct net_device *outdev;
+	const struct rtable *rt;
+	struct iphdr *iph;
+	__be32 nexthop;
+
+	if (skb->protocol != htons(ETH_P_IP))
+		return NF_ACCEPT;
+
+	if (nf_flow_tuple_ip(skb, state->in, &tuple) < 0)
+		return NF_ACCEPT;
+
+	tuplehash = flow_offload_lookup(flow_table, &tuple);
+	if (tuplehash == NULL)
+		return NF_ACCEPT;
+
+	outdev = dev_get_by_index_rcu(state->net, tuplehash->tuple.oifidx);
+	if (!outdev)
+		return NF_ACCEPT;
+
+	dir = tuplehash->tuple.dir;
+	flow = container_of(tuplehash, struct flow_offload, tuplehash[dir]);
+
+	rt = (const struct rtable *)flow->tuplehash[dir].tuple.dst_cache;
+	if (unlikely(nf_flow_exceeds_mtu(skb, rt)))
+		return NF_ACCEPT;
+
+	if (skb_try_make_writable(skb, sizeof(*iph)))
+		return NF_DROP;
+
+	if (flow->flags & (FLOW_OFFLOAD_SNAT | FLOW_OFFLOAD_DNAT) &&
+	    nf_flow_nat_ip(flow, skb, dir) < 0)
+		return NF_DROP;
+
+	flow->timeout = (u32)jiffies + NF_FLOW_TIMEOUT;
+	iph = ip_hdr(skb);
+	ip_decrease_ttl(iph);
+
+	skb->dev = outdev;
+	nexthop = rt_nexthop(rt, flow->tuplehash[!dir].tuple.src_v4.s_addr);
+	neigh_xmit(NEIGH_ARP_TABLE, outdev, &nexthop, skb);
+
+	return NF_STOLEN;
+}
+EXPORT_SYMBOL_GPL(nf_flow_offload_ip_hook);
+
+static struct nf_flowtable_type flowtable_ipv4 = {
+	.family		= NFPROTO_IPV4,
+	.params		= &nf_flow_offload_rhash_params,
+	.gc		= nf_flow_offload_work_gc,
+	.hook		= nf_flow_offload_ip_hook,
+	.owner		= THIS_MODULE,
+};
+
+static int __init nf_flow_ipv4_module_init(void)
+{
+	nft_register_flowtable_type(&flowtable_ipv4);
+
+	return 0;
+}
+
+static void __exit nf_flow_ipv4_module_exit(void)
+{
+	nft_unregister_flowtable_type(&flowtable_ipv4);
+}
+
+module_init(nf_flow_ipv4_module_init);
+module_exit(nf_flow_ipv4_module_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Pablo Neira Ayuso <pablo@netfilter.org>");
+MODULE_ALIAS_NF_FLOWTABLE(AF_INET);
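
nf_flow_snat_ip() and nf_flow_dnat_ip() above rewrite a single IP address and then repair the header checksum incrementally with csum_replace4() rather than recomputing it over the whole header; the L4 checksum gets the same treatment via inet_proto_csum_replace4(). A standalone sketch of the underlying RFC 1624 update for one changed 32-bit field follows; the helper name is mine and this shows the arithmetic, not the kernel's exact implementation:

    #include <stdint.h>
    #include <stdio.h>

    /* Incremental IPv4 checksum update per RFC 1624, eqn. 3:
     * check' = ~(~check + ~old + new), folded back to 16 bits.
     * This is the arithmetic behind a csum_replace4()-style helper when a
     * single 32-bit field (here an address) changes. */
    static uint16_t csum_update32(uint16_t check, uint32_t from, uint32_t to)
    {
        uint32_t sum = (uint16_t)~check;

        sum += (~from >> 16) & 0xffff;
        sum += ~from & 0xffff;
        sum += to >> 16;
        sum += to & 0xffff;

        while (sum >> 16)                   /* fold carries back in */
            sum = (sum & 0xffff) + (sum >> 16);

        return (uint16_t)~sum;
    }

    int main(void)
    {
        /* SNAT rewriting 192.0.2.1 -> 198.51.100.7 under some checksum. */
        printf("0x%04x\n", csum_update32(0xb1e6, 0xc0000201, 0xc6336407));
        return 0;
    }

Because the checksum is a one's-complement sum, only the delta between the old and new field matters, which is why a 4-byte rewrite never requires touching the rest of the header.
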
diff --git a/net/ipv4/netfilter/nf_nat_l3proto_ipv4.c b/net/ipv4/netfilter/nf_nat_l3proto_ipv4.c
index 0443ca4..f7ff6a36 100644
--- a/net/ipv4/netfilter/nf_nat_l3proto_ipv4.c
+++ b/net/ipv4/netfilter/nf_nat_l3proto_ipv4.c
@@ -356,11 +356,6 @@ nf_nat_ipv4_out(void *priv, struct sk_buff *skb,
 #endif
 	unsigned int ret;
 
-	/* root is playing with raw sockets. */
-	if (skb->len < sizeof(struct iphdr) ||
-	    ip_hdrlen(skb) < sizeof(struct iphdr))
-		return NF_ACCEPT;
-
 	ret = nf_nat_ipv4_fn(priv, skb, state, do_chain);
 #ifdef CONFIG_XFRM
 	if (ret != NF_DROP && ret != NF_STOLEN &&
@@ -396,11 +391,6 @@ nf_nat_ipv4_local_fn(void *priv, struct sk_buff *skb,
 	unsigned int ret;
 	int err;
 
-	/* root is playing with raw sockets. */
-	if (skb->len < sizeof(struct iphdr) ||
-	    ip_hdrlen(skb) < sizeof(struct iphdr))
-		return NF_ACCEPT;
-
 	ret = nf_nat_ipv4_fn(priv, skb, state, do_chain);
 	if (ret != NF_DROP && ret != NF_STOLEN &&
 	    (ct = nf_ct_get(skb, &ctinfo)) != NULL) {
diff --git a/net/ipv4/netfilter/nf_tables_arp.c b/net/ipv4/netfilter/nf_tables_arp.c
index 4bbc273..f84c177 100644
--- a/net/ipv4/netfilter/nf_tables_arp.c
+++ b/net/ipv4/netfilter/nf_tables_arp.c
@@ -21,7 +21,8 @@ nft_do_chain_arp(void *priv,
 {
 	struct nft_pktinfo pkt;
 
-	nft_set_pktinfo_unspec(&pkt, skb, state);
+	nft_set_pktinfo(&pkt, skb, state);
+	nft_set_pktinfo_unspec(&pkt, skb);
 
 	return nft_do_chain(&pkt, priv);
 }
@@ -30,12 +31,6 @@ static struct nft_af_info nft_af_arp __read_mostly = {
 	.family		= NFPROTO_ARP,
 	.nhooks		= NF_ARP_NUMHOOKS,
 	.owner		= THIS_MODULE,
-	.nops		= 1,
-	.hooks		= {
-		[NF_ARP_IN]		= nft_do_chain_arp,
-		[NF_ARP_OUT]		= nft_do_chain_arp,
-		[NF_ARP_FORWARD]	= nft_do_chain_arp,
-	},
 };
 
 static int nf_tables_arp_init_net(struct net *net)
@@ -73,6 +68,10 @@ static const struct nf_chain_type filter_arp = {
 	.owner		= THIS_MODULE,
 	.hook_mask	= (1 << NF_ARP_IN) |
 			  (1 << NF_ARP_OUT),
+	.hooks		= {
+		[NF_ARP_IN]		= nft_do_chain_arp,
+		[NF_ARP_OUT]		= nft_do_chain_arp,
+	},
 };
 
 static int __init nf_tables_arp_init(void)
diff --git a/net/ipv4/netfilter/nf_tables_ipv4.c b/net/ipv4/netfilter/nf_tables_ipv4.c
index 2840a29..f467525 100644
--- a/net/ipv4/netfilter/nf_tables_ipv4.c
+++ b/net/ipv4/netfilter/nf_tables_ipv4.c
@@ -24,40 +24,17 @@ static unsigned int nft_do_chain_ipv4(void *priv,
 {
 	struct nft_pktinfo pkt;
 
-	nft_set_pktinfo_ipv4(&pkt, skb, state);
+	nft_set_pktinfo(&pkt, skb, state);
+	nft_set_pktinfo_ipv4(&pkt, skb);
 
 	return nft_do_chain(&pkt, priv);
 }
 
-static unsigned int nft_ipv4_output(void *priv,
-				    struct sk_buff *skb,
-				    const struct nf_hook_state *state)
-{
-	if (unlikely(skb->len < sizeof(struct iphdr) ||
-		     ip_hdr(skb)->ihl < sizeof(struct iphdr) / 4)) {
-		if (net_ratelimit())
-			pr_info("nf_tables_ipv4: ignoring short SOCK_RAW "
-				"packet\n");
-		return NF_ACCEPT;
-	}
-
-	return nft_do_chain_ipv4(priv, skb, state);
-}
-
-struct nft_af_info nft_af_ipv4 __read_mostly = {
+static struct nft_af_info nft_af_ipv4 __read_mostly = {
 	.family		= NFPROTO_IPV4,
 	.nhooks		= NF_INET_NUMHOOKS,
 	.owner		= THIS_MODULE,
-	.nops		= 1,
-	.hooks		= {
-		[NF_INET_LOCAL_IN]	= nft_do_chain_ipv4,
-		[NF_INET_LOCAL_OUT]	= nft_ipv4_output,
-		[NF_INET_FORWARD]	= nft_do_chain_ipv4,
-		[NF_INET_PRE_ROUTING]	= nft_do_chain_ipv4,
-		[NF_INET_POST_ROUTING]	= nft_do_chain_ipv4,
-	},
 };
-EXPORT_SYMBOL_GPL(nft_af_ipv4);
 
 static int nf_tables_ipv4_init_net(struct net *net)
 {
@@ -97,6 +74,13 @@ static const struct nf_chain_type filter_ipv4 = {
 			  (1 << NF_INET_FORWARD) |
 			  (1 << NF_INET_PRE_ROUTING) |
 			  (1 << NF_INET_POST_ROUTING),
+	.hooks		= {
+		[NF_INET_LOCAL_IN]	= nft_do_chain_ipv4,
+		[NF_INET_LOCAL_OUT]	= nft_do_chain_ipv4,
+		[NF_INET_FORWARD]	= nft_do_chain_ipv4,
+		[NF_INET_PRE_ROUTING]	= nft_do_chain_ipv4,
+		[NF_INET_POST_ROUTING]	= nft_do_chain_ipv4,
+	},
 };
 
 static int __init nf_tables_ipv4_init(void)
diff --git a/net/ipv4/netfilter/nft_chain_nat_ipv4.c b/net/ipv4/netfilter/nft_chain_nat_ipv4.c
index f5c66a7..f2a4909 100644
--- a/net/ipv4/netfilter/nft_chain_nat_ipv4.c
+++ b/net/ipv4/netfilter/nft_chain_nat_ipv4.c
@@ -33,7 +33,8 @@ static unsigned int nft_nat_do_chain(void *priv,
 {
 	struct nft_pktinfo pkt;
 
-	nft_set_pktinfo_ipv4(&pkt, skb, state);
+	nft_set_pktinfo(&pkt, skb, state);
+	nft_set_pktinfo_ipv4(&pkt, skb);
 
 	return nft_do_chain(&pkt, priv);
 }
diff --git a/net/ipv4/netfilter/nft_chain_route_ipv4.c b/net/ipv4/netfilter/nft_chain_route_ipv4.c
index 30493be..d965c22 100644
--- a/net/ipv4/netfilter/nft_chain_route_ipv4.c
+++ b/net/ipv4/netfilter/nft_chain_route_ipv4.c
@@ -33,12 +33,8 @@ static unsigned int nf_route_table_hook(void *priv,
 	const struct iphdr *iph;
 	int err;
 
-	/* root is playing with raw sockets. */
-	if (skb->len < sizeof(struct iphdr) ||
-	    ip_hdrlen(skb) < sizeof(struct iphdr))
-		return NF_ACCEPT;
-
-	nft_set_pktinfo_ipv4(&pkt, skb, state);
+	nft_set_pktinfo(&pkt, skb, state);
+	nft_set_pktinfo_ipv4(&pkt, skb);
 
 	mark = skb->mark;
 	iph = ip_hdr(skb);
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index 43b69af..f0ed031 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -1106,7 +1106,7 @@ void ipv4_sk_update_pmtu(struct sk_buff *skb, struct sock *sk, u32 mtu)
 		new = true;
 	}
 
-	__ip_rt_update_pmtu((struct rtable *) rt->dst.path, &fl4, mtu);
+	__ip_rt_update_pmtu((struct rtable *) xfrm_dst_path(&rt->dst), &fl4, mtu);
 
 	if (!dst_check(&rt->dst, 0)) {
 		if (new)
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index f08eebe..d7cf861 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -283,8 +283,6 @@
 #include <asm/ioctls.h>
 #include <net/busy_poll.h>
 
-#include <trace/events/tcp.h>
-
 struct percpu_counter tcp_orphan_count;
 EXPORT_SYMBOL_GPL(tcp_orphan_count);
 
@@ -500,11 +498,9 @@ unsigned int tcp_poll(struct file *file, struct socket *sock, poll_table *wait)
 	const struct tcp_sock *tp = tcp_sk(sk);
 	int state;
 
-	sock_rps_record_flow(sk);
-
 	sock_poll_wait(file, sk_sleep(sk), wait);
 
-	state = sk_state_load(sk);
+	state = inet_sk_state_load(sk);
 	if (state == TCP_LISTEN)
 		return inet_csk_listen_poll(sk);
 
@@ -1106,12 +1102,15 @@ static int linear_payload_sz(bool first_skb)
 	return 0;
 }
 
-static int select_size(const struct sock *sk, bool sg, bool first_skb)
+static int select_size(const struct sock *sk, bool sg, bool first_skb, bool zc)
 {
 	const struct tcp_sock *tp = tcp_sk(sk);
 	int tmp = tp->mss_cache;
 
 	if (sg) {
+		if (zc)
+			return 0;
+
 		if (sk_can_gso(sk)) {
 			tmp = linear_payload_sz(first_skb);
 		} else {
@@ -1188,7 +1187,7 @@ int tcp_sendmsg_locked(struct sock *sk, struct msghdr *msg, size_t size)
 	int flags, err, copied = 0;
 	int mss_now = 0, size_goal, copied_syn = 0;
 	bool process_backlog = false;
-	bool sg;
+	bool sg, zc = false;
 	long timeo;
 
 	flags = msg->msg_flags;
@@ -1206,7 +1205,8 @@ int tcp_sendmsg_locked(struct sock *sk, struct msghdr *msg, size_t size)
 			goto out_err;
 		}
 
-		if (!(sk_check_csum_caps(sk) && sk->sk_route_caps & NETIF_F_SG))
+		zc = sk_check_csum_caps(sk) && sk->sk_route_caps & NETIF_F_SG;
+		if (!zc)
 			uarg->zerocopy = 0;
 	}
 
@@ -1283,6 +1283,7 @@ int tcp_sendmsg_locked(struct sock *sk, struct msghdr *msg, size_t size)
 
 		if (copy <= 0 || !tcp_skb_can_collapse_to(skb)) {
 			bool first_skb;
+			int linear;
 
 new_segment:
 			/* Allocate new segment. If the interface is SG,
@@ -1296,9 +1297,8 @@ int tcp_sendmsg_locked(struct sock *sk, struct msghdr *msg, size_t size)
 				goto restart;
 			}
 			first_skb = tcp_rtx_and_write_queues_empty(sk);
-			skb = sk_stream_alloc_skb(sk,
-						  select_size(sk, sg, first_skb),
-						  sk->sk_allocation,
+			linear = select_size(sk, sg, first_skb, zc);
+			skb = sk_stream_alloc_skb(sk, linear, sk->sk_allocation,
 						  first_skb);
 			if (!skb)
 				goto wait_for_memory;
@@ -1327,13 +1327,13 @@ int tcp_sendmsg_locked(struct sock *sk, struct msghdr *msg, size_t size)
 			copy = msg_data_left(msg);
 
 		/* Where to copy to? */
-		if (skb_availroom(skb) > 0) {
+		if (skb_availroom(skb) > 0 && !zc) {
 			/* We have some space in skb head. Superb! */
 			copy = min_t(int, copy, skb_availroom(skb));
 			err = skb_add_data_nocache(sk, skb, &msg->msg_iter, copy);
 			if (err)
 				goto do_fault;
-		} else if (!uarg || !uarg->zerocopy) {
+		} else if (!zc) {
 			bool merge = true;
 			int i = skb_shinfo(skb)->nr_frags;
 			struct page_frag *pfrag = sk_page_frag(sk);
@@ -1373,8 +1373,10 @@ int tcp_sendmsg_locked(struct sock *sk, struct msghdr *msg, size_t size)
 			pfrag->offset += copy;
 		} else {
 			err = skb_zerocopy_iter_stream(sk, skb, msg, copy, uarg);
-			if (err == -EMSGSIZE || err == -EEXIST)
+			if (err == -EMSGSIZE || err == -EEXIST) {
+				tcp_mark_push(tp, skb);
 				goto new_segment;
+			}
 			if (err < 0)
 				goto do_error;
 			copy = err;
@@ -1731,8 +1733,8 @@ static void tcp_update_recv_tstamps(struct sk_buff *skb,
 }
 
 /* Similar to __sock_recv_timestamp, but does not require an skb */
-void tcp_recv_timestamp(struct msghdr *msg, const struct sock *sk,
-			struct scm_timestamping *tss)
+static void tcp_recv_timestamp(struct msghdr *msg, const struct sock *sk,
+			       struct scm_timestamping *tss)
 {
 	struct timeval tv;
 	bool has_timestamping = false;
@@ -2040,8 +2042,6 @@ void tcp_set_state(struct sock *sk, int state)
 {
 	int oldstate = sk->sk_state;
 
-	trace_tcp_set_state(sk, oldstate, state);
-
 	switch (state) {
 	case TCP_ESTABLISHED:
 		if (oldstate != TCP_ESTABLISHED)
@@ -2065,7 +2065,7 @@ void tcp_set_state(struct sock *sk, int state)
 	/* Change state AFTER socket is unhashed to avoid closed
 	 * socket sitting in hash tables.
 	 */
-	sk_state_store(sk, state);
+	inet_sk_state_store(sk, state);
 
 #ifdef STATE_TRACE
 	SOCK_DEBUG(sk, "TCP sk=%p, State %s -> %s\n", sk, statename[oldstate], statename[state]);
@@ -2920,7 +2920,7 @@ void tcp_get_info(struct sock *sk, struct tcp_info *info)
 	if (sk->sk_type != SOCK_STREAM)
 		return;
 
-	info->tcpi_state = sk_state_load(sk);
+	info->tcpi_state = inet_sk_state_load(sk);
 
 	/* Report meaningful fields for all TCP states, including listeners */
 	rate = READ_ONCE(sk->sk_pacing_rate);
@@ -3578,6 +3578,9 @@ void __init tcp_init(void)
 	percpu_counter_init(&tcp_sockets_allocated, 0, GFP_KERNEL);
 	percpu_counter_init(&tcp_orphan_count, 0, GFP_KERNEL);
 	inet_hashinfo_init(&tcp_hashinfo);
+	inet_hashinfo2_init(&tcp_hashinfo, "tcp_listen_portaddr_hash",
+			    thash_entries, 21,  /* one slot per 2 MB */
+			    0, 64 * 1024);
 	tcp_hashinfo.bind_bucket_cachep =
 		kmem_cache_create("tcp_bind_bucket",
 				  sizeof(struct inet_bind_bucket), 0,
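
The tcp_sendmsg_locked()/select_size() changes above cache the zerocopy decision in a single zc flag: MSG_ZEROCOPY is honoured only when the route supports checksum offload plus scatter-gather, and in that case new segments are allocated with a zero-length linear area so the payload lands entirely in page frags (the skb-head and page_frag copy branches are skipped). A toy rendering of that sizing decision; the function name and the gso_header_room parameter are invented stand-ins, not the kernel's exact code:

    #include <stdbool.h>
    #include <stdio.h>

    /* The sizing decision from select_size(), reduced to a toy: with
     * zerocopy usable we ask for an empty linear area so every payload
     * byte can live in (user-page) frags instead of being copied into
     * the skb head. gso_header_room is an invented stand-in for
     * linear_payload_sz(). */
    static int linear_size(bool sg, bool zc, int mss, int gso_header_room)
    {
        if (!sg)
            return mss;              /* no SG: everything goes linear */
        if (zc)
            return 0;                /* zerocopy: frags only */
        return gso_header_room;      /* GSO: small linear part for headers */
    }

    int main(void)
    {
        printf("zc:  %d\n", linear_size(true, true, 1460, 160));
        printf("gso: %d\n", linear_size(true, false, 1460, 160));
        return 0;
    }
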
diff --git a/net/ipv4/tcp_diag.c b/net/ipv4/tcp_diag.c
index abbf0ed..81148f7 100644
--- a/net/ipv4/tcp_diag.c
+++ b/net/ipv4/tcp_diag.c
@@ -24,7 +24,7 @@ static void tcp_diag_get_info(struct sock *sk, struct inet_diag_msg *r,
 {
 	struct tcp_info *info = _info;
 
-	if (sk_state_load(sk) == TCP_LISTEN) {
+	if (inet_sk_state_load(sk) == TCP_LISTEN) {
 		r->idiag_rqueue = sk->sk_ack_backlog;
 		r->idiag_wqueue = sk->sk_max_ack_backlog;
 	} else if (sk->sk_type == SOCK_STREAM) {
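
sk_state_load()/sk_state_store() are replaced throughout by inet_sk_state_load()/inet_sk_state_store(). The pairing matters because tcp_diag, /proc and tcp_poll() read sk_state without the socket lock: the writer's store-release and the readers' load-acquire keep the state transition ordered against surrounding updates. A userspace analogy using C11 atomics; the kernel helpers use smp_store_release()/smp_load_acquire(), so this is a model of the semantics, not their code:

    #include <stdatomic.h>
    #include <stdio.h>

    /* Userspace analogy for inet_sk_state_store()/inet_sk_state_load():
     * the writer's release pairs with the readers' acquire, so a reader
     * that sees the new state also sees writes made before the store. */
    struct sock_model { _Atomic int sk_state; };

    static void state_store(struct sock_model *sk, int state)
    {
        atomic_store_explicit(&sk->sk_state, state, memory_order_release);
    }

    static int state_load(struct sock_model *sk)
    {
        return atomic_load_explicit(&sk->sk_state, memory_order_acquire);
    }

    int main(void)
    {
        struct sock_model sk = { 1 };   /* TCP_ESTABLISHED */

        state_store(&sk, 10);           /* TCP_LISTEN */
        printf("state=%d\n", state_load(&sk));
        return 0;
    }
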
diff --git a/net/ipv4/tcp_fastopen.c b/net/ipv4/tcp_fastopen.c
index 78c192e..018a484 100644
--- a/net/ipv4/tcp_fastopen.c
+++ b/net/ipv4/tcp_fastopen.c
@@ -379,18 +379,9 @@ struct sock *tcp_try_fastopen(struct sock *sk, struct sk_buff *skb,
 bool tcp_fastopen_cookie_check(struct sock *sk, u16 *mss,
 			       struct tcp_fastopen_cookie *cookie)
 {
-	unsigned long last_syn_loss = 0;
 	const struct dst_entry *dst;
-	int syn_loss = 0;
 
-	tcp_fastopen_cache_get(sk, mss, cookie, &syn_loss, &last_syn_loss);
-
-	/* Recurring FO SYN losses: no cookie or data in SYN */
-	if (syn_loss > 1 &&
-	    time_before(jiffies, last_syn_loss + (60*HZ << syn_loss))) {
-		cookie->len = -1;
-		return false;
-	}
+	tcp_fastopen_cache_get(sk, mss, cookie);
 
 	/* Firewall blackhole issue check */
 	if (tcp_fastopen_active_should_disable(sk)) {
@@ -448,6 +439,8 @@ EXPORT_SYMBOL(tcp_fastopen_defer_connect);
  * following circumstances:
  *   1. client side TFO socket receives out of order FIN
  *   2. client side TFO socket receives out of order RST
+ *   3. client side TFO socket has timed out three times consecutively during
+ *      or after handshake
  * We disable active side TFO globally for 1hr at first. Then if it
  * happens again, we disable it for 2h, then 4h, 8h, ...
  * And we reset the timeout back to 1hr when we see a successful active
@@ -524,3 +517,20 @@ void tcp_fastopen_active_disable_ofo_check(struct sock *sk)
 		dst_release(dst);
 	}
 }
+
+void tcp_fastopen_active_detect_blackhole(struct sock *sk, bool expired)
+{
+	u32 timeouts = inet_csk(sk)->icsk_retransmits;
+	struct tcp_sock *tp = tcp_sk(sk);
+
+	/* Broken middle-boxes may black-hole Fast Open connection during or
+	 * even after the handshake. Be extremely conservative and pause
+	 * Fast Open globally after hitting the third consecutive timeout or
+	 * exceeding the configured timeout limit.
+	 */
+	if ((tp->syn_fastopen || tp->syn_data || tp->syn_data_acked) &&
+	    (timeouts == 2 || (timeouts < 2 && expired))) {
+		tcp_fastopen_active_disable(sk);
+		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPFASTOPENACTIVEFAIL);
+	}
+}
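
tcp_fastopen_active_detect_blackhole() centralises the heuristic that previously lived in tcp_write_timeout(): three consecutive timeouts (or fewer, if the retransmission deadline has already expired) on a connection that attempted Fast Open pause active TFO globally. Per the comment above, the pause doubles on each recurrence - 1h, 2h, 4h, 8h and so on - which a shift expresses directly. A sketch of that arithmetic only; the kernel tracks the event count and base timeout elsewhere, so this is not its literal code:

    #include <stdio.h>

    /* The backoff described in the comment above: the global TFO pause
     * doubles on each consecutive blackhole event. Illustration only. */
    static unsigned long tfo_pause_secs(unsigned int events)
    {
        const unsigned long base = 3600;    /* 1 hour */

        return base << (events - 1);        /* events = 1, 2, 3, ... */
    }

    int main(void)
    {
        for (unsigned int e = 1; e <= 4; e++)
            printf("event %u: pause %lu s\n", e, tfo_pause_secs(e));
        return 0;
    }
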
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 45f750e..ff71b18 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -578,8 +578,8 @@ static inline void tcp_rcv_rtt_measure_ts(struct sock *sk,
 void tcp_rcv_space_adjust(struct sock *sk)
 {
 	struct tcp_sock *tp = tcp_sk(sk);
+	u32 copied;
 	int time;
-	int copied;
 
 	tcp_mstamp_refresh(tp);
 	time = tcp_stamp_us_delta(tp->tcp_mstamp, tp->rcvq_space.time);
@@ -602,38 +602,31 @@ void tcp_rcv_space_adjust(struct sock *sk)
 
 	if (sock_net(sk)->ipv4.sysctl_tcp_moderate_rcvbuf &&
 	    !(sk->sk_userlocks & SOCK_RCVBUF_LOCK)) {
-		int rcvwin, rcvmem, rcvbuf;
+		int rcvmem, rcvbuf;
+		u64 rcvwin, grow;
 
 		/* minimal window to cope with packet losses, assuming
 		 * steady state. Add some cushion because of small variations.
 		 */
-		rcvwin = (copied << 1) + 16 * tp->advmss;
+		rcvwin = ((u64)copied << 1) + 16 * tp->advmss;
 
-		/* If rate increased by 25%,
-		 *	assume slow start, rcvwin = 3 * copied
-		 * If rate increased by 50%,
-		 *	assume sender can use 2x growth, rcvwin = 4 * copied
-		 */
-		if (copied >=
-		    tp->rcvq_space.space + (tp->rcvq_space.space >> 2)) {
-			if (copied >=
-			    tp->rcvq_space.space + (tp->rcvq_space.space >> 1))
-				rcvwin <<= 1;
-			else
-				rcvwin += (rcvwin >> 1);
-		}
+		/* Accommodate for sender rate increase (eg. slow start) */
+		grow = rcvwin * (copied - tp->rcvq_space.space);
+		do_div(grow, tp->rcvq_space.space);
+		rcvwin += (grow << 1);
 
 		rcvmem = SKB_TRUESIZE(tp->advmss + MAX_TCP_HEADER);
 		while (tcp_win_from_space(sk, rcvmem) < tp->advmss)
 			rcvmem += 128;
 
-		rcvbuf = min(rcvwin / tp->advmss * rcvmem,
-			     sock_net(sk)->ipv4.sysctl_tcp_rmem[2]);
+		do_div(rcvwin, tp->advmss);
+		rcvbuf = min_t(u64, rcvwin * rcvmem,
+			       sock_net(sk)->ipv4.sysctl_tcp_rmem[2]);
 		if (rcvbuf > sk->sk_rcvbuf) {
 			sk->sk_rcvbuf = rcvbuf;
 
 			/* Make the window clamp follow along.  */
-			tp->window_clamp = rcvwin;
+			tp->window_clamp = tcp_win_from_space(sk, rcvbuf);
 		}
 	}
 	tp->rcvq_space.space = copied;
@@ -5306,6 +5299,9 @@ void tcp_rcv_established(struct sock *sk, struct sk_buff *skb,
 	unsigned int len = skb->len;
 	struct tcp_sock *tp = tcp_sk(sk);
 
+	/* TCP congestion window tracking */
+	trace_tcp_probe(sk, skb);
+
 	tcp_mstamp_refresh(tp);
 	if (unlikely(!sk->sk_rx_dst))
 		inet_csk(sk)->icsk_af_ops->sk_rx_dst_set(sk, skb);
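
The tcp_rcv_space_adjust() rewrite above replaces the old step heuristic (1.5x window at a 25% rate increase, 2x at 50%) with a continuous one, computed in u64 so the multiplications cannot overflow the way the old int math could: rcvwin = 2*copied + 16*advmss, then rcvwin += 2 * rcvwin * (copied - prev_space) / prev_space. The same arithmetic in plain C; do_div() in the hunk is the kernel's in-place 64-by-32 division helper, and ordinary division is used here:

    #include <stdint.h>
    #include <stdio.h>

    /* The receive-window growth from the hunk above, in plain C. */
    static uint64_t drs_rcvwin(uint32_t copied, uint32_t prev_space,
                               uint32_t advmss)
    {
        uint64_t rcvwin = ((uint64_t)copied << 1) + 16ULL * advmss;
        uint64_t grow = rcvwin * (copied - prev_space) / prev_space;

        return rcvwin + (grow << 1);
    }

    int main(void)
    {
        /* copied is 50% above the last measurement. */
        printf("%llu\n", (unsigned long long)drs_rcvwin(150000, 100000, 1460));
        return 0;
    }

With copied 50% above the previous measurement the window exactly doubles, matching the old heuristic at that point while behaving smoothly in between.
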
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index 94e2835..5d20324 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -1911,7 +1911,7 @@ void tcp_v4_destroy_sock(struct sock *sk)
 	/* Clean up the MD5 key list, if any */
 	if (tp->md5sig_info) {
 		tcp_clear_md5_list(sk);
-		kfree_rcu(tp->md5sig_info, rcu);
+		kfree_rcu(rcu_dereference_protected(tp->md5sig_info, 1), rcu);
 		tp->md5sig_info = NULL;
 	}
 #endif
@@ -2281,7 +2281,7 @@ static void get_tcp4_sock(struct sock *sk, struct seq_file *f, int i)
 		timer_expires = jiffies;
 	}
 
-	state = sk_state_load(sk);
+	state = inet_sk_state_load(sk);
 	if (state == TCP_LISTEN)
 		rx_queue = sk->sk_ack_backlog;
 	else
diff --git a/net/ipv4/tcp_metrics.c b/net/ipv4/tcp_metrics.c
index 7097f92..759e6bc 100644
--- a/net/ipv4/tcp_metrics.c
+++ b/net/ipv4/tcp_metrics.c
@@ -546,8 +546,7 @@ bool tcp_peer_is_proven(struct request_sock *req, struct dst_entry *dst)
 static DEFINE_SEQLOCK(fastopen_seqlock);
 
 void tcp_fastopen_cache_get(struct sock *sk, u16 *mss,
-			    struct tcp_fastopen_cookie *cookie,
-			    int *syn_loss, unsigned long *last_syn_loss)
+			    struct tcp_fastopen_cookie *cookie)
 {
 	struct tcp_metrics_block *tm;
 
@@ -564,8 +563,6 @@ void tcp_fastopen_cache_get(struct sock *sk, u16 *mss,
 			*cookie = tfom->cookie;
 			if (cookie->len <= 0 && tfom->try_exp == 1)
 				cookie->exp = true;
-			*syn_loss = tfom->syn_loss;
-			*last_syn_loss = *syn_loss ? tfom->last_syn_loss : 0;
 		} while (read_seqretry(&fastopen_seqlock, seq));
 	}
 	rcu_read_unlock();
diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c
index b079b61..a8384b0c 100644
--- a/net/ipv4/tcp_minisocks.c
+++ b/net/ipv4/tcp_minisocks.c
@@ -316,9 +316,10 @@ void tcp_time_wait(struct sock *sk, int state, int timeo)
 		 */
 		local_bh_disable();
 		inet_twsk_schedule(tw, timeo);
-		/* Linkage updates. */
-		__inet_twsk_hashdance(tw, sk, &tcp_hashinfo);
-		inet_twsk_put(tw);
+		/* Linkage updates.
+		 * Note that access to tw after this point is illegal.
+		 */
+		inet_twsk_hashdance(tw, sk, &tcp_hashinfo);
 		local_bh_enable();
 	} else {
 		/* Sorry, if we're out of memory, just CLOSE this
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index a4d214c..95461f0 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -1944,7 +1944,8 @@ static bool tcp_tso_should_defer(struct sock *sk, struct sk_buff *skb,
 
 	in_flight = tcp_packets_in_flight(tp);
 
-	BUG_ON(tcp_skb_pcount(skb) <= 1 || (tp->snd_cwnd <= in_flight));
+	BUG_ON(tcp_skb_pcount(skb) <= 1);
+	BUG_ON(tp->snd_cwnd <= in_flight);
 
 	send_win = tcp_wnd_end(tp) - TCP_SKB_CB(skb)->seq;
 
@@ -2414,15 +2415,12 @@ bool tcp_schedule_loss_probe(struct sock *sk, bool advancing_rto)
 
 	early_retrans = sock_net(sk)->ipv4.sysctl_tcp_early_retrans;
 	/* Schedule a loss probe in 2*RTT for SACK capable connections
-	 * in Open state, that are either limited by cwnd or application.
+	 * not in loss recovery, that are either limited by cwnd or application.
 	 */
 	if ((early_retrans != 3 && early_retrans != 4) ||
 	    !tp->packets_out || !tcp_is_sack(tp) ||
-	    icsk->icsk_ca_state != TCP_CA_Open)
-		return false;
-
-	if ((tp->snd_cwnd > tcp_packets_in_flight(tp)) &&
-	     !tcp_write_queue_empty(sk))
+	    (icsk->icsk_ca_state != TCP_CA_Open &&
+	     icsk->icsk_ca_state != TCP_CA_CWR))
 		return false;
 
 	/* Probe timeout is 2*rtt. Add minimum RTO to account
diff --git a/net/ipv4/tcp_probe.c b/net/ipv4/tcp_probe.c
deleted file mode 100644
index 697f4c6..0000000
--- a/net/ipv4/tcp_probe.c
+++ /dev/null
@@ -1,301 +0,0 @@
-/*
- * tcpprobe - Observe the TCP flow with kprobes.
- *
- * The idea for this came from Werner Almesberger's umlsim
- * Copyright (C) 2004, Stephen Hemminger <shemminger@osdl.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- */
-
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#include <linux/kernel.h>
-#include <linux/kprobes.h>
-#include <linux/socket.h>
-#include <linux/tcp.h>
-#include <linux/slab.h>
-#include <linux/proc_fs.h>
-#include <linux/module.h>
-#include <linux/ktime.h>
-#include <linux/time.h>
-#include <net/net_namespace.h>
-
-#include <net/tcp.h>
-
-MODULE_AUTHOR("Stephen Hemminger <shemminger@linux-foundation.org>");
-MODULE_DESCRIPTION("TCP cwnd snooper");
-MODULE_LICENSE("GPL");
-MODULE_VERSION("1.1");
-
-static int port __read_mostly;
-MODULE_PARM_DESC(port, "Port to match (0=all)");
-module_param(port, int, 0);
-
-static unsigned int bufsize __read_mostly = 4096;
-MODULE_PARM_DESC(bufsize, "Log buffer size in packets (4096)");
-module_param(bufsize, uint, 0);
-
-static unsigned int fwmark __read_mostly;
-MODULE_PARM_DESC(fwmark, "skb mark to match (0=no mark)");
-module_param(fwmark, uint, 0);
-
-static int full __read_mostly;
-MODULE_PARM_DESC(full, "Full log (1=every ack packet received,  0=only cwnd changes)");
-module_param(full, int, 0);
-
-static const char procname[] = "tcpprobe";
-
-struct tcp_log {
-	ktime_t tstamp;
-	union {
-		struct sockaddr		raw;
-		struct sockaddr_in	v4;
-		struct sockaddr_in6	v6;
-	}	src, dst;
-	u16	length;
-	u32	snd_nxt;
-	u32	snd_una;
-	u32	snd_wnd;
-	u32	rcv_wnd;
-	u32	snd_cwnd;
-	u32	ssthresh;
-	u32	srtt;
-};
-
-static struct {
-	spinlock_t	lock;
-	wait_queue_head_t wait;
-	ktime_t		start;
-	u32		lastcwnd;
-
-	unsigned long	head, tail;
-	struct tcp_log	*log;
-} tcp_probe;
-
-static inline int tcp_probe_used(void)
-{
-	return (tcp_probe.head - tcp_probe.tail) & (bufsize - 1);
-}
-
-static inline int tcp_probe_avail(void)
-{
-	return bufsize - tcp_probe_used() - 1;
-}
-
-#define tcp_probe_copy_fl_to_si4(inet, si4, mem)		\
-	do {							\
-		si4.sin_family = AF_INET;			\
-		si4.sin_port = inet->inet_##mem##port;		\
-		si4.sin_addr.s_addr = inet->inet_##mem##addr;	\
-	} while (0)						\
-
-/*
- * Hook inserted to be called before each receive packet.
- * Note: arguments must match tcp_rcv_established()!
- */
-static void jtcp_rcv_established(struct sock *sk, struct sk_buff *skb,
-				 const struct tcphdr *th)
-{
-	unsigned int len = skb->len;
-	const struct tcp_sock *tp = tcp_sk(sk);
-	const struct inet_sock *inet = inet_sk(sk);
-
-	/* Only update if port or skb mark matches */
-	if (((port == 0 && fwmark == 0) ||
-	     ntohs(inet->inet_dport) == port ||
-	     ntohs(inet->inet_sport) == port ||
-	     (fwmark > 0 && skb->mark == fwmark)) &&
-	    (full || tp->snd_cwnd != tcp_probe.lastcwnd)) {
-
-		spin_lock(&tcp_probe.lock);
-		/* If log fills, just silently drop */
-		if (tcp_probe_avail() > 1) {
-			struct tcp_log *p = tcp_probe.log + tcp_probe.head;
-
-			p->tstamp = ktime_get();
-			switch (sk->sk_family) {
-			case AF_INET:
-				tcp_probe_copy_fl_to_si4(inet, p->src.v4, s);
-				tcp_probe_copy_fl_to_si4(inet, p->dst.v4, d);
-				break;
-			case AF_INET6:
-				memset(&p->src.v6, 0, sizeof(p->src.v6));
-				memset(&p->dst.v6, 0, sizeof(p->dst.v6));
-#if IS_ENABLED(CONFIG_IPV6)
-				p->src.v6.sin6_family = AF_INET6;
-				p->src.v6.sin6_port = inet->inet_sport;
-				p->src.v6.sin6_addr = inet6_sk(sk)->saddr;
-
-				p->dst.v6.sin6_family = AF_INET6;
-				p->dst.v6.sin6_port = inet->inet_dport;
-				p->dst.v6.sin6_addr = sk->sk_v6_daddr;
-#endif
-				break;
-			default:
-				BUG();
-			}
-
-			p->length = len;
-			p->snd_nxt = tp->snd_nxt;
-			p->snd_una = tp->snd_una;
-			p->snd_cwnd = tp->snd_cwnd;
-			p->snd_wnd = tp->snd_wnd;
-			p->rcv_wnd = tp->rcv_wnd;
-			p->ssthresh = tcp_current_ssthresh(sk);
-			p->srtt = tp->srtt_us >> 3;
-
-			tcp_probe.head = (tcp_probe.head + 1) & (bufsize - 1);
-		}
-		tcp_probe.lastcwnd = tp->snd_cwnd;
-		spin_unlock(&tcp_probe.lock);
-
-		wake_up(&tcp_probe.wait);
-	}
-
-	jprobe_return();
-}
-
-static struct jprobe tcp_jprobe = {
-	.kp = {
-		.symbol_name	= "tcp_rcv_established",
-	},
-	.entry	= jtcp_rcv_established,
-};
-
-static int tcpprobe_open(struct inode *inode, struct file *file)
-{
-	/* Reset (empty) log */
-	spin_lock_bh(&tcp_probe.lock);
-	tcp_probe.head = tcp_probe.tail = 0;
-	tcp_probe.start = ktime_get();
-	spin_unlock_bh(&tcp_probe.lock);
-
-	return 0;
-}
-
-static int tcpprobe_sprint(char *tbuf, int n)
-{
-	const struct tcp_log *p
-		= tcp_probe.log + tcp_probe.tail;
-	struct timespec64 ts
-		= ktime_to_timespec64(ktime_sub(p->tstamp, tcp_probe.start));
-
-	return scnprintf(tbuf, n,
-			"%lu.%09lu %pISpc %pISpc %d %#x %#x %u %u %u %u %u\n",
-			(unsigned long)ts.tv_sec,
-			(unsigned long)ts.tv_nsec,
-			&p->src, &p->dst, p->length, p->snd_nxt, p->snd_una,
-			p->snd_cwnd, p->ssthresh, p->snd_wnd, p->srtt, p->rcv_wnd);
-}
-
-static ssize_t tcpprobe_read(struct file *file, char __user *buf,
-			     size_t len, loff_t *ppos)
-{
-	int error = 0;
-	size_t cnt = 0;
-
-	if (!buf)
-		return -EINVAL;
-
-	while (cnt < len) {
-		char tbuf[256];
-		int width;
-
-		/* Wait for data in buffer */
-		error = wait_event_interruptible(tcp_probe.wait,
-						 tcp_probe_used() > 0);
-		if (error)
-			break;
-
-		spin_lock_bh(&tcp_probe.lock);
-		if (tcp_probe.head == tcp_probe.tail) {
-			/* multiple readers race? */
-			spin_unlock_bh(&tcp_probe.lock);
-			continue;
-		}
-
-		width = tcpprobe_sprint(tbuf, sizeof(tbuf));
-
-		if (cnt + width < len)
-			tcp_probe.tail = (tcp_probe.tail + 1) & (bufsize - 1);
-
-		spin_unlock_bh(&tcp_probe.lock);
-
-		/* if record greater than space available
-		   return partial buffer (so far) */
-		if (cnt + width >= len)
-			break;
-
-		if (copy_to_user(buf + cnt, tbuf, width))
-			return -EFAULT;
-		cnt += width;
-	}
-
-	return cnt == 0 ? error : cnt;
-}
-
-static const struct file_operations tcpprobe_fops = {
-	.owner	 = THIS_MODULE,
-	.open	 = tcpprobe_open,
-	.read    = tcpprobe_read,
-	.llseek  = noop_llseek,
-};
-
-static __init int tcpprobe_init(void)
-{
-	int ret = -ENOMEM;
-
-	/* Warning: if the function signature of tcp_rcv_established,
-	 * has been changed, you also have to change the signature of
-	 * jtcp_rcv_established, otherwise you end up right here!
-	 */
-	BUILD_BUG_ON(__same_type(tcp_rcv_established,
-				 jtcp_rcv_established) == 0);
-
-	init_waitqueue_head(&tcp_probe.wait);
-	spin_lock_init(&tcp_probe.lock);
-
-	if (bufsize == 0)
-		return -EINVAL;
-
-	bufsize = roundup_pow_of_two(bufsize);
-	tcp_probe.log = kcalloc(bufsize, sizeof(struct tcp_log), GFP_KERNEL);
-	if (!tcp_probe.log)
-		goto err0;
-
-	if (!proc_create(procname, S_IRUSR, init_net.proc_net, &tcpprobe_fops))
-		goto err0;
-
-	ret = register_jprobe(&tcp_jprobe);
-	if (ret)
-		goto err1;
-
-	pr_info("probe registered (port=%d/fwmark=%u) bufsize=%u\n",
-		port, fwmark, bufsize);
-	return 0;
- err1:
-	remove_proc_entry(procname, init_net.proc_net);
- err0:
-	kfree(tcp_probe.log);
-	return ret;
-}
-module_init(tcpprobe_init);
-
-static __exit void tcpprobe_exit(void)
-{
-	remove_proc_entry(procname, init_net.proc_net);
-	unregister_jprobe(&tcp_jprobe);
-	kfree(tcp_probe.log);
-}
-module_exit(tcpprobe_exit);
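
The jprobe-based tcpprobe module is deleted outright; its replacement is the tcp_probe tracepoint called from tcp_rcv_established() (the trace_tcp_probe(sk, skb) hook added in the tcp_input.c hunk earlier in this diff). A minimal consumer might look like the sketch below; the tracefs mount point and the tcp/tcp_probe event path are assumptions of this sketch, so check your kernel, and run it as root:

    #include <stdio.h>

    /* Minimal consumer for the tracepoint that replaces this module.
     * Assumes tracefs at /sys/kernel/debug/tracing and an event named
     * tcp/tcp_probe. */
    int main(void)
    {
        FILE *en = fopen("/sys/kernel/debug/tracing/events/tcp/tcp_probe/enable", "w");
        FILE *pipe;
        char line[512];

        if (!en)
            return 1;
        fputs("1", en);
        fclose(en);

        pipe = fopen("/sys/kernel/debug/tracing/trace_pipe", "r");
        if (!pipe)
            return 1;
        for (int i = 0; i < 10 && fgets(line, sizeof(line), pipe); i++)
            fputs(line, stdout);
        fclose(pipe);
        return 0;
    }
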
diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c
index 968fda1..6db3124 100644
--- a/net/ipv4/tcp_timer.c
+++ b/net/ipv4/tcp_timer.c
@@ -183,11 +183,6 @@ static int tcp_write_timeout(struct sock *sk)
 	if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV)) {
 		if (icsk->icsk_retransmits) {
 			dst_negative_advice(sk);
-			if (tp->syn_fastopen || tp->syn_data)
-				tcp_fastopen_cache_set(sk, 0, NULL, true, 0);
-			if (tp->syn_data && icsk->icsk_retransmits == 1)
-				NET_INC_STATS(sock_net(sk),
-					      LINUX_MIB_TCPFASTOPENACTIVEFAIL);
 		} else if (!tp->syn_data && !tp->syn_fastopen) {
 			sk_rethink_txhash(sk);
 		}
@@ -195,17 +190,6 @@ static int tcp_write_timeout(struct sock *sk)
 		expired = icsk->icsk_retransmits >= retry_until;
 	} else {
 		if (retransmits_timed_out(sk, net->ipv4.sysctl_tcp_retries1, 0)) {
-			/* Some middle-boxes may black-hole Fast Open _after_
-			 * the handshake. Therefore we conservatively disable
-			 * Fast Open on this path on recurring timeouts after
-			 * successful Fast Open.
-			 */
-			if (tp->syn_data_acked) {
-				tcp_fastopen_cache_set(sk, 0, NULL, true, 0);
-				if (icsk->icsk_retransmits == net->ipv4.sysctl_tcp_retries1)
-					NET_INC_STATS(sock_net(sk),
-						      LINUX_MIB_TCPFASTOPENACTIVEFAIL);
-			}
 			/* Black hole detection */
 			tcp_mtu_probing(icsk, sk);
 
@@ -228,6 +212,7 @@ static int tcp_write_timeout(struct sock *sk)
 		expired = retransmits_timed_out(sk, retry_until,
 						icsk->icsk_user_timeout);
 	}
+	tcp_fastopen_active_detect_blackhole(sk, expired);
 	if (expired) {
 		/* Has it gone just too far? */
 		tcp_write_err(sk);
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index e4ff25c..db72619 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -357,18 +357,12 @@ int udp_lib_get_port(struct sock *sk, unsigned short snum,
 }
 EXPORT_SYMBOL(udp_lib_get_port);
 
-static u32 udp4_portaddr_hash(const struct net *net, __be32 saddr,
-			      unsigned int port)
-{
-	return jhash_1word((__force u32)saddr, net_hash_mix(net)) ^ port;
-}
-
 int udp_v4_get_port(struct sock *sk, unsigned short snum)
 {
 	unsigned int hash2_nulladdr =
-		udp4_portaddr_hash(sock_net(sk), htonl(INADDR_ANY), snum);
+		ipv4_portaddr_hash(sock_net(sk), htonl(INADDR_ANY), snum);
 	unsigned int hash2_partial =
-		udp4_portaddr_hash(sock_net(sk), inet_sk(sk)->inet_rcv_saddr, 0);
+		ipv4_portaddr_hash(sock_net(sk), inet_sk(sk)->inet_rcv_saddr, 0);
 
 	/* precompute partial secondary hash */
 	udp_sk(sk)->udp_portaddr_hash = hash2_partial;
@@ -445,7 +439,7 @@ static struct sock *udp4_lib_lookup2(struct net *net,
 				     struct sk_buff *skb)
 {
 	struct sock *sk, *result;
-	int score, badness, matches = 0, reuseport = 0;
+	int score, badness;
 	u32 hash = 0;
 
 	result = NULL;
@@ -454,23 +448,16 @@ static struct sock *udp4_lib_lookup2(struct net *net,
 		score = compute_score(sk, net, saddr, sport,
 				      daddr, hnum, dif, sdif, exact_dif);
 		if (score > badness) {
-			reuseport = sk->sk_reuseport;
-			if (reuseport) {
+			if (sk->sk_reuseport) {
 				hash = udp_ehashfn(net, daddr, hnum,
 						   saddr, sport);
 				result = reuseport_select_sock(sk, hash, skb,
 							sizeof(struct udphdr));
 				if (result)
 					return result;
-				matches = 1;
 			}
 			badness = score;
 			result = sk;
-		} else if (score == badness && reuseport) {
-			matches++;
-			if (reciprocal_scale(hash, matches) == 0)
-				result = sk;
-			hash = next_pseudo_random32(hash);
 		}
 	}
 	return result;
@@ -488,11 +475,11 @@ struct sock *__udp4_lib_lookup(struct net *net, __be32 saddr,
 	unsigned int hash2, slot2, slot = udp_hashfn(net, hnum, udptable->mask);
 	struct udp_hslot *hslot2, *hslot = &udptable->hash[slot];
 	bool exact_dif = udp_lib_exact_dif_match(net, skb);
-	int score, badness, matches = 0, reuseport = 0;
+	int score, badness;
 	u32 hash = 0;
 
 	if (hslot->count > 10) {
-		hash2 = udp4_portaddr_hash(net, daddr, hnum);
+		hash2 = ipv4_portaddr_hash(net, daddr, hnum);
 		slot2 = hash2 & udptable->mask;
 		hslot2 = &udptable->hash2[slot2];
 		if (hslot->count < hslot2->count)
@@ -503,7 +490,7 @@ struct sock *__udp4_lib_lookup(struct net *net, __be32 saddr,
 					  exact_dif, hslot2, skb);
 		if (!result) {
 			unsigned int old_slot2 = slot2;
-			hash2 = udp4_portaddr_hash(net, htonl(INADDR_ANY), hnum);
+			hash2 = ipv4_portaddr_hash(net, htonl(INADDR_ANY), hnum);
 			slot2 = hash2 & udptable->mask;
 			/* avoid searching the same slot again. */
 			if (unlikely(slot2 == old_slot2))
@@ -526,23 +513,16 @@ struct sock *__udp4_lib_lookup(struct net *net, __be32 saddr,
 		score = compute_score(sk, net, saddr, sport,
 				      daddr, hnum, dif, sdif, exact_dif);
 		if (score > badness) {
-			reuseport = sk->sk_reuseport;
-			if (reuseport) {
+			if (sk->sk_reuseport) {
 				hash = udp_ehashfn(net, daddr, hnum,
 						   saddr, sport);
 				result = reuseport_select_sock(sk, hash, skb,
 							sizeof(struct udphdr));
 				if (result)
 					return result;
-				matches = 1;
 			}
 			result = sk;
 			badness = score;
-		} else if (score == badness && reuseport) {
-			matches++;
-			if (reciprocal_scale(hash, matches) == 0)
-				result = sk;
-			hash = next_pseudo_random32(hash);
 		}
 	}
 	return result;
@@ -1775,7 +1755,7 @@ EXPORT_SYMBOL(udp_lib_rehash);
 
 static void udp_v4_rehash(struct sock *sk)
 {
-	u16 new_hash = udp4_portaddr_hash(sock_net(sk),
+	u16 new_hash = ipv4_portaddr_hash(sock_net(sk),
 					  inet_sk(sk)->inet_rcv_saddr,
 					  inet_sk(sk)->inet_num);
 	udp_lib_rehash(sk, new_hash);
@@ -1966,9 +1946,9 @@ static int __udp4_lib_mcast_deliver(struct net *net, struct sk_buff *skb,
 	struct sk_buff *nskb;
 
 	if (use_hash2) {
-		hash2_any = udp4_portaddr_hash(net, htonl(INADDR_ANY), hnum) &
+		hash2_any = ipv4_portaddr_hash(net, htonl(INADDR_ANY), hnum) &
 			    udptable->mask;
-		hash2 = udp4_portaddr_hash(net, daddr, hnum) & udptable->mask;
+		hash2 = ipv4_portaddr_hash(net, daddr, hnum) & udptable->mask;
 start_lookup:
 		hslot = &udptable->hash2[hash2];
 		offset = offsetof(typeof(*sk), __sk_common.skc_portaddr_node);
@@ -2200,7 +2180,7 @@ static struct sock *__udp4_lib_demux_lookup(struct net *net,
 					    int dif, int sdif)
 {
 	unsigned short hnum = ntohs(loc_port);
-	unsigned int hash2 = udp4_portaddr_hash(net, loc_addr, hnum);
+	unsigned int hash2 = ipv4_portaddr_hash(net, loc_addr, hnum);
 	unsigned int slot2 = hash2 & udp_table.mask;
 	struct udp_hslot *hslot2 = &udp_table.hash2[slot2];
 	INET_ADDR_COOKIE(acookie, rmt_addr, loc_addr);
@@ -2510,8 +2490,6 @@ unsigned int udp_poll(struct file *file, struct socket *sock, poll_table *wait)
 	if (!skb_queue_empty(&udp_sk(sk)->reader_queue))
 		mask |= POLLIN | POLLRDNORM;
 
-	sock_rps_record_flow(sk);
-
 	/* Check for false positives due to checksum errors */
 	if ((mask & POLLRDNORM) && !(file->f_flags & O_NONBLOCK) &&
 	    !(sk->sk_shutdown & RCV_SHUTDOWN) && first_packet_length(sk) == -1)
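
udp4_portaddr_hash() is removed in favour of the shared ipv4_portaddr_hash(), which the new TCP listener hash (tcp_listen_portaddr_hash, initialised in the tcp.c hunk above) uses as well. The hash shape is unchanged: jhash the local address with the per-netns seed, then XOR the port in. A standalone sketch where mix32() is an invented stand-in for jhash_1word()/net_hash_mix():

    #include <stdint.h>
    #include <stdio.h>

    /* Shape of ipv4_portaddr_hash(): mix the local address with a
     * per-netns seed, then XOR in the port so (addr, port) pairs spread
     * across the hash2 buckets. mix32() is a stand-in, not jhash. */
    static uint32_t mix32(uint32_t v, uint32_t seed)
    {
        v ^= seed;
        v *= 0x9e3779b1u;           /* any decent avalanche step */
        return v ^ (v >> 16);
    }

    static uint32_t portaddr_hash(uint32_t saddr, uint32_t net_seed,
                                  uint16_t port)
    {
        return mix32(saddr, net_seed) ^ port;
    }

    int main(void)
    {
        printf("0x%08x\n", portaddr_hash(0xc0000201, 0x12345678, 4242));
        return 0;
    }
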
diff --git a/net/ipv4/xfrm4_mode_tunnel.c b/net/ipv4/xfrm4_mode_tunnel.c
index e6265e2..8affc6d 100644
--- a/net/ipv4/xfrm4_mode_tunnel.c
+++ b/net/ipv4/xfrm4_mode_tunnel.c
@@ -62,7 +62,7 @@ static int xfrm4_mode_tunnel_output(struct xfrm_state *x, struct sk_buff *skb)
 	top_iph->frag_off = (flags & XFRM_STATE_NOPMTUDISC) ?
 		0 : (XFRM_MODE_SKB_CB(skb)->frag_off & htons(IP_DF));
 
-	top_iph->ttl = ip4_dst_hoplimit(dst->child);
+	top_iph->ttl = ip4_dst_hoplimit(xfrm_dst_child(dst));
 
 	top_iph->saddr = x->props.saddr.a4;
 	top_iph->daddr = x->id.daddr.a4;
@@ -105,18 +105,15 @@ static struct sk_buff *xfrm4_mode_tunnel_gso_segment(struct xfrm_state *x,
 {
 	__skb_push(skb, skb->mac_len);
 	return skb_mac_gso_segment(skb, features);
-
 }
 
 static void xfrm4_mode_tunnel_xmit(struct xfrm_state *x, struct sk_buff *skb)
 {
 	struct xfrm_offload *xo = xfrm_offload(skb);
 
-	if (xo->flags & XFRM_GSO_SEGMENT) {
-		skb->network_header = skb->network_header - x->props.header_len;
+	if (xo->flags & XFRM_GSO_SEGMENT)
 		skb->transport_header = skb->network_header +
 					sizeof(struct iphdr);
-	}
 
 	skb_reset_mac_len(skb);
 	pskb_pull(skb, skb->mac_len + x->props.header_len);
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index f49bd78..2435f7a 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -3438,6 +3438,7 @@ static int addrconf_notify(struct notifier_block *this, unsigned long event,
 		} else if (event == NETDEV_CHANGE) {
 			if (!addrconf_link_ready(dev)) {
 				/* device is still not ready. */
+				rt6_sync_down_dev(dev, event);
 				break;
 			}
 
@@ -3449,6 +3450,7 @@ static int addrconf_notify(struct notifier_block *this, unsigned long event,
 					 * multicast snooping switches
 					 */
 					ipv6_mc_up(idev);
+					rt6_sync_up(dev, RTNH_F_LINKDOWN);
 					break;
 				}
 				idev->if_flags |= IF_READY;
@@ -3484,6 +3486,9 @@ static int addrconf_notify(struct notifier_block *this, unsigned long event,
 			if (run_pending)
 				addrconf_dad_run(idev);
 
+			/* Device has an address by now */
+			rt6_sync_up(dev, RTNH_F_DEAD);
+
 			/*
 			 * If the MTU changed during the interface down,
 			 * when the interface up, the changed MTU must be
@@ -3577,6 +3582,7 @@ static bool addr_is_local(const struct in6_addr *addr)
 
 static int addrconf_ifdown(struct net_device *dev, int how)
 {
+	unsigned long event = how ? NETDEV_UNREGISTER : NETDEV_DOWN;
 	struct net *net = dev_net(dev);
 	struct inet6_dev *idev;
 	struct inet6_ifaddr *ifa, *tmp;
@@ -3586,8 +3592,7 @@ static int addrconf_ifdown(struct net_device *dev, int how)
 
 	ASSERT_RTNL();
 
-	rt6_ifdown(net, dev);
-	neigh_ifdown(&nd_tbl, dev);
+	rt6_disable_ip(dev, event);
 
 	idev = __in6_dev_get(dev);
 	if (!idev)
@@ -6595,27 +6600,45 @@ int __init addrconf_init(void)
 
 	rtnl_af_register(&inet6_ops);
 
-	err = __rtnl_register(PF_INET6, RTM_GETLINK, NULL, inet6_dump_ifinfo,
-			      0);
+	err = rtnl_register_module(THIS_MODULE, PF_INET6, RTM_GETLINK,
+				   NULL, inet6_dump_ifinfo, 0);
 	if (err < 0)
 		goto errout;
 
-	/* Only the first call to __rtnl_register can fail */
-	__rtnl_register(PF_INET6, RTM_NEWADDR, inet6_rtm_newaddr, NULL, 0);
-	__rtnl_register(PF_INET6, RTM_DELADDR, inet6_rtm_deladdr, NULL, 0);
-	__rtnl_register(PF_INET6, RTM_GETADDR, inet6_rtm_getaddr,
-			inet6_dump_ifaddr, RTNL_FLAG_DOIT_UNLOCKED);
-	__rtnl_register(PF_INET6, RTM_GETMULTICAST, NULL,
-			inet6_dump_ifmcaddr, 0);
-	__rtnl_register(PF_INET6, RTM_GETANYCAST, NULL,
-			inet6_dump_ifacaddr, 0);
-	__rtnl_register(PF_INET6, RTM_GETNETCONF, inet6_netconf_get_devconf,
-			inet6_netconf_dump_devconf, RTNL_FLAG_DOIT_UNLOCKED);
-
-	ipv6_addr_label_rtnl_register();
+	err = rtnl_register_module(THIS_MODULE, PF_INET6, RTM_NEWADDR,
+				   inet6_rtm_newaddr, NULL, 0);
+	if (err < 0)
+		goto errout;
+	err = rtnl_register_module(THIS_MODULE, PF_INET6, RTM_DELADDR,
+				   inet6_rtm_deladdr, NULL, 0);
+	if (err < 0)
+		goto errout;
+	err = rtnl_register_module(THIS_MODULE, PF_INET6, RTM_GETADDR,
+				   inet6_rtm_getaddr, inet6_dump_ifaddr,
+				   RTNL_FLAG_DOIT_UNLOCKED);
+	if (err < 0)
+		goto errout;
+	err = rtnl_register_module(THIS_MODULE, PF_INET6, RTM_GETMULTICAST,
+				   NULL, inet6_dump_ifmcaddr, 0);
+	if (err < 0)
+		goto errout;
+	err = rtnl_register_module(THIS_MODULE, PF_INET6, RTM_GETANYCAST,
+				   NULL, inet6_dump_ifacaddr, 0);
+	if (err < 0)
+		goto errout;
+	err = rtnl_register_module(THIS_MODULE, PF_INET6, RTM_GETNETCONF,
+				   inet6_netconf_get_devconf,
+				   inet6_netconf_dump_devconf,
+				   RTNL_FLAG_DOIT_UNLOCKED);
+	if (err < 0)
+		goto errout;
+	err = ipv6_addr_label_rtnl_register();
+	if (err < 0)
+		goto errout;
 
 	return 0;
 errout:
+	rtnl_unregister_all(PF_INET6);
 	rtnl_af_unregister(&inet6_ops);
 	unregister_netdevice_notifier(&ipv6_dev_notf);
 errlo:
diff --git a/net/ipv6/addrlabel.c b/net/ipv6/addrlabel.c
index 00e1f8ee..1d6ced3 100644
--- a/net/ipv6/addrlabel.c
+++ b/net/ipv6/addrlabel.c
@@ -547,13 +547,22 @@ static int ip6addrlbl_get(struct sk_buff *in_skb, struct nlmsghdr *nlh,
 	return err;
 }
 
-void __init ipv6_addr_label_rtnl_register(void)
+int __init ipv6_addr_label_rtnl_register(void)
 {
-	__rtnl_register(PF_INET6, RTM_NEWADDRLABEL, ip6addrlbl_newdel,
-			NULL, RTNL_FLAG_DOIT_UNLOCKED);
-	__rtnl_register(PF_INET6, RTM_DELADDRLABEL, ip6addrlbl_newdel,
-			NULL, RTNL_FLAG_DOIT_UNLOCKED);
-	__rtnl_register(PF_INET6, RTM_GETADDRLABEL, ip6addrlbl_get,
-			ip6addrlbl_dump, RTNL_FLAG_DOIT_UNLOCKED);
-}
+	int ret;
 
+	ret = rtnl_register_module(THIS_MODULE, PF_INET6, RTM_NEWADDRLABEL,
+				   ip6addrlbl_newdel,
+				   NULL, RTNL_FLAG_DOIT_UNLOCKED);
+	if (ret < 0)
+		return ret;
+	ret = rtnl_register_module(THIS_MODULE, PF_INET6, RTM_DELADDRLABEL,
+				   ip6addrlbl_newdel,
+				   NULL, RTNL_FLAG_DOIT_UNLOCKED);
+	if (ret < 0)
+		return ret;
+	ret = rtnl_register_module(THIS_MODULE, PF_INET6, RTM_GETADDRLABEL,
+				   ip6addrlbl_get,
+				   ip6addrlbl_dump, RTNL_FLAG_DOIT_UNLOCKED);
+	return ret;
+}
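
Both addrconf_init() and ipv6_addr_label_rtnl_register() above move from fire-and-forget __rtnl_register() calls ("only the first call can fail") to rtnl_register_module() with every return value checked, and addrconf_init() unwinds any partial registration with a single rtnl_unregister_all(PF_INET6). The register-or-unwind pattern, reduced to a runnable toy in which all names are invented for the illustration:

    #include <stdio.h>

    /* Register-or-unwind, reduced to a toy: every step is checked, and
     * any failure undoes all earlier steps in one call, mirroring the
     * rtnl_unregister_all(PF_INET6) in the errout path above. */
    static int register_one(int id)
    {
        if (id == 3)
            return -1;              /* simulate a mid-sequence failure */
        printf("registered %d\n", id);
        return 0;
    }

    static void unregister_all(void)
    {
        printf("unregistered everything\n");
    }

    int main(void)
    {
        for (int id = 0; id < 5; id++) {
            if (register_one(id) < 0) {
                unregister_all();
                return 1;
            }
        }
        return 0;
    }
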
diff --git a/net/ipv6/datagram.c b/net/ipv6/datagram.c
index a1f9187..fbf08ce 100644
--- a/net/ipv6/datagram.c
+++ b/net/ipv6/datagram.c
@@ -221,8 +221,7 @@ int __ip6_datagram_connect(struct sock *sk, struct sockaddr *uaddr,
 	if (__ipv6_addr_needs_scope_id(addr_type)) {
 		if (addr_len >= sizeof(struct sockaddr_in6) &&
 		    usin->sin6_scope_id) {
-			if (sk->sk_bound_dev_if &&
-			    sk->sk_bound_dev_if != usin->sin6_scope_id) {
+			if (!sk_dev_equal_l3scope(sk, usin->sin6_scope_id)) {
 				err = -EINVAL;
 				goto out;
 			}
diff --git a/net/ipv6/esp6.c b/net/ipv6/esp6.c
index a902ff8..7c888c6 100644
--- a/net/ipv6/esp6.c
+++ b/net/ipv6/esp6.c
@@ -141,14 +141,32 @@ static void esp_ssg_unref(struct xfrm_state *x, void *tmp)
 static void esp_output_done(struct crypto_async_request *base, int err)
 {
 	struct sk_buff *skb = base->data;
+	struct xfrm_offload *xo = xfrm_offload(skb);
 	void *tmp;
-	struct dst_entry *dst = skb_dst(skb);
-	struct xfrm_state *x = dst->xfrm;
+	struct xfrm_state *x;
+
+	if (xo && (xo->flags & XFRM_DEV_RESUME))
+		x = skb->sp->xvec[skb->sp->len - 1];
+	else
+		x = skb_dst(skb)->xfrm;
 
 	tmp = ESP_SKB_CB(skb)->tmp;
 	esp_ssg_unref(x, tmp);
 	kfree(tmp);
-	xfrm_output_resume(skb, err);
+
+	if (xo && (xo->flags & XFRM_DEV_RESUME)) {
+		if (err) {
+			XFRM_INC_STATS(xs_net(x), LINUX_MIB_XFRMOUTSTATEPROTOERROR);
+			kfree_skb(skb);
+			return;
+		}
+
+		skb_push(skb, skb->data - skb_mac_header(skb));
+		secpath_reset(skb);
+		xfrm_dev_resume(skb);
+	} else {
+		xfrm_output_resume(skb, err);
+	}
 }
 
 /* Move ESP header back into place. */
@@ -734,17 +752,13 @@ static int esp_init_aead(struct xfrm_state *x)
 	char aead_name[CRYPTO_MAX_ALG_NAME];
 	struct crypto_aead *aead;
 	int err;
-	u32 mask = 0;
 
 	err = -ENAMETOOLONG;
 	if (snprintf(aead_name, CRYPTO_MAX_ALG_NAME, "%s(%s)",
 		     x->geniv, x->aead->alg_name) >= CRYPTO_MAX_ALG_NAME)
 		goto error;
 
-	if (x->xso.offload_handle)
-		mask |= CRYPTO_ALG_ASYNC;
-
-	aead = crypto_alloc_aead(aead_name, 0, mask);
+	aead = crypto_alloc_aead(aead_name, 0, 0);
 	err = PTR_ERR(aead);
 	if (IS_ERR(aead))
 		goto error;
@@ -774,7 +788,6 @@ static int esp_init_authenc(struct xfrm_state *x)
 	char authenc_name[CRYPTO_MAX_ALG_NAME];
 	unsigned int keylen;
 	int err;
-	u32 mask = 0;
 
 	err = -EINVAL;
 	if (!x->ealg)
@@ -800,10 +813,7 @@ static int esp_init_authenc(struct xfrm_state *x)
 			goto error;
 	}
 
-	if (x->xso.offload_handle)
-		mask |= CRYPTO_ALG_ASYNC;
-
-	aead = crypto_alloc_aead(authenc_name, 0, mask);
+	aead = crypto_alloc_aead(authenc_name, 0, 0);
 	err = PTR_ERR(aead);
 	if (IS_ERR(aead))
 		goto error;
diff --git a/net/ipv6/esp6_offload.c b/net/ipv6/esp6_offload.c
index 333a478..0bb7d54 100644
--- a/net/ipv6/esp6_offload.c
+++ b/net/ipv6/esp6_offload.c
@@ -135,75 +135,36 @@ static void esp6_gso_encap(struct xfrm_state *x, struct sk_buff *skb)
 static struct sk_buff *esp6_gso_segment(struct sk_buff *skb,
 				        netdev_features_t features)
 {
-	__u32 seq;
-	int err = 0;
-	struct sk_buff *skb2;
 	struct xfrm_state *x;
 	struct ip_esp_hdr *esph;
 	struct crypto_aead *aead;
-	struct sk_buff *segs = ERR_PTR(-EINVAL);
 	netdev_features_t esp_features = features;
 	struct xfrm_offload *xo = xfrm_offload(skb);
 
 	if (!xo)
-		goto out;
-
-	seq = xo->seq.low;
+		return ERR_PTR(-EINVAL);
 
 	x = skb->sp->xvec[skb->sp->len - 1];
 	aead = x->data;
 	esph = ip_esp_hdr(skb);
 
 	if (esph->spi != x->id.spi)
-		goto out;
+		return ERR_PTR(-EINVAL);
 
 	if (!pskb_may_pull(skb, sizeof(*esph) + crypto_aead_ivsize(aead)))
-		goto out;
+		return ERR_PTR(-EINVAL);
 
 	__skb_pull(skb, sizeof(*esph) + crypto_aead_ivsize(aead));
 
 	skb->encap_hdr_csum = 1;
 
-	if (!(features & NETIF_F_HW_ESP))
+	if (!(features & NETIF_F_HW_ESP) || !x->xso.offload_handle ||
+	    (x->xso.dev != skb->dev))
 		esp_features = features & ~(NETIF_F_SG | NETIF_F_CSUM_MASK);
 
-	segs = x->outer_mode->gso_segment(x, skb, esp_features);
-	if (IS_ERR_OR_NULL(segs))
-		goto out;
+	xo->flags |= XFRM_GSO_SEGMENT;
 
-	__skb_pull(skb, skb->data - skb_mac_header(skb));
-
-	skb2 = segs;
-	do {
-		struct sk_buff *nskb = skb2->next;
-
-		xo = xfrm_offload(skb2);
-		xo->flags |= XFRM_GSO_SEGMENT;
-		xo->seq.low = seq;
-		xo->seq.hi = xfrm_replay_seqhi(x, seq);
-
-		if(!(features & NETIF_F_HW_ESP))
-			xo->flags |= CRYPTO_FALLBACK;
-
-		x->outer_mode->xmit(x, skb2);
-
-		err = x->type_offload->xmit(x, skb2, esp_features);
-		if (err) {
-			kfree_skb_list(segs);
-			return ERR_PTR(err);
-		}
-
-		if (!skb_is_gso(skb2))
-			seq++;
-		else
-			seq += skb_shinfo(skb2)->gso_segs;
-
-		skb_push(skb2, skb2->mac_len);
-		skb2 = nskb;
-	} while (skb2);
-
-out:
-	return segs;
+	return x->outer_mode->gso_segment(x, skb, esp_features);
 }
 
 static int esp6_input_tail(struct xfrm_state *x, struct sk_buff *skb)
@@ -222,6 +183,7 @@ static int esp6_input_tail(struct xfrm_state *x, struct sk_buff *skb)
 
 static int esp6_xmit(struct xfrm_state *x, struct sk_buff *skb,  netdev_features_t features)
 {
+	int len;
 	int err;
 	int alen;
 	int blksize;
@@ -230,6 +192,7 @@ static int esp6_xmit(struct xfrm_state *x, struct sk_buff *skb,  netdev_features
 	struct crypto_aead *aead;
 	struct esp_info esp;
 	bool hw_offload = true;
+	__u32 seq;
 
 	esp.inplace = true;
 
@@ -265,28 +228,33 @@ static int esp6_xmit(struct xfrm_state *x, struct sk_buff *skb,  netdev_features
 			return esp.nfrags;
 	}
 
+	seq = xo->seq.low;
+
 	esph = ip_esp_hdr(skb);
 	esph->spi = x->id.spi;
 
 	skb_push(skb, -skb_network_offset(skb));
 
 	if (xo->flags & XFRM_GSO_SEGMENT) {
-		esph->seq_no = htonl(xo->seq.low);
-	} else {
-		int len;
+		esph->seq_no = htonl(seq);
 
-		len = skb->len - sizeof(struct ipv6hdr);
-		if (len > IPV6_MAXPLEN)
-			len = 0;
-
-		ipv6_hdr(skb)->payload_len = htons(len);
+		if (!skb_is_gso(skb))
+			xo->seq.low++;
+		else
+			xo->seq.low += skb_shinfo(skb)->gso_segs;
 	}
 
+	esp.seqno = cpu_to_be64(xo->seq.low + ((u64)xo->seq.hi << 32));
+
+	len = skb->len - sizeof(struct ipv6hdr);
+	if (len > IPV6_MAXPLEN)
+		len = 0;
+
+	ipv6_hdr(skb)->payload_len = htons(len);
+
 	if (hw_offload)
 		return 0;
 
-	esp.seqno = cpu_to_be64(xo->seq.low + ((u64)xo->seq.hi << 32));
-
 	err = esp6_output_tail(x, skb, &esp);
 	if (err)
 		return err;
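
esp6_gso_segment() no longer walks the segment list itself; segmentation is delegated to x->outer_mode->gso_segment(), and esp6_xmit() now advances the replay counter at transmit time: the ESP header carries the pre-advance sequence number, and xo->seq.low then moves forward by gso_segs for a GSO skb, or by one otherwise. A sketch of just that bookkeeping; the high/ESN word is handled by the xfrm replay code and is not shown:

    #include <stdint.h>
    #include <stdio.h>

    /* Replay-counter bookkeeping from esp6_xmit(): the ESP header carries
     * the counter as it stood *before* this skb, and the low 32 bits then
     * advance by however many wire segments the skb will become. */
    struct xo_model { uint32_t seq_low; };

    static uint32_t esp_emit(struct xo_model *xo, int is_gso,
                             uint16_t gso_segs)
    {
        uint32_t seq_no = xo->seq_low;      /* what goes into esph->seq_no */

        xo->seq_low += is_gso ? gso_segs : 1;
        return seq_no;
    }

    int main(void)
    {
        struct xo_model xo = { 100 };

        printf("hdr seq %u\n", esp_emit(&xo, 1, 4));    /* 4-segment GSO */
        printf("hdr seq %u\n", esp_emit(&xo, 0, 0));    /* plain skb */
        return 0;
    }
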
diff --git a/net/ipv6/ila/ila_xlat.c b/net/ipv6/ila/ila_xlat.c
index 6eb5e68..44c39c5 100644
--- a/net/ipv6/ila/ila_xlat.c
+++ b/net/ipv6/ila/ila_xlat.c
@@ -512,9 +512,7 @@ static int ila_nl_dump(struct sk_buff *skb, struct netlink_callback *cb)
 	struct ila_map *ila;
 	int ret;
 
-	ret = rhashtable_walk_start(rhiter);
-	if (ret && ret != -EAGAIN)
-		goto done;
+	rhashtable_walk_start(rhiter);
 
 	for (;;) {
 		ila = rhashtable_walk_next(rhiter);
diff --git a/net/ipv6/inet6_hashtables.c b/net/ipv6/inet6_hashtables.c
index b01858f..2febe26 100644
--- a/net/ipv6/inet6_hashtables.c
+++ b/net/ipv6/inet6_hashtables.c
@@ -125,6 +125,40 @@ static inline int compute_score(struct sock *sk, struct net *net,
 }
 
 /* called with rcu_read_lock() */
+static struct sock *inet6_lhash2_lookup(struct net *net,
+		struct inet_listen_hashbucket *ilb2,
+		struct sk_buff *skb, int doff,
+		const struct in6_addr *saddr,
+		const __be16 sport, const struct in6_addr *daddr,
+		const unsigned short hnum, const int dif, const int sdif)
+{
+	bool exact_dif = inet6_exact_dif_match(net, skb);
+	struct inet_connection_sock *icsk;
+	struct sock *sk, *result = NULL;
+	int score, hiscore = 0;
+	u32 phash = 0;
+
+	inet_lhash2_for_each_icsk_rcu(icsk, &ilb2->head) {
+		sk = (struct sock *)icsk;
+		score = compute_score(sk, net, hnum, daddr, dif, sdif,
+				      exact_dif);
+		if (score > hiscore) {
+			if (sk->sk_reuseport) {
+				phash = inet6_ehashfn(net, daddr, hnum,
+						      saddr, sport);
+				result = reuseport_select_sock(sk, phash,
+							       skb, doff);
+				if (result)
+					return result;
+			}
+			result = sk;
+			hiscore = score;
+		}
+	}
+
+	return result;
+}
+
 struct sock *inet6_lookup_listener(struct net *net,
 		struct inet_hashinfo *hashinfo,
 		struct sk_buff *skb, int doff,
@@ -134,31 +168,56 @@ struct sock *inet6_lookup_listener(struct net *net,
 {
 	unsigned int hash = inet_lhashfn(net, hnum);
 	struct inet_listen_hashbucket *ilb = &hashinfo->listening_hash[hash];
-	int score, hiscore = 0, matches = 0, reuseport = 0;
 	bool exact_dif = inet6_exact_dif_match(net, skb);
+	struct inet_listen_hashbucket *ilb2;
 	struct sock *sk, *result = NULL;
+	int score, hiscore = 0;
+	unsigned int hash2;
 	u32 phash = 0;
 
+	if (ilb->count <= 10 || !hashinfo->lhash2)
+		goto port_lookup;
+
+	/* Too many sk in the ilb bucket (which is hashed by port alone).
+	 * Try lhash2 (which is hashed by port and addr) instead.
+	 */
+
+	hash2 = ipv6_portaddr_hash(net, daddr, hnum);
+	ilb2 = inet_lhash2_bucket(hashinfo, hash2);
+	if (ilb2->count > ilb->count)
+		goto port_lookup;
+
+	result = inet6_lhash2_lookup(net, ilb2, skb, doff,
+				     saddr, sport, daddr, hnum,
+				     dif, sdif);
+	if (result)
+		return result;
+
+	/* Lookup lhash2 with in6addr_any */
+
+	hash2 = ipv6_portaddr_hash(net, &in6addr_any, hnum);
+	ilb2 = inet_lhash2_bucket(hashinfo, hash2);
+	if (ilb2->count > ilb->count)
+		goto port_lookup;
+
+	return inet6_lhash2_lookup(net, ilb2, skb, doff,
+				   saddr, sport, daddr, hnum,
+				   dif, sdif);
+
+port_lookup:
 	sk_for_each(sk, &ilb->head) {
 		score = compute_score(sk, net, hnum, daddr, dif, sdif, exact_dif);
 		if (score > hiscore) {
-			reuseport = sk->sk_reuseport;
-			if (reuseport) {
+			if (sk->sk_reuseport) {
 				phash = inet6_ehashfn(net, daddr, hnum,
 						      saddr, sport);
 				result = reuseport_select_sock(sk, phash,
 							       skb, doff);
 				if (result)
 					return result;
-				matches = 1;
 			}
 			result = sk;
 			hiscore = score;
-		} else if (score == hiscore && reuseport) {
-			matches++;
-			if (reciprocal_scale(phash, matches) == 0)
-				result = sk;
-			phash = next_pseudo_random32(phash);
 		}
 	}
 	return result;
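
The lookup above only pays for the secondary hash when it is likely to
win. A small user-space model of that heuristic (the threshold of 10 and
the count comparison mirror the code above; the helper name is invented):

	#include <stdbool.h>

	struct bucket { unsigned int count; };

	/* Walk the (port, addr) bucket only when the port-only bucket is
	 * crowded and the addr-qualified bucket is no bigger. */
	static bool use_lhash2(const struct bucket *ilb,
			       const struct bucket *ilb2, bool have_lhash2)
	{
		return have_lhash2 && ilb->count > 10 &&
		       ilb2->count <= ilb->count;
	}
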
diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c
index 9dcc392..b7c4bef 100644
--- a/net/ipv6/ip6_fib.c
+++ b/net/ipv6/ip6_fib.c
@@ -107,16 +107,13 @@ enum {
 
 void fib6_update_sernum(struct rt6_info *rt)
 {
-	struct fib6_table *table = rt->rt6i_table;
 	struct net *net = dev_net(rt->dst.dev);
 	struct fib6_node *fn;
 
-	spin_lock_bh(&table->tb6_lock);
 	fn = rcu_dereference_protected(rt->rt6i_node,
-			lockdep_is_held(&table->tb6_lock));
+			lockdep_is_held(&rt->rt6i_table->tb6_lock));
 	if (fn)
 		fn->fn_sernum = fib6_new_sernum(net);
-	spin_unlock_bh(&table->tb6_lock);
 }
 
 /*
@@ -804,12 +801,6 @@ static struct fib6_node *fib6_add_1(struct net *net,
 	return ln;
 }
 
-static bool rt6_qualify_for_ecmp(struct rt6_info *rt)
-{
-	return (rt->rt6i_flags & (RTF_GATEWAY|RTF_ADDRCONF|RTF_DYNAMIC)) ==
-	       RTF_GATEWAY;
-}
-
 static void fib6_copy_metrics(u32 *mp, const struct mx6_config *mxc)
 {
 	int i;
@@ -898,7 +889,7 @@ static int fib6_add_rt2node(struct fib6_node *fn, struct rt6_info *rt,
 	ins = &fn->leaf;
 
 	for (iter = leaf; iter;
-	     iter = rcu_dereference_protected(iter->dst.rt6_next,
+	     iter = rcu_dereference_protected(iter->rt6_next,
 				lockdep_is_held(&rt->rt6i_table->tb6_lock))) {
 		/*
 		 *	Search for duplicates
@@ -955,7 +946,7 @@ static int fib6_add_rt2node(struct fib6_node *fn, struct rt6_info *rt,
 			break;
 
 next_iter:
-		ins = &iter->dst.rt6_next;
+		ins = &iter->rt6_next;
 	}
 
 	if (fallback_ins && !found) {
@@ -984,7 +975,7 @@ static int fib6_add_rt2node(struct fib6_node *fn, struct rt6_info *rt,
 					      &sibling->rt6i_siblings);
 				break;
 			}
-			sibling = rcu_dereference_protected(sibling->dst.rt6_next,
+			sibling = rcu_dereference_protected(sibling->rt6_next,
 				    lockdep_is_held(&rt->rt6i_table->tb6_lock));
 		}
 		/* For each sibling in the list, increment the counter of
@@ -999,6 +990,7 @@ static int fib6_add_rt2node(struct fib6_node *fn, struct rt6_info *rt,
 			rt6i_nsiblings++;
 		}
 		BUG_ON(rt6i_nsiblings != rt->rt6i_nsiblings);
+		rt6_multipath_rebalance(temp_sibling);
 	}
 
 	/*
@@ -1014,7 +1006,7 @@ static int fib6_add_rt2node(struct fib6_node *fn, struct rt6_info *rt,
 		if (err)
 			return err;
 
-		rcu_assign_pointer(rt->dst.rt6_next, iter);
+		rcu_assign_pointer(rt->rt6_next, iter);
 		atomic_inc(&rt->rt6i_ref);
 		rcu_assign_pointer(rt->rt6i_node, fn);
 		rcu_assign_pointer(*ins, rt);
@@ -1045,7 +1037,7 @@ static int fib6_add_rt2node(struct fib6_node *fn, struct rt6_info *rt,
 
 		atomic_inc(&rt->rt6i_ref);
 		rcu_assign_pointer(rt->rt6i_node, fn);
-		rt->dst.rt6_next = iter->dst.rt6_next;
+		rt->rt6_next = iter->rt6_next;
 		rcu_assign_pointer(*ins, rt);
 		call_fib6_entry_notifiers(info->nl_net, FIB_EVENT_ENTRY_REPLACE,
 					  rt, extack);
@@ -1064,14 +1056,14 @@ static int fib6_add_rt2node(struct fib6_node *fn, struct rt6_info *rt,
 
 		if (nsiblings) {
 			/* Replacing an ECMP route, remove all siblings */
-			ins = &rt->dst.rt6_next;
+			ins = &rt->rt6_next;
 			iter = rcu_dereference_protected(*ins,
 				    lockdep_is_held(&rt->rt6i_table->tb6_lock));
 			while (iter) {
 				if (iter->rt6i_metric > rt->rt6i_metric)
 					break;
 				if (rt6_qualify_for_ecmp(iter)) {
-					*ins = iter->dst.rt6_next;
+					*ins = iter->rt6_next;
 					iter->rt6i_node = NULL;
 					fib6_purge_rt(iter, fn, info->nl_net);
 					if (rcu_access_pointer(fn->rr_ptr) == iter)
@@ -1080,7 +1072,7 @@ static int fib6_add_rt2node(struct fib6_node *fn, struct rt6_info *rt,
 					nsiblings--;
 					info->nl_net->ipv6.rt6_stats->fib_rt_entries--;
 				} else {
-					ins = &iter->dst.rt6_next;
+					ins = &iter->rt6_next;
 				}
 				iter = rcu_dereference_protected(*ins,
 					lockdep_is_held(&rt->rt6i_table->tb6_lock));
@@ -1107,8 +1099,8 @@ void fib6_force_start_gc(struct net *net)
 			  jiffies + net->ipv6.sysctl.ip6_rt_gc_interval);
 }
 
-static void fib6_update_sernum_upto_root(struct rt6_info *rt,
-					 int sernum)
+static void __fib6_update_sernum_upto_root(struct rt6_info *rt,
+					   int sernum)
 {
 	struct fib6_node *fn = rcu_dereference_protected(rt->rt6i_node,
 				lockdep_is_held(&rt->rt6i_table->tb6_lock));
@@ -1122,6 +1114,11 @@ static void fib6_update_sernum_upto_root(struct rt6_info *rt,
 	}
 }
 
+void fib6_update_sernum_upto_root(struct net *net, struct rt6_info *rt)
+{
+	__fib6_update_sernum_upto_root(rt, fib6_new_sernum(net));
+}
+
 /*
  *	Add routing information to the routing tree.
  *	<destination addr>/<source addr>
@@ -1235,7 +1232,7 @@ int fib6_add(struct fib6_node *root, struct rt6_info *rt,
 
 	err = fib6_add_rt2node(fn, rt, info, mxc, extack);
 	if (!err) {
-		fib6_update_sernum_upto_root(rt, sernum);
+		__fib6_update_sernum_upto_root(rt, sernum);
 		fib6_start_gc(info->nl_net, rt);
 	}
 
@@ -1664,7 +1661,7 @@ static void fib6_del_route(struct fib6_table *table, struct fib6_node *fn,
 	WARN_ON_ONCE(rt->rt6i_flags & RTF_CACHE);
 
 	/* Unlink it */
-	*rtp = rt->dst.rt6_next;
+	*rtp = rt->rt6_next;
 	rt->rt6i_node = NULL;
 	net->ipv6.rt6_stats->fib_rt_entries--;
 	net->ipv6.rt6_stats->fib_discarded_routes++;
@@ -1685,6 +1682,7 @@ static void fib6_del_route(struct fib6_table *table, struct fib6_node *fn,
 			sibling->rt6i_nsiblings--;
 		rt->rt6i_nsiblings = 0;
 		list_del_init(&rt->rt6i_siblings);
+		rt6_multipath_rebalance(next_sibling);
 	}
 
 	/* Adjust walkers */
@@ -1692,7 +1690,7 @@ static void fib6_del_route(struct fib6_table *table, struct fib6_node *fn,
 	FOR_WALKERS(net, w) {
 		if (w->state == FWS_C && w->leaf == rt) {
 			RT6_TRACE("walker %p adjusted by delroute\n", w);
-			w->leaf = rcu_dereference_protected(rt->dst.rt6_next,
+			w->leaf = rcu_dereference_protected(rt->rt6_next,
 					    lockdep_is_held(&table->tb6_lock));
 			if (!w->leaf)
 				w->state = FWS_U;
@@ -1756,7 +1754,7 @@ int fib6_del(struct rt6_info *rt, struct nl_info *info)
 			fib6_del_route(table, fn, rtp, info);
 			return 0;
 		}
-		rtp_next = &cur->dst.rt6_next;
+		rtp_next = &cur->rt6_next;
 	}
 	return -ENOENT;
 }
@@ -1912,7 +1910,7 @@ static int fib6_clean_node(struct fib6_walker *w)
 
 	for_each_fib6_walker_rt(w) {
 		res = c->func(rt, c->arg);
-		if (res < 0) {
+		if (res == -1) {
 			w->leaf = rt;
 			res = fib6_del(rt, &info);
 			if (res) {
@@ -1925,6 +1923,12 @@ static int fib6_clean_node(struct fib6_walker *w)
 				continue;
 			}
 			return 0;
+		} else if (res == -2) {
+			if (WARN_ON(!rt->rt6i_nsiblings))
+				continue;
+			rt = list_last_entry(&rt->rt6i_siblings,
+					     struct rt6_info, rt6i_siblings);
+			continue;
 		}
 		WARN_ON(res != 0);
 	}
@@ -1936,7 +1940,8 @@ static int fib6_clean_node(struct fib6_walker *w)
  *	Convenient frontend to tree walker.
  *
  *	func is called on each route.
- *		It may return -1 -> delete this route.
+ *		It may return -2 -> skip multipath route.
+ *			      -1 -> delete this route.
  *		              0  -> continue walking
  */
 
@@ -2128,7 +2133,6 @@ static void fib6_net_exit(struct net *net)
 {
 	unsigned int i;
 
-	rt6_ifdown(net, NULL);
 	del_timer_sync(&net->ipv6.ip6_fib_timer);
 
 	for (i = 0; i < FIB6_TABLE_HASHSZ; i++) {
@@ -2167,8 +2171,8 @@ int __init fib6_init(void)
 	if (ret)
 		goto out_kmem_cache_create;
 
-	ret = __rtnl_register(PF_INET6, RTM_GETROUTE, NULL, inet6_dump_fib,
-			      0);
+	ret = rtnl_register_module(THIS_MODULE, PF_INET6, RTM_GETROUTE, NULL,
+				   inet6_dump_fib, 0);
 	if (ret)
 		goto out_unregister_subsys;
 
@@ -2233,7 +2237,7 @@ static int ipv6_route_yield(struct fib6_walker *w)
 
 	do {
 		iter->w.leaf = rcu_dereference_protected(
-				iter->w.leaf->dst.rt6_next,
+				iter->w.leaf->rt6_next,
 				lockdep_is_held(&iter->tbl->tb6_lock));
 		iter->skip--;
 		if (!iter->skip && iter->w.leaf)
@@ -2299,7 +2303,7 @@ static void *ipv6_route_seq_next(struct seq_file *seq, void *v, loff_t *pos)
 	if (!v)
 		goto iter_table;
 
-	n = rcu_dereference_bh(((struct rt6_info *)v)->dst.rt6_next);
+	n = rcu_dereference_bh(((struct rt6_info *)v)->rt6_next);
 	if (n) {
 		++*pos;
 		return n;
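
The new -2 return value gives fib6_clean_node() callers a way to step
over a whole multipath route once one member has been handled. A
hypothetical cleaner callback following the documented convention (the
function and its policy are invented for illustration):

	/* Returns: -1 delete this route, -2 skip the remaining siblings
	 * of this multipath route, 0 continue walking. */
	static int example_clean(struct rt6_info *rt, void *arg)
	{
		if (rt->rt6i_flags & RTF_CACHE)
			return -1;
		if (rt->rt6i_nsiblings)
			return -2;
		return 0;
	}
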
diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c
index 7726959..db99446 100644
--- a/net/ipv6/ip6_gre.c
+++ b/net/ipv6/ip6_gre.c
@@ -55,6 +55,8 @@
 #include <net/ip6_route.h>
 #include <net/ip6_tunnel.h>
 #include <net/gre.h>
+#include <net/erspan.h>
+#include <net/dst_metadata.h>
 
 
 static bool log_ecn_error = true;
@@ -68,11 +70,13 @@ static unsigned int ip6gre_net_id __read_mostly;
 struct ip6gre_net {
 	struct ip6_tnl __rcu *tunnels[4][IP6_GRE_HASH_SIZE];
 
+	struct ip6_tnl __rcu *collect_md_tun;
 	struct net_device *fb_tunnel_dev;
 };
 
 static struct rtnl_link_ops ip6gre_link_ops __read_mostly;
 static struct rtnl_link_ops ip6gre_tap_ops __read_mostly;
+static struct rtnl_link_ops ip6erspan_tap_ops __read_mostly;
 static int ip6gre_tunnel_init(struct net_device *dev);
 static void ip6gre_tunnel_setup(struct net_device *dev);
 static void ip6gre_tunnel_link(struct ip6gre_net *ign, struct ip6_tnl *t);
@@ -121,7 +125,8 @@ static struct ip6_tnl *ip6gre_tunnel_lookup(struct net_device *dev,
 	unsigned int h1 = HASH_KEY(key);
 	struct ip6_tnl *t, *cand = NULL;
 	struct ip6gre_net *ign = net_generic(net, ip6gre_net_id);
-	int dev_type = (gre_proto == htons(ETH_P_TEB)) ?
+	int dev_type = (gre_proto == htons(ETH_P_TEB) ||
+			gre_proto == htons(ETH_P_ERSPAN)) ?
 		       ARPHRD_ETHER : ARPHRD_IP6GRE;
 	int score, cand_score = 4;
 
@@ -226,6 +231,10 @@ static struct ip6_tnl *ip6gre_tunnel_lookup(struct net_device *dev,
 	if (cand)
 		return cand;
 
+	t = rcu_dereference(ign->collect_md_tun);
+	if (t && t->dev->flags & IFF_UP)
+		return t;
+
 	dev = ign->fb_tunnel_dev;
 	if (dev->flags & IFF_UP)
 		return netdev_priv(dev);
@@ -261,6 +270,9 @@ static void ip6gre_tunnel_link(struct ip6gre_net *ign, struct ip6_tnl *t)
 {
 	struct ip6_tnl __rcu **tp = ip6gre_bucket(ign, t);
 
+	if (t->parms.collect_md)
+		rcu_assign_pointer(ign->collect_md_tun, t);
+
 	rcu_assign_pointer(t->next, rtnl_dereference(*tp));
 	rcu_assign_pointer(*tp, t);
 }
@@ -270,6 +282,9 @@ static void ip6gre_tunnel_unlink(struct ip6gre_net *ign, struct ip6_tnl *t)
 	struct ip6_tnl __rcu **tp;
 	struct ip6_tnl *iter;
 
+	if (t->parms.collect_md)
+		rcu_assign_pointer(ign->collect_md_tun, NULL);
+
 	for (tp = ip6gre_bucket(ign, t);
 	     (iter = rtnl_dereference(*tp)) != NULL;
 	     tp = &iter->next) {
@@ -460,7 +475,107 @@ static int ip6gre_rcv(struct sk_buff *skb, const struct tnl_ptk_info *tpi)
 				      &ipv6h->saddr, &ipv6h->daddr, tpi->key,
 				      tpi->proto);
 	if (tunnel) {
-		ip6_tnl_rcv(tunnel, skb, tpi, NULL, log_ecn_error);
+		if (tunnel->parms.collect_md) {
+			struct metadata_dst *tun_dst;
+			__be64 tun_id;
+			__be16 flags;
+
+			flags = tpi->flags;
+			tun_id = key32_to_tunnel_id(tpi->key);
+
+			tun_dst = ipv6_tun_rx_dst(skb, flags, tun_id, 0);
+			if (!tun_dst)
+				return PACKET_REJECT;
+
+			ip6_tnl_rcv(tunnel, skb, tpi, tun_dst, log_ecn_error);
+		} else {
+			ip6_tnl_rcv(tunnel, skb, tpi, NULL, log_ecn_error);
+		}
+
+		return PACKET_RCVD;
+	}
+
+	return PACKET_REJECT;
+}
+
+static int ip6erspan_rcv(struct sk_buff *skb, int gre_hdr_len,
+			 struct tnl_ptk_info *tpi)
+{
+	struct erspan_base_hdr *ershdr;
+	struct erspan_metadata *pkt_md;
+	const struct ipv6hdr *ipv6h;
+	struct ip6_tnl *tunnel;
+	u8 ver;
+
+	if (unlikely(!pskb_may_pull(skb, sizeof(*ershdr))))
+		return PACKET_REJECT;
+
+	ipv6h = ipv6_hdr(skb);
+	ershdr = (struct erspan_base_hdr *)skb->data;
+	ver = (ntohs(ershdr->ver_vlan) & VER_MASK) >> VER_OFFSET;
+	tpi->key = cpu_to_be32(ntohs(ershdr->session_id) & ID_MASK);
+
+	tunnel = ip6gre_tunnel_lookup(skb->dev,
+				      &ipv6h->saddr, &ipv6h->daddr, tpi->key,
+				      tpi->proto);
+	if (tunnel) {
+		int len = erspan_hdr_len(ver);
+
+		if (unlikely(!pskb_may_pull(skb, len)))
+			return PACKET_REJECT;
+
+		ershdr = (struct erspan_base_hdr *)skb->data;
+		pkt_md = (struct erspan_metadata *)(ershdr + 1);
+
+		if (__iptunnel_pull_header(skb, len,
+					   htons(ETH_P_TEB),
+					   false, false) < 0)
+			return PACKET_REJECT;
+
+		if (tunnel->parms.collect_md) {
+			struct metadata_dst *tun_dst;
+			struct ip_tunnel_info *info;
+			struct erspan_metadata *md;
+			__be64 tun_id;
+			__be16 flags;
+
+			tpi->flags |= TUNNEL_KEY;
+			flags = tpi->flags;
+			tun_id = key32_to_tunnel_id(tpi->key);
+
+			tun_dst = ipv6_tun_rx_dst(skb, flags, tun_id,
+						  sizeof(*md));
+			if (!tun_dst)
+				return PACKET_REJECT;
+
+			info = &tun_dst->u.tun_info;
+			md = ip_tunnel_info_opts(info);
+
+			memcpy(md, pkt_md, sizeof(*md));
+			md->version = ver;
+			info->key.tun_flags |= TUNNEL_ERSPAN_OPT;
+			info->options_len = sizeof(*md);
+
+			ip6_tnl_rcv(tunnel, skb, tpi, tun_dst, log_ecn_error);
+
+		} else {
+			tunnel->parms.erspan_ver = ver;
+
+			if (ver == 1) {
+				tunnel->parms.index = ntohl(pkt_md->u.index);
+			} else {
+				u16 md2_flags;
+				u16 dir, hwid;
+
+				md2_flags = ntohs(pkt_md->u.md2.flags);
+				dir = (md2_flags & DIR_MASK) >> DIR_OFFSET;
+				hwid = (md2_flags & HWID_MASK) >> HWID_OFFSET;
+				tunnel->parms.dir = dir;
+				tunnel->parms.hwid = hwid;
+			}
+
+			ip6_tnl_rcv(tunnel, skb, tpi, NULL, log_ecn_error);
+		}
 
 		return PACKET_RCVD;
 	}
@@ -481,9 +596,17 @@ static int gre_rcv(struct sk_buff *skb)
 	if (iptunnel_pull_header(skb, hdr_len, tpi.proto, false))
 		goto drop;
 
+	if (unlikely(tpi.proto == htons(ETH_P_ERSPAN) ||
+		     tpi.proto == htons(ETH_P_ERSPAN2))) {
+		if (ip6erspan_rcv(skb, hdr_len, &tpi) == PACKET_RCVD)
+			return 0;
+		goto out;
+	}
+
 	if (ip6gre_rcv(skb, &tpi) == PACKET_RCVD)
 		return 0;
 
+out:
 	icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_PORT_UNREACH, 0);
 drop:
 	kfree_skb(skb);
@@ -496,6 +619,78 @@ static int gre_handle_offloads(struct sk_buff *skb, bool csum)
 					csum ? SKB_GSO_GRE_CSUM : SKB_GSO_GRE);
 }
 
+static void prepare_ip6gre_xmit_ipv4(struct sk_buff *skb,
+				     struct net_device *dev,
+				     struct flowi6 *fl6, __u8 *dsfield,
+				     int *encap_limit)
+{
+	const struct iphdr *iph = ip_hdr(skb);
+	struct ip6_tnl *t = netdev_priv(dev);
+
+	if (!(t->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT))
+		*encap_limit = t->parms.encap_limit;
+
+	memcpy(fl6, &t->fl.u.ip6, sizeof(*fl6));
+
+	if (t->parms.flags & IP6_TNL_F_USE_ORIG_TCLASS)
+		*dsfield = ipv4_get_dsfield(iph);
+	else
+		*dsfield = ip6_tclass(t->parms.flowinfo);
+
+	if (t->parms.flags & IP6_TNL_F_USE_ORIG_FWMARK)
+		fl6->flowi6_mark = skb->mark;
+	else
+		fl6->flowi6_mark = t->parms.fwmark;
+
+	fl6->flowi6_uid = sock_net_uid(dev_net(dev), NULL);
+}
+
+static int prepare_ip6gre_xmit_ipv6(struct sk_buff *skb,
+				    struct net_device *dev,
+				    struct flowi6 *fl6, __u8 *dsfield,
+				    int *encap_limit)
+{
+	struct ipv6hdr *ipv6h = ipv6_hdr(skb);
+	struct ip6_tnl *t = netdev_priv(dev);
+	__u16 offset;
+
+	offset = ip6_tnl_parse_tlv_enc_lim(skb, skb_network_header(skb));
+	/* ip6_tnl_parse_tlv_enc_lim() might have reallocated skb->head */
+
+	if (offset > 0) {
+		struct ipv6_tlv_tnl_enc_lim *tel;
+
+		tel = (struct ipv6_tlv_tnl_enc_lim *)&skb_network_header(skb)[offset];
+		if (tel->encap_limit == 0) {
+			icmpv6_send(skb, ICMPV6_PARAMPROB,
+				    ICMPV6_HDR_FIELD, offset + 2);
+			return -1;
+		}
+		*encap_limit = tel->encap_limit - 1;
+	} else if (!(t->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT)) {
+		*encap_limit = t->parms.encap_limit;
+	}
+
+	memcpy(fl6, &t->fl.u.ip6, sizeof(*fl6));
+
+	if (t->parms.flags & IP6_TNL_F_USE_ORIG_TCLASS)
+		*dsfield = ipv6_get_dsfield(ipv6h);
+	else
+		*dsfield = ip6_tclass(t->parms.flowinfo);
+
+	if (t->parms.flags & IP6_TNL_F_USE_ORIG_FLOWLABEL)
+		fl6->flowlabel |= ip6_flowlabel(ipv6h);
+
+	if (t->parms.flags & IP6_TNL_F_USE_ORIG_FWMARK)
+		fl6->flowi6_mark = skb->mark;
+	else
+		fl6->flowi6_mark = t->parms.fwmark;
+
+	fl6->flowi6_uid = sock_net_uid(dev_net(dev), NULL);
+
+	return 0;
+}
+
 static netdev_tx_t __gre6_xmit(struct sk_buff *skb,
 			       struct net_device *dev, __u8 dsfield,
 			       struct flowi6 *fl6, int encap_limit,
@@ -517,8 +712,38 @@ static netdev_tx_t __gre6_xmit(struct sk_buff *skb,
 
 	/* Push GRE header. */
 	protocol = (dev->type == ARPHRD_ETHER) ? htons(ETH_P_TEB) : proto;
-	gre_build_header(skb, tunnel->tun_hlen, tunnel->parms.o_flags,
-			 protocol, tunnel->parms.o_key, htonl(tunnel->o_seqno));
+
+	if (tunnel->parms.collect_md) {
+		struct ip_tunnel_info *tun_info;
+		const struct ip_tunnel_key *key;
+		__be16 flags;
+
+		tun_info = skb_tunnel_info(skb);
+		if (unlikely(!tun_info ||
+			     !(tun_info->mode & IP_TUNNEL_INFO_TX) ||
+			     ip_tunnel_info_af(tun_info) != AF_INET6))
+			return -EINVAL;
+
+		key = &tun_info->key;
+		memset(fl6, 0, sizeof(*fl6));
+		fl6->flowi6_proto = IPPROTO_GRE;
+		fl6->daddr = key->u.ipv6.dst;
+		fl6->flowlabel = key->label;
+		fl6->flowi6_uid = sock_net_uid(dev_net(dev), NULL);
+
+		dsfield = key->tos;
+		flags = key->tun_flags & (TUNNEL_CSUM | TUNNEL_KEY);
+		tunnel->tun_hlen = gre_calc_hlen(flags);
+
+		gre_build_header(skb, tunnel->tun_hlen,
+				 flags, protocol,
+				 tunnel_id_to_key32(tun_info->key.tun_id), 0);
+
+	} else {
+		gre_build_header(skb, tunnel->tun_hlen, tunnel->parms.o_flags,
+				 protocol, tunnel->parms.o_key,
+				 htonl(tunnel->o_seqno));
+	}
 
 	return ip6_tnl_xmit(skb, dev, dsfield, fl6, encap_limit, pmtu,
 			    NEXTHDR_GRE);
@@ -527,30 +752,17 @@ static netdev_tx_t __gre6_xmit(struct sk_buff *skb,
 static inline int ip6gre_xmit_ipv4(struct sk_buff *skb, struct net_device *dev)
 {
 	struct ip6_tnl *t = netdev_priv(dev);
-	const struct iphdr  *iph = ip_hdr(skb);
 	int encap_limit = -1;
 	struct flowi6 fl6;
-	__u8 dsfield;
+	__u8 dsfield = 0;
 	__u32 mtu;
 	int err;
 
 	memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
 
-	if (!(t->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT))
-		encap_limit = t->parms.encap_limit;
-
-	memcpy(&fl6, &t->fl.u.ip6, sizeof(fl6));
-
-	if (t->parms.flags & IP6_TNL_F_USE_ORIG_TCLASS)
-		dsfield = ipv4_get_dsfield(iph);
-	else
-		dsfield = ip6_tclass(t->parms.flowinfo);
-	if (t->parms.flags & IP6_TNL_F_USE_ORIG_FWMARK)
-		fl6.flowi6_mark = skb->mark;
-	else
-		fl6.flowi6_mark = t->parms.fwmark;
-
-	fl6.flowi6_uid = sock_net_uid(dev_net(dev), NULL);
+	if (!t->parms.collect_md)
+		prepare_ip6gre_xmit_ipv4(skb, dev, &fl6,
+					 &dsfield, &encap_limit);
 
 	err = gre_handle_offloads(skb, !!(t->parms.o_flags & TUNNEL_CSUM));
 	if (err)
@@ -574,46 +786,17 @@ static inline int ip6gre_xmit_ipv6(struct sk_buff *skb, struct net_device *dev)
 	struct ip6_tnl *t = netdev_priv(dev);
 	struct ipv6hdr *ipv6h = ipv6_hdr(skb);
 	int encap_limit = -1;
-	__u16 offset;
 	struct flowi6 fl6;
-	__u8 dsfield;
+	__u8 dsfield = 0;
 	__u32 mtu;
 	int err;
 
 	if (ipv6_addr_equal(&t->parms.raddr, &ipv6h->saddr))
 		return -1;
 
-	offset = ip6_tnl_parse_tlv_enc_lim(skb, skb_network_header(skb));
-	/* ip6_tnl_parse_tlv_enc_lim() might have reallocated skb->head */
-	ipv6h = ipv6_hdr(skb);
-
-	if (offset > 0) {
-		struct ipv6_tlv_tnl_enc_lim *tel;
-		tel = (struct ipv6_tlv_tnl_enc_lim *)&skb_network_header(skb)[offset];
-		if (tel->encap_limit == 0) {
-			icmpv6_send(skb, ICMPV6_PARAMPROB,
-				    ICMPV6_HDR_FIELD, offset + 2);
-			return -1;
-		}
-		encap_limit = tel->encap_limit - 1;
-	} else if (!(t->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT))
-		encap_limit = t->parms.encap_limit;
-
-	memcpy(&fl6, &t->fl.u.ip6, sizeof(fl6));
-
-	if (t->parms.flags & IP6_TNL_F_USE_ORIG_TCLASS)
-		dsfield = ipv6_get_dsfield(ipv6h);
-	else
-		dsfield = ip6_tclass(t->parms.flowinfo);
-
-	if (t->parms.flags & IP6_TNL_F_USE_ORIG_FLOWLABEL)
-		fl6.flowlabel |= ip6_flowlabel(ipv6h);
-	if (t->parms.flags & IP6_TNL_F_USE_ORIG_FWMARK)
-		fl6.flowi6_mark = skb->mark;
-	else
-		fl6.flowi6_mark = t->parms.fwmark;
-
-	fl6.flowi6_uid = sock_net_uid(dev_net(dev), NULL);
+	if (!t->parms.collect_md &&
+	    prepare_ip6gre_xmit_ipv6(skb, dev, &fl6, &dsfield, &encap_limit))
+		return -1;
 
 	if (gre_handle_offloads(skb, !!(t->parms.o_flags & TUNNEL_CSUM)))
 		return -1;
@@ -660,7 +843,8 @@ static int ip6gre_xmit_other(struct sk_buff *skb, struct net_device *dev)
 	if (!(t->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT))
 		encap_limit = t->parms.encap_limit;
 
-	memcpy(&fl6, &t->fl.u.ip6, sizeof(fl6));
+	if (!t->parms.collect_md)
+		memcpy(&fl6, &t->fl.u.ip6, sizeof(fl6));
 
 	err = gre_handle_offloads(skb, !!(t->parms.o_flags & TUNNEL_CSUM));
 	if (err)
@@ -705,6 +889,141 @@ static netdev_tx_t ip6gre_tunnel_xmit(struct sk_buff *skb,
 	return NETDEV_TX_OK;
 }
 
+static netdev_tx_t ip6erspan_tunnel_xmit(struct sk_buff *skb,
+					 struct net_device *dev)
+{
+	struct ipv6hdr *ipv6h = ipv6_hdr(skb);
+	struct ip6_tnl *t = netdev_priv(dev);
+	struct dst_entry *dst = skb_dst(skb);
+	struct net_device_stats *stats;
+	bool truncate = false;
+	int encap_limit = -1;
+	__u8 dsfield = 0;
+	struct flowi6 fl6;
+	int err = -EINVAL;
+	__u32 mtu;
+
+	if (!ip6_tnl_xmit_ctl(t, &t->parms.laddr, &t->parms.raddr))
+		goto tx_err;
+
+	if (gre_handle_offloads(skb, false))
+		goto tx_err;
+
+	if (skb->len > dev->mtu + dev->hard_header_len) {
+		pskb_trim(skb, dev->mtu + dev->hard_header_len);
+		truncate = true;
+	}
+
+	t->parms.o_flags &= ~TUNNEL_KEY;
+	IPCB(skb)->flags = 0;
+
+	/* For collect_md mode, derive fl6 from the tunnel key;
+	 * for native mode, call prepare_ip6gre_xmit_{ipv4,ipv6}.
+	 */
+	if (t->parms.collect_md) {
+		struct ip_tunnel_info *tun_info;
+		const struct ip_tunnel_key *key;
+		struct erspan_metadata *md;
+
+		tun_info = skb_tunnel_info(skb);
+		if (unlikely(!tun_info ||
+			     !(tun_info->mode & IP_TUNNEL_INFO_TX) ||
+			     ip_tunnel_info_af(tun_info) != AF_INET6))
+			return -EINVAL;
+
+		key = &tun_info->key;
+		memset(&fl6, 0, sizeof(fl6));
+		fl6.flowi6_proto = IPPROTO_GRE;
+		fl6.daddr = key->u.ipv6.dst;
+		fl6.flowlabel = key->label;
+		fl6.flowi6_uid = sock_net_uid(dev_net(dev), NULL);
+
+		dsfield = key->tos;
+		md = ip_tunnel_info_opts(tun_info);
+		if (!md)
+			goto tx_err;
+
+		if (md->version == 1) {
+			erspan_build_header(skb,
+					    tunnel_id_to_key32(key->tun_id),
+					    ntohl(md->u.index), truncate,
+					    false);
+		} else if (md->version == 2) {
+			u16 md2_flags;
+			u16 dir, hwid;
+
+			md2_flags = ntohs(md->u.md2.flags);
+			dir = (md2_flags & DIR_MASK) >> DIR_OFFSET;
+			hwid = (md2_flags & HWID_MASK) >> HWID_OFFSET;
+
+			erspan_build_header_v2(skb,
+					       tunnel_id_to_key32(key->tun_id),
+					       dir, hwid, truncate,
+					       false);
+		}
+	} else {
+		switch (skb->protocol) {
+		case htons(ETH_P_IP):
+			memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
+			prepare_ip6gre_xmit_ipv4(skb, dev, &fl6,
+						 &dsfield, &encap_limit);
+			break;
+		case htons(ETH_P_IPV6):
+			if (ipv6_addr_equal(&t->parms.raddr, &ipv6h->saddr))
+				goto tx_err;
+			if (prepare_ip6gre_xmit_ipv6(skb, dev, &fl6,
+						     &dsfield, &encap_limit))
+				goto tx_err;
+			break;
+		default:
+			memcpy(&fl6, &t->fl.u.ip6, sizeof(fl6));
+			break;
+		}
+
+		if (t->parms.erspan_ver == 1)
+			erspan_build_header(skb, t->parms.o_key,
+					    t->parms.index,
+					    truncate, false);
+		else
+			erspan_build_header_v2(skb, t->parms.o_key,
+					       t->parms.dir,
+					       t->parms.hwid,
+					       truncate, false);
+		fl6.daddr = t->parms.raddr;
+	}
+
+	/* Push GRE header. */
+	gre_build_header(skb, 8, TUNNEL_SEQ,
+			 htons(ETH_P_ERSPAN), 0, htonl(t->o_seqno++));
+
+	/* TooBig packet may have updated dst->dev's mtu */
+	if (!t->parms.collect_md && dst && dst_mtu(dst) > dst->dev->mtu)
+		dst->ops->update_pmtu(dst, NULL, skb, dst->dev->mtu);
+
+	err = ip6_tnl_xmit(skb, dev, dsfield, &fl6, encap_limit, &mtu,
+			   NEXTHDR_GRE);
+	if (err != 0) {
+		/* XXX: send ICMP error even if DF is not set. */
+		if (err == -EMSGSIZE) {
+			if (skb->protocol == htons(ETH_P_IP))
+				icmp_send(skb, ICMP_DEST_UNREACH,
+					  ICMP_FRAG_NEEDED, htonl(mtu));
+			else
+				icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
+		}
+
+		goto tx_err;
+	}
+	return NETDEV_TX_OK;
+
+tx_err:
+	stats = &t->dev->stats;
+	stats->tx_errors++;
+	stats->tx_dropped++;
+	kfree_skb(skb);
+	return NETDEV_TX_OK;
+}
+
 static void ip6gre_tnl_link_config(struct ip6_tnl *t, int set_mtu)
 {
 	struct net_device *dev = t->dev;
@@ -1078,6 +1397,10 @@ static int ip6gre_tunnel_init_common(struct net_device *dev)
 	if (!(tunnel->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT))
 		dev->mtu -= 8;
 
+	if (tunnel->parms.collect_md) {
+		dev->features |= NETIF_F_NETNS_LOCAL;
+		netif_keep_dst(dev);
+	}
 	ip6gre_tnl_init_features(dev);
 
 	return 0;
@@ -1094,6 +1417,9 @@ static int ip6gre_tunnel_init(struct net_device *dev)
 
 	tunnel = netdev_priv(dev);
 
+	if (tunnel->parms.collect_md)
+		return 0;
+
 	memcpy(dev->dev_addr, &tunnel->parms.laddr, sizeof(struct in6_addr));
 	memcpy(dev->broadcast, &tunnel->parms.raddr, sizeof(struct in6_addr));
 
@@ -1116,7 +1442,6 @@ static void ip6gre_fb_tunnel_init(struct net_device *dev)
 	dev_hold(dev);
 }
 
-
 static struct inet6_protocol ip6gre_protocol __read_mostly = {
 	.handler     = gre_rcv,
 	.err_handler = ip6gre_err,
@@ -1131,7 +1456,8 @@ static void ip6gre_destroy_tunnels(struct net *net, struct list_head *head)
 
 	for_each_netdev_safe(net, dev, aux)
 		if (dev->rtnl_link_ops == &ip6gre_link_ops ||
-		    dev->rtnl_link_ops == &ip6gre_tap_ops)
+		    dev->rtnl_link_ops == &ip6gre_tap_ops ||
+		    dev->rtnl_link_ops == &ip6erspan_tap_ops)
 			unregister_netdevice_queue(dev, head);
 
 	for (prio = 0; prio < 4; prio++) {
@@ -1253,6 +1579,70 @@ static int ip6gre_tap_validate(struct nlattr *tb[], struct nlattr *data[],
 	return ip6gre_tunnel_validate(tb, data, extack);
 }
 
+static int ip6erspan_tap_validate(struct nlattr *tb[], struct nlattr *data[],
+				  struct netlink_ext_ack *extack)
+{
+	__be16 flags = 0;
+	int ret, ver = 0;
+
+	if (!data)
+		return 0;
+
+	ret = ip6gre_tap_validate(tb, data, extack);
+	if (ret)
+		return ret;
+
+	/* ERSPAN should only have the GRE sequence and key flags */
+	if (data[IFLA_GRE_OFLAGS])
+		flags |= nla_get_be16(data[IFLA_GRE_OFLAGS]);
+	if (data[IFLA_GRE_IFLAGS])
+		flags |= nla_get_be16(data[IFLA_GRE_IFLAGS]);
+	if (!data[IFLA_GRE_COLLECT_METADATA] &&
+	    flags != (GRE_SEQ | GRE_KEY))
+		return -EINVAL;
+
+	/* The ERSPAN session ID is only 10 bits wide. Since we reuse the
+	 * 32-bit key field as the ID, check its range.
+	 */
+	if (data[IFLA_GRE_IKEY] &&
+	    (ntohl(nla_get_be32(data[IFLA_GRE_IKEY])) & ~ID_MASK))
+		return -EINVAL;
+
+	if (data[IFLA_GRE_OKEY] &&
+	    (ntohl(nla_get_be32(data[IFLA_GRE_OKEY])) & ~ID_MASK))
+		return -EINVAL;
+
+	if (data[IFLA_GRE_ERSPAN_VER]) {
+		ver = nla_get_u8(data[IFLA_GRE_ERSPAN_VER]);
+		if (ver != 1 && ver != 2)
+			return -EINVAL;
+	}
+
+	if (ver == 1) {
+		if (data[IFLA_GRE_ERSPAN_INDEX]) {
+			u32 index = nla_get_u32(data[IFLA_GRE_ERSPAN_INDEX]);
+
+			if (index & ~INDEX_MASK)
+				return -EINVAL;
+		}
+	} else if (ver == 2) {
+		if (data[IFLA_GRE_ERSPAN_DIR]) {
+			u16 dir = nla_get_u8(data[IFLA_GRE_ERSPAN_DIR]);
+
+			if (dir & ~(DIR_MASK >> DIR_OFFSET))
+				return -EINVAL;
+		}
+
+		if (data[IFLA_GRE_ERSPAN_HWID]) {
+			u16 hwid = nla_get_u16(data[IFLA_GRE_ERSPAN_HWID]);
+
+			if (hwid & ~(HWID_MASK >> HWID_OFFSET))
+				return -EINVAL;
+		}
+	}
+
+	return 0;
+}
 
 static void ip6gre_netlink_parms(struct nlattr *data[],
 				struct __ip6_tnl_parm *parms)
@@ -1299,6 +1689,22 @@ static void ip6gre_netlink_parms(struct nlattr *data[],
 
 	if (data[IFLA_GRE_FWMARK])
 		parms->fwmark = nla_get_u32(data[IFLA_GRE_FWMARK]);
+
+	if (data[IFLA_GRE_COLLECT_METADATA])
+		parms->collect_md = true;
+
+	if (data[IFLA_GRE_ERSPAN_VER])
+		parms->erspan_ver = nla_get_u8(data[IFLA_GRE_ERSPAN_VER]);
+
+	if (parms->erspan_ver == 1) {
+		if (data[IFLA_GRE_ERSPAN_INDEX])
+			parms->index = nla_get_u32(data[IFLA_GRE_ERSPAN_INDEX]);
+	} else if (parms->erspan_ver == 2) {
+		if (data[IFLA_GRE_ERSPAN_DIR])
+			parms->dir = nla_get_u8(data[IFLA_GRE_ERSPAN_DIR]);
+		if (data[IFLA_GRE_ERSPAN_HWID])
+			parms->hwid = nla_get_u16(data[IFLA_GRE_ERSPAN_HWID]);
+	}
 }
 
 static int ip6gre_tap_init(struct net_device *dev)
@@ -1330,6 +1736,59 @@ static const struct net_device_ops ip6gre_tap_netdev_ops = {
 	.ndo_get_iflink = ip6_tnl_get_iflink,
 };
 
+static int ip6erspan_tap_init(struct net_device *dev)
+{
+	struct ip6_tnl *tunnel;
+	int t_hlen;
+	int ret;
+
+	tunnel = netdev_priv(dev);
+
+	tunnel->dev = dev;
+	tunnel->net = dev_net(dev);
+	strcpy(tunnel->parms.name, dev->name);
+
+	dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
+	if (!dev->tstats)
+		return -ENOMEM;
+
+	ret = dst_cache_init(&tunnel->dst_cache, GFP_KERNEL);
+	if (ret) {
+		free_percpu(dev->tstats);
+		dev->tstats = NULL;
+		return ret;
+	}
+
+	tunnel->tun_hlen = 8;
+	tunnel->hlen = tunnel->tun_hlen + tunnel->encap_hlen +
+		       erspan_hdr_len(tunnel->parms.erspan_ver);
+	t_hlen = tunnel->hlen + sizeof(struct ipv6hdr);
+
+	dev->hard_header_len = LL_MAX_HEADER + t_hlen;
+	dev->mtu = ETH_DATA_LEN - t_hlen;
+	if (dev->type == ARPHRD_ETHER)
+		dev->mtu -= ETH_HLEN;
+	if (!(tunnel->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT))
+		dev->mtu -= 8;
+
+	dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
+	tunnel = netdev_priv(dev);
+	ip6gre_tnl_link_config(tunnel, 1);
+
+	return 0;
+}
+
+static const struct net_device_ops ip6erspan_netdev_ops = {
+	.ndo_init =		ip6erspan_tap_init,
+	.ndo_uninit =		ip6gre_tunnel_uninit,
+	.ndo_start_xmit =	ip6erspan_tunnel_xmit,
+	.ndo_set_mac_address =	eth_mac_addr,
+	.ndo_validate_addr =	eth_validate_addr,
+	.ndo_change_mtu =	ip6_tnl_change_mtu,
+	.ndo_get_stats64 =	ip_tunnel_get_stats64,
+	.ndo_get_iflink =	ip6_tnl_get_iflink,
+};
+
 static void ip6gre_tap_setup(struct net_device *dev)
 {
 
@@ -1400,8 +1859,13 @@ static int ip6gre_newlink(struct net *src_net, struct net_device *dev,
 
 	ip6gre_netlink_parms(data, &nt->parms);
 
-	if (ip6gre_tunnel_find(net, &nt->parms, dev->type))
-		return -EEXIST;
+	if (nt->parms.collect_md) {
+		if (rtnl_dereference(ign->collect_md_tun))
+			return -EEXIST;
+	} else {
+		if (ip6gre_tunnel_find(net, &nt->parms, dev->type))
+			return -EEXIST;
+	}
 
 	if (dev->type == ARPHRD_ETHER && !tb[IFLA_ADDRESS])
 		eth_hw_addr_random(dev);
@@ -1500,8 +1964,12 @@ static size_t ip6gre_get_size(const struct net_device *dev)
 		nla_total_size(2) +
 		/* IFLA_GRE_ENCAP_DPORT */
 		nla_total_size(2) +
+		/* IFLA_GRE_COLLECT_METADATA */
+		nla_total_size(0) +
 		/* IFLA_GRE_FWMARK */
 		nla_total_size(4) +
+		/* IFLA_GRE_ERSPAN_INDEX */
+		nla_total_size(4) +
 		0;
 }
 
@@ -1523,7 +1991,8 @@ static int ip6gre_fill_info(struct sk_buff *skb, const struct net_device *dev)
 	    nla_put_u8(skb, IFLA_GRE_ENCAP_LIMIT, p->encap_limit) ||
 	    nla_put_be32(skb, IFLA_GRE_FLOWINFO, p->flowinfo) ||
 	    nla_put_u32(skb, IFLA_GRE_FLAGS, p->flags) ||
-	    nla_put_u32(skb, IFLA_GRE_FWMARK, p->fwmark))
+	    nla_put_u32(skb, IFLA_GRE_FWMARK, p->fwmark) ||
+	    nla_put_u32(skb, IFLA_GRE_ERSPAN_INDEX, p->index))
 		goto nla_put_failure;
 
 	if (nla_put_u16(skb, IFLA_GRE_ENCAP_TYPE,
@@ -1536,6 +2005,24 @@ static int ip6gre_fill_info(struct sk_buff *skb, const struct net_device *dev)
 			t->encap.flags))
 		goto nla_put_failure;
 
+	if (p->collect_md) {
+		if (nla_put_flag(skb, IFLA_GRE_COLLECT_METADATA))
+			goto nla_put_failure;
+	}
+
+	if (nla_put_u8(skb, IFLA_GRE_ERSPAN_VER, p->erspan_ver))
+		goto nla_put_failure;
+
+	if (p->erspan_ver == 1) {
+		if (nla_put_u32(skb, IFLA_GRE_ERSPAN_INDEX, p->index))
+			goto nla_put_failure;
+	} else if (p->erspan_ver == 2) {
+		if (nla_put_u8(skb, IFLA_GRE_ERSPAN_DIR, p->dir))
+			goto nla_put_failure;
+		if (nla_put_u16(skb, IFLA_GRE_ERSPAN_HWID, p->hwid))
+			goto nla_put_failure;
+	}
+
 	return 0;
 
 nla_put_failure:
@@ -1558,9 +2045,28 @@ static const struct nla_policy ip6gre_policy[IFLA_GRE_MAX + 1] = {
 	[IFLA_GRE_ENCAP_FLAGS]  = { .type = NLA_U16 },
 	[IFLA_GRE_ENCAP_SPORT]  = { .type = NLA_U16 },
 	[IFLA_GRE_ENCAP_DPORT]  = { .type = NLA_U16 },
+	[IFLA_GRE_COLLECT_METADATA] = { .type = NLA_FLAG },
 	[IFLA_GRE_FWMARK]       = { .type = NLA_U32 },
+	[IFLA_GRE_ERSPAN_INDEX] = { .type = NLA_U32 },
+	[IFLA_GRE_ERSPAN_VER]	= { .type = NLA_U8 },
+	[IFLA_GRE_ERSPAN_DIR]	= { .type = NLA_U8 },
+	[IFLA_GRE_ERSPAN_HWID]	= { .type = NLA_U16 },
 };
 
+static void ip6erspan_tap_setup(struct net_device *dev)
+{
+	ether_setup(dev);
+
+	dev->netdev_ops = &ip6erspan_netdev_ops;
+	dev->needs_free_netdev = true;
+	dev->priv_destructor = ip6gre_dev_free;
+
+	dev->features |= NETIF_F_NETNS_LOCAL;
+	dev->priv_flags &= ~IFF_TX_SKB_SHARING;
+	dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
+	netif_keep_dst(dev);
+}
+
 static struct rtnl_link_ops ip6gre_link_ops __read_mostly = {
 	.kind		= "ip6gre",
 	.maxtype	= IFLA_GRE_MAX,
@@ -1590,6 +2096,20 @@ static struct rtnl_link_ops ip6gre_tap_ops __read_mostly = {
 	.get_link_net	= ip6_tnl_get_link_net,
 };
 
+static struct rtnl_link_ops ip6erspan_tap_ops __read_mostly = {
+	.kind		= "ip6erspan",
+	.maxtype	= IFLA_GRE_MAX,
+	.policy		= ip6gre_policy,
+	.priv_size	= sizeof(struct ip6_tnl),
+	.setup		= ip6erspan_tap_setup,
+	.validate	= ip6erspan_tap_validate,
+	.newlink	= ip6gre_newlink,
+	.changelink	= ip6gre_changelink,
+	.get_size	= ip6gre_get_size,
+	.fill_info	= ip6gre_fill_info,
+	.get_link_net	= ip6_tnl_get_link_net,
+};
+
 /*
  *	And now the modules code and kernel interface.
  */
@@ -1618,9 +2138,15 @@ static int __init ip6gre_init(void)
 	if (err < 0)
 		goto tap_ops_failed;
 
+	err = rtnl_link_register(&ip6erspan_tap_ops);
+	if (err < 0)
+		goto erspan_link_failed;
+
 out:
 	return err;
 
+erspan_link_failed:
+	rtnl_link_unregister(&ip6gre_tap_ops);
 tap_ops_failed:
 	rtnl_link_unregister(&ip6gre_link_ops);
 rtnl_link_failed:
@@ -1634,6 +2160,7 @@ static void __exit ip6gre_fini(void)
 {
 	rtnl_link_unregister(&ip6gre_tap_ops);
 	rtnl_link_unregister(&ip6gre_link_ops);
+	rtnl_link_unregister(&ip6erspan_tap_ops);
 	inet6_del_protocol(&ip6gre_protocol, IPPROTO_GRE);
 	unregister_pernet_device(&ip6gre_net_ops);
 }
@@ -1645,4 +2172,5 @@ MODULE_AUTHOR("D. Kozlov (xeb@mail.ru)");
 MODULE_DESCRIPTION("GRE over IPv6 tunneling device");
 MODULE_ALIAS_RTNL_LINK("ip6gre");
 MODULE_ALIAS_RTNL_LINK("ip6gretap");
+MODULE_ALIAS_RTNL_LINK("ip6erspan");
 MODULE_ALIAS_NETDEV("ip6gre0");
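
With this in place, an ip6erspan device can be created from user space
much like ip6gretap. Assuming an iproute2 build with matching support,
something along the lines of "ip link add dev ip6erspan1 type ip6erspan
seq key 10 local fc00::1 remote fc00::2 erspan_ver 2 erspan_dir egress
erspan_hwid 7" sets up a native ERSPAN v2 tunnel, while passing
"external" in place of the local/remote/key arguments selects collect_md
mode for use with tc or OVS tunnel metadata.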
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
index 688ba5f..18547a4 100644
--- a/net/ipv6/ip6_output.c
+++ b/net/ipv6/ip6_output.c
@@ -138,6 +138,14 @@ static int ip6_finish_output(struct net *net, struct sock *sk, struct sk_buff *s
 		return ret;
 	}
 
+#if defined(CONFIG_NETFILTER) && defined(CONFIG_XFRM)
+	/* Policy lookup after SNAT yielded a new policy */
+	if (skb_dst(skb)->xfrm) {
+		IPCB(skb)->flags |= IPSKB_REROUTED;
+		return dst_output(net, sk, skb);
+	}
+#endif
+
 	if ((skb->len > ip6_skb_dst_mtu(skb) && !skb_is_gso(skb)) ||
 	    dst_allfrag(skb_dst(skb)) ||
 	    (IP6CB(skb)->frag_max_size && skb->len > IP6CB(skb)->frag_max_size))
@@ -370,7 +378,7 @@ static inline int ip6_forward_finish(struct net *net, struct sock *sk,
 	return dst_output(net, sk, skb);
 }
 
-static unsigned int ip6_dst_mtu_forward(const struct dst_entry *dst)
+unsigned int ip6_dst_mtu_forward(const struct dst_entry *dst)
 {
 	unsigned int mtu;
 	struct inet6_dev *idev;
@@ -390,6 +398,7 @@ static unsigned int ip6_dst_mtu_forward(const struct dst_entry *dst)
 
 	return mtu;
 }
+EXPORT_SYMBOL_GPL(ip6_dst_mtu_forward);
 
 static bool ip6_pkt_too_big(const struct sk_buff *skb, unsigned int mtu)
 {
@@ -1209,13 +1218,13 @@ static int ip6_setup_cork(struct sock *sk, struct inet_cork_full *cork,
 		      rt->dst.dev->mtu : dst_mtu(&rt->dst);
 	else
 		mtu = np->pmtudisc >= IPV6_PMTUDISC_PROBE ?
-		      rt->dst.dev->mtu : dst_mtu(rt->dst.path);
+		      rt->dst.dev->mtu : dst_mtu(xfrm_dst_path(&rt->dst));
 	if (np->frag_size < mtu) {
 		if (np->frag_size)
 			mtu = np->frag_size;
 	}
 	cork->base.fragsize = mtu;
-	if (dst_allfrag(rt->dst.path))
+	if (dst_allfrag(xfrm_dst_path(&rt->dst)))
 		cork->base.flags |= IPCORK_ALLFRAG;
 	cork->base.length = 0;
 
diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
index 9a7cf35..8071f42 100644
--- a/net/ipv6/ip6_tunnel.c
+++ b/net/ipv6/ip6_tunnel.c
@@ -861,7 +861,7 @@ int ip6_tnl_rcv(struct ip6_tnl *t, struct sk_buff *skb,
 		struct metadata_dst *tun_dst,
 		bool log_ecn_err)
 {
-	return __ip6_tnl_rcv(t, skb, tpi, NULL, ip6ip6_dscp_ecn_decapsulate,
+	return __ip6_tnl_rcv(t, skb, tpi, tun_dst, ip6ip6_dscp_ecn_decapsulate,
 			     log_ecn_err);
 }
 EXPORT_SYMBOL(ip6_tnl_rcv);
@@ -979,6 +979,9 @@ int ip6_tnl_xmit_ctl(struct ip6_tnl *t,
 	int ret = 0;
 	struct net *net = t->net;
 
+	if (t->parms.collect_md)
+		return 1;
+
 	if ((p->flags & IP6_TNL_F_CAP_XMIT) ||
 	    ((p->flags & IP6_TNL_F_CAP_PER_PACKET) &&
 	     (ip6_tnl_get_cap(t, laddr, raddr) & IP6_TNL_F_CAP_XMIT))) {
diff --git a/net/ipv6/ip6_vti.c b/net/ipv6/ip6_vti.c
index dbb74f3..18caa95 100644
--- a/net/ipv6/ip6_vti.c
+++ b/net/ipv6/ip6_vti.c
@@ -626,6 +626,7 @@ static void vti6_link_config(struct ip6_tnl *t)
 {
 	struct net_device *dev = t->dev;
 	struct __ip6_tnl_parm *p = &t->parms;
+	struct net_device *tdev = NULL;
 
 	memcpy(dev->dev_addr, &p->laddr, sizeof(struct in6_addr));
 	memcpy(dev->broadcast, &p->raddr, sizeof(struct in6_addr));
@@ -638,6 +639,25 @@ static void vti6_link_config(struct ip6_tnl *t)
 		dev->flags |= IFF_POINTOPOINT;
 	else
 		dev->flags &= ~IFF_POINTOPOINT;
+
+	if (p->flags & IP6_TNL_F_CAP_XMIT) {
+		int strict = (ipv6_addr_type(&p->raddr) &
+			      (IPV6_ADDR_MULTICAST | IPV6_ADDR_LINKLOCAL));
+		struct rt6_info *rt = rt6_lookup(t->net,
+						 &p->raddr, &p->laddr,
+						 p->link, strict);
+
+		if (rt)
+			tdev = rt->dst.dev;
+		ip6_rt_put(rt);
+	}
+
+	if (!tdev && p->link)
+		tdev = __dev_get_by_index(t->net, p->link);
+
+	if (tdev)
+		dev->mtu = max_t(int, tdev->mtu - dev->hard_header_len,
+				 IPV6_MIN_MTU);
 }
 
 /**
diff --git a/net/ipv6/ip6mr.c b/net/ipv6/ip6mr.c
index a2e1a86..890f9bda 100644
--- a/net/ipv6/ip6mr.c
+++ b/net/ipv6/ip6mr.c
@@ -1425,10 +1425,13 @@ int __init ip6_mr_init(void)
 		goto add_proto_fail;
 	}
 #endif
-	rtnl_register(RTNL_FAMILY_IP6MR, RTM_GETROUTE, NULL,
-		      ip6mr_rtm_dumproute, 0);
-	return 0;
+	err = rtnl_register_module(THIS_MODULE, RTNL_FAMILY_IP6MR, RTM_GETROUTE,
+				   NULL, ip6mr_rtm_dumproute, 0);
+	if (err == 0)
+		return 0;
+
 #ifdef CONFIG_IPV6_PIMSM_V2
+	inet6_del_protocol(&pim6_protocol, IPPROTO_PIM);
 add_proto_fail:
 	unregister_netdevice_notifier(&ip6_mr_notifier);
 #endif
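
Unlike the old rtnl_register(), which could only warn on failure,
rtnl_register_module() hands the error back so module init can unwind
cleanly; that is why the PIM protocol registration now gets undone on the
failure path. The general shape (sketch; the unwind label is whatever the
module already uses):

	err = rtnl_register_module(THIS_MODULE, RTNL_FAMILY_IP6MR,
				   RTM_GETROUTE, NULL,
				   ip6mr_rtm_dumproute, 0);
	if (err)
		goto unwind_registrations;
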
diff --git a/net/ipv6/netfilter.c b/net/ipv6/netfilter.c
index 39970e2..d95ceca 100644
--- a/net/ipv6/netfilter.c
+++ b/net/ipv6/netfilter.c
@@ -68,32 +68,7 @@ int ip6_route_me_harder(struct net *net, struct sk_buff *skb)
 }
 EXPORT_SYMBOL(ip6_route_me_harder);
 
-/*
- * Extra routing may needed on local out, as the QUEUE target never
- * returns control to the table.
- */
-
-struct ip6_rt_info {
-	struct in6_addr daddr;
-	struct in6_addr saddr;
-	u_int32_t mark;
-};
-
-static void nf_ip6_saveroute(const struct sk_buff *skb,
-			     struct nf_queue_entry *entry)
-{
-	struct ip6_rt_info *rt_info = nf_queue_entry_reroute(entry);
-
-	if (entry->state.hook == NF_INET_LOCAL_OUT) {
-		const struct ipv6hdr *iph = ipv6_hdr(skb);
-
-		rt_info->daddr = iph->daddr;
-		rt_info->saddr = iph->saddr;
-		rt_info->mark = skb->mark;
-	}
-}
-
-static int nf_ip6_reroute(struct net *net, struct sk_buff *skb,
+static int nf_ip6_reroute(struct sk_buff *skb,
 			  const struct nf_queue_entry *entry)
 {
 	struct ip6_rt_info *rt_info = nf_queue_entry_reroute(entry);
@@ -103,7 +78,7 @@ static int nf_ip6_reroute(struct net *net, struct sk_buff *skb,
 		if (!ipv6_addr_equal(&iph->daddr, &rt_info->daddr) ||
 		    !ipv6_addr_equal(&iph->saddr, &rt_info->saddr) ||
 		    skb->mark != rt_info->mark)
-			return ip6_route_me_harder(net, skb);
+			return ip6_route_me_harder(entry->state.net, skb);
 	}
 	return 0;
 }
@@ -190,25 +165,19 @@ static __sum16 nf_ip6_checksum_partial(struct sk_buff *skb, unsigned int hook,
 };
 
 static const struct nf_ipv6_ops ipv6ops = {
-	.chk_addr	= ipv6_chk_addr,
-	.route_input    = ip6_route_input,
-	.fragment	= ip6_fragment
-};
-
-static const struct nf_afinfo nf_ip6_afinfo = {
-	.family			= AF_INET6,
+	.chk_addr		= ipv6_chk_addr,
+	.route_input		= ip6_route_input,
+	.fragment		= ip6_fragment,
 	.checksum		= nf_ip6_checksum,
 	.checksum_partial	= nf_ip6_checksum_partial,
 	.route			= nf_ip6_route,
-	.saveroute		= nf_ip6_saveroute,
 	.reroute		= nf_ip6_reroute,
-	.route_key_size		= sizeof(struct ip6_rt_info),
 };
 
 int __init ipv6_netfilter_init(void)
 {
 	RCU_INIT_POINTER(nf_ipv6_ops, &ipv6ops);
-	return nf_register_afinfo(&nf_ip6_afinfo);
+	return 0;
 }
 
 /* This can be called from inet6_init() on errors, so it cannot
@@ -217,5 +186,4 @@ int __init ipv6_netfilter_init(void)
 void ipv6_netfilter_fini(void)
 {
 	RCU_INIT_POINTER(nf_ipv6_ops, NULL);
-	nf_unregister_afinfo(&nf_ip6_afinfo);
 }
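
Everything the removed nf_afinfo carried now lives in nf_ipv6_ops, which
callers reach under RCU through the existing nf_get_ipv6_ops() accessor
from netfilter_ipv6.h. A minimal sketch of a caller (assuming it runs in
an RCU read-side section):

	const struct nf_ipv6_ops *v6ops = nf_get_ipv6_ops();

	if (v6ops)
		v6ops->route_input(skb);
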
diff --git a/net/ipv6/netfilter/Kconfig b/net/ipv6/netfilter/Kconfig
index 6acb2ee..806e953 100644
--- a/net/ipv6/netfilter/Kconfig
+++ b/net/ipv6/netfilter/Kconfig
@@ -71,6 +71,14 @@
 endif # NF_TABLES_IPV6
 endif # NF_TABLES
 
+config NF_FLOW_TABLE_IPV6
+	select NF_FLOW_TABLE
+	tristate "Netfilter flow table IPv6 module"
+	help
+	  This option adds IPv6 flow table support.
+
+	  To compile it as a module, choose M here.
+
 config NF_DUP_IPV6
 	tristate "Netfilter IPv6 packet duplication to alternate destination"
 	depends on !NF_CONNTRACK || NF_CONNTRACK
diff --git a/net/ipv6/netfilter/Makefile b/net/ipv6/netfilter/Makefile
index c6ee0cd..95611c4 100644
--- a/net/ipv6/netfilter/Makefile
+++ b/net/ipv6/netfilter/Makefile
@@ -45,6 +45,9 @@
 obj-$(CONFIG_NFT_DUP_IPV6) += nft_dup_ipv6.o
 obj-$(CONFIG_NFT_FIB_IPV6) += nft_fib_ipv6.o
 
+# flow table support
+obj-$(CONFIG_NF_FLOW_TABLE_IPV6) += nf_flow_table_ipv6.o
+
 # matches
 obj-$(CONFIG_IP6_NF_MATCH_AH) += ip6t_ah.o
 obj-$(CONFIG_IP6_NF_MATCH_EUI64) += ip6t_eui64.o
diff --git a/net/ipv6/netfilter/ip6_tables.c b/net/ipv6/netfilter/ip6_tables.c
index 1d7ae93..6ebbef2 100644
--- a/net/ipv6/netfilter/ip6_tables.c
+++ b/net/ipv6/netfilter/ip6_tables.c
@@ -991,9 +991,8 @@ static int get_info(struct net *net, void __user *user,
 	if (compat)
 		xt_compat_lock(AF_INET6);
 #endif
-	t = try_then_request_module(xt_find_table_lock(net, AF_INET6, name),
-				    "ip6table_%s", name);
-	if (t) {
+	t = xt_request_find_table_lock(net, AF_INET6, name);
+	if (!IS_ERR(t)) {
 		struct ip6t_getinfo info;
 		const struct xt_table_info *private = t->private;
 #ifdef CONFIG_COMPAT
@@ -1023,7 +1022,7 @@ static int get_info(struct net *net, void __user *user,
 		xt_table_unlock(t);
 		module_put(t->me);
 	} else
-		ret = -ENOENT;
+		ret = PTR_ERR(t);
 #ifdef CONFIG_COMPAT
 	if (compat)
 		xt_compat_unlock(AF_INET6);
@@ -1049,7 +1048,7 @@ get_entries(struct net *net, struct ip6t_get_entries __user *uptr,
 	get.name[sizeof(get.name) - 1] = '\0';
 
 	t = xt_find_table_lock(net, AF_INET6, get.name);
-	if (t) {
+	if (!IS_ERR(t)) {
 		struct xt_table_info *private = t->private;
 		if (get.size == private->size)
 			ret = copy_entries_to_user(private->size,
@@ -1060,7 +1059,7 @@ get_entries(struct net *net, struct ip6t_get_entries __user *uptr,
 		module_put(t->me);
 		xt_table_unlock(t);
 	} else
-		ret = -ENOENT;
+		ret = PTR_ERR(t);
 
 	return ret;
 }
@@ -1083,10 +1082,9 @@ __do_replace(struct net *net, const char *name, unsigned int valid_hooks,
 		goto out;
 	}
 
-	t = try_then_request_module(xt_find_table_lock(net, AF_INET6, name),
-				    "ip6table_%s", name);
-	if (!t) {
-		ret = -ENOENT;
+	t = xt_request_find_table_lock(net, AF_INET6, name);
+	if (IS_ERR(t)) {
+		ret = PTR_ERR(t);
 		goto free_newinfo_counters_untrans;
 	}
 
@@ -1199,8 +1197,8 @@ do_add_counters(struct net *net, const void __user *user, unsigned int len,
 	if (IS_ERR(paddc))
 		return PTR_ERR(paddc);
 	t = xt_find_table_lock(net, AF_INET6, tmp.name);
-	if (!t) {
-		ret = -ENOENT;
+	if (IS_ERR(t)) {
+		ret = PTR_ERR(t);
 		goto free;
 	}
 
@@ -1636,7 +1634,7 @@ compat_get_entries(struct net *net, struct compat_ip6t_get_entries __user *uptr,
 
 	xt_compat_lock(AF_INET6);
 	t = xt_find_table_lock(net, AF_INET6, get.name);
-	if (t) {
+	if (!IS_ERR(t)) {
 		const struct xt_table_info *private = t->private;
 		struct xt_table_info info;
 		ret = compat_table_info(private, &info);
@@ -1650,7 +1648,7 @@ compat_get_entries(struct net *net, struct compat_ip6t_get_entries __user *uptr,
 		module_put(t->me);
 		xt_table_unlock(t);
 	} else
-		ret = -ENOENT;
+		ret = PTR_ERR(t);
 
 	xt_compat_unlock(AF_INET6);
 	return ret;
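
All of the lookup sites above follow the same conversion: the xt table
lookup helpers now return ERR_PTR() codes instead of NULL, so callers
propagate the precise error rather than a blanket -ENOENT. The pattern,
in sketch form:

	t = xt_request_find_table_lock(net, AF_INET6, name);
	if (IS_ERR(t))
		return PTR_ERR(t);	/* -ENOENT, or a modprobe failure */
	/* ... use the table ... */
	module_put(t->me);
	xt_table_unlock(t);
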
diff --git a/net/ipv6/netfilter/ip6table_mangle.c b/net/ipv6/netfilter/ip6table_mangle.c
index 2b1a9dc..b0524b1 100644
--- a/net/ipv6/netfilter/ip6table_mangle.c
+++ b/net/ipv6/netfilter/ip6table_mangle.c
@@ -42,14 +42,6 @@ ip6t_mangle_out(struct sk_buff *skb, const struct nf_hook_state *state)
 	u_int8_t hop_limit;
 	u_int32_t flowlabel, mark;
 	int err;
-#if 0
-	/* root is playing with raw sockets. */
-	if (skb->len < sizeof(struct iphdr) ||
-	    ip_hdrlen(skb) < sizeof(struct iphdr)) {
-		net_warn_ratelimited("ip6t_hook: happy cracking\n");
-		return NF_ACCEPT;
-	}
-#endif
 
 	/* save source/dest address, mark, hoplimit, flowlabel, priority,  */
 	memcpy(&saddr, &ipv6_hdr(skb)->saddr, sizeof(saddr));
diff --git a/net/ipv6/netfilter/ip6table_nat.c b/net/ipv6/netfilter/ip6table_nat.c
index 9915125..47306e4 100644
--- a/net/ipv6/netfilter/ip6table_nat.c
+++ b/net/ipv6/netfilter/ip6table_nat.c
@@ -74,6 +74,7 @@ static const struct nf_hook_ops nf_nat_ipv6_ops[] = {
 	{
 		.hook		= ip6table_nat_in,
 		.pf		= NFPROTO_IPV6,
+		.nat_hook	= true,
 		.hooknum	= NF_INET_PRE_ROUTING,
 		.priority	= NF_IP6_PRI_NAT_DST,
 	},
@@ -81,6 +82,7 @@ static const struct nf_hook_ops nf_nat_ipv6_ops[] = {
 	{
 		.hook		= ip6table_nat_out,
 		.pf		= NFPROTO_IPV6,
+		.nat_hook	= true,
 		.hooknum	= NF_INET_POST_ROUTING,
 		.priority	= NF_IP6_PRI_NAT_SRC,
 	},
@@ -88,12 +90,14 @@ static const struct nf_hook_ops nf_nat_ipv6_ops[] = {
 	{
 		.hook		= ip6table_nat_local_fn,
 		.pf		= NFPROTO_IPV6,
+		.nat_hook	= true,
 		.hooknum	= NF_INET_LOCAL_OUT,
 		.priority	= NF_IP6_PRI_NAT_DST,
 	},
 	/* After packet filtering, change source */
 	{
 		.hook		= ip6table_nat_fn,
+		.nat_hook	= true,
 		.pf		= NFPROTO_IPV6,
 		.hooknum	= NF_INET_LOCAL_IN,
 		.priority	= NF_IP6_PRI_NAT_SRC,
diff --git a/net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c b/net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c
index 3b80a38..11a313f 100644
--- a/net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c
+++ b/net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c
@@ -176,11 +176,6 @@ static unsigned int ipv6_conntrack_local(void *priv,
 					 struct sk_buff *skb,
 					 const struct nf_hook_state *state)
 {
-	/* root is playing with raw sockets. */
-	if (skb->len < sizeof(struct ipv6hdr)) {
-		net_notice_ratelimited("ipv6_conntrack_local: packet too short\n");
-		return NF_ACCEPT;
-	}
 	return nf_conntrack_in(state->net, PF_INET6, state->hook, skb);
 }
 
@@ -368,7 +363,7 @@ static struct nf_sockopt_ops so_getorigdst6 = {
 	.owner		= THIS_MODULE,
 };
 
-static struct nf_conntrack_l4proto *builtin_l4proto6[] = {
+static const struct nf_conntrack_l4proto * const builtin_l4proto6[] = {
 	&nf_conntrack_l4proto_tcp6,
 	&nf_conntrack_l4proto_udp6,
 	&nf_conntrack_l4proto_icmpv6,
diff --git a/net/ipv6/netfilter/nf_conntrack_proto_icmpv6.c b/net/ipv6/netfilter/nf_conntrack_proto_icmpv6.c
index 3ac0d82..2548e2c 100644
--- a/net/ipv6/netfilter/nf_conntrack_proto_icmpv6.c
+++ b/net/ipv6/netfilter/nf_conntrack_proto_icmpv6.c
@@ -27,7 +27,7 @@
 #include <net/netfilter/ipv6/nf_conntrack_icmpv6.h>
 #include <net/netfilter/nf_log.h>
 
-static unsigned int nf_ct_icmpv6_timeout __read_mostly = 30*HZ;
+static const unsigned int nf_ct_icmpv6_timeout = 30*HZ;
 
 static inline struct nf_icmp_net *icmpv6_pernet(struct net *net)
 {
@@ -352,7 +352,7 @@ static struct nf_proto_net *icmpv6_get_net_proto(struct net *net)
 	return &net->ct.nf_ct_proto.icmpv6.pn;
 }
 
-struct nf_conntrack_l4proto nf_conntrack_l4proto_icmpv6 __read_mostly =
+const struct nf_conntrack_l4proto nf_conntrack_l4proto_icmpv6 =
 {
 	.l3proto		= PF_INET6,
 	.l4proto		= IPPROTO_ICMPV6,
diff --git a/net/ipv6/netfilter/nf_flow_table_ipv6.c b/net/ipv6/netfilter/nf_flow_table_ipv6.c
new file mode 100644
index 0000000..0c3b9d3
--- /dev/null
+++ b/net/ipv6/netfilter/nf_flow_table_ipv6.c
@@ -0,0 +1,278 @@
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/netfilter.h>
+#include <linux/rhashtable.h>
+#include <linux/ipv6.h>
+#include <linux/netdevice.h>
+#include <net/ipv6.h>
+#include <net/ip6_route.h>
+#include <net/neighbour.h>
+#include <net/netfilter/nf_flow_table.h>
+#include <net/netfilter/nf_tables.h>
+/* For layer 4 checksum field offset. */
+#include <linux/tcp.h>
+#include <linux/udp.h>
+
+static int nf_flow_nat_ipv6_tcp(struct sk_buff *skb, unsigned int thoff,
+				struct in6_addr *addr,
+				struct in6_addr *new_addr)
+{
+	struct tcphdr *tcph;
+
+	if (!pskb_may_pull(skb, thoff + sizeof(*tcph)) ||
+	    skb_try_make_writable(skb, thoff + sizeof(*tcph)))
+		return -1;
+
+	tcph = (void *)(skb_network_header(skb) + thoff);
+	inet_proto_csum_replace16(&tcph->check, skb, addr->s6_addr32,
+				  new_addr->s6_addr32, true);
+
+	return 0;
+}
+
+static int nf_flow_nat_ipv6_udp(struct sk_buff *skb, unsigned int thoff,
+				struct in6_addr *addr,
+				struct in6_addr *new_addr)
+{
+	struct udphdr *udph;
+
+	if (!pskb_may_pull(skb, thoff + sizeof(*udph)) ||
+	    skb_try_make_writable(skb, thoff + sizeof(*udph)))
+		return -1;
+
+	udph = (void *)(skb_network_header(skb) + thoff);
+	if (udph->check || skb->ip_summed == CHECKSUM_PARTIAL) {
+		inet_proto_csum_replace16(&udph->check, skb, addr->s6_addr32,
+					  new_addr->s6_addr32, true);
+		if (!udph->check)
+			udph->check = CSUM_MANGLED_0;
+	}
+
+	return 0;
+}
+
+static int nf_flow_nat_ipv6_l4proto(struct sk_buff *skb, struct ipv6hdr *ip6h,
+				    unsigned int thoff, struct in6_addr *addr,
+				    struct in6_addr *new_addr)
+{
+	switch (ip6h->nexthdr) {
+	case IPPROTO_TCP:
+		if (nf_flow_nat_ipv6_tcp(skb, thoff, addr, new_addr) < 0)
+			return NF_DROP;
+		break;
+	case IPPROTO_UDP:
+		if (nf_flow_nat_ipv6_udp(skb, thoff, addr, new_addr) < 0)
+			return NF_DROP;
+		break;
+	}
+
+	return 0;
+}
+
+static int nf_flow_snat_ipv6(const struct flow_offload *flow,
+			     struct sk_buff *skb, struct ipv6hdr *ip6h,
+			     unsigned int thoff,
+			     enum flow_offload_tuple_dir dir)
+{
+	struct in6_addr addr, new_addr;
+
+	switch (dir) {
+	case FLOW_OFFLOAD_DIR_ORIGINAL:
+		addr = ip6h->saddr;
+		new_addr = flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].tuple.dst_v6;
+		ip6h->saddr = new_addr;
+		break;
+	case FLOW_OFFLOAD_DIR_REPLY:
+		addr = ip6h->daddr;
+		new_addr = flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple.src_v6;
+		ip6h->daddr = new_addr;
+		break;
+	default:
+		return -1;
+	}
+
+	return nf_flow_nat_ipv6_l4proto(skb, ip6h, thoff, &addr, &new_addr);
+}
+
+static int nf_flow_dnat_ipv6(const struct flow_offload *flow,
+			     struct sk_buff *skb, struct ipv6hdr *ip6h,
+			     unsigned int thoff,
+			     enum flow_offload_tuple_dir dir)
+{
+	struct in6_addr addr, new_addr;
+
+	switch (dir) {
+	case FLOW_OFFLOAD_DIR_ORIGINAL:
+		addr = ip6h->daddr;
+		new_addr = flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].tuple.src_v6;
+		ip6h->daddr = new_addr;
+		break;
+	case FLOW_OFFLOAD_DIR_REPLY:
+		addr = ip6h->saddr;
+		new_addr = flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple.dst_v6;
+		ip6h->saddr = new_addr;
+		break;
+	default:
+		return -1;
+	}
+
+	return nf_flow_nat_ipv6_l4proto(skb, ip6h, thoff, &addr, &new_addr);
+}
+
+static int nf_flow_nat_ipv6(const struct flow_offload *flow,
+			    struct sk_buff *skb,
+			    enum flow_offload_tuple_dir dir)
+{
+	struct ipv6hdr *ip6h = ipv6_hdr(skb);
+	unsigned int thoff = sizeof(*ip6h);
+
+	if (flow->flags & FLOW_OFFLOAD_SNAT &&
+	    (nf_flow_snat_port(flow, skb, thoff, ip6h->nexthdr, dir) < 0 ||
+	     nf_flow_snat_ipv6(flow, skb, ip6h, thoff, dir) < 0))
+		return -1;
+	if (flow->flags & FLOW_OFFLOAD_DNAT &&
+	    (nf_flow_dnat_port(flow, skb, thoff, ip6h->nexthdr, dir) < 0 ||
+	     nf_flow_dnat_ipv6(flow, skb, ip6h, thoff, dir) < 0))
+		return -1;
+
+	return 0;
+}
+
+static int nf_flow_tuple_ipv6(struct sk_buff *skb, const struct net_device *dev,
+			      struct flow_offload_tuple *tuple)
+{
+	struct flow_ports *ports;
+	struct ipv6hdr *ip6h;
+	unsigned int thoff;
+
+	if (!pskb_may_pull(skb, sizeof(*ip6h)))
+		return -1;
+
+	ip6h = ipv6_hdr(skb);
+
+	if (ip6h->nexthdr != IPPROTO_TCP &&
+	    ip6h->nexthdr != IPPROTO_UDP)
+		return -1;
+
+	thoff = sizeof(*ip6h);
+	if (!pskb_may_pull(skb, thoff + sizeof(*ports)))
+		return -1;
+
+	ports = (struct flow_ports *)(skb_network_header(skb) + thoff);
+
+	tuple->src_v6		= ip6h->saddr;
+	tuple->dst_v6		= ip6h->daddr;
+	tuple->src_port		= ports->source;
+	tuple->dst_port		= ports->dest;
+	tuple->l3proto		= AF_INET6;
+	tuple->l4proto		= ip6h->nexthdr;
+	tuple->iifidx		= dev->ifindex;
+
+	return 0;
+}
+
+/* Based on ip_exceeds_mtu(). */
+static bool __nf_flow_exceeds_mtu(const struct sk_buff *skb, unsigned int mtu)
+{
+	if (skb->len <= mtu)
+		return false;
+
+	if (skb_is_gso(skb) && skb_gso_validate_mtu(skb, mtu))
+		return false;
+
+	return true;
+}
+
+static bool nf_flow_exceeds_mtu(struct sk_buff *skb, const struct rt6_info *rt)
+{
+	u32 mtu;
+
+	mtu = ip6_dst_mtu_forward(&rt->dst);
+	if (__nf_flow_exceeds_mtu(skb, mtu))
+		return true;
+
+	return false;
+}
+
+unsigned int
+nf_flow_offload_ipv6_hook(void *priv, struct sk_buff *skb,
+			  const struct nf_hook_state *state)
+{
+	struct flow_offload_tuple_rhash *tuplehash;
+	struct nf_flowtable *flow_table = priv;
+	struct flow_offload_tuple tuple = {};
+	enum flow_offload_tuple_dir dir;
+	struct flow_offload *flow;
+	struct net_device *outdev;
+	struct in6_addr *nexthop;
+	struct ipv6hdr *ip6h;
+	struct rt6_info *rt;
+
+	if (skb->protocol != htons(ETH_P_IPV6))
+		return NF_ACCEPT;
+
+	if (nf_flow_tuple_ipv6(skb, state->in, &tuple) < 0)
+		return NF_ACCEPT;
+
+	tuplehash = flow_offload_lookup(flow_table, &tuple);
+	if (tuplehash == NULL)
+		return NF_ACCEPT;
+
+	outdev = dev_get_by_index_rcu(state->net, tuplehash->tuple.oifidx);
+	if (!outdev)
+		return NF_ACCEPT;
+
+	dir = tuplehash->tuple.dir;
+	flow = container_of(tuplehash, struct flow_offload, tuplehash[dir]);
+
+	rt = (struct rt6_info *)flow->tuplehash[dir].tuple.dst_cache;
+	if (unlikely(nf_flow_exceeds_mtu(skb, rt)))
+		return NF_ACCEPT;
+
+	if (skb_try_make_writable(skb, sizeof(*ip6h)))
+		return NF_DROP;
+
+	if (flow->flags & (FLOW_OFFLOAD_SNAT | FLOW_OFFLOAD_DNAT) &&
+	    nf_flow_nat_ipv6(flow, skb, dir) < 0)
+		return NF_DROP;
+
+	flow->timeout = (u32)jiffies + NF_FLOW_TIMEOUT;
+	ip6h = ipv6_hdr(skb);
+	ip6h->hop_limit--;
+
+	skb->dev = outdev;
+	nexthop = rt6_nexthop(rt, &flow->tuplehash[!dir].tuple.src_v6);
+	neigh_xmit(NEIGH_ND_TABLE, outdev, nexthop, skb);
+
+	return NF_STOLEN;
+}
+EXPORT_SYMBOL_GPL(nf_flow_offload_ipv6_hook);
+
+static struct nf_flowtable_type flowtable_ipv6 = {
+	.family		= NFPROTO_IPV6,
+	.params		= &nf_flow_offload_rhash_params,
+	.gc		= nf_flow_offload_work_gc,
+	.hook		= nf_flow_offload_ipv6_hook,
+	.owner		= THIS_MODULE,
+};
+
+static int __init nf_flow_ipv6_module_init(void)
+{
+	nft_register_flowtable_type(&flowtable_ipv6);
+
+	return 0;
+}
+
+static void __exit nf_flow_ipv6_module_exit(void)
+{
+	nft_unregister_flowtable_type(&flowtable_ipv6);
+}
+
+module_init(nf_flow_ipv6_module_init);
+module_exit(nf_flow_ipv6_module_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Pablo Neira Ayuso <pablo@netfilter.org>");
+MODULE_ALIAS_NF_FLOWTABLE(AF_INET6);
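
The snat/dnat helpers above rewrite one address in place and then hand the
old/new pair down to nf_flow_nat_ipv6_l4proto() so the transport checksum can
be patched incrementally. That helper sits above this excerpt; a minimal
sketch of what its TCP case presumably amounts to, using the generic
inet_proto_csum_replace16() helper (the function name here is illustrative,
not the kernel's):

    /* Sketch only: repair the TCP checksum after an IPv6 address was
     * rewritten in place. inet_proto_csum_replace16() folds the old and
     * new addresses into the existing checksum, so the payload is never
     * re-summed.
     */
    static int example_tcp_csum_fixup(struct sk_buff *skb, unsigned int thoff,
                                      struct in6_addr *addr,
                                      struct in6_addr *new_addr)
    {
            struct tcphdr *tcph;

            if (!pskb_may_pull(skb, thoff + sizeof(*tcph)) ||
                skb_try_make_writable(skb, thoff + sizeof(*tcph)))
                    return -1;

            tcph = (void *)(skb_network_header(skb) + thoff);
            inet_proto_csum_replace16(&tcph->check, skb, addr->s6_addr32,
                                      new_addr->s6_addr32, true);
            return 0;
    }
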
diff --git a/net/ipv6/netfilter/nf_nat_l3proto_ipv6.c b/net/ipv6/netfilter/nf_nat_l3proto_ipv6.c
index 1d2fb92..bed57ee 100644
--- a/net/ipv6/netfilter/nf_nat_l3proto_ipv6.c
+++ b/net/ipv6/netfilter/nf_nat_l3proto_ipv6.c
@@ -369,10 +369,6 @@ nf_nat_ipv6_out(void *priv, struct sk_buff *skb,
 #endif
 	unsigned int ret;
 
-	/* root is playing with raw sockets. */
-	if (skb->len < sizeof(struct ipv6hdr))
-		return NF_ACCEPT;
-
 	ret = nf_nat_ipv6_fn(priv, skb, state, do_chain);
 #ifdef CONFIG_XFRM
 	if (ret != NF_DROP && ret != NF_STOLEN &&
@@ -408,10 +404,6 @@ nf_nat_ipv6_local_fn(void *priv, struct sk_buff *skb,
 	unsigned int ret;
 	int err;
 
-	/* root is playing with raw sockets. */
-	if (skb->len < sizeof(struct ipv6hdr))
-		return NF_ACCEPT;
-
 	ret = nf_nat_ipv6_fn(priv, skb, state, do_chain);
 	if (ret != NF_DROP && ret != NF_STOLEN &&
 	    (ct = nf_ct_get(skb, &ctinfo)) != NULL) {
diff --git a/net/ipv6/netfilter/nf_tables_ipv6.c b/net/ipv6/netfilter/nf_tables_ipv6.c
index d6e4ba5..9cd45b9 100644
--- a/net/ipv6/netfilter/nf_tables_ipv6.c
+++ b/net/ipv6/netfilter/nf_tables_ipv6.c
@@ -22,39 +22,17 @@ static unsigned int nft_do_chain_ipv6(void *priv,
 {
 	struct nft_pktinfo pkt;
 
-	nft_set_pktinfo_ipv6(&pkt, skb, state);
+	nft_set_pktinfo(&pkt, skb, state);
+	nft_set_pktinfo_ipv6(&pkt, skb);
 
 	return nft_do_chain(&pkt, priv);
 }
 
-static unsigned int nft_ipv6_output(void *priv,
-				    struct sk_buff *skb,
-				    const struct nf_hook_state *state)
-{
-	if (unlikely(skb->len < sizeof(struct ipv6hdr))) {
-		if (net_ratelimit())
-			pr_info("nf_tables_ipv6: ignoring short SOCK_RAW "
-				"packet\n");
-		return NF_ACCEPT;
-	}
-
-	return nft_do_chain_ipv6(priv, skb, state);
-}
-
-struct nft_af_info nft_af_ipv6 __read_mostly = {
+static struct nft_af_info nft_af_ipv6 __read_mostly = {
 	.family		= NFPROTO_IPV6,
 	.nhooks		= NF_INET_NUMHOOKS,
 	.owner		= THIS_MODULE,
-	.nops		= 1,
-	.hooks		= {
-		[NF_INET_LOCAL_IN]	= nft_do_chain_ipv6,
-		[NF_INET_LOCAL_OUT]	= nft_ipv6_output,
-		[NF_INET_FORWARD]	= nft_do_chain_ipv6,
-		[NF_INET_PRE_ROUTING]	= nft_do_chain_ipv6,
-		[NF_INET_POST_ROUTING]	= nft_do_chain_ipv6,
-	},
 };
-EXPORT_SYMBOL_GPL(nft_af_ipv6);
 
 static int nf_tables_ipv6_init_net(struct net *net)
 {
@@ -94,6 +72,13 @@ static const struct nf_chain_type filter_ipv6 = {
 			  (1 << NF_INET_FORWARD) |
 			  (1 << NF_INET_PRE_ROUTING) |
 			  (1 << NF_INET_POST_ROUTING),
+	.hooks		= {
+		[NF_INET_LOCAL_IN]	= nft_do_chain_ipv6,
+		[NF_INET_LOCAL_OUT]	= nft_do_chain_ipv6,
+		[NF_INET_FORWARD]	= nft_do_chain_ipv6,
+		[NF_INET_PRE_ROUTING]	= nft_do_chain_ipv6,
+		[NF_INET_POST_ROUTING]	= nft_do_chain_ipv6,
+	},
 };
 
 static int __init nf_tables_ipv6_init(void)
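
Three related cleanups meet in this file. The per-family nft_af_info loses its
hooks array and nops count: hook entry points now live in the chain type (see
the filter_ipv6 hunk above), which is what lets nft_af_ipv6 become static and
unexported. Second, pktinfo setup is split: the generic nft_set_pktinfo()
stores the skb and hook state, and the family-specific nft_set_pktinfo_ipv6()
only parses the IPv6 header. Third, the short-skb escape hatches for raw
sockets ("root is playing with raw sockets") are dropped here and in
nf_nat_l3proto_ipv6.c above, on the premise that the output path already
refuses to hand the hooks a packet shorter than a full IPv6 header.
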
diff --git a/net/ipv6/netfilter/nft_chain_nat_ipv6.c b/net/ipv6/netfilter/nft_chain_nat_ipv6.c
index 443cd30..73fe2bd 100644
--- a/net/ipv6/netfilter/nft_chain_nat_ipv6.c
+++ b/net/ipv6/netfilter/nft_chain_nat_ipv6.c
@@ -31,7 +31,8 @@ static unsigned int nft_nat_do_chain(void *priv,
 {
 	struct nft_pktinfo pkt;
 
-	nft_set_pktinfo_ipv6(&pkt, skb, state);
+	nft_set_pktinfo(&pkt, skb, state);
+	nft_set_pktinfo_ipv6(&pkt, skb);
 
 	return nft_do_chain(&pkt, priv);
 }
diff --git a/net/ipv6/netfilter/nft_chain_route_ipv6.c b/net/ipv6/netfilter/nft_chain_route_ipv6.c
index f272747..11d3c3b 100644
--- a/net/ipv6/netfilter/nft_chain_route_ipv6.c
+++ b/net/ipv6/netfilter/nft_chain_route_ipv6.c
@@ -33,7 +33,8 @@ static unsigned int nf_route_table_hook(void *priv,
 	u32 mark, flowlabel;
 	int err;
 
-	nft_set_pktinfo_ipv6(&pkt, skb, state);
+	nft_set_pktinfo(&pkt, skb, state);
+	nft_set_pktinfo_ipv6(&pkt, skb);
 
 	/* save source/dest address, mark, hoplimit, flowlabel, priority */
 	memcpy(&saddr, &ipv6_hdr(skb)->saddr, sizeof(saddr));
diff --git a/net/ipv6/netfilter/nft_fib_ipv6.c b/net/ipv6/netfilter/nft_fib_ipv6.c
index 54b5899..cc5174c 100644
--- a/net/ipv6/netfilter/nft_fib_ipv6.c
+++ b/net/ipv6/netfilter/nft_fib_ipv6.c
@@ -60,7 +60,6 @@ static u32 __nft_fib6_eval_type(const struct nft_fib *priv,
 {
 	const struct net_device *dev = NULL;
 	const struct nf_ipv6_ops *v6ops;
-	const struct nf_afinfo *afinfo;
 	int route_err, addrtype;
 	struct rt6_info *rt;
 	struct flowi6 fl6 = {
@@ -69,8 +68,8 @@ static u32 __nft_fib6_eval_type(const struct nft_fib *priv,
 	};
 	u32 ret = 0;
 
-	afinfo = nf_get_afinfo(NFPROTO_IPV6);
-	if (!afinfo)
+	v6ops = nf_get_ipv6_ops();
+	if (!v6ops)
 		return RTN_UNREACHABLE;
 
 	if (priv->flags & NFTA_FIB_F_IIF)
@@ -80,12 +79,11 @@ static u32 __nft_fib6_eval_type(const struct nft_fib *priv,
 
 	nft_fib6_flowi_init(&fl6, priv, pkt, dev, iph);
 
-	v6ops = nf_get_ipv6_ops();
-	if (dev && v6ops && v6ops->chk_addr(nft_net(pkt), &fl6.daddr, dev, true))
+	if (dev && v6ops->chk_addr(nft_net(pkt), &fl6.daddr, dev, true))
 		ret = RTN_LOCAL;
 
-	route_err = afinfo->route(nft_net(pkt), (struct dst_entry **)&rt,
-				  flowi6_to_flowi(&fl6), false);
+	route_err = v6ops->route(nft_net(pkt), (struct dst_entry **)&rt,
+				 flowi6_to_flowi(&fl6), false);
 	if (route_err)
 		goto err;
 
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index 0458b76..1076ae0 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -186,7 +186,7 @@ static void rt6_uncached_list_flush_dev(struct net *net, struct net_device *dev)
 
 static u32 *rt6_pcpu_cow_metrics(struct rt6_info *rt)
 {
-	return dst_metrics_write_ptr(rt->dst.from);
+	return dst_metrics_write_ptr(&rt->from->dst);
 }
 
 static u32 *ipv6_cow_metrics(struct dst_entry *dst, unsigned long old)
@@ -391,7 +391,7 @@ static void ip6_dst_destroy(struct dst_entry *dst)
 {
 	struct rt6_info *rt = (struct rt6_info *)dst;
 	struct rt6_exception_bucket *bucket;
-	struct dst_entry *from = dst->from;
+	struct rt6_info *from = rt->from;
 	struct inet6_dev *idev;
 
 	dst_destroy_metrics_generic(dst);
@@ -409,8 +409,8 @@ static void ip6_dst_destroy(struct dst_entry *dst)
 		kfree(bucket);
 	}
 
-	dst->from = NULL;
-	dst_release(from);
+	rt->from = NULL;
+	dst_release(&from->dst);
 }
 
 static void ip6_dst_ifdown(struct dst_entry *dst, struct net_device *dev,
@@ -443,9 +443,9 @@ static bool rt6_check_expired(const struct rt6_info *rt)
 	if (rt->rt6i_flags & RTF_EXPIRES) {
 		if (time_after(jiffies, rt->dst.expires))
 			return true;
-	} else if (rt->dst.from) {
+	} else if (rt->from) {
 		return rt->dst.obsolete != DST_OBSOLETE_FORCE_CHK ||
-		       rt6_check_expired((struct rt6_info *)rt->dst.from);
+			rt6_check_expired(rt->from);
 	}
 	return false;
 }
@@ -455,7 +455,6 @@ static struct rt6_info *rt6_multipath_select(struct rt6_info *match,
 					     int strict)
 {
 	struct rt6_info *sibling, *next_sibling;
-	int route_choosen;
 
 	/* We might have already computed the hash for ICMPv6 errors. In such
 	 * case it will always be non-zero. Otherwise now is the time to do it.
@@ -463,26 +462,19 @@ static struct rt6_info *rt6_multipath_select(struct rt6_info *match,
 	if (!fl6->mp_hash)
 		fl6->mp_hash = rt6_multipath_hash(fl6, NULL);
 
-	route_choosen = fl6->mp_hash % (match->rt6i_nsiblings + 1);
-	/* Don't change the route, if route_choosen == 0
-	 * (siblings does not include ourself)
-	 */
-	if (route_choosen)
-		list_for_each_entry_safe(sibling, next_sibling,
-				&match->rt6i_siblings, rt6i_siblings) {
-			route_choosen--;
-			if (route_choosen == 0) {
-				struct inet6_dev *idev = sibling->rt6i_idev;
+	if (fl6->mp_hash <= atomic_read(&match->rt6i_nh_upper_bound))
+		return match;
 
-				if (!netif_carrier_ok(sibling->dst.dev) &&
-				    idev->cnf.ignore_routes_with_linkdown)
-					break;
-				if (rt6_score_route(sibling, oif, strict) < 0)
-					break;
-				match = sibling;
-				break;
-			}
-		}
+	list_for_each_entry_safe(sibling, next_sibling, &match->rt6i_siblings,
+				 rt6i_siblings) {
+		if (fl6->mp_hash > atomic_read(&sibling->rt6i_nh_upper_bound))
+			continue;
+		if (rt6_score_route(sibling, oif, strict) < 0)
+			break;
+		match = sibling;
+		break;
+	}
+
 	return match;
 }
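
The rewritten selector implements hash-threshold nexthop selection: every
sibling carries a precomputed upper bound in [0, 2^31 - 1] proportional to its
cumulative weight (computed by rt6_multipath_rebalance() further down), and
the packet goes to the first sibling whose bound the flow hash does not
exceed (the hash itself is confined to 31 bits by rt6_multipath_hash(),
below). Unlike the old modulo-by-sibling-count walk, this keeps most flows
pinned to their current nexthop when a sibling is added or removed; dead or
link-down siblings contribute no weight and are assigned an upper bound of
-1, i.e. no slice of the hash space.
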
 
@@ -499,12 +491,15 @@ static inline struct rt6_info *rt6_device_match(struct net *net,
 	struct rt6_info *local = NULL;
 	struct rt6_info *sprt;
 
-	if (!oif && ipv6_addr_any(saddr))
-		goto out;
+	if (!oif && ipv6_addr_any(saddr) && !(rt->rt6i_nh_flags & RTNH_F_DEAD))
+		return rt;
 
-	for (sprt = rt; sprt; sprt = rcu_dereference(sprt->dst.rt6_next)) {
+	for (sprt = rt; sprt; sprt = rcu_dereference(sprt->rt6_next)) {
 		struct net_device *dev = sprt->dst.dev;
 
+		if (sprt->rt6i_nh_flags & RTNH_F_DEAD)
+			continue;
+
 		if (oif) {
 			if (dev->ifindex == oif)
 				return sprt;
@@ -533,8 +528,8 @@ static inline struct rt6_info *rt6_device_match(struct net *net,
 		if (flags & RT6_LOOKUP_F_IFACE)
 			return net->ipv6.ip6_null_entry;
 	}
-out:
-	return rt;
+
+	return rt->rt6i_nh_flags & RTNH_F_DEAD ? net->ipv6.ip6_null_entry : rt;
 }
 
 #ifdef CONFIG_IPV6_ROUTER_PREF
@@ -679,10 +674,12 @@ static struct rt6_info *find_match(struct rt6_info *rt, int oif, int strict,
 	int m;
 	bool match_do_rr = false;
 	struct inet6_dev *idev = rt->rt6i_idev;
-	struct net_device *dev = rt->dst.dev;
 
-	if (dev && !netif_carrier_ok(dev) &&
-	    idev->cnf.ignore_routes_with_linkdown &&
+	if (rt->rt6i_nh_flags & RTNH_F_DEAD)
+		goto out;
+
+	if (idev->cnf.ignore_routes_with_linkdown &&
+	    rt->rt6i_nh_flags & RTNH_F_LINKDOWN &&
 	    !(strict & RT6_LOOKUP_F_IGNORE_LINKSTATE))
 		goto out;
 
@@ -721,7 +718,7 @@ static struct rt6_info *find_rr_leaf(struct fib6_node *fn,
 
 	match = NULL;
 	cont = NULL;
-	for (rt = rr_head; rt; rt = rcu_dereference(rt->dst.rt6_next)) {
+	for (rt = rr_head; rt; rt = rcu_dereference(rt->rt6_next)) {
 		if (rt->rt6i_metric != metric) {
 			cont = rt;
 			break;
@@ -731,7 +728,7 @@ static struct rt6_info *find_rr_leaf(struct fib6_node *fn,
 	}
 
 	for (rt = leaf; rt && rt != rr_head;
-	     rt = rcu_dereference(rt->dst.rt6_next)) {
+	     rt = rcu_dereference(rt->rt6_next)) {
 		if (rt->rt6i_metric != metric) {
 			cont = rt;
 			break;
@@ -743,7 +740,7 @@ static struct rt6_info *find_rr_leaf(struct fib6_node *fn,
 	if (match || !cont)
 		return match;
 
-	for (rt = cont; rt; rt = rcu_dereference(rt->dst.rt6_next))
+	for (rt = cont; rt; rt = rcu_dereference(rt->rt6_next))
 		match = find_match(rt, oif, strict, &mpri, match, do_rr);
 
 	return match;
@@ -781,7 +778,7 @@ static struct rt6_info *rt6_select(struct net *net, struct fib6_node *fn,
 			     &do_rr);
 
 	if (do_rr) {
-		struct rt6_info *next = rcu_dereference(rt0->dst.rt6_next);
+		struct rt6_info *next = rcu_dereference(rt0->rt6_next);
 
 		/* no entries matched; do round-robin */
 		if (!next || next->rt6i_metric != rt0->rt6i_metric)
@@ -1054,7 +1051,7 @@ static struct rt6_info *ip6_rt_cache_alloc(struct rt6_info *ort,
 	 */
 
 	if (ort->rt6i_flags & (RTF_CACHE | RTF_PCPU))
-		ort = (struct rt6_info *)ort->dst.from;
+		ort = ort->from;
 
 	rcu_read_lock();
 	dev = ip6_rt_get_dev_rcu(ort);
@@ -1274,7 +1271,7 @@ static int rt6_insert_exception(struct rt6_info *nrt,
 
 	/* ort can't be a cache or pcpu route */
 	if (ort->rt6i_flags & (RTF_CACHE | RTF_PCPU))
-		ort = (struct rt6_info *)ort->dst.from;
+		ort = ort->from;
 	WARN_ON_ONCE(ort->rt6i_flags & (RTF_CACHE | RTF_PCPU));
 
 	spin_lock_bh(&rt6_exception_lock);
@@ -1346,7 +1343,9 @@ static int rt6_insert_exception(struct rt6_info *nrt,
 
 	/* Update fn->fn_sernum to invalidate all cached dst */
 	if (!err) {
+		spin_lock_bh(&ort->rt6i_table->tb6_lock);
 		fib6_update_sernum(ort);
+		spin_unlock_bh(&ort->rt6i_table->tb6_lock);
 		fib6_force_start_gc(net);
 	}
 
@@ -1415,8 +1414,8 @@ static struct rt6_info *rt6_find_cached_rt(struct rt6_info *rt,
 /* Remove the passed in cached rt from the hash table that contains it */
 int rt6_remove_exception_rt(struct rt6_info *rt)
 {
-	struct rt6_info *from = (struct rt6_info *)rt->dst.from;
 	struct rt6_exception_bucket *bucket;
+	struct rt6_info *from = rt->from;
 	struct in6_addr *src_key = NULL;
 	struct rt6_exception *rt6_ex;
 	int err;
@@ -1460,8 +1459,8 @@ int rt6_remove_exception_rt(struct rt6_info *rt)
  */
 static void rt6_update_exception_stamp_rt(struct rt6_info *rt)
 {
-	struct rt6_info *from = (struct rt6_info *)rt->dst.from;
 	struct rt6_exception_bucket *bucket;
+	struct rt6_info *from = rt->from;
 	struct in6_addr *src_key = NULL;
 	struct rt6_exception *rt6_ex;
 
@@ -1824,10 +1823,10 @@ u32 rt6_multipath_hash(const struct flowi6 *fl6, const struct sk_buff *skb)
 
 	if (skb) {
 		ip6_multipath_l3_keys(skb, &hash_keys);
-		return flow_hash_from_keys(&hash_keys);
+		return flow_hash_from_keys(&hash_keys) >> 1;
 	}
 
-	return get_hash_from_flowi6(fl6);
+	return get_hash_from_flowi6(fl6) >> 1;
 }
 
 void ip6_route_input(struct sk_buff *skb)
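
The new >> 1 ties the hash to the hash-threshold bounds used by
rt6_multipath_select() above and computed by rt6_upper_bound_set() further
down: those bounds partition [0, 2^31 - 1] among the live siblings, so the
flow hash must be confined to the same 31-bit interval. A full 32-bit hash
would land beyond the last sibling's bound half of the time and silently
collapse those flows onto the first route.
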
@@ -1929,9 +1928,9 @@ struct dst_entry *ip6_blackhole_route(struct net *net, struct dst_entry *dst_ori
 
 static void rt6_dst_from_metrics_check(struct rt6_info *rt)
 {
-	if (rt->dst.from &&
-	    dst_metrics_ptr(&rt->dst) != dst_metrics_ptr(rt->dst.from))
-		dst_init_metrics(&rt->dst, dst_metrics_ptr(rt->dst.from), true);
+	if (rt->from &&
+	    dst_metrics_ptr(&rt->dst) != dst_metrics_ptr(&rt->from->dst))
+		dst_init_metrics(&rt->dst, dst_metrics_ptr(&rt->from->dst), true);
 }
 
 static struct dst_entry *rt6_check(struct rt6_info *rt, u32 cookie)
@@ -1951,7 +1950,7 @@ static struct dst_entry *rt6_dst_from_check(struct rt6_info *rt, u32 cookie)
 {
 	if (!__rt6_check_expired(rt) &&
 	    rt->dst.obsolete == DST_OBSOLETE_FORCE_CHK &&
-	    rt6_check((struct rt6_info *)(rt->dst.from), cookie))
+	    rt6_check(rt->from, cookie))
 		return &rt->dst;
 	else
 		return NULL;
@@ -1971,7 +1970,7 @@ static struct dst_entry *ip6_dst_check(struct dst_entry *dst, u32 cookie)
 	rt6_dst_from_metrics_check(rt);
 
 	if (rt->rt6i_flags & RTF_PCPU ||
-	    (unlikely(!list_empty(&rt->rt6i_uncached)) && rt->dst.from))
+	    (unlikely(!list_empty(&rt->rt6i_uncached)) && rt->from))
 		return rt6_dst_from_check(rt, cookie);
 	else
 		return rt6_check(rt, cookie);
@@ -2154,6 +2153,8 @@ static struct rt6_info *__ip6_route_redirect(struct net *net,
 	fn = fib6_lookup(&table->tb6_root, &fl6->daddr, &fl6->saddr);
 restart:
 	for_each_fib6_node_rt_rcu(fn) {
+		if (rt->rt6i_nh_flags & RTNH_F_DEAD)
+			continue;
 		if (rt6_check_expired(rt))
 			continue;
 		if (rt->dst.error)
@@ -2344,7 +2345,7 @@ struct dst_entry *icmp6_dst_alloc(struct net_device *dev,
 	rt->rt6i_idev     = idev;
 	dst_metric_set(&rt->dst, RTAX_HOPLIMIT, 0);
 
-	/* Add this dst into uncached_list so that rt6_ifdown() can
+	/* Add this dst into uncached_list so that rt6_disable_ip() can
 	 * do proper release of the net_device
 	 */
 	rt6_uncached_list_add(rt);
@@ -2593,6 +2594,7 @@ static struct rt6_info *ip6_route_info_create(struct fib6_config *cfg,
 #endif
 
 	rt->rt6i_metric = cfg->fc_metric;
+	rt->rt6i_nh_weight = 1;
 
 	/* We cannot add true routes via loopback here,
 	   they would result in kernel looping; promote them to reject routes
@@ -2746,6 +2748,9 @@ static struct rt6_info *ip6_route_info_create(struct fib6_config *cfg,
 	rt->rt6i_flags = cfg->fc_flags;
 
 install_route:
+	if (!(rt->rt6i_flags & (RTF_LOCAL | RTF_ANYCAST)) &&
+	    !netif_carrier_ok(dev))
+		rt->rt6i_nh_flags |= RTNH_F_LINKDOWN;
 	rt->dst.dev = dev;
 	rt->rt6i_idev = idev;
 	rt->rt6i_table = table;
@@ -3056,11 +3061,11 @@ static void rt6_do_redirect(struct dst_entry *dst, struct sock *sk, struct sk_bu
 
 static void rt6_set_from(struct rt6_info *rt, struct rt6_info *from)
 {
-	BUG_ON(from->dst.from);
+	BUG_ON(from->from);
 
 	rt->rt6i_flags &= ~RTF_EXPIRES;
 	dst_hold(&from->dst);
-	rt->dst.from = &from->dst;
+	rt->from = from;
 	dst_init_metrics(&rt->dst, dst_metrics_ptr(&from->dst), true);
 }
 
@@ -3459,37 +3464,245 @@ void rt6_clean_tohost(struct net *net, struct in6_addr *gateway)
 	fib6_clean_all(net, fib6_clean_tohost, gateway);
 }
 
-struct arg_dev_net {
-	struct net_device *dev;
-	struct net *net;
+struct arg_netdev_event {
+	const struct net_device *dev;
+	union {
+		unsigned int nh_flags;
+		unsigned long event;
+	};
 };
 
-/* called with write lock held for table with rt */
-static int fib6_ifdown(struct rt6_info *rt, void *arg)
+static struct rt6_info *rt6_multipath_first_sibling(const struct rt6_info *rt)
 {
-	const struct arg_dev_net *adn = arg;
-	const struct net_device *dev = adn->dev;
+	struct rt6_info *iter;
+	struct fib6_node *fn;
 
-	if ((rt->dst.dev == dev || !dev) &&
-	    rt != adn->net->ipv6.ip6_null_entry &&
-	    (rt->rt6i_nsiblings == 0 ||
-	     (dev && netdev_unregistering(dev)) ||
-	     !rt->rt6i_idev->cnf.ignore_routes_with_linkdown))
-		return -1;
+	fn = rcu_dereference_protected(rt->rt6i_node,
+			lockdep_is_held(&rt->rt6i_table->tb6_lock));
+	iter = rcu_dereference_protected(fn->leaf,
+			lockdep_is_held(&rt->rt6i_table->tb6_lock));
+	while (iter) {
+		if (iter->rt6i_metric == rt->rt6i_metric &&
+		    rt6_qualify_for_ecmp(iter))
+			return iter;
+		iter = rcu_dereference_protected(iter->rt6_next,
+				lockdep_is_held(&rt->rt6i_table->tb6_lock));
+	}
+
+	return NULL;
+}
+
+static bool rt6_is_dead(const struct rt6_info *rt)
+{
+	if (rt->rt6i_nh_flags & RTNH_F_DEAD ||
+	    (rt->rt6i_nh_flags & RTNH_F_LINKDOWN &&
+	     rt->rt6i_idev->cnf.ignore_routes_with_linkdown))
+		return true;
+
+	return false;
+}
+
+static int rt6_multipath_total_weight(const struct rt6_info *rt)
+{
+	struct rt6_info *iter;
+	int total = 0;
+
+	if (!rt6_is_dead(rt))
+		total += rt->rt6i_nh_weight;
+
+	list_for_each_entry(iter, &rt->rt6i_siblings, rt6i_siblings) {
+		if (!rt6_is_dead(iter))
+			total += iter->rt6i_nh_weight;
+	}
+
+	return total;
+}
+
+static void rt6_upper_bound_set(struct rt6_info *rt, int *weight, int total)
+{
+	int upper_bound = -1;
+
+	if (!rt6_is_dead(rt)) {
+		*weight += rt->rt6i_nh_weight;
+		upper_bound = DIV_ROUND_CLOSEST_ULL((u64) (*weight) << 31,
+						    total) - 1;
+	}
+	atomic_set(&rt->rt6i_nh_upper_bound, upper_bound);
+}
+
+static void rt6_multipath_upper_bound_set(struct rt6_info *rt, int total)
+{
+	struct rt6_info *iter;
+	int weight = 0;
+
+	rt6_upper_bound_set(rt, &weight, total);
+
+	list_for_each_entry(iter, &rt->rt6i_siblings, rt6i_siblings)
+		rt6_upper_bound_set(iter, &weight, total);
+}
+
+void rt6_multipath_rebalance(struct rt6_info *rt)
+{
+	struct rt6_info *first;
+	int total;
+
+	/* If the entire multipath route was marked for flushing, there
+	 * is no need to rebalance upon the removal of each sibling
+	 * route.
+	 */
+	if (!rt->rt6i_nsiblings || rt->should_flush)
+		return;
+
+	/* During lookup routes are evaluated in order, so we need to
+	 * make sure upper bounds are assigned from the first sibling
+	 * onwards.
+	 */
+	first = rt6_multipath_first_sibling(rt);
+	if (WARN_ON_ONCE(!first))
+		return;
+
+	total = rt6_multipath_total_weight(first);
+	rt6_multipath_upper_bound_set(first, total);
+}
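
Since the fixed-point arithmetic is easy to misread, a worked example of the
bounds this produces (just the numbers, not kernel code):

    /* Three siblings with weights 1, 2 and 1; total = 4. The cumulative
     * weights are 1, 3 and 4, so rt6_upper_bound_set() yields:
     *
     *   sibling 0: DIV_ROUND_CLOSEST_ULL(1ULL << 31, 4) - 1 = 0x1fffffff
     *   sibling 1: DIV_ROUND_CLOSEST_ULL(3ULL << 31, 4) - 1 = 0x5fffffff
     *   sibling 2: DIV_ROUND_CLOSEST_ULL(4ULL << 31, 4) - 1 = 0x7fffffff
     *
     * A 31-bit flow hash of 0x30000000 exceeds 0x1fffffff but not
     * 0x5fffffff, so that flow maps to sibling 1, the weight-2 nexthop,
     * which owns half of the hash space as expected.
     */
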
+
+static int fib6_ifup(struct rt6_info *rt, void *p_arg)
+{
+	const struct arg_netdev_event *arg = p_arg;
+	const struct net *net = dev_net(arg->dev);
+
+	if (rt != net->ipv6.ip6_null_entry && rt->dst.dev == arg->dev) {
+		rt->rt6i_nh_flags &= ~arg->nh_flags;
+		fib6_update_sernum_upto_root(dev_net(rt->dst.dev), rt);
+		rt6_multipath_rebalance(rt);
+	}
 
 	return 0;
 }
 
-void rt6_ifdown(struct net *net, struct net_device *dev)
+void rt6_sync_up(struct net_device *dev, unsigned int nh_flags)
 {
-	struct arg_dev_net adn = {
+	struct arg_netdev_event arg = {
 		.dev = dev,
-		.net = net,
+		.nh_flags = nh_flags,
 	};
 
-	fib6_clean_all(net, fib6_ifdown, &adn);
-	if (dev)
-		rt6_uncached_list_flush_dev(net, dev);
+	if (nh_flags & RTNH_F_DEAD && netif_carrier_ok(dev))
+		arg.nh_flags |= RTNH_F_LINKDOWN;
+
+	fib6_clean_all(dev_net(dev), fib6_ifup, &arg);
+}
+
+static bool rt6_multipath_uses_dev(const struct rt6_info *rt,
+				   const struct net_device *dev)
+{
+	struct rt6_info *iter;
+
+	if (rt->dst.dev == dev)
+		return true;
+	list_for_each_entry(iter, &rt->rt6i_siblings, rt6i_siblings)
+		if (iter->dst.dev == dev)
+			return true;
+
+	return false;
+}
+
+static void rt6_multipath_flush(struct rt6_info *rt)
+{
+	struct rt6_info *iter;
+
+	rt->should_flush = 1;
+	list_for_each_entry(iter, &rt->rt6i_siblings, rt6i_siblings)
+		iter->should_flush = 1;
+}
+
+static unsigned int rt6_multipath_dead_count(const struct rt6_info *rt,
+					     const struct net_device *down_dev)
+{
+	struct rt6_info *iter;
+	unsigned int dead = 0;
+
+	if (rt->dst.dev == down_dev || rt->rt6i_nh_flags & RTNH_F_DEAD)
+		dead++;
+	list_for_each_entry(iter, &rt->rt6i_siblings, rt6i_siblings)
+		if (iter->dst.dev == down_dev ||
+		    iter->rt6i_nh_flags & RTNH_F_DEAD)
+			dead++;
+
+	return dead;
+}
+
+static void rt6_multipath_nh_flags_set(struct rt6_info *rt,
+				       const struct net_device *dev,
+				       unsigned int nh_flags)
+{
+	struct rt6_info *iter;
+
+	if (rt->dst.dev == dev)
+		rt->rt6i_nh_flags |= nh_flags;
+	list_for_each_entry(iter, &rt->rt6i_siblings, rt6i_siblings)
+		if (iter->dst.dev == dev)
+			iter->rt6i_nh_flags |= nh_flags;
+}
+
+/* called with write lock held for table with rt */
+static int fib6_ifdown(struct rt6_info *rt, void *p_arg)
+{
+	const struct arg_netdev_event *arg = p_arg;
+	const struct net_device *dev = arg->dev;
+	const struct net *net = dev_net(dev);
+
+	if (rt == net->ipv6.ip6_null_entry)
+		return 0;
+
+	switch (arg->event) {
+	case NETDEV_UNREGISTER:
+		return rt->dst.dev == dev ? -1 : 0;
+	case NETDEV_DOWN:
+		if (rt->should_flush)
+			return -1;
+		if (!rt->rt6i_nsiblings)
+			return rt->dst.dev == dev ? -1 : 0;
+		if (rt6_multipath_uses_dev(rt, dev)) {
+			unsigned int count;
+
+			count = rt6_multipath_dead_count(rt, dev);
+			if (rt->rt6i_nsiblings + 1 == count) {
+				rt6_multipath_flush(rt);
+				return -1;
+			}
+			rt6_multipath_nh_flags_set(rt, dev, RTNH_F_DEAD |
+						   RTNH_F_LINKDOWN);
+			fib6_update_sernum(rt);
+			rt6_multipath_rebalance(rt);
+		}
+		return -2;
+	case NETDEV_CHANGE:
+		if (rt->dst.dev != dev ||
+		    rt->rt6i_flags & (RTF_LOCAL | RTF_ANYCAST))
+			break;
+		rt->rt6i_nh_flags |= RTNH_F_LINKDOWN;
+		rt6_multipath_rebalance(rt);
+		break;
+	}
+
+	return 0;
+}
+
+void rt6_sync_down_dev(struct net_device *dev, unsigned long event)
+{
+	struct arg_netdev_event arg = {
+		.dev = dev,
+		.event = event,
+	};
+
+	fib6_clean_all(dev_net(dev), fib6_ifdown, &arg);
+}
+
+void rt6_disable_ip(struct net_device *dev, unsigned long event)
+{
+	rt6_sync_down_dev(dev, event);
+	rt6_uncached_list_flush_dev(dev_net(dev), dev);
+	neigh_ifdown(&nd_tbl, dev);
 }
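
fib6_ifdown() now reports three distinct outcomes to the fib6_clean_all()
walker: 0 leaves the route alone, -1 requests deletion (the only behaviour the
old code had), and the new -2 signals that a multipath route has been handled
as a whole so the walker may skip the remaining siblings; the walker side of
the -2 convention lives in fib6_clean_node() and is not part of this hunk.
rt6_disable_ip() then bundles the route sync, the uncached-list flush and the
neighbour table cleanup into the single entry point that the
icmp6_dst_alloc() comment above now names.
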
 
 struct rt6_mtu_change_arg {
@@ -3812,6 +4025,8 @@ static int ip6_route_multipath_add(struct fib6_config *cfg,
 			goto cleanup;
 		}
 
+		rt->rt6i_nh_weight = rtnh->rtnh_hops + 1;
+
 		err = ip6_route_info_append(&rt6_nh_list, rt, &r_cfg);
 		if (err) {
 			dst_release_immediate(&rt->dst);
@@ -3992,7 +4207,10 @@ static size_t rt6_nlmsg_size(struct rt6_info *rt)
 static int rt6_nexthop_info(struct sk_buff *skb, struct rt6_info *rt,
 			    unsigned int *flags, bool skip_oif)
 {
-	if (!netif_running(rt->dst.dev) || !netif_carrier_ok(rt->dst.dev)) {
+	if (rt->rt6i_nh_flags & RTNH_F_DEAD)
+		*flags |= RTNH_F_DEAD;
+
+	if (rt->rt6i_nh_flags & RTNH_F_LINKDOWN) {
 		*flags |= RTNH_F_LINKDOWN;
 		if (rt->rt6i_idev->cnf.ignore_routes_with_linkdown)
 			*flags |= RTNH_F_DEAD;
@@ -4031,7 +4249,7 @@ static int rt6_add_nexthop(struct sk_buff *skb, struct rt6_info *rt)
 	if (!rtnh)
 		goto nla_put_failure;
 
-	rtnh->rtnh_hops = 0;
+	rtnh->rtnh_hops = rt->rt6i_nh_weight - 1;
 	rtnh->rtnh_ifindex = rt->dst.dev ? rt->dst.dev->ifindex : 0;
 
 	if (rt6_nexthop_info(skb, rt, &flags, true) < 0)
@@ -4321,9 +4539,8 @@ static int inet6_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh,
 		goto errout;
 	}
 
-	if (fibmatch && rt->dst.from) {
-		struct rt6_info *ort = container_of(rt->dst.from,
-						    struct rt6_info, dst);
+	if (fibmatch && rt->from) {
+		struct rt6_info *ort = rt->from;
 
 		dst_hold(&ort->dst);
 		ip6_rt_put(rt);
@@ -4600,8 +4817,6 @@ static int __net_init ip6_route_net_init(struct net *net)
 					   GFP_KERNEL);
 	if (!net->ipv6.ip6_null_entry)
 		goto out_ip6_dst_entries;
-	net->ipv6.ip6_null_entry->dst.path =
-		(struct dst_entry *)net->ipv6.ip6_null_entry;
 	net->ipv6.ip6_null_entry->dst.ops = &net->ipv6.ip6_dst_ops;
 	dst_init_metrics(&net->ipv6.ip6_null_entry->dst,
 			 ip6_template_metrics, true);
@@ -4613,8 +4828,6 @@ static int __net_init ip6_route_net_init(struct net *net)
 					       GFP_KERNEL);
 	if (!net->ipv6.ip6_prohibit_entry)
 		goto out_ip6_null_entry;
-	net->ipv6.ip6_prohibit_entry->dst.path =
-		(struct dst_entry *)net->ipv6.ip6_prohibit_entry;
 	net->ipv6.ip6_prohibit_entry->dst.ops = &net->ipv6.ip6_dst_ops;
 	dst_init_metrics(&net->ipv6.ip6_prohibit_entry->dst,
 			 ip6_template_metrics, true);
@@ -4624,8 +4837,6 @@ static int __net_init ip6_route_net_init(struct net *net)
 					       GFP_KERNEL);
 	if (!net->ipv6.ip6_blk_hole_entry)
 		goto out_ip6_prohibit_entry;
-	net->ipv6.ip6_blk_hole_entry->dst.path =
-		(struct dst_entry *)net->ipv6.ip6_blk_hole_entry;
 	net->ipv6.ip6_blk_hole_entry->dst.ops = &net->ipv6.ip6_dst_ops;
 	dst_init_metrics(&net->ipv6.ip6_blk_hole_entry->dst,
 			 ip6_template_metrics, true);
@@ -4782,11 +4993,20 @@ int __init ip6_route_init(void)
 	if (ret)
 		goto fib6_rules_init;
 
-	ret = -ENOBUFS;
-	if (__rtnl_register(PF_INET6, RTM_NEWROUTE, inet6_rtm_newroute, NULL, 0) ||
-	    __rtnl_register(PF_INET6, RTM_DELROUTE, inet6_rtm_delroute, NULL, 0) ||
-	    __rtnl_register(PF_INET6, RTM_GETROUTE, inet6_rtm_getroute, NULL,
-			    RTNL_FLAG_DOIT_UNLOCKED))
+	ret = rtnl_register_module(THIS_MODULE, PF_INET6, RTM_NEWROUTE,
+				   inet6_rtm_newroute, NULL, 0);
+	if (ret < 0)
+		goto out_register_late_subsys;
+
+	ret = rtnl_register_module(THIS_MODULE, PF_INET6, RTM_DELROUTE,
+				   inet6_rtm_delroute, NULL, 0);
+	if (ret < 0)
+		goto out_register_late_subsys;
+
+	ret = rtnl_register_module(THIS_MODULE, PF_INET6, RTM_GETROUTE,
+				   inet6_rtm_getroute, NULL,
+				   RTNL_FLAG_DOIT_UNLOCKED);
+	if (ret < 0)
 		goto out_register_late_subsys;
 
 	ret = register_netdevice_notifier(&ip6_route_dev_notifier);
@@ -4804,6 +5024,7 @@ int __init ip6_route_init(void)
 	return ret;
 
 out_register_late_subsys:
+	rtnl_unregister_all(PF_INET6);
 	unregister_pernet_subsys(&ip6_route_net_late_ops);
 fib6_rules_init:
 	fib6_rules_cleanup();
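
The registration rework swaps the old all-or-nothing __rtnl_register() calls,
which folded every failure into one -ENOBUFS, for rtnl_register_module(),
which propagates the real error and ties the handlers to THIS_MODULE. Because
the three registrations can now fail independently, the unwind path gains
rtnl_unregister_all(PF_INET6), which tears down whichever of them did
succeed.
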
diff --git a/net/ipv6/seg6.c b/net/ipv6/seg6.c
index c814077..7f5621d 100644
--- a/net/ipv6/seg6.c
+++ b/net/ipv6/seg6.c
@@ -306,9 +306,7 @@ static int seg6_genl_dumphmac(struct sk_buff *skb, struct netlink_callback *cb)
 	struct seg6_hmac_info *hinfo;
 	int ret;
 
-	ret = rhashtable_walk_start(iter);
-	if (ret && ret != -EAGAIN)
-		goto done;
+	rhashtable_walk_start(iter);
 
 	for (;;) {
 		hinfo = rhashtable_walk_next(iter);
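
rhashtable_walk_start() can no longer fail: since the iterator API change it
returns void, and a concurrent table resize surfaces as ERR_PTR(-EAGAIN) from
rhashtable_walk_next() instead, which this loop (and the mesh_pathtbl.c loops
below) already handles. The canonical walk now looks roughly like this (names
illustrative):

    static void example_walk(struct rhashtable *ht)
    {
            struct rhashtable_iter iter;
            void *obj;

            rhashtable_walk_enter(ht, &iter);
            rhashtable_walk_start(&iter);   /* void since the API change */

            while ((obj = rhashtable_walk_next(&iter)) != NULL) {
                    if (IS_ERR(obj)) {
                            if (PTR_ERR(obj) == -EAGAIN)
                                    continue;  /* resized under us; resume */
                            break;
                    }
                    /* ... use obj ... */
            }

            rhashtable_walk_stop(&iter);
            rhashtable_walk_exit(&iter);
    }
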
diff --git a/net/ipv6/seg6_local.c b/net/ipv6/seg6_local.c
index 825b8e0..ba3767e 100644
--- a/net/ipv6/seg6_local.c
+++ b/net/ipv6/seg6_local.c
@@ -501,7 +501,7 @@ static struct seg6_action_desc *__get_action_desc(int action)
 	struct seg6_action_desc *desc;
 	int i, count;
 
-	count = sizeof(seg6_action_table) / sizeof(struct seg6_action_desc);
+	count = ARRAY_SIZE(seg6_action_table);
 	for (i = 0; i < count; i++) {
 		desc = &seg6_action_table[i];
 		if (desc->action == action)
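
ARRAY_SIZE() is more than shorthand for the open-coded division: the kernel's
definition also applies __must_be_array(), so passing a pointer instead of an
array becomes a build error rather than a silently wrong count.
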
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index 7178476..c0f7e69 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -176,8 +176,7 @@ static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
 			/* If interface is set while binding, indices
 			 * must coincide.
 			 */
-			if (sk->sk_bound_dev_if &&
-			    sk->sk_bound_dev_if != usin->sin6_scope_id)
+			if (!sk_dev_equal_l3scope(sk, usin->sin6_scope_id))
 				return -EINVAL;
 
 			sk->sk_bound_dev_if = usin->sin6_scope_id;
@@ -1795,7 +1794,7 @@ static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
 		timer_expires = jiffies;
 	}
 
-	state = sk_state_load(sp);
+	state = inet_sk_state_load(sp);
 	if (state == TCP_LISTEN)
 		rx_queue = sp->sk_ack_backlog;
 	else
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
index 3f30fa3..eecf9f0 100644
--- a/net/ipv6/udp.c
+++ b/net/ipv6/udp.c
@@ -89,28 +89,12 @@ static u32 udp6_ehashfn(const struct net *net,
 			       udp_ipv6_hash_secret + net_hash_mix(net));
 }
 
-static u32 udp6_portaddr_hash(const struct net *net,
-			      const struct in6_addr *addr6,
-			      unsigned int port)
-{
-	unsigned int hash, mix = net_hash_mix(net);
-
-	if (ipv6_addr_any(addr6))
-		hash = jhash_1word(0, mix);
-	else if (ipv6_addr_v4mapped(addr6))
-		hash = jhash_1word((__force u32)addr6->s6_addr32[3], mix);
-	else
-		hash = jhash2((__force u32 *)addr6->s6_addr32, 4, mix);
-
-	return hash ^ port;
-}
-
 int udp_v6_get_port(struct sock *sk, unsigned short snum)
 {
 	unsigned int hash2_nulladdr =
-		udp6_portaddr_hash(sock_net(sk), &in6addr_any, snum);
+		ipv6_portaddr_hash(sock_net(sk), &in6addr_any, snum);
 	unsigned int hash2_partial =
-		udp6_portaddr_hash(sock_net(sk), &sk->sk_v6_rcv_saddr, 0);
+		ipv6_portaddr_hash(sock_net(sk), &sk->sk_v6_rcv_saddr, 0);
 
 	/* precompute partial secondary hash */
 	udp_sk(sk)->udp_portaddr_hash = hash2_partial;
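
udp6_portaddr_hash() is relocated rather than dropped: the body removed above
moves, essentially verbatim, to include/net/ipv6.h as ipv6_portaddr_hash() so
the same address/port hash can be shared by other IPv6 socket lookups, and
every call site in this file simply switches to the shared name.
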
@@ -119,7 +103,7 @@ int udp_v6_get_port(struct sock *sk, unsigned short snum)
 
 static void udp_v6_rehash(struct sock *sk)
 {
-	u16 new_hash = udp6_portaddr_hash(sock_net(sk),
+	u16 new_hash = ipv6_portaddr_hash(sock_net(sk),
 					  &sk->sk_v6_rcv_saddr,
 					  inet_sk(sk)->inet_num);
 
@@ -184,7 +168,7 @@ static struct sock *udp6_lib_lookup2(struct net *net,
 		struct udp_hslot *hslot2, struct sk_buff *skb)
 {
 	struct sock *sk, *result;
-	int score, badness, matches = 0, reuseport = 0;
+	int score, badness;
 	u32 hash = 0;
 
 	result = NULL;
@@ -193,8 +177,7 @@ static struct sock *udp6_lib_lookup2(struct net *net,
 		score = compute_score(sk, net, saddr, sport,
 				      daddr, hnum, dif, sdif, exact_dif);
 		if (score > badness) {
-			reuseport = sk->sk_reuseport;
-			if (reuseport) {
+			if (sk->sk_reuseport) {
 				hash = udp6_ehashfn(net, daddr, hnum,
 						    saddr, sport);
 
@@ -202,15 +185,9 @@ static struct sock *udp6_lib_lookup2(struct net *net,
 							sizeof(struct udphdr));
 				if (result)
 					return result;
-				matches = 1;
 			}
 			result = sk;
 			badness = score;
-		} else if (score == badness && reuseport) {
-			matches++;
-			if (reciprocal_scale(hash, matches) == 0)
-				result = sk;
-			hash = next_pseudo_random32(hash);
 		}
 	}
 	return result;
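
The deleted matches/reciprocal_scale() logic was a fallback that sampled
pseudo-randomly among equal-scoring reuseport sockets whenever
reuseport_select_sock() returned NULL. With group-based selection considered
sufficient (a NULL return simply keeps the current socket as the best-score
candidate), the fallback and its per-iteration state disappear here and,
symmetrically, in __udp6_lib_lookup() below.
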
@@ -228,11 +205,11 @@ struct sock *__udp6_lib_lookup(struct net *net,
 	unsigned int hash2, slot2, slot = udp_hashfn(net, hnum, udptable->mask);
 	struct udp_hslot *hslot2, *hslot = &udptable->hash[slot];
 	bool exact_dif = udp6_lib_exact_dif_match(net, skb);
-	int score, badness, matches = 0, reuseport = 0;
+	int score, badness;
 	u32 hash = 0;
 
 	if (hslot->count > 10) {
-		hash2 = udp6_portaddr_hash(net, daddr, hnum);
+		hash2 = ipv6_portaddr_hash(net, daddr, hnum);
 		slot2 = hash2 & udptable->mask;
 		hslot2 = &udptable->hash2[slot2];
 		if (hslot->count < hslot2->count)
@@ -243,7 +220,7 @@ struct sock *__udp6_lib_lookup(struct net *net,
 					  hslot2, skb);
 		if (!result) {
 			unsigned int old_slot2 = slot2;
-			hash2 = udp6_portaddr_hash(net, &in6addr_any, hnum);
+			hash2 = ipv6_portaddr_hash(net, &in6addr_any, hnum);
 			slot2 = hash2 & udptable->mask;
 			/* avoid searching the same slot again. */
 			if (unlikely(slot2 == old_slot2))
@@ -267,23 +244,16 @@ struct sock *__udp6_lib_lookup(struct net *net,
 		score = compute_score(sk, net, saddr, sport, daddr, hnum, dif,
 				      sdif, exact_dif);
 		if (score > badness) {
-			reuseport = sk->sk_reuseport;
-			if (reuseport) {
+			if (sk->sk_reuseport) {
 				hash = udp6_ehashfn(net, daddr, hnum,
 						    saddr, sport);
 				result = reuseport_select_sock(sk, hash, skb,
 							sizeof(struct udphdr));
 				if (result)
 					return result;
-				matches = 1;
 			}
 			result = sk;
 			badness = score;
-		} else if (score == badness && reuseport) {
-			matches++;
-			if (reciprocal_scale(hash, matches) == 0)
-				result = sk;
-			hash = next_pseudo_random32(hash);
 		}
 	}
 	return result;
@@ -719,9 +689,9 @@ static int __udp6_lib_mcast_deliver(struct net *net, struct sk_buff *skb,
 	struct sk_buff *nskb;
 
 	if (use_hash2) {
-		hash2_any = udp6_portaddr_hash(net, &in6addr_any, hnum) &
+		hash2_any = ipv6_portaddr_hash(net, &in6addr_any, hnum) &
 			    udptable->mask;
-		hash2 = udp6_portaddr_hash(net, daddr, hnum) & udptable->mask;
+		hash2 = ipv6_portaddr_hash(net, daddr, hnum) & udptable->mask;
 start_lookup:
 		hslot = &udptable->hash2[hash2];
 		offset = offsetof(typeof(*sk), __sk_common.skc_portaddr_node);
@@ -909,7 +879,7 @@ static struct sock *__udp6_lib_demux_lookup(struct net *net,
 			int dif, int sdif)
 {
 	unsigned short hnum = ntohs(loc_port);
-	unsigned int hash2 = udp6_portaddr_hash(net, loc_addr, hnum);
+	unsigned int hash2 = ipv6_portaddr_hash(net, loc_addr, hnum);
 	unsigned int slot2 = hash2 & udp_table.mask;
 	struct udp_hslot *hslot2 = &udp_table.hash2[slot2];
 	const __portpair ports = INET_COMBINED_PORTS(rmt_port, hnum);
diff --git a/net/ipv6/xfrm6_mode_tunnel.c b/net/ipv6/xfrm6_mode_tunnel.c
index 02556e3..4e12859 100644
--- a/net/ipv6/xfrm6_mode_tunnel.c
+++ b/net/ipv6/xfrm6_mode_tunnel.c
@@ -59,7 +59,7 @@ static int xfrm6_mode_tunnel_output(struct xfrm_state *x, struct sk_buff *skb)
 	if (x->props.flags & XFRM_STATE_NOECN)
 		dsfield &= ~INET_ECN_MASK;
 	ipv6_change_dsfield(top_iph, 0, dsfield);
-	top_iph->hop_limit = ip6_dst_hoplimit(dst->child);
+	top_iph->hop_limit = ip6_dst_hoplimit(xfrm_dst_child(dst));
 	top_iph->saddr = *(struct in6_addr *)&x->props.saddr;
 	top_iph->daddr = *(struct in6_addr *)&x->id.daddr;
 	return 0;
@@ -105,17 +105,14 @@ static struct sk_buff *xfrm6_mode_tunnel_gso_segment(struct xfrm_state *x,
 {
 	__skb_push(skb, skb->mac_len);
 	return skb_mac_gso_segment(skb, features);
-
 }
 
 static void xfrm6_mode_tunnel_xmit(struct xfrm_state *x, struct sk_buff *skb)
 {
 	struct xfrm_offload *xo = xfrm_offload(skb);
 
-	if (xo->flags & XFRM_GSO_SEGMENT) {
-		skb->network_header = skb->network_header - x->props.header_len;
+	if (xo->flags & XFRM_GSO_SEGMENT)
 		skb->transport_header = skb->network_header + sizeof(struct ipv6hdr);
-	}
 
 	skb_reset_mac_len(skb);
 	pskb_pull(skb, skb->mac_len + x->props.header_len);
diff --git a/net/ipv6/xfrm6_policy.c b/net/ipv6/xfrm6_policy.c
index 885ade2..09fb44e 100644
--- a/net/ipv6/xfrm6_policy.c
+++ b/net/ipv6/xfrm6_policy.c
@@ -265,7 +265,7 @@ static void xfrm6_dst_ifdown(struct dst_entry *dst, struct net_device *dev,
 			in6_dev_put(xdst->u.rt6.rt6i_idev);
 			xdst->u.rt6.rt6i_idev = loopback_idev;
 			in6_dev_hold(loopback_idev);
-			xdst = (struct xfrm_dst *)xdst->u.dst.child;
+			xdst = (struct xfrm_dst *)xfrm_dst_child(&xdst->u.dst);
 		} while (xdst->u.dst.xfrm);
 
 		__in6_dev_put(loopback_idev);
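
The xfrm_dst_child() call sites in these two files track the relocation of
the child pointer out of struct dst_entry and into struct xfrm_dst, so a
plain dst no longer carries one. Roughly, and simplified against the real
accessor in include/net/xfrm.h:

    static inline struct dst_entry *xfrm_dst_child(const struct dst_entry *dst)
    {
    #ifdef CONFIG_XFRM
            if (dst->xfrm) {
                    const struct xfrm_dst *xdst = (const struct xfrm_dst *)dst;

                    return xdst->child;
            }
    #endif
            return NULL;
    }
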
diff --git a/net/l2tp/l2tp_core.c b/net/l2tp/l2tp_core.c
index 115918a..62285fc 100644
--- a/net/l2tp/l2tp_core.c
+++ b/net/l2tp/l2tp_core.c
@@ -662,10 +662,9 @@ static int l2tp_recv_data_seq(struct l2tp_session *session, struct sk_buff *skb)
  * |x|S|x|x|x|x|x|x|              Sequence Number                  |
  * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
  *
- * Cookie value, sublayer format and offset (pad) are negotiated with
- * the peer when the session is set up. Unlike L2TPv2, we do not need
- * to parse the packet header to determine if optional fields are
- * present.
+ * Cookie value and sublayer format are negotiated with the peer when
+ * the session is set up. Unlike L2TPv2, we do not need to parse the
+ * packet header to determine if optional fields are present.
  *
  * Caller must already have parsed the frame and determined that it is
  * a data (not control) frame before coming here. Fields up to the
@@ -780,10 +779,8 @@ void l2tp_recv_common(struct l2tp_session *session, struct sk_buff *skb,
 		}
 	}
 
-	/* Session data offset is handled differently for L2TPv2 and
-	 * L2TPv3. For L2TPv2, there is an optional 16-bit value in
-	 * the header. For L2TPv3, the offset is negotiated using AVPs
-	 * in the session setup control protocol.
+	/* Session data offset is defined only for L2TPv2 and is
+	 * indicated by an optional 16-bit value in the header.
 	 */
 	if (tunnel->version == L2TP_HDR_VER_2) {
 		/* If offset bit set, skip it. */
@@ -791,8 +788,7 @@ void l2tp_recv_common(struct l2tp_session *session, struct sk_buff *skb,
 			offset = ntohs(*(__be16 *)ptr);
 			ptr += 2 + offset;
 		}
-	} else
-		ptr += session->offset;
+	}
 
 	offset = ptr - optr;
 	if (!pskb_may_pull(skb, offset))
@@ -1068,8 +1064,6 @@ static int l2tp_build_l2tpv3_header(struct l2tp_session *session, void *buf)
 		}
 		bufp += session->l2specific_len;
 	}
-	if (session->offset)
-		bufp += session->offset;
 
 	return bufp - optr;
 }
@@ -1734,7 +1728,7 @@ void l2tp_session_set_header_len(struct l2tp_session *session, int version)
 		if (session->send_seq)
 			session->hdr_len += 4;
 	} else {
-		session->hdr_len = 4 + session->cookie_len + session->l2specific_len + session->offset;
+		session->hdr_len = 4 + session->cookie_len + session->l2specific_len;
 		if (session->tunnel->encap == L2TP_ENCAPTYPE_UDP)
 			session->hdr_len += 4;
 	}
@@ -1784,7 +1778,6 @@ struct l2tp_session *l2tp_session_create(int priv_size, struct l2tp_tunnel *tunn
 			session->recv_seq = cfg->recv_seq;
 			session->lns_mode = cfg->lns_mode;
 			session->reorder_timeout = cfg->reorder_timeout;
-			session->offset = cfg->offset;
 			session->l2specific_type = cfg->l2specific_type;
 			session->l2specific_len = cfg->l2specific_len;
 			session->cookie_len = cfg->cookie_len;
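
Every l2tp hunk in this patch serves one removal: the driver-private session
"offset", which padded the gap between the end of the L2TPv3 header and the
payload. As the updated comments spell out, a data offset exists on the wire
only for L2TPv2 (the optional 16-bit field signalled by the offset bit); RFC
3931 defines no such field for L2TPv3, so the config knob, its netlink
attribute handling and the header-length accounting all go, while the debugfs
format below keeps its column as a constant 0 to preserve the output layout.
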
diff --git a/net/l2tp/l2tp_core.h b/net/l2tp/l2tp_core.h
index 9534e16..c2e9bbd 100644
--- a/net/l2tp/l2tp_core.h
+++ b/net/l2tp/l2tp_core.h
@@ -59,7 +59,6 @@ struct l2tp_session_cfg {
 	int			debug;		/* bitmask of debug message
 						 * categories */
 	u16			vlan_id;	/* VLAN pseudowire only */
-	u16			offset;		/* offset to payload */
 	u16			l2specific_len;	/* Layer 2 specific length */
 	u16			l2specific_type; /* Layer 2 specific type */
 	u8			cookie[8];	/* optional cookie */
@@ -86,8 +85,6 @@ struct l2tp_session {
 	int			cookie_len;
 	u8			peer_cookie[8];
 	int			peer_cookie_len;
-	u16			offset;		/* offset from end of L2TP header
-						   to beginning of data */
 	u16			l2specific_len;
 	u16			l2specific_type;
 	u16			hdr_len;
diff --git a/net/l2tp/l2tp_debugfs.c b/net/l2tp/l2tp_debugfs.c
index eb69411..2c30587 100644
--- a/net/l2tp/l2tp_debugfs.c
+++ b/net/l2tp/l2tp_debugfs.c
@@ -180,8 +180,8 @@ static void l2tp_dfs_seq_session_show(struct seq_file *m, void *v)
 		   session->lns_mode ? "LNS" : "LAC",
 		   session->debug,
 		   jiffies_to_msecs(session->reorder_timeout));
-	seq_printf(m, "   offset %hu l2specific %hu/%hu\n",
-		   session->offset, session->l2specific_type, session->l2specific_len);
+	seq_printf(m, "   offset 0 l2specific %hu/%hu\n",
+		   session->l2specific_type, session->l2specific_len);
 	if (session->cookie_len) {
 		seq_printf(m, "   cookie %02x%02x%02x%02x",
 			   session->cookie[0], session->cookie[1],
diff --git a/net/l2tp/l2tp_netlink.c b/net/l2tp/l2tp_netlink.c
index a1f24fb..e1ca29f 100644
--- a/net/l2tp/l2tp_netlink.c
+++ b/net/l2tp/l2tp_netlink.c
@@ -547,9 +547,6 @@ static int l2tp_nl_cmd_session_create(struct sk_buff *skb, struct genl_info *inf
 	}
 
 	if (tunnel->version > 2) {
-		if (info->attrs[L2TP_ATTR_OFFSET])
-			cfg.offset = nla_get_u16(info->attrs[L2TP_ATTR_OFFSET]);
-
 		if (info->attrs[L2TP_ATTR_DATA_SEQ])
 			cfg.data_seq = nla_get_u8(info->attrs[L2TP_ATTR_DATA_SEQ]);
 
diff --git a/net/mac80211/agg-rx.c b/net/mac80211/agg-rx.c
index d444752..a8b1616 100644
--- a/net/mac80211/agg-rx.c
+++ b/net/mac80211/agg-rx.c
@@ -153,27 +153,16 @@ EXPORT_SYMBOL(ieee80211_stop_rx_ba_session);
  */
 static void sta_rx_agg_session_timer_expired(struct timer_list *t)
 {
-	struct tid_ampdu_rx *tid_rx_timer =
-		from_timer(tid_rx_timer, t, session_timer);
-	struct sta_info *sta = tid_rx_timer->sta;
-	u8 tid = tid_rx_timer->tid;
-	struct tid_ampdu_rx *tid_rx;
+	struct tid_ampdu_rx *tid_rx = from_timer(tid_rx, t, session_timer);
+	struct sta_info *sta = tid_rx->sta;
+	u8 tid = tid_rx->tid;
 	unsigned long timeout;
 
-	rcu_read_lock();
-	tid_rx = rcu_dereference(sta->ampdu_mlme.tid_rx[tid]);
-	if (!tid_rx) {
-		rcu_read_unlock();
-		return;
-	}
-
 	timeout = tid_rx->last_rx + TU_TO_JIFFIES(tid_rx->timeout);
 	if (time_is_after_jiffies(timeout)) {
 		mod_timer(&tid_rx->session_timer, timeout);
-		rcu_read_unlock();
 		return;
 	}
-	rcu_read_unlock();
 
 	ht_dbg(sta->sdata, "RX session timer expired on %pM tid %d\n",
 	       sta->sta.addr, tid);
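
This and the matching agg-tx.c conversions below follow the same
timer_setup() pattern: the timer_list is embedded in the
tid_ampdu_rx/tid_ampdu_tx state itself, so the callback can recover its
container directly instead of re-looking it up under RCU and revalidating it.
from_timer() is container_of() dressed up for timer callbacks (definition
from <linux/timer.h>):

    #define from_timer(var, callback_timer, timer_fieldname) \
            container_of(callback_timer, typeof(*var), timer_fieldname)

Dropping the RCU dance is safe on the assumption the teardown path makes,
namely that the aggregation state del_timer_sync()s before it is freed, so a
running callback can never see a stale container.
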
@@ -415,10 +404,11 @@ void ___ieee80211_start_rx_ba_session(struct sta_info *sta,
 					  timeout);
 }
 
-void __ieee80211_start_rx_ba_session(struct sta_info *sta,
-				     u8 dialog_token, u16 timeout,
-				     u16 start_seq_num, u16 ba_policy, u16 tid,
-				     u16 buf_size, bool tx, bool auto_seq)
+static void __ieee80211_start_rx_ba_session(struct sta_info *sta,
+					    u8 dialog_token, u16 timeout,
+					    u16 start_seq_num, u16 ba_policy,
+					    u16 tid, u16 buf_size, bool tx,
+					    bool auto_seq)
 {
 	mutex_lock(&sta->ampdu_mlme.mtx);
 	___ieee80211_start_rx_ba_session(sta, dialog_token, timeout,
diff --git a/net/mac80211/agg-tx.c b/net/mac80211/agg-tx.c
index 5f8ab5b..595c662 100644
--- a/net/mac80211/agg-tx.c
+++ b/net/mac80211/agg-tx.c
@@ -392,7 +392,8 @@ int ___ieee80211_stop_tx_ba_session(struct sta_info *sta, u16 tid,
 	 * telling the driver. New packets will not go through since
 	 * the aggregation session is no longer OPERATIONAL.
 	 */
-	synchronize_net();
+	if (!local->in_reconfig)
+		synchronize_net();
 
 	tid_tx->stop_initiator = reason == AGG_STOP_PEER_REQUEST ?
 					WLAN_BACK_RECIPIENT :
@@ -429,18 +430,12 @@ int ___ieee80211_stop_tx_ba_session(struct sta_info *sta, u16 tid,
  */
 static void sta_addba_resp_timer_expired(struct timer_list *t)
 {
-	struct tid_ampdu_tx *tid_tx_timer =
-		from_timer(tid_tx_timer, t, addba_resp_timer);
-	struct sta_info *sta = tid_tx_timer->sta;
-	u8 tid = tid_tx_timer->tid;
-	struct tid_ampdu_tx *tid_tx;
+	struct tid_ampdu_tx *tid_tx = from_timer(tid_tx, t, addba_resp_timer);
+	struct sta_info *sta = tid_tx->sta;
+	u8 tid = tid_tx->tid;
 
 	/* check if the TID waits for addBA response */
-	rcu_read_lock();
-	tid_tx = rcu_dereference(sta->ampdu_mlme.tid_tx[tid]);
-	if (!tid_tx ||
-	    test_bit(HT_AGG_STATE_RESPONSE_RECEIVED, &tid_tx->state)) {
-		rcu_read_unlock();
+	if (test_bit(HT_AGG_STATE_RESPONSE_RECEIVED, &tid_tx->state)) {
 		ht_dbg(sta->sdata,
 		       "timer expired on %pM tid %d not expecting addBA response\n",
 		       sta->sta.addr, tid);
@@ -451,7 +446,6 @@ static void sta_addba_resp_timer_expired(struct timer_list *t)
 	       sta->sta.addr, tid);
 
 	ieee80211_stop_tx_ba_session(&sta->sta, tid);
-	rcu_read_unlock();
 }
 
 void ieee80211_tx_ba_session_handle_start(struct sta_info *sta, int tid)
@@ -529,29 +523,21 @@ void ieee80211_tx_ba_session_handle_start(struct sta_info *sta, int tid)
  */
 static void sta_tx_agg_session_timer_expired(struct timer_list *t)
 {
-	struct tid_ampdu_tx *tid_tx_timer =
-		from_timer(tid_tx_timer, t, session_timer);
-	struct sta_info *sta = tid_tx_timer->sta;
-	u8 tid = tid_tx_timer->tid;
-	struct tid_ampdu_tx *tid_tx;
+	struct tid_ampdu_tx *tid_tx = from_timer(tid_tx, t, session_timer);
+	struct sta_info *sta = tid_tx->sta;
+	u8 tid = tid_tx->tid;
 	unsigned long timeout;
 
-	rcu_read_lock();
-	tid_tx = rcu_dereference(sta->ampdu_mlme.tid_tx[tid]);
-	if (!tid_tx || test_bit(HT_AGG_STATE_STOPPING, &tid_tx->state)) {
-		rcu_read_unlock();
+	if (test_bit(HT_AGG_STATE_STOPPING, &tid_tx->state)) {
 		return;
 	}
 
 	timeout = tid_tx->last_tx + TU_TO_JIFFIES(tid_tx->timeout);
 	if (time_is_after_jiffies(timeout)) {
 		mod_timer(&tid_tx->session_timer, timeout);
-		rcu_read_unlock();
 		return;
 	}
 
-	rcu_read_unlock();
-
 	ht_dbg(sta->sdata, "tx session timer expired on %pM tid %d\n",
 	       sta->sta.addr, tid);
 
diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c
index fb15d3b9..46028e1 100644
--- a/net/mac80211/cfg.c
+++ b/net/mac80211/cfg.c
@@ -573,10 +573,12 @@ static int ieee80211_get_key(struct wiphy *wiphy, struct net_device *dev,
 	case WLAN_CIPHER_SUITE_BIP_CMAC_256:
 		BUILD_BUG_ON(offsetof(typeof(kseq), ccmp) !=
 			     offsetof(typeof(kseq), aes_cmac));
+		/* fall through */
 	case WLAN_CIPHER_SUITE_BIP_GMAC_128:
 	case WLAN_CIPHER_SUITE_BIP_GMAC_256:
 		BUILD_BUG_ON(offsetof(typeof(kseq), ccmp) !=
 			     offsetof(typeof(kseq), aes_gmac));
+		/* fall through */
 	case WLAN_CIPHER_SUITE_GCMP:
 	case WLAN_CIPHER_SUITE_GCMP_256:
 		BUILD_BUG_ON(offsetof(typeof(kseq), ccmp) !=
@@ -2205,6 +2207,7 @@ static int ieee80211_scan(struct wiphy *wiphy,
 		 * for now fall through to allow scanning only when
 		 * beaconing hasn't been configured yet
 		 */
+		/* fall through */
 	case NL80211_IFTYPE_AP:
 		/*
 		 * If the scan has been forced (and the driver supports
@@ -2373,10 +2376,17 @@ static int ieee80211_set_tx_power(struct wiphy *wiphy,
 	struct ieee80211_sub_if_data *sdata;
 	enum nl80211_tx_power_setting txp_type = type;
 	bool update_txp_type = false;
+	bool has_monitor = false;
 
 	if (wdev) {
 		sdata = IEEE80211_WDEV_TO_SUB_IF(wdev);
 
+		if (sdata->vif.type == NL80211_IFTYPE_MONITOR) {
+			sdata = rtnl_dereference(local->monitor_sdata);
+			if (!sdata)
+				return -EOPNOTSUPP;
+		}
+
 		switch (type) {
 		case NL80211_TX_POWER_AUTOMATIC:
 			sdata->user_power_level = IEEE80211_UNSET_POWER_LEVEL;
@@ -2415,15 +2425,34 @@ static int ieee80211_set_tx_power(struct wiphy *wiphy,
 
 	mutex_lock(&local->iflist_mtx);
 	list_for_each_entry(sdata, &local->interfaces, list) {
+		if (sdata->vif.type == NL80211_IFTYPE_MONITOR) {
+			has_monitor = true;
+			continue;
+		}
 		sdata->user_power_level = local->user_power_level;
 		if (txp_type != sdata->vif.bss_conf.txpower_type)
 			update_txp_type = true;
 		sdata->vif.bss_conf.txpower_type = txp_type;
 	}
-	list_for_each_entry(sdata, &local->interfaces, list)
+	list_for_each_entry(sdata, &local->interfaces, list) {
+		if (sdata->vif.type == NL80211_IFTYPE_MONITOR)
+			continue;
 		ieee80211_recalc_txpower(sdata, update_txp_type);
+	}
 	mutex_unlock(&local->iflist_mtx);
 
+	if (has_monitor) {
+		sdata = rtnl_dereference(local->monitor_sdata);
+		if (sdata) {
+			sdata->user_power_level = local->user_power_level;
+			if (txp_type != sdata->vif.bss_conf.txpower_type)
+				update_txp_type = true;
+			sdata->vif.bss_conf.txpower_type = txp_type;
+
+			ieee80211_recalc_txpower(sdata, update_txp_type);
+		}
+	}
+
 	return 0;
 }
 
diff --git a/net/mac80211/debugfs.c b/net/mac80211/debugfs.c
index 5fae001f..1f466d1 100644
--- a/net/mac80211/debugfs.c
+++ b/net/mac80211/debugfs.c
@@ -211,6 +211,7 @@ static const char *hw_flag_names[] = {
 	FLAG(TX_FRAG_LIST),
 	FLAG(REPORTS_LOW_ACK),
 	FLAG(SUPPORTS_TX_FRAG),
+	FLAG(SUPPORTS_TDLS_BUFFER_STA),
 #undef FLAG
 };
 
diff --git a/net/mac80211/driver-ops.h b/net/mac80211/driver-ops.h
index c7f93fd..4d82fe7 100644
--- a/net/mac80211/driver-ops.h
+++ b/net/mac80211/driver-ops.h
@@ -165,7 +165,8 @@ static inline void drv_bss_info_changed(struct ieee80211_local *local,
 	if (WARN_ON_ONCE(sdata->vif.type == NL80211_IFTYPE_P2P_DEVICE ||
 			 sdata->vif.type == NL80211_IFTYPE_NAN ||
 			 (sdata->vif.type == NL80211_IFTYPE_MONITOR &&
-			  !sdata->vif.mu_mimo_owner)))
+			  !sdata->vif.mu_mimo_owner &&
+			  !(changed & BSS_CHANGED_TXPOWER))))
 		return;
 
 	if (!check_sdata_in_driver(sdata))
diff --git a/net/mac80211/ht.c b/net/mac80211/ht.c
index 1621b6a..d752353 100644
--- a/net/mac80211/ht.c
+++ b/net/mac80211/ht.c
@@ -492,6 +492,7 @@ int ieee80211_send_smps_action(struct ieee80211_sub_if_data *sdata,
 	case IEEE80211_SMPS_AUTOMATIC:
 	case IEEE80211_SMPS_NUM_MODES:
 		WARN_ON(1);
+		/* fall through */
 	case IEEE80211_SMPS_OFF:
 		action_frame->u.action.u.ht_smps.smps_control =
 				WLAN_HT_SMPS_CONTROL_DISABLED;
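
The /* fall through */ comments added here and throughout this patch (cfg.c,
iface.c, mesh.c, mesh_hwmp.c, mesh_plink.c, mlme.c, offchannel.c) mark
deliberate case fall-through so that static checkers and GCC's
-Wimplicit-fallthrough, which recognizes exactly this comment form, stay
quiet. The shape of the pattern, on made-up names:

    switch (mode) {
    case MODE_INVALID:
            WARN_ON(1);             /* complain, then behave as "off" */
            /* fall through */
    case MODE_OFF:
            ctrl = CONTROL_DISABLED;
            break;
    default:
            ctrl = CONTROL_ENABLED;
            break;
    }
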
diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h
index 885d00b..2690002 100644
--- a/net/mac80211/ieee80211_i.h
+++ b/net/mac80211/ieee80211_i.h
@@ -1757,10 +1757,6 @@ void ___ieee80211_stop_rx_ba_session(struct sta_info *sta, u16 tid,
 				     u16 initiator, u16 reason, bool stop);
 void __ieee80211_stop_rx_ba_session(struct sta_info *sta, u16 tid,
 				    u16 initiator, u16 reason, bool stop);
-void __ieee80211_start_rx_ba_session(struct sta_info *sta,
-				     u8 dialog_token, u16 timeout,
-				     u16 start_seq_num, u16 ba_policy, u16 tid,
-				     u16 buf_size, bool tx, bool auto_seq);
 void ___ieee80211_start_rx_ba_session(struct sta_info *sta,
 				      u8 dialog_token, u16 timeout,
 				      u16 start_seq_num, u16 ba_policy, u16 tid,
diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c
index 13b16f9..5fe01f8 100644
--- a/net/mac80211/iface.c
+++ b/net/mac80211/iface.c
@@ -1474,7 +1474,7 @@ static void ieee80211_setup_sdata(struct ieee80211_sub_if_data *sdata,
 		break;
 	case NL80211_IFTYPE_UNSPECIFIED:
 	case NUM_NL80211_IFTYPES:
-		BUG();
+		WARN_ON(1);
 		break;
 	}
 
@@ -1633,7 +1633,7 @@ static void ieee80211_assign_perm_addr(struct ieee80211_local *local,
 				goto out_unlock;
 			}
 		}
-		/* otherwise fall through */
+		/* fall through */
 	default:
 		/* assign a new address if possible -- try n_addresses first */
 		for (i = 0; i < local->hw.wiphy->n_addresses; i++) {
diff --git a/net/mac80211/key.c b/net/mac80211/key.c
index 9380493..aee05ec 100644
--- a/net/mac80211/key.c
+++ b/net/mac80211/key.c
@@ -178,13 +178,17 @@ static int ieee80211_key_enable_hw_accel(struct ieee80211_key *key)
 	if (!ret) {
 		key->flags |= KEY_FLAG_UPLOADED_TO_HARDWARE;
 
-		if (!((key->conf.flags & IEEE80211_KEY_FLAG_GENERATE_MMIC) ||
+		if (!((key->conf.flags & (IEEE80211_KEY_FLAG_GENERATE_MMIC |
+					   IEEE80211_KEY_FLAG_PUT_MIC_SPACE)) ||
 		      (key->conf.flags & IEEE80211_KEY_FLAG_RESERVE_TAILROOM)))
 			decrease_tailroom_need_count(sdata, 1);
 
 		WARN_ON((key->conf.flags & IEEE80211_KEY_FLAG_PUT_IV_SPACE) &&
 			(key->conf.flags & IEEE80211_KEY_FLAG_GENERATE_IV));
 
+		WARN_ON((key->conf.flags & IEEE80211_KEY_FLAG_PUT_MIC_SPACE) &&
+			(key->conf.flags & IEEE80211_KEY_FLAG_GENERATE_MMIC));
+
 		return 0;
 	}
 
@@ -237,7 +241,8 @@ static void ieee80211_key_disable_hw_accel(struct ieee80211_key *key)
 	sta = key->sta;
 	sdata = key->sdata;
 
-	if (!((key->conf.flags & IEEE80211_KEY_FLAG_GENERATE_MMIC) ||
+	if (!((key->conf.flags & (IEEE80211_KEY_FLAG_GENERATE_MMIC |
+				   IEEE80211_KEY_FLAG_PUT_MIC_SPACE)) ||
 	      (key->conf.flags & IEEE80211_KEY_FLAG_RESERVE_TAILROOM)))
 		increment_tailroom_need_count(sdata);
 
@@ -1104,7 +1109,8 @@ void ieee80211_remove_key(struct ieee80211_key_conf *keyconf)
 	if (key->flags & KEY_FLAG_UPLOADED_TO_HARDWARE) {
 		key->flags &= ~KEY_FLAG_UPLOADED_TO_HARDWARE;
 
-		if (!((key->conf.flags & IEEE80211_KEY_FLAG_GENERATE_MMIC) ||
+		if (!((key->conf.flags & (IEEE80211_KEY_FLAG_GENERATE_MMIC |
+					   IEEE80211_KEY_FLAG_PUT_MIC_SPACE)) ||
 		      (key->conf.flags & IEEE80211_KEY_FLAG_RESERVE_TAILROOM)))
 			increment_tailroom_need_count(key->sdata);
 	}
diff --git a/net/mac80211/main.c b/net/mac80211/main.c
index e054a2f..0785d04 100644
--- a/net/mac80211/main.c
+++ b/net/mac80211/main.c
@@ -263,6 +263,9 @@ static void ieee80211_restart_work(struct work_struct *work)
 	flush_delayed_work(&local->roc_work);
 	flush_work(&local->hw_roc_done);
 
+	/* wait for all packet processing to be done */
+	synchronize_net();
+
 	ieee80211_reconfig(local);
 	rtnl_unlock();
 }
diff --git a/net/mac80211/mesh.c b/net/mac80211/mesh.c
index 5e27364..73ac607 100644
--- a/net/mac80211/mesh.c
+++ b/net/mac80211/mesh.c
@@ -989,8 +989,10 @@ ieee80211_mesh_process_chnswitch(struct ieee80211_sub_if_data *sdata,
 	switch (sdata->vif.bss_conf.chandef.width) {
 	case NL80211_CHAN_WIDTH_20_NOHT:
 		sta_flags |= IEEE80211_STA_DISABLE_HT;
+		/* fall through */
 	case NL80211_CHAN_WIDTH_20:
 		sta_flags |= IEEE80211_STA_DISABLE_40MHZ;
+		/* fall through */
 	case NL80211_CHAN_WIDTH_40:
 		sta_flags |= IEEE80211_STA_DISABLE_VHT;
 		break;
diff --git a/net/mac80211/mesh_hwmp.c b/net/mac80211/mesh_hwmp.c
index 4394463a..35ad398 100644
--- a/net/mac80211/mesh_hwmp.c
+++ b/net/mac80211/mesh_hwmp.c
@@ -1250,6 +1250,7 @@ void mesh_path_tx_root_frame(struct ieee80211_sub_if_data *sdata)
 		break;
 	case IEEE80211_PROACTIVE_PREQ_WITH_PREP:
 		flags |= IEEE80211_PREQ_PROACTIVE_PREP_FLAG;
+		/* fall through */
 	case IEEE80211_PROACTIVE_PREQ_NO_PREP:
 		interval = ifmsh->mshcfg.dot11MeshHWMPactivePathToRootTimeout;
 		target_flags |= IEEE80211_PREQ_TO_FLAG |
diff --git a/net/mac80211/mesh_pathtbl.c b/net/mac80211/mesh_pathtbl.c
index 86c8dfe..a512562 100644
--- a/net/mac80211/mesh_pathtbl.c
+++ b/net/mac80211/mesh_pathtbl.c
@@ -257,9 +257,7 @@ __mesh_path_lookup_by_idx(struct mesh_table *tbl, int idx)
 	if (ret)
 		return NULL;
 
-	ret = rhashtable_walk_start(&iter);
-	if (ret && ret != -EAGAIN)
-		goto err;
+	rhashtable_walk_start(&iter);
 
 	while ((mpath = rhashtable_walk_next(&iter))) {
 		if (IS_ERR(mpath) && PTR_ERR(mpath) == -EAGAIN)
@@ -269,7 +267,6 @@ __mesh_path_lookup_by_idx(struct mesh_table *tbl, int idx)
 		if (i++ == idx)
 			break;
 	}
-err:
 	rhashtable_walk_stop(&iter);
 	rhashtable_walk_exit(&iter);
 
@@ -513,9 +510,7 @@ void mesh_plink_broken(struct sta_info *sta)
 	if (ret)
 		return;
 
-	ret = rhashtable_walk_start(&iter);
-	if (ret && ret != -EAGAIN)
-		goto out;
+	rhashtable_walk_start(&iter);
 
 	while ((mpath = rhashtable_walk_next(&iter))) {
 		if (IS_ERR(mpath) && PTR_ERR(mpath) == -EAGAIN)
@@ -535,7 +530,6 @@ void mesh_plink_broken(struct sta_info *sta)
 				WLAN_REASON_MESH_PATH_DEST_UNREACHABLE, bcast);
 		}
 	}
-out:
 	rhashtable_walk_stop(&iter);
 	rhashtable_walk_exit(&iter);
 }
@@ -584,9 +578,7 @@ void mesh_path_flush_by_nexthop(struct sta_info *sta)
 	if (ret)
 		return;
 
-	ret = rhashtable_walk_start(&iter);
-	if (ret && ret != -EAGAIN)
-		goto out;
+	rhashtable_walk_start(&iter);
 
 	while ((mpath = rhashtable_walk_next(&iter))) {
 		if (IS_ERR(mpath) && PTR_ERR(mpath) == -EAGAIN)
@@ -597,7 +589,7 @@ void mesh_path_flush_by_nexthop(struct sta_info *sta)
 		if (rcu_access_pointer(mpath->next_hop) == sta)
 			__mesh_path_del(tbl, mpath);
 	}
-out:
+
 	rhashtable_walk_stop(&iter);
 	rhashtable_walk_exit(&iter);
 }
@@ -614,9 +606,7 @@ static void mpp_flush_by_proxy(struct ieee80211_sub_if_data *sdata,
 	if (ret)
 		return;
 
-	ret = rhashtable_walk_start(&iter);
-	if (ret && ret != -EAGAIN)
-		goto out;
+	rhashtable_walk_start(&iter);
 
 	while ((mpath = rhashtable_walk_next(&iter))) {
 		if (IS_ERR(mpath) && PTR_ERR(mpath) == -EAGAIN)
@@ -627,7 +617,7 @@ static void mpp_flush_by_proxy(struct ieee80211_sub_if_data *sdata,
 		if (ether_addr_equal(mpath->mpp, proxy))
 			__mesh_path_del(tbl, mpath);
 	}
-out:
+
 	rhashtable_walk_stop(&iter);
 	rhashtable_walk_exit(&iter);
 }
@@ -642,9 +632,7 @@ static void table_flush_by_iface(struct mesh_table *tbl)
 	if (ret)
 		return;
 
-	ret = rhashtable_walk_start(&iter);
-	if (ret && ret != -EAGAIN)
-		goto out;
+	rhashtable_walk_start(&iter);
 
 	while ((mpath = rhashtable_walk_next(&iter))) {
 		if (IS_ERR(mpath) && PTR_ERR(mpath) == -EAGAIN)
@@ -653,7 +641,7 @@ static void table_flush_by_iface(struct mesh_table *tbl)
 			break;
 		__mesh_path_del(tbl, mpath);
 	}
-out:
+
 	rhashtable_walk_stop(&iter);
 	rhashtable_walk_exit(&iter);
 }
@@ -873,9 +861,7 @@ void mesh_path_tbl_expire(struct ieee80211_sub_if_data *sdata,
 	if (ret)
 		return;
 
-	ret = rhashtable_walk_start(&iter);
-	if (ret && ret != -EAGAIN)
-		goto out;
+	rhashtable_walk_start(&iter);
 
 	while ((mpath = rhashtable_walk_next(&iter))) {
 		if (IS_ERR(mpath) && PTR_ERR(mpath) == -EAGAIN)
@@ -887,7 +873,7 @@ void mesh_path_tbl_expire(struct ieee80211_sub_if_data *sdata,
 		     time_after(jiffies, mpath->exp_time + MESH_PATH_EXPIRE))
 			__mesh_path_del(tbl, mpath);
 	}
-out:
+
 	rhashtable_walk_stop(&iter);
 	rhashtable_walk_exit(&iter);
 }
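All six walkers in this file drop the same error check because rhashtable_walk_start() no longer returns a value in this merge window; only rhashtable_walk_next() can still report a concurrent resize via -EAGAIN. A kernel-style sketch of the resulting canonical walker (not buildable in userspace; "my_table" and "struct my_obj" are hypothetical stand-ins):

    static void walk_all(struct rhashtable *my_table)
    {
    	struct rhashtable_iter iter;
    	struct my_obj *obj;

    	rhashtable_walk_enter(my_table, &iter);
    	rhashtable_walk_start(&iter);	/* now returns void */

    	while ((obj = rhashtable_walk_next(&iter))) {
    		if (IS_ERR(obj)) {
    			if (PTR_ERR(obj) == -EAGAIN)
    				continue;	/* a resize raced with us */
    			break;
    		}
    		/* inspect obj here; RCU read lock held until _stop() */
    	}

    	rhashtable_walk_stop(&iter);
    	rhashtable_walk_exit(&iter);
    }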
diff --git a/net/mac80211/mesh_plink.c b/net/mac80211/mesh_plink.c
index e2d00cc..0f6c9ca 100644
--- a/net/mac80211/mesh_plink.c
+++ b/net/mac80211/mesh_plink.c
@@ -672,7 +672,7 @@ void mesh_plink_timer(struct timer_list *t)
 			break;
 		}
 		reason = WLAN_REASON_MESH_MAX_RETRIES;
-		/* fall through on else */
+		/* fall through */
 	case NL80211_PLINK_CNF_RCVD:
 		/* confirm timer */
 		if (!reason)
diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
index c244691..39b660b 100644
--- a/net/mac80211/mlme.c
+++ b/net/mac80211/mlme.c
@@ -473,6 +473,7 @@ static void ieee80211_add_ht_ie(struct ieee80211_sub_if_data *sdata,
 	case IEEE80211_SMPS_AUTOMATIC:
 	case IEEE80211_SMPS_NUM_MODES:
 		WARN_ON(1);
+		/* fall through */
 	case IEEE80211_SMPS_OFF:
 		cap |= WLAN_HT_CAP_SM_PS_DISABLED <<
 			IEEE80211_HT_CAP_SM_PS_SHIFT;
@@ -2861,10 +2862,11 @@ static bool ieee80211_assoc_success(struct ieee80211_sub_if_data *sdata,
 	aid = le16_to_cpu(mgmt->u.assoc_resp.aid);
 	capab_info = le16_to_cpu(mgmt->u.assoc_resp.capab_info);
 
-	if ((aid & (BIT(15) | BIT(14))) != (BIT(15) | BIT(14)))
-		sdata_info(sdata, "invalid AID value 0x%x; bits 15:14 not set\n",
-			   aid);
-	aid &= ~(BIT(15) | BIT(14));
+	/*
+	 * The 5 MSBs of the AID field are reserved
+	 * (802.11-2016 9.4.1.8 AID field)
+	 */
+	aid &= 0x7ff;
 
 	ifmgd->broken_ap = false;
 
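The replaced warning was noise: whether or not an AP sets bits 15:14 (which older spec revisions required to be set), only the low 11 bits carry the association ID, so masking is all that is needed. A self-contained userspace illustration:

    #include <assert.h>
    #include <stdint.h>

    /* Per 802.11-2016 9.4.1.8 only the low 11 bits of the AID field
     * carry the association ID; the top 5 bits are reserved and are
     * simply masked off rather than validated. */
    static uint16_t aid_from_field(uint16_t field)
    {
    	return field & 0x7ff;
    }

    int main(void)
    {
    	/* APs that set bits 15:14 and buggy ones that don't decode
    	 * to the same AID once the reserved bits are masked. */
    	assert(aid_from_field(0xc001) == 1);
    	assert(aid_from_field(0x0001) == 1);
    	return 0;
    }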
diff --git a/net/mac80211/offchannel.c b/net/mac80211/offchannel.c
index faf4f60..f1d40b6 100644
--- a/net/mac80211/offchannel.c
+++ b/net/mac80211/offchannel.c
@@ -801,14 +801,14 @@ int ieee80211_mgmt_tx(struct wiphy *wiphy, struct wireless_dev *wdev,
 	case NL80211_IFTYPE_ADHOC:
 		if (!sdata->vif.bss_conf.ibss_joined)
 			need_offchan = true;
-		/* fall through */
 #ifdef CONFIG_MAC80211_MESH
+		/* fall through */
 	case NL80211_IFTYPE_MESH_POINT:
 		if (ieee80211_vif_is_mesh(&sdata->vif) &&
 		    !sdata->u.mesh.mesh_id_len)
 			need_offchan = true;
-		/* fall through */
 #endif
+		/* fall through */
 	case NL80211_IFTYPE_AP:
 	case NL80211_IFTYPE_AP_VLAN:
 	case NL80211_IFTYPE_P2P_GO:
diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
index 4daafb0..fd58061 100644
--- a/net/mac80211/rx.c
+++ b/net/mac80211/rx.c
@@ -1607,23 +1607,16 @@ ieee80211_rx_h_sta_process(struct ieee80211_rx_data *rx)
 
 	/*
 	 * Change STA power saving mode only at the end of a frame
-	 * exchange sequence.
+	 * exchange sequence, and only for a data or management
+	 * frame as specified in IEEE 802.11-2016 11.2.3.2
 	 */
 	if (!ieee80211_hw_check(&sta->local->hw, AP_LINK_PS) &&
 	    !ieee80211_has_morefrags(hdr->frame_control) &&
-	    !ieee80211_is_back_req(hdr->frame_control) &&
+	    (ieee80211_is_mgmt(hdr->frame_control) ||
+	     ieee80211_is_data(hdr->frame_control)) &&
 	    !(status->rx_flags & IEEE80211_RX_DEFERRED_RELEASE) &&
 	    (rx->sdata->vif.type == NL80211_IFTYPE_AP ||
-	     rx->sdata->vif.type == NL80211_IFTYPE_AP_VLAN) &&
-	    /*
-	     * PM bit is only checked in frames where it isn't reserved,
-	     * in AP mode it's reserved in non-bufferable management frames
-	     * (cf. IEEE 802.11-2012 8.2.4.1.7 Power Management field)
-	     * BAR frames should be ignored as specified in
-	     * IEEE 802.11-2012 10.2.1.2.
-	     */
-	    (!ieee80211_is_mgmt(hdr->frame_control) ||
-	     ieee80211_is_bufferable_mmpdu(hdr->frame_control))) {
+	     rx->sdata->vif.type == NL80211_IFTYPE_AP_VLAN)) {
 		if (test_sta_flag(sta, WLAN_STA_PS_STA)) {
 			if (!ieee80211_has_pm(hdr->frame_control))
 				sta_ps_end(sta);
diff --git a/net/mac80211/tdls.c b/net/mac80211/tdls.c
index 91093d4..5cd5e6e 100644
--- a/net/mac80211/tdls.c
+++ b/net/mac80211/tdls.c
@@ -47,6 +47,8 @@ static void ieee80211_tdls_add_ext_capab(struct ieee80211_sub_if_data *sdata,
 			   NL80211_FEATURE_TDLS_CHANNEL_SWITCH;
 	bool wider_band = ieee80211_hw_check(&local->hw, TDLS_WIDER_BW) &&
 			  !ifmgd->tdls_wider_bw_prohibited;
+	bool buffer_sta = ieee80211_hw_check(&local->hw,
+					     SUPPORTS_TDLS_BUFFER_STA);
 	struct ieee80211_supported_band *sband = ieee80211_get_sband(sdata);
 	bool vht = sband && sband->vht_cap.vht_supported;
 	u8 *pos = skb_put(skb, 10);
@@ -56,7 +58,8 @@ static void ieee80211_tdls_add_ext_capab(struct ieee80211_sub_if_data *sdata,
 	*pos++ = 0x0;
 	*pos++ = 0x0;
 	*pos++ = 0x0;
-	*pos++ = chan_switch ? WLAN_EXT_CAPA4_TDLS_CHAN_SWITCH : 0;
+	*pos++ = (chan_switch ? WLAN_EXT_CAPA4_TDLS_CHAN_SWITCH : 0) |
+		 (buffer_sta ? WLAN_EXT_CAPA4_TDLS_BUFFER_STA : 0);
 	*pos++ = WLAN_EXT_CAPA5_TDLS_ENABLED;
 	*pos++ = 0;
 	*pos++ = 0;
@@ -236,6 +239,7 @@ static enum ieee80211_ac_numbers ieee80211_ac_from_wmm(int ac)
 	switch (ac) {
 	default:
 		WARN_ON_ONCE(1);
+		/* fall through */
 	case 0:
 		return IEEE80211_AC_BE;
 	case 1:
diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
index 3160954..25904af 100644
--- a/net/mac80211/tx.c
+++ b/net/mac80211/tx.c
@@ -2922,7 +2922,9 @@ void ieee80211_check_fast_xmit(struct sta_info *sta)
 
 		gen_iv = build.key->conf.flags & IEEE80211_KEY_FLAG_GENERATE_IV;
 		iv_spc = build.key->conf.flags & IEEE80211_KEY_FLAG_PUT_IV_SPACE;
-		mmic = build.key->conf.flags & IEEE80211_KEY_FLAG_GENERATE_MMIC;
+		mmic = build.key->conf.flags &
+			(IEEE80211_KEY_FLAG_GENERATE_MMIC |
+			 IEEE80211_KEY_FLAG_PUT_MIC_SPACE);
 
 		/* don't handle software crypto */
 		if (!(build.key->flags & KEY_FLAG_UPLOADED_TO_HARDWARE))
diff --git a/net/mac80211/util.c b/net/mac80211/util.c
index d57e5f6..1f82191 100644
--- a/net/mac80211/util.c
+++ b/net/mac80211/util.c
@@ -2110,15 +2110,6 @@ int ieee80211_reconfig(struct ieee80211_local *local)
 		cfg80211_sched_scan_stopped_rtnl(local->hw.wiphy, 0);
 
  wake_up:
-	if (local->in_reconfig) {
-		local->in_reconfig = false;
-		barrier();
-
-		/* Restart deferred ROCs */
-		mutex_lock(&local->mtx);
-		ieee80211_start_next_roc(local);
-		mutex_unlock(&local->mtx);
-	}
 
 	if (local->monitors == local->open_count && local->monitors > 0)
 		ieee80211_add_virtual_monitor(local);
@@ -2146,6 +2137,16 @@ int ieee80211_reconfig(struct ieee80211_local *local)
 		mutex_unlock(&local->sta_mtx);
 	}
 
+	if (local->in_reconfig) {
+		local->in_reconfig = false;
+		barrier();
+
+		/* Restart deferred ROCs */
+		mutex_lock(&local->mtx);
+		ieee80211_start_next_roc(local);
+		mutex_unlock(&local->mtx);
+	}
+
 	ieee80211_wake_queues_by_reason(hw, IEEE80211_MAX_QUEUE_MAP,
 					IEEE80211_QUEUE_STOP_REASON_SUSPEND,
 					false);
diff --git a/net/mac80211/wme.c b/net/mac80211/wme.c
index 3e3d301..5f7c963 100644
--- a/net/mac80211/wme.c
+++ b/net/mac80211/wme.c
@@ -165,6 +165,7 @@ u16 ieee80211_select_queue(struct ieee80211_sub_if_data *sdata,
 			qos = sta->sta.wme;
 			break;
 		}
+		/* fall through */
 	case NL80211_IFTYPE_AP:
 		ra = skb->data;
 		break;
diff --git a/net/mac80211/wpa.c b/net/mac80211/wpa.c
index b58722d..785056c 100644
--- a/net/mac80211/wpa.c
+++ b/net/mac80211/wpa.c
@@ -1,7 +1,7 @@
 /*
  * Copyright 2002-2004, Instant802 Networks, Inc.
  * Copyright 2008, Jouni Malinen <j@w1.fi>
- * Copyright (C) 2016 Intel Deutschland GmbH
+ * Copyright (C) 2016-2017 Intel Deutschland GmbH
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as
@@ -59,8 +59,9 @@ ieee80211_tx_h_michael_mic_add(struct ieee80211_tx_data *tx)
 	if (info->control.hw_key &&
 	    (info->flags & IEEE80211_TX_CTL_DONTFRAG ||
 	     ieee80211_hw_check(&tx->local->hw, SUPPORTS_TX_FRAG)) &&
-	    !(tx->key->conf.flags & IEEE80211_KEY_FLAG_GENERATE_MMIC)) {
-		/* hwaccel - with no need for SW-generated MMIC */
+	    !(tx->key->conf.flags & (IEEE80211_KEY_FLAG_GENERATE_MMIC |
+				     IEEE80211_KEY_FLAG_PUT_MIC_SPACE))) {
+		/* hwaccel - with no need for SW-generated MMIC or MIC space */
 		return TX_CONTINUE;
 	}
 
@@ -75,8 +76,15 @@ ieee80211_tx_h_michael_mic_add(struct ieee80211_tx_data *tx)
 		 skb_tailroom(skb), tail))
 		return TX_DROP;
 
-	key = &tx->key->conf.key[NL80211_TKIP_DATA_OFFSET_TX_MIC_KEY];
 	mic = skb_put(skb, MICHAEL_MIC_LEN);
+
+	if (tx->key->conf.flags & IEEE80211_KEY_FLAG_PUT_MIC_SPACE) {
+		/* Zeroed MIC can help with debugging */
+		memset(mic, 0, MICHAEL_MIC_LEN);
+		return TX_CONTINUE;
+	}
+
+	key = &tx->key->conf.key[NL80211_TKIP_DATA_OFFSET_TX_MIC_KEY];
 	michael_mic(key, hdr, data, data_len, mic);
 	if (unlikely(info->flags & IEEE80211_TX_INTFL_TKIP_MIC_FAILURE))
 		mic[0]++;
diff --git a/net/mpls/af_mpls.c b/net/mpls/af_mpls.c
index 8ca9915..5dce833 100644
--- a/net/mpls/af_mpls.c
+++ b/net/mpls/af_mpls.c
@@ -2510,12 +2510,15 @@ static int __init mpls_init(void)
 
 	rtnl_af_register(&mpls_af_ops);
 
-	rtnl_register(PF_MPLS, RTM_NEWROUTE, mpls_rtm_newroute, NULL, 0);
-	rtnl_register(PF_MPLS, RTM_DELROUTE, mpls_rtm_delroute, NULL, 0);
-	rtnl_register(PF_MPLS, RTM_GETROUTE, mpls_getroute, mpls_dump_routes,
-		      0);
-	rtnl_register(PF_MPLS, RTM_GETNETCONF, mpls_netconf_get_devconf,
-		      mpls_netconf_dump_devconf, 0);
+	rtnl_register_module(THIS_MODULE, PF_MPLS, RTM_NEWROUTE,
+			     mpls_rtm_newroute, NULL, 0);
+	rtnl_register_module(THIS_MODULE, PF_MPLS, RTM_DELROUTE,
+			     mpls_rtm_delroute, NULL, 0);
+	rtnl_register_module(THIS_MODULE, PF_MPLS, RTM_GETROUTE,
+			     mpls_getroute, mpls_dump_routes, 0);
+	rtnl_register_module(THIS_MODULE, PF_MPLS, RTM_GETNETCONF,
+			     mpls_netconf_get_devconf,
+			     mpls_netconf_dump_devconf, 0);
 	err = ipgre_tunnel_encap_add_mpls_ops();
 	if (err)
 		pr_err("Can't add mpls over gre tunnel ops\n");
diff --git a/net/ncsi/ncsi-aen.c b/net/ncsi/ncsi-aen.c
index 67e708e..e7b05de 100644
--- a/net/ncsi/ncsi-aen.c
+++ b/net/ncsi/ncsi-aen.c
@@ -143,43 +143,14 @@ static int ncsi_aen_handler_hncdsc(struct ncsi_dev_priv *ndp,
 	if (!nc)
 		return -ENODEV;
 
-	/* If the channel is active one, we need reconfigure it */
 	spin_lock_irqsave(&nc->lock, flags);
 	ncm = &nc->modes[NCSI_MODE_LINK];
 	hncdsc = (struct ncsi_aen_hncdsc_pkt *)h;
 	ncm->data[3] = ntohl(hncdsc->status);
-	netdev_info(ndp->ndev.dev, "NCSI: HNCDSC AEN - channel %u state %s\n",
-		    nc->id, ncm->data[3] & 0x3 ? "up" : "down");
-	if (!list_empty(&nc->link) ||
-	    nc->state != NCSI_CHANNEL_ACTIVE) {
-		spin_unlock_irqrestore(&nc->lock, flags);
-		return 0;
-	}
-
 	spin_unlock_irqrestore(&nc->lock, flags);
-	if (!(ndp->flags & NCSI_DEV_HWA) && !(ncm->data[3] & 0x1))
-		ndp->flags |= NCSI_DEV_RESHUFFLE;
-
-	/* If this channel is the active one and the link doesn't
-	 * work, we have to choose another channel to be active one.
-	 * The logic here is exactly similar to what we do when link
-	 * is down on the active channel.
-	 *
-	 * On the other hand, we need configure it when host driver
-	 * state on the active channel becomes ready.
-	 */
-	ncsi_stop_channel_monitor(nc);
-
-	spin_lock_irqsave(&nc->lock, flags);
-	nc->state = (ncm->data[3] & 0x1) ? NCSI_CHANNEL_INACTIVE :
-					   NCSI_CHANNEL_ACTIVE;
-	spin_unlock_irqrestore(&nc->lock, flags);
-
-	spin_lock_irqsave(&ndp->lock, flags);
-	list_add_tail_rcu(&nc->link, &ndp->channel_queue);
-	spin_unlock_irqrestore(&ndp->lock, flags);
-
-	ncsi_process_next_channel(ndp);
+	netdev_printk(KERN_DEBUG, ndp->ndev.dev,
+		      "NCSI: host driver %srunning on channel %u\n",
+		      ncm->data[3] & 0x1 ? "" : "not ", nc->id);
 
 	return 0;
 }
diff --git a/net/netfilter/Kconfig b/net/netfilter/Kconfig
index e4a13cc..0ee0fcf 100644
--- a/net/netfilter/Kconfig
+++ b/net/netfilter/Kconfig
@@ -12,6 +12,12 @@
 config NETFILTER_NETLINK
 	tristate
 
+config NETFILTER_FAMILY_BRIDGE
+	bool
+
+config NETFILTER_FAMILY_ARP
+	bool
+
 config NETFILTER_NETLINK_ACCT
 	tristate "Netfilter NFACCT over NFNETLINK interface"
 	depends on NETFILTER_ADVANCED
@@ -62,6 +68,8 @@
 	select NF_LOG_COMMON
 
 if NF_CONNTRACK
+config NETFILTER_CONNCOUNT
+	tristate
 
 config NF_CONNTRACK_MARK
 	bool  'Connection mark tracking support'
@@ -497,6 +505,13 @@
 	  This option adds the "ct" expression that you can use to match
 	  connection tracking information such as the flow state.
 
+config NFT_FLOW_OFFLOAD
+	depends on NF_CONNTRACK
+	tristate "Netfilter nf_tables hardware flow offload module"
+	help
+	  This option adds the "flow_offload" expression that you can use to
+	  choose what flows are placed into the hardware.
+
 config NFT_SET_RBTREE
 	tristate "Netfilter nf_tables rbtree set module"
 	help
@@ -649,6 +664,21 @@
 
 endif # NF_TABLES
 
+config NF_FLOW_TABLE_INET
+	select NF_FLOW_TABLE
+	tristate "Netfilter flow table mixed IPv4/IPv6 module"
+	help
+	  This option adds support for the mixed IPv4/IPv6 flow table.
+
+	  To compile it as a module, choose M here.
+
+config NF_FLOW_TABLE
+	tristate "Netfilter flow table module"
+	help
+	  This option adds the flow table core infrastructure.
+
+	  To compile it as a module, choose M here.
+
 config NETFILTER_XTABLES
 	tristate "Netfilter Xtables support (required for ip_tables)"
 	default m if NETFILTER_ADVANCED=n
@@ -1120,6 +1150,7 @@
 	tristate '"connlimit" match support'
 	depends on NF_CONNTRACK
 	depends on NETFILTER_ADVANCED
+	select NETFILTER_CONNCOUNT
 	---help---
 	  This match allows you to match against the number of parallel
 	  connections to a server per client IP address (or address block).
diff --git a/net/netfilter/Makefile b/net/netfilter/Makefile
index f78ed24..5d9b8b9 100644
--- a/net/netfilter/Makefile
+++ b/net/netfilter/Makefile
@@ -1,5 +1,5 @@
 # SPDX-License-Identifier: GPL-2.0
-netfilter-objs := core.o nf_log.o nf_queue.o nf_sockopt.o
+netfilter-objs := core.o nf_log.o nf_queue.o nf_sockopt.o utils.o
 
 nf_conntrack-y	:= nf_conntrack_core.o nf_conntrack_standalone.o nf_conntrack_expect.o nf_conntrack_helper.o nf_conntrack_proto.o nf_conntrack_l3proto_generic.o nf_conntrack_proto_generic.o nf_conntrack_proto_tcp.o nf_conntrack_proto_udp.o nf_conntrack_extend.o nf_conntrack_acct.o nf_conntrack_seqadj.o
 nf_conntrack-$(CONFIG_NF_CONNTRACK_TIMEOUT) += nf_conntrack_timeout.o
@@ -67,6 +67,8 @@
 # SYNPROXY
 obj-$(CONFIG_NETFILTER_SYNPROXY) += nf_synproxy_core.o
 
+obj-$(CONFIG_NETFILTER_CONNCOUNT) += nf_conncount.o
+
 # generic packet duplication from netdev family
 obj-$(CONFIG_NF_DUP_NETDEV)	+= nf_dup_netdev.o
 
@@ -84,6 +86,7 @@
 obj-$(CONFIG_NFT_RT)		+= nft_rt.o
 obj-$(CONFIG_NFT_NUMGEN)	+= nft_numgen.o
 obj-$(CONFIG_NFT_CT)		+= nft_ct.o
+obj-$(CONFIG_NFT_FLOW_OFFLOAD)	+= nft_flow_offload.o
 obj-$(CONFIG_NFT_LIMIT)		+= nft_limit.o
 obj-$(CONFIG_NFT_NAT)		+= nft_nat.o
 obj-$(CONFIG_NFT_OBJREF)	+= nft_objref.o
@@ -107,6 +110,10 @@
 obj-$(CONFIG_NFT_DUP_NETDEV)	+= nft_dup_netdev.o
 obj-$(CONFIG_NFT_FWD_NETDEV)	+= nft_fwd_netdev.o
 
+# flow table infrastructure
+obj-$(CONFIG_NF_FLOW_TABLE)	+= nf_flow_table.o
+obj-$(CONFIG_NF_FLOW_TABLE_INET) += nf_flow_table_inet.o
+
 # generic X tables 
 obj-$(CONFIG_NETFILTER_XTABLES) += x_tables.o xt_tcpudp.o
 
diff --git a/net/netfilter/core.c b/net/netfilter/core.c
index 52cd290..997dd38 100644
--- a/net/netfilter/core.c
+++ b/net/netfilter/core.c
@@ -4,8 +4,7 @@
  * Thanks to Rob `CmdrTaco' Malda for not influencing this code in any
  * way.
  *
- * Rusty Russell (C)2000 -- This code is GPL.
- * Patrick McHardy (c) 2006-2012
+ * This code is GPL.
  */
 #include <linux/kernel.h>
 #include <linux/netfilter.h>
@@ -28,34 +27,12 @@
 
 #include "nf_internals.h"
 
-static DEFINE_MUTEX(afinfo_mutex);
-
-const struct nf_afinfo __rcu *nf_afinfo[NFPROTO_NUMPROTO] __read_mostly;
-EXPORT_SYMBOL(nf_afinfo);
 const struct nf_ipv6_ops __rcu *nf_ipv6_ops __read_mostly;
 EXPORT_SYMBOL_GPL(nf_ipv6_ops);
 
 DEFINE_PER_CPU(bool, nf_skb_duplicated);
 EXPORT_SYMBOL_GPL(nf_skb_duplicated);
 
-int nf_register_afinfo(const struct nf_afinfo *afinfo)
-{
-	mutex_lock(&afinfo_mutex);
-	RCU_INIT_POINTER(nf_afinfo[afinfo->family], afinfo);
-	mutex_unlock(&afinfo_mutex);
-	return 0;
-}
-EXPORT_SYMBOL_GPL(nf_register_afinfo);
-
-void nf_unregister_afinfo(const struct nf_afinfo *afinfo)
-{
-	mutex_lock(&afinfo_mutex);
-	RCU_INIT_POINTER(nf_afinfo[afinfo->family], NULL);
-	mutex_unlock(&afinfo_mutex);
-	synchronize_rcu();
-}
-EXPORT_SYMBOL_GPL(nf_unregister_afinfo);
-
 #ifdef HAVE_JUMP_LABEL
 struct static_key nf_hooks_needed[NFPROTO_NUMPROTO][NF_MAX_HOOKS];
 EXPORT_SYMBOL(nf_hooks_needed);
@@ -74,7 +51,8 @@ static struct nf_hook_entries *allocate_hook_entries_size(u16 num)
 	struct nf_hook_entries *e;
 	size_t alloc = sizeof(*e) +
 		       sizeof(struct nf_hook_entry) * num +
-		       sizeof(struct nf_hook_ops *) * num;
+		       sizeof(struct nf_hook_ops *) * num +
+		       sizeof(struct nf_hook_entries_rcu_head);
 
 	if (num == 0)
 		return NULL;
@@ -85,6 +63,30 @@ static struct nf_hook_entries *allocate_hook_entries_size(u16 num)
 	return e;
 }
 
+static void __nf_hook_entries_free(struct rcu_head *h)
+{
+	struct nf_hook_entries_rcu_head *head;
+
+	head = container_of(h, struct nf_hook_entries_rcu_head, head);
+	kvfree(head->allocation);
+}
+
+static void nf_hook_entries_free(struct nf_hook_entries *e)
+{
+	struct nf_hook_entries_rcu_head *head;
+	struct nf_hook_ops **ops;
+	unsigned int num;
+
+	if (!e)
+		return;
+
+	num = e->num_hook_entries;
+	ops = nf_hook_entries_get_hook_ops(e);
+	head = (void *)&ops[num];
+	head->allocation = e;
+	call_rcu(&head->head, __nf_hook_entries_free);
+}
+
 static unsigned int accept_all(void *priv,
 			       struct sk_buff *skb,
 			       const struct nf_hook_state *state)
@@ -135,6 +137,12 @@ nf_hook_entries_grow(const struct nf_hook_entries *old,
 			++i;
 			continue;
 		}
+
+		if (reg->nat_hook && orig_ops[i]->nat_hook) {
+			kvfree(new);
+			return ERR_PTR(-EEXIST);
+		}
+
 		if (inserted || reg->priority > orig_ops[i]->priority) {
 			new_ops[nhooks] = (void *)orig_ops[i];
 			new->hooks[nhooks] = old->hooks[i];
@@ -237,27 +245,61 @@ static void *__nf_hook_entries_try_shrink(struct nf_hook_entries __rcu **pp)
 	return old;
 }
 
-static struct nf_hook_entries __rcu **nf_hook_entry_head(struct net *net, const struct nf_hook_ops *reg)
+static struct nf_hook_entries __rcu **
+nf_hook_entry_head(struct net *net, int pf, unsigned int hooknum,
+		   struct net_device *dev)
 {
-	if (reg->pf != NFPROTO_NETDEV)
-		return net->nf.hooks[reg->pf]+reg->hooknum;
+	switch (pf) {
+	case NFPROTO_NETDEV:
+		break;
+#ifdef CONFIG_NETFILTER_FAMILY_ARP
+	case NFPROTO_ARP:
+		if (WARN_ON_ONCE(ARRAY_SIZE(net->nf.hooks_arp) <= hooknum))
+			return NULL;
+		return net->nf.hooks_arp + hooknum;
+#endif
+#ifdef CONFIG_NETFILTER_FAMILY_BRIDGE
+	case NFPROTO_BRIDGE:
+		if (WARN_ON_ONCE(ARRAY_SIZE(net->nf.hooks_bridge) <= hooknum))
+			return NULL;
+		return net->nf.hooks_bridge + hooknum;
+#endif
+	case NFPROTO_IPV4:
+		if (WARN_ON_ONCE(ARRAY_SIZE(net->nf.hooks_ipv4) <= hooknum))
+			return NULL;
+		return net->nf.hooks_ipv4 + hooknum;
+	case NFPROTO_IPV6:
+		if (WARN_ON_ONCE(ARRAY_SIZE(net->nf.hooks_ipv6) <= hooknum))
+			return NULL;
+		return net->nf.hooks_ipv6 + hooknum;
+#if IS_ENABLED(CONFIG_DECNET)
+	case NFPROTO_DECNET:
+		if (WARN_ON_ONCE(ARRAY_SIZE(net->nf.hooks_decnet) <= hooknum))
+			return NULL;
+		return net->nf.hooks_decnet + hooknum;
+#endif
+	default:
+		WARN_ON_ONCE(1);
+		return NULL;
+	}
 
 #ifdef CONFIG_NETFILTER_INGRESS
-	if (reg->hooknum == NF_NETDEV_INGRESS) {
-		if (reg->dev && dev_net(reg->dev) == net)
-			return &reg->dev->nf_hooks_ingress;
+	if (hooknum == NF_NETDEV_INGRESS) {
+		if (dev && dev_net(dev) == net)
+			return &dev->nf_hooks_ingress;
 	}
 #endif
 	WARN_ON_ONCE(1);
 	return NULL;
 }
 
-int nf_register_net_hook(struct net *net, const struct nf_hook_ops *reg)
+static int __nf_register_net_hook(struct net *net, int pf,
+				  const struct nf_hook_ops *reg)
 {
 	struct nf_hook_entries *p, *new_hooks;
 	struct nf_hook_entries __rcu **pp;
 
-	if (reg->pf == NFPROTO_NETDEV) {
+	if (pf == NFPROTO_NETDEV) {
 #ifndef CONFIG_NETFILTER_INGRESS
 		if (reg->hooknum == NF_NETDEV_INGRESS)
 			return -EOPNOTSUPP;
@@ -267,7 +309,7 @@ int nf_register_net_hook(struct net *net, const struct nf_hook_ops *reg)
 			return -EINVAL;
 	}
 
-	pp = nf_hook_entry_head(net, reg);
+	pp = nf_hook_entry_head(net, pf, reg->hooknum, reg->dev);
 	if (!pp)
 		return -EINVAL;
 
@@ -285,21 +327,19 @@ int nf_register_net_hook(struct net *net, const struct nf_hook_ops *reg)
 
 	hooks_validate(new_hooks);
 #ifdef CONFIG_NETFILTER_INGRESS
-	if (reg->pf == NFPROTO_NETDEV && reg->hooknum == NF_NETDEV_INGRESS)
+	if (pf == NFPROTO_NETDEV && reg->hooknum == NF_NETDEV_INGRESS)
 		net_inc_ingress_queue();
 #endif
 #ifdef HAVE_JUMP_LABEL
-	static_key_slow_inc(&nf_hooks_needed[reg->pf][reg->hooknum]);
+	static_key_slow_inc(&nf_hooks_needed[pf][reg->hooknum]);
 #endif
-	synchronize_net();
 	BUG_ON(p == new_hooks);
-	kvfree(p);
+	nf_hook_entries_free(p);
 	return 0;
 }
-EXPORT_SYMBOL(nf_register_net_hook);
 
 /*
- * __nf_unregister_net_hook - remove a hook from blob
+ * nf_remove_net_hook - remove a hook from blob
  *
  * @oldp: current address of hook blob
  * @unreg: hook to unregister
@@ -307,8 +347,8 @@ EXPORT_SYMBOL(nf_register_net_hook);
  * This cannot fail, hook unregistration must always succeed.
  * Therefore replace the to-be-removed hook with a dummy hook.
  */
-static void __nf_unregister_net_hook(struct nf_hook_entries *old,
-				     const struct nf_hook_ops *unreg)
+static void nf_remove_net_hook(struct nf_hook_entries *old,
+			       const struct nf_hook_ops *unreg, int pf)
 {
 	struct nf_hook_ops **orig_ops;
 	bool found = false;
@@ -326,24 +366,24 @@ static void __nf_unregister_net_hook(struct nf_hook_entries *old,
 
 	if (found) {
 #ifdef CONFIG_NETFILTER_INGRESS
-		if (unreg->pf == NFPROTO_NETDEV && unreg->hooknum == NF_NETDEV_INGRESS)
+		if (pf == NFPROTO_NETDEV && unreg->hooknum == NF_NETDEV_INGRESS)
 			net_dec_ingress_queue();
 #endif
 #ifdef HAVE_JUMP_LABEL
-		static_key_slow_dec(&nf_hooks_needed[unreg->pf][unreg->hooknum]);
+		static_key_slow_dec(&nf_hooks_needed[pf][unreg->hooknum]);
 #endif
 	} else {
-		WARN_ONCE(1, "hook not found, pf %d num %d", unreg->pf, unreg->hooknum);
+		WARN_ONCE(1, "hook not found, pf %d num %d", pf, unreg->hooknum);
 	}
 }
 
-void nf_unregister_net_hook(struct net *net, const struct nf_hook_ops *reg)
+void __nf_unregister_net_hook(struct net *net, int pf,
+			      const struct nf_hook_ops *reg)
 {
 	struct nf_hook_entries __rcu **pp;
 	struct nf_hook_entries *p;
-	unsigned int nfq;
 
-	pp = nf_hook_entry_head(net, reg);
+	pp = nf_hook_entry_head(net, pf, reg->hooknum, reg->dev);
 	if (!pp)
 		return;
 
@@ -355,23 +395,52 @@ void nf_unregister_net_hook(struct net *net, const struct nf_hook_ops *reg)
 		return;
 	}
 
-	__nf_unregister_net_hook(p, reg);
+	nf_remove_net_hook(p, reg, pf);
 
 	p = __nf_hook_entries_try_shrink(pp);
 	mutex_unlock(&nf_hook_mutex);
 	if (!p)
 		return;
 
-	synchronize_net();
+	nf_queue_nf_hook_drop(net);
+	nf_hook_entries_free(p);
+}
 
-	/* other cpu might still process nfqueue verdict that used reg */
-	nfq = nf_queue_nf_hook_drop(net);
-	if (nfq)
-		synchronize_net();
-	kvfree(p);
+void nf_unregister_net_hook(struct net *net, const struct nf_hook_ops *reg)
+{
+	if (reg->pf == NFPROTO_INET) {
+		__nf_unregister_net_hook(net, NFPROTO_IPV4, reg);
+		__nf_unregister_net_hook(net, NFPROTO_IPV6, reg);
+	} else {
+		__nf_unregister_net_hook(net, reg->pf, reg);
+	}
 }
 EXPORT_SYMBOL(nf_unregister_net_hook);
 
+int nf_register_net_hook(struct net *net, const struct nf_hook_ops *reg)
+{
+	int err;
+
+	if (reg->pf == NFPROTO_INET) {
+		err = __nf_register_net_hook(net, NFPROTO_IPV4, reg);
+		if (err < 0)
+			return err;
+
+		err = __nf_register_net_hook(net, NFPROTO_IPV6, reg);
+		if (err < 0) {
+			__nf_unregister_net_hook(net, NFPROTO_IPV4, reg);
+			return err;
+		}
+	} else {
+		err = __nf_register_net_hook(net, reg->pf, reg);
+		if (err < 0)
+			return err;
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL(nf_register_net_hook);
+
 int nf_register_net_hooks(struct net *net, const struct nf_hook_ops *reg,
 			  unsigned int n)
 {
@@ -395,63 +464,10 @@ EXPORT_SYMBOL(nf_register_net_hooks);
 void nf_unregister_net_hooks(struct net *net, const struct nf_hook_ops *reg,
 			     unsigned int hookcount)
 {
-	struct nf_hook_entries *to_free[16], *p;
-	struct nf_hook_entries __rcu **pp;
-	unsigned int i, j, n;
+	unsigned int i;
 
-	mutex_lock(&nf_hook_mutex);
-	for (i = 0; i < hookcount; i++) {
-		pp = nf_hook_entry_head(net, &reg[i]);
-		if (!pp)
-			continue;
-
-		p = nf_entry_dereference(*pp);
-		if (WARN_ON_ONCE(!p))
-			continue;
-		__nf_unregister_net_hook(p, &reg[i]);
-	}
-	mutex_unlock(&nf_hook_mutex);
-
-	do {
-		n = min_t(unsigned int, hookcount, ARRAY_SIZE(to_free));
-
-		mutex_lock(&nf_hook_mutex);
-
-		for (i = 0, j = 0; i < hookcount && j < n; i++) {
-			pp = nf_hook_entry_head(net, &reg[i]);
-			if (!pp)
-				continue;
-
-			p = nf_entry_dereference(*pp);
-			if (!p)
-				continue;
-
-			to_free[j] = __nf_hook_entries_try_shrink(pp);
-			if (to_free[j])
-				++j;
-		}
-
-		mutex_unlock(&nf_hook_mutex);
-
-		if (j) {
-			unsigned int nfq;
-
-			synchronize_net();
-
-			/* need 2nd synchronize_net() if nfqueue is used, skb
-			 * can get reinjected right before nf_queue_hook_drop()
-			 */
-			nfq = nf_queue_nf_hook_drop(net);
-			if (nfq)
-				synchronize_net();
-
-			for (i = 0; i < j; i++)
-				kvfree(to_free[i]);
-		}
-
-		reg += n;
-		hookcount -= n;
-	} while (hookcount > 0);
+	for (i = 0; i < hookcount; i++)
+		nf_unregister_net_hook(net, &reg[i]);
 }
 EXPORT_SYMBOL(nf_unregister_net_hooks);
 
@@ -569,14 +585,27 @@ void (*nf_nat_decode_session_hook)(struct sk_buff *, struct flowi *);
 EXPORT_SYMBOL(nf_nat_decode_session_hook);
 #endif
 
+static void __net_init __netfilter_net_init(struct nf_hook_entries **e, int max)
+{
+	int h;
+
+	for (h = 0; h < max; h++)
+		RCU_INIT_POINTER(e[h], NULL);
+}
+
 static int __net_init netfilter_net_init(struct net *net)
 {
-	int i, h;
-
-	for (i = 0; i < ARRAY_SIZE(net->nf.hooks); i++) {
-		for (h = 0; h < NF_MAX_HOOKS; h++)
-			RCU_INIT_POINTER(net->nf.hooks[i][h], NULL);
-	}
+	__netfilter_net_init(net->nf.hooks_ipv4, ARRAY_SIZE(net->nf.hooks_ipv4));
+	__netfilter_net_init(net->nf.hooks_ipv6, ARRAY_SIZE(net->nf.hooks_ipv6));
+#ifdef CONFIG_NETFILTER_FAMILY_ARP
+	__netfilter_net_init(net->nf.hooks_arp, ARRAY_SIZE(net->nf.hooks_arp));
+#endif
+#ifdef CONFIG_NETFILTER_FAMILY_BRIDGE
+	__netfilter_net_init(net->nf.hooks_bridge, ARRAY_SIZE(net->nf.hooks_bridge));
+#endif
+#if IS_ENABLED(CONFIG_DECNET)
+	__netfilter_net_init(net->nf.hooks_decnet, ARRAY_SIZE(net->nf.hooks_decnet));
+#endif
 
 #ifdef CONFIG_PROC_FS
 	net->nf.proc_netfilter = proc_net_mkdir(net, "netfilter",
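Dropping the synchronize_net() calls is what makes hook registration and unregistration cheap here; the free moves behind call_rcu() via the small trailer appended to each hook blob, so no waiting and no extra allocation is needed on the free path. The generic form of that conversion, sketched with a hypothetical struct foo:

    #include <linux/rcupdate.h>
    #include <linux/slab.h>

    /* Instead of synchronize_rcu() followed by kfree(), embed an
     * rcu_head in the object and defer the free, so the writer no
     * longer blocks for a grace period. */
    struct foo {
    	int data;
    	struct rcu_head rcu;
    };

    static void foo_free_rcu(struct rcu_head *head)
    {
    	kfree(container_of(head, struct foo, rcu));
    }

    static void foo_release(struct foo *f)
    {
    	call_rcu(&f->rcu, foo_free_rcu);  /* freed after a grace period */
    }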
diff --git a/net/netfilter/ipset/ip_set_bitmap_gen.h b/net/netfilter/ipset/ip_set_bitmap_gen.h
index 5ca18f0..257ca39 100644
--- a/net/netfilter/ipset/ip_set_bitmap_gen.h
+++ b/net/netfilter/ipset/ip_set_bitmap_gen.h
@@ -127,14 +127,7 @@ mtype_test(struct ip_set *set, void *value, const struct ip_set_ext *ext,
 
 	if (ret <= 0)
 		return ret;
-	if (SET_WITH_TIMEOUT(set) &&
-	    ip_set_timeout_expired(ext_timeout(x, set)))
-		return 0;
-	if (SET_WITH_COUNTER(set))
-		ip_set_update_counter(ext_counter(x, set), ext, mext, flags);
-	if (SET_WITH_SKBINFO(set))
-		ip_set_get_skbinfo(ext_skbinfo(x, set), ext, mext, flags);
-	return 1;
+	return ip_set_match_extensions(set, ext, mext, flags, x);
 }
 
 static int
@@ -227,6 +220,7 @@ mtype_list(const struct ip_set *set,
 	rcu_read_lock();
 	for (; cb->args[IPSET_CB_ARG0] < map->elements;
 	     cb->args[IPSET_CB_ARG0]++) {
+		cond_resched_rcu();
 		id = cb->args[IPSET_CB_ARG0];
 		x = get_ext(set, map, id);
 		if (!test_bit(id, map->members) ||
diff --git a/net/netfilter/ipset/ip_set_bitmap_ip.c b/net/netfilter/ipset/ip_set_bitmap_ip.c
index d8975a0..488d6d0 100644
--- a/net/netfilter/ipset/ip_set_bitmap_ip.c
+++ b/net/netfilter/ipset/ip_set_bitmap_ip.c
@@ -263,12 +263,8 @@ bitmap_ip_create(struct net *net, struct ip_set *set, struct nlattr *tb[],
 		ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP_TO], &last_ip);
 		if (ret)
 			return ret;
-		if (first_ip > last_ip) {
-			u32 tmp = first_ip;
-
-			first_ip = last_ip;
-			last_ip = tmp;
-		}
+		if (first_ip > last_ip)
+			swap(first_ip, last_ip);
 	} else if (tb[IPSET_ATTR_CIDR]) {
 		u8 cidr = nla_get_u8(tb[IPSET_ATTR_CIDR]);
 
diff --git a/net/netfilter/ipset/ip_set_bitmap_ipmac.c b/net/netfilter/ipset/ip_set_bitmap_ipmac.c
index 4c279fb..c00b6a2 100644
--- a/net/netfilter/ipset/ip_set_bitmap_ipmac.c
+++ b/net/netfilter/ipset/ip_set_bitmap_ipmac.c
@@ -337,12 +337,8 @@ bitmap_ipmac_create(struct net *net, struct ip_set *set, struct nlattr *tb[],
 		ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP_TO], &last_ip);
 		if (ret)
 			return ret;
-		if (first_ip > last_ip) {
-			u32 tmp = first_ip;
-
-			first_ip = last_ip;
-			last_ip = tmp;
-		}
+		if (first_ip > last_ip)
+			swap(first_ip, last_ip);
 	} else if (tb[IPSET_ATTR_CIDR]) {
 		u8 cidr = nla_get_u8(tb[IPSET_ATTR_CIDR]);
 
diff --git a/net/netfilter/ipset/ip_set_bitmap_port.c b/net/netfilter/ipset/ip_set_bitmap_port.c
index 7f9bbd7..b561ca8 100644
--- a/net/netfilter/ipset/ip_set_bitmap_port.c
+++ b/net/netfilter/ipset/ip_set_bitmap_port.c
@@ -238,12 +238,8 @@ bitmap_port_create(struct net *net, struct ip_set *set, struct nlattr *tb[],
 
 	first_port = ip_set_get_h16(tb[IPSET_ATTR_PORT]);
 	last_port = ip_set_get_h16(tb[IPSET_ATTR_PORT_TO]);
-	if (first_port > last_port) {
-		u16 tmp = first_port;
-
-		first_port = last_port;
-		last_port = tmp;
-	}
+	if (first_port > last_port)
+		swap(first_port, last_port);
 
 	elements = last_port - first_port + 1;
 	set->dsize = ip_set_elem_len(set, tb, 0, 0);
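All three bitmap types open-coded the same three-line exchange; the kernel's swap() macro from <linux/kernel.h> does it in one line with no explicit temporary. A userspace rendition of that macro (typeof is a GNU C extension, as in the kernel):

    #include <stdio.h>

    #define swap(a, b) \
    	do { typeof(a) __tmp = (a); (a) = (b); (b) = __tmp; } while (0)

    int main(void)
    {
    	unsigned int first = 200, last = 100;

    	if (first > last)
    		swap(first, last);
    	printf("%u-%u\n", first, last);	/* 100-200 */
    	return 0;
    }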
diff --git a/net/netfilter/ipset/ip_set_core.c b/net/netfilter/ipset/ip_set_core.c
index cf84f7b..728bf31 100644
--- a/net/netfilter/ipset/ip_set_core.c
+++ b/net/netfilter/ipset/ip_set_core.c
@@ -57,7 +57,7 @@ MODULE_ALIAS_NFNL_SUBSYS(NFNL_SUBSYS_IPSET);
 
 /* When the nfnl mutex is held: */
 #define ip_set_dereference(p)		\
-	rcu_dereference_protected(p, 1)
+	rcu_dereference_protected(p, lockdep_nfnl_is_held(NFNL_SUBSYS_IPSET))
 #define ip_set(inst, id)		\
 	ip_set_dereference((inst)->ip_set_list)[id]
 
@@ -472,6 +472,31 @@ ip_set_put_extensions(struct sk_buff *skb, const struct ip_set *set,
 }
 EXPORT_SYMBOL_GPL(ip_set_put_extensions);
 
+bool
+ip_set_match_extensions(struct ip_set *set, const struct ip_set_ext *ext,
+			struct ip_set_ext *mext, u32 flags, void *data)
+{
+	if (SET_WITH_TIMEOUT(set) &&
+	    ip_set_timeout_expired(ext_timeout(data, set)))
+		return false;
+	if (SET_WITH_COUNTER(set)) {
+		struct ip_set_counter *counter = ext_counter(data, set);
+
+		if (flags & IPSET_FLAG_MATCH_COUNTERS &&
+		    !(ip_set_match_counter(ip_set_get_packets(counter),
+				mext->packets, mext->packets_op) &&
+		      ip_set_match_counter(ip_set_get_bytes(counter),
+				mext->bytes, mext->bytes_op)))
+			return false;
+		ip_set_update_counter(counter, ext, flags);
+	}
+	if (SET_WITH_SKBINFO(set))
+		ip_set_get_skbinfo(ext_skbinfo(data, set),
+				   ext, mext, flags);
+	return true;
+}
+EXPORT_SYMBOL_GPL(ip_set_match_extensions);
+
 /* Creating/destroying/renaming/swapping affect the existence and
  * the properties of a set. All of these can be executed from userspace
  * only and serialized by the nfnl mutex indirectly from nfnetlink.
@@ -1386,11 +1411,9 @@ ip_set_dump_start(struct sk_buff *skb, struct netlink_callback *cb)
 				goto next_set;
 			if (set->variant->uref)
 				set->variant->uref(set, cb, true);
-			/* Fall through and add elements */
+			/* fall through */
 		default:
-			rcu_read_lock_bh();
 			ret = set->variant->list(set, skb, cb);
-			rcu_read_unlock_bh();
 			if (!cb->args[IPSET_CB_ARG0])
 				/* Set is done, proceed with next one */
 				goto next_set;
@@ -2055,6 +2078,7 @@ ip_set_net_exit(struct net *net)
 
 	inst->is_deleted = true; /* flag for ip_set_nfnl_put */
 
+	nfnl_lock(NFNL_SUBSYS_IPSET);
 	for (i = 0; i < inst->ip_set_max; i++) {
 		set = ip_set(inst, i);
 		if (set) {
@@ -2062,6 +2086,7 @@ ip_set_net_exit(struct net *net)
 			ip_set_destroy_set(set);
 		}
 	}
+	nfnl_unlock(NFNL_SUBSYS_IPSET);
 	kfree(rcu_dereference_protected(inst->ip_set_list, 1));
 }
 
diff --git a/net/netfilter/ipset/ip_set_hash_gen.h b/net/netfilter/ipset/ip_set_hash_gen.h
index efffc8e..bbad940 100644
--- a/net/netfilter/ipset/ip_set_hash_gen.h
+++ b/net/netfilter/ipset/ip_set_hash_gen.h
@@ -917,12 +917,9 @@ static inline int
 mtype_data_match(struct mtype_elem *data, const struct ip_set_ext *ext,
 		 struct ip_set_ext *mext, struct ip_set *set, u32 flags)
 {
-	if (SET_WITH_COUNTER(set))
-		ip_set_update_counter(ext_counter(data, set),
-				      ext, mext, flags);
-	if (SET_WITH_SKBINFO(set))
-		ip_set_get_skbinfo(ext_skbinfo(data, set),
-				   ext, mext, flags);
+	if (!ip_set_match_extensions(set, ext, mext, flags, data))
+		return 0;
+	/* nomatch entries return -ENOTEMPTY */
 	return mtype_do_data_match(data);
 }
 
@@ -941,9 +938,9 @@ mtype_test_cidrs(struct ip_set *set, struct mtype_elem *d,
 	struct mtype_elem *data;
 #if IPSET_NET_COUNT == 2
 	struct mtype_elem orig = *d;
-	int i, j = 0, k;
+	int ret, i, j = 0, k;
 #else
-	int i, j = 0;
+	int ret, i, j = 0;
 #endif
 	u32 key, multi = 0;
 
@@ -969,18 +966,13 @@ mtype_test_cidrs(struct ip_set *set, struct mtype_elem *d,
 			data = ahash_data(n, i, set->dsize);
 			if (!mtype_data_equal(data, d, &multi))
 				continue;
-			if (SET_WITH_TIMEOUT(set)) {
-				if (!ip_set_timeout_expired(
-						ext_timeout(data, set)))
-					return mtype_data_match(data, ext,
-								mext, set,
-								flags);
+			ret = mtype_data_match(data, ext, mext, set, flags);
+			if (ret != 0)
+				return ret;
 #ifdef IP_SET_HASH_WITH_MULTI
-				multi = 0;
+			/* No match, reset multiple match flag */
+			multi = 0;
 #endif
-			} else
-				return mtype_data_match(data, ext,
-							mext, set, flags);
 		}
 #if IPSET_NET_COUNT == 2
 		}
@@ -1027,12 +1019,11 @@ mtype_test(struct ip_set *set, void *value, const struct ip_set_ext *ext,
 		if (!test_bit(i, n->used))
 			continue;
 		data = ahash_data(n, i, set->dsize);
-		if (mtype_data_equal(data, d, &multi) &&
-		    !(SET_WITH_TIMEOUT(set) &&
-		      ip_set_timeout_expired(ext_timeout(data, set)))) {
-			ret = mtype_data_match(data, ext, mext, set, flags);
+		if (!mtype_data_equal(data, d, &multi))
+			continue;
+		ret = mtype_data_match(data, ext, mext, set, flags);
+		if (ret != 0)
 			goto out;
-		}
 	}
 out:
 	return ret;
@@ -1143,6 +1134,7 @@ mtype_list(const struct ip_set *set,
 	rcu_read_lock();
 	for (; cb->args[IPSET_CB_ARG0] < jhash_size(t->htable_bits);
 	     cb->args[IPSET_CB_ARG0]++) {
+		cond_resched_rcu();
 		incomplete = skb_tail_pointer(skb);
 		n = rcu_dereference(hbucket(t, cb->args[IPSET_CB_ARG0]));
 		pr_debug("cb->arg bucket: %lu, t %p n %p\n",
diff --git a/net/netfilter/ipset/ip_set_list_set.c b/net/netfilter/ipset/ip_set_list_set.c
index e864681..072a658 100644
--- a/net/netfilter/ipset/ip_set_list_set.c
+++ b/net/netfilter/ipset/ip_set_list_set.c
@@ -55,8 +55,9 @@ list_set_ktest(struct ip_set *set, const struct sk_buff *skb,
 	       struct ip_set_adt_opt *opt, const struct ip_set_ext *ext)
 {
 	struct list_set *map = set->data;
+	struct ip_set_ext *mext = &opt->ext;
 	struct set_elem *e;
-	u32 cmdflags = opt->cmdflags;
+	u32 flags = opt->cmdflags;
 	int ret;
 
 	/* Don't lookup sub-counters at all */
@@ -64,21 +65,11 @@ list_set_ktest(struct ip_set *set, const struct sk_buff *skb,
 	if (opt->cmdflags & IPSET_FLAG_SKIP_SUBCOUNTER_UPDATE)
 		opt->cmdflags &= ~IPSET_FLAG_SKIP_COUNTER_UPDATE;
 	list_for_each_entry_rcu(e, &map->members, list) {
-		if (SET_WITH_TIMEOUT(set) &&
-		    ip_set_timeout_expired(ext_timeout(e, set)))
-			continue;
 		ret = ip_set_test(e->id, skb, par, opt);
-		if (ret > 0) {
-			if (SET_WITH_COUNTER(set))
-				ip_set_update_counter(ext_counter(e, set),
-						      ext, &opt->ext,
-						      cmdflags);
-			if (SET_WITH_SKBINFO(set))
-				ip_set_get_skbinfo(ext_skbinfo(e, set),
-						   ext, &opt->ext,
-						   cmdflags);
-			return ret;
-		}
+		if (ret <= 0)
+			continue;
+		if (ip_set_match_extensions(set, ext, mext, flags, e))
+			return 1;
 	}
 	return 0;
 }
diff --git a/net/netfilter/ipvs/ip_vs_conn.c b/net/netfilter/ipvs/ip_vs_conn.c
index 3e053cb..f489b8d 100644
--- a/net/netfilter/ipvs/ip_vs_conn.c
+++ b/net/netfilter/ipvs/ip_vs_conn.c
@@ -322,7 +322,7 @@ ip_vs_conn_fill_param_proto(struct netns_ipvs *ipvs,
 {
 	__be16 _ports[2], *pptr;
 
-	pptr = frag_safe_skb_hp(skb, iph->len, sizeof(_ports), _ports, iph);
+	pptr = frag_safe_skb_hp(skb, iph->len, sizeof(_ports), _ports);
 	if (pptr == NULL)
 		return 1;
 
diff --git a/net/netfilter/ipvs/ip_vs_core.c b/net/netfilter/ipvs/ip_vs_core.c
index 5cb7cac..5f6f73c 100644
--- a/net/netfilter/ipvs/ip_vs_core.c
+++ b/net/netfilter/ipvs/ip_vs_core.c
@@ -433,7 +433,7 @@ ip_vs_schedule(struct ip_vs_service *svc, struct sk_buff *skb,
 	/*
 	 * IPv6 frags, only the first hit here.
 	 */
-	pptr = frag_safe_skb_hp(skb, iph->len, sizeof(_ports), _ports, iph);
+	pptr = frag_safe_skb_hp(skb, iph->len, sizeof(_ports), _ports);
 	if (pptr == NULL)
 		return NULL;
 
@@ -566,7 +566,7 @@ int ip_vs_leave(struct ip_vs_service *svc, struct sk_buff *skb,
 	struct netns_ipvs *ipvs = svc->ipvs;
 	struct net *net = ipvs->net;
 
-	pptr = frag_safe_skb_hp(skb, iph->len, sizeof(_ports), _ports, iph);
+	pptr = frag_safe_skb_hp(skb, iph->len, sizeof(_ports), _ports);
 	if (!pptr)
 		return NF_DROP;
 	dport = likely(!ip_vs_iph_inverse(iph)) ? pptr[1] : pptr[0];
@@ -982,7 +982,7 @@ static int ip_vs_out_icmp_v6(struct netns_ipvs *ipvs, struct sk_buff *skb,
 	unsigned int offset;
 
 	*related = 1;
-	ic = frag_safe_skb_hp(skb, ipvsh->len, sizeof(_icmph), &_icmph, ipvsh);
+	ic = frag_safe_skb_hp(skb, ipvsh->len, sizeof(_icmph), &_icmph);
 	if (ic == NULL)
 		return NF_DROP;
 
@@ -1214,7 +1214,7 @@ static struct ip_vs_conn *__ip_vs_rs_conn_out(unsigned int hooknum,
 		return NULL;
 
 	pptr = frag_safe_skb_hp(skb, iph->len,
-				sizeof(_ports), _ports, iph);
+				sizeof(_ports), _ports);
 	if (!pptr)
 		return NULL;
 
@@ -1407,7 +1407,7 @@ ip_vs_out(struct netns_ipvs *ipvs, unsigned int hooknum, struct sk_buff *skb, in
 		__be16 _ports[2], *pptr;
 
 		pptr = frag_safe_skb_hp(skb, iph.len,
-					 sizeof(_ports), _ports, &iph);
+					 sizeof(_ports), _ports);
 		if (pptr == NULL)
 			return NF_ACCEPT;	/* Not for me */
 		if (ip_vs_has_real_service(ipvs, af, iph.protocol, &iph.saddr,
@@ -1741,7 +1741,7 @@ static int ip_vs_in_icmp_v6(struct netns_ipvs *ipvs, struct sk_buff *skb,
 
 	*related = 1;
 
-	ic = frag_safe_skb_hp(skb, iph->len, sizeof(_icmph), &_icmph, iph);
+	ic = frag_safe_skb_hp(skb, iph->len, sizeof(_icmph), &_icmph);
 	if (ic == NULL)
 		return NF_DROP;
 
diff --git a/net/netfilter/ipvs/ip_vs_proto_tcp.c b/net/netfilter/ipvs/ip_vs_proto_tcp.c
index 121a321..bcd9b7b 100644
--- a/net/netfilter/ipvs/ip_vs_proto_tcp.c
+++ b/net/netfilter/ipvs/ip_vs_proto_tcp.c
@@ -315,6 +315,7 @@ tcp_csum_check(int af, struct sk_buff *skb, struct ip_vs_protocol *pp)
 	switch (skb->ip_summed) {
 	case CHECKSUM_NONE:
 		skb->csum = skb_checksum(skb, tcphoff, skb->len - tcphoff, 0);
+		/* fall through */
 	case CHECKSUM_COMPLETE:
 #ifdef CONFIG_IP_VS_IPV6
 		if (af == AF_INET6) {
diff --git a/net/netfilter/ipvs/ip_vs_proto_udp.c b/net/netfilter/ipvs/ip_vs_proto_udp.c
index 30e11cd..c15ef7c 100644
--- a/net/netfilter/ipvs/ip_vs_proto_udp.c
+++ b/net/netfilter/ipvs/ip_vs_proto_udp.c
@@ -319,6 +319,7 @@ udp_csum_check(int af, struct sk_buff *skb, struct ip_vs_protocol *pp)
 		case CHECKSUM_NONE:
 			skb->csum = skb_checksum(skb, udphoff,
 						 skb->len - udphoff, 0);
+			/* fall through */
 		case CHECKSUM_COMPLETE:
 #ifdef CONFIG_IP_VS_IPV6
 			if (af == AF_INET6) {
diff --git a/net/netfilter/nf_conncount.c b/net/netfilter/nf_conncount.c
new file mode 100644
index 0000000..a955182
--- /dev/null
+++ b/net/netfilter/nf_conncount.c
@@ -0,0 +1,373 @@
+/*
+ * count the number of connections matching an arbitrary key.
+ *
+ * (C) 2017 Red Hat GmbH
+ * Author: Florian Westphal <fw@strlen.de>
+ *
+ * split from xt_connlimit.c:
+ *   (c) 2000 Gerd Knorr <kraxel@bytesex.org>
+ *   Nov 2002: Martin Bene <martin.bene@icomedias.com>:
+ *		only ignore TIME_WAIT or gone connections
+ *   (C) CC Computer Consultants GmbH, 2007
+ */
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+#include <linux/in.h>
+#include <linux/in6.h>
+#include <linux/ip.h>
+#include <linux/ipv6.h>
+#include <linux/jhash.h>
+#include <linux/slab.h>
+#include <linux/list.h>
+#include <linux/rbtree.h>
+#include <linux/module.h>
+#include <linux/random.h>
+#include <linux/skbuff.h>
+#include <linux/spinlock.h>
+#include <linux/netfilter/nf_conntrack_tcp.h>
+#include <linux/netfilter/x_tables.h>
+#include <net/netfilter/nf_conntrack.h>
+#include <net/netfilter/nf_conntrack_count.h>
+#include <net/netfilter/nf_conntrack_core.h>
+#include <net/netfilter/nf_conntrack_tuple.h>
+#include <net/netfilter/nf_conntrack_zones.h>
+
+#define CONNCOUNT_SLOTS		256U
+
+#ifdef CONFIG_LOCKDEP
+#define CONNCOUNT_LOCK_SLOTS	8U
+#else
+#define CONNCOUNT_LOCK_SLOTS	256U
+#endif
+
+#define CONNCOUNT_GC_MAX_NODES	8
+#define MAX_KEYLEN		5
+
+/* we will save the tuples of all connections we care about */
+struct nf_conncount_tuple {
+	struct hlist_node		node;
+	struct nf_conntrack_tuple	tuple;
+};
+
+struct nf_conncount_rb {
+	struct rb_node node;
+	struct hlist_head hhead; /* connections/hosts in same subnet */
+	u32 key[MAX_KEYLEN];
+};
+
+static spinlock_t nf_conncount_locks[CONNCOUNT_LOCK_SLOTS] __cacheline_aligned_in_smp;
+
+struct nf_conncount_data {
+	unsigned int keylen;
+	struct rb_root root[CONNCOUNT_SLOTS];
+};
+
+static u_int32_t conncount_rnd __read_mostly;
+static struct kmem_cache *conncount_rb_cachep __read_mostly;
+static struct kmem_cache *conncount_conn_cachep __read_mostly;
+
+static inline bool already_closed(const struct nf_conn *conn)
+{
+	if (nf_ct_protonum(conn) == IPPROTO_TCP)
+		return conn->proto.tcp.state == TCP_CONNTRACK_TIME_WAIT ||
+		       conn->proto.tcp.state == TCP_CONNTRACK_CLOSE;
+	else
+	else
+		return false;
+
+static int key_diff(const u32 *a, const u32 *b, unsigned int klen)
+{
+	return memcmp(a, b, klen * sizeof(u32));
+}
+
+static bool add_hlist(struct hlist_head *head,
+		      const struct nf_conntrack_tuple *tuple)
+{
+	struct nf_conncount_tuple *conn;
+
+	conn = kmem_cache_alloc(conncount_conn_cachep, GFP_ATOMIC);
+	if (conn == NULL)
+		return false;
+	conn->tuple = *tuple;
+	hlist_add_head(&conn->node, head);
+	return true;
+}
+
+static unsigned int check_hlist(struct net *net,
+				struct hlist_head *head,
+				const struct nf_conntrack_tuple *tuple,
+				const struct nf_conntrack_zone *zone,
+				bool *addit)
+{
+	const struct nf_conntrack_tuple_hash *found;
+	struct nf_conncount_tuple *conn;
+	struct hlist_node *n;
+	struct nf_conn *found_ct;
+	unsigned int length = 0;
+
+	*addit = true;
+
+	/* check the saved connections */
+	hlist_for_each_entry_safe(conn, n, head, node) {
+		found = nf_conntrack_find_get(net, zone, &conn->tuple);
+		if (found == NULL) {
+			hlist_del(&conn->node);
+			kmem_cache_free(conncount_conn_cachep, conn);
+			continue;
+		}
+
+		found_ct = nf_ct_tuplehash_to_ctrack(found);
+
+		if (nf_ct_tuple_equal(&conn->tuple, tuple)) {
+			/*
+			 * Just to be sure we have it only once in the list.
+			 * We should not see tuples twice unless someone hooks
+			 * this into a table without "-p tcp --syn".
+			 */
+			*addit = false;
+		} else if (already_closed(found_ct)) {
+			/*
+			 * we do not care about connections which are
+			 * already closed -> ditch them
+			 */
+			nf_ct_put(found_ct);
+			hlist_del(&conn->node);
+			kmem_cache_free(conncount_conn_cachep, conn);
+			continue;
+		}
+
+		nf_ct_put(found_ct);
+		length++;
+	}
+
+	return length;
+}
+
+static void tree_nodes_free(struct rb_root *root,
+			    struct nf_conncount_rb *gc_nodes[],
+			    unsigned int gc_count)
+{
+	struct nf_conncount_rb *rbconn;
+
+	while (gc_count) {
+		rbconn = gc_nodes[--gc_count];
+		rb_erase(&rbconn->node, root);
+		kmem_cache_free(conncount_rb_cachep, rbconn);
+	}
+}
+
+static unsigned int
+count_tree(struct net *net, struct rb_root *root,
+	   const u32 *key, u8 keylen,
+	   u8 family,
+	   const struct nf_conntrack_tuple *tuple,
+	   const struct nf_conntrack_zone *zone)
+{
+	struct nf_conncount_rb *gc_nodes[CONNCOUNT_GC_MAX_NODES];
+	struct rb_node **rbnode, *parent;
+	struct nf_conncount_rb *rbconn;
+	struct nf_conncount_tuple *conn;
+	unsigned int gc_count;
+	bool no_gc = false;
+
+ restart:
+	gc_count = 0;
+	parent = NULL;
+	rbnode = &(root->rb_node);
+	while (*rbnode) {
+		int diff;
+		bool addit;
+
+		rbconn = rb_entry(*rbnode, struct nf_conncount_rb, node);
+
+		parent = *rbnode;
+		diff = key_diff(key, rbconn->key, keylen);
+		if (diff < 0) {
+			rbnode = &((*rbnode)->rb_left);
+		} else if (diff > 0) {
+			rbnode = &((*rbnode)->rb_right);
+		} else {
+			/* same source network -> be counted! */
+			unsigned int count;
+
+			count = check_hlist(net, &rbconn->hhead, tuple, zone, &addit);
+
+			tree_nodes_free(root, gc_nodes, gc_count);
+			if (!addit)
+				return count;
+
+			if (!add_hlist(&rbconn->hhead, tuple))
+				return 0; /* hotdrop */
+
+			return count + 1;
+		}
+
+		if (no_gc || gc_count >= ARRAY_SIZE(gc_nodes))
+			continue;
+
+		/* only used for GC on hhead, retval and 'addit' ignored */
+		check_hlist(net, &rbconn->hhead, tuple, zone, &addit);
+		if (hlist_empty(&rbconn->hhead))
+			gc_nodes[gc_count++] = rbconn;
+	}
+
+	if (gc_count) {
+		no_gc = true;
+		tree_nodes_free(root, gc_nodes, gc_count);
+		/* tree_nodes_free() before the new allocation permits the
+		 * allocator to re-use the newly freed objects.
+		 *
+		 * This is a rare event; in most cases we will find an
+		 * existing node to re-use (or gc_count is 0).
+		 */
+		goto restart;
+	}
+
+	/* no match, need to insert new node */
+	rbconn = kmem_cache_alloc(conncount_rb_cachep, GFP_ATOMIC);
+	if (rbconn == NULL)
+		return 0;
+
+	conn = kmem_cache_alloc(conncount_conn_cachep, GFP_ATOMIC);
+	if (conn == NULL) {
+		kmem_cache_free(conncount_rb_cachep, rbconn);
+		return 0;
+	}
+
+	conn->tuple = *tuple;
+	memcpy(rbconn->key, key, sizeof(u32) * keylen);
+
+	INIT_HLIST_HEAD(&rbconn->hhead);
+	hlist_add_head(&conn->node, &rbconn->hhead);
+
+	rb_link_node(&rbconn->node, parent, rbnode);
+	rb_insert_color(&rbconn->node, root);
+	return 1;
+}
+
+unsigned int nf_conncount_count(struct net *net,
+				struct nf_conncount_data *data,
+				const u32 *key,
+				unsigned int family,
+				const struct nf_conntrack_tuple *tuple,
+				const struct nf_conntrack_zone *zone)
+{
+	struct rb_root *root;
+	int count;
+	u32 hash;
+
+	hash = jhash2(key, data->keylen, conncount_rnd) % CONNCOUNT_SLOTS;
+	root = &data->root[hash];
+
+	spin_lock_bh(&nf_conncount_locks[hash % CONNCOUNT_LOCK_SLOTS]);
+
+	count = count_tree(net, root, key, data->keylen, family, tuple, zone);
+
+	spin_unlock_bh(&nf_conncount_locks[hash % CONNCOUNT_LOCK_SLOTS]);
+
+	return count;
+}
+EXPORT_SYMBOL_GPL(nf_conncount_count);
+
+struct nf_conncount_data *nf_conncount_init(struct net *net, unsigned int family,
+					    unsigned int keylen)
+{
+	struct nf_conncount_data *data;
+	int ret, i;
+
+	if (keylen % sizeof(u32) ||
+	    keylen / sizeof(u32) > MAX_KEYLEN ||
+	    keylen == 0)
+		return ERR_PTR(-EINVAL);
+
+	net_get_random_once(&conncount_rnd, sizeof(conncount_rnd));
+
+	data = kmalloc(sizeof(*data), GFP_KERNEL);
+	if (!data)
+		return ERR_PTR(-ENOMEM);
+
+	ret = nf_ct_netns_get(net, family);
+	if (ret < 0) {
+		kfree(data);
+		return ERR_PTR(ret);
+	}
+
+	for (i = 0; i < ARRAY_SIZE(data->root); ++i)
+		data->root[i] = RB_ROOT;
+
+	data->keylen = keylen / sizeof(u32);
+
+	return data;
+}
+EXPORT_SYMBOL_GPL(nf_conncount_init);
+
+static void destroy_tree(struct rb_root *r)
+{
+	struct nf_conncount_tuple *conn;
+	struct nf_conncount_rb *rbconn;
+	struct hlist_node *n;
+	struct rb_node *node;
+
+	while ((node = rb_first(r)) != NULL) {
+		rbconn = rb_entry(node, struct nf_conncount_rb, node);
+
+		rb_erase(node, r);
+
+		hlist_for_each_entry_safe(conn, n, &rbconn->hhead, node)
+			kmem_cache_free(conncount_conn_cachep, conn);
+
+		kmem_cache_free(conncount_rb_cachep, rbconn);
+	}
+}
+
+void nf_conncount_destroy(struct net *net, unsigned int family,
+			  struct nf_conncount_data *data)
+{
+	unsigned int i;
+
+	nf_ct_netns_put(net, family);
+
+	for (i = 0; i < ARRAY_SIZE(data->root); ++i)
+		destroy_tree(&data->root[i]);
+
+	kfree(data);
+}
+EXPORT_SYMBOL_GPL(nf_conncount_destroy);
+
+static int __init nf_conncount_modinit(void)
+{
+	int i;
+
+	BUILD_BUG_ON(CONNCOUNT_LOCK_SLOTS > CONNCOUNT_SLOTS);
+	BUILD_BUG_ON((CONNCOUNT_SLOTS % CONNCOUNT_LOCK_SLOTS) != 0);
+
+	for (i = 0; i < CONNCOUNT_LOCK_SLOTS; ++i)
+		spin_lock_init(&nf_conncount_locks[i]);
+
+	conncount_conn_cachep = kmem_cache_create("nf_conncount_tuple",
+					   sizeof(struct nf_conncount_tuple),
+					   0, 0, NULL);
+	if (!conncount_conn_cachep)
+		return -ENOMEM;
+
+	conncount_rb_cachep = kmem_cache_create("nf_conncount_rb",
+					   sizeof(struct nf_conncount_rb),
+					   0, 0, NULL);
+	if (!conncount_rb_cachep) {
+		kmem_cache_destroy(conncount_conn_cachep);
+		return -ENOMEM;
+	}
+
+	return 0;
+}
+
+static void __exit nf_conncount_modexit(void)
+{
+	kmem_cache_destroy(conncount_conn_cachep);
+	kmem_cache_destroy(conncount_rb_cachep);
+}
+
+module_init(nf_conncount_modinit);
+module_exit(nf_conncount_modexit);
+MODULE_AUTHOR("Jan Engelhardt <jengelh@medozas.de>");
+MODULE_AUTHOR("Florian Westphal <fw@strlen.de>");
+MODULE_DESCRIPTION("netfilter: count number of connections matching a key");
+MODULE_LICENSE("GPL");
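nf_conncount splits its rbtree into CONNCOUNT_SLOTS roots by hash but protects them with only CONNCOUNT_LOCK_SLOTS spinlocks (fewer when lockdep is enabled, to keep its bookkeeping manageable); the BUILD_BUG_ON above enforces that the slot count is a multiple of the lock count so the stripes stay evenly loaded. A runnable userspace sketch of the striping (compile with -lpthread; the counter array stands in for the rb_root array):

    #include <stdio.h>
    #include <pthread.h>

    #define SLOTS      256U
    #define LOCK_SLOTS 8U

    static pthread_mutex_t locks[LOCK_SLOTS];
    static int trees[SLOTS];	/* stand-in for struct rb_root[] */

    static void slot_update(unsigned int hash)
    {
    	unsigned int slot = hash % SLOTS;

    	/* each slot deterministically maps to one stripe lock */
    	pthread_mutex_lock(&locks[slot % LOCK_SLOTS]);
    	trees[slot]++;		/* mutate the tree under its stripe lock */
    	pthread_mutex_unlock(&locks[slot % LOCK_SLOTS]);
    }

    int main(void)
    {
    	for (unsigned int i = 0; i < LOCK_SLOTS; i++)
    		pthread_mutex_init(&locks[i], NULL);
    	slot_update(12345);
    	printf("%d\n", trees[12345 % SLOTS]);	/* prints 1 */
    	return 0;
    }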
diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
index 85f643c..6a64d52 100644
--- a/net/netfilter/nf_conntrack_core.c
+++ b/net/netfilter/nf_conntrack_core.c
@@ -901,6 +901,9 @@ static unsigned int early_drop_list(struct net *net,
 	hlist_nulls_for_each_entry_rcu(h, n, head, hnnode) {
 		tmp = nf_ct_tuplehash_to_ctrack(h);
 
+		if (test_bit(IPS_OFFLOAD_BIT, &tmp->status))
+			continue;
+
 		if (nf_ct_is_expired(tmp)) {
 			nf_ct_gc_expired(tmp);
 			continue;
@@ -975,6 +978,18 @@ static bool gc_worker_can_early_drop(const struct nf_conn *ct)
 	return false;
 }
 
+#define	DAY	(86400 * HZ)
+
+/* Set an arbitrary timeout large enough not to ever expire; this saves
+ * us a check for the IPS_OFFLOAD_BIT from the packet path via
+ * nf_ct_is_expired().
+ */
+static void nf_ct_offload_timeout(struct nf_conn *ct)
+{
+	if (nf_ct_expires(ct) < DAY / 2)
+		ct->timeout = nfct_time_stamp + DAY;
+}
+
 static void gc_worker(struct work_struct *work)
 {
 	unsigned int min_interval = max(HZ / GC_MAX_BUCKETS_DIV, 1u);
@@ -1011,6 +1026,11 @@ static void gc_worker(struct work_struct *work)
 			tmp = nf_ct_tuplehash_to_ctrack(h);
 
 			scanned++;
+			if (test_bit(IPS_OFFLOAD_BIT, &tmp->status)) {
+				nf_ct_offload_timeout(tmp);
+				continue;
+			}
+
 			if (nf_ct_is_expired(tmp)) {
 				nf_ct_gc_expired(tmp);
 				expired_count++;
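The idea behind nf_ct_offload_timeout() is to keep offloaded flows alive by pushing the expiry far into the future whenever it drops below half the window, so the hot path needs no extra "is this offloaded?" test next to the ordinary expiry check. A self-contained userspace analogue using wall-clock time instead of jiffies:

    #include <stdio.h>
    #include <time.h>

    #define DAY (24 * 60 * 60)

    static time_t expiry;

    /* refresh only when needed, so the common case writes nothing */
    static void offload_refresh(time_t now)
    {
    	if (expiry - now < DAY / 2)
    		expiry = now + DAY;
    }

    int main(void)
    {
    	time_t now = time(NULL);

    	expiry = now + 30;	/* a normal, soon-to-expire timeout */
    	offload_refresh(now);
    	printf("%ld\n", (long)(expiry - now));	/* prints 86400 */
    	return 0;
    }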
diff --git a/net/netfilter/nf_conntrack_h323_asn1.c b/net/netfilter/nf_conntrack_h323_asn1.c
index dc63473..1601275 100644
--- a/net/netfilter/nf_conntrack_h323_asn1.c
+++ b/net/netfilter/nf_conntrack_h323_asn1.c
@@ -1,4 +1,4 @@
-/****************************************************************************
+/*
  * ip_conntrack_helper_h323_asn1.c - BER and PER decoding library for H.323
  * 			      	     conntrack/NAT module.
  *
@@ -8,7 +8,7 @@
  *
  * See ip_conntrack_helper_h323_asn1.h for details.
  *
- ****************************************************************************/
+ */
 
 #ifdef __KERNEL__
 #include <linux/kernel.h>
@@ -140,14 +140,15 @@ static const decoder_t Decoders[] = {
 	decode_choice,
 };
 
-/****************************************************************************
+/*
  * H.323 Types
- ****************************************************************************/
+ */
 #include "nf_conntrack_h323_types.c"
 
-/****************************************************************************
+/*
  * Functions
- ****************************************************************************/
+ */
+
 /* Assume bs is aligned && v < 16384 */
 static unsigned int get_len(struct bitstr *bs)
 {
@@ -177,7 +178,6 @@ static int nf_h323_error_boundary(struct bitstr *bs, size_t bytes, size_t bits)
 	return 0;
 }
 
-/****************************************************************************/
 static unsigned int get_bit(struct bitstr *bs)
 {
 	unsigned int b = (*bs->cur) & (0x80 >> bs->bit);
@@ -187,7 +187,6 @@ static unsigned int get_bit(struct bitstr *bs)
 	return b;
 }
 
-/****************************************************************************/
 /* Assume b <= 8 */
 static unsigned int get_bits(struct bitstr *bs, unsigned int b)
 {
@@ -213,7 +212,6 @@ static unsigned int get_bits(struct bitstr *bs, unsigned int b)
 	return v;
 }
 
-/****************************************************************************/
 /* Assume b <= 32 */
 static unsigned int get_bitmap(struct bitstr *bs, unsigned int b)
 {
@@ -251,9 +249,9 @@ static unsigned int get_bitmap(struct bitstr *bs, unsigned int b)
 	return v;
 }
 
-/****************************************************************************
+/*
  * Assume bs is aligned and sizeof(unsigned int) == 4
- ****************************************************************************/
+ */
 static unsigned int get_uint(struct bitstr *bs, int b)
 {
 	unsigned int v = 0;
@@ -262,12 +260,15 @@ static unsigned int get_uint(struct bitstr *bs, int b)
 	case 4:
 		v |= *bs->cur++;
 		v <<= 8;
+		/* fall through */
 	case 3:
 		v |= *bs->cur++;
 		v <<= 8;
+		/* fall through */
 	case 2:
 		v |= *bs->cur++;
 		v <<= 8;
+		/* fall through */
 	case 1:
 		v |= *bs->cur++;
 		break;
@@ -275,7 +276,6 @@ static unsigned int get_uint(struct bitstr *bs, int b)
 	return v;
 }
 
-/****************************************************************************/
 static int decode_nul(struct bitstr *bs, const struct field_t *f,
                       char *base, int level)
 {
@@ -284,7 +284,6 @@ static int decode_nul(struct bitstr *bs, const struct field_t *f,
 	return H323_ERROR_NONE;
 }
 
-/****************************************************************************/
 static int decode_bool(struct bitstr *bs, const struct field_t *f,
                        char *base, int level)
 {
@@ -296,7 +295,6 @@ static int decode_bool(struct bitstr *bs, const struct field_t *f,
 	return H323_ERROR_NONE;
 }
 
-/****************************************************************************/
 static int decode_oid(struct bitstr *bs, const struct field_t *f,
                       char *base, int level)
 {
@@ -316,7 +314,6 @@ static int decode_oid(struct bitstr *bs, const struct field_t *f,
 	return H323_ERROR_NONE;
 }
 
-/****************************************************************************/
 static int decode_int(struct bitstr *bs, const struct field_t *f,
                       char *base, int level)
 {
@@ -364,7 +361,6 @@ static int decode_int(struct bitstr *bs, const struct field_t *f,
 	return H323_ERROR_NONE;
 }
 
-/****************************************************************************/
 static int decode_enum(struct bitstr *bs, const struct field_t *f,
                        char *base, int level)
 {
@@ -381,7 +377,6 @@ static int decode_enum(struct bitstr *bs, const struct field_t *f,
 	return H323_ERROR_NONE;
 }
 
-/****************************************************************************/
 static int decode_bitstr(struct bitstr *bs, const struct field_t *f,
                          char *base, int level)
 {
@@ -418,7 +413,6 @@ static int decode_bitstr(struct bitstr *bs, const struct field_t *f,
 	return H323_ERROR_NONE;
 }
 
-/****************************************************************************/
 static int decode_numstr(struct bitstr *bs, const struct field_t *f,
                          char *base, int level)
 {
@@ -439,7 +433,6 @@ static int decode_numstr(struct bitstr *bs, const struct field_t *f,
 	return H323_ERROR_NONE;
 }
 
-/****************************************************************************/
 static int decode_octstr(struct bitstr *bs, const struct field_t *f,
                          char *base, int level)
 {
@@ -493,7 +486,6 @@ static int decode_octstr(struct bitstr *bs, const struct field_t *f,
 	return H323_ERROR_NONE;
 }
 
-/****************************************************************************/
 static int decode_bmpstr(struct bitstr *bs, const struct field_t *f,
                          char *base, int level)
 {
@@ -523,7 +515,6 @@ static int decode_bmpstr(struct bitstr *bs, const struct field_t *f,
 	return H323_ERROR_NONE;
 }
 
-/****************************************************************************/
 static int decode_seq(struct bitstr *bs, const struct field_t *f,
                       char *base, int level)
 {
@@ -653,7 +644,6 @@ static int decode_seq(struct bitstr *bs, const struct field_t *f,
 	return H323_ERROR_NONE;
 }
 
-/****************************************************************************/
 static int decode_seqof(struct bitstr *bs, const struct field_t *f,
                         char *base, int level)
 {
@@ -750,8 +740,6 @@ static int decode_seqof(struct bitstr *bs, const struct field_t *f,
 	return H323_ERROR_NONE;
 }
 
-
-/****************************************************************************/
 static int decode_choice(struct bitstr *bs, const struct field_t *f,
                          char *base, int level)
 {
@@ -833,7 +821,6 @@ static int decode_choice(struct bitstr *bs, const struct field_t *f,
 	return H323_ERROR_NONE;
 }
 
-/****************************************************************************/
 int DecodeRasMessage(unsigned char *buf, size_t sz, RasMessage *ras)
 {
 	static const struct field_t ras_message = {
@@ -849,7 +836,6 @@ int DecodeRasMessage(unsigned char *buf, size_t sz, RasMessage *ras)
 	return decode_choice(&bs, &ras_message, (char *) ras, 0);
 }
 
-/****************************************************************************/
 static int DecodeH323_UserInformation(unsigned char *buf, unsigned char *beg,
 				      size_t sz, H323_UserInformation *uuie)
 {
@@ -867,7 +853,6 @@ static int DecodeH323_UserInformation(unsigned char *buf, unsigned char *beg,
 	return decode_seq(&bs, &h323_userinformation, (char *) uuie, 0);
 }
 
-/****************************************************************************/
 int DecodeMultimediaSystemControlMessage(unsigned char *buf, size_t sz,
 					 MultimediaSystemControlMessage *
 					 mscm)
@@ -886,7 +871,6 @@ int DecodeMultimediaSystemControlMessage(unsigned char *buf, size_t sz,
 			     (char *) mscm, 0);
 }
 
-/****************************************************************************/
 int DecodeQ931(unsigned char *buf, size_t sz, Q931 *q931)
 {
 	unsigned char *p = buf;
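
The /* fall through */ annotations added to get_uint() mark each case
as deliberately dropping into the next, so that a 1..4 byte big-endian
value is accumulated; they also keep -Wimplicit-fallthrough quiet. A
minimal standalone sketch of the same pattern (illustrative helper,
not part of this patch):

	static unsigned int be_uint(const unsigned char *buf, int len)
	{
		unsigned int v = 0;

		switch (len) {
		case 4:
			v |= *buf++;
			v <<= 8;
			/* fall through */
		case 3:
			v |= *buf++;
			v <<= 8;
			/* fall through */
		case 2:
			v |= *buf++;
			v <<= 8;
			/* fall through */
		case 1:
			v |= *buf++;
			break;
		}
		return v;
	}
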
diff --git a/net/netfilter/nf_conntrack_h323_main.c b/net/netfilter/nf_conntrack_h323_main.c
index f71f0d2..005589c 100644
--- a/net/netfilter/nf_conntrack_h323_main.c
+++ b/net/netfilter/nf_conntrack_h323_main.c
@@ -24,6 +24,7 @@
 #include <linux/skbuff.h>
 #include <net/route.h>
 #include <net/ip6_route.h>
+#include <linux/netfilter_ipv6.h>
 
 #include <net/netfilter/nf_conntrack.h>
 #include <net/netfilter/nf_conntrack_core.h>
@@ -115,7 +116,6 @@ static struct nf_conntrack_helper nf_conntrack_helper_h245;
 static struct nf_conntrack_helper nf_conntrack_helper_q931[];
 static struct nf_conntrack_helper nf_conntrack_helper_ras[];
 
-/****************************************************************************/
 static int get_tpkt_data(struct sk_buff *skb, unsigned int protoff,
 			 struct nf_conn *ct, enum ip_conntrack_info ctinfo,
 			 unsigned char **data, int *datalen, int *dataoff)
@@ -219,7 +219,6 @@ static int get_tpkt_data(struct sk_buff *skb, unsigned int protoff,
 	return 0;
 }
 
-/****************************************************************************/
 static int get_h245_addr(struct nf_conn *ct, const unsigned char *data,
 			 H245_TransportAddress *taddr,
 			 union nf_inet_addr *addr, __be16 *port)
@@ -254,7 +253,6 @@ static int get_h245_addr(struct nf_conn *ct, const unsigned char *data,
 	return 1;
 }
 
-/****************************************************************************/
 static int expect_rtp_rtcp(struct sk_buff *skb, struct nf_conn *ct,
 			   enum ip_conntrack_info ctinfo,
 			   unsigned int protoff,
@@ -328,7 +326,6 @@ static int expect_rtp_rtcp(struct sk_buff *skb, struct nf_conn *ct,
 	return ret;
 }
 
-/****************************************************************************/
 static int expect_t120(struct sk_buff *skb,
 		       struct nf_conn *ct,
 		       enum ip_conntrack_info ctinfo,
@@ -380,7 +377,6 @@ static int expect_t120(struct sk_buff *skb,
 	return ret;
 }
 
-/****************************************************************************/
 static int process_h245_channel(struct sk_buff *skb,
 				struct nf_conn *ct,
 				enum ip_conntrack_info ctinfo,
@@ -410,7 +406,6 @@ static int process_h245_channel(struct sk_buff *skb,
 	return 0;
 }
 
-/****************************************************************************/
 static int process_olc(struct sk_buff *skb, struct nf_conn *ct,
 		       enum ip_conntrack_info ctinfo,
 		       unsigned int protoff,
@@ -472,7 +467,6 @@ static int process_olc(struct sk_buff *skb, struct nf_conn *ct,
 	return 0;
 }
 
-/****************************************************************************/
 static int process_olca(struct sk_buff *skb, struct nf_conn *ct,
 			enum ip_conntrack_info ctinfo,
 			unsigned int protoff, unsigned char **data, int dataoff,
@@ -542,7 +536,6 @@ static int process_olca(struct sk_buff *skb, struct nf_conn *ct,
 	return 0;
 }
 
-/****************************************************************************/
 static int process_h245(struct sk_buff *skb, struct nf_conn *ct,
 			enum ip_conntrack_info ctinfo,
 			unsigned int protoff, unsigned char **data, int dataoff,
@@ -578,7 +571,6 @@ static int process_h245(struct sk_buff *skb, struct nf_conn *ct,
 	return 0;
 }
 
-/****************************************************************************/
 static int h245_help(struct sk_buff *skb, unsigned int protoff,
 		     struct nf_conn *ct, enum ip_conntrack_info ctinfo)
 {
@@ -628,7 +620,6 @@ static int h245_help(struct sk_buff *skb, unsigned int protoff,
 	return NF_DROP;
 }
 
-/****************************************************************************/
 static const struct nf_conntrack_expect_policy h245_exp_policy = {
 	.max_expected	= H323_RTP_CHANNEL_MAX * 4 + 2 /* T.120 */,
 	.timeout	= 240,
@@ -643,7 +634,6 @@ static struct nf_conntrack_helper nf_conntrack_helper_h245 __read_mostly = {
 	.expect_policy		= &h245_exp_policy,
 };
 
-/****************************************************************************/
 int get_h225_addr(struct nf_conn *ct, unsigned char *data,
 		  TransportAddress *taddr,
 		  union nf_inet_addr *addr, __be16 *port)
@@ -675,7 +665,6 @@ int get_h225_addr(struct nf_conn *ct, unsigned char *data,
 	return 1;
 }
 
-/****************************************************************************/
 static int expect_h245(struct sk_buff *skb, struct nf_conn *ct,
 		       enum ip_conntrack_info ctinfo,
 		       unsigned int protoff, unsigned char **data, int dataoff,
@@ -726,20 +715,15 @@ static int expect_h245(struct sk_buff *skb, struct nf_conn *ct,
 }
 
 /* If the calling party is on the same side of the forward-to party,
- * we don't need to track the second call */
+ * we don't need to track the second call
+ */
 static int callforward_do_filter(struct net *net,
 				 const union nf_inet_addr *src,
 				 const union nf_inet_addr *dst,
 				 u_int8_t family)
 {
-	const struct nf_afinfo *afinfo;
 	int ret = 0;
 
-	/* rcu_read_lock()ed by nf_hook_thresh */
-	afinfo = nf_get_afinfo(family);
-	if (!afinfo)
-		return 0;
-
 	switch (family) {
 	case AF_INET: {
 		struct flowi4 fl1, fl2;
@@ -750,10 +734,10 @@ static int callforward_do_filter(struct net *net,
 
 		memset(&fl2, 0, sizeof(fl2));
 		fl2.daddr = dst->ip;
-		if (!afinfo->route(net, (struct dst_entry **)&rt1,
-				   flowi4_to_flowi(&fl1), false)) {
-			if (!afinfo->route(net, (struct dst_entry **)&rt2,
-					   flowi4_to_flowi(&fl2), false)) {
+		if (!nf_ip_route(net, (struct dst_entry **)&rt1,
+				 flowi4_to_flowi(&fl1), false)) {
+			if (!nf_ip_route(net, (struct dst_entry **)&rt2,
+					 flowi4_to_flowi(&fl2), false)) {
 				if (rt_nexthop(rt1, fl1.daddr) ==
 				    rt_nexthop(rt2, fl2.daddr) &&
 				    rt1->dst.dev  == rt2->dst.dev)
@@ -766,18 +750,23 @@ static int callforward_do_filter(struct net *net,
 	}
 #if IS_ENABLED(CONFIG_NF_CONNTRACK_IPV6)
 	case AF_INET6: {
-		struct flowi6 fl1, fl2;
+		const struct nf_ipv6_ops *v6ops;
 		struct rt6_info *rt1, *rt2;
+		struct flowi6 fl1, fl2;
+
+		v6ops = nf_get_ipv6_ops();
+		if (!v6ops)
+			return 0;
 
 		memset(&fl1, 0, sizeof(fl1));
 		fl1.daddr = src->in6;
 
 		memset(&fl2, 0, sizeof(fl2));
 		fl2.daddr = dst->in6;
-		if (!afinfo->route(net, (struct dst_entry **)&rt1,
-				   flowi6_to_flowi(&fl1), false)) {
-			if (!afinfo->route(net, (struct dst_entry **)&rt2,
-					   flowi6_to_flowi(&fl2), false)) {
+		if (!v6ops->route(net, (struct dst_entry **)&rt1,
+				  flowi6_to_flowi(&fl1), false)) {
+			if (!v6ops->route(net, (struct dst_entry **)&rt2,
+					  flowi6_to_flowi(&fl2), false)) {
 				if (ipv6_addr_equal(rt6_nexthop(rt1, &fl1.daddr),
 						    rt6_nexthop(rt2, &fl2.daddr)) &&
 				    rt1->dst.dev == rt2->dst.dev)
@@ -794,7 +783,6 @@ static int callforward_do_filter(struct net *net,
 
 }
 
-/****************************************************************************/
 static int expect_callforwarding(struct sk_buff *skb,
 				 struct nf_conn *ct,
 				 enum ip_conntrack_info ctinfo,
@@ -815,7 +803,8 @@ static int expect_callforwarding(struct sk_buff *skb,
 		return 0;
 
 	/* If the calling party is on the same side of the forward-to party,
-	 * we don't need to track the second call */
+	 * we don't need to track the second call
+	 */
 	if (callforward_filter &&
 	    callforward_do_filter(net, &addr, &ct->tuplehash[!dir].tuple.src.u3,
 				  nf_ct_l3num(ct))) {
@@ -854,7 +843,6 @@ static int expect_callforwarding(struct sk_buff *skb,
 	return ret;
 }
 
-/****************************************************************************/
 static int process_setup(struct sk_buff *skb, struct nf_conn *ct,
 			 enum ip_conntrack_info ctinfo,
 			 unsigned int protoff,
@@ -925,7 +913,6 @@ static int process_setup(struct sk_buff *skb, struct nf_conn *ct,
 	return 0;
 }
 
-/****************************************************************************/
 static int process_callproceeding(struct sk_buff *skb,
 				  struct nf_conn *ct,
 				  enum ip_conntrack_info ctinfo,
@@ -958,7 +945,6 @@ static int process_callproceeding(struct sk_buff *skb,
 	return 0;
 }
 
-/****************************************************************************/
 static int process_connect(struct sk_buff *skb, struct nf_conn *ct,
 			   enum ip_conntrack_info ctinfo,
 			   unsigned int protoff,
@@ -990,7 +976,6 @@ static int process_connect(struct sk_buff *skb, struct nf_conn *ct,
 	return 0;
 }
 
-/****************************************************************************/
 static int process_alerting(struct sk_buff *skb, struct nf_conn *ct,
 			    enum ip_conntrack_info ctinfo,
 			    unsigned int protoff,
@@ -1022,7 +1007,6 @@ static int process_alerting(struct sk_buff *skb, struct nf_conn *ct,
 	return 0;
 }
 
-/****************************************************************************/
 static int process_facility(struct sk_buff *skb, struct nf_conn *ct,
 			    enum ip_conntrack_info ctinfo,
 			    unsigned int protoff,
@@ -1063,7 +1047,6 @@ static int process_facility(struct sk_buff *skb, struct nf_conn *ct,
 	return 0;
 }
 
-/****************************************************************************/
 static int process_progress(struct sk_buff *skb, struct nf_conn *ct,
 			    enum ip_conntrack_info ctinfo,
 			    unsigned int protoff,
@@ -1095,7 +1078,6 @@ static int process_progress(struct sk_buff *skb, struct nf_conn *ct,
 	return 0;
 }
 
-/****************************************************************************/
 static int process_q931(struct sk_buff *skb, struct nf_conn *ct,
 			enum ip_conntrack_info ctinfo,
 			unsigned int protoff, unsigned char **data, int dataoff,
@@ -1154,7 +1136,6 @@ static int process_q931(struct sk_buff *skb, struct nf_conn *ct,
 	return 0;
 }
 
-/****************************************************************************/
 static int q931_help(struct sk_buff *skb, unsigned int protoff,
 		     struct nf_conn *ct, enum ip_conntrack_info ctinfo)
 {
@@ -1203,7 +1184,6 @@ static int q931_help(struct sk_buff *skb, unsigned int protoff,
 	return NF_DROP;
 }
 
-/****************************************************************************/
 static const struct nf_conntrack_expect_policy q931_exp_policy = {
 	/* T.120 and H.245 */
 	.max_expected		= H323_RTP_CHANNEL_MAX * 4 + 4,
@@ -1231,7 +1211,6 @@ static struct nf_conntrack_helper nf_conntrack_helper_q931[] __read_mostly = {
 	},
 };
 
-/****************************************************************************/
 static unsigned char *get_udp_data(struct sk_buff *skb, unsigned int protoff,
 				   int *datalen)
 {
@@ -1249,7 +1228,6 @@ static unsigned char *get_udp_data(struct sk_buff *skb, unsigned int protoff,
 	return skb_header_pointer(skb, dataoff, *datalen, h323_buffer);
 }
 
-/****************************************************************************/
 static struct nf_conntrack_expect *find_expect(struct nf_conn *ct,
 					       union nf_inet_addr *addr,
 					       __be16 port)
@@ -1270,7 +1248,6 @@ static struct nf_conntrack_expect *find_expect(struct nf_conn *ct,
 	return NULL;
 }
 
-/****************************************************************************/
 static int expect_q931(struct sk_buff *skb, struct nf_conn *ct,
 		       enum ip_conntrack_info ctinfo,
 		       unsigned int protoff, unsigned char **data,
@@ -1328,7 +1305,6 @@ static int expect_q931(struct sk_buff *skb, struct nf_conn *ct,
 	return ret;
 }
 
-/****************************************************************************/
 static int process_grq(struct sk_buff *skb, struct nf_conn *ct,
 		       enum ip_conntrack_info ctinfo,
 		       unsigned int protoff,
@@ -1346,7 +1322,6 @@ static int process_grq(struct sk_buff *skb, struct nf_conn *ct,
 	return 0;
 }
 
-/****************************************************************************/
 static int process_gcf(struct sk_buff *skb, struct nf_conn *ct,
 		       enum ip_conntrack_info ctinfo,
 		       unsigned int protoff,
@@ -1391,7 +1366,6 @@ static int process_gcf(struct sk_buff *skb, struct nf_conn *ct,
 	return ret;
 }
 
-/****************************************************************************/
 static int process_rrq(struct sk_buff *skb, struct nf_conn *ct,
 		       enum ip_conntrack_info ctinfo,
 		       unsigned int protoff,
@@ -1428,7 +1402,6 @@ static int process_rrq(struct sk_buff *skb, struct nf_conn *ct,
 	return 0;
 }
 
-/****************************************************************************/
 static int process_rcf(struct sk_buff *skb, struct nf_conn *ct,
 		       enum ip_conntrack_info ctinfo,
 		       unsigned int protoff,
@@ -1480,7 +1453,6 @@ static int process_rcf(struct sk_buff *skb, struct nf_conn *ct,
 	return 0;
 }
 
-/****************************************************************************/
 static int process_urq(struct sk_buff *skb, struct nf_conn *ct,
 		       enum ip_conntrack_info ctinfo,
 		       unsigned int protoff,
@@ -1514,7 +1486,6 @@ static int process_urq(struct sk_buff *skb, struct nf_conn *ct,
 	return 0;
 }
 
-/****************************************************************************/
 static int process_arq(struct sk_buff *skb, struct nf_conn *ct,
 		       enum ip_conntrack_info ctinfo,
 		       unsigned int protoff,
@@ -1559,7 +1530,6 @@ static int process_arq(struct sk_buff *skb, struct nf_conn *ct,
 	return 0;
 }
 
-/****************************************************************************/
 static int process_acf(struct sk_buff *skb, struct nf_conn *ct,
 		       enum ip_conntrack_info ctinfo,
 		       unsigned int protoff,
@@ -1608,7 +1578,6 @@ static int process_acf(struct sk_buff *skb, struct nf_conn *ct,
 	return ret;
 }
 
-/****************************************************************************/
 static int process_lrq(struct sk_buff *skb, struct nf_conn *ct,
 		       enum ip_conntrack_info ctinfo,
 		       unsigned int protoff,
@@ -1626,7 +1595,6 @@ static int process_lrq(struct sk_buff *skb, struct nf_conn *ct,
 	return 0;
 }
 
-/****************************************************************************/
 static int process_lcf(struct sk_buff *skb, struct nf_conn *ct,
 		       enum ip_conntrack_info ctinfo,
 		       unsigned int protoff,
@@ -1666,7 +1634,6 @@ static int process_lcf(struct sk_buff *skb, struct nf_conn *ct,
 	return ret;
 }
 
-/****************************************************************************/
 static int process_irr(struct sk_buff *skb, struct nf_conn *ct,
 		       enum ip_conntrack_info ctinfo,
 		       unsigned int protoff,
@@ -1700,7 +1667,6 @@ static int process_irr(struct sk_buff *skb, struct nf_conn *ct,
 	return 0;
 }
 
-/****************************************************************************/
 static int process_ras(struct sk_buff *skb, struct nf_conn *ct,
 		       enum ip_conntrack_info ctinfo,
 		       unsigned int protoff,
@@ -1745,7 +1711,6 @@ static int process_ras(struct sk_buff *skb, struct nf_conn *ct,
 	return 0;
 }
 
-/****************************************************************************/
 static int ras_help(struct sk_buff *skb, unsigned int protoff,
 		    struct nf_conn *ct, enum ip_conntrack_info ctinfo)
 {
@@ -1788,7 +1753,6 @@ static int ras_help(struct sk_buff *skb, unsigned int protoff,
 	return NF_DROP;
 }
 
-/****************************************************************************/
 static const struct nf_conntrack_expect_policy ras_exp_policy = {
 	.max_expected		= 32,
 	.timeout		= 240,
@@ -1849,7 +1813,6 @@ static void __exit h323_helper_exit(void)
 	nf_conntrack_helper_unregister(&nf_conntrack_helper_h245);
 }
 
-/****************************************************************************/
 static void __exit nf_conntrack_h323_fini(void)
 {
 	h323_helper_exit();
@@ -1857,7 +1820,6 @@ static void __exit nf_conntrack_h323_fini(void)
 	pr_debug("nf_ct_h323: fini\n");
 }
 
-/****************************************************************************/
 static int __init nf_conntrack_h323_init(void)
 {
 	int ret;
@@ -1877,7 +1839,6 @@ static int __init nf_conntrack_h323_init(void)
 	return ret;
 }
 
-/****************************************************************************/
 module_init(nf_conntrack_h323_init);
 module_exit(nf_conntrack_h323_fini);
 
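
callforward_do_filter() no longer bounces through nf_get_afinfo(): the
IPv4 lookup calls nf_ip_route() directly, while IPv6 still needs the
nf_ipv6_ops indirection because the IPv6 netfilter code can be built
as a module and may be absent at runtime. A minimal sketch of the
calling convention, assuming the ops table from
<linux/netfilter_ipv6.h>:

	static int route_v6(struct net *net, struct dst_entry **dst,
			    struct flowi6 *fl6)
	{
		const struct nf_ipv6_ops *v6ops = nf_get_ipv6_ops();

		if (!v6ops)	/* IPv6 support not loaded */
			return -EAFNOSUPPORT;

		return v6ops->route(net, dst, flowi6_to_flowi(fl6), false);
	}
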
diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c
index 382d497..7c7921a 100644
--- a/net/netfilter/nf_conntrack_netlink.c
+++ b/net/netfilter/nf_conntrack_netlink.c
@@ -544,7 +544,7 @@ static size_t ctnetlink_proto_size(const struct nf_conn *ct)
 	len *= 3u; /* ORIG, REPLY, MASTER */
 
 	l4proto = __nf_ct_l4proto_find(nf_ct_l3num(ct), nf_ct_protonum(ct));
-	len += l4proto->nla_size;
+	len += l4proto->nlattr_size;
 	if (l4proto->nlattr_tuple_size) {
 		len4 = l4proto->nlattr_tuple_size();
 		len4 *= 3u; /* ORIG, REPLY, MASTER */
@@ -1110,6 +1110,14 @@ static const struct nla_policy ct_nla_policy[CTA_MAX+1] = {
 				    .len = NF_CT_LABELS_MAX_SIZE },
 };
 
+static int ctnetlink_flush_iterate(struct nf_conn *ct, void *data)
+{
+	if (test_bit(IPS_OFFLOAD_BIT, &ct->status))
+		return 0;
+
+	return ctnetlink_filter_match(ct, data);
+}
+
 static int ctnetlink_flush_conntrack(struct net *net,
 				     const struct nlattr * const cda[],
 				     u32 portid, int report)
@@ -1122,7 +1130,7 @@ static int ctnetlink_flush_conntrack(struct net *net,
 			return PTR_ERR(filter);
 	}
 
-	nf_ct_iterate_cleanup_net(net, ctnetlink_filter_match, filter,
+	nf_ct_iterate_cleanup_net(net, ctnetlink_flush_iterate, filter,
 				  portid, report);
 	kfree(filter);
 
@@ -1168,6 +1176,11 @@ static int ctnetlink_del_conntrack(struct net *net, struct sock *ctnl,
 
 	ct = nf_ct_tuplehash_to_ctrack(h);
 
+	if (test_bit(IPS_OFFLOAD_BIT, &ct->status)) {
+		nf_ct_put(ct);
+		return -EBUSY;
+	}
+
 	if (cda[CTA_ID]) {
 		u_int32_t id = ntohl(nla_get_be32(cda[CTA_ID]));
 		if (id != (u32)(unsigned long)ct) {
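
Flush and delete now agree on offloaded entries: ctnetlink_flush_iterate()
skips them, and a targeted delete returns -EBUSY rather than tearing
down a conntrack whose traffic bypasses the hooks. A sketch of the
shared predicate (hypothetical helper, not introduced by this patch):

	static bool ctnetlink_ct_offloaded(const struct nf_conn *ct)
	{
		return test_bit(IPS_OFFLOAD_BIT, &ct->status);
	}
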
diff --git a/net/netfilter/nf_conntrack_proto.c b/net/netfilter/nf_conntrack_proto.c
index c8e9c95..afdeca5 100644
--- a/net/netfilter/nf_conntrack_proto.c
+++ b/net/netfilter/nf_conntrack_proto.c
@@ -385,14 +385,14 @@ void nf_ct_l4proto_unregister_sysctl(struct net *net,
 
 /* FIXME: Allow NULL functions and sub in pointers to generic for
    them. --RR */
-int nf_ct_l4proto_register_one(struct nf_conntrack_l4proto *l4proto)
+int nf_ct_l4proto_register_one(const struct nf_conntrack_l4proto *l4proto)
 {
 	int ret = 0;
 
 	if (l4proto->l3proto >= ARRAY_SIZE(nf_ct_protos))
 		return -EBUSY;
 
-	if ((l4proto->to_nlattr && !l4proto->nlattr_size) ||
+	if ((l4proto->to_nlattr && l4proto->nlattr_size == 0) ||
 	    (l4proto->tuple_to_nlattr && !l4proto->nlattr_tuple_size))
 		return -EINVAL;
 
@@ -428,10 +428,6 @@ int nf_ct_l4proto_register_one(struct nf_conntrack_l4proto *l4proto)
 		goto out_unlock;
 	}
 
-	l4proto->nla_size = 0;
-	if (l4proto->nlattr_size)
-		l4proto->nla_size += l4proto->nlattr_size();
-
 	rcu_assign_pointer(nf_ct_protos[l4proto->l3proto][l4proto->l4proto],
 			   l4proto);
 out_unlock:
@@ -502,7 +498,7 @@ void nf_ct_l4proto_pernet_unregister_one(struct net *net,
 }
 EXPORT_SYMBOL_GPL(nf_ct_l4proto_pernet_unregister_one);
 
-int nf_ct_l4proto_register(struct nf_conntrack_l4proto *l4proto[],
+int nf_ct_l4proto_register(const struct nf_conntrack_l4proto * const l4proto[],
 			   unsigned int num_proto)
 {
 	int ret = -EINVAL, ver;
@@ -524,7 +520,7 @@ int nf_ct_l4proto_register(struct nf_conntrack_l4proto *l4proto[],
 EXPORT_SYMBOL_GPL(nf_ct_l4proto_register);
 
 int nf_ct_l4proto_pernet_register(struct net *net,
-				  struct nf_conntrack_l4proto *const l4proto[],
+				  const struct nf_conntrack_l4proto *const l4proto[],
 				  unsigned int num_proto)
 {
 	int ret = -EINVAL;
@@ -545,7 +541,7 @@ int nf_ct_l4proto_pernet_register(struct net *net,
 }
 EXPORT_SYMBOL_GPL(nf_ct_l4proto_pernet_register);
 
-void nf_ct_l4proto_unregister(struct nf_conntrack_l4proto *l4proto[],
+void nf_ct_l4proto_unregister(const struct nf_conntrack_l4proto * const l4proto[],
 			      unsigned int num_proto)
 {
 	mutex_lock(&nf_ct_proto_mutex);
@@ -555,12 +551,12 @@ void nf_ct_l4proto_unregister(struct nf_conntrack_l4proto *l4proto[],
 
 	synchronize_net();
 	/* Remove all contrack entries for this protocol */
-	nf_ct_iterate_destroy(kill_l4proto, l4proto);
+	nf_ct_iterate_destroy(kill_l4proto, (void *)l4proto);
 }
 EXPORT_SYMBOL_GPL(nf_ct_l4proto_unregister);
 
 void nf_ct_l4proto_pernet_unregister(struct net *net,
-				struct nf_conntrack_l4proto *const l4proto[],
+				const struct nf_conntrack_l4proto *const l4proto[],
 				unsigned int num_proto)
 {
 	while (num_proto-- != 0)
diff --git a/net/netfilter/nf_conntrack_proto_dccp.c b/net/netfilter/nf_conntrack_proto_dccp.c
index 2a446f4..abe647d 100644
--- a/net/netfilter/nf_conntrack_proto_dccp.c
+++ b/net/netfilter/nf_conntrack_proto_dccp.c
@@ -654,6 +654,12 @@ static const struct nla_policy dccp_nla_policy[CTA_PROTOINFO_DCCP_MAX + 1] = {
 	[CTA_PROTOINFO_DCCP_PAD]	= { .type = NLA_UNSPEC },
 };
 
+#define DCCP_NLATTR_SIZE ( \
+	NLA_ALIGN(NLA_HDRLEN + 1) + \
+	NLA_ALIGN(NLA_HDRLEN + 1) + \
+	NLA_ALIGN(NLA_HDRLEN + sizeof(u64)) + \
+	NLA_ALIGN(NLA_HDRLEN + 0))
+
 static int nlattr_to_dccp(struct nlattr *cda[], struct nf_conn *ct)
 {
 	struct nlattr *attr = cda[CTA_PROTOINFO_DCCP];
@@ -691,13 +697,6 @@ static int nlattr_to_dccp(struct nlattr *cda[], struct nf_conn *ct)
 	spin_unlock_bh(&ct->lock);
 	return 0;
 }
-
-static int dccp_nlattr_size(void)
-{
-	return nla_total_size(0)	/* CTA_PROTOINFO_DCCP */
-		+ nla_policy_len(dccp_nla_policy, CTA_PROTOINFO_DCCP_MAX + 1);
-}
-
 #endif
 
 #if IS_ENABLED(CONFIG_NF_CT_NETLINK_TIMEOUT)
@@ -862,7 +861,7 @@ static struct nf_proto_net *dccp_get_net_proto(struct net *net)
 	return &net->ct.nf_ct_proto.dccp.pn;
 }
 
-struct nf_conntrack_l4proto nf_conntrack_l4proto_dccp4 __read_mostly = {
+const struct nf_conntrack_l4proto nf_conntrack_l4proto_dccp4 = {
 	.l3proto		= AF_INET,
 	.l4proto		= IPPROTO_DCCP,
 	.pkt_to_tuple		= dccp_pkt_to_tuple,
@@ -876,8 +875,8 @@ struct nf_conntrack_l4proto nf_conntrack_l4proto_dccp4 __read_mostly = {
 	.print_conntrack	= dccp_print_conntrack,
 #endif
 #if IS_ENABLED(CONFIG_NF_CT_NETLINK)
+	.nlattr_size		= DCCP_NLATTR_SIZE,
 	.to_nlattr		= dccp_to_nlattr,
-	.nlattr_size		= dccp_nlattr_size,
 	.from_nlattr		= nlattr_to_dccp,
 	.tuple_to_nlattr	= nf_ct_port_tuple_to_nlattr,
 	.nlattr_tuple_size	= nf_ct_port_nlattr_tuple_size,
@@ -898,7 +897,7 @@ struct nf_conntrack_l4proto nf_conntrack_l4proto_dccp4 __read_mostly = {
 };
 EXPORT_SYMBOL_GPL(nf_conntrack_l4proto_dccp4);
 
-struct nf_conntrack_l4proto nf_conntrack_l4proto_dccp6 __read_mostly = {
+const struct nf_conntrack_l4proto nf_conntrack_l4proto_dccp6 = {
 	.l3proto		= AF_INET6,
 	.l4proto		= IPPROTO_DCCP,
 	.pkt_to_tuple		= dccp_pkt_to_tuple,
@@ -912,8 +911,8 @@ struct nf_conntrack_l4proto nf_conntrack_l4proto_dccp6 __read_mostly = {
 	.print_conntrack	= dccp_print_conntrack,
 #endif
 #if IS_ENABLED(CONFIG_NF_CT_NETLINK)
+	.nlattr_size		= DCCP_NLATTR_SIZE,
 	.to_nlattr		= dccp_to_nlattr,
-	.nlattr_size		= dccp_nlattr_size,
 	.from_nlattr		= nlattr_to_dccp,
 	.tuple_to_nlattr	= nf_ct_port_tuple_to_nlattr,
 	.nlattr_tuple_size	= nf_ct_port_nlattr_tuple_size,
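
The hand-expanded DCCP_NLATTR_SIZE replaces the old runtime
dccp_nlattr_size() helper: each netlink attribute costs
NLA_ALIGN(NLA_HDRLEN + payload), which can now be summed at build
time. A userspace sketch of the arithmetic (assuming the usual 4-byte
attribute header and 4-byte alignment):

	#include <stdio.h>

	#define NLA_ALIGNTO	4
	#define NLA_ALIGN(len)	(((len) + NLA_ALIGNTO - 1) & ~(NLA_ALIGNTO - 1))
	#define NLA_HDRLEN	4	/* sizeof(struct nlattr) */

	int main(void)
	{
		int size = NLA_ALIGN(NLA_HDRLEN + 1) +	/* DCCP_STATE: u8 */
			   NLA_ALIGN(NLA_HDRLEN + 1) +	/* DCCP_ROLE:  u8 */
			   NLA_ALIGN(NLA_HDRLEN + 8) +	/* HANDSHAKE_SEQ: u64 */
			   NLA_ALIGN(NLA_HDRLEN + 0);	/* PAD */

		printf("DCCP_NLATTR_SIZE = %d\n", size);	/* prints 32 */
		return 0;
	}
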
diff --git a/net/netfilter/nf_conntrack_proto_generic.c b/net/netfilter/nf_conntrack_proto_generic.c
index 1f86ddf..6c6896d 100644
--- a/net/netfilter/nf_conntrack_proto_generic.c
+++ b/net/netfilter/nf_conntrack_proto_generic.c
@@ -12,7 +12,7 @@
 #include <linux/netfilter.h>
 #include <net/netfilter/nf_conntrack_l4proto.h>
 
-static unsigned int nf_ct_generic_timeout __read_mostly = 600*HZ;
+static const unsigned int nf_ct_generic_timeout = 600*HZ;
 
 static bool nf_generic_should_process(u8 proto)
 {
@@ -163,7 +163,7 @@ static struct nf_proto_net *generic_get_net_proto(struct net *net)
 	return &net->ct.nf_ct_proto.generic.pn;
 }
 
-struct nf_conntrack_l4proto nf_conntrack_l4proto_generic __read_mostly =
+const struct nf_conntrack_l4proto nf_conntrack_l4proto_generic =
 {
 	.l3proto		= PF_UNSPEC,
 	.l4proto		= 255,
diff --git a/net/netfilter/nf_conntrack_proto_gre.c b/net/netfilter/nf_conntrack_proto_gre.c
index a250300..d049ea5 100644
--- a/net/netfilter/nf_conntrack_proto_gre.c
+++ b/net/netfilter/nf_conntrack_proto_gre.c
@@ -48,7 +48,7 @@ enum grep_conntrack {
 	GRE_CT_MAX
 };
 
-static unsigned int gre_timeouts[GRE_CT_MAX] = {
+static const unsigned int gre_timeouts[GRE_CT_MAX] = {
 	[GRE_CT_UNREPLIED]	= 30*HZ,
 	[GRE_CT_REPLIED]	= 180*HZ,
 };
@@ -352,7 +352,7 @@ static int gre_init_net(struct net *net, u_int16_t proto)
 }
 
 /* protocol helper struct */
-static struct nf_conntrack_l4proto nf_conntrack_l4proto_gre4 __read_mostly = {
+static const struct nf_conntrack_l4proto nf_conntrack_l4proto_gre4 = {
 	.l3proto	 = AF_INET,
 	.l4proto	 = IPPROTO_GRE,
 	.pkt_to_tuple	 = gre_pkt_to_tuple,
diff --git a/net/netfilter/nf_conntrack_proto_sctp.c b/net/netfilter/nf_conntrack_proto_sctp.c
index 80faf04d..fb9a35d 100644
--- a/net/netfilter/nf_conntrack_proto_sctp.c
+++ b/net/netfilter/nf_conntrack_proto_sctp.c
@@ -52,7 +52,7 @@ static const char *const sctp_conntrack_names[] = {
 #define HOURS * 60 MINS
 #define DAYS  * 24 HOURS
 
-static unsigned int sctp_timeouts[SCTP_CONNTRACK_MAX] __read_mostly = {
+static const unsigned int sctp_timeouts[SCTP_CONNTRACK_MAX] = {
 	[SCTP_CONNTRACK_CLOSED]			= 10 SECS,
 	[SCTP_CONNTRACK_COOKIE_WAIT]		= 3 SECS,
 	[SCTP_CONNTRACK_COOKIE_ECHOED]		= 3 SECS,
@@ -578,6 +578,11 @@ static const struct nla_policy sctp_nla_policy[CTA_PROTOINFO_SCTP_MAX+1] = {
 	[CTA_PROTOINFO_SCTP_VTAG_REPLY]     = { .type = NLA_U32 },
 };
 
+#define SCTP_NLATTR_SIZE ( \
+		NLA_ALIGN(NLA_HDRLEN + 1) + \
+		NLA_ALIGN(NLA_HDRLEN + 4) + \
+		NLA_ALIGN(NLA_HDRLEN + 4))
+
 static int nlattr_to_sctp(struct nlattr *cda[], struct nf_conn *ct)
 {
 	struct nlattr *attr = cda[CTA_PROTOINFO_SCTP];
@@ -608,12 +613,6 @@ static int nlattr_to_sctp(struct nlattr *cda[], struct nf_conn *ct)
 
 	return 0;
 }
-
-static int sctp_nlattr_size(void)
-{
-	return nla_total_size(0)	/* CTA_PROTOINFO_SCTP */
-		+ nla_policy_len(sctp_nla_policy, CTA_PROTOINFO_SCTP_MAX + 1);
-}
 #endif
 
 #if IS_ENABLED(CONFIG_NF_CT_NETLINK_TIMEOUT)
@@ -778,7 +777,7 @@ static struct nf_proto_net *sctp_get_net_proto(struct net *net)
 	return &net->ct.nf_ct_proto.sctp.pn;
 }
 
-struct nf_conntrack_l4proto nf_conntrack_l4proto_sctp4 __read_mostly = {
+const struct nf_conntrack_l4proto nf_conntrack_l4proto_sctp4 = {
 	.l3proto		= PF_INET,
 	.l4proto 		= IPPROTO_SCTP,
 	.pkt_to_tuple 		= sctp_pkt_to_tuple,
@@ -793,8 +792,8 @@ struct nf_conntrack_l4proto nf_conntrack_l4proto_sctp4 __read_mostly = {
 	.can_early_drop		= sctp_can_early_drop,
 	.me 			= THIS_MODULE,
 #if IS_ENABLED(CONFIG_NF_CT_NETLINK)
+	.nlattr_size		= SCTP_NLATTR_SIZE,
 	.to_nlattr		= sctp_to_nlattr,
-	.nlattr_size		= sctp_nlattr_size,
 	.from_nlattr		= nlattr_to_sctp,
 	.tuple_to_nlattr	= nf_ct_port_tuple_to_nlattr,
 	.nlattr_tuple_size	= nf_ct_port_nlattr_tuple_size,
@@ -815,7 +814,7 @@ struct nf_conntrack_l4proto nf_conntrack_l4proto_sctp4 __read_mostly = {
 };
 EXPORT_SYMBOL_GPL(nf_conntrack_l4proto_sctp4);
 
-struct nf_conntrack_l4proto nf_conntrack_l4proto_sctp6 __read_mostly = {
+const struct nf_conntrack_l4proto nf_conntrack_l4proto_sctp6 = {
 	.l3proto		= PF_INET6,
 	.l4proto 		= IPPROTO_SCTP,
 	.pkt_to_tuple 		= sctp_pkt_to_tuple,
@@ -830,8 +829,8 @@ struct nf_conntrack_l4proto nf_conntrack_l4proto_sctp6 __read_mostly = {
 	.can_early_drop		= sctp_can_early_drop,
 	.me 			= THIS_MODULE,
 #if IS_ENABLED(CONFIG_NF_CT_NETLINK)
+	.nlattr_size		= SCTP_NLATTR_SIZE,
 	.to_nlattr		= sctp_to_nlattr,
-	.nlattr_size		= sctp_nlattr_size,
 	.from_nlattr		= nlattr_to_sctp,
 	.tuple_to_nlattr	= nf_ct_port_tuple_to_nlattr,
 	.nlattr_tuple_size	= nf_ct_port_nlattr_tuple_size,
diff --git a/net/netfilter/nf_conntrack_proto_tcp.c b/net/netfilter/nf_conntrack_proto_tcp.c
index 37ef35b..e97cdc1 100644
--- a/net/netfilter/nf_conntrack_proto_tcp.c
+++ b/net/netfilter/nf_conntrack_proto_tcp.c
@@ -68,7 +68,7 @@ static const char *const tcp_conntrack_names[] = {
 #define HOURS * 60 MINS
 #define DAYS * 24 HOURS
 
-static unsigned int tcp_timeouts[TCP_CONNTRACK_TIMEOUT_MAX] __read_mostly = {
+static const unsigned int tcp_timeouts[TCP_CONNTRACK_TIMEOUT_MAX] = {
 	[TCP_CONNTRACK_SYN_SENT]	= 2 MINS,
 	[TCP_CONNTRACK_SYN_RECV]	= 60 SECS,
 	[TCP_CONNTRACK_ESTABLISHED]	= 5 DAYS,
@@ -305,6 +305,9 @@ static bool tcp_invert_tuple(struct nf_conntrack_tuple *tuple,
 /* Print out the private part of the conntrack. */
 static void tcp_print_conntrack(struct seq_file *s, struct nf_conn *ct)
 {
+	if (test_bit(IPS_OFFLOAD_BIT, &ct->status))
+		return;
+
 	seq_printf(s, "%s ", tcp_conntrack_names[ct->proto.tcp.state]);
 }
 #endif
@@ -1222,6 +1225,12 @@ static const struct nla_policy tcp_nla_policy[CTA_PROTOINFO_TCP_MAX+1] = {
 	[CTA_PROTOINFO_TCP_FLAGS_REPLY]	    = { .len =  sizeof(struct nf_ct_tcp_flags) },
 };
 
+#define TCP_NLATTR_SIZE	( \
+	NLA_ALIGN(NLA_HDRLEN + 1) + \
+	NLA_ALIGN(NLA_HDRLEN + 1) + \
+	NLA_ALIGN(NLA_HDRLEN + sizeof(struct nf_ct_tcp_flags)) + \
+	NLA_ALIGN(NLA_HDRLEN + sizeof(struct nf_ct_tcp_flags)))
+
 static int nlattr_to_tcp(struct nlattr *cda[], struct nf_conn *ct)
 {
 	struct nlattr *pattr = cda[CTA_PROTOINFO_TCP];
@@ -1274,12 +1283,6 @@ static int nlattr_to_tcp(struct nlattr *cda[], struct nf_conn *ct)
 	return 0;
 }
 
-static int tcp_nlattr_size(void)
-{
-	return nla_total_size(0)	   /* CTA_PROTOINFO_TCP */
-		+ nla_policy_len(tcp_nla_policy, CTA_PROTOINFO_TCP_MAX + 1);
-}
-
 static unsigned int tcp_nlattr_tuple_size(void)
 {
 	static unsigned int size __read_mostly;
@@ -1541,7 +1544,7 @@ static struct nf_proto_net *tcp_get_net_proto(struct net *net)
 	return &net->ct.nf_ct_proto.tcp.pn;
 }
 
-struct nf_conntrack_l4proto nf_conntrack_l4proto_tcp4 __read_mostly =
+const struct nf_conntrack_l4proto nf_conntrack_l4proto_tcp4 =
 {
 	.l3proto		= PF_INET,
 	.l4proto 		= IPPROTO_TCP,
@@ -1557,11 +1560,11 @@ struct nf_conntrack_l4proto nf_conntrack_l4proto_tcp4 __read_mostly =
 	.can_early_drop		= tcp_can_early_drop,
 #if IS_ENABLED(CONFIG_NF_CT_NETLINK)
 	.to_nlattr		= tcp_to_nlattr,
-	.nlattr_size		= tcp_nlattr_size,
 	.from_nlattr		= nlattr_to_tcp,
 	.tuple_to_nlattr	= nf_ct_port_tuple_to_nlattr,
 	.nlattr_to_tuple	= nf_ct_port_nlattr_to_tuple,
 	.nlattr_tuple_size	= tcp_nlattr_tuple_size,
+	.nlattr_size		= TCP_NLATTR_SIZE,
 	.nla_policy		= nf_ct_port_nla_policy,
 #endif
 #if IS_ENABLED(CONFIG_NF_CT_NETLINK_TIMEOUT)
@@ -1579,7 +1582,7 @@ struct nf_conntrack_l4proto nf_conntrack_l4proto_tcp4 __read_mostly =
 };
 EXPORT_SYMBOL_GPL(nf_conntrack_l4proto_tcp4);
 
-struct nf_conntrack_l4proto nf_conntrack_l4proto_tcp6 __read_mostly =
+const struct nf_conntrack_l4proto nf_conntrack_l4proto_tcp6 =
 {
 	.l3proto		= PF_INET6,
 	.l4proto 		= IPPROTO_TCP,
@@ -1594,8 +1597,8 @@ struct nf_conntrack_l4proto nf_conntrack_l4proto_tcp6 __read_mostly =
 	.error			= tcp_error,
 	.can_early_drop		= tcp_can_early_drop,
 #if IS_ENABLED(CONFIG_NF_CT_NETLINK)
+	.nlattr_size		= TCP_NLATTR_SIZE,
 	.to_nlattr		= tcp_to_nlattr,
-	.nlattr_size		= tcp_nlattr_size,
 	.from_nlattr		= nlattr_to_tcp,
 	.tuple_to_nlattr	= nf_ct_port_tuple_to_nlattr,
 	.nlattr_to_tuple	= nf_ct_port_nlattr_to_tuple,
diff --git a/net/netfilter/nf_conntrack_proto_udp.c b/net/netfilter/nf_conntrack_proto_udp.c
index 3a5f727..fe72439 100644
--- a/net/netfilter/nf_conntrack_proto_udp.c
+++ b/net/netfilter/nf_conntrack_proto_udp.c
@@ -26,7 +26,7 @@
 #include <net/netfilter/ipv4/nf_conntrack_ipv4.h>
 #include <net/netfilter/ipv6/nf_conntrack_ipv6.h>
 
-static unsigned int udp_timeouts[UDP_CT_MAX] = {
+static const unsigned int udp_timeouts[UDP_CT_MAX] = {
 	[UDP_CT_UNREPLIED]	= 30*HZ,
 	[UDP_CT_REPLIED]	= 180*HZ,
 };
@@ -296,7 +296,7 @@ static struct nf_proto_net *udp_get_net_proto(struct net *net)
 	return &net->ct.nf_ct_proto.udp.pn;
 }
 
-struct nf_conntrack_l4proto nf_conntrack_l4proto_udp4 __read_mostly =
+const struct nf_conntrack_l4proto nf_conntrack_l4proto_udp4 =
 {
 	.l3proto		= PF_INET,
 	.l4proto		= IPPROTO_UDP,
@@ -328,7 +328,7 @@ struct nf_conntrack_l4proto nf_conntrack_l4proto_udp4 __read_mostly =
 EXPORT_SYMBOL_GPL(nf_conntrack_l4proto_udp4);
 
 #ifdef CONFIG_NF_CT_PROTO_UDPLITE
-struct nf_conntrack_l4proto nf_conntrack_l4proto_udplite4 __read_mostly =
+const struct nf_conntrack_l4proto nf_conntrack_l4proto_udplite4 =
 {
 	.l3proto		= PF_INET,
 	.l4proto		= IPPROTO_UDPLITE,
@@ -360,7 +360,7 @@ struct nf_conntrack_l4proto nf_conntrack_l4proto_udplite4 __read_mostly =
 EXPORT_SYMBOL_GPL(nf_conntrack_l4proto_udplite4);
 #endif
 
-struct nf_conntrack_l4proto nf_conntrack_l4proto_udp6 __read_mostly =
+const struct nf_conntrack_l4proto nf_conntrack_l4proto_udp6 =
 {
 	.l3proto		= PF_INET6,
 	.l4proto		= IPPROTO_UDP,
@@ -392,7 +392,7 @@ struct nf_conntrack_l4proto nf_conntrack_l4proto_udp6 __read_mostly =
 EXPORT_SYMBOL_GPL(nf_conntrack_l4proto_udp6);
 
 #ifdef CONFIG_NF_CT_PROTO_UDPLITE
-struct nf_conntrack_l4proto nf_conntrack_l4proto_udplite6 __read_mostly =
+const struct nf_conntrack_l4proto nf_conntrack_l4proto_udplite6 =
 {
 	.l3proto		= PF_INET6,
 	.l4proto		= IPPROTO_UDPLITE,
diff --git a/net/netfilter/nf_conntrack_standalone.c b/net/netfilter/nf_conntrack_standalone.c
index 5a101ca..46d32ba 100644
--- a/net/netfilter/nf_conntrack_standalone.c
+++ b/net/netfilter/nf_conntrack_standalone.c
@@ -309,10 +309,12 @@ static int ct_seq_show(struct seq_file *s, void *v)
 	WARN_ON(!l4proto);
 
 	ret = -ENOSPC;
-	seq_printf(s, "%-8s %u %-8s %u %ld ",
+	seq_printf(s, "%-8s %u %-8s %u ",
 		   l3proto_name(l3proto->l3proto), nf_ct_l3num(ct),
-		   l4proto_name(l4proto->l4proto), nf_ct_protonum(ct),
-		   nf_ct_expires(ct)  / HZ);
+		   l4proto_name(l4proto->l4proto), nf_ct_protonum(ct));
+
+	if (!test_bit(IPS_OFFLOAD_BIT, &ct->status))
+		seq_printf(s, "%ld ", nf_ct_expires(ct)  / HZ);
 
 	if (l4proto->print_conntrack)
 		l4proto->print_conntrack(s, ct);
@@ -339,7 +341,9 @@ static int ct_seq_show(struct seq_file *s, void *v)
 	if (seq_print_acct(s, ct, IP_CT_DIR_REPLY))
 		goto release;
 
-	if (test_bit(IPS_ASSURED_BIT, &ct->status))
+	if (test_bit(IPS_OFFLOAD_BIT, &ct->status))
+		seq_puts(s, "[OFFLOAD] ");
+	else if (test_bit(IPS_ASSURED_BIT, &ct->status))
 		seq_puts(s, "[ASSURED] ");
 
 	if (seq_has_overflowed(s))
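
With ct_seq_show() updated, an offloaded entry in /proc/net/nf_conntrack
prints no timeout column (and, for TCP, no state) and is tagged
[OFFLOAD] instead of [ASSURED]. A hypothetical line, with all field
values invented for illustration:

	ipv4     2 tcp      6 src=10.0.0.1 dst=10.0.0.2 sport=36124 dport=80 src=10.0.0.2 dst=10.0.0.1 sport=80 dport=36124 [OFFLOAD] mark=0 use=2
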
diff --git a/net/netfilter/nf_flow_table.c b/net/netfilter/nf_flow_table.c
new file mode 100644
index 0000000..2f5099c
--- /dev/null
+++ b/net/netfilter/nf_flow_table.c
@@ -0,0 +1,429 @@
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/netfilter.h>
+#include <linux/rhashtable.h>
+#include <linux/netdevice.h>
+#include <net/netfilter/nf_flow_table.h>
+#include <net/netfilter/nf_conntrack.h>
+#include <net/netfilter/nf_conntrack_core.h>
+#include <net/netfilter/nf_conntrack_tuple.h>
+
+struct flow_offload_entry {
+	struct flow_offload	flow;
+	struct nf_conn		*ct;
+	struct rcu_head		rcu_head;
+};
+
+struct flow_offload *
+flow_offload_alloc(struct nf_conn *ct, struct nf_flow_route *route)
+{
+	struct flow_offload_entry *entry;
+	struct flow_offload *flow;
+
+	if (unlikely(nf_ct_is_dying(ct) ||
+	    !atomic_inc_not_zero(&ct->ct_general.use)))
+		return NULL;
+
+	entry = kzalloc(sizeof(*entry), GFP_ATOMIC);
+	if (!entry)
+		goto err_ct_refcnt;
+
+	flow = &entry->flow;
+
+	if (!dst_hold_safe(route->tuple[FLOW_OFFLOAD_DIR_ORIGINAL].dst))
+		goto err_dst_cache_original;
+
+	if (!dst_hold_safe(route->tuple[FLOW_OFFLOAD_DIR_REPLY].dst))
+		goto err_dst_cache_reply;
+
+	entry->ct = ct;
+
+	switch (ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.l3num) {
+	case NFPROTO_IPV4:
+		flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple.src_v4 =
+			ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.u3.in;
+		flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple.dst_v4 =
+			ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.dst.u3.in;
+		flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].tuple.src_v4 =
+			ct->tuplehash[IP_CT_DIR_REPLY].tuple.src.u3.in;
+		flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].tuple.dst_v4 =
+			ct->tuplehash[IP_CT_DIR_REPLY].tuple.dst.u3.in;
+		break;
+	case NFPROTO_IPV6:
+		flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple.src_v6 =
+			ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.u3.in6;
+		flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple.dst_v6 =
+			ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.dst.u3.in6;
+		flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].tuple.src_v6 =
+			ct->tuplehash[IP_CT_DIR_REPLY].tuple.src.u3.in6;
+		flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].tuple.dst_v6 =
+			ct->tuplehash[IP_CT_DIR_REPLY].tuple.dst.u3.in6;
+		break;
+	}
+
+	flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple.l3proto =
+		ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.l3num;
+	flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple.l4proto =
+		ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.dst.protonum;
+	flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].tuple.l3proto =
+		ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.l3num;
+	flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].tuple.l4proto =
+		ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.dst.protonum;
+
+	flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple.dst_cache =
+		  route->tuple[FLOW_OFFLOAD_DIR_ORIGINAL].dst;
+	flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].tuple.dst_cache =
+		  route->tuple[FLOW_OFFLOAD_DIR_REPLY].dst;
+
+	flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple.src_port =
+		ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.u.tcp.port;
+	flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple.dst_port =
+		ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.dst.u.tcp.port;
+	flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].tuple.src_port =
+		ct->tuplehash[IP_CT_DIR_REPLY].tuple.src.u.tcp.port;
+	flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].tuple.dst_port =
+		ct->tuplehash[IP_CT_DIR_REPLY].tuple.dst.u.tcp.port;
+
+	flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple.dir =
+						FLOW_OFFLOAD_DIR_ORIGINAL;
+	flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].tuple.dir =
+						FLOW_OFFLOAD_DIR_REPLY;
+
+	flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple.iifidx =
+		route->tuple[FLOW_OFFLOAD_DIR_ORIGINAL].ifindex;
+	flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple.oifidx =
+		route->tuple[FLOW_OFFLOAD_DIR_REPLY].ifindex;
+	flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].tuple.iifidx =
+		route->tuple[FLOW_OFFLOAD_DIR_REPLY].ifindex;
+	flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].tuple.oifidx =
+		route->tuple[FLOW_OFFLOAD_DIR_ORIGINAL].ifindex;
+
+	if (ct->status & IPS_SRC_NAT)
+		flow->flags |= FLOW_OFFLOAD_SNAT;
+	else if (ct->status & IPS_DST_NAT)
+		flow->flags |= FLOW_OFFLOAD_DNAT;
+
+	return flow;
+
+err_dst_cache_reply:
+	dst_release(route->tuple[FLOW_OFFLOAD_DIR_ORIGINAL].dst);
+err_dst_cache_original:
+	kfree(entry);
+err_ct_refcnt:
+	nf_ct_put(ct);
+
+	return NULL;
+}
+EXPORT_SYMBOL_GPL(flow_offload_alloc);
+
+void flow_offload_free(struct flow_offload *flow)
+{
+	struct flow_offload_entry *e;
+
+	dst_release(flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple.dst_cache);
+	dst_release(flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].tuple.dst_cache);
+	e = container_of(flow, struct flow_offload_entry, flow);
+	kfree(e);
+}
+EXPORT_SYMBOL_GPL(flow_offload_free);
+
+void flow_offload_dead(struct flow_offload *flow)
+{
+	flow->flags |= FLOW_OFFLOAD_DYING;
+}
+EXPORT_SYMBOL_GPL(flow_offload_dead);
+
+int flow_offload_add(struct nf_flowtable *flow_table, struct flow_offload *flow)
+{
+	flow->timeout = (u32)jiffies;
+
+	rhashtable_insert_fast(&flow_table->rhashtable,
+			       &flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].node,
+			       *flow_table->type->params);
+	rhashtable_insert_fast(&flow_table->rhashtable,
+			       &flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].node,
+			       *flow_table->type->params);
+	return 0;
+}
+EXPORT_SYMBOL_GPL(flow_offload_add);
+
+void flow_offload_del(struct nf_flowtable *flow_table,
+		      struct flow_offload *flow)
+{
+	struct flow_offload_entry *e;
+
+	rhashtable_remove_fast(&flow_table->rhashtable,
+			       &flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].node,
+			       *flow_table->type->params);
+	rhashtable_remove_fast(&flow_table->rhashtable,
+			       &flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].node,
+			       *flow_table->type->params);
+
+	e = container_of(flow, struct flow_offload_entry, flow);
+	kfree_rcu(e, rcu_head);
+}
+EXPORT_SYMBOL_GPL(flow_offload_del);
+
+struct flow_offload_tuple_rhash *
+flow_offload_lookup(struct nf_flowtable *flow_table,
+		    struct flow_offload_tuple *tuple)
+{
+	return rhashtable_lookup_fast(&flow_table->rhashtable, tuple,
+				      *flow_table->type->params);
+}
+EXPORT_SYMBOL_GPL(flow_offload_lookup);
+
+static void nf_flow_release_ct(const struct flow_offload *flow)
+{
+	struct flow_offload_entry *e;
+
+	e = container_of(flow, struct flow_offload_entry, flow);
+	nf_ct_delete(e->ct, 0, 0);
+	nf_ct_put(e->ct);
+}
+
+int nf_flow_table_iterate(struct nf_flowtable *flow_table,
+			  void (*iter)(struct flow_offload *flow, void *data),
+			  void *data)
+{
+	struct flow_offload_tuple_rhash *tuplehash;
+	struct rhashtable_iter hti;
+	struct flow_offload *flow;
+	int err;
+
+	err = rhashtable_walk_init(&flow_table->rhashtable, &hti, GFP_KERNEL);
+	if (err)
+		return err;
+
+	rhashtable_walk_start(&hti);
+
+	while ((tuplehash = rhashtable_walk_next(&hti))) {
+		if (IS_ERR(tuplehash)) {
+			err = PTR_ERR(tuplehash);
+			if (err != -EAGAIN)
+				goto out;
+
+			continue;
+		}
+		if (tuplehash->tuple.dir)
+			continue;
+
+		flow = container_of(tuplehash, struct flow_offload, tuplehash[0]);
+
+		iter(flow, data);
+	}
+out:
+	rhashtable_walk_stop(&hti);
+	rhashtable_walk_exit(&hti);
+
+	return err;
+}
+EXPORT_SYMBOL_GPL(nf_flow_table_iterate);
+
+static inline bool nf_flow_has_expired(const struct flow_offload *flow)
+{
+	return (__s32)(flow->timeout - (u32)jiffies) <= 0;
+}
+
+static inline bool nf_flow_is_dying(const struct flow_offload *flow)
+{
+	return flow->flags & FLOW_OFFLOAD_DYING;
+}
+
+void nf_flow_offload_work_gc(struct work_struct *work)
+{
+	struct flow_offload_tuple_rhash *tuplehash;
+	struct nf_flowtable *flow_table;
+	struct rhashtable_iter hti;
+	struct flow_offload *flow;
+	int err;
+
+	flow_table = container_of(work, struct nf_flowtable, gc_work.work);
+
+	err = rhashtable_walk_init(&flow_table->rhashtable, &hti, GFP_KERNEL);
+	if (err)
+		goto schedule;
+
+	rhashtable_walk_start(&hti);
+
+	while ((tuplehash = rhashtable_walk_next(&hti))) {
+		if (IS_ERR(tuplehash)) {
+			err = PTR_ERR(tuplehash);
+			if (err != -EAGAIN)
+				goto out;
+
+			continue;
+		}
+		if (tuplehash->tuple.dir)
+			continue;
+
+		flow = container_of(tuplehash, struct flow_offload, tuplehash[0]);
+
+		if (nf_flow_has_expired(flow) ||
+		    nf_flow_is_dying(flow)) {
+			flow_offload_del(flow_table, flow);
+			nf_flow_release_ct(flow);
+		}
+	}
+out:
+	rhashtable_walk_stop(&hti);
+	rhashtable_walk_exit(&hti);
+schedule:
+	queue_delayed_work(system_power_efficient_wq, &flow_table->gc_work, HZ);
+}
+EXPORT_SYMBOL_GPL(nf_flow_offload_work_gc);
+
+static u32 flow_offload_hash(const void *data, u32 len, u32 seed)
+{
+	const struct flow_offload_tuple *tuple = data;
+
+	return jhash(tuple, offsetof(struct flow_offload_tuple, dir), seed);
+}
+
+static u32 flow_offload_hash_obj(const void *data, u32 len, u32 seed)
+{
+	const struct flow_offload_tuple_rhash *tuplehash = data;
+
+	return jhash(&tuplehash->tuple, offsetof(struct flow_offload_tuple, dir), seed);
+}
+
+static int flow_offload_hash_cmp(struct rhashtable_compare_arg *arg,
+					const void *ptr)
+{
+	const struct flow_offload_tuple *tuple = arg->key;
+	const struct flow_offload_tuple_rhash *x = ptr;
+
+	if (memcmp(&x->tuple, tuple, offsetof(struct flow_offload_tuple, dir)))
+		return 1;
+
+	return 0;
+}
+
+const struct rhashtable_params nf_flow_offload_rhash_params = {
+	.head_offset		= offsetof(struct flow_offload_tuple_rhash, node),
+	.hashfn			= flow_offload_hash,
+	.obj_hashfn		= flow_offload_hash_obj,
+	.obj_cmpfn		= flow_offload_hash_cmp,
+	.automatic_shrinking	= true,
+};
+EXPORT_SYMBOL_GPL(nf_flow_offload_rhash_params);
+
+static int nf_flow_nat_port_tcp(struct sk_buff *skb, unsigned int thoff,
+				__be16 port, __be16 new_port)
+{
+	struct tcphdr *tcph;
+
+	if (!pskb_may_pull(skb, thoff + sizeof(*tcph)) ||
+	    skb_try_make_writable(skb, thoff + sizeof(*tcph)))
+		return -1;
+
+	tcph = (void *)(skb_network_header(skb) + thoff);
+	inet_proto_csum_replace2(&tcph->check, skb, port, new_port, true);
+
+	return 0;
+}
+
+static int nf_flow_nat_port_udp(struct sk_buff *skb, unsigned int thoff,
+				__be16 port, __be16 new_port)
+{
+	struct udphdr *udph;
+
+	if (!pskb_may_pull(skb, thoff + sizeof(*udph)) ||
+	    skb_try_make_writable(skb, thoff + sizeof(*udph)))
+		return -1;
+
+	udph = (void *)(skb_network_header(skb) + thoff);
+	if (udph->check || skb->ip_summed == CHECKSUM_PARTIAL) {
+		inet_proto_csum_replace2(&udph->check, skb, port,
+					 new_port, true);
+		if (!udph->check)
+			udph->check = CSUM_MANGLED_0;
+	}
+
+	return 0;
+}
+
+static int nf_flow_nat_port(struct sk_buff *skb, unsigned int thoff,
+			    u8 protocol, __be16 port, __be16 new_port)
+{
+	switch (protocol) {
+	case IPPROTO_TCP:
+		if (nf_flow_nat_port_tcp(skb, thoff, port, new_port) < 0)
+			return NF_DROP;
+		break;
+	case IPPROTO_UDP:
+		if (nf_flow_nat_port_udp(skb, thoff, port, new_port) < 0)
+			return NF_DROP;
+		break;
+	}
+
+	return 0;
+}
+
+int nf_flow_snat_port(const struct flow_offload *flow,
+		      struct sk_buff *skb, unsigned int thoff,
+		      u8 protocol, enum flow_offload_tuple_dir dir)
+{
+	struct flow_ports *hdr;
+	__be16 port, new_port;
+
+	if (!pskb_may_pull(skb, thoff + sizeof(*hdr)) ||
+	    skb_try_make_writable(skb, thoff + sizeof(*hdr)))
+		return -1;
+
+	hdr = (void *)(skb_network_header(skb) + thoff);
+
+	switch (dir) {
+	case FLOW_OFFLOAD_DIR_ORIGINAL:
+		port = hdr->source;
+		new_port = flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].tuple.dst_port;
+		hdr->source = new_port;
+		break;
+	case FLOW_OFFLOAD_DIR_REPLY:
+		port = hdr->dest;
+		new_port = flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple.src_port;
+		hdr->dest = new_port;
+		break;
+	default:
+		return -1;
+	}
+
+	return nf_flow_nat_port(skb, thoff, protocol, port, new_port);
+}
+EXPORT_SYMBOL_GPL(nf_flow_snat_port);
+
+int nf_flow_dnat_port(const struct flow_offload *flow,
+		      struct sk_buff *skb, unsigned int thoff,
+		      u8 protocol, enum flow_offload_tuple_dir dir)
+{
+	struct flow_ports *hdr;
+	__be16 port, new_port;
+
+	if (!pskb_may_pull(skb, thoff + sizeof(*hdr)) ||
+	    skb_try_make_writable(skb, thoff + sizeof(*hdr)))
+		return -1;
+
+	hdr = (void *)(skb_network_header(skb) + thoff);
+
+	switch (dir) {
+	case FLOW_OFFLOAD_DIR_ORIGINAL:
+		port = hdr->dest;
+		new_port = flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].tuple.src_port;
+		hdr->dest = new_port;
+		break;
+	case FLOW_OFFLOAD_DIR_REPLY:
+		port = hdr->source;
+		new_port = flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple.dst_port;
+		hdr->source = new_port;
+		break;
+	default:
+		return -1;
+	}
+
+	return nf_flow_nat_port(skb, thoff, protocol, port, new_port);
+}
+EXPORT_SYMBOL_GPL(nf_flow_dnat_port);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Pablo Neira Ayuso <pablo@netfilter.org>");
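
nf_flow_has_expired() uses the classic jiffies-safe comparison: the
u32 subtraction is cast to signed, so the test keeps working when the
jiffies counter wraps (the same idea as time_after()). A userspace
sketch:

	#include <stdio.h>
	#include <stdint.h>

	static int has_expired(uint32_t timeout, uint32_t now)
	{
		return (int32_t)(timeout - now) <= 0;
	}

	int main(void)
	{
		uint32_t timeout = 0xfffffff0u + 16;	/* wrapped to 0 */

		printf("%d\n", has_expired(timeout, 0xfffffff8u)); /* 0: not yet */
		printf("%d\n", has_expired(timeout, 8));           /* 1: expired */
		return 0;
	}
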
diff --git a/net/netfilter/nf_flow_table_inet.c b/net/netfilter/nf_flow_table_inet.c
new file mode 100644
index 0000000..281209ae
--- /dev/null
+++ b/net/netfilter/nf_flow_table_inet.c
@@ -0,0 +1,48 @@
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/netfilter.h>
+#include <linux/rhashtable.h>
+#include <net/netfilter/nf_flow_table.h>
+#include <net/netfilter/nf_tables.h>
+
+static unsigned int
+nf_flow_offload_inet_hook(void *priv, struct sk_buff *skb,
+			  const struct nf_hook_state *state)
+{
+	switch (skb->protocol) {
+	case htons(ETH_P_IP):
+		return nf_flow_offload_ip_hook(priv, skb, state);
+	case htons(ETH_P_IPV6):
+		return nf_flow_offload_ipv6_hook(priv, skb, state);
+	}
+
+	return NF_ACCEPT;
+}
+
+static struct nf_flowtable_type flowtable_inet = {
+	.family		= NFPROTO_INET,
+	.params		= &nf_flow_offload_rhash_params,
+	.gc		= nf_flow_offload_work_gc,
+	.hook		= nf_flow_offload_inet_hook,
+	.owner		= THIS_MODULE,
+};
+
+static int __init nf_flow_inet_module_init(void)
+{
+	nft_register_flowtable_type(&flowtable_inet);
+
+	return 0;
+}
+
+static void __exit nf_flow_inet_module_exit(void)
+{
+	nft_unregister_flowtable_type(&flowtable_inet);
+}
+
+module_init(nf_flow_inet_module_init);
+module_exit(nf_flow_inet_module_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Pablo Neira Ayuso <pablo@netfilter.org>");
+MODULE_ALIAS_NF_FLOWTABLE(1); /* NFPROTO_INET */
diff --git a/net/netfilter/nf_internals.h b/net/netfilter/nf_internals.h
index 44284cd..18f6d7a 100644
--- a/net/netfilter/nf_internals.h
+++ b/net/netfilter/nf_internals.h
@@ -10,7 +10,7 @@
 int nf_queue(struct sk_buff *skb, struct nf_hook_state *state,
 	     const struct nf_hook_entries *entries, unsigned int index,
 	     unsigned int verdict);
-unsigned int nf_queue_nf_hook_drop(struct net *net);
+void nf_queue_nf_hook_drop(struct net *net);
 
 /* nf_log.c */
 int __init netfilter_log_init(void);
diff --git a/net/netfilter/nf_queue.c b/net/netfilter/nf_queue.c
index f7e2195..7f55af5 100644
--- a/net/netfilter/nf_queue.c
+++ b/net/netfilter/nf_queue.c
@@ -10,9 +10,11 @@
 #include <linux/proc_fs.h>
 #include <linux/skbuff.h>
 #include <linux/netfilter.h>
+#include <linux/netfilter_ipv4.h>
+#include <linux/netfilter_ipv6.h>
 #include <linux/netfilter_bridge.h>
 #include <linux/seq_file.h>
 #include <linux/rcupdate.h>
 #include <net/protocol.h>
 #include <net/netfilter/nf_queue.h>
 #include <net/dst.h>
@@ -96,30 +100,56 @@ void nf_queue_entry_get_refs(struct nf_queue_entry *entry)
 }
 EXPORT_SYMBOL_GPL(nf_queue_entry_get_refs);
 
-unsigned int nf_queue_nf_hook_drop(struct net *net)
+void nf_queue_nf_hook_drop(struct net *net)
 {
 	const struct nf_queue_handler *qh;
-	unsigned int count = 0;
 
 	rcu_read_lock();
 	qh = rcu_dereference(net->nf.queue_handler);
 	if (qh)
-		count = qh->nf_hook_drop(net);
+		qh->nf_hook_drop(net);
 	rcu_read_unlock();
-
-	return count;
 }
 EXPORT_SYMBOL_GPL(nf_queue_nf_hook_drop);
 
+static void nf_ip_saveroute(const struct sk_buff *skb,
+			    struct nf_queue_entry *entry)
+{
+	struct ip_rt_info *rt_info = nf_queue_entry_reroute(entry);
+
+	if (entry->state.hook == NF_INET_LOCAL_OUT) {
+		const struct iphdr *iph = ip_hdr(skb);
+
+		rt_info->tos = iph->tos;
+		rt_info->daddr = iph->daddr;
+		rt_info->saddr = iph->saddr;
+		rt_info->mark = skb->mark;
+	}
+}
+
+static void nf_ip6_saveroute(const struct sk_buff *skb,
+			     struct nf_queue_entry *entry)
+{
+	struct ip6_rt_info *rt_info = nf_queue_entry_reroute(entry);
+
+	if (entry->state.hook == NF_INET_LOCAL_OUT) {
+		const struct ipv6hdr *iph = ipv6_hdr(skb);
+
+		rt_info->daddr = iph->daddr;
+		rt_info->saddr = iph->saddr;
+		rt_info->mark = skb->mark;
+	}
+}
+
 static int __nf_queue(struct sk_buff *skb, const struct nf_hook_state *state,
 		      const struct nf_hook_entries *entries,
 		      unsigned int index, unsigned int queuenum)
 {
 	int status = -ENOENT;
 	struct nf_queue_entry *entry = NULL;
-	const struct nf_afinfo *afinfo;
 	const struct nf_queue_handler *qh;
 	struct net *net = state->net;
+	unsigned int route_key_size;
 
 	/* QUEUE == DROP if no one is waiting, to be safe. */
 	qh = rcu_dereference(net->nf.queue_handler);
@@ -128,11 +158,19 @@ static int __nf_queue(struct sk_buff *skb, const struct nf_hook_state *state,
 		goto err;
 	}
 
-	afinfo = nf_get_afinfo(state->pf);
-	if (!afinfo)
-		goto err;
+	switch (state->pf) {
+	case AF_INET:
+		route_key_size = sizeof(struct ip_rt_info);
+		break;
+	case AF_INET6:
+		route_key_size = sizeof(struct ip6_rt_info);
+		break;
+	default:
+		route_key_size = 0;
+		break;
+	}
 
-	entry = kmalloc(sizeof(*entry) + afinfo->route_key_size, GFP_ATOMIC);
+	entry = kmalloc(sizeof(*entry) + route_key_size, GFP_ATOMIC);
 	if (!entry) {
 		status = -ENOMEM;
 		goto err;
@@ -142,12 +180,21 @@ static int __nf_queue(struct sk_buff *skb, const struct nf_hook_state *state,
 		.skb	= skb,
 		.state	= *state,
 		.hook_index = index,
-		.size	= sizeof(*entry) + afinfo->route_key_size,
+		.size	= sizeof(*entry) + route_key_size,
 	};
 
 	nf_queue_entry_get_refs(entry);
 	skb_dst_force(skb);
-	afinfo->saveroute(skb, entry);
+
+	switch (entry->state.pf) {
+	case AF_INET:
+		nf_ip_saveroute(skb, entry);
+		break;
+	case AF_INET6:
+		nf_ip6_saveroute(skb, entry);
+		break;
+	}
+
 	status = qh->outfn(entry, queuenum);
 
 	if (status < 0) {
@@ -204,13 +251,31 @@ static unsigned int nf_iterate(struct sk_buff *skb,
 	return NF_ACCEPT;
 }
 
+static struct nf_hook_entries *nf_hook_entries_head(const struct net *net, u8 pf, u8 hooknum)
+{
+	switch (pf) {
+#ifdef CONFIG_NETFILTER_FAMILY_BRIDGE
+	case NFPROTO_BRIDGE:
+		return rcu_dereference(net->nf.hooks_bridge[hooknum]);
+#endif
+	case NFPROTO_IPV4:
+		return rcu_dereference(net->nf.hooks_ipv4[hooknum]);
+	case NFPROTO_IPV6:
+		return rcu_dereference(net->nf.hooks_ipv6[hooknum]);
+	default:
+		WARN_ON_ONCE(1);
+		return NULL;
+	}
+
+	return NULL;
+}
+
 /* Caller must hold rcu read-side lock */
 void nf_reinject(struct nf_queue_entry *entry, unsigned int verdict)
 {
 	const struct nf_hook_entry *hook_entry;
 	const struct nf_hook_entries *hooks;
 	struct sk_buff *skb = entry->skb;
-	const struct nf_afinfo *afinfo;
 	const struct net *net;
 	unsigned int i;
 	int err;
@@ -219,12 +284,12 @@ void nf_reinject(struct nf_queue_entry *entry, unsigned int verdict)
 	net = entry->state.net;
 	pf = entry->state.pf;
 
-	hooks = rcu_dereference(net->nf.hooks[pf][entry->state.hook]);
+	hooks = nf_hook_entries_head(net, pf, entry->state.hook);
 
 	nf_queue_entry_release_refs(entry);
 
 	i = entry->hook_index;
-	if (WARN_ON_ONCE(i >= hooks->num_hook_entries)) {
+	if (WARN_ON_ONCE(!hooks || i >= hooks->num_hook_entries)) {
 		kfree_skb(skb);
 		kfree(entry);
 		return;
@@ -237,8 +302,7 @@ void nf_reinject(struct nf_queue_entry *entry, unsigned int verdict)
 		verdict = nf_hook_entry_hookfn(hook_entry, skb, &entry->state);
 
 	if (verdict == NF_ACCEPT) {
-		afinfo = nf_get_afinfo(entry->state.pf);
-		if (!afinfo || afinfo->reroute(entry->state.net, skb, entry) < 0)
+		if (nf_reroute(skb, entry) < 0)
 			verdict = NF_DROP;
 	}
 
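
__nf_queue() keeps the single-allocation layout the afinfo code used:
the per-family route snapshot is carved out of the same kmalloc() as
the entry itself, sized by the new route_key_size switch (assumption:
nf_queue_entry_reroute() hands back the bytes just past the entry). A
userspace sketch of the layout, with illustrative names rather than
kernel API:

	#include <stdlib.h>

	struct entry_hdr {
		unsigned int	hook_index;
		size_t		size;	/* header + route snapshot */
	};

	static struct entry_hdr *entry_alloc(size_t route_key_size)
	{
		struct entry_hdr *e = malloc(sizeof(*e) + route_key_size);

		if (e)
			e->size = sizeof(*e) + route_key_size;
		return e;
	}

	static void *entry_reroute(struct entry_hdr *e)
	{
		return e + 1;	/* storage immediately after the header */
	}
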
diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
index 07bd413..336b816 100644
--- a/net/netfilter/nf_tables_api.c
+++ b/net/netfilter/nf_tables_api.c
@@ -17,6 +17,7 @@
 #include <linux/netfilter.h>
 #include <linux/netfilter/nfnetlink.h>
 #include <linux/netfilter/nf_tables.h>
+#include <net/netfilter/nf_flow_table.h>
 #include <net/netfilter/nf_tables_core.h>
 #include <net/netfilter/nf_tables.h>
 #include <net/net_namespace.h>
@@ -24,6 +25,7 @@
 
 static LIST_HEAD(nf_tables_expressions);
 static LIST_HEAD(nf_tables_objects);
+static LIST_HEAD(nf_tables_flowtables);
 
 /**
  *	nft_register_afinfo - register nf_tables address family info
@@ -139,29 +141,26 @@ static void nft_trans_destroy(struct nft_trans *trans)
 	kfree(trans);
 }
 
-static int nf_tables_register_hooks(struct net *net,
-				    const struct nft_table *table,
-				    struct nft_chain *chain,
-				    unsigned int hook_nops)
+static int nf_tables_register_hook(struct net *net,
+				   const struct nft_table *table,
+				   struct nft_chain *chain)
 {
 	if (table->flags & NFT_TABLE_F_DORMANT ||
 	    !nft_is_base_chain(chain))
 		return 0;
 
-	return nf_register_net_hooks(net, nft_base_chain(chain)->ops,
-				     hook_nops);
+	return nf_register_net_hook(net, &nft_base_chain(chain)->ops);
 }
 
-static void nf_tables_unregister_hooks(struct net *net,
-				       const struct nft_table *table,
-				       struct nft_chain *chain,
-				       unsigned int hook_nops)
+static void nf_tables_unregister_hook(struct net *net,
+				      const struct nft_table *table,
+				      struct nft_chain *chain)
 {
 	if (table->flags & NFT_TABLE_F_DORMANT ||
 	    !nft_is_base_chain(chain))
 		return;
 
-	nf_unregister_net_hooks(net, nft_base_chain(chain)->ops, hook_nops);
+	nf_unregister_net_hook(net, &nft_base_chain(chain)->ops);
 }
 
 static int nft_trans_table_add(struct nft_ctx *ctx, int msg_type)
@@ -348,6 +347,40 @@ static int nft_delobj(struct nft_ctx *ctx, struct nft_object *obj)
 	return err;
 }
 
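+/* Record the flowtable operation in the per-netns transaction log so it
+ * commits or rolls back atomically with the rest of the batch; a new
+ * flowtable is made visible in the next generation right away.
+ */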
+static int nft_trans_flowtable_add(struct nft_ctx *ctx, int msg_type,
+				   struct nft_flowtable *flowtable)
+{
+	struct nft_trans *trans;
+
+	trans = nft_trans_alloc(ctx, msg_type,
+				sizeof(struct nft_trans_flowtable));
+	if (trans == NULL)
+		return -ENOMEM;
+
+	if (msg_type == NFT_MSG_NEWFLOWTABLE)
+		nft_activate_next(ctx->net, flowtable);
+
+	nft_trans_flowtable(trans) = flowtable;
+	list_add_tail(&trans->list, &ctx->net->nft.commit_list);
+
+	return 0;
+}
+
+static int nft_delflowtable(struct nft_ctx *ctx,
+			    struct nft_flowtable *flowtable)
+{
+	int err;
+
+	err = nft_trans_flowtable_add(ctx, NFT_MSG_DELFLOWTABLE, flowtable);
+	if (err < 0)
+		return err;
+
+	nft_deactivate_next(ctx->net, flowtable);
+	ctx->table->use--;
+
+	return err;
+}
+
 /*
  * Tables
  */
@@ -595,8 +628,7 @@ static void _nf_tables_table_disable(struct net *net,
 		if (cnt && i++ == cnt)
 			break;
 
-		nf_unregister_net_hooks(net, nft_base_chain(chain)->ops,
-					afi->nops);
+		nf_unregister_net_hook(net, &nft_base_chain(chain)->ops);
 	}
 }
 
@@ -613,8 +645,7 @@ static int nf_tables_table_enable(struct net *net,
 		if (!nft_is_base_chain(chain))
 			continue;
 
-		err = nf_register_net_hooks(net, nft_base_chain(chain)->ops,
-					    afi->nops);
+		err = nf_register_net_hook(net, &nft_base_chain(chain)->ops);
 		if (err < 0)
 			goto err;
 
@@ -733,6 +764,7 @@ static int nf_tables_newtable(struct net *net, struct sock *nlsk,
 	INIT_LIST_HEAD(&table->chains);
 	INIT_LIST_HEAD(&table->sets);
 	INIT_LIST_HEAD(&table->objects);
+	INIT_LIST_HEAD(&table->flowtables);
 	table->flags = flags;
 
 	nft_ctx_init(&ctx, net, skb, nlh, afi, table, NULL, nla);
@@ -754,10 +786,11 @@ static int nf_tables_newtable(struct net *net, struct sock *nlsk,
 
 static int nft_flush_table(struct nft_ctx *ctx)
 {
-	int err;
+	struct nft_flowtable *flowtable, *nft;
 	struct nft_chain *chain, *nc;
 	struct nft_object *obj, *ne;
 	struct nft_set *set, *ns;
+	int err;
 
 	list_for_each_entry(chain, &ctx->table->chains, list) {
 		if (!nft_is_active_next(ctx->net, chain))
@@ -774,7 +807,7 @@ static int nft_flush_table(struct nft_ctx *ctx)
 		if (!nft_is_active_next(ctx->net, set))
 			continue;
 
-		if (set->flags & NFT_SET_ANONYMOUS &&
+		if (nft_set_is_anonymous(set) &&
 		    !list_empty(&set->bindings))
 			continue;
 
@@ -783,6 +816,12 @@ static int nft_flush_table(struct nft_ctx *ctx)
 			goto out;
 	}
 
+	list_for_each_entry_safe(flowtable, nft, &ctx->table->flowtables, list) {
+		err = nft_delflowtable(ctx, flowtable);
+		if (err < 0)
+			goto out;
+	}
+
 	list_for_each_entry_safe(obj, ne, &ctx->table->objects, list) {
 		err = nft_delobj(ctx, obj);
 		if (err < 0)
@@ -1026,7 +1065,7 @@ static int nf_tables_fill_chain_info(struct sk_buff *skb, struct net *net,
 
 	if (nft_is_base_chain(chain)) {
 		const struct nft_base_chain *basechain = nft_base_chain(chain);
-		const struct nf_hook_ops *ops = &basechain->ops[0];
+		const struct nf_hook_ops *ops = &basechain->ops;
 		struct nlattr *nest;
 
 		nest = nla_nest_start(skb, NFTA_CHAIN_HOOK);
@@ -1227,13 +1266,13 @@ static struct nft_stats __percpu *nft_stats_alloc(const struct nlattr *attr)
 static void nft_chain_stats_replace(struct nft_base_chain *chain,
 				    struct nft_stats __percpu *newstats)
 {
+	struct nft_stats __percpu *oldstats;
+
 	if (newstats == NULL)
 		return;
 
 	if (chain->stats) {
-		struct nft_stats __percpu *oldstats =
-				nft_dereference(chain->stats);
-
+		oldstats = nfnl_dereference(chain->stats, NFNL_SUBSYS_NFTABLES);
 		rcu_assign_pointer(chain->stats, newstats);
 		synchronize_rcu();
 		free_percpu(oldstats);
@@ -1252,8 +1291,8 @@ static void nf_tables_chain_destroy(struct nft_chain *chain)
 		free_percpu(basechain->stats);
 		if (basechain->stats)
 			static_branch_dec(&nft_counters_enabled);
-		if (basechain->ops[0].dev != NULL)
-			dev_put(basechain->ops[0].dev);
+		if (basechain->ops.dev != NULL)
+			dev_put(basechain->ops.dev);
 		kfree(chain->name);
 		kfree(basechain);
 	} else {
@@ -1264,7 +1303,7 @@ static void nf_tables_chain_destroy(struct nft_chain *chain)
 
 struct nft_chain_hook {
 	u32				num;
-	u32				priority;
+	s32				priority;
 	const struct nf_chain_type	*type;
 	struct net_device		*dev;
 };
@@ -1303,6 +1342,11 @@ static int nft_chain_parse_hook(struct net *net,
 	}
 	if (!(type->hook_mask & (1 << hook->num)))
 		return -EOPNOTSUPP;
+
+	if (type->type == NFT_CHAIN_T_NAT &&
+	    hook->priority <= NF_IP_PRI_CONNTRACK)
+		return -EOPNOTSUPP;
+
 	if (!try_module_get(type->owner))
 		return -ENOENT;
 
@@ -1349,7 +1393,6 @@ static int nf_tables_addchain(struct nft_ctx *ctx, u8 family, u8 genmask,
 	struct nft_stats __percpu *stats;
 	struct net *net = ctx->net;
 	struct nft_chain *chain;
-	unsigned int i;
 	int err;
 
 	if (table->use == UINT_MAX)
@@ -1358,7 +1401,6 @@ static int nf_tables_addchain(struct nft_ctx *ctx, u8 family, u8 genmask,
 	if (nla[NFTA_CHAIN_HOOK]) {
 		struct nft_chain_hook hook;
 		struct nf_hook_ops *ops;
-		nf_hookfn *hookfn;
 
 		err = nft_chain_parse_hook(net, nla, afi, &hook, create);
 		if (err < 0)
@@ -1384,23 +1426,19 @@ static int nf_tables_addchain(struct nft_ctx *ctx, u8 family, u8 genmask,
 			static_branch_inc(&nft_counters_enabled);
 		}
 
-		hookfn = hook.type->hooks[hook.num];
 		basechain->type = hook.type;
 		chain = &basechain->chain;
 
-		for (i = 0; i < afi->nops; i++) {
-			ops = &basechain->ops[i];
-			ops->pf		= family;
-			ops->hooknum	= hook.num;
-			ops->priority	= hook.priority;
-			ops->priv	= chain;
-			ops->hook	= afi->hooks[ops->hooknum];
-			ops->dev	= hook.dev;
-			if (hookfn)
-				ops->hook = hookfn;
-			if (afi->hook_ops_init)
-				afi->hook_ops_init(ops, i);
-		}
+		ops		= &basechain->ops;
+		ops->pf		= family;
+		ops->hooknum	= hook.num;
+		ops->priority	= hook.priority;
+		ops->priv	= chain;
+		ops->hook	= hook.type->hooks[ops->hooknum];
+		ops->dev	= hook.dev;
+
+		if (basechain->type->type == NFT_CHAIN_T_NAT)
+			ops->nat_hook = true;
 
 		chain->flags |= NFT_BASE_CHAIN;
 		basechain->policy = policy;
@@ -1418,7 +1456,7 @@ static int nf_tables_addchain(struct nft_ctx *ctx, u8 family, u8 genmask,
 		goto err1;
 	}
 
-	err = nf_tables_register_hooks(net, table, chain, afi->nops);
+	err = nf_tables_register_hook(net, table, chain);
 	if (err < 0)
 		goto err1;
 
@@ -1432,7 +1470,7 @@ static int nf_tables_addchain(struct nft_ctx *ctx, u8 family, u8 genmask,
 
 	return 0;
 err2:
-	nf_tables_unregister_hooks(net, table, chain, afi->nops);
+	nf_tables_unregister_hook(net, table, chain);
 err1:
 	nf_tables_chain_destroy(chain);
 
@@ -1445,14 +1483,13 @@ static int nf_tables_updchain(struct nft_ctx *ctx, u8 genmask, u8 policy,
 	const struct nlattr * const *nla = ctx->nla;
 	struct nft_table *table = ctx->table;
 	struct nft_chain *chain = ctx->chain;
-	struct nft_af_info *afi = ctx->afi;
 	struct nft_base_chain *basechain;
 	struct nft_stats *stats = NULL;
 	struct nft_chain_hook hook;
 	const struct nlattr *name;
 	struct nf_hook_ops *ops;
 	struct nft_trans *trans;
-	int err, i;
+	int err;
 
 	if (nla[NFTA_CHAIN_HOOK]) {
 		if (!nft_is_base_chain(chain))
@@ -1469,14 +1506,12 @@ static int nf_tables_updchain(struct nft_ctx *ctx, u8 genmask, u8 policy,
 			return -EBUSY;
 		}
 
-		for (i = 0; i < afi->nops; i++) {
-			ops = &basechain->ops[i];
-			if (ops->hooknum != hook.num ||
-			    ops->priority != hook.priority ||
-			    ops->dev != hook.dev) {
-				nft_chain_release_hook(&hook);
-				return -EBUSY;
-			}
+		ops = &basechain->ops;
+		if (ops->hooknum != hook.num ||
+		    ops->priority != hook.priority ||
+		    ops->dev != hook.dev) {
+			nft_chain_release_hook(&hook);
+			return -EBUSY;
 		}
 		nft_chain_release_hook(&hook);
 	}
@@ -3277,7 +3312,7 @@ int nf_tables_bind_set(const struct nft_ctx *ctx, struct nft_set *set,
 	struct nft_set_binding *i;
 	struct nft_set_iter iter;
 
-	if (!list_empty(&set->bindings) && set->flags & NFT_SET_ANONYMOUS)
+	if (!list_empty(&set->bindings) && nft_set_is_anonymous(set))
 		return -EBUSY;
 
 	if (binding->flags & NFT_SET_MAP) {
@@ -3312,7 +3347,7 @@ void nf_tables_unbind_set(const struct nft_ctx *ctx, struct nft_set *set,
 {
 	list_del_rcu(&binding->list);
 
-	if (list_empty(&set->bindings) && set->flags & NFT_SET_ANONYMOUS &&
+	if (list_empty(&set->bindings) && nft_set_is_anonymous(set) &&
 	    nft_is_active(ctx->net, set))
 		nf_tables_set_destroy(ctx, set);
 }
@@ -4850,6 +4885,605 @@ static void nf_tables_obj_notify(const struct nft_ctx *ctx,
 		       ctx->afi->family, ctx->report, GFP_KERNEL);
 }
 
+/*
+ * Flow tables
+ */
+void nft_register_flowtable_type(struct nf_flowtable_type *type)
+{
+	nfnl_lock(NFNL_SUBSYS_NFTABLES);
+	list_add_tail_rcu(&type->list, &nf_tables_flowtables);
+	nfnl_unlock(NFNL_SUBSYS_NFTABLES);
+}
+EXPORT_SYMBOL_GPL(nft_register_flowtable_type);
+
+void nft_unregister_flowtable_type(struct nf_flowtable_type *type)
+{
+	nfnl_lock(NFNL_SUBSYS_NFTABLES);
+	list_del_rcu(&type->list);
+	nfnl_unlock(NFNL_SUBSYS_NFTABLES);
+}
+EXPORT_SYMBOL_GPL(nft_unregister_flowtable_type);
+
+static const struct nla_policy nft_flowtable_policy[NFTA_FLOWTABLE_MAX + 1] = {
+	[NFTA_FLOWTABLE_TABLE]		= { .type = NLA_STRING,
+					    .len = NFT_NAME_MAXLEN - 1 },
+	[NFTA_FLOWTABLE_NAME]		= { .type = NLA_STRING,
+					    .len = NFT_NAME_MAXLEN - 1 },
+	[NFTA_FLOWTABLE_HOOK]		= { .type = NLA_NESTED },
+};
+
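+/* Look up a flowtable by name within a table, filtered by the generation
+ * mask; also used by the flow_offload expression to resolve its target.
+ */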
+struct nft_flowtable *nf_tables_flowtable_lookup(const struct nft_table *table,
+						 const struct nlattr *nla,
+						 u8 genmask)
+{
+	struct nft_flowtable *flowtable;
+
+	list_for_each_entry(flowtable, &table->flowtables, list) {
+		if (!nla_strcmp(nla, flowtable->name) &&
+		    nft_active_genmask(flowtable, genmask))
+			return flowtable;
+	}
+	return ERR_PTR(-ENOENT);
+}
+EXPORT_SYMBOL_GPL(nf_tables_flowtable_lookup);
+
+#define NFT_FLOWTABLE_DEVICE_MAX	8
+
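+/* Parse the nested NFTA_DEVICE_NAME attributes into dev_array, taking a
+ * reference on every device; at most NFT_FLOWTABLE_DEVICE_MAX devices may
+ * be attached to one flowtable.
+ */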
+static int nf_tables_parse_devices(const struct nft_ctx *ctx,
+				   const struct nlattr *attr,
+				   struct net_device *dev_array[], int *len)
+{
+	const struct nlattr *tmp;
+	struct net_device *dev;
+	char ifname[IFNAMSIZ];
+	int rem, n = 0, err;
+
+	nla_for_each_nested(tmp, attr, rem) {
+		if (nla_type(tmp) != NFTA_DEVICE_NAME) {
+			err = -EINVAL;
+			goto err1;
+		}
+
+		nla_strlcpy(ifname, tmp, IFNAMSIZ);
+		dev = dev_get_by_name(ctx->net, ifname);
+		if (!dev) {
+			err = -ENOENT;
+			goto err1;
+		}
+
+		if (n == NFT_FLOWTABLE_DEVICE_MAX) {
+			dev_put(dev);
+			err = -EFBIG;
+			goto err1;
+		}
+
+		dev_array[n++] = dev;
+	}
+	if (!n)
+		return -EINVAL;
+
+	err = 0;
+err1:
+	*len = n;
+	return err;
+}
+
+static const struct nla_policy nft_flowtable_hook_policy[NFTA_FLOWTABLE_HOOK_MAX + 1] = {
+	[NFTA_FLOWTABLE_HOOK_NUM]	= { .type = NLA_U32 },
+	[NFTA_FLOWTABLE_HOOK_PRIORITY]	= { .type = NLA_U32 },
+	[NFTA_FLOWTABLE_HOOK_DEVS]	= { .type = NLA_NESTED },
+};
+
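+/* Translate the NFTA_FLOWTABLE_HOOK attributes into one NFPROTO_NETDEV
+ * nf_hook_ops entry per device, all sharing the same hook number and
+ * priority. The device references taken while parsing are dropped again
+ * before returning; the netdev notifier below invalidates stale entries.
+ */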
+static int nf_tables_flowtable_parse_hook(const struct nft_ctx *ctx,
+					  const struct nlattr *attr,
+					  struct nft_flowtable *flowtable)
+{
+	struct net_device *dev_array[NFT_FLOWTABLE_DEVICE_MAX];
+	struct nlattr *tb[NFTA_FLOWTABLE_HOOK_MAX + 1];
+	struct nf_hook_ops *ops;
+	int hooknum, priority;
+	int err, n = 0, i;
+
+	err = nla_parse_nested(tb, NFTA_FLOWTABLE_HOOK_MAX, attr,
+			       nft_flowtable_hook_policy, NULL);
+	if (err < 0)
+		return err;
+
+	if (!tb[NFTA_FLOWTABLE_HOOK_NUM] ||
+	    !tb[NFTA_FLOWTABLE_HOOK_PRIORITY] ||
+	    !tb[NFTA_FLOWTABLE_HOOK_DEVS])
+		return -EINVAL;
+
+	hooknum = ntohl(nla_get_be32(tb[NFTA_FLOWTABLE_HOOK_NUM]));
+	if (hooknum >= ctx->afi->nhooks)
+		return -EINVAL;
+
+	priority = ntohl(nla_get_be32(tb[NFTA_FLOWTABLE_HOOK_PRIORITY]));
+
+	err = nf_tables_parse_devices(ctx, tb[NFTA_FLOWTABLE_HOOK_DEVS],
+				      dev_array, &n);
+	if (err < 0)
+		goto err1;
+
+	ops = kcalloc(n, sizeof(struct nf_hook_ops), GFP_KERNEL);
+	if (!ops) {
+		err = -ENOMEM;
+		goto err1;
+	}
+
+	flowtable->hooknum	= hooknum;
+	flowtable->priority	= priority;
+	flowtable->ops		= ops;
+	flowtable->ops_len	= n;
+
+	for (i = 0; i < n; i++) {
+		flowtable->ops[i].pf		= NFPROTO_NETDEV;
+		flowtable->ops[i].hooknum	= hooknum;
+		flowtable->ops[i].priority	= priority;
+		flowtable->ops[i].priv		= &flowtable->data.rhashtable;
+		flowtable->ops[i].hook		= flowtable->data.type->hook;
+		flowtable->ops[i].dev		= dev_array[i];
+	}
+
+	err = 0;
+err1:
+	for (i = 0; i < n; i++)
+		dev_put(dev_array[i]);
+
+	return err;
+}
+
+static const struct nf_flowtable_type *
+__nft_flowtable_type_get(const struct nft_af_info *afi)
+{
+	const struct nf_flowtable_type *type;
+
+	list_for_each_entry(type, &nf_tables_flowtables, list) {
+		if (afi->family == type->family)
+			return type;
+	}
+	return NULL;
+}
+
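+/* As __nft_flowtable_type_get(), but takes a reference on the type's
+ * module and, if the type is missing, drops the nfnl lock to attempt a
+ * module autoload, signalling the caller to restart via -EAGAIN.
+ */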
+static const struct nf_flowtable_type *
+nft_flowtable_type_get(const struct nft_af_info *afi)
+{
+	const struct nf_flowtable_type *type;
+
+	type = __nft_flowtable_type_get(afi);
+	if (type != NULL && try_module_get(type->owner))
+		return type;
+
+#ifdef CONFIG_MODULES
+	if (type == NULL) {
+		nfnl_unlock(NFNL_SUBSYS_NFTABLES);
+		request_module("nf-flowtable-%u", afi->family);
+		nfnl_lock(NFNL_SUBSYS_NFTABLES);
+		if (__nft_flowtable_type_get(afi))
+			return ERR_PTR(-EAGAIN);
+	}
+#endif
+	return ERR_PTR(-ENOENT);
+}
+
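+/* Walk all flowtables in this netns under the RCU read lock and apply the
+ * callback to their nf_flowtable data.
+ */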
+void nft_flow_table_iterate(struct net *net,
+			    void (*iter)(struct nf_flowtable *flowtable, void *data),
+			    void *data)
+{
+	struct nft_flowtable *flowtable;
+	const struct nft_af_info *afi;
+	const struct nft_table *table;
+
+	rcu_read_lock();
+	list_for_each_entry_rcu(afi, &net->nft.af_info, list) {
+		list_for_each_entry_rcu(table, &afi->tables, list) {
+			list_for_each_entry_rcu(flowtable, &table->flowtables, list) {
+				iter(&flowtable->data, data);
+			}
+		}
+	}
+	rcu_read_unlock();
+}
+EXPORT_SYMBOL_GPL(nft_flow_table_iterate);
+
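+/* Tear down a flowtable's ingress hooks; entries whose device was already
+ * cleared by the netdev notifier are skipped.
+ */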
+static void nft_unregister_flowtable_net_hooks(struct net *net,
+					       struct nft_flowtable *flowtable)
+{
+	int i;
+
+	for (i = 0; i < flowtable->ops_len; i++) {
+		if (!flowtable->ops[i].dev)
+			continue;
+
+		nf_unregister_net_hook(net, &flowtable->ops[i]);
+	}
+}
+
+static int nf_tables_newflowtable(struct net *net, struct sock *nlsk,
+				  struct sk_buff *skb,
+				  const struct nlmsghdr *nlh,
+				  const struct nlattr * const nla[],
+				  struct netlink_ext_ack *extack)
+{
+	const struct nfgenmsg *nfmsg = nlmsg_data(nlh);
+	const struct nf_flowtable_type *type;
+	u8 genmask = nft_genmask_next(net);
+	int family = nfmsg->nfgen_family;
+	struct nft_flowtable *flowtable;
+	struct nft_af_info *afi;
+	struct nft_table *table;
+	struct nft_ctx ctx;
+	int err, i, k;
+
+	if (!nla[NFTA_FLOWTABLE_TABLE] ||
+	    !nla[NFTA_FLOWTABLE_NAME] ||
+	    !nla[NFTA_FLOWTABLE_HOOK])
+		return -EINVAL;
+
+	afi = nf_tables_afinfo_lookup(net, family, true);
+	if (IS_ERR(afi))
+		return PTR_ERR(afi);
+
+	table = nf_tables_table_lookup(afi, nla[NFTA_FLOWTABLE_TABLE], genmask);
+	if (IS_ERR(table))
+		return PTR_ERR(table);
+
+	flowtable = nf_tables_flowtable_lookup(table, nla[NFTA_FLOWTABLE_NAME],
+					       genmask);
+	if (IS_ERR(flowtable)) {
+		err = PTR_ERR(flowtable);
+		if (err != -ENOENT)
+			return err;
+	} else {
+		if (nlh->nlmsg_flags & NLM_F_EXCL)
+			return -EEXIST;
+
+		return 0;
+	}
+
+	nft_ctx_init(&ctx, net, skb, nlh, afi, table, NULL, nla);
+
+	flowtable = kzalloc(sizeof(*flowtable), GFP_KERNEL);
+	if (!flowtable)
+		return -ENOMEM;
+
+	flowtable->table = table;
+	flowtable->name = nla_strdup(nla[NFTA_FLOWTABLE_NAME], GFP_KERNEL);
+	if (!flowtable->name) {
+		err = -ENOMEM;
+		goto err1;
+	}
+
+	type = nft_flowtable_type_get(afi);
+	if (IS_ERR(type)) {
+		err = PTR_ERR(type);
+		goto err2;
+	}
+
+	flowtable->data.type = type;
+	err = rhashtable_init(&flowtable->data.rhashtable, type->params);
+	if (err < 0)
+		goto err3;
+
+	err = nf_tables_flowtable_parse_hook(&ctx, nla[NFTA_FLOWTABLE_HOOK],
+					     flowtable);
+	if (err < 0)
+		goto err3;
+
+	for (i = 0; i < flowtable->ops_len; i++) {
+		err = nf_register_net_hook(net, &flowtable->ops[i]);
+		if (err < 0)
+			goto err4;
+	}
+
+	err = nft_trans_flowtable_add(&ctx, NFT_MSG_NEWFLOWTABLE, flowtable);
+	if (err < 0)
+		goto err5;
+
+	INIT_DEFERRABLE_WORK(&flowtable->data.gc_work, type->gc);
+	queue_delayed_work(system_power_efficient_wq,
+			   &flowtable->data.gc_work, HZ);
+
+	list_add_tail_rcu(&flowtable->list, &table->flowtables);
+	table->use++;
+
+	return 0;
+err5:
+	i = flowtable->ops_len;
+err4:
+	for (k = i - 1; k >= 0; k--)
+		nf_unregister_net_hook(net, &flowtable->ops[k]);
+
+	kfree(flowtable->ops);
+err3:
+	module_put(type->owner);
+err2:
+	kfree(flowtable->name);
+err1:
+	kfree(flowtable);
+	return err;
+}
+
+static int nf_tables_delflowtable(struct net *net, struct sock *nlsk,
+				  struct sk_buff *skb,
+				  const struct nlmsghdr *nlh,
+				  const struct nlattr * const nla[],
+				  struct netlink_ext_ack *extack)
+{
+	const struct nfgenmsg *nfmsg = nlmsg_data(nlh);
+	u8 genmask = nft_genmask_next(net);
+	int family = nfmsg->nfgen_family;
+	struct nft_flowtable *flowtable;
+	struct nft_af_info *afi;
+	struct nft_table *table;
+	struct nft_ctx ctx;
+
+	if (!nla[NFTA_FLOWTABLE_TABLE] ||
+	    !nla[NFTA_FLOWTABLE_NAME])
+		return -EINVAL;
+
+	afi = nf_tables_afinfo_lookup(net, family, true);
+	if (IS_ERR(afi))
+		return PTR_ERR(afi);
+
+	table = nf_tables_table_lookup(afi, nla[NFTA_FLOWTABLE_TABLE], genmask);
+	if (IS_ERR(table))
+		return PTR_ERR(table);
+
+	flowtable = nf_tables_flowtable_lookup(table, nla[NFTA_FLOWTABLE_NAME],
+					       genmask);
+	if (IS_ERR(flowtable))
+		return PTR_ERR(flowtable);
+	if (flowtable->use > 0)
+		return -EBUSY;
+
+	nft_ctx_init(&ctx, net, skb, nlh, afi, table, NULL, nla);
+
+	return nft_delflowtable(&ctx, flowtable);
+}
+
+static int nf_tables_fill_flowtable_info(struct sk_buff *skb, struct net *net,
+					 u32 portid, u32 seq, int event,
+					 u32 flags, int family,
+					 struct nft_flowtable *flowtable)
+{
+	struct nlattr *nest, *nest_devs;
+	struct nfgenmsg *nfmsg;
+	struct nlmsghdr *nlh;
+	int i;
+
+	event = nfnl_msg_type(NFNL_SUBSYS_NFTABLES, event);
+	nlh = nlmsg_put(skb, portid, seq, event, sizeof(struct nfgenmsg), flags);
+	if (nlh == NULL)
+		goto nla_put_failure;
+
+	nfmsg = nlmsg_data(nlh);
+	nfmsg->nfgen_family	= family;
+	nfmsg->version		= NFNETLINK_V0;
+	nfmsg->res_id		= htons(net->nft.base_seq & 0xffff);
+
+	if (nla_put_string(skb, NFTA_FLOWTABLE_TABLE, flowtable->table->name) ||
+	    nla_put_string(skb, NFTA_FLOWTABLE_NAME, flowtable->name) ||
+	    nla_put_be32(skb, NFTA_FLOWTABLE_USE, htonl(flowtable->use)))
+		goto nla_put_failure;
+
+	nest = nla_nest_start(skb, NFTA_FLOWTABLE_HOOK);
+	if (!nest)
+		goto nla_put_failure;
+	if (nla_put_be32(skb, NFTA_FLOWTABLE_HOOK_NUM, htonl(flowtable->hooknum)) ||
+	    nla_put_be32(skb, NFTA_FLOWTABLE_HOOK_PRIORITY, htonl(flowtable->priority)))
+		goto nla_put_failure;
+
+	nest_devs = nla_nest_start(skb, NFTA_FLOWTABLE_HOOK_DEVS);
+	if (!nest_devs)
+		goto nla_put_failure;
+
+	for (i = 0; i < flowtable->ops_len; i++) {
+		if (flowtable->ops[i].dev &&
+		    nla_put_string(skb, NFTA_DEVICE_NAME,
+				   flowtable->ops[i].dev->name))
+			goto nla_put_failure;
+	}
+	nla_nest_end(skb, nest_devs);
+	nla_nest_end(skb, nest);
+
+	nlmsg_end(skb, nlh);
+	return 0;
+
+nla_put_failure:
+	nlmsg_trim(skb, nlh);
+	return -1;
+}
+
+struct nft_flowtable_filter {
+	char		*table;
+};
+
+static int nf_tables_dump_flowtable(struct sk_buff *skb,
+				    struct netlink_callback *cb)
+{
+	const struct nfgenmsg *nfmsg = nlmsg_data(cb->nlh);
+	struct nft_flowtable_filter *filter = cb->data;
+	unsigned int idx = 0, s_idx = cb->args[0];
+	struct net *net = sock_net(skb->sk);
+	int family = nfmsg->nfgen_family;
+	struct nft_flowtable *flowtable;
+	const struct nft_af_info *afi;
+	const struct nft_table *table;
+
+	rcu_read_lock();
+	cb->seq = net->nft.base_seq;
+
+	list_for_each_entry_rcu(afi, &net->nft.af_info, list) {
+		if (family != NFPROTO_UNSPEC && family != afi->family)
+			continue;
+
+		list_for_each_entry_rcu(table, &afi->tables, list) {
+			list_for_each_entry_rcu(flowtable, &table->flowtables, list) {
+				if (!nft_is_active(net, flowtable))
+					goto cont;
+				if (idx < s_idx)
+					goto cont;
+				if (idx > s_idx)
+					memset(&cb->args[1], 0,
+					       sizeof(cb->args) - sizeof(cb->args[0]));
+				if (filter && filter->table[0] &&
+				    strcmp(filter->table, table->name))
+					goto cont;
+
+				if (nf_tables_fill_flowtable_info(skb, net, NETLINK_CB(cb->skb).portid,
+								  cb->nlh->nlmsg_seq,
+								  NFT_MSG_NEWFLOWTABLE,
+								  NLM_F_MULTI | NLM_F_APPEND,
+								  afi->family, flowtable) < 0)
+					goto done;
+
+				nl_dump_check_consistent(cb, nlmsg_hdr(skb));
+cont:
+				idx++;
+			}
+		}
+	}
+done:
+	rcu_read_unlock();
+
+	cb->args[0] = idx;
+	return skb->len;
+}
+
+static int nf_tables_dump_flowtable_done(struct netlink_callback *cb)
+{
+	struct nft_flowtable_filter *filter = cb->data;
+
+	if (!filter)
+		return 0;
+
+	kfree(filter->table);
+	kfree(filter);
+
+	return 0;
+}
+
+static struct nft_flowtable_filter *
+nft_flowtable_filter_alloc(const struct nlattr * const nla[])
+{
+	struct nft_flowtable_filter *filter;
+
+	filter = kzalloc(sizeof(*filter), GFP_KERNEL);
+	if (!filter)
+		return ERR_PTR(-ENOMEM);
+
+	if (nla[NFTA_FLOWTABLE_TABLE]) {
+		filter->table = nla_strdup(nla[NFTA_FLOWTABLE_TABLE],
+					   GFP_KERNEL);
+		if (!filter->table) {
+			kfree(filter);
+			return ERR_PTR(-ENOMEM);
+		}
+	}
+	return filter;
+}
+
+static int nf_tables_getflowtable(struct net *net, struct sock *nlsk,
+				  struct sk_buff *skb,
+				  const struct nlmsghdr *nlh,
+				  const struct nlattr * const nla[],
+				  struct netlink_ext_ack *extack)
+{
+	const struct nfgenmsg *nfmsg = nlmsg_data(nlh);
+	u8 genmask = nft_genmask_cur(net);
+	int family = nfmsg->nfgen_family;
+	struct nft_flowtable *flowtable;
+	const struct nft_af_info *afi;
+	const struct nft_table *table;
+	struct sk_buff *skb2;
+	int err;
+
+	if (nlh->nlmsg_flags & NLM_F_DUMP) {
+		struct netlink_dump_control c = {
+			.dump = nf_tables_dump_flowtable,
+			.done = nf_tables_dump_flowtable_done,
+		};
+
+		if (nla[NFTA_FLOWTABLE_TABLE]) {
+			struct nft_flowtable_filter *filter;
+
+			filter = nft_flowtable_filter_alloc(nla);
+			if (IS_ERR(filter))
+				return PTR_ERR(filter);
+
+			c.data = filter;
+		}
+		return netlink_dump_start(nlsk, skb, nlh, &c);
+	}
+
+	if (!nla[NFTA_FLOWTABLE_NAME])
+		return -EINVAL;
+
+	afi = nf_tables_afinfo_lookup(net, family, false);
+	if (IS_ERR(afi))
+		return PTR_ERR(afi);
+
+	table = nf_tables_table_lookup(afi, nla[NFTA_FLOWTABLE_TABLE], genmask);
+	if (IS_ERR(table))
+		return PTR_ERR(table);
+
+	flowtable = nf_tables_flowtable_lookup(table, nla[NFTA_FLOWTABLE_NAME],
+					       genmask);
+	if (IS_ERR(flowtable))
+		return PTR_ERR(flowtable);
+
+	skb2 = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
+	if (!skb2)
+		return -ENOMEM;
+
+	err = nf_tables_fill_flowtable_info(skb2, net, NETLINK_CB(skb).portid,
+					    nlh->nlmsg_seq,
+					    NFT_MSG_NEWFLOWTABLE, 0, family,
+					    flowtable);
+	if (err < 0)
+		goto err;
+
+	return nlmsg_unicast(nlsk, skb2, NETLINK_CB(skb).portid);
+err:
+	kfree_skb(skb2);
+	return err;
+}
+
+static void nf_tables_flowtable_notify(struct nft_ctx *ctx,
+				       struct nft_flowtable *flowtable,
+				       int event)
+{
+	struct sk_buff *skb;
+	int err;
+
+	if (ctx->report &&
+	    !nfnetlink_has_listeners(ctx->net, NFNLGRP_NFTABLES))
+		return;
+
+	skb = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
+	if (skb == NULL)
+		goto err;
+
+	err = nf_tables_fill_flowtable_info(skb, ctx->net, ctx->portid,
+					    ctx->seq, event, 0,
+					    ctx->afi->family, flowtable);
+	if (err < 0) {
+		kfree_skb(skb);
+		goto err;
+	}
+
+	nfnetlink_send(skb, ctx->net, ctx->portid, NFNLGRP_NFTABLES,
+		       ctx->report, GFP_KERNEL);
+	return;
+err:
+	nfnetlink_set_err(ctx->net, ctx->portid, NFNLGRP_NFTABLES, -ENOBUFS);
+}
+
+static void nft_flowtable_destroy(void *ptr, void *arg)
+{
+	kfree(ptr);
+}
+
+static void nf_tables_flowtable_destroy(struct nft_flowtable *flowtable)
+{
+	cancel_delayed_work_sync(&flowtable->data.gc_work);
+	kfree(flowtable->name);
+	rhashtable_free_and_destroy(&flowtable->data.rhashtable,
+				    nft_flowtable_destroy, NULL);
+	module_put(flowtable->data.type->owner);
+}
+
 static int nf_tables_fill_gen_info(struct sk_buff *skb, struct net *net,
 				   u32 portid, u32 seq)
 {
@@ -4880,6 +5514,49 @@ static int nf_tables_fill_gen_info(struct sk_buff *skb, struct net *net,
 	return -EMSGSIZE;
 }
 
+static void nft_flowtable_event(unsigned long event, struct net_device *dev,
+				struct nft_flowtable *flowtable)
+{
+	int i;
+
+	for (i = 0; i < flowtable->ops_len; i++) {
+		if (flowtable->ops[i].dev != dev)
+			continue;
+
+		nf_unregister_net_hook(dev_net(dev), &flowtable->ops[i]);
+		flowtable->ops[i].dev = NULL;
+		break;
+	}
+}
+
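+/* On NETDEV_UNREGISTER, detach the disappearing device from every
+ * flowtable that still hooks it.
+ */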
+static int nf_tables_flowtable_event(struct notifier_block *this,
+				     unsigned long event, void *ptr)
+{
+	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
+	struct nft_flowtable *flowtable;
+	struct nft_table *table;
+	struct nft_af_info *afi;
+
+	if (event != NETDEV_UNREGISTER)
+		return 0;
+
+	nfnl_lock(NFNL_SUBSYS_NFTABLES);
+	list_for_each_entry(afi, &dev_net(dev)->nft.af_info, list) {
+		list_for_each_entry(table, &afi->tables, list) {
+			list_for_each_entry(flowtable, &table->flowtables, list) {
+				nft_flowtable_event(event, dev, flowtable);
+			}
+		}
+	}
+	nfnl_unlock(NFNL_SUBSYS_NFTABLES);
+
+	return NOTIFY_DONE;
+}
+
+static struct notifier_block nf_tables_flowtable_notifier = {
+	.notifier_call	= nf_tables_flowtable_event,
+};
+
 static void nf_tables_gen_notify(struct net *net, struct sk_buff *skb,
 				 int event)
 {
@@ -5032,6 +5709,21 @@ static const struct nfnl_callback nf_tables_cb[NFT_MSG_MAX] = {
 		.attr_count	= NFTA_OBJ_MAX,
 		.policy		= nft_obj_policy,
 	},
+	[NFT_MSG_NEWFLOWTABLE] = {
+		.call_batch	= nf_tables_newflowtable,
+		.attr_count	= NFTA_FLOWTABLE_MAX,
+		.policy		= nft_flowtable_policy,
+	},
+	[NFT_MSG_GETFLOWTABLE] = {
+		.call		= nf_tables_getflowtable,
+		.attr_count	= NFTA_FLOWTABLE_MAX,
+		.policy		= nft_flowtable_policy,
+	},
+	[NFT_MSG_DELFLOWTABLE] = {
+		.call_batch	= nf_tables_delflowtable,
+		.attr_count	= NFTA_FLOWTABLE_MAX,
+		.policy		= nft_flowtable_policy,
+	},
 };
 
 static void nft_chain_commit_update(struct nft_trans *trans)
@@ -5077,6 +5769,9 @@ static void nf_tables_commit_release(struct nft_trans *trans)
 	case NFT_MSG_DELOBJ:
 		nft_obj_destroy(nft_trans_obj(trans));
 		break;
+	case NFT_MSG_DELFLOWTABLE:
+		nf_tables_flowtable_destroy(nft_trans_flowtable(trans));
+		break;
 	}
 	kfree(trans);
 }
@@ -5129,10 +5824,9 @@ static int nf_tables_commit(struct net *net, struct sk_buff *skb)
 		case NFT_MSG_DELCHAIN:
 			list_del_rcu(&trans->ctx.chain->list);
 			nf_tables_chain_notify(&trans->ctx, NFT_MSG_DELCHAIN);
-			nf_tables_unregister_hooks(trans->ctx.net,
-						   trans->ctx.table,
-						   trans->ctx.chain,
-						   trans->ctx.afi->nops);
+			nf_tables_unregister_hook(trans->ctx.net,
+						  trans->ctx.table,
+						  trans->ctx.chain);
 			break;
 		case NFT_MSG_NEWRULE:
 			nft_clear(trans->ctx.net, nft_trans_rule(trans));
@@ -5152,7 +5846,7 @@ static int nf_tables_commit(struct net *net, struct sk_buff *skb)
 			/* This avoids hitting -EBUSY when deleting the table
 			 * from the transaction.
 			 */
-			if (nft_trans_set(trans)->flags & NFT_SET_ANONYMOUS &&
+			if (nft_set_is_anonymous(nft_trans_set(trans)) &&
 			    !list_empty(&nft_trans_set(trans)->bindings))
 				trans->ctx.table->use--;
 
@@ -5195,6 +5889,21 @@ static int nf_tables_commit(struct net *net, struct sk_buff *skb)
 			nf_tables_obj_notify(&trans->ctx, nft_trans_obj(trans),
 					     NFT_MSG_DELOBJ);
 			break;
+		case NFT_MSG_NEWFLOWTABLE:
+			nft_clear(net, nft_trans_flowtable(trans));
+			nf_tables_flowtable_notify(&trans->ctx,
+						   nft_trans_flowtable(trans),
+						   NFT_MSG_NEWFLOWTABLE);
+			nft_trans_destroy(trans);
+			break;
+		case NFT_MSG_DELFLOWTABLE:
+			list_del_rcu(&nft_trans_flowtable(trans)->list);
+			nf_tables_flowtable_notify(&trans->ctx,
+						   nft_trans_flowtable(trans),
+						   NFT_MSG_DELFLOWTABLE);
+			nft_unregister_flowtable_net_hooks(net,
+					nft_trans_flowtable(trans));
+			break;
 		}
 	}
 
@@ -5232,6 +5941,9 @@ static void nf_tables_abort_release(struct nft_trans *trans)
 	case NFT_MSG_NEWOBJ:
 		nft_obj_destroy(nft_trans_obj(trans));
 		break;
+	case NFT_MSG_NEWFLOWTABLE:
+		nf_tables_flowtable_destroy(nft_trans_flowtable(trans));
+		break;
 	}
 	kfree(trans);
 }
@@ -5269,10 +5981,9 @@ static int nf_tables_abort(struct net *net, struct sk_buff *skb)
 			} else {
 				trans->ctx.table->use--;
 				list_del_rcu(&trans->ctx.chain->list);
-				nf_tables_unregister_hooks(trans->ctx.net,
-							   trans->ctx.table,
-							   trans->ctx.chain,
-							   trans->ctx.afi->nops);
+				nf_tables_unregister_hook(trans->ctx.net,
+							  trans->ctx.table,
+							  trans->ctx.chain);
 			}
 			break;
 		case NFT_MSG_DELCHAIN:
@@ -5322,6 +6033,17 @@ static int nf_tables_abort(struct net *net, struct sk_buff *skb)
 			nft_clear(trans->ctx.net, nft_trans_obj(trans));
 			nft_trans_destroy(trans);
 			break;
+		case NFT_MSG_NEWFLOWTABLE:
+			trans->ctx.table->use--;
+			list_del_rcu(&nft_trans_flowtable(trans)->list);
+			nft_unregister_flowtable_net_hooks(net,
+					nft_trans_flowtable(trans));
+			break;
+		case NFT_MSG_DELFLOWTABLE:
+			trans->ctx.table->use++;
+			nft_clear(trans->ctx.net, nft_trans_flowtable(trans));
+			nft_trans_destroy(trans);
+			break;
 		}
 	}
 
@@ -5373,7 +6095,7 @@ int nft_chain_validate_hooks(const struct nft_chain *chain,
 	if (nft_is_base_chain(chain)) {
 		basechain = nft_base_chain(chain);
 
-		if ((1 << basechain->ops[0].hooknum) & hook_flags)
+		if ((1 << basechain->ops.hooknum) & hook_flags)
 			return 0;
 
 		return -EOPNOTSUPP;
@@ -5861,8 +6583,7 @@ int __nft_release_basechain(struct nft_ctx *ctx)
 
 	BUG_ON(!nft_is_base_chain(ctx->chain));
 
-	nf_tables_unregister_hooks(ctx->net, ctx->chain->table, ctx->chain,
-				   ctx->afi->nops);
+	nf_tables_unregister_hook(ctx->net, ctx->chain->table, ctx->chain);
 	list_for_each_entry_safe(rule, nr, &ctx->chain->rules, list) {
 		list_del(&rule->list);
 		ctx->chain->use--;
@@ -5879,6 +6600,7 @@ EXPORT_SYMBOL_GPL(__nft_release_basechain);
 /* Called by nft_unregister_afinfo() from __net_exit path, nfnl_lock is held. */
 static void __nft_release_afinfo(struct net *net, struct nft_af_info *afi)
 {
+	struct nft_flowtable *flowtable, *nf;
 	struct nft_table *table, *nt;
 	struct nft_chain *chain, *nc;
 	struct nft_object *obj, *ne;
@@ -5891,8 +6613,10 @@ static void __nft_release_afinfo(struct net *net, struct nft_af_info *afi)
 
 	list_for_each_entry_safe(table, nt, &afi->tables, list) {
 		list_for_each_entry(chain, &table->chains, list)
-			nf_tables_unregister_hooks(net, table, chain,
-						   afi->nops);
+			nf_tables_unregister_hook(net, table, chain);
+		list_for_each_entry(flowtable, &table->flowtables, list)
+			nf_unregister_net_hooks(net, flowtable->ops,
+						flowtable->ops_len);
 		/* No packets are walking on these chains anymore. */
 		ctx.table = table;
 		list_for_each_entry(chain, &table->chains, list) {
@@ -5903,6 +6627,11 @@ static void __nft_release_afinfo(struct net *net, struct nft_af_info *afi)
 				nf_tables_rule_destroy(&ctx, rule);
 			}
 		}
+		list_for_each_entry_safe(flowtable, nf, &table->flowtables, list) {
+			list_del(&flowtable->list);
+			table->use--;
+			nf_tables_flowtable_destroy(flowtable);
+		}
 		list_for_each_entry_safe(set, ns, &table->sets, list) {
 			list_del(&set->list);
 			table->use--;
@@ -5947,6 +6676,8 @@ static int __init nf_tables_module_init(void)
 	if (err < 0)
 		goto err3;
 
+	register_netdevice_notifier(&nf_tables_flowtable_notifier);
+
 	pr_info("nf_tables: (c) 2007-2009 Patrick McHardy <kaber@trash.net>\n");
 	return register_pernet_subsys(&nf_tables_net_ops);
 err3:
@@ -5961,6 +6692,7 @@ static void __exit nf_tables_module_exit(void)
 {
 	unregister_pernet_subsys(&nf_tables_net_ops);
 	nfnetlink_subsys_unregister(&nf_tables_subsys);
+	unregister_netdevice_notifier(&nf_tables_flowtable_notifier);
 	rcu_barrier();
 	nf_tables_core_module_exit();
 	kfree(info);
diff --git a/net/netfilter/nf_tables_inet.c b/net/netfilter/nf_tables_inet.c
index f713cc2..58b9be7 100644
--- a/net/netfilter/nf_tables_inet.c
+++ b/net/netfilter/nf_tables_inet.c
@@ -9,6 +9,7 @@
 #include <linux/init.h>
 #include <linux/module.h>
 #include <linux/ip.h>
+#include <linux/ipv6.h>
 #include <linux/netfilter_ipv4.h>
 #include <linux/netfilter_ipv6.h>
 #include <net/netfilter/nf_tables.h>
@@ -16,26 +17,31 @@
 #include <net/netfilter/nf_tables_ipv6.h>
 #include <net/ip.h>
 
-static void nft_inet_hook_ops_init(struct nf_hook_ops *ops, unsigned int n)
+static unsigned int nft_do_chain_inet(void *priv, struct sk_buff *skb,
+				      const struct nf_hook_state *state)
 {
-	struct nft_af_info *afi;
+	struct nft_pktinfo pkt;
 
-	if (n == 1)
-		afi = &nft_af_ipv4;
-	else
-		afi = &nft_af_ipv6;
+	nft_set_pktinfo(&pkt, skb, state);
 
-	ops->pf = afi->family;
-	if (afi->hooks[ops->hooknum])
-		ops->hook = afi->hooks[ops->hooknum];
+	switch (state->pf) {
+	case NFPROTO_IPV4:
+		nft_set_pktinfo_ipv4(&pkt, skb);
+		break;
+	case NFPROTO_IPV6:
+		nft_set_pktinfo_ipv6(&pkt, skb);
+		break;
+	default:
+		break;
+	}
+
+	return nft_do_chain(&pkt, priv);
 }
 
 static struct nft_af_info nft_af_inet __read_mostly = {
 	.family		= NFPROTO_INET,
 	.nhooks		= NF_INET_NUMHOOKS,
 	.owner		= THIS_MODULE,
-	.nops		= 2,
-	.hook_ops_init	= nft_inet_hook_ops_init,
 };
 
 static int __net_init nf_tables_inet_init_net(struct net *net)
@@ -76,6 +82,13 @@ static const struct nf_chain_type filter_inet = {
 			  (1 << NF_INET_FORWARD) |
 			  (1 << NF_INET_PRE_ROUTING) |
 			  (1 << NF_INET_POST_ROUTING),
+	.hooks		= {
+		[NF_INET_LOCAL_IN]	= nft_do_chain_inet,
+		[NF_INET_LOCAL_OUT]	= nft_do_chain_inet,
+		[NF_INET_FORWARD]	= nft_do_chain_inet,
+		[NF_INET_PRE_ROUTING]	= nft_do_chain_inet,
+		[NF_INET_POST_ROUTING]	= nft_do_chain_inet,
+	},
 };
 
 static int __init nf_tables_inet_init(void)
diff --git a/net/netfilter/nf_tables_netdev.c b/net/netfilter/nf_tables_netdev.c
index 4034329..42f6f6d 100644
--- a/net/netfilter/nf_tables_netdev.c
+++ b/net/netfilter/nf_tables_netdev.c
@@ -21,15 +21,17 @@ nft_do_chain_netdev(void *priv, struct sk_buff *skb,
 {
 	struct nft_pktinfo pkt;
 
+	nft_set_pktinfo(&pkt, skb, state);
+
 	switch (skb->protocol) {
 	case htons(ETH_P_IP):
-		nft_set_pktinfo_ipv4_validate(&pkt, skb, state);
+		nft_set_pktinfo_ipv4_validate(&pkt, skb);
 		break;
 	case htons(ETH_P_IPV6):
-		nft_set_pktinfo_ipv6_validate(&pkt, skb, state);
+		nft_set_pktinfo_ipv6_validate(&pkt, skb);
 		break;
 	default:
-		nft_set_pktinfo_unspec(&pkt, skb, state);
+		nft_set_pktinfo_unspec(&pkt, skb);
 		break;
 	}
 
@@ -41,10 +43,6 @@ static struct nft_af_info nft_af_netdev __read_mostly = {
 	.nhooks		= NF_NETDEV_NUMHOOKS,
 	.owner		= THIS_MODULE,
 	.flags		= NFT_AF_NEEDS_DEV,
-	.nops		= 1,
-	.hooks		= {
-		[NF_NETDEV_INGRESS]	= nft_do_chain_netdev,
-	},
 };
 
 static int nf_tables_netdev_init_net(struct net *net)
@@ -81,6 +79,9 @@ static const struct nf_chain_type nft_filter_chain_netdev = {
 	.family		= NFPROTO_NETDEV,
 	.owner		= THIS_MODULE,
 	.hook_mask	= (1 << NF_NETDEV_INGRESS),
+	.hooks		= {
+		[NF_NETDEV_INGRESS]	= nft_do_chain_netdev,
+	},
 };
 
 static void nft_netdev_event(unsigned long event, struct net_device *dev,
@@ -96,7 +97,7 @@ static void nft_netdev_event(unsigned long event, struct net_device *dev,
 		__nft_release_basechain(ctx);
 		break;
 	case NETDEV_CHANGENAME:
-		if (dev->ifindex != basechain->ops[0].dev->ifindex)
+		if (dev->ifindex != basechain->ops.dev->ifindex)
 			return;
 
 		strncpy(basechain->dev_name, dev->name, IFNAMSIZ);
diff --git a/net/netfilter/nfnetlink_queue.c b/net/netfilter/nfnetlink_queue.c
index c09b367..2db35f2 100644
--- a/net/netfilter/nfnetlink_queue.c
+++ b/net/netfilter/nfnetlink_queue.c
@@ -941,23 +941,18 @@ static struct notifier_block nfqnl_dev_notifier = {
 	.notifier_call	= nfqnl_rcv_dev_event,
 };
 
-static unsigned int nfqnl_nf_hook_drop(struct net *net)
+static void nfqnl_nf_hook_drop(struct net *net)
 {
 	struct nfnl_queue_net *q = nfnl_queue_pernet(net);
-	unsigned int instances = 0;
 	int i;
 
 	for (i = 0; i < INSTANCE_BUCKETS; i++) {
 		struct nfqnl_instance *inst;
 		struct hlist_head *head = &q->instance_table[i];
 
-		hlist_for_each_entry_rcu(inst, head, hlist) {
+		hlist_for_each_entry_rcu(inst, head, hlist)
 			nfqnl_flush(inst, NULL, 0);
-			instances++;
-		}
 	}
-
-	return instances;
 }
 
 static int
diff --git a/net/netfilter/nft_cmp.c b/net/netfilter/nft_cmp.c
index c2945eb..fa90a84 100644
--- a/net/netfilter/nft_cmp.c
+++ b/net/netfilter/nft_cmp.c
@@ -44,6 +44,7 @@ static void nft_cmp_eval(const struct nft_expr *expr,
 	case NFT_CMP_LT:
 		if (d == 0)
 			goto mismatch;
+		/* fall through */
 	case NFT_CMP_LTE:
 		if (d > 0)
 			goto mismatch;
@@ -51,6 +52,7 @@ static void nft_cmp_eval(const struct nft_expr *expr,
 	case NFT_CMP_GT:
 		if (d == 0)
 			goto mismatch;
+		/* fall through */
 	case NFT_CMP_GTE:
 		if (d < 0)
 			goto mismatch;
diff --git a/net/netfilter/nft_compat.c b/net/netfilter/nft_compat.c
index b89f4f6..dcff0dc 100644
--- a/net/netfilter/nft_compat.c
+++ b/net/netfilter/nft_compat.c
@@ -169,7 +169,7 @@ nft_target_set_tgchk_param(struct xt_tgchk_param *par,
 	if (nft_is_base_chain(ctx->chain)) {
 		const struct nft_base_chain *basechain =
 						nft_base_chain(ctx->chain);
-		const struct nf_hook_ops *ops = &basechain->ops[0];
+		const struct nf_hook_ops *ops = &basechain->ops;
 
 		par->hook_mask = 1 << ops->hooknum;
 	} else {
@@ -302,7 +302,7 @@ static int nft_target_validate(const struct nft_ctx *ctx,
 	if (nft_is_base_chain(ctx->chain)) {
 		const struct nft_base_chain *basechain =
 						nft_base_chain(ctx->chain);
-		const struct nf_hook_ops *ops = &basechain->ops[0];
+		const struct nf_hook_ops *ops = &basechain->ops;
 
 		hook_mask = 1 << ops->hooknum;
 		if (target->hooks && !(hook_mask & target->hooks))
@@ -383,7 +383,7 @@ nft_match_set_mtchk_param(struct xt_mtchk_param *par, const struct nft_ctx *ctx,
 	if (nft_is_base_chain(ctx->chain)) {
 		const struct nft_base_chain *basechain =
 						nft_base_chain(ctx->chain);
-		const struct nf_hook_ops *ops = &basechain->ops[0];
+		const struct nf_hook_ops *ops = &basechain->ops;
 
 		par->hook_mask = 1 << ops->hooknum;
 	} else {
@@ -481,7 +481,7 @@ static int nft_match_validate(const struct nft_ctx *ctx,
 	if (nft_is_base_chain(ctx->chain)) {
 		const struct nft_base_chain *basechain =
 						nft_base_chain(ctx->chain);
-		const struct nf_hook_ops *ops = &basechain->ops[0];
+		const struct nf_hook_ops *ops = &basechain->ops;
 
 		hook_mask = 1 << ops->hooknum;
 		if (match->hooks && !(hook_mask & match->hooks))
diff --git a/net/netfilter/nft_dynset.c b/net/netfilter/nft_dynset.c
index 66221ad..ec0fd78 100644
--- a/net/netfilter/nft_dynset.c
+++ b/net/netfilter/nft_dynset.c
@@ -184,7 +184,7 @@ static int nft_dynset_init(const struct nft_ctx *ctx,
 	if (tb[NFTA_DYNSET_EXPR] != NULL) {
 		if (!(set->flags & NFT_SET_EVAL))
 			return -EINVAL;
-		if (!(set->flags & NFT_SET_ANONYMOUS))
+		if (!nft_set_is_anonymous(set))
 			return -EOPNOTSUPP;
 
 		priv->expr = nft_expr_init(ctx, tb[NFTA_DYNSET_EXPR]);
diff --git a/net/netfilter/nft_flow_offload.c b/net/netfilter/nft_flow_offload.c
new file mode 100644
index 0000000..dd38785d
--- /dev/null
+++ b/net/netfilter/nft_flow_offload.c
@@ -0,0 +1,264 @@
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/netlink.h>
+#include <linux/netfilter.h>
+#include <linux/workqueue.h>
+#include <linux/spinlock.h>
+#include <linux/netfilter/nf_tables.h>
+#include <net/ip.h> /* for ipv4 options. */
+#include <net/netfilter/nf_tables.h>
+#include <net/netfilter/nf_tables_core.h>
+#include <net/netfilter/nf_conntrack_core.h>
+#include <linux/netfilter/nf_conntrack_common.h>
+#include <net/netfilter/nf_flow_table.h>
+
+struct nft_flow_offload {
+	struct nft_flowtable	*flowtable;
+};
+
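+/* Fill in both directions of the flow's route: reuse the packet's own dst
+ * for the current direction and look up the reply direction with
+ * nf_route(), keyed on the other direction's destination address.
+ */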
+static int nft_flow_route(const struct nft_pktinfo *pkt,
+			  const struct nf_conn *ct,
+			  struct nf_flow_route *route,
+			  enum ip_conntrack_dir dir)
+{
+	struct dst_entry *this_dst = skb_dst(pkt->skb);
+	struct dst_entry *other_dst = NULL;
+	struct flowi fl;
+
+	memset(&fl, 0, sizeof(fl));
+	switch (nft_pf(pkt)) {
+	case NFPROTO_IPV4:
+		fl.u.ip4.daddr = ct->tuplehash[!dir].tuple.dst.u3.ip;
+		break;
+	case NFPROTO_IPV6:
+		fl.u.ip6.daddr = ct->tuplehash[!dir].tuple.dst.u3.in6;
+		break;
+	}
+
+	nf_route(nft_net(pkt), &other_dst, &fl, false, nft_pf(pkt));
+	if (!other_dst)
+		return -ENOENT;
+
+	route->tuple[dir].dst		= this_dst;
+	route->tuple[dir].ifindex	= nft_in(pkt)->ifindex;
+	route->tuple[!dir].dst		= other_dst;
+	route->tuple[!dir].ifindex	= nft_out(pkt)->ifindex;
+
+	return 0;
+}
+
+static bool nft_flow_offload_skip(struct sk_buff *skb)
+{
+	struct ip_options *opt  = &(IPCB(skb)->opt);
+
+	if (unlikely(opt->optlen))
+		return true;
+	if (skb_sec_path(skb))
+		return true;
+
+	return false;
+}
+
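+/* Offload eligibility check: only established TCP/UDP conntrack entries
+ * without a helper are considered. On success a flow_offload entry is
+ * installed for both directions; on any failure the expression breaks
+ * (NFT_BREAK) and the packet stays on the normal forwarding path.
+ */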
+static void nft_flow_offload_eval(const struct nft_expr *expr,
+				  struct nft_regs *regs,
+				  const struct nft_pktinfo *pkt)
+{
+	struct nft_flow_offload *priv = nft_expr_priv(expr);
+	struct nf_flowtable *flowtable = &priv->flowtable->data;
+	enum ip_conntrack_info ctinfo;
+	struct nf_flow_route route;
+	struct flow_offload *flow;
+	enum ip_conntrack_dir dir;
+	struct nf_conn *ct;
+	int ret;
+
+	if (nft_flow_offload_skip(pkt->skb))
+		goto out;
+
+	ct = nf_ct_get(pkt->skb, &ctinfo);
+	if (!ct)
+		goto out;
+
+	switch (ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.dst.protonum) {
+	case IPPROTO_TCP:
+	case IPPROTO_UDP:
+		break;
+	default:
+		goto out;
+	}
+
+	if (test_bit(IPS_HELPER_BIT, &ct->status))
+		goto out;
+
+	if (ctinfo == IP_CT_NEW ||
+	    ctinfo == IP_CT_RELATED)
+		goto out;
+
+	if (test_and_set_bit(IPS_OFFLOAD_BIT, &ct->status))
+		goto out;
+
+	dir = CTINFO2DIR(ctinfo);
+	if (nft_flow_route(pkt, ct, &route, dir) < 0)
+		goto err_flow_route;
+
+	flow = flow_offload_alloc(ct, &route);
+	if (!flow)
+		goto err_flow_alloc;
+
+	ret = flow_offload_add(flowtable, flow);
+	if (ret < 0)
+		goto err_flow_add;
+
+	return;
+
+err_flow_add:
+	flow_offload_free(flow);
+err_flow_alloc:
+	dst_release(route.tuple[!dir].dst);
+err_flow_route:
+	clear_bit(IPS_OFFLOAD_BIT, &ct->status);
+out:
+	regs->verdict.code = NFT_BREAK;
+}
+
+static int nft_flow_offload_validate(const struct nft_ctx *ctx,
+				     const struct nft_expr *expr,
+				     const struct nft_data **data)
+{
+	unsigned int hook_mask = (1 << NF_INET_FORWARD);
+
+	return nft_chain_validate_hooks(ctx->chain, hook_mask);
+}
+
+static int nft_flow_offload_init(const struct nft_ctx *ctx,
+				 const struct nft_expr *expr,
+				 const struct nlattr * const tb[])
+{
+	struct nft_flow_offload *priv = nft_expr_priv(expr);
+	u8 genmask = nft_genmask_next(ctx->net);
+	struct nft_flowtable *flowtable;
+
+	if (!tb[NFTA_FLOW_TABLE_NAME])
+		return -EINVAL;
+
+	flowtable = nf_tables_flowtable_lookup(ctx->table,
+					       tb[NFTA_FLOW_TABLE_NAME],
+					       genmask);
+	if (IS_ERR(flowtable))
+		return PTR_ERR(flowtable);
+
+	priv->flowtable = flowtable;
+	flowtable->use++;
+
+	return nf_ct_netns_get(ctx->net, ctx->afi->family);
+}
+
+static void nft_flow_offload_destroy(const struct nft_ctx *ctx,
+				     const struct nft_expr *expr)
+{
+	struct nft_flow_offload *priv = nft_expr_priv(expr);
+
+	priv->flowtable->use--;
+	nf_ct_netns_put(ctx->net, ctx->afi->family);
+}
+
+static int nft_flow_offload_dump(struct sk_buff *skb, const struct nft_expr *expr)
+{
+	struct nft_flow_offload *priv = nft_expr_priv(expr);
+
+	if (nla_put_string(skb, NFTA_FLOW_TABLE_NAME, priv->flowtable->name))
+		goto nla_put_failure;
+
+	return 0;
+
+nla_put_failure:
+	return -1;
+}
+
+static struct nft_expr_type nft_flow_offload_type;
+static const struct nft_expr_ops nft_flow_offload_ops = {
+	.type		= &nft_flow_offload_type,
+	.size		= NFT_EXPR_SIZE(sizeof(struct nft_flow_offload)),
+	.eval		= nft_flow_offload_eval,
+	.init		= nft_flow_offload_init,
+	.destroy	= nft_flow_offload_destroy,
+	.validate	= nft_flow_offload_validate,
+	.dump		= nft_flow_offload_dump,
+};
+
+static struct nft_expr_type nft_flow_offload_type __read_mostly = {
+	.name		= "flow_offload",
+	.ops		= &nft_flow_offload_ops,
+	.maxattr	= NFTA_FLOW_MAX,
+	.owner		= THIS_MODULE,
+};
+
+static void flow_offload_iterate_cleanup(struct flow_offload *flow, void *data)
+{
+	struct net_device *dev = data;
+
+	if (dev && flow->tuplehash[0].tuple.iifidx != dev->ifindex)
+		return;
+
+	flow_offload_dead(flow);
+}
+
+static void nft_flow_offload_iterate_cleanup(struct nf_flowtable *flowtable,
+					     void *data)
+{
+	nf_flow_table_iterate(flowtable, flow_offload_iterate_cleanup, data);
+}
+
+static int flow_offload_netdev_event(struct notifier_block *this,
+				     unsigned long event, void *ptr)
+{
+	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
+
+	if (event != NETDEV_DOWN)
+		return NOTIFY_DONE;
+
+	nft_flow_table_iterate(dev_net(dev), nft_flow_offload_iterate_cleanup, dev);
+
+	return NOTIFY_DONE;
+}
+
+static struct notifier_block flow_offload_netdev_notifier = {
+	.notifier_call	= flow_offload_netdev_event,
+};
+
+static int __init nft_flow_offload_module_init(void)
+{
+	int err;
+
+	register_netdevice_notifier(&flow_offload_netdev_notifier);
+
+	err = nft_register_expr(&nft_flow_offload_type);
+	if (err < 0)
+		goto register_expr;
+
+	return 0;
+
+register_expr:
+	unregister_netdevice_notifier(&flow_offload_netdev_notifier);
+	return err;
+}
+
+static void __exit nft_flow_offload_module_exit(void)
+{
+	struct net *net;
+
+	nft_unregister_expr(&nft_flow_offload_type);
+	unregister_netdevice_notifier(&flow_offload_netdev_notifier);
+	rtnl_lock();
+	for_each_net(net)
+		nft_flow_table_iterate(net, nft_flow_offload_iterate_cleanup, NULL);
+	rtnl_unlock();
+}
+
+module_init(nft_flow_offload_module_init);
+module_exit(nft_flow_offload_module_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Pablo Neira Ayuso <pablo@netfilter.org>");
+MODULE_ALIAS_NFT_EXPR("flow_offload");
diff --git a/net/netfilter/nft_meta.c b/net/netfilter/nft_meta.c
index 5a60eb2..1a91e67 100644
--- a/net/netfilter/nft_meta.c
+++ b/net/netfilter/nft_meta.c
@@ -210,6 +210,11 @@ void nft_meta_get_eval(const struct nft_expr *expr,
 		*dest = prandom_u32_state(state);
 		break;
 	}
+#ifdef CONFIG_XFRM
+	case NFT_META_SECPATH:
+		nft_reg_store8(dest, !!skb->sp);
+		break;
+#endif
 	default:
 		WARN_ON(1);
 		goto err;
@@ -308,6 +313,11 @@ int nft_meta_get_init(const struct nft_ctx *ctx,
 		prandom_init_once(&nft_prandom_state);
 		len = sizeof(u32);
 		break;
+#ifdef CONFIG_XFRM
+	case NFT_META_SECPATH:
+		len = sizeof(u8);
+		break;
+#endif
 	default:
 		return -EOPNOTSUPP;
 	}
@@ -318,6 +328,38 @@ int nft_meta_get_init(const struct nft_ctx *ctx,
 }
 EXPORT_SYMBOL_GPL(nft_meta_get_init);
 
+static int nft_meta_get_validate(const struct nft_ctx *ctx,
+				 const struct nft_expr *expr,
+				 const struct nft_data **data)
+{
+#ifdef CONFIG_XFRM
+	const struct nft_meta *priv = nft_expr_priv(expr);
+	unsigned int hooks;
+
+	if (priv->key != NFT_META_SECPATH)
+		return 0;
+
+	switch (ctx->afi->family) {
+	case NFPROTO_NETDEV:
+		hooks = 1 << NF_NETDEV_INGRESS;
+		break;
+	case NFPROTO_IPV4:
+	case NFPROTO_IPV6:
+	case NFPROTO_INET:
+		hooks = (1 << NF_INET_PRE_ROUTING) |
+			(1 << NF_INET_LOCAL_IN) |
+			(1 << NF_INET_FORWARD);
+		break;
+	default:
+		return -EOPNOTSUPP;
+	}
+
+	return nft_chain_validate_hooks(ctx->chain, hooks);
+#else
+	return 0;
+#endif
+}
+
 int nft_meta_set_validate(const struct nft_ctx *ctx,
 			  const struct nft_expr *expr,
 			  const struct nft_data **data)
@@ -434,6 +476,7 @@ static const struct nft_expr_ops nft_meta_get_ops = {
 	.eval		= nft_meta_get_eval,
 	.init		= nft_meta_get_init,
 	.dump		= nft_meta_get_dump,
+	.validate	= nft_meta_get_validate,
 };
 
 static const struct nft_expr_ops nft_meta_set_ops = {
diff --git a/net/netfilter/nft_rt.c b/net/netfilter/nft_rt.c
index a6b7d05..11a2071b 100644
--- a/net/netfilter/nft_rt.c
+++ b/net/netfilter/nft_rt.c
@@ -27,7 +27,7 @@ static u16 get_tcpmss(const struct nft_pktinfo *pkt, const struct dst_entry *skb
 {
 	u32 minlen = sizeof(struct ipv6hdr), mtu = dst_mtu(skbdst);
 	const struct sk_buff *skb = pkt->skb;
-	const struct nf_afinfo *ai;
+	struct dst_entry *dst = NULL;
 	struct flowi fl;
 
 	memset(&fl, 0, sizeof(fl));
@@ -43,15 +43,10 @@ static u16 get_tcpmss(const struct nft_pktinfo *pkt, const struct dst_entry *skb
 		break;
 	}
 
-	ai = nf_get_afinfo(nft_pf(pkt));
-	if (ai) {
-		struct dst_entry *dst = NULL;
-
-		ai->route(nft_net(pkt), &dst, &fl, false);
-		if (dst) {
-			mtu = min(mtu, dst_mtu(dst));
-			dst_release(dst);
-		}
+	nf_route(nft_net(pkt), &dst, &fl, false, nft_pf(pkt));
+	if (dst) {
+		mtu = min(mtu, dst_mtu(dst));
+		dst_release(dst);
 	}
 
 	if (mtu <= minlen || mtu > 0xffff)
diff --git a/net/netfilter/nft_set_hash.c b/net/netfilter/nft_set_hash.c
index f8166c1..3f1624e 100644
--- a/net/netfilter/nft_set_hash.c
+++ b/net/netfilter/nft_set_hash.c
@@ -251,11 +251,7 @@ static void nft_rhash_walk(const struct nft_ctx *ctx, struct nft_set *set,
 	if (err)
 		return;
 
-	err = rhashtable_walk_start(&hti);
-	if (err && err != -EAGAIN) {
-		iter->err = err;
-		goto out;
-	}
+	rhashtable_walk_start(&hti);
 
 	while ((he = rhashtable_walk_next(&hti))) {
 		if (IS_ERR(he)) {
@@ -306,9 +302,7 @@ static void nft_rhash_gc(struct work_struct *work)
 	if (err)
 		goto schedule;
 
-	err = rhashtable_walk_start(&hti);
-	if (err && err != -EAGAIN)
-		goto out;
+	rhashtable_walk_start(&hti);
 
 	while ((he = rhashtable_walk_next(&hti))) {
 		if (IS_ERR(he)) {
diff --git a/net/netfilter/utils.c b/net/netfilter/utils.c
new file mode 100644
index 0000000..0b660c5
--- /dev/null
+++ b/net/netfilter/utils.c
@@ -0,0 +1,90 @@
+#include <linux/kernel.h>
+#include <linux/netfilter.h>
+#include <linux/netfilter_ipv4.h>
+#include <linux/netfilter_ipv6.h>
+#include <net/netfilter/nf_queue.h>
+
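+/* Address-family dispatched replacements for the old nf_afinfo hooks:
+ * IPv4 helpers are called directly, IPv6 goes through the nf_ipv6_ops
+ * indirection so the IPv6 code can remain modular.
+ */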
+__sum16 nf_checksum(struct sk_buff *skb, unsigned int hook,
+		    unsigned int dataoff, u_int8_t protocol,
+		    unsigned short family)
+{
+	const struct nf_ipv6_ops *v6ops;
+	__sum16 csum = 0;
+
+	switch (family) {
+	case AF_INET:
+		csum = nf_ip_checksum(skb, hook, dataoff, protocol);
+		break;
+	case AF_INET6:
+		v6ops = rcu_dereference(nf_ipv6_ops);
+		if (v6ops)
+			csum = v6ops->checksum(skb, hook, dataoff, protocol);
+		break;
+	}
+
+	return csum;
+}
+EXPORT_SYMBOL_GPL(nf_checksum);
+
+__sum16 nf_checksum_partial(struct sk_buff *skb, unsigned int hook,
+			    unsigned int dataoff, unsigned int len,
+			    u_int8_t protocol, unsigned short family)
+{
+	const struct nf_ipv6_ops *v6ops;
+	__sum16 csum = 0;
+
+	switch (family) {
+	case AF_INET:
+		csum = nf_ip_checksum_partial(skb, hook, dataoff, len,
+					      protocol);
+		break;
+	case AF_INET6:
+		v6ops = rcu_dereference(nf_ipv6_ops);
+		if (v6ops)
+			csum = v6ops->checksum_partial(skb, hook, dataoff, len,
+						       protocol);
+		break;
+	}
+
+	return csum;
+}
+EXPORT_SYMBOL_GPL(nf_checksum_partial);
+
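+/* Family-dispatched route lookup, replacing the old afinfo->route hook. */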
+int nf_route(struct net *net, struct dst_entry **dst, struct flowi *fl,
+	     bool strict, unsigned short family)
+{
+	const struct nf_ipv6_ops *v6ops;
+	int ret = 0;
+
+	switch (family) {
+	case AF_INET:
+		ret = nf_ip_route(net, dst, fl, strict);
+		break;
+	case AF_INET6:
+		v6ops = rcu_dereference(nf_ipv6_ops);
+		if (v6ops)
+			ret = v6ops->route(net, dst, fl, strict);
+		break;
+	}
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(nf_route);
+
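+/* Re-validate the route of a packet that was queued to userspace, called
+ * from nf_reinject() when the verdict is NF_ACCEPT.
+ */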
+int nf_reroute(struct sk_buff *skb, struct nf_queue_entry *entry)
+{
+	const struct nf_ipv6_ops *v6ops;
+	int ret = 0;
+
+	switch (entry->state.pf) {
+	case AF_INET:
+		ret = nf_ip_reroute(skb, entry);
+		break;
+	case AF_INET6:
+		v6ops = rcu_dereference(nf_ipv6_ops);
+		if (v6ops)
+			ret = v6ops->reroute(skb, entry);
+		break;
+	}
+	return ret;
+}
diff --git a/net/netfilter/x_tables.c b/net/netfilter/x_tables.c
index 55802e9..10c19a3 100644
--- a/net/netfilter/x_tables.c
+++ b/net/netfilter/x_tables.c
@@ -1027,7 +1027,7 @@ void xt_free_table_info(struct xt_table_info *info)
 }
 EXPORT_SYMBOL(xt_free_table_info);
 
-/* Find table by name, grabs mutex & ref.  Returns NULL on error. */
+/* Find table by name, grabs mutex & ref.  Returns ERR_PTR on error. */
 struct xt_table *xt_find_table_lock(struct net *net, u_int8_t af,
 				    const char *name)
 {
@@ -1043,17 +1043,17 @@ struct xt_table *xt_find_table_lock(struct net *net, u_int8_t af,
 
 	/* Table doesn't exist in this netns, re-try init */
 	list_for_each_entry(t, &init_net.xt.tables[af], list) {
+		int err;
+
 		if (strcmp(t->name, name))
 			continue;
-		if (!try_module_get(t->me)) {
-			mutex_unlock(&xt[af].mutex);
-			return NULL;
-		}
-
+		if (!try_module_get(t->me))
+			goto out;
 		mutex_unlock(&xt[af].mutex);
-		if (t->table_init(net) != 0) {
+		err = t->table_init(net);
+		if (err < 0) {
 			module_put(t->me);
-			return NULL;
+			return ERR_PTR(err);
 		}
 
 		found = t;
@@ -1073,10 +1073,28 @@ struct xt_table *xt_find_table_lock(struct net *net, u_int8_t af,
 	module_put(found->me);
  out:
 	mutex_unlock(&xt[af].mutex);
-	return NULL;
+	return ERR_PTR(-ENOENT);
 }
 EXPORT_SYMBOL_GPL(xt_find_table_lock);
 
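+/* As xt_find_table_lock(), but tries to autoload the table module before
+ * giving up when the table does not exist.
+ */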
+struct xt_table *xt_request_find_table_lock(struct net *net, u_int8_t af,
+					    const char *name)
+{
+	struct xt_table *t = xt_find_table_lock(net, af, name);
+
+#ifdef CONFIG_MODULES
+	if (IS_ERR(t)) {
+		int err = request_module("%stable_%s", xt_prefix[af], name);
+		if (err)
+			return ERR_PTR(err);
+		t = xt_find_table_lock(net, af, name);
+	}
+#endif
+
+	return t;
+}
+EXPORT_SYMBOL_GPL(xt_request_find_table_lock);
+
 void xt_table_unlock(struct xt_table *table)
 {
 	mutex_unlock(&xt[table->af].mutex);
@@ -1397,7 +1415,7 @@ static void *xt_mttg_seq_next(struct seq_file *seq, void *v, loff_t *ppos,
 		trav->curr = trav->curr->next;
 		if (trav->curr != trav->head)
 			break;
-		/* fallthru, _stop will unlock */
+		/* fall through */
 	default:
 		return NULL;
 	}
diff --git a/net/netfilter/xt_TCPMSS.c b/net/netfilter/xt_TCPMSS.c
index 9dae4d6..99bb8e4 100644
--- a/net/netfilter/xt_TCPMSS.c
+++ b/net/netfilter/xt_TCPMSS.c
@@ -48,7 +48,6 @@ static u_int32_t tcpmss_reverse_mtu(struct net *net,
 				    unsigned int family)
 {
 	struct flowi fl;
-	const struct nf_afinfo *ai;
 	struct rtable *rt = NULL;
 	u_int32_t mtu     = ~0U;
 
@@ -62,10 +61,8 @@ static u_int32_t tcpmss_reverse_mtu(struct net *net,
 		memset(fl6, 0, sizeof(*fl6));
 		fl6->daddr = ipv6_hdr(skb)->saddr;
 	}
-	ai = nf_get_afinfo(family);
-	if (ai != NULL)
-		ai->route(net, (struct dst_entry **)&rt, &fl, false);
 
+	nf_route(net, (struct dst_entry **)&rt, &fl, false, family);
 	if (rt != NULL) {
 		mtu = dst_mtu(&rt->dst);
 		dst_release(&rt->dst);
diff --git a/net/netfilter/xt_addrtype.c b/net/netfilter/xt_addrtype.c
index 3b2be2a..911a7c0 100644
--- a/net/netfilter/xt_addrtype.c
+++ b/net/netfilter/xt_addrtype.c
@@ -36,7 +36,7 @@ MODULE_ALIAS("ip6t_addrtype");
 static u32 match_lookup_rt6(struct net *net, const struct net_device *dev,
 			    const struct in6_addr *addr, u16 mask)
 {
-	const struct nf_afinfo *afinfo;
+	const struct nf_ipv6_ops *v6ops;
 	struct flowi6 flow;
 	struct rt6_info *rt;
 	u32 ret = 0;
@@ -47,17 +47,14 @@ static u32 match_lookup_rt6(struct net *net, const struct net_device *dev,
 	if (dev)
 		flow.flowi6_oif = dev->ifindex;
 
-	afinfo = nf_get_afinfo(NFPROTO_IPV6);
-	if (afinfo != NULL) {
-		const struct nf_ipv6_ops *v6ops;
-
+	v6ops = nf_get_ipv6_ops();
+	if (v6ops) {
 		if (dev && (mask & XT_ADDRTYPE_LOCAL)) {
-			v6ops = nf_get_ipv6_ops();
-			if (v6ops && v6ops->chk_addr(net, addr, dev, true))
+			if (v6ops->chk_addr(net, addr, dev, true))
 				ret = XT_ADDRTYPE_LOCAL;
 		}
-		route_err = afinfo->route(net, (struct dst_entry **)&rt,
-					  flowi6_to_flowi(&flow), false);
+		route_err = v6ops->route(net, (struct dst_entry **)&rt,
+					 flowi6_to_flowi(&flow), false);
 	} else {
 		route_err = 1;
 	}
diff --git a/net/netfilter/xt_connlimit.c b/net/netfilter/xt_connlimit.c
index a6214f2..b1b17b9 100644
--- a/net/netfilter/xt_connlimit.c
+++ b/net/netfilter/xt_connlimit.c
@@ -12,292 +12,30 @@
  * GPL (C) 1999  Rusty Russell (rusty@rustcorp.com.au).
  */
 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-#include <linux/in.h>
-#include <linux/in6.h>
-#include <linux/ip.h>
-#include <linux/ipv6.h>
-#include <linux/jhash.h>
-#include <linux/slab.h>
-#include <linux/list.h>
-#include <linux/rbtree.h>
+
 #include <linux/module.h>
-#include <linux/random.h>
 #include <linux/skbuff.h>
-#include <linux/spinlock.h>
-#include <linux/netfilter/nf_conntrack_tcp.h>
 #include <linux/netfilter/x_tables.h>
 #include <linux/netfilter/xt_connlimit.h>
+
 #include <net/netfilter/nf_conntrack.h>
 #include <net/netfilter/nf_conntrack_core.h>
 #include <net/netfilter/nf_conntrack_tuple.h>
 #include <net/netfilter/nf_conntrack_zones.h>
-
-#define CONNLIMIT_SLOTS		256U
-
-#ifdef CONFIG_LOCKDEP
-#define CONNLIMIT_LOCK_SLOTS	8U
-#else
-#define CONNLIMIT_LOCK_SLOTS	256U
-#endif
-
-#define CONNLIMIT_GC_MAX_NODES	8
-
-/* we will save the tuples of all connections we care about */
-struct xt_connlimit_conn {
-	struct hlist_node		node;
-	struct nf_conntrack_tuple	tuple;
-};
-
-struct xt_connlimit_rb {
-	struct rb_node node;
-	struct hlist_head hhead; /* connections/hosts in same subnet */
-	union nf_inet_addr addr; /* search key */
-};
-
-static spinlock_t xt_connlimit_locks[CONNLIMIT_LOCK_SLOTS] __cacheline_aligned_in_smp;
-
-struct xt_connlimit_data {
-	struct rb_root climit_root[CONNLIMIT_SLOTS];
-};
-
-static u_int32_t connlimit_rnd __read_mostly;
-static struct kmem_cache *connlimit_rb_cachep __read_mostly;
-static struct kmem_cache *connlimit_conn_cachep __read_mostly;
-
-static inline unsigned int connlimit_iphash(__be32 addr)
-{
-	return jhash_1word((__force __u32)addr,
-			    connlimit_rnd) % CONNLIMIT_SLOTS;
-}
-
-static inline unsigned int
-connlimit_iphash6(const union nf_inet_addr *addr)
-{
-	return jhash2((u32 *)addr->ip6, ARRAY_SIZE(addr->ip6),
-		       connlimit_rnd) % CONNLIMIT_SLOTS;
-}
-
-static inline bool already_closed(const struct nf_conn *conn)
-{
-	if (nf_ct_protonum(conn) == IPPROTO_TCP)
-		return conn->proto.tcp.state == TCP_CONNTRACK_TIME_WAIT ||
-		       conn->proto.tcp.state == TCP_CONNTRACK_CLOSE;
-	else
-		return 0;
-}
-
-static int
-same_source(const union nf_inet_addr *addr,
-	    const union nf_inet_addr *u3, u_int8_t family)
-{
-	if (family == NFPROTO_IPV4)
-		return ntohl(addr->ip) - ntohl(u3->ip);
-
-	return memcmp(addr->ip6, u3->ip6, sizeof(addr->ip6));
-}
-
-static bool add_hlist(struct hlist_head *head,
-		      const struct nf_conntrack_tuple *tuple,
-		      const union nf_inet_addr *addr)
-{
-	struct xt_connlimit_conn *conn;
-
-	conn = kmem_cache_alloc(connlimit_conn_cachep, GFP_ATOMIC);
-	if (conn == NULL)
-		return false;
-	conn->tuple = *tuple;
-	hlist_add_head(&conn->node, head);
-	return true;
-}
-
-static unsigned int check_hlist(struct net *net,
-				struct hlist_head *head,
-				const struct nf_conntrack_tuple *tuple,
-				const struct nf_conntrack_zone *zone,
-				bool *addit)
-{
-	const struct nf_conntrack_tuple_hash *found;
-	struct xt_connlimit_conn *conn;
-	struct hlist_node *n;
-	struct nf_conn *found_ct;
-	unsigned int length = 0;
-
-	*addit = true;
-
-	/* check the saved connections */
-	hlist_for_each_entry_safe(conn, n, head, node) {
-		found = nf_conntrack_find_get(net, zone, &conn->tuple);
-		if (found == NULL) {
-			hlist_del(&conn->node);
-			kmem_cache_free(connlimit_conn_cachep, conn);
-			continue;
-		}
-
-		found_ct = nf_ct_tuplehash_to_ctrack(found);
-
-		if (nf_ct_tuple_equal(&conn->tuple, tuple)) {
-			/*
-			 * Just to be sure we have it only once in the list.
-			 * We should not see tuples twice unless someone hooks
-			 * this into a table without "-p tcp --syn".
-			 */
-			*addit = false;
-		} else if (already_closed(found_ct)) {
-			/*
-			 * we do not care about connections which are
-			 * closed already -> ditch it
-			 */
-			nf_ct_put(found_ct);
-			hlist_del(&conn->node);
-			kmem_cache_free(connlimit_conn_cachep, conn);
-			continue;
-		}
-
-		nf_ct_put(found_ct);
-		length++;
-	}
-
-	return length;
-}
-
-static void tree_nodes_free(struct rb_root *root,
-			    struct xt_connlimit_rb *gc_nodes[],
-			    unsigned int gc_count)
-{
-	struct xt_connlimit_rb *rbconn;
-
-	while (gc_count) {
-		rbconn = gc_nodes[--gc_count];
-		rb_erase(&rbconn->node, root);
-		kmem_cache_free(connlimit_rb_cachep, rbconn);
-	}
-}
-
-static unsigned int
-count_tree(struct net *net, struct rb_root *root,
-	   const struct nf_conntrack_tuple *tuple,
-	   const union nf_inet_addr *addr,
-	   u8 family, const struct nf_conntrack_zone *zone)
-{
-	struct xt_connlimit_rb *gc_nodes[CONNLIMIT_GC_MAX_NODES];
-	struct rb_node **rbnode, *parent;
-	struct xt_connlimit_rb *rbconn;
-	struct xt_connlimit_conn *conn;
-	unsigned int gc_count;
-	bool no_gc = false;
-
- restart:
-	gc_count = 0;
-	parent = NULL;
-	rbnode = &(root->rb_node);
-	while (*rbnode) {
-		int diff;
-		bool addit;
-
-		rbconn = rb_entry(*rbnode, struct xt_connlimit_rb, node);
-
-		parent = *rbnode;
-		diff = same_source(addr, &rbconn->addr, family);
-		if (diff < 0) {
-			rbnode = &((*rbnode)->rb_left);
-		} else if (diff > 0) {
-			rbnode = &((*rbnode)->rb_right);
-		} else {
-			/* same source network -> be counted! */
-			unsigned int count;
-			count = check_hlist(net, &rbconn->hhead, tuple, zone, &addit);
-
-			tree_nodes_free(root, gc_nodes, gc_count);
-			if (!addit)
-				return count;
-
-			if (!add_hlist(&rbconn->hhead, tuple, addr))
-				return 0; /* hotdrop */
-
-			return count + 1;
-		}
-
-		if (no_gc || gc_count >= ARRAY_SIZE(gc_nodes))
-			continue;
-
-		/* only used for GC on hhead, retval and 'addit' ignored */
-		check_hlist(net, &rbconn->hhead, tuple, zone, &addit);
-		if (hlist_empty(&rbconn->hhead))
-			gc_nodes[gc_count++] = rbconn;
-	}
-
-	if (gc_count) {
-		no_gc = true;
-		tree_nodes_free(root, gc_nodes, gc_count);
-		/* tree_node_free before new allocation permits
-		 * allocator to re-use newly free'd object.
-		 *
-		 * This is a rare event; in most cases we will find
-		 * existing node to re-use. (or gc_count is 0).
-		 */
-		goto restart;
-	}
-
-	/* no match, need to insert new node */
-	rbconn = kmem_cache_alloc(connlimit_rb_cachep, GFP_ATOMIC);
-	if (rbconn == NULL)
-		return 0;
-
-	conn = kmem_cache_alloc(connlimit_conn_cachep, GFP_ATOMIC);
-	if (conn == NULL) {
-		kmem_cache_free(connlimit_rb_cachep, rbconn);
-		return 0;
-	}
-
-	conn->tuple = *tuple;
-	rbconn->addr = *addr;
-
-	INIT_HLIST_HEAD(&rbconn->hhead);
-	hlist_add_head(&conn->node, &rbconn->hhead);
-
-	rb_link_node(&rbconn->node, parent, rbnode);
-	rb_insert_color(&rbconn->node, root);
-	return 1;
-}
-
-static int count_them(struct net *net,
-		      struct xt_connlimit_data *data,
-		      const struct nf_conntrack_tuple *tuple,
-		      const union nf_inet_addr *addr,
-		      u_int8_t family,
-		      const struct nf_conntrack_zone *zone)
-{
-	struct rb_root *root;
-	int count;
-	u32 hash;
-
-	if (family == NFPROTO_IPV6)
-		hash = connlimit_iphash6(addr);
-	else
-		hash = connlimit_iphash(addr->ip);
-	root = &data->climit_root[hash];
-
-	spin_lock_bh(&xt_connlimit_locks[hash % CONNLIMIT_LOCK_SLOTS]);
-
-	count = count_tree(net, root, tuple, addr, family, zone);
-
-	spin_unlock_bh(&xt_connlimit_locks[hash % CONNLIMIT_LOCK_SLOTS]);
-
-	return count;
-}
+#include <net/netfilter/nf_conntrack_count.h>
 
 static bool
 connlimit_mt(const struct sk_buff *skb, struct xt_action_param *par)
 {
 	struct net *net = xt_net(par);
 	const struct xt_connlimit_info *info = par->matchinfo;
-	union nf_inet_addr addr;
 	struct nf_conntrack_tuple tuple;
 	const struct nf_conntrack_tuple *tuple_ptr = &tuple;
 	const struct nf_conntrack_zone *zone = &nf_ct_zone_dflt;
 	enum ip_conntrack_info ctinfo;
 	const struct nf_conn *ct;
 	unsigned int connections;
+	u32 key[5];
 
 	ct = nf_ct_get(skb, &ctinfo);
 	if (ct != NULL) {
@@ -310,6 +48,7 @@ connlimit_mt(const struct sk_buff *skb, struct xt_action_param *par)
 
 	if (xt_family(par) == NFPROTO_IPV6) {
 		const struct ipv6hdr *iph = ipv6_hdr(skb);
+		union nf_inet_addr addr;
 		unsigned int i;
 
 		memcpy(&addr.ip6, (info->flags & XT_CONNLIMIT_DADDR) ?
@@ -317,22 +56,24 @@ connlimit_mt(const struct sk_buff *skb, struct xt_action_param *par)
 
 		for (i = 0; i < ARRAY_SIZE(addr.ip6); ++i)
 			addr.ip6[i] &= info->mask.ip6[i];
+		memcpy(key, &addr, sizeof(addr.ip6));
+		key[4] = zone->id;
 	} else {
 		const struct iphdr *iph = ip_hdr(skb);
-		addr.ip = (info->flags & XT_CONNLIMIT_DADDR) ?
+		key[0] = (info->flags & XT_CONNLIMIT_DADDR) ?
 			  iph->daddr : iph->saddr;
 
-		addr.ip &= info->mask.ip;
+		key[0] &= info->mask.ip;
+		key[1] = zone->id;
 	}
 
-	connections = count_them(net, info->data, tuple_ptr, &addr,
-				 xt_family(par), zone);
+	connections = nf_conncount_count(net, info->data, key,
+					 xt_family(par), tuple_ptr, zone);
 	if (connections == 0)
 		/* kmalloc failed, drop it entirely */
 		goto hotdrop;
 
-	return (connections > info->limit) ^
-	       !!(info->flags & XT_CONNLIMIT_INVERT);
+	return (connections > info->limit) ^ !!(info->flags & XT_CONNLIMIT_INVERT);
 
  hotdrop:
 	par->hotdrop = true;
@@ -342,61 +83,27 @@ connlimit_mt(const struct sk_buff *skb, struct xt_action_param *par)
 static int connlimit_mt_check(const struct xt_mtchk_param *par)
 {
 	struct xt_connlimit_info *info = par->matchinfo;
-	unsigned int i;
-	int ret;
+	unsigned int keylen;
 
-	net_get_random_once(&connlimit_rnd, sizeof(connlimit_rnd));
-
-	ret = nf_ct_netns_get(par->net, par->family);
-	if (ret < 0) {
-		pr_info("cannot load conntrack support for "
-			"address family %u\n", par->family);
-		return ret;
-	}
+	keylen = sizeof(u32);
+	if (par->family == NFPROTO_IPV6)
+		keylen += sizeof(struct in6_addr);
+	else
+		keylen += sizeof(struct in_addr);
 
 	/* init private data */
-	info->data = kmalloc(sizeof(struct xt_connlimit_data), GFP_KERNEL);
-	if (info->data == NULL) {
-		nf_ct_netns_put(par->net, par->family);
-		return -ENOMEM;
-	}
-
-	for (i = 0; i < ARRAY_SIZE(info->data->climit_root); ++i)
-		info->data->climit_root[i] = RB_ROOT;
+	info->data = nf_conncount_init(par->net, par->family, keylen);
+	if (IS_ERR(info->data))
+		return PTR_ERR(info->data);
 
 	return 0;
 }
 
-static void destroy_tree(struct rb_root *r)
-{
-	struct xt_connlimit_conn *conn;
-	struct xt_connlimit_rb *rbconn;
-	struct hlist_node *n;
-	struct rb_node *node;
-
-	while ((node = rb_first(r)) != NULL) {
-		rbconn = rb_entry(node, struct xt_connlimit_rb, node);
-
-		rb_erase(node, r);
-
-		hlist_for_each_entry_safe(conn, n, &rbconn->hhead, node)
-			kmem_cache_free(connlimit_conn_cachep, conn);
-
-		kmem_cache_free(connlimit_rb_cachep, rbconn);
-	}
-}
-
 static void connlimit_mt_destroy(const struct xt_mtdtor_param *par)
 {
 	const struct xt_connlimit_info *info = par->matchinfo;
-	unsigned int i;
 
-	nf_ct_netns_put(par->net, par->family);
-
-	for (i = 0; i < ARRAY_SIZE(info->data->climit_root); ++i)
-		destroy_tree(&info->data->climit_root[i]);
-
-	kfree(info->data);
+	nf_conncount_destroy(par->net, par->family, info->data);
 }
 
 static struct xt_match connlimit_mt_reg __read_mostly = {
@@ -413,40 +120,12 @@ static struct xt_match connlimit_mt_reg __read_mostly = {
 
 static int __init connlimit_mt_init(void)
 {
-	int ret, i;
-
-	BUILD_BUG_ON(CONNLIMIT_LOCK_SLOTS > CONNLIMIT_SLOTS);
-	BUILD_BUG_ON((CONNLIMIT_SLOTS % CONNLIMIT_LOCK_SLOTS) != 0);
-
-	for (i = 0; i < CONNLIMIT_LOCK_SLOTS; ++i)
-		spin_lock_init(&xt_connlimit_locks[i]);
-
-	connlimit_conn_cachep = kmem_cache_create("xt_connlimit_conn",
-					   sizeof(struct xt_connlimit_conn),
-					   0, 0, NULL);
-	if (!connlimit_conn_cachep)
-		return -ENOMEM;
-
-	connlimit_rb_cachep = kmem_cache_create("xt_connlimit_rb",
-					   sizeof(struct xt_connlimit_rb),
-					   0, 0, NULL);
-	if (!connlimit_rb_cachep) {
-		kmem_cache_destroy(connlimit_conn_cachep);
-		return -ENOMEM;
-	}
-	ret = xt_register_match(&connlimit_mt_reg);
-	if (ret != 0) {
-		kmem_cache_destroy(connlimit_conn_cachep);
-		kmem_cache_destroy(connlimit_rb_cachep);
-	}
-	return ret;
+	return xt_register_match(&connlimit_mt_reg);
 }
 
 static void __exit connlimit_mt_exit(void)
 {
 	xt_unregister_match(&connlimit_mt_reg);
-	kmem_cache_destroy(connlimit_conn_cachep);
-	kmem_cache_destroy(connlimit_rb_cachep);
 }
 
 module_init(connlimit_mt_init);
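
From the match's point of view, the whole rbtree/GC machinery above is
replaced by three calls into the shared conntrack-count
infrastructure. A condensed recap of the lifecycle as used in this
diff (not compilable on its own; the opaque per-rule type is assumed
to be struct nf_conncount_data):

	struct nf_conncount_data *data;
	unsigned int n;

	/* checkentry: allocate per-rule state for a key of keylen bytes */
	data = nf_conncount_init(net, family, keylen);
	if (IS_ERR(data))
		return PTR_ERR(data);

	/* per packet: connections sharing 'key'; 0 means an internal
	 * allocation failed, which the match turns into a hotdrop
	 */
	n = nf_conncount_count(net, data, key, family, tuple_ptr, zone);

	/* rule teardown */
	nf_conncount_destroy(net, family, data);
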
diff --git a/net/netfilter/xt_policy.c b/net/netfilter/xt_policy.c
index 2b4ab18..5639fb0 100644
--- a/net/netfilter/xt_policy.c
+++ b/net/netfilter/xt_policy.c
@@ -93,7 +93,8 @@ match_policy_out(const struct sk_buff *skb, const struct xt_policy_info *info,
 	if (dst->xfrm == NULL)
 		return -1;
 
-	for (i = 0; dst && dst->xfrm; dst = dst->child, i++) {
+	for (i = 0; dst && dst->xfrm;
+	     dst = ((struct xfrm_dst *)dst)->child, i++) {
 		pos = strict ? i : 0;
 		if (pos >= info->len)
 			return 0;
diff --git a/net/netfilter/xt_set.c b/net/netfilter/xt_set.c
index 6428570..16b6b11 100644
--- a/net/netfilter/xt_set.c
+++ b/net/netfilter/xt_set.c
@@ -39,13 +39,17 @@ match_set(ip_set_id_t index, const struct sk_buff *skb,
 	return inv;
 }
 
-#define ADT_OPT(n, f, d, fs, cfs, t)	\
-struct ip_set_adt_opt n = {		\
-	.family	= f,			\
-	.dim = d,			\
-	.flags = fs,			\
-	.cmdflags = cfs,		\
-	.ext.timeout = t,		\
+#define ADT_OPT(n, f, d, fs, cfs, t, p, b, po, bo)	\
+struct ip_set_adt_opt n = {				\
+	.family	= f,					\
+	.dim = d,					\
+	.flags = fs,					\
+	.cmdflags = cfs,				\
+	.ext.timeout = t,				\
+	.ext.packets = p,				\
+	.ext.bytes = b,					\
+	.ext.packets_op = po,				\
+	.ext.bytes_op = bo,				\
 }
 
 /* Revision 0 interface: backward compatible with netfilter/iptables */
@@ -56,7 +60,8 @@ set_match_v0(const struct sk_buff *skb, struct xt_action_param *par)
 	const struct xt_set_info_match_v0 *info = par->matchinfo;
 
 	ADT_OPT(opt, xt_family(par), info->match_set.u.compat.dim,
-		info->match_set.u.compat.flags, 0, UINT_MAX);
+		info->match_set.u.compat.flags, 0, UINT_MAX,
+		0, 0, 0, 0);
 
 	return match_set(info->match_set.index, skb, par, &opt,
 			 info->match_set.u.compat.flags & IPSET_INV_MATCH);
@@ -119,7 +124,8 @@ set_match_v1(const struct sk_buff *skb, struct xt_action_param *par)
 	const struct xt_set_info_match_v1 *info = par->matchinfo;
 
 	ADT_OPT(opt, xt_family(par), info->match_set.dim,
-		info->match_set.flags, 0, UINT_MAX);
+		info->match_set.flags, 0, UINT_MAX,
+		0, 0, 0, 0);
 
 	if (opt.flags & IPSET_RETURN_NOMATCH)
 		opt.cmdflags |= IPSET_FLAG_RETURN_NOMATCH;
@@ -161,45 +167,21 @@ set_match_v1_destroy(const struct xt_mtdtor_param *par)
 /* Revision 3 match */
 
 static bool
-match_counter0(u64 counter, const struct ip_set_counter_match0 *info)
-{
-	switch (info->op) {
-	case IPSET_COUNTER_NONE:
-		return true;
-	case IPSET_COUNTER_EQ:
-		return counter == info->value;
-	case IPSET_COUNTER_NE:
-		return counter != info->value;
-	case IPSET_COUNTER_LT:
-		return counter < info->value;
-	case IPSET_COUNTER_GT:
-		return counter > info->value;
-	}
-	return false;
-}
-
-static bool
 set_match_v3(const struct sk_buff *skb, struct xt_action_param *par)
 {
 	const struct xt_set_info_match_v3 *info = par->matchinfo;
-	int ret;
 
 	ADT_OPT(opt, xt_family(par), info->match_set.dim,
-		info->match_set.flags, info->flags, UINT_MAX);
+		info->match_set.flags, info->flags, UINT_MAX,
+		info->packets.value, info->bytes.value,
+		info->packets.op, info->bytes.op);
 
 	if (info->packets.op != IPSET_COUNTER_NONE ||
 	    info->bytes.op != IPSET_COUNTER_NONE)
 		opt.cmdflags |= IPSET_FLAG_MATCH_COUNTERS;
 
-	ret = match_set(info->match_set.index, skb, par, &opt,
-			info->match_set.flags & IPSET_INV_MATCH);
-
-	if (!(ret && opt.cmdflags & IPSET_FLAG_MATCH_COUNTERS))
-		return ret;
-
-	if (!match_counter0(opt.ext.packets, &info->packets))
-		return false;
-	return match_counter0(opt.ext.bytes, &info->bytes);
+	return match_set(info->match_set.index, skb, par, &opt,
+			 info->match_set.flags & IPSET_INV_MATCH);
 }
 
 #define set_match_v3_checkentry	set_match_v1_checkentry
@@ -208,45 +190,21 @@ set_match_v3(const struct sk_buff *skb, struct xt_action_param *par)
 /* Revision 4 match */
 
 static bool
-match_counter(u64 counter, const struct ip_set_counter_match *info)
-{
-	switch (info->op) {
-	case IPSET_COUNTER_NONE:
-		return true;
-	case IPSET_COUNTER_EQ:
-		return counter == info->value;
-	case IPSET_COUNTER_NE:
-		return counter != info->value;
-	case IPSET_COUNTER_LT:
-		return counter < info->value;
-	case IPSET_COUNTER_GT:
-		return counter > info->value;
-	}
-	return false;
-}
-
-static bool
 set_match_v4(const struct sk_buff *skb, struct xt_action_param *par)
 {
 	const struct xt_set_info_match_v4 *info = par->matchinfo;
-	int ret;
 
 	ADT_OPT(opt, xt_family(par), info->match_set.dim,
-		info->match_set.flags, info->flags, UINT_MAX);
+		info->match_set.flags, info->flags, UINT_MAX,
+		info->packets.value, info->bytes.value,
+		info->packets.op, info->bytes.op);
 
 	if (info->packets.op != IPSET_COUNTER_NONE ||
 	    info->bytes.op != IPSET_COUNTER_NONE)
 		opt.cmdflags |= IPSET_FLAG_MATCH_COUNTERS;
 
-	ret = match_set(info->match_set.index, skb, par, &opt,
-			info->match_set.flags & IPSET_INV_MATCH);
-
-	if (!(ret && opt.cmdflags & IPSET_FLAG_MATCH_COUNTERS))
-		return ret;
-
-	if (!match_counter(opt.ext.packets, &info->packets))
-		return false;
-	return match_counter(opt.ext.bytes, &info->bytes);
+	return match_set(info->match_set.index, skb, par, &opt,
+			 info->match_set.flags & IPSET_INV_MATCH);
 }
 
 #define set_match_v4_checkentry	set_match_v1_checkentry
@@ -260,9 +218,11 @@ set_target_v0(struct sk_buff *skb, const struct xt_action_param *par)
 	const struct xt_set_info_target_v0 *info = par->targinfo;
 
 	ADT_OPT(add_opt, xt_family(par), info->add_set.u.compat.dim,
-		info->add_set.u.compat.flags, 0, UINT_MAX);
+		info->add_set.u.compat.flags, 0, UINT_MAX,
+		0, 0, 0, 0);
 	ADT_OPT(del_opt, xt_family(par), info->del_set.u.compat.dim,
-		info->del_set.u.compat.flags, 0, UINT_MAX);
+		info->del_set.u.compat.flags, 0, UINT_MAX,
+		0, 0, 0, 0);
 
 	if (info->add_set.index != IPSET_INVALID_ID)
 		ip_set_add(info->add_set.index, skb, par, &add_opt);
@@ -333,9 +293,11 @@ set_target_v1(struct sk_buff *skb, const struct xt_action_param *par)
 	const struct xt_set_info_target_v1 *info = par->targinfo;
 
 	ADT_OPT(add_opt, xt_family(par), info->add_set.dim,
-		info->add_set.flags, 0, UINT_MAX);
+		info->add_set.flags, 0, UINT_MAX,
+		0, 0, 0, 0);
 	ADT_OPT(del_opt, xt_family(par), info->del_set.dim,
-		info->del_set.flags, 0, UINT_MAX);
+		info->del_set.flags, 0, UINT_MAX,
+		0, 0, 0, 0);
 
 	if (info->add_set.index != IPSET_INVALID_ID)
 		ip_set_add(info->add_set.index, skb, par, &add_opt);
@@ -402,9 +364,11 @@ set_target_v2(struct sk_buff *skb, const struct xt_action_param *par)
 	const struct xt_set_info_target_v2 *info = par->targinfo;
 
 	ADT_OPT(add_opt, xt_family(par), info->add_set.dim,
-		info->add_set.flags, info->flags, info->timeout);
+		info->add_set.flags, info->flags, info->timeout,
+		0, 0, 0, 0);
 	ADT_OPT(del_opt, xt_family(par), info->del_set.dim,
-		info->del_set.flags, 0, UINT_MAX);
+		info->del_set.flags, 0, UINT_MAX,
+		0, 0, 0, 0);
 
 	/* Normalize to fit into jiffies */
 	if (add_opt.ext.timeout != IPSET_NO_TIMEOUT &&
@@ -432,11 +396,14 @@ set_target_v3(struct sk_buff *skb, const struct xt_action_param *par)
 	int ret;
 
 	ADT_OPT(add_opt, xt_family(par), info->add_set.dim,
-		info->add_set.flags, info->flags, info->timeout);
+		info->add_set.flags, info->flags, info->timeout,
+		0, 0, 0, 0);
 	ADT_OPT(del_opt, xt_family(par), info->del_set.dim,
-		info->del_set.flags, 0, UINT_MAX);
+		info->del_set.flags, 0, UINT_MAX,
+		0, 0, 0, 0);
 	ADT_OPT(map_opt, xt_family(par), info->map_set.dim,
-		info->map_set.flags, 0, UINT_MAX);
+		info->map_set.flags, 0, UINT_MAX,
+		0, 0, 0, 0);
 
 	/* Normalize to fit into jiffies */
 	if (add_opt.ext.timeout != IPSET_NO_TIMEOUT &&
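
The deleted match_counter()/match_counter0() switches do not simply
vanish: the packet/byte thresholds now ride along in struct
ip_set_adt_opt via the four new ADT_OPT() fields, so the set core can
evaluate them while it already holds the counter extension.
Presumably the core ends up doing the same comparison the match used
to, along these lines (helper name hypothetical):

	static bool
	ip_set_match_counter(u64 counter, u64 match, u8 op)
	{
		switch (op) {
		case IPSET_COUNTER_NONE:
			return true;
		case IPSET_COUNTER_EQ:
			return counter == match;
		case IPSET_COUNTER_NE:
			return counter != match;
		case IPSET_COUNTER_LT:
			return counter < match;
		case IPSET_COUNTER_GT:
			return counter > match;
		}
		return false;
	}
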
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
index 79cc1bf..972bfe1 100644
--- a/net/netlink/af_netlink.c
+++ b/net/netlink/af_netlink.c
@@ -65,6 +65,7 @@
 #include <linux/net_namespace.h>
 
 #include <net/net_namespace.h>
+#include <net/netns/generic.h>
 #include <net/sock.h>
 #include <net/scm.h>
 #include <net/netlink.h>
@@ -145,8 +146,6 @@ static atomic_t nl_table_users = ATOMIC_INIT(0);
 
 static BLOCKING_NOTIFIER_HEAD(netlink_chain);
 
-static DEFINE_SPINLOCK(netlink_tap_lock);
-static struct list_head netlink_tap_all __read_mostly;
 
 static const struct rhashtable_params netlink_rhashtable_params;
 
@@ -173,14 +172,24 @@ static struct sk_buff *netlink_to_full_skb(const struct sk_buff *skb,
 	return new;
 }
 
+static unsigned int netlink_tap_net_id;
+
+struct netlink_tap_net {
+	struct list_head netlink_tap_all;
+	struct mutex netlink_tap_lock;
+};
+
 int netlink_add_tap(struct netlink_tap *nt)
 {
+	struct net *net = dev_net(nt->dev);
+	struct netlink_tap_net *nn = net_generic(net, netlink_tap_net_id);
+
 	if (unlikely(nt->dev->type != ARPHRD_NETLINK))
 		return -EINVAL;
 
-	spin_lock(&netlink_tap_lock);
-	list_add_rcu(&nt->list, &netlink_tap_all);
-	spin_unlock(&netlink_tap_lock);
+	mutex_lock(&nn->netlink_tap_lock);
+	list_add_rcu(&nt->list, &nn->netlink_tap_all);
+	mutex_unlock(&nn->netlink_tap_lock);
 
 	__module_get(nt->module);
 
@@ -190,12 +199,14 @@ EXPORT_SYMBOL_GPL(netlink_add_tap);
 
 static int __netlink_remove_tap(struct netlink_tap *nt)
 {
+	struct net *net = dev_net(nt->dev);
+	struct netlink_tap_net *nn = net_generic(net, netlink_tap_net_id);
 	bool found = false;
 	struct netlink_tap *tmp;
 
-	spin_lock(&netlink_tap_lock);
+	mutex_lock(&nn->netlink_tap_lock);
 
-	list_for_each_entry(tmp, &netlink_tap_all, list) {
+	list_for_each_entry(tmp, &nn->netlink_tap_all, list) {
 		if (nt == tmp) {
 			list_del_rcu(&nt->list);
 			found = true;
@@ -205,7 +216,7 @@ static int __netlink_remove_tap(struct netlink_tap *nt)
 
 	pr_warn("__netlink_remove_tap: %p not found\n", nt);
 out:
-	spin_unlock(&netlink_tap_lock);
+	mutex_unlock(&nn->netlink_tap_lock);
 
 	if (found)
 		module_put(nt->module);
@@ -224,6 +235,26 @@ int netlink_remove_tap(struct netlink_tap *nt)
 }
 EXPORT_SYMBOL_GPL(netlink_remove_tap);
 
+static __net_init int netlink_tap_init_net(struct net *net)
+{
+	struct netlink_tap_net *nn = net_generic(net, netlink_tap_net_id);
+
+	INIT_LIST_HEAD(&nn->netlink_tap_all);
+	mutex_init(&nn->netlink_tap_lock);
+	return 0;
+}
+
+static void __net_exit netlink_tap_exit_net(struct net *net)
+{
+}
+
+static struct pernet_operations netlink_tap_net_ops = {
+	.init = netlink_tap_init_net,
+	.exit = netlink_tap_exit_net,
+	.id   = &netlink_tap_net_id,
+	.size = sizeof(struct netlink_tap_net),
+};
+
 static bool netlink_filter_tap(const struct sk_buff *skb)
 {
 	struct sock *sk = skb->sk;
@@ -277,7 +308,7 @@ static int __netlink_deliver_tap_skb(struct sk_buff *skb,
 	return ret;
 }
 
-static void __netlink_deliver_tap(struct sk_buff *skb)
+static void __netlink_deliver_tap(struct sk_buff *skb, struct netlink_tap_net *nn)
 {
 	int ret;
 	struct netlink_tap *tmp;
@@ -285,19 +316,21 @@ static void __netlink_deliver_tap(struct sk_buff *skb)
 	if (!netlink_filter_tap(skb))
 		return;
 
-	list_for_each_entry_rcu(tmp, &netlink_tap_all, list) {
+	list_for_each_entry_rcu(tmp, &nn->netlink_tap_all, list) {
 		ret = __netlink_deliver_tap_skb(skb, tmp->dev);
 		if (unlikely(ret))
 			break;
 	}
 }
 
-static void netlink_deliver_tap(struct sk_buff *skb)
+static void netlink_deliver_tap(struct net *net, struct sk_buff *skb)
 {
+	struct netlink_tap_net *nn = net_generic(net, netlink_tap_net_id);
+
 	rcu_read_lock();
 
-	if (unlikely(!list_empty(&netlink_tap_all)))
-		__netlink_deliver_tap(skb);
+	if (unlikely(!list_empty(&nn->netlink_tap_all)))
+		__netlink_deliver_tap(skb, nn);
 
 	rcu_read_unlock();
 }
@@ -306,7 +339,7 @@ static void netlink_deliver_tap_kernel(struct sock *dst, struct sock *src,
 				       struct sk_buff *skb)
 {
 	if (!(netlink_is_kernel(dst) && netlink_is_kernel(src)))
-		netlink_deliver_tap(skb);
+		netlink_deliver_tap(sock_net(dst), skb);
 }
 
 static void netlink_overrun(struct sock *sk)
@@ -1216,7 +1249,7 @@ static int __netlink_sendskb(struct sock *sk, struct sk_buff *skb)
 {
 	int len = skb->len;
 
-	netlink_deliver_tap(skb);
+	netlink_deliver_tap(sock_net(sk), skb);
 
 	skb_queue_tail(&sk->sk_receive_queue, skb);
 	sk->sk_data_ready(sk);
@@ -2481,8 +2514,9 @@ static int netlink_walk_start(struct nl_seq_iter *iter)
 		return err;
 	}
 
-	err = rhashtable_walk_start(&iter->hti);
-	return err == -EAGAIN ? 0 : err;
+	rhashtable_walk_start(&iter->hti);
+
+	return 0;
 }
 
 static void netlink_walk_stop(struct nl_seq_iter *iter)
@@ -2733,12 +2767,11 @@ static int __init netlink_proto_init(void)
 		}
 	}
 
-	INIT_LIST_HEAD(&netlink_tap_all);
-
 	netlink_add_usersock_entry();
 
 	sock_register(&netlink_family_ops);
 	register_pernet_subsys(&netlink_net_ops);
+	register_pernet_subsys(&netlink_tap_net_ops);
 	/* The netlink device handler may be needed early. */
 	rtnetlink_init();
 out:
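
With the tap list and its lock now per network namespace, a tap user
(nlmon is the in-tree one) is transparently registered against the
netns of its device. A minimal registration sketch under that
assumption; my_tap, dev and err are illustrative:

	static struct netlink_tap my_tap = {
		.module = THIS_MODULE,
	};

	/* dev must be an ARPHRD_NETLINK device; the tap lands on
	 * dev_net(dev)'s private list rather than a global one.
	 */
	my_tap.dev = dev;
	err = netlink_add_tap(&my_tap);
	if (err)
		return err;

	/* ... later, on teardown ... */
	netlink_remove_tap(&my_tap);
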
diff --git a/net/netlink/diag.c b/net/netlink/diag.c
index 8faa20b..7dda33b 100644
--- a/net/netlink/diag.c
+++ b/net/netlink/diag.c
@@ -115,11 +115,7 @@ static int __netlink_diag_dump(struct sk_buff *skb, struct netlink_callback *cb,
 	if (!s_num)
 		rhashtable_walk_enter(&tbl->hash, hti);
 
-	ret = rhashtable_walk_start(hti);
-	if (ret == -EAGAIN)
-		ret = 0;
-	if (ret)
-		goto stop;
+	rhashtable_walk_start(hti);
 
 	while ((nlsk = rhashtable_walk_next(hti))) {
 		if (IS_ERR(nlsk)) {
@@ -146,8 +142,8 @@ static int __netlink_diag_dump(struct sk_buff *skb, struct netlink_callback *cb,
 		}
 	}
 
-stop:
 	rhashtable_walk_stop(hti);
+
 	if (ret)
 		goto done;
 
diff --git a/net/openvswitch/conntrack.c b/net/openvswitch/conntrack.c
index b27c5c6..62f36cc9 100644
--- a/net/openvswitch/conntrack.c
+++ b/net/openvswitch/conntrack.c
@@ -1266,14 +1266,14 @@ static int parse_nat(const struct nlattr *attr,
 		/* Do not allow flags if no type is given. */
 		if (info->range.flags) {
 			OVS_NLERR(log,
-				  "NAT flags may be given only when NAT range (SRC or DST) is also specified.\n"
+				  "NAT flags may be given only when NAT range (SRC or DST) is also specified."
 				  );
 			return -EINVAL;
 		}
 		info->nat = OVS_CT_NAT;   /* NAT existing connections. */
 	} else if (!info->commit) {
 		OVS_NLERR(log,
-			  "NAT attributes may be specified only when CT COMMIT flag is also specified.\n"
+			  "NAT attributes may be specified only when CT COMMIT flag is also specified."
 			  );
 		return -EINVAL;
 	}
diff --git a/net/openvswitch/flow.c b/net/openvswitch/flow.c
index f039064..56b8e71 100644
--- a/net/openvswitch/flow.c
+++ b/net/openvswitch/flow.c
@@ -56,12 +56,12 @@
 
 u64 ovs_flow_used_time(unsigned long flow_jiffies)
 {
-	struct timespec cur_ts;
+	struct timespec64 cur_ts;
 	u64 cur_ms, idle_ms;
 
-	ktime_get_ts(&cur_ts);
+	ktime_get_ts64(&cur_ts);
 	idle_ms = jiffies_to_msecs(jiffies - flow_jiffies);
-	cur_ms = (u64)cur_ts.tv_sec * MSEC_PER_SEC +
+	cur_ms = (u64)(u32)cur_ts.tv_sec * MSEC_PER_SEC +
 		 cur_ts.tv_nsec / NSEC_PER_MSEC;
 
 	return cur_ms - idle_ms;
diff --git a/net/openvswitch/flow_netlink.c b/net/openvswitch/flow_netlink.c
index 624ea74..bce1f78 100644
--- a/net/openvswitch/flow_netlink.c
+++ b/net/openvswitch/flow_netlink.c
@@ -644,12 +644,12 @@ static int erspan_tun_opt_from_nlattr(const struct nlattr *attr,
 	BUILD_BUG_ON(sizeof(opts) > sizeof(match->key->tun_opts));
 
 	memset(&opts, 0, sizeof(opts));
-	opts.index = nla_get_be32(attr);
+	opts.u.index = nla_get_be32(attr);
 
 	/* Index has only 20-bit */
-	if (ntohl(opts.index) & ~INDEX_MASK) {
+	if (ntohl(opts.u.index) & ~INDEX_MASK) {
 		OVS_NLERR(log, "ERSPAN index number %x too large.",
-			  ntohl(opts.index));
+			  ntohl(opts.u.index));
 		return -EINVAL;
 	}
 
@@ -907,7 +907,7 @@ static int __ip_tun_to_nlattr(struct sk_buff *skb,
 			return -EMSGSIZE;
 		else if (output->tun_flags & TUNNEL_ERSPAN_OPT &&
 			 nla_put_be32(skb, OVS_TUNNEL_KEY_ATTR_ERSPAN_OPTS,
-				      ((struct erspan_metadata *)tun_opts)->index))
+				      ((struct erspan_metadata *)tun_opts)->u.index))
 			return -EMSGSIZE;
 	}
 
diff --git a/net/openvswitch/vport-internal_dev.c b/net/openvswitch/vport-internal_dev.c
index 04a3128..bb95c43 100644
--- a/net/openvswitch/vport-internal_dev.c
+++ b/net/openvswitch/vport-internal_dev.c
@@ -16,7 +16,6 @@
  * 02110-1301, USA
  */
 
-#include <linux/hardirq.h>
 #include <linux/if_vlan.h>
 #include <linux/kernel.h>
 #include <linux/netdevice.h>
@@ -126,18 +125,12 @@ internal_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats)
 	}
 }
 
-static void internal_set_rx_headroom(struct net_device *dev, int new_hr)
-{
-	dev->needed_headroom = new_hr < 0 ? 0 : new_hr;
-}
-
 static const struct net_device_ops internal_dev_netdev_ops = {
 	.ndo_open = internal_dev_open,
 	.ndo_stop = internal_dev_stop,
 	.ndo_start_xmit = internal_dev_xmit,
 	.ndo_set_mac_address = eth_mac_addr,
 	.ndo_get_stats64 = internal_get_stats,
-	.ndo_set_rx_headroom = internal_set_rx_headroom,
 };
 
 static struct rtnl_link_ops internal_dev_link_ops __read_mostly = {
@@ -154,7 +147,7 @@ static void do_setup(struct net_device *netdev)
 
 	netdev->priv_flags &= ~IFF_TX_SKB_SHARING;
 	netdev->priv_flags |= IFF_LIVE_ADDR_CHANGE | IFF_OPENVSWITCH |
-			      IFF_PHONY_HEADROOM | IFF_NO_QUEUE;
+			      IFF_NO_QUEUE;
 	netdev->needs_free_netdev = true;
 	netdev->priv_destructor = internal_dev_destructor;
 	netdev->ethtool_ops = &internal_dev_ethtool_ops;
@@ -195,7 +188,6 @@ static struct vport *internal_dev_create(const struct vport_parms *parms)
 		err = -ENOMEM;
 		goto error_free_netdev;
 	}
-	vport->dev->needed_headroom = vport->dp->max_headroom;
 
 	dev_net_set(vport->dev, ovs_dp_get_net(vport->dp));
 	internal_dev = internal_dev_priv(vport->dev);
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index da215e5..ee7aa0b 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -247,12 +247,13 @@ static int packet_direct_xmit(struct sk_buff *skb)
 	struct sk_buff *orig_skb = skb;
 	struct netdev_queue *txq;
 	int ret = NETDEV_TX_BUSY;
+	bool again = false;
 
 	if (unlikely(!netif_running(dev) ||
 		     !netif_carrier_ok(dev)))
 		goto drop;
 
-	skb = validate_xmit_skb_list(skb, dev);
+	skb = validate_xmit_skb_list(skb, dev, &again);
 	if (skb != orig_skb)
 		goto drop;
 
diff --git a/net/phonet/pn_netlink.c b/net/phonet/pn_netlink.c
index da754fc..871eaf2 100644
--- a/net/phonet/pn_netlink.c
+++ b/net/phonet/pn_netlink.c
@@ -299,16 +299,21 @@ static int route_dumpit(struct sk_buff *skb, struct netlink_callback *cb)
 
 int __init phonet_netlink_register(void)
 {
-	int err = __rtnl_register(PF_PHONET, RTM_NEWADDR, addr_doit,
-				  NULL, 0);
+	int err = rtnl_register_module(THIS_MODULE, PF_PHONET, RTM_NEWADDR,
+				       addr_doit, NULL, 0);
 	if (err)
 		return err;
 
-	/* Further __rtnl_register() cannot fail */
-	__rtnl_register(PF_PHONET, RTM_DELADDR, addr_doit, NULL, 0);
-	__rtnl_register(PF_PHONET, RTM_GETADDR, NULL, getaddr_dumpit, 0);
-	__rtnl_register(PF_PHONET, RTM_NEWROUTE, route_doit, NULL, 0);
-	__rtnl_register(PF_PHONET, RTM_DELROUTE, route_doit, NULL, 0);
-	__rtnl_register(PF_PHONET, RTM_GETROUTE, NULL, route_dumpit, 0);
+	/* Further rtnl_register_module() cannot fail */
+	rtnl_register_module(THIS_MODULE, PF_PHONET, RTM_DELADDR,
+			     addr_doit, NULL, 0);
+	rtnl_register_module(THIS_MODULE, PF_PHONET, RTM_GETADDR,
+			     NULL, getaddr_dumpit, 0);
+	rtnl_register_module(THIS_MODULE, PF_PHONET, RTM_NEWROUTE,
+			     route_doit, NULL, 0);
+	rtnl_register_module(THIS_MODULE, PF_PHONET, RTM_DELROUTE,
+			     route_doit, NULL, 0);
+	rtnl_register_module(THIS_MODULE, PF_PHONET, RTM_GETROUTE,
+			     NULL, route_dumpit, 0);
 	return 0;
 }
diff --git a/net/qrtr/qrtr.c b/net/qrtr/qrtr.c
index 77ab05e..5fb3929e 100644
--- a/net/qrtr/qrtr.c
+++ b/net/qrtr/qrtr.c
@@ -1116,9 +1116,13 @@ static int __init qrtr_proto_init(void)
 		return rc;
 	}
 
-	rtnl_register(PF_QIPCRTR, RTM_NEWADDR, qrtr_addr_doit, NULL, 0);
+	rc = rtnl_register_module(THIS_MODULE, PF_QIPCRTR, RTM_NEWADDR, qrtr_addr_doit, NULL, 0);
+	if (rc) {
+		sock_unregister(qrtr_family.family);
+		proto_unregister(&qrtr_proto);
+	}
 
-	return 0;
+	return rc;
 }
 postcore_initcall(qrtr_proto_init);
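
qrtr registers only a single rtnetlink message type, so a failure can
simply unwind the earlier socket/proto registrations, as above. The
same rollback shape applies to any module converting away from
__rtnl_register(); sketched generically (PF_FOO and the foo_* symbols
are placeholders):

	rc = rtnl_register_module(THIS_MODULE, PF_FOO, RTM_NEWADDR,
				  foo_addr_doit, NULL, 0);
	if (rc) {
		/* unwind whatever was registered before this point */
		sock_unregister(foo_family.family);
		proto_unregister(&foo_proto);
		return rc;
	}
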
 
diff --git a/net/rds/bind.c b/net/rds/bind.c
index 75d43dc..5aa3a64 100644
--- a/net/rds/bind.c
+++ b/net/rds/bind.c
@@ -114,6 +114,7 @@ static int rds_add_bound(struct rds_sock *rs, __be32 addr, __be16 *port)
 			  rs, &addr, (int)ntohs(*port));
 			break;
 		} else {
+			rs->rs_bound_addr = 0;
 			rds_sock_put(rs);
 			ret = -ENOMEM;
 			break;
diff --git a/net/rds/cong.c b/net/rds/cong.c
index 8398fee..8d19fd2 100644
--- a/net/rds/cong.c
+++ b/net/rds/cong.c
@@ -219,7 +219,11 @@ void rds_cong_queue_updates(struct rds_cong_map *map)
 	spin_lock_irqsave(&rds_cong_lock, flags);
 
 	list_for_each_entry(conn, &map->m_conn_list, c_map_item) {
-		if (!test_and_set_bit(0, &conn->c_map_queued)) {
+		struct rds_conn_path *cp = &conn->c_path[0];
+
+		rcu_read_lock();
+		if (!test_and_set_bit(0, &conn->c_map_queued) &&
+		    !test_bit(RDS_DESTROY_PENDING, &cp->cp_flags)) {
 			rds_stats_inc(s_cong_update_queued);
 			/* We cannot inline the call to rds_send_xmit() here
 			 * for two reasons (both pertaining to a TCP transport):
@@ -235,9 +239,9 @@ void rds_cong_queue_updates(struct rds_cong_map *map)
 			 *    therefore trigger warnings.
 			 * Defer the xmit to rds_send_worker() instead.
 			 */
-			queue_delayed_work(rds_wq,
-					   &conn->c_path[0].cp_send_w, 0);
+			queue_delayed_work(rds_wq, &cp->cp_send_w, 0);
 		}
+		rcu_read_unlock();
 	}
 
 	spin_unlock_irqrestore(&rds_cong_lock, flags);
diff --git a/net/rds/connection.c b/net/rds/connection.c
index 7ee2d5d..b10c0ef 100644
--- a/net/rds/connection.c
+++ b/net/rds/connection.c
@@ -230,8 +230,8 @@ static struct rds_connection *__rds_conn_create(struct net *net,
 
 	rdsdebug("allocated conn %p for %pI4 -> %pI4 over %s %s\n",
 	  conn, &laddr, &faddr,
-	  trans->t_name ? trans->t_name : "[unknown]",
-	  is_outgoing ? "(outgoing)" : "");
+	  strnlen(trans->t_name, sizeof(trans->t_name)) ? trans->t_name :
+	  "[unknown]", is_outgoing ? "(outgoing)" : "");
 
 	/*
 	 * Since we ran without holding the conn lock, someone could
@@ -382,10 +382,13 @@ static void rds_conn_path_destroy(struct rds_conn_path *cp)
 {
 	struct rds_message *rm, *rtmp;
 
+	set_bit(RDS_DESTROY_PENDING, &cp->cp_flags);
+
 	if (!cp->cp_transport_data)
 		return;
 
 	/* make sure lingering queued work won't try to ref the conn */
+	synchronize_rcu();
 	cancel_delayed_work_sync(&cp->cp_send_w);
 	cancel_delayed_work_sync(&cp->cp_recv_w);
 
@@ -403,6 +406,11 @@ static void rds_conn_path_destroy(struct rds_conn_path *cp)
 	if (cp->cp_xmit_rm)
 		rds_message_put(cp->cp_xmit_rm);
 
+	WARN_ON(delayed_work_pending(&cp->cp_send_w));
+	WARN_ON(delayed_work_pending(&cp->cp_recv_w));
+	WARN_ON(delayed_work_pending(&cp->cp_conn_w));
+	WARN_ON(work_pending(&cp->cp_down_w));
+
 	cp->cp_conn->c_trans->conn_free(cp->cp_transport_data);
 }
 
@@ -424,7 +432,6 @@ void rds_conn_destroy(struct rds_connection *conn)
 		 "%pI4\n", conn, &conn->c_laddr,
 		 &conn->c_faddr);
 
-	conn->c_destroy_in_prog = 1;
 	/* Ensure conn will not be scheduled for reconnect */
 	spin_lock_irq(&rds_conn_lock);
 	hlist_del_init_rcu(&conn->c_hash_node);
@@ -445,7 +452,6 @@ void rds_conn_destroy(struct rds_connection *conn)
 	 */
 	rds_cong_remove_conn(conn);
 
-	put_net(conn->c_net);
 	kfree(conn->c_path);
 	kmem_cache_free(rds_conn_slab, conn);
 
@@ -684,10 +690,13 @@ void rds_conn_path_drop(struct rds_conn_path *cp, bool destroy)
 {
 	atomic_set(&cp->cp_state, RDS_CONN_ERROR);
 
-	if (!destroy && cp->cp_conn->c_destroy_in_prog)
+	rcu_read_lock();
+	if (!destroy && test_bit(RDS_DESTROY_PENDING, &cp->cp_flags)) {
+		rcu_read_unlock();
 		return;
-
+	}
 	queue_work(rds_wq, &cp->cp_down_w);
+	rcu_read_unlock();
 }
 EXPORT_SYMBOL_GPL(rds_conn_path_drop);
 
@@ -704,9 +713,15 @@ EXPORT_SYMBOL_GPL(rds_conn_drop);
  */
 void rds_conn_path_connect_if_down(struct rds_conn_path *cp)
 {
+	rcu_read_lock();
+	if (test_bit(RDS_DESTROY_PENDING, &cp->cp_flags)) {
+		rcu_read_unlock();
+		return;
+	}
 	if (rds_conn_path_state(cp) == RDS_CONN_DOWN &&
 	    !test_and_set_bit(RDS_RECONNECT_PENDING, &cp->cp_flags))
 		queue_delayed_work(rds_wq, &cp->cp_conn_w, 0);
+	rcu_read_unlock();
 }
 EXPORT_SYMBOL_GPL(rds_conn_path_connect_if_down);
 
diff --git a/net/rds/rds.h b/net/rds/rds.h
index c349c71b..374ae83 100644
--- a/net/rds/rds.h
+++ b/net/rds/rds.h
@@ -88,6 +88,7 @@ enum {
 #define RDS_RECONNECT_PENDING	1
 #define RDS_IN_XMIT		2
 #define RDS_RECV_REFILL		3
+#define	RDS_DESTROY_PENDING	4
 
 /* Max number of multipaths per RDS connection. Must be a power of 2 */
 #define	RDS_MPATH_WORKERS	8
@@ -139,8 +140,7 @@ struct rds_connection {
 	__be32			c_faddr;
 	unsigned int		c_loopback:1,
 				c_ping_triggered:1,
-				c_destroy_in_prog:1,
-				c_pad_to_32:29;
+				c_pad_to_32:30;
 	int			c_npaths;
 	struct rds_connection	*c_passive;
 	struct rds_transport	*c_trans;
@@ -150,7 +150,7 @@ struct rds_connection {
 
 	/* Protocol version */
 	unsigned int		c_version;
-	struct net		*c_net;
+	possible_net_t		c_net;
 
 	struct list_head	c_map_item;
 	unsigned long		c_map_queued;
@@ -165,13 +165,13 @@ struct rds_connection {
 static inline
 struct net *rds_conn_net(struct rds_connection *conn)
 {
-	return conn->c_net;
+	return read_pnet(&conn->c_net);
 }
 
 static inline
 void rds_conn_net_set(struct rds_connection *conn, struct net *net)
 {
-	conn->c_net = get_net(net);
+	write_pnet(&conn->c_net, net);
 }
 
 #define RDS_FLAG_CONG_BITMAP	0x01
diff --git a/net/rds/send.c b/net/rds/send.c
index f72466c..d3e32d1 100644
--- a/net/rds/send.c
+++ b/net/rds/send.c
@@ -162,6 +162,12 @@ int rds_send_xmit(struct rds_conn_path *cp)
 		goto out;
 	}
 
+	if (test_bit(RDS_DESTROY_PENDING, &cp->cp_flags)) {
+		release_in_xmit(cp);
+		ret = -ENETUNREACH; /* don't requeue send work */
+		goto out;
+	}
+
 	/*
 	 * we record the send generation after doing the xmit acquire.
 	 * if someone else manages to jump in and do some work, we'll use
@@ -437,7 +443,12 @@ int rds_send_xmit(struct rds_conn_path *cp)
 		    !list_empty(&cp->cp_send_queue)) && !raced) {
 			if (batch_count < send_batch_count)
 				goto restart;
-			queue_delayed_work(rds_wq, &cp->cp_send_w, 1);
+			rcu_read_lock();
+			if (test_bit(RDS_DESTROY_PENDING, &cp->cp_flags))
+				ret = -ENETUNREACH;
+			else
+				queue_delayed_work(rds_wq, &cp->cp_send_w, 1);
+			rcu_read_unlock();
 		} else if (raced) {
 			rds_stats_inc(s_send_lock_queue_raced);
 		}
@@ -1151,6 +1162,11 @@ int rds_sendmsg(struct socket *sock, struct msghdr *msg, size_t payload_len)
 	else
 		cpath = &conn->c_path[0];
 
+	if (test_bit(RDS_DESTROY_PENDING, &cpath->cp_flags)) {
+		ret = -EAGAIN;
+		goto out;
+	}
+
 	rds_conn_path_connect_if_down(cpath);
 
 	ret = rds_cong_wait(conn->c_fcong, dport, nonblock, rs);
@@ -1190,9 +1206,17 @@ int rds_sendmsg(struct socket *sock, struct msghdr *msg, size_t payload_len)
 	rds_stats_inc(s_send_queued);
 
 	ret = rds_send_xmit(cpath);
-	if (ret == -ENOMEM || ret == -EAGAIN)
-		queue_delayed_work(rds_wq, &cpath->cp_send_w, 1);
-
+	if (ret == -ENOMEM || ret == -EAGAIN) {
+		ret = 0;
+		rcu_read_lock();
+		if (test_bit(RDS_DESTROY_PENDING, &cpath->cp_flags))
+			ret = -ENETUNREACH;
+		else
+			queue_delayed_work(rds_wq, &cpath->cp_send_w, 1);
+		rcu_read_unlock();
+	}
+	if (ret)
+		goto out;
 	rds_message_put(rm);
 	return payload_len;
 
@@ -1270,7 +1294,10 @@ rds_send_probe(struct rds_conn_path *cp, __be16 sport,
 	rds_stats_inc(s_send_pong);
 
 	/* schedule the send work on rds_wq */
-	queue_delayed_work(rds_wq, &cp->cp_send_w, 1);
+	rcu_read_lock();
+	if (!test_bit(RDS_DESTROY_PENDING, &cp->cp_flags))
+		queue_delayed_work(rds_wq, &cp->cp_send_w, 1);
+	rcu_read_unlock();
 
 	rds_message_put(rm);
 	return 0;
diff --git a/net/rds/tcp.c b/net/rds/tcp.c
index 6b7ee71..2e554ef 100644
--- a/net/rds/tcp.c
+++ b/net/rds/tcp.c
@@ -270,16 +270,33 @@ static int rds_tcp_laddr_check(struct net *net, __be32 addr)
 	return -EADDRNOTAVAIL;
 }
 
+static void rds_tcp_conn_free(void *arg)
+{
+	struct rds_tcp_connection *tc = arg;
+	unsigned long flags;
+
+	rdsdebug("freeing tc %p\n", tc);
+
+	spin_lock_irqsave(&rds_tcp_conn_lock, flags);
+	if (!tc->t_tcp_node_detached)
+		list_del(&tc->t_tcp_node);
+	spin_unlock_irqrestore(&rds_tcp_conn_lock, flags);
+
+	kmem_cache_free(rds_tcp_conn_slab, tc);
+}
+
 static int rds_tcp_conn_alloc(struct rds_connection *conn, gfp_t gfp)
 {
 	struct rds_tcp_connection *tc;
-	int i;
+	int i, j;
+	int ret = 0;
 
 	for (i = 0; i < RDS_MPATH_WORKERS; i++) {
 		tc = kmem_cache_alloc(rds_tcp_conn_slab, gfp);
-		if (!tc)
-			return -ENOMEM;
-
+		if (!tc) {
+			ret = -ENOMEM;
+			break;
+		}
 		mutex_init(&tc->t_conn_path_lock);
 		tc->t_sock = NULL;
 		tc->t_tinc = NULL;
@@ -290,26 +307,17 @@ static int rds_tcp_conn_alloc(struct rds_connection *conn, gfp_t gfp)
 		tc->t_cpath = &conn->c_path[i];
 
 		spin_lock_irq(&rds_tcp_conn_lock);
+		tc->t_tcp_node_detached = false;
 		list_add_tail(&tc->t_tcp_node, &rds_tcp_conn_list);
 		spin_unlock_irq(&rds_tcp_conn_lock);
 		rdsdebug("rds_conn_path [%d] tc %p\n", i,
 			 conn->c_path[i].cp_transport_data);
 	}
-
-	return 0;
-}
-
-static void rds_tcp_conn_free(void *arg)
-{
-	struct rds_tcp_connection *tc = arg;
-	unsigned long flags;
-	rdsdebug("freeing tc %p\n", tc);
-
-	spin_lock_irqsave(&rds_tcp_conn_lock, flags);
-	list_del(&tc->t_tcp_node);
-	spin_unlock_irqrestore(&rds_tcp_conn_lock, flags);
-
-	kmem_cache_free(rds_tcp_conn_slab, tc);
+	if (ret) {
+		for (j = 0; j < i; j++)
+			rds_tcp_conn_free(conn->c_path[j].cp_transport_data);
+	}
+	return ret;
 }
 
 static bool list_has_conn(struct list_head *list, struct rds_connection *conn)
@@ -495,27 +503,6 @@ static struct pernet_operations rds_tcp_net_ops = {
 	.size = sizeof(struct rds_tcp_net),
 };
 
-/* explicitly send a RST on each socket, thereby releasing any socket refcnts
- * that may otherwise hold up netns deletion.
- */
-static void rds_tcp_conn_paths_destroy(struct rds_connection *conn)
-{
-	struct rds_conn_path *cp;
-	struct rds_tcp_connection *tc;
-	int i;
-	struct sock *sk;
-
-	for (i = 0; i < RDS_MPATH_WORKERS; i++) {
-		cp = &conn->c_path[i];
-		tc = cp->cp_transport_data;
-		if (!tc->t_sock)
-			continue;
-		sk = tc->t_sock->sk;
-		sk->sk_prot->disconnect(sk, 0);
-		tcp_done(sk);
-	}
-}
-
 static void rds_tcp_kill_sock(struct net *net)
 {
 	struct rds_tcp_connection *tc, *_tc;
@@ -527,18 +514,20 @@ static void rds_tcp_kill_sock(struct net *net)
 	rds_tcp_listen_stop(lsock, &rtn->rds_tcp_accept_w);
 	spin_lock_irq(&rds_tcp_conn_lock);
 	list_for_each_entry_safe(tc, _tc, &rds_tcp_conn_list, t_tcp_node) {
-		struct net *c_net = tc->t_cpath->cp_conn->c_net;
+		struct net *c_net = read_pnet(&tc->t_cpath->cp_conn->c_net);
 
 		if (net != c_net || !tc->t_sock)
 			continue;
-		if (!list_has_conn(&tmp_list, tc->t_cpath->cp_conn))
+		if (!list_has_conn(&tmp_list, tc->t_cpath->cp_conn)) {
 			list_move_tail(&tc->t_tcp_node, &tmp_list);
+		} else {
+			list_del(&tc->t_tcp_node);
+			tc->t_tcp_node_detached = true;
+		}
 	}
 	spin_unlock_irq(&rds_tcp_conn_lock);
-	list_for_each_entry_safe(tc, _tc, &tmp_list, t_tcp_node) {
-		rds_tcp_conn_paths_destroy(tc->t_cpath->cp_conn);
+	list_for_each_entry_safe(tc, _tc, &tmp_list, t_tcp_node)
 		rds_conn_destroy(tc->t_cpath->cp_conn);
-	}
 }
 
 void *rds_tcp_listen_sock_def_readable(struct net *net)
@@ -586,7 +575,7 @@ static void rds_tcp_sysctl_reset(struct net *net)
 
 	spin_lock_irq(&rds_tcp_conn_lock);
 	list_for_each_entry_safe(tc, _tc, &rds_tcp_conn_list, t_tcp_node) {
-		struct net *c_net = tc->t_cpath->cp_conn->c_net;
+		struct net *c_net = read_pnet(&tc->t_cpath->cp_conn->c_net);
 
 		if (net != c_net || !tc->t_sock)
 			continue;
diff --git a/net/rds/tcp.h b/net/rds/tcp.h
index 1aafbf7..e7858ee 100644
--- a/net/rds/tcp.h
+++ b/net/rds/tcp.h
@@ -12,6 +12,7 @@ struct rds_tcp_incoming {
 struct rds_tcp_connection {
 
 	struct list_head	t_tcp_node;
+	bool			t_tcp_node_detached;
 	struct rds_conn_path	*t_cpath;
 	/* t_conn_path_lock synchronizes the connection establishment between
 	 * rds_tcp_accept_one and rds_tcp_conn_path_connect
diff --git a/net/rds/tcp_connect.c b/net/rds/tcp_connect.c
index 46f74da..534c67a 100644
--- a/net/rds/tcp_connect.c
+++ b/net/rds/tcp_connect.c
@@ -170,7 +170,7 @@ void rds_tcp_conn_path_shutdown(struct rds_conn_path *cp)
 		 cp->cp_conn, tc, sock);
 
 	if (sock) {
-		if (cp->cp_conn->c_destroy_in_prog)
+		if (test_bit(RDS_DESTROY_PENDING, &cp->cp_flags))
 			rds_tcp_set_linger(sock);
 		sock->ops->shutdown(sock, RCV_SHUTDOWN | SEND_SHUTDOWN);
 		lock_sock(sock->sk);
diff --git a/net/rds/tcp_recv.c b/net/rds/tcp_recv.c
index e006ef8..dd707b9 100644
--- a/net/rds/tcp_recv.c
+++ b/net/rds/tcp_recv.c
@@ -321,8 +321,12 @@ void rds_tcp_data_ready(struct sock *sk)
 	ready = tc->t_orig_data_ready;
 	rds_tcp_stats_inc(s_tcp_data_ready_calls);
 
-	if (rds_tcp_read_sock(cp, GFP_ATOMIC) == -ENOMEM)
-		queue_delayed_work(rds_wq, &cp->cp_recv_w, 0);
+	if (rds_tcp_read_sock(cp, GFP_ATOMIC) == -ENOMEM) {
+		rcu_read_lock();
+		if (!test_bit(RDS_DESTROY_PENDING, &cp->cp_flags))
+			queue_delayed_work(rds_wq, &cp->cp_recv_w, 0);
+		rcu_read_unlock();
+	}
 out:
 	read_unlock_bh(&sk->sk_callback_lock);
 	ready(sk);
diff --git a/net/rds/tcp_send.c b/net/rds/tcp_send.c
index dc860d1..73c7476 100644
--- a/net/rds/tcp_send.c
+++ b/net/rds/tcp_send.c
@@ -202,8 +202,11 @@ void rds_tcp_write_space(struct sock *sk)
 	tc->t_last_seen_una = rds_tcp_snd_una(tc);
 	rds_send_path_drop_acked(cp, rds_tcp_snd_una(tc), rds_tcp_is_acked);
 
-	if ((refcount_read(&sk->sk_wmem_alloc) << 1) <= sk->sk_sndbuf)
+	rcu_read_lock();
+	if ((refcount_read(&sk->sk_wmem_alloc) << 1) <= sk->sk_sndbuf &&
+	    !test_bit(RDS_DESTROY_PENDING, &cp->cp_flags))
 		queue_delayed_work(rds_wq, &cp->cp_send_w, 0);
+	rcu_read_unlock();
 
 out:
 	read_unlock_bh(&sk->sk_callback_lock);
diff --git a/net/rds/threads.c b/net/rds/threads.c
index f121daa..eb76db1 100644
--- a/net/rds/threads.c
+++ b/net/rds/threads.c
@@ -87,8 +87,12 @@ void rds_connect_path_complete(struct rds_conn_path *cp, int curr)
 
 	cp->cp_reconnect_jiffies = 0;
 	set_bit(0, &cp->cp_conn->c_map_queued);
-	queue_delayed_work(rds_wq, &cp->cp_send_w, 0);
-	queue_delayed_work(rds_wq, &cp->cp_recv_w, 0);
+	rcu_read_lock();
+	if (!test_bit(RDS_DESTROY_PENDING, &cp->cp_flags)) {
+		queue_delayed_work(rds_wq, &cp->cp_send_w, 0);
+		queue_delayed_work(rds_wq, &cp->cp_recv_w, 0);
+	}
+	rcu_read_unlock();
 }
 EXPORT_SYMBOL_GPL(rds_connect_path_complete);
 
@@ -133,7 +137,10 @@ void rds_queue_reconnect(struct rds_conn_path *cp)
 	set_bit(RDS_RECONNECT_PENDING, &cp->cp_flags);
 	if (cp->cp_reconnect_jiffies == 0) {
 		cp->cp_reconnect_jiffies = rds_sysctl_reconnect_min_jiffies;
-		queue_delayed_work(rds_wq, &cp->cp_conn_w, 0);
+		rcu_read_lock();
+		if (!test_bit(RDS_DESTROY_PENDING, &cp->cp_flags))
+			queue_delayed_work(rds_wq, &cp->cp_conn_w, 0);
+		rcu_read_unlock();
 		return;
 	}
 
@@ -141,8 +148,11 @@ void rds_queue_reconnect(struct rds_conn_path *cp)
 	rdsdebug("%lu delay %lu ceil conn %p for %pI4 -> %pI4\n",
 		 rand % cp->cp_reconnect_jiffies, cp->cp_reconnect_jiffies,
 		 conn, &conn->c_laddr, &conn->c_faddr);
-	queue_delayed_work(rds_wq, &cp->cp_conn_w,
-			   rand % cp->cp_reconnect_jiffies);
+	rcu_read_lock();
+	if (!test_bit(RDS_DESTROY_PENDING, &cp->cp_flags))
+		queue_delayed_work(rds_wq, &cp->cp_conn_w,
+				   rand % cp->cp_reconnect_jiffies);
+	rcu_read_unlock();
 
 	cp->cp_reconnect_jiffies = min(cp->cp_reconnect_jiffies * 2,
 					rds_sysctl_reconnect_max_jiffies);
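
Every queue_delayed_work() call in the RDS paths above now sits inside
the same rcu_read_lock()/RDS_DESTROY_PENDING guard; the
rcu_read_lock() pairs with the synchronize_rcu() added to
rds_conn_path_destroy(), so once that grace period has elapsed no new
work can be queued against a dying path. A hypothetical helper making
the idiom explicit (not part of the patch):

	static void rds_cond_queue_delayed_work(struct rds_conn_path *cp,
						struct delayed_work *w,
						unsigned long delay)
	{
		rcu_read_lock();
		if (!test_bit(RDS_DESTROY_PENDING, &cp->cp_flags))
			queue_delayed_work(rds_wq, w, delay);
		rcu_read_unlock();
	}
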
diff --git a/net/sched/act_api.c b/net/sched/act_api.c
index 4d33a50..52622a3 100644
--- a/net/sched/act_api.c
+++ b/net/sched/act_api.c
@@ -99,7 +99,7 @@ int __tcf_idr_release(struct tc_action *p, bool bind, bool strict)
 		p->tcfa_refcnt--;
 		if (p->tcfa_bindcnt <= 0 && p->tcfa_refcnt <= 0) {
 			if (p->ops->cleanup)
-				p->ops->cleanup(p, bind);
+				p->ops->cleanup(p);
 			tcf_idr_remove(p->idrinfo, p);
 			ret = ACT_P_DELETED;
 		}
diff --git a/net/sched/act_bpf.c b/net/sched/act_bpf.c
index 5ef8ce8c..b3f2c15 100644
--- a/net/sched/act_bpf.c
+++ b/net/sched/act_bpf.c
@@ -357,7 +357,7 @@ static int tcf_bpf_init(struct net *net, struct nlattr *nla,
 	return ret;
 }
 
-static void tcf_bpf_cleanup(struct tc_action *act, int bind)
+static void tcf_bpf_cleanup(struct tc_action *act)
 {
 	struct tcf_bpf_cfg tmp;
 
@@ -401,16 +401,14 @@ static __net_init int bpf_init_net(struct net *net)
 	return tc_action_net_init(tn, &act_bpf_ops);
 }
 
-static void __net_exit bpf_exit_net(struct net *net)
+static void __net_exit bpf_exit_net(struct list_head *net_list)
 {
-	struct tc_action_net *tn = net_generic(net, bpf_net_id);
-
-	tc_action_net_exit(tn);
+	tc_action_net_exit(net_list, bpf_net_id);
 }
 
 static struct pernet_operations bpf_net_ops = {
 	.init = bpf_init_net,
-	.exit = bpf_exit_net,
+	.exit_batch = bpf_exit_net,
 	.id   = &bpf_net_id,
 	.size = sizeof(struct tc_action_net),
 };
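
All of the act_* conversions in this series follow this exact shape:
.exit becomes .exit_batch and the per-net body collapses into
tc_action_net_exit(net_list, <id>). The new helper itself is not in
this hunk; presumably it walks the batch once under RTNL and performs
the old per-net teardown, roughly like so (a sketch, assuming the
existing tcf_idrinfo_destroy() teardown is kept):

	static inline void tc_action_net_exit(struct list_head *net_list,
					      unsigned int id)
	{
		struct net *net;

		rtnl_lock();
		list_for_each_entry(net, net_list, exit_list) {
			struct tc_action_net *tn = net_generic(net, id);

			tcf_idrinfo_destroy(tn->ops, tn->idrinfo);
			kfree(tn->idrinfo);
		}
		rtnl_unlock();
	}
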
diff --git a/net/sched/act_connmark.c b/net/sched/act_connmark.c
index 10b7a88..2b15ba8 100644
--- a/net/sched/act_connmark.c
+++ b/net/sched/act_connmark.c
@@ -209,16 +209,14 @@ static __net_init int connmark_init_net(struct net *net)
 	return tc_action_net_init(tn, &act_connmark_ops);
 }
 
-static void __net_exit connmark_exit_net(struct net *net)
+static void __net_exit connmark_exit_net(struct list_head *net_list)
 {
-	struct tc_action_net *tn = net_generic(net, connmark_net_id);
-
-	tc_action_net_exit(tn);
+	tc_action_net_exit(net_list, connmark_net_id);
 }
 
 static struct pernet_operations connmark_net_ops = {
 	.init = connmark_init_net,
-	.exit = connmark_exit_net,
+	.exit_batch = connmark_exit_net,
 	.id   = &connmark_net_id,
 	.size = sizeof(struct tc_action_net),
 };
diff --git a/net/sched/act_csum.c b/net/sched/act_csum.c
index d836f99..af4b8ec 100644
--- a/net/sched/act_csum.c
+++ b/net/sched/act_csum.c
@@ -635,16 +635,14 @@ static __net_init int csum_init_net(struct net *net)
 	return tc_action_net_init(tn, &act_csum_ops);
 }
 
-static void __net_exit csum_exit_net(struct net *net)
+static void __net_exit csum_exit_net(struct list_head *net_list)
 {
-	struct tc_action_net *tn = net_generic(net, csum_net_id);
-
-	tc_action_net_exit(tn);
+	tc_action_net_exit(net_list, csum_net_id);
 }
 
 static struct pernet_operations csum_net_ops = {
 	.init = csum_init_net,
-	.exit = csum_exit_net,
+	.exit_batch = csum_exit_net,
 	.id   = &csum_net_id,
 	.size = sizeof(struct tc_action_net),
 };
diff --git a/net/sched/act_gact.c b/net/sched/act_gact.c
index a0ac42b..b56986d 100644
--- a/net/sched/act_gact.c
+++ b/net/sched/act_gact.c
@@ -235,16 +235,14 @@ static __net_init int gact_init_net(struct net *net)
 	return tc_action_net_init(tn, &act_gact_ops);
 }
 
-static void __net_exit gact_exit_net(struct net *net)
+static void __net_exit gact_exit_net(struct list_head *net_list)
 {
-	struct tc_action_net *tn = net_generic(net, gact_net_id);
-
-	tc_action_net_exit(tn);
+	tc_action_net_exit(net_list, gact_net_id);
 }
 
 static struct pernet_operations gact_net_ops = {
 	.init = gact_init_net,
-	.exit = gact_exit_net,
+	.exit_batch = gact_exit_net,
 	.id   = &gact_net_id,
 	.size = sizeof(struct tc_action_net),
 };
diff --git a/net/sched/act_ife.c b/net/sched/act_ife.c
index 3007cb1..5954e99 100644
--- a/net/sched/act_ife.c
+++ b/net/sched/act_ife.c
@@ -387,7 +387,7 @@ static int dump_metalist(struct sk_buff *skb, struct tcf_ife_info *ife)
 }
 
 /* under ife->tcf_lock */
-static void _tcf_ife_cleanup(struct tc_action *a, int bind)
+static void _tcf_ife_cleanup(struct tc_action *a)
 {
 	struct tcf_ife_info *ife = to_ife(a);
 	struct tcf_meta_info *e, *n;
@@ -405,13 +405,13 @@ static void _tcf_ife_cleanup(struct tc_action *a, int bind)
 	}
 }
 
-static void tcf_ife_cleanup(struct tc_action *a, int bind)
+static void tcf_ife_cleanup(struct tc_action *a)
 {
 	struct tcf_ife_info *ife = to_ife(a);
 	struct tcf_ife_params *p;
 
 	spin_lock_bh(&ife->tcf_lock);
-	_tcf_ife_cleanup(a, bind);
+	_tcf_ife_cleanup(a);
 	spin_unlock_bh(&ife->tcf_lock);
 
 	p = rcu_dereference_protected(ife->params, 1);
@@ -546,7 +546,7 @@ static int tcf_ife_init(struct net *net, struct nlattr *nla,
 			if (exists)
 				tcf_idr_release(*a, bind);
 			if (ret == ACT_P_CREATED)
-				_tcf_ife_cleanup(*a, bind);
+				_tcf_ife_cleanup(*a);
 
 			if (exists)
 				spin_unlock_bh(&ife->tcf_lock);
@@ -567,7 +567,7 @@ static int tcf_ife_init(struct net *net, struct nlattr *nla,
 		err = use_all_metadata(ife);
 		if (err) {
 			if (ret == ACT_P_CREATED)
-				_tcf_ife_cleanup(*a, bind);
+				_tcf_ife_cleanup(*a);
 
 			if (exists)
 				spin_unlock_bh(&ife->tcf_lock);
@@ -858,16 +858,14 @@ static __net_init int ife_init_net(struct net *net)
 	return tc_action_net_init(tn, &act_ife_ops);
 }
 
-static void __net_exit ife_exit_net(struct net *net)
+static void __net_exit ife_exit_net(struct list_head *net_list)
 {
-	struct tc_action_net *tn = net_generic(net, ife_net_id);
-
-	tc_action_net_exit(tn);
+	tc_action_net_exit(net_list, ife_net_id);
 }
 
 static struct pernet_operations ife_net_ops = {
 	.init = ife_init_net,
-	.exit = ife_exit_net,
+	.exit_batch = ife_exit_net,
 	.id   = &ife_net_id,
 	.size = sizeof(struct tc_action_net),
 };
diff --git a/net/sched/act_ipt.c b/net/sched/act_ipt.c
index d9e399a..06e380a 100644
--- a/net/sched/act_ipt.c
+++ b/net/sched/act_ipt.c
@@ -77,7 +77,7 @@ static void ipt_destroy_target(struct xt_entry_target *t)
 	module_put(par.target->me);
 }
 
-static void tcf_ipt_release(struct tc_action *a, int bind)
+static void tcf_ipt_release(struct tc_action *a)
 {
 	struct tcf_ipt *ipt = to_ipt(a);
 	ipt_destroy_target(ipt->tcfi_t);
@@ -337,16 +337,14 @@ static __net_init int ipt_init_net(struct net *net)
 	return tc_action_net_init(tn, &act_ipt_ops);
 }
 
-static void __net_exit ipt_exit_net(struct net *net)
+static void __net_exit ipt_exit_net(struct list_head *net_list)
 {
-	struct tc_action_net *tn = net_generic(net, ipt_net_id);
-
-	tc_action_net_exit(tn);
+	tc_action_net_exit(net_list, ipt_net_id);
 }
 
 static struct pernet_operations ipt_net_ops = {
 	.init = ipt_init_net,
-	.exit = ipt_exit_net,
+	.exit_batch = ipt_exit_net,
 	.id   = &ipt_net_id,
 	.size = sizeof(struct tc_action_net),
 };
@@ -387,16 +385,14 @@ static __net_init int xt_init_net(struct net *net)
 	return tc_action_net_init(tn, &act_xt_ops);
 }
 
-static void __net_exit xt_exit_net(struct net *net)
+static void __net_exit xt_exit_net(struct list_head *net_list)
 {
-	struct tc_action_net *tn = net_generic(net, xt_net_id);
-
-	tc_action_net_exit(tn);
+	tc_action_net_exit(net_list, xt_net_id);
 }
 
 static struct pernet_operations xt_net_ops = {
 	.init = xt_init_net,
-	.exit = xt_exit_net,
+	.exit_batch = xt_exit_net,
 	.id   = &xt_net_id,
 	.size = sizeof(struct tc_action_net),
 };
diff --git a/net/sched/act_mirred.c b/net/sched/act_mirred.c
index 08b6184..e6ff88f 100644
--- a/net/sched/act_mirred.c
+++ b/net/sched/act_mirred.c
@@ -29,7 +29,6 @@
 #include <net/tc_act/tc_mirred.h>
 
 static LIST_HEAD(mirred_list);
-static DEFINE_SPINLOCK(mirred_list_lock);
 
 static bool tcf_mirred_is_act_redirect(int action)
 {
@@ -50,18 +49,15 @@ static bool tcf_mirred_act_wants_ingress(int action)
 	}
 }
 
-static void tcf_mirred_release(struct tc_action *a, int bind)
+static void tcf_mirred_release(struct tc_action *a)
 {
 	struct tcf_mirred *m = to_mirred(a);
 	struct net_device *dev;
 
-	/* We could be called either in a RCU callback or with RTNL lock held. */
-	spin_lock_bh(&mirred_list_lock);
 	list_del(&m->tcfm_list);
-	dev = rcu_dereference_protected(m->tcfm_dev, 1);
+	dev = rtnl_dereference(m->tcfm_dev);
 	if (dev)
 		dev_put(dev);
-	spin_unlock_bh(&mirred_list_lock);
 }
 
 static const struct nla_policy mirred_policy[TCA_MIRRED_MAX + 1] = {
@@ -139,8 +135,6 @@ static int tcf_mirred_init(struct net *net, struct nlattr *nla,
 	m->tcf_action = parm->action;
 	m->tcfm_eaction = parm->eaction;
 	if (dev != NULL) {
-		m->tcfm_ifindex = parm->ifindex;
-		m->net = net;
 		if (ret != ACT_P_CREATED)
 			dev_put(rcu_dereference_protected(m->tcfm_dev, 1));
 		dev_hold(dev);
@@ -149,9 +143,7 @@ static int tcf_mirred_init(struct net *net, struct nlattr *nla,
 	}
 
 	if (ret == ACT_P_CREATED) {
-		spin_lock_bh(&mirred_list_lock);
 		list_add(&m->tcfm_list, &mirred_list);
-		spin_unlock_bh(&mirred_list_lock);
 		tcf_idr_insert(tn, *a);
 	}
 
@@ -247,13 +239,14 @@ static int tcf_mirred_dump(struct sk_buff *skb, struct tc_action *a, int bind,
 {
 	unsigned char *b = skb_tail_pointer(skb);
 	struct tcf_mirred *m = to_mirred(a);
+	struct net_device *dev = rtnl_dereference(m->tcfm_dev);
 	struct tc_mirred opt = {
 		.index   = m->tcf_index,
 		.action  = m->tcf_action,
 		.refcnt  = m->tcf_refcnt - ref,
 		.bindcnt = m->tcf_bindcnt - bind,
 		.eaction = m->tcfm_eaction,
-		.ifindex = m->tcfm_ifindex,
+		.ifindex = dev ? dev->ifindex : 0,
 	};
 	struct tcf_t t;
 
@@ -294,7 +287,6 @@ static int mirred_device_event(struct notifier_block *unused,
 
 	ASSERT_RTNL();
 	if (event == NETDEV_UNREGISTER) {
-		spin_lock_bh(&mirred_list_lock);
 		list_for_each_entry(m, &mirred_list, tcfm_list) {
 			if (rcu_access_pointer(m->tcfm_dev) == dev) {
 				dev_put(dev);
@@ -304,7 +296,6 @@ static int mirred_device_event(struct notifier_block *unused,
 				RCU_INIT_POINTER(m->tcfm_dev, NULL);
 			}
 		}
-		spin_unlock_bh(&mirred_list_lock);
 	}
 
 	return NOTIFY_DONE;
@@ -318,7 +309,7 @@ static struct net_device *tcf_mirred_get_dev(const struct tc_action *a)
 {
 	struct tcf_mirred *m = to_mirred(a);
 
-	return __dev_get_by_index(m->net, m->tcfm_ifindex);
+	return rtnl_dereference(m->tcfm_dev);
 }
 
 static struct tc_action_ops act_mirred_ops = {
@@ -343,16 +334,14 @@ static __net_init int mirred_init_net(struct net *net)
 	return tc_action_net_init(tn, &act_mirred_ops);
 }
 
-static void __net_exit mirred_exit_net(struct net *net)
+static void __net_exit mirred_exit_net(struct list_head *net_list)
 {
-	struct tc_action_net *tn = net_generic(net, mirred_net_id);
-
-	tc_action_net_exit(tn);
+	tc_action_net_exit(net_list, mirred_net_id);
 }
 
 static struct pernet_operations mirred_net_ops = {
 	.init = mirred_init_net,
-	.exit = mirred_exit_net,
+	.exit_batch = mirred_exit_net,
 	.id   = &mirred_net_id,
 	.size = sizeof(struct tc_action_net),
 };
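
act_mirred additionally drops its private mirred_list_lock: with these
changes every reader and writer of mirred_list and tcfm_dev (init,
release, dump, and the NETDEV_UNREGISTER notifier) runs with RTNL
held, so rtnl_dereference() is the correct accessor and the spinlock
was pure overhead. A small illustration of the resulting access
pattern; the helper is hypothetical, the accessors are the ones the
diff uses:

    /* Hypothetical helper; assumes RTNL is held, which is exactly
     * what rtnl_dereference() asserts under lockdep.
     */
    static int tcf_mirred_ifindex(const struct tcf_mirred *m)
    {
            struct net_device *dev = rtnl_dereference(m->tcfm_dev);

            return dev ? dev->ifindex : 0;
    }
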
diff --git a/net/sched/act_nat.c b/net/sched/act_nat.c
index c365d01..98c6a4b 100644
--- a/net/sched/act_nat.c
+++ b/net/sched/act_nat.c
@@ -310,16 +310,14 @@ static __net_init int nat_init_net(struct net *net)
 	return tc_action_net_init(tn, &act_nat_ops);
 }
 
-static void __net_exit nat_exit_net(struct net *net)
+static void __net_exit nat_exit_net(struct list_head *net_list)
 {
-	struct tc_action_net *tn = net_generic(net, nat_net_id);
-
-	tc_action_net_exit(tn);
+	tc_action_net_exit(net_list, nat_net_id);
 }
 
 static struct pernet_operations nat_net_ops = {
 	.init = nat_init_net,
-	.exit = nat_exit_net,
+	.exit_batch = nat_exit_net,
 	.id   = &nat_net_id,
 	.size = sizeof(struct tc_action_net),
 };
diff --git a/net/sched/act_pedit.c b/net/sched/act_pedit.c
index 491fe5d..349beaf 100644
--- a/net/sched/act_pedit.c
+++ b/net/sched/act_pedit.c
@@ -216,7 +216,7 @@ static int tcf_pedit_init(struct net *net, struct nlattr *nla,
 	return ret;
 }
 
-static void tcf_pedit_cleanup(struct tc_action *a, int bind)
+static void tcf_pedit_cleanup(struct tc_action *a)
 {
 	struct tcf_pedit *p = to_pedit(a);
 	struct tc_pedit_key *keys = p->tcfp_keys;
@@ -453,16 +453,14 @@ static __net_init int pedit_init_net(struct net *net)
 	return tc_action_net_init(tn, &act_pedit_ops);
 }
 
-static void __net_exit pedit_exit_net(struct net *net)
+static void __net_exit pedit_exit_net(struct list_head *net_list)
 {
-	struct tc_action_net *tn = net_generic(net, pedit_net_id);
-
-	tc_action_net_exit(tn);
+	tc_action_net_exit(net_list, pedit_net_id);
 }
 
 static struct pernet_operations pedit_net_ops = {
 	.init = pedit_init_net,
-	.exit = pedit_exit_net,
+	.exit_batch = pedit_exit_net,
 	.id   = &pedit_net_id,
 	.size = sizeof(struct tc_action_net),
 };
diff --git a/net/sched/act_police.c b/net/sched/act_police.c
index 3bb2ebf..95d3c90 100644
--- a/net/sched/act_police.c
+++ b/net/sched/act_police.c
@@ -118,13 +118,13 @@ static int tcf_act_police_init(struct net *net, struct nlattr *nla,
 	police = to_police(*a);
 	if (parm->rate.rate) {
 		err = -ENOMEM;
-		R_tab = qdisc_get_rtab(&parm->rate, tb[TCA_POLICE_RATE]);
+		R_tab = qdisc_get_rtab(&parm->rate, tb[TCA_POLICE_RATE], NULL);
 		if (R_tab == NULL)
 			goto failure;
 
 		if (parm->peakrate.rate) {
 			P_tab = qdisc_get_rtab(&parm->peakrate,
-					       tb[TCA_POLICE_PEAKRATE]);
+					       tb[TCA_POLICE_PEAKRATE], NULL);
 			if (P_tab == NULL)
 				goto failure;
 		}
@@ -334,16 +334,14 @@ static __net_init int police_init_net(struct net *net)
 	return tc_action_net_init(tn, &act_police_ops);
 }
 
-static void __net_exit police_exit_net(struct net *net)
+static void __net_exit police_exit_net(struct list_head *net_list)
 {
-	struct tc_action_net *tn = net_generic(net, police_net_id);
-
-	tc_action_net_exit(tn);
+	tc_action_net_exit(net_list, police_net_id);
 }
 
 static struct pernet_operations police_net_ops = {
 	.init = police_init_net,
-	.exit = police_exit_net,
+	.exit_batch = police_exit_net,
 	.id   = &police_net_id,
 	.size = sizeof(struct tc_action_net),
 };
diff --git a/net/sched/act_sample.c b/net/sched/act_sample.c
index 9438969..1ba0df23 100644
--- a/net/sched/act_sample.c
+++ b/net/sched/act_sample.c
@@ -96,7 +96,7 @@ static int tcf_sample_init(struct net *net, struct nlattr *nla,
 	return ret;
 }
 
-static void tcf_sample_cleanup(struct tc_action *a, int bind)
+static void tcf_sample_cleanup(struct tc_action *a)
 {
 	struct tcf_sample *s = to_sample(a);
 	struct psample_group *psample_group;
@@ -236,16 +236,14 @@ static __net_init int sample_init_net(struct net *net)
 	return tc_action_net_init(tn, &act_sample_ops);
 }
 
-static void __net_exit sample_exit_net(struct net *net)
+static void __net_exit sample_exit_net(struct list_head *net_list)
 {
-	struct tc_action_net *tn = net_generic(net, sample_net_id);
-
-	tc_action_net_exit(tn);
+	tc_action_net_exit(net_list, sample_net_id);
 }
 
 static struct pernet_operations sample_net_ops = {
 	.init = sample_init_net,
-	.exit = sample_exit_net,
+	.exit_batch = sample_exit_net,
 	.id   = &sample_net_id,
 	.size = sizeof(struct tc_action_net),
 };
diff --git a/net/sched/act_simple.c b/net/sched/act_simple.c
index e7b57e5..425eac1 100644
--- a/net/sched/act_simple.c
+++ b/net/sched/act_simple.c
@@ -47,7 +47,7 @@ static int tcf_simp(struct sk_buff *skb, const struct tc_action *a,
 	return d->tcf_action;
 }
 
-static void tcf_simp_release(struct tc_action *a, int bind)
+static void tcf_simp_release(struct tc_action *a)
 {
 	struct tcf_defact *d = to_defact(a);
 	kfree(d->tcfd_defdata);
@@ -204,16 +204,14 @@ static __net_init int simp_init_net(struct net *net)
 	return tc_action_net_init(tn, &act_simp_ops);
 }
 
-static void __net_exit simp_exit_net(struct net *net)
+static void __net_exit simp_exit_net(struct list_head *net_list)
 {
-	struct tc_action_net *tn = net_generic(net, simp_net_id);
-
-	tc_action_net_exit(tn);
+	tc_action_net_exit(net_list, simp_net_id);
 }
 
 static struct pernet_operations simp_net_ops = {
 	.init = simp_init_net,
-	.exit = simp_exit_net,
+	.exit_batch = simp_exit_net,
 	.id   = &simp_net_id,
 	.size = sizeof(struct tc_action_net),
 };
diff --git a/net/sched/act_skbedit.c b/net/sched/act_skbedit.c
index 59949d6..5a3f691 100644
--- a/net/sched/act_skbedit.c
+++ b/net/sched/act_skbedit.c
@@ -241,16 +241,14 @@ static __net_init int skbedit_init_net(struct net *net)
 	return tc_action_net_init(tn, &act_skbedit_ops);
 }
 
-static void __net_exit skbedit_exit_net(struct net *net)
+static void __net_exit skbedit_exit_net(struct list_head *net_list)
 {
-	struct tc_action_net *tn = net_generic(net, skbedit_net_id);
-
-	tc_action_net_exit(tn);
+	tc_action_net_exit(net_list, skbedit_net_id);
 }
 
 static struct pernet_operations skbedit_net_ops = {
 	.init = skbedit_init_net,
-	.exit = skbedit_exit_net,
+	.exit_batch = skbedit_exit_net,
 	.id   = &skbedit_net_id,
 	.size = sizeof(struct tc_action_net),
 };
diff --git a/net/sched/act_skbmod.c b/net/sched/act_skbmod.c
index b642ad3..fa97526 100644
--- a/net/sched/act_skbmod.c
+++ b/net/sched/act_skbmod.c
@@ -184,7 +184,7 @@ static int tcf_skbmod_init(struct net *net, struct nlattr *nla,
 	return ret;
 }
 
-static void tcf_skbmod_cleanup(struct tc_action *a, int bind)
+static void tcf_skbmod_cleanup(struct tc_action *a)
 {
 	struct tcf_skbmod *d = to_skbmod(a);
 	struct tcf_skbmod_params  *p;
@@ -266,16 +266,14 @@ static __net_init int skbmod_init_net(struct net *net)
 	return tc_action_net_init(tn, &act_skbmod_ops);
 }
 
-static void __net_exit skbmod_exit_net(struct net *net)
+static void __net_exit skbmod_exit_net(struct list_head *net_list)
 {
-	struct tc_action_net *tn = net_generic(net, skbmod_net_id);
-
-	tc_action_net_exit(tn);
+	tc_action_net_exit(net_list, skbmod_net_id);
 }
 
 static struct pernet_operations skbmod_net_ops = {
 	.init = skbmod_init_net,
-	.exit = skbmod_exit_net,
+	.exit_batch = skbmod_exit_net,
 	.id   = &skbmod_net_id,
 	.size = sizeof(struct tc_action_net),
 };
diff --git a/net/sched/act_tunnel_key.c b/net/sched/act_tunnel_key.c
index 30c9627..0e23aac 100644
--- a/net/sched/act_tunnel_key.c
+++ b/net/sched/act_tunnel_key.c
@@ -201,7 +201,7 @@ static int tunnel_key_init(struct net *net, struct nlattr *nla,
 	return ret;
 }
 
-static void tunnel_key_release(struct tc_action *a, int bind)
+static void tunnel_key_release(struct tc_action *a)
 {
 	struct tcf_tunnel_key *t = to_tunnel_key(a);
 	struct tcf_tunnel_key_params *params;
@@ -325,16 +325,14 @@ static __net_init int tunnel_key_init_net(struct net *net)
 	return tc_action_net_init(tn, &act_tunnel_key_ops);
 }
 
-static void __net_exit tunnel_key_exit_net(struct net *net)
+static void __net_exit tunnel_key_exit_net(struct list_head *net_list)
 {
-	struct tc_action_net *tn = net_generic(net, tunnel_key_net_id);
-
-	tc_action_net_exit(tn);
+	tc_action_net_exit(net_list, tunnel_key_net_id);
 }
 
 static struct pernet_operations tunnel_key_net_ops = {
 	.init = tunnel_key_init_net,
-	.exit = tunnel_key_exit_net,
+	.exit_batch = tunnel_key_exit_net,
 	.id   = &tunnel_key_net_id,
 	.size = sizeof(struct tc_action_net),
 };
diff --git a/net/sched/act_vlan.c b/net/sched/act_vlan.c
index 97f717a..e1a1b3f 100644
--- a/net/sched/act_vlan.c
+++ b/net/sched/act_vlan.c
@@ -219,7 +219,7 @@ static int tcf_vlan_init(struct net *net, struct nlattr *nla,
 	return ret;
 }
 
-static void tcf_vlan_cleanup(struct tc_action *a, int bind)
+static void tcf_vlan_cleanup(struct tc_action *a)
 {
 	struct tcf_vlan *v = to_vlan(a);
 	struct tcf_vlan_params *p;
@@ -301,16 +301,14 @@ static __net_init int vlan_init_net(struct net *net)
 	return tc_action_net_init(tn, &act_vlan_ops);
 }
 
-static void __net_exit vlan_exit_net(struct net *net)
+static void __net_exit vlan_exit_net(struct list_head *net_list)
 {
-	struct tc_action_net *tn = net_generic(net, vlan_net_id);
-
-	tc_action_net_exit(tn);
+	tc_action_net_exit(net_list, vlan_net_id);
 }
 
 static struct pernet_operations vlan_net_ops = {
 	.init = vlan_init_net,
-	.exit = vlan_exit_net,
+	.exit_batch = vlan_exit_net,
 	.id   = &vlan_net_id,
 	.size = sizeof(struct tc_action_net),
 };
diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c
index b9d63d22..6708b69 100644
--- a/net/sched/cls_api.c
+++ b/net/sched/cls_api.c
@@ -217,8 +217,12 @@ static void tcf_chain_flush(struct tcf_chain *chain)
 
 static void tcf_chain_destroy(struct tcf_chain *chain)
 {
+	struct tcf_block *block = chain->block;
+
 	list_del(&chain->list);
 	kfree(chain);
+	if (list_empty(&block->chain_list))
+		kfree(block);
 }
 
 static void tcf_chain_hold(struct tcf_chain *chain)
@@ -277,20 +281,24 @@ static void tcf_block_offload_unbind(struct tcf_block *block, struct Qdisc *q,
 }
 
 int tcf_block_get_ext(struct tcf_block **p_block, struct Qdisc *q,
-		      struct tcf_block_ext_info *ei)
+		      struct tcf_block_ext_info *ei,
+		      struct netlink_ext_ack *extack)
 {
 	struct tcf_block *block = kzalloc(sizeof(*block), GFP_KERNEL);
 	struct tcf_chain *chain;
 	int err;
 
-	if (!block)
+	if (!block) {
+		NL_SET_ERR_MSG(extack, "Memory allocation for block failed");
 		return -ENOMEM;
+	}
 	INIT_LIST_HEAD(&block->chain_list);
 	INIT_LIST_HEAD(&block->cb_list);
 
 	/* Create chain 0 by default, it has to be always present. */
 	chain = tcf_chain_create(block, 0);
 	if (!chain) {
+		NL_SET_ERR_MSG(extack, "Failed to create new tcf chain");
 		err = -ENOMEM;
 		goto err_chain_create;
 	}
@@ -317,7 +325,8 @@ static void tcf_chain_head_change_dflt(struct tcf_proto *tp_head, void *priv)
 }
 
 int tcf_block_get(struct tcf_block **p_block,
-		  struct tcf_proto __rcu **p_filter_chain, struct Qdisc *q)
+		  struct tcf_proto __rcu **p_filter_chain, struct Qdisc *q,
+		  struct netlink_ext_ack *extack)
 {
 	struct tcf_block_ext_info ei = {
 		.chain_head_change = tcf_chain_head_change_dflt,
@@ -325,53 +334,38 @@ int tcf_block_get(struct tcf_block **p_block,
 	};
 
 	WARN_ON(!p_filter_chain);
-	return tcf_block_get_ext(p_block, q, &ei);
+	return tcf_block_get_ext(p_block, q, &ei, extack);
 }
 EXPORT_SYMBOL(tcf_block_get);
 
-static void tcf_block_put_final(struct work_struct *work)
-{
-	struct tcf_block *block = container_of(work, struct tcf_block, work);
-	struct tcf_chain *chain, *tmp;
-
-	rtnl_lock();
-
-	/* At this point, all the chains should have refcnt == 1. */
-	list_for_each_entry_safe(chain, tmp, &block->chain_list, list)
-		tcf_chain_put(chain);
-	rtnl_unlock();
-	kfree(block);
-}
-
 /* XXX: Standalone actions are not allowed to jump to any chain, and bound
  * actions should be all removed after flushing.
  */
 void tcf_block_put_ext(struct tcf_block *block, struct Qdisc *q,
 		       struct tcf_block_ext_info *ei)
 {
-	struct tcf_chain *chain;
+	struct tcf_chain *chain, *tmp;
 
-	if (!block)
-		return;
-	/* Hold a refcnt for all chains, except 0, so that they don't disappear
+	/* Hold a refcnt for all chains, so that they don't disappear
 	 * while we are iterating.
 	 */
+	if (!block)
+		return;
 	list_for_each_entry(chain, &block->chain_list, list)
-		if (chain->index)
-			tcf_chain_hold(chain);
+		tcf_chain_hold(chain);
 
 	list_for_each_entry(chain, &block->chain_list, list)
 		tcf_chain_flush(chain);
 
 	tcf_block_offload_unbind(block, q, ei);
 
-	INIT_WORK(&block->work, tcf_block_put_final);
-	/* Wait for existing RCU callbacks to cool down, make sure their works
-	 * have been queued before this. We can not flush pending works here
-	 * because we are holding the RTNL lock.
-	 */
-	rcu_barrier();
-	tcf_queue_work(&block->work);
+	/* At this point, all the chains should have refcnt >= 1. */
+	list_for_each_entry_safe(chain, tmp, &block->chain_list, list)
+		tcf_chain_put(chain);
+
+	/* Finally, put chain 0 and allow block to be freed. */
+	chain = list_first_entry(&block->chain_list, struct tcf_chain, list);
+	tcf_chain_put(chain);
 }
 EXPORT_SYMBOL(tcf_block_put_ext);
 
@@ -806,7 +800,7 @@ static int tc_ctl_tfilter(struct sk_buff *skb, struct nlmsghdr *n,
 	}
 
 	/* And the last stroke */
-	block = cops->tcf_block(q, cl);
+	block = cops->tcf_block(q, cl, extack);
 	if (!block) {
 		err = -EINVAL;
 		goto errout;
@@ -1053,7 +1047,7 @@ static int tc_dump_tfilter(struct sk_buff *skb, struct netlink_callback *cb)
 		if (cl == 0)
 			goto out;
 	}
-	block = cops->tcf_block(q, cl);
+	block = cops->tcf_block(q, cl, NULL);
 	if (!block)
 		goto out;
 
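The cls_api.c hunks thread a struct netlink_ext_ack through
tcf_block_get()/tcf_block_get_ext() and the ->tcf_block() class op, so
block setup failures reach userspace as a readable message rather than
a bare errno. A sketch of a caller under the new signature; the qdisc
and its private struct are hypothetical, the tcf_block_get() call
matches the diff:

    static int foo_init(struct Qdisc *sch, struct nlattr *opt,
                        struct netlink_ext_ack *extack)
    {
            struct foo_sched_data *q = qdisc_priv(sch);

            /* On failure, extack already carries a message such as
             * "Memory allocation for block failed".
             */
            return tcf_block_get(&q->block, &q->filter_list, sch, extack);
    }
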
diff --git a/net/sched/cls_flower.c b/net/sched/cls_flower.c
index 543a3e8..6132a73 100644
--- a/net/sched/cls_flower.c
+++ b/net/sched/cls_flower.c
@@ -166,6 +166,7 @@ static int fl_classify(struct sk_buff *skb, const struct tcf_proto *tp,
 	 * so do it rather here.
 	 */
 	skb_key.basic.n_proto = skb->protocol;
+	skb_flow_dissect_tunnel_info(skb, &head->dissector, &skb_key);
 	skb_flow_dissect(skb, &head->dissector, &skb_key, 0);
 
 	fl_set_masked_key(&skb_mkey, &skb_key, &head->mask);
diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c
index 0f1eab9..8a04c36 100644
--- a/net/sched/sch_api.c
+++ b/net/sched/sch_api.c
@@ -393,13 +393,16 @@ static __u8 __detect_linklayer(struct tc_ratespec *r, __u32 *rtab)
 static struct qdisc_rate_table *qdisc_rtab_list;
 
 struct qdisc_rate_table *qdisc_get_rtab(struct tc_ratespec *r,
-					struct nlattr *tab)
+					struct nlattr *tab,
+					struct netlink_ext_ack *extack)
 {
 	struct qdisc_rate_table *rtab;
 
 	if (tab == NULL || r->rate == 0 || r->cell_log == 0 ||
-	    nla_len(tab) != TC_RTAB_SIZE)
+	    nla_len(tab) != TC_RTAB_SIZE) {
+		NL_SET_ERR_MSG(extack, "Invalid rate table parameters");
 		return NULL;
+	}
 
 	for (rtab = qdisc_rtab_list; rtab; rtab = rtab->next) {
 		if (!memcmp(&rtab->rate, r, sizeof(struct tc_ratespec)) &&
@@ -418,6 +421,8 @@ struct qdisc_rate_table *qdisc_get_rtab(struct tc_ratespec *r,
 			r->linklayer = __detect_linklayer(r, rtab->data);
 		rtab->next = qdisc_rtab_list;
 		qdisc_rtab_list = rtab;
+	} else {
+		NL_SET_ERR_MSG(extack, "Failed to allocate new qdisc rate table");
 	}
 	return rtab;
 }
@@ -449,7 +454,8 @@ static const struct nla_policy stab_policy[TCA_STAB_MAX + 1] = {
 	[TCA_STAB_DATA] = { .type = NLA_BINARY },
 };
 
-static struct qdisc_size_table *qdisc_get_stab(struct nlattr *opt)
+static struct qdisc_size_table *qdisc_get_stab(struct nlattr *opt,
+					       struct netlink_ext_ack *extack)
 {
 	struct nlattr *tb[TCA_STAB_MAX + 1];
 	struct qdisc_size_table *stab;
@@ -458,23 +464,29 @@ static struct qdisc_size_table *qdisc_get_stab(struct nlattr *opt)
 	u16 *tab = NULL;
 	int err;
 
-	err = nla_parse_nested(tb, TCA_STAB_MAX, opt, stab_policy, NULL);
+	err = nla_parse_nested(tb, TCA_STAB_MAX, opt, stab_policy, extack);
 	if (err < 0)
 		return ERR_PTR(err);
-	if (!tb[TCA_STAB_BASE])
+	if (!tb[TCA_STAB_BASE]) {
+		NL_SET_ERR_MSG(extack, "Size table base attribute is missing");
 		return ERR_PTR(-EINVAL);
+	}
 
 	s = nla_data(tb[TCA_STAB_BASE]);
 
 	if (s->tsize > 0) {
-		if (!tb[TCA_STAB_DATA])
+		if (!tb[TCA_STAB_DATA]) {
+			NL_SET_ERR_MSG(extack, "Size table data attribute is missing");
 			return ERR_PTR(-EINVAL);
+		}
 		tab = nla_data(tb[TCA_STAB_DATA]);
 		tsize = nla_len(tb[TCA_STAB_DATA]) / sizeof(u16);
 	}
 
-	if (tsize != s->tsize || (!tab && tsize > 0))
+	if (tsize != s->tsize || (!tab && tsize > 0)) {
+		NL_SET_ERR_MSG(extack, "Invalid size of size table");
 		return ERR_PTR(-EINVAL);
+	}
 
 	list_for_each_entry(stab, &qdisc_stab_list, list) {
 		if (memcmp(&stab->szopts, s, sizeof(*s)))
@@ -669,7 +681,7 @@ int qdisc_class_hash_init(struct Qdisc_class_hash *clhash)
 	unsigned int size = 4;
 
 	clhash->hash = qdisc_class_hash_alloc(size);
-	if (clhash->hash == NULL)
+	if (!clhash->hash)
 		return -ENOMEM;
 	clhash->hashsize  = size;
 	clhash->hashmask  = size - 1;
@@ -795,11 +807,11 @@ static int tc_fill_qdisc(struct sk_buff *skb, struct Qdisc *q, u32 clid,
 	tcm->tcm_info = refcount_read(&q->refcnt);
 	if (nla_put_string(skb, TCA_KIND, q->ops->id))
 		goto nla_put_failure;
-	if (nla_put_u8(skb, TCA_HW_OFFLOAD, !!(q->flags & TCQ_F_OFFLOADED)))
-		goto nla_put_failure;
 	if (q->ops->dump && q->ops->dump(q, skb) < 0)
 		goto nla_put_failure;
-	qlen = q->q.qlen;
+	if (nla_put_u8(skb, TCA_HW_OFFLOAD, !!(q->flags & TCQ_F_OFFLOADED)))
+		goto nla_put_failure;
+	qlen = qdisc_qlen_sum(q);
 
 	stab = rtnl_dereference(q->stab);
 	if (stab && qdisc_dump_stab(skb, stab) < 0)
@@ -898,7 +910,8 @@ static void notify_and_destroy(struct net *net, struct sk_buff *skb,
 
 static int qdisc_graft(struct net_device *dev, struct Qdisc *parent,
 		       struct sk_buff *skb, struct nlmsghdr *n, u32 classid,
-		       struct Qdisc *new, struct Qdisc *old)
+		       struct Qdisc *new, struct Qdisc *old,
+		       struct netlink_ext_ack *extack)
 {
 	struct Qdisc *q = old;
 	struct net *net = dev_net(dev);
@@ -913,8 +926,10 @@ static int qdisc_graft(struct net_device *dev, struct Qdisc *parent,
 		    (new && new->flags & TCQ_F_INGRESS)) {
 			num_q = 1;
 			ingress = 1;
-			if (!dev_ingress_queue(dev))
+			if (!dev_ingress_queue(dev)) {
+				NL_SET_ERR_MSG(extack, "Device does not have an ingress queue");
 				return -ENOENT;
+			}
 		}
 
 		if (dev->flags & IFF_UP)
@@ -956,14 +971,22 @@ static int qdisc_graft(struct net_device *dev, struct Qdisc *parent,
 	} else {
 		const struct Qdisc_class_ops *cops = parent->ops->cl_ops;
 
+		/* Only support running class lockless if parent is lockless */
+		if (new && (new->flags & TCQ_F_NOLOCK) &&
+		    parent && !(parent->flags & TCQ_F_NOLOCK))
+			new->flags &= ~TCQ_F_NOLOCK;
+
 		err = -EOPNOTSUPP;
 		if (cops && cops->graft) {
 			unsigned long cl = cops->find(parent, classid);
 
-			if (cl)
-				err = cops->graft(parent, cl, new, &old);
-			else
+			if (cl) {
+				err = cops->graft(parent, cl, new, &old,
+						  extack);
+			} else {
+				NL_SET_ERR_MSG(extack, "Specified class not found");
 				err = -ENOENT;
+			}
 		}
 		if (!err)
 			notify_and_destroy(net, skb, n, classid, old, new);
@@ -984,7 +1007,8 @@ static struct lock_class_key qdisc_rx_lock;
 static struct Qdisc *qdisc_create(struct net_device *dev,
 				  struct netdev_queue *dev_queue,
 				  struct Qdisc *p, u32 parent, u32 handle,
-				  struct nlattr **tca, int *errp)
+				  struct nlattr **tca, int *errp,
+				  struct netlink_ext_ack *extack)
 {
 	int err;
 	struct nlattr *kind = tca[TCA_KIND];
@@ -1022,10 +1046,12 @@ static struct Qdisc *qdisc_create(struct net_device *dev,
 #endif
 
 	err = -ENOENT;
-	if (ops == NULL)
+	if (!ops) {
+		NL_SET_ERR_MSG(extack, "Specified qdisc not found");
 		goto err_out;
+	}
 
-	sch = qdisc_alloc(dev_queue, ops);
+	sch = qdisc_alloc(dev_queue, ops, extack);
 	if (IS_ERR(sch)) {
 		err = PTR_ERR(sch);
 		goto err_out2;
@@ -1062,54 +1088,64 @@ static struct Qdisc *qdisc_create(struct net_device *dev,
 		netdev_info(dev, "Caught tx_queue_len zero misconfig\n");
 	}
 
-	if (!ops->init || (err = ops->init(sch, tca[TCA_OPTIONS])) == 0) {
-		if (qdisc_is_percpu_stats(sch)) {
-			sch->cpu_bstats =
-				netdev_alloc_pcpu_stats(struct gnet_stats_basic_cpu);
-			if (!sch->cpu_bstats)
-				goto err_out4;
-
-			sch->cpu_qstats = alloc_percpu(struct gnet_stats_queue);
-			if (!sch->cpu_qstats)
-				goto err_out4;
-		}
-
-		if (tca[TCA_STAB]) {
-			stab = qdisc_get_stab(tca[TCA_STAB]);
-			if (IS_ERR(stab)) {
-				err = PTR_ERR(stab);
-				goto err_out4;
-			}
-			rcu_assign_pointer(sch->stab, stab);
-		}
-		if (tca[TCA_RATE]) {
-			seqcount_t *running;
-
-			err = -EOPNOTSUPP;
-			if (sch->flags & TCQ_F_MQROOT)
-				goto err_out4;
-
-			if ((sch->parent != TC_H_ROOT) &&
-			    !(sch->flags & TCQ_F_INGRESS) &&
-			    (!p || !(p->flags & TCQ_F_MQROOT)))
-				running = qdisc_root_sleeping_running(sch);
-			else
-				running = &sch->running;
-
-			err = gen_new_estimator(&sch->bstats,
-						sch->cpu_bstats,
-						&sch->rate_est,
-						NULL,
-						running,
-						tca[TCA_RATE]);
-			if (err)
-				goto err_out4;
-		}
-
-		qdisc_hash_add(sch, false);
-
-		return sch;
+	if (ops->init) {
+		err = ops->init(sch, tca[TCA_OPTIONS], extack);
+		if (err != 0)
+			goto err_out5;
 	}
+
+	if (qdisc_is_percpu_stats(sch)) {
+		sch->cpu_bstats =
+			netdev_alloc_pcpu_stats(struct gnet_stats_basic_cpu);
+		if (!sch->cpu_bstats)
+			goto err_out4;
+
+		sch->cpu_qstats = alloc_percpu(struct gnet_stats_queue);
+		if (!sch->cpu_qstats)
+			goto err_out4;
+	}
+
+	if (tca[TCA_STAB]) {
+		stab = qdisc_get_stab(tca[TCA_STAB], extack);
+		if (IS_ERR(stab)) {
+			err = PTR_ERR(stab);
+			goto err_out4;
+		}
+		rcu_assign_pointer(sch->stab, stab);
+	}
+	if (tca[TCA_RATE]) {
+		seqcount_t *running;
+
+		err = -EOPNOTSUPP;
+		if (sch->flags & TCQ_F_MQROOT) {
+			NL_SET_ERR_MSG(extack, "Cannot attach rate estimator to a multi-queue root qdisc");
+			goto err_out4;
+		}
+
+		if (sch->parent != TC_H_ROOT &&
+		    !(sch->flags & TCQ_F_INGRESS) &&
+		    (!p || !(p->flags & TCQ_F_MQROOT)))
+			running = qdisc_root_sleeping_running(sch);
+		else
+			running = &sch->running;
+
+		err = gen_new_estimator(&sch->bstats,
+					sch->cpu_bstats,
+					&sch->rate_est,
+					NULL,
+					running,
+					tca[TCA_RATE]);
+		if (err) {
+			NL_SET_ERR_MSG(extack, "Failed to generate new estimator");
+			goto err_out4;
+		}
+	}
+
+	qdisc_hash_add(sch, false);
+
+	return sch;
+
+err_out5:
 	/* ops->init() failed, we call ->destroy() like qdisc_create_dflt() */
 	if (ops->destroy)
 		ops->destroy(sch);
@@ -1135,21 +1171,24 @@ static struct Qdisc *qdisc_create(struct net_device *dev,
 	goto err_out3;
 }
 
-static int qdisc_change(struct Qdisc *sch, struct nlattr **tca)
+static int qdisc_change(struct Qdisc *sch, struct nlattr **tca,
+			struct netlink_ext_ack *extack)
 {
 	struct qdisc_size_table *ostab, *stab = NULL;
 	int err = 0;
 
 	if (tca[TCA_OPTIONS]) {
-		if (sch->ops->change == NULL)
+		if (!sch->ops->change) {
+			NL_SET_ERR_MSG(extack, "Change operation not supported by specified qdisc");
 			return -EINVAL;
-		err = sch->ops->change(sch, tca[TCA_OPTIONS]);
+		}
+		err = sch->ops->change(sch, tca[TCA_OPTIONS], extack);
 		if (err)
 			return err;
 	}
 
 	if (tca[TCA_STAB]) {
-		stab = qdisc_get_stab(tca[TCA_STAB]);
+		stab = qdisc_get_stab(tca[TCA_STAB], extack);
 		if (IS_ERR(stab))
 			return PTR_ERR(stab);
 	}
@@ -1247,8 +1286,10 @@ static int tc_get_qdisc(struct sk_buff *skb, struct nlmsghdr *n,
 		if (clid != TC_H_ROOT) {
 			if (TC_H_MAJ(clid) != TC_H_MAJ(TC_H_INGRESS)) {
 				p = qdisc_lookup(dev, TC_H_MAJ(clid));
-				if (!p)
+				if (!p) {
+					NL_SET_ERR_MSG(extack, "Failed to find qdisc with specified classid");
 					return -ENOENT;
+				}
 				q = qdisc_leaf(p, clid);
 			} else if (dev_ingress_queue(dev)) {
 				q = dev_ingress_queue(dev)->qdisc_sleeping;
@@ -1256,26 +1297,38 @@ static int tc_get_qdisc(struct sk_buff *skb, struct nlmsghdr *n,
 		} else {
 			q = dev->qdisc;
 		}
-		if (!q)
+		if (!q) {
+			NL_SET_ERR_MSG(extack, "Cannot find specified qdisc on specified device");
 			return -ENOENT;
+		}
 
-		if (tcm->tcm_handle && q->handle != tcm->tcm_handle)
+		if (tcm->tcm_handle && q->handle != tcm->tcm_handle) {
+			NL_SET_ERR_MSG(extack, "Invalid handle");
 			return -EINVAL;
+		}
 	} else {
 		q = qdisc_lookup(dev, tcm->tcm_handle);
-		if (!q)
+		if (!q) {
+			NL_SET_ERR_MSG(extack, "Failed to find qdisc with specified handle");
 			return -ENOENT;
+		}
 	}
 
-	if (tca[TCA_KIND] && nla_strcmp(tca[TCA_KIND], q->ops->id))
+	if (tca[TCA_KIND] && nla_strcmp(tca[TCA_KIND], q->ops->id)) {
+		NL_SET_ERR_MSG(extack, "Invalid qdisc name");
 		return -EINVAL;
+	}
 
 	if (n->nlmsg_type == RTM_DELQDISC) {
-		if (!clid)
+		if (!clid) {
+			NL_SET_ERR_MSG(extack, "Classid cannot be zero");
 			return -EINVAL;
-		if (q->handle == 0)
+		}
+		if (q->handle == 0) {
+			NL_SET_ERR_MSG(extack, "Cannot delete qdisc with handle of zero");
 			return -ENOENT;
-		err = qdisc_graft(dev, p, skb, n, clid, NULL, q);
+		}
+		err = qdisc_graft(dev, p, skb, n, clid, NULL, q, extack);
 		if (err != 0)
 			return err;
 	} else {
@@ -1321,8 +1374,10 @@ static int tc_modify_qdisc(struct sk_buff *skb, struct nlmsghdr *n,
 		if (clid != TC_H_ROOT) {
 			if (clid != TC_H_INGRESS) {
 				p = qdisc_lookup(dev, TC_H_MAJ(clid));
-				if (!p)
+				if (!p) {
+					NL_SET_ERR_MSG(extack, "Failed to find specified qdisc");
 					return -ENOENT;
+				}
 				q = qdisc_leaf(p, clid);
 			} else if (dev_ingress_queue_create(dev)) {
 				q = dev_ingress_queue(dev)->qdisc_sleeping;
@@ -1337,20 +1392,31 @@ static int tc_modify_qdisc(struct sk_buff *skb, struct nlmsghdr *n,
 
 		if (!q || !tcm->tcm_handle || q->handle != tcm->tcm_handle) {
 			if (tcm->tcm_handle) {
-				if (q && !(n->nlmsg_flags & NLM_F_REPLACE))
+				if (q && !(n->nlmsg_flags & NLM_F_REPLACE)) {
+					NL_SET_ERR_MSG(extack, "NLM_F_REPLACE needed to override");
 					return -EEXIST;
-				if (TC_H_MIN(tcm->tcm_handle))
+				}
+				if (TC_H_MIN(tcm->tcm_handle)) {
+					NL_SET_ERR_MSG(extack, "Invalid minor handle");
 					return -EINVAL;
+				}
 				q = qdisc_lookup(dev, tcm->tcm_handle);
 				if (!q)
 					goto create_n_graft;
-				if (n->nlmsg_flags & NLM_F_EXCL)
+				if (n->nlmsg_flags & NLM_F_EXCL) {
+					NL_SET_ERR_MSG(extack, "Exclusivity flag on, cannot override");
 					return -EEXIST;
-				if (tca[TCA_KIND] && nla_strcmp(tca[TCA_KIND], q->ops->id))
+				}
+				if (tca[TCA_KIND] &&
+				    nla_strcmp(tca[TCA_KIND], q->ops->id)) {
+					NL_SET_ERR_MSG(extack, "Invalid qdisc name");
 					return -EINVAL;
+				}
 				if (q == p ||
-				    (p && check_loop(q, p, 0)))
+				    (p && check_loop(q, p, 0))) {
+					NL_SET_ERR_MSG(extack, "Qdisc parent/child loop detected");
 					return -ELOOP;
+				}
 				qdisc_refcount_inc(q);
 				goto graft;
 			} else {
@@ -1385,33 +1451,45 @@ static int tc_modify_qdisc(struct sk_buff *skb, struct nlmsghdr *n,
 			}
 		}
 	} else {
-		if (!tcm->tcm_handle)
+		if (!tcm->tcm_handle) {
+			NL_SET_ERR_MSG(extack, "Handle cannot be zero");
 			return -EINVAL;
+		}
 		q = qdisc_lookup(dev, tcm->tcm_handle);
 	}
 
 	/* Change qdisc parameters */
-	if (q == NULL)
+	if (!q) {
+		NL_SET_ERR_MSG(extack, "Specified qdisc not found");
 		return -ENOENT;
-	if (n->nlmsg_flags & NLM_F_EXCL)
+	}
+	if (n->nlmsg_flags & NLM_F_EXCL) {
+		NL_SET_ERR_MSG(extack, "Exclusivity flag on, cannot modify");
 		return -EEXIST;
-	if (tca[TCA_KIND] && nla_strcmp(tca[TCA_KIND], q->ops->id))
+	}
+	if (tca[TCA_KIND] && nla_strcmp(tca[TCA_KIND], q->ops->id)) {
+		NL_SET_ERR_MSG(extack, "Invalid qdisc name");
 		return -EINVAL;
-	err = qdisc_change(q, tca);
+	}
+	err = qdisc_change(q, tca, extack);
 	if (err == 0)
 		qdisc_notify(net, skb, n, clid, NULL, q);
 	return err;
 
 create_n_graft:
-	if (!(n->nlmsg_flags & NLM_F_CREATE))
+	if (!(n->nlmsg_flags & NLM_F_CREATE)) {
+		NL_SET_ERR_MSG(extack, "Qdisc not found. To create, specify the NLM_F_CREATE flag");
 		return -ENOENT;
+	}
 	if (clid == TC_H_INGRESS) {
-		if (dev_ingress_queue(dev))
+		if (dev_ingress_queue(dev)) {
 			q = qdisc_create(dev, dev_ingress_queue(dev), p,
 					 tcm->tcm_parent, tcm->tcm_parent,
-					 tca, &err);
-		else
+					 tca, &err, extack);
+		} else {
+			NL_SET_ERR_MSG(extack, "Cannot find ingress queue for specified device");
 			err = -ENOENT;
+		}
 	} else {
 		struct netdev_queue *dev_queue;
 
@@ -1424,7 +1502,7 @@ static int tc_modify_qdisc(struct sk_buff *skb, struct nlmsghdr *n,
 
 		q = qdisc_create(dev, dev_queue, p,
 				 tcm->tcm_parent, tcm->tcm_handle,
-				 tca, &err);
+				 tca, &err, extack);
 	}
 	if (q == NULL) {
 		if (err == -EAGAIN)
@@ -1433,7 +1511,7 @@ static int tc_modify_qdisc(struct sk_buff *skb, struct nlmsghdr *n,
 	}
 
 graft:
-	err = qdisc_graft(dev, p, skb, n, clid, q, NULL);
+	err = qdisc_graft(dev, p, skb, n, clid, q, NULL, extack);
 	if (err) {
 		if (q)
 			qdisc_destroy(q);
@@ -1685,7 +1763,7 @@ static void tc_bind_tclass(struct Qdisc *q, u32 portid, u32 clid,
 	cl = cops->find(q, portid);
 	if (!cl)
 		return;
-	block = cops->tcf_block(q, cl);
+	block = cops->tcf_block(q, cl, NULL);
 	if (!block)
 		return;
 	list_for_each_entry(chain, &block->chain_list, list) {
@@ -1832,7 +1910,7 @@ static int tc_ctl_tclass(struct sk_buff *skb, struct nlmsghdr *n,
 	new_cl = cl;
 	err = -EOPNOTSUPP;
 	if (cops->change)
-		err = cops->change(q, clid, portid, tca, &new_cl);
+		err = cops->change(q, clid, portid, tca, &new_cl, extack);
 	if (err == 0) {
 		tclass_notify(net, skb, n, q, new_cl, RTM_NEWTCLASS);
 		/* We just create a new class, need to do reverse binding. */
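
sch_api.c is where the extack plumbing fans out: qdisc_create(),
qdisc_change() and qdisc_graft() now take the extack and forward it
into the Qdisc_ops and Qdisc_class_ops callbacks (->init, ->change,
->graft, ->tcf_block), which is why the per-qdisc diffs that follow
are largely mechanical signature updates. Inside a callback the
pattern is as below; the qdisc name and error text are illustrative:

    static int foo_change(struct Qdisc *sch, struct nlattr *opt,
                          struct netlink_ext_ack *extack)
    {
            if (!opt) {
                    /* Attaches the message userspace will see
                     * alongside the -EINVAL.
                     */
                    NL_SET_ERR_MSG(extack, "foo options are required");
                    return -EINVAL;
            }
            return 0;
    }
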
diff --git a/net/sched/sch_atm.c b/net/sched/sch_atm.c
index 2dbd249..cd49afc 100644
--- a/net/sched/sch_atm.c
+++ b/net/sched/sch_atm.c
@@ -82,7 +82,8 @@ static inline struct atm_flow_data *lookup_flow(struct Qdisc *sch, u32 classid)
 }
 
 static int atm_tc_graft(struct Qdisc *sch, unsigned long arg,
-			struct Qdisc *new, struct Qdisc **old)
+			struct Qdisc *new, struct Qdisc **old,
+			struct netlink_ext_ack *extack)
 {
 	struct atm_qdisc_data *p = qdisc_priv(sch);
 	struct atm_flow_data *flow = (struct atm_flow_data *)arg;
@@ -191,7 +192,8 @@ static const struct nla_policy atm_policy[TCA_ATM_MAX + 1] = {
 };
 
 static int atm_tc_change(struct Qdisc *sch, u32 classid, u32 parent,
-			 struct nlattr **tca, unsigned long *arg)
+			 struct nlattr **tca, unsigned long *arg,
+			 struct netlink_ext_ack *extack)
 {
 	struct atm_qdisc_data *p = qdisc_priv(sch);
 	struct atm_flow_data *flow = (struct atm_flow_data *)*arg;
@@ -281,13 +283,15 @@ static int atm_tc_change(struct Qdisc *sch, u32 classid, u32 parent,
 		goto err_out;
 	}
 
-	error = tcf_block_get(&flow->block, &flow->filter_list, sch);
+	error = tcf_block_get(&flow->block, &flow->filter_list, sch,
+			      extack);
 	if (error) {
 		kfree(flow);
 		goto err_out;
 	}
 
-	flow->q = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops, classid);
+	flow->q = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops, classid,
+				    extack);
 	if (!flow->q)
 		flow->q = &noop_qdisc;
 	pr_debug("atm_tc_change: qdisc %p\n", flow->q);
@@ -356,7 +360,8 @@ static void atm_tc_walk(struct Qdisc *sch, struct qdisc_walker *walker)
 	}
 }
 
-static struct tcf_block *atm_tc_tcf_block(struct Qdisc *sch, unsigned long cl)
+static struct tcf_block *atm_tc_tcf_block(struct Qdisc *sch, unsigned long cl,
+					  struct netlink_ext_ack *extack)
 {
 	struct atm_qdisc_data *p = qdisc_priv(sch);
 	struct atm_flow_data *flow = (struct atm_flow_data *)cl;
@@ -531,7 +536,8 @@ static struct sk_buff *atm_tc_peek(struct Qdisc *sch)
 	return p->link.q->ops->peek(p->link.q);
 }
 
-static int atm_tc_init(struct Qdisc *sch, struct nlattr *opt)
+static int atm_tc_init(struct Qdisc *sch, struct nlattr *opt,
+		       struct netlink_ext_ack *extack)
 {
 	struct atm_qdisc_data *p = qdisc_priv(sch);
 	int err;
@@ -541,12 +547,13 @@ static int atm_tc_init(struct Qdisc *sch, struct nlattr *opt)
 	INIT_LIST_HEAD(&p->link.list);
 	list_add(&p->link.list, &p->flows);
 	p->link.q = qdisc_create_dflt(sch->dev_queue,
-				      &pfifo_qdisc_ops, sch->handle);
+				      &pfifo_qdisc_ops, sch->handle, extack);
 	if (!p->link.q)
 		p->link.q = &noop_qdisc;
 	pr_debug("atm_tc_init: link (%p) qdisc %p\n", &p->link, p->link.q);
 
-	err = tcf_block_get(&p->link.block, &p->link.filter_list, sch);
+	err = tcf_block_get(&p->link.block, &p->link.filter_list, sch,
+			    extack);
 	if (err)
 		return err;
 
diff --git a/net/sched/sch_cbq.c b/net/sched/sch_cbq.c
index 525eb3a..f42025d 100644
--- a/net/sched/sch_cbq.c
+++ b/net/sched/sch_cbq.c
@@ -1132,7 +1132,8 @@ static const struct nla_policy cbq_policy[TCA_CBQ_MAX + 1] = {
 	[TCA_CBQ_POLICE]	= { .len = sizeof(struct tc_cbq_police) },
 };
 
-static int cbq_init(struct Qdisc *sch, struct nlattr *opt)
+static int cbq_init(struct Qdisc *sch, struct nlattr *opt,
+		    struct netlink_ext_ack *extack)
 {
 	struct cbq_sched_data *q = qdisc_priv(sch);
 	struct nlattr *tb[TCA_CBQ_MAX + 1];
@@ -1143,22 +1144,27 @@ static int cbq_init(struct Qdisc *sch, struct nlattr *opt)
 	hrtimer_init(&q->delay_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_PINNED);
 	q->delay_timer.function = cbq_undelay;
 
-	if (!opt)
+	if (!opt) {
+		NL_SET_ERR_MSG(extack, "CBQ options are required for this operation");
 		return -EINVAL;
+	}
 
-	err = nla_parse_nested(tb, TCA_CBQ_MAX, opt, cbq_policy, NULL);
+	err = nla_parse_nested(tb, TCA_CBQ_MAX, opt, cbq_policy, extack);
 	if (err < 0)
 		return err;
 
-	if (tb[TCA_CBQ_RTAB] == NULL || tb[TCA_CBQ_RATE] == NULL)
+	if (!tb[TCA_CBQ_RTAB] || !tb[TCA_CBQ_RATE]) {
+		NL_SET_ERR_MSG(extack, "Rate specification missing or incomplete");
 		return -EINVAL;
+	}
 
 	r = nla_data(tb[TCA_CBQ_RATE]);
 
-	if ((q->link.R_tab = qdisc_get_rtab(r, tb[TCA_CBQ_RTAB])) == NULL)
+	q->link.R_tab = qdisc_get_rtab(r, tb[TCA_CBQ_RTAB], extack);
+	if (!q->link.R_tab)
 		return -EINVAL;
 
-	err = tcf_block_get(&q->link.block, &q->link.filter_list, sch);
+	err = tcf_block_get(&q->link.block, &q->link.filter_list, sch, extack);
 	if (err)
 		goto put_rtab;
 
@@ -1170,7 +1176,7 @@ static int cbq_init(struct Qdisc *sch, struct nlattr *opt)
 	q->link.common.classid = sch->handle;
 	q->link.qdisc = sch;
 	q->link.q = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops,
-				      sch->handle);
+				      sch->handle, NULL);
 	if (!q->link.q)
 		q->link.q = &noop_qdisc;
 	else
@@ -1369,13 +1375,13 @@ cbq_dump_class_stats(struct Qdisc *sch, unsigned long arg,
 }
 
 static int cbq_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
-		     struct Qdisc **old)
+		     struct Qdisc **old, struct netlink_ext_ack *extack)
 {
 	struct cbq_class *cl = (struct cbq_class *)arg;
 
 	if (new == NULL) {
-		new = qdisc_create_dflt(sch->dev_queue,
-					&pfifo_qdisc_ops, cl->common.classid);
+		new = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops,
+					cl->common.classid, extack);
 		if (new == NULL)
 			return -ENOBUFS;
 	}
@@ -1450,7 +1456,7 @@ static void cbq_destroy(struct Qdisc *sch)
 
 static int
 cbq_change_class(struct Qdisc *sch, u32 classid, u32 parentid, struct nlattr **tca,
-		 unsigned long *arg)
+		 unsigned long *arg, struct netlink_ext_ack *extack)
 {
 	int err;
 	struct cbq_sched_data *q = qdisc_priv(sch);
@@ -1460,29 +1466,37 @@ cbq_change_class(struct Qdisc *sch, u32 classid, u32 parentid, struct nlattr **t
 	struct cbq_class *parent;
 	struct qdisc_rate_table *rtab = NULL;
 
-	if (opt == NULL)
+	if (!opt) {
+		NL_SET_ERR_MSG(extack, "Mandatory qdisc options missing");
 		return -EINVAL;
+	}
 
-	err = nla_parse_nested(tb, TCA_CBQ_MAX, opt, cbq_policy, NULL);
+	err = nla_parse_nested(tb, TCA_CBQ_MAX, opt, cbq_policy, extack);
 	if (err < 0)
 		return err;
 
-	if (tb[TCA_CBQ_OVL_STRATEGY] || tb[TCA_CBQ_POLICE])
+	if (tb[TCA_CBQ_OVL_STRATEGY] || tb[TCA_CBQ_POLICE]) {
+		NL_SET_ERR_MSG(extack, "Neither overlimit strategy nor policing attributes can be used for changing class params");
 		return -EOPNOTSUPP;
+	}
 
 	if (cl) {
 		/* Check parent */
 		if (parentid) {
 			if (cl->tparent &&
-			    cl->tparent->common.classid != parentid)
+			    cl->tparent->common.classid != parentid) {
+				NL_SET_ERR_MSG(extack, "Invalid parent id");
 				return -EINVAL;
-			if (!cl->tparent && parentid != TC_H_ROOT)
+			}
+			if (!cl->tparent && parentid != TC_H_ROOT) {
+				NL_SET_ERR_MSG(extack, "Parent must be root");
 				return -EINVAL;
+			}
 		}
 
 		if (tb[TCA_CBQ_RATE]) {
 			rtab = qdisc_get_rtab(nla_data(tb[TCA_CBQ_RATE]),
-					      tb[TCA_CBQ_RTAB]);
+					      tb[TCA_CBQ_RTAB], extack);
 			if (rtab == NULL)
 				return -EINVAL;
 		}
@@ -1494,6 +1508,7 @@ cbq_change_class(struct Qdisc *sch, u32 classid, u32 parentid, struct nlattr **t
 						    qdisc_root_sleeping_running(sch),
 						    tca[TCA_RATE]);
 			if (err) {
+				NL_SET_ERR_MSG(extack, "Failed to replace specified rate estimator");
 				qdisc_put_rtab(rtab);
 				return err;
 			}
@@ -1532,19 +1547,23 @@ cbq_change_class(struct Qdisc *sch, u32 classid, u32 parentid, struct nlattr **t
 	if (parentid == TC_H_ROOT)
 		return -EINVAL;
 
-	if (tb[TCA_CBQ_WRROPT] == NULL || tb[TCA_CBQ_RATE] == NULL ||
-	    tb[TCA_CBQ_LSSOPT] == NULL)
+	if (!tb[TCA_CBQ_WRROPT] || !tb[TCA_CBQ_RATE] || !tb[TCA_CBQ_LSSOPT]) {
+		NL_SET_ERR_MSG(extack, "The following attributes MUST be specified: WRR, rate and link sharing");
 		return -EINVAL;
+	}
 
-	rtab = qdisc_get_rtab(nla_data(tb[TCA_CBQ_RATE]), tb[TCA_CBQ_RTAB]);
+	rtab = qdisc_get_rtab(nla_data(tb[TCA_CBQ_RATE]), tb[TCA_CBQ_RTAB],
+			      extack);
 	if (rtab == NULL)
 		return -EINVAL;
 
 	if (classid) {
 		err = -EINVAL;
 		if (TC_H_MAJ(classid ^ sch->handle) ||
-		    cbq_class_lookup(q, classid))
+		    cbq_class_lookup(q, classid)) {
+			NL_SET_ERR_MSG(extack, "Specified classid is invalid or already in use");
 			goto failure;
+		}
 	} else {
 		int i;
 		classid = TC_H_MAKE(sch->handle, 0x8000);
@@ -1556,8 +1575,10 @@ cbq_change_class(struct Qdisc *sch, u32 classid, u32 parentid, struct nlattr **t
 				break;
 		}
 		err = -ENOSR;
-		if (i >= 0x8000)
+		if (i >= 0x8000) {
+			NL_SET_ERR_MSG(extack, "Unable to generate classid");
 			goto failure;
+		}
 		classid = classid|q->hgenerator;
 	}
 
@@ -1565,8 +1586,10 @@ cbq_change_class(struct Qdisc *sch, u32 classid, u32 parentid, struct nlattr **t
 	if (parentid) {
 		parent = cbq_class_lookup(q, parentid);
 		err = -EINVAL;
-		if (parent == NULL)
+		if (!parent) {
+			NL_SET_ERR_MSG(extack, "Failed to find parentid");
 			goto failure;
+		}
 	}
 
 	err = -ENOBUFS;
@@ -1574,7 +1597,7 @@ cbq_change_class(struct Qdisc *sch, u32 classid, u32 parentid, struct nlattr **t
 	if (cl == NULL)
 		goto failure;
 
-	err = tcf_block_get(&cl->block, &cl->filter_list, sch);
+	err = tcf_block_get(&cl->block, &cl->filter_list, sch, extack);
 	if (err) {
 		kfree(cl);
 		return err;
@@ -1586,6 +1609,7 @@ cbq_change_class(struct Qdisc *sch, u32 classid, u32 parentid, struct nlattr **t
 					qdisc_root_sleeping_running(sch),
 					tca[TCA_RATE]);
 		if (err) {
+			NL_SET_ERR_MSG(extack, "Couldn't create new estimator");
 			tcf_block_put(cl->block);
 			kfree(cl);
 			goto failure;
@@ -1594,7 +1618,8 @@ cbq_change_class(struct Qdisc *sch, u32 classid, u32 parentid, struct nlattr **t
 
 	cl->R_tab = rtab;
 	rtab = NULL;
-	cl->q = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops, classid);
+	cl->q = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops, classid,
+				  NULL);
 	if (!cl->q)
 		cl->q = &noop_qdisc;
 	else
@@ -1678,7 +1703,8 @@ static int cbq_delete(struct Qdisc *sch, unsigned long arg)
 	return 0;
 }
 
-static struct tcf_block *cbq_tcf_block(struct Qdisc *sch, unsigned long arg)
+static struct tcf_block *cbq_tcf_block(struct Qdisc *sch, unsigned long arg,
+				       struct netlink_ext_ack *extack)
 {
 	struct cbq_sched_data *q = qdisc_priv(sch);
 	struct cbq_class *cl = (struct cbq_class *)arg;
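
Note that cbq_init() and cbq_change_class() also hand the extack
straight to nla_parse_nested(), so a policy violation is reported by
the netlink core itself, naming the offending attribute, without any
qdisc-side code. A condensed sketch of that parse step; the wrapper
function is hypothetical, the nla_parse_nested() signature is the one
used above:

    static int cbq_parse_opt(struct nlattr *opt, struct nlattr **tb,
                             struct netlink_ext_ack *extack)
    {
            if (!opt) {
                    NL_SET_ERR_MSG(extack, "CBQ options are required");
                    return -EINVAL;
            }
            /* The netlink core fills extack on a policy mismatch. */
            return nla_parse_nested(tb, TCA_CBQ_MAX, opt, cbq_policy,
                                    extack);
    }
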
diff --git a/net/sched/sch_cbs.c b/net/sched/sch_cbs.c
index 7a72980..cdd96b9 100644
--- a/net/sched/sch_cbs.c
+++ b/net/sched/sch_cbs.c
@@ -219,14 +219,17 @@ static void cbs_disable_offload(struct net_device *dev,
 }
 
 static int cbs_enable_offload(struct net_device *dev, struct cbs_sched_data *q,
-			      const struct tc_cbs_qopt *opt)
+			      const struct tc_cbs_qopt *opt,
+			      struct netlink_ext_ack *extack)
 {
 	const struct net_device_ops *ops = dev->netdev_ops;
 	struct tc_cbs_qopt_offload cbs = { };
 	int err;
 
-	if (!ops->ndo_setup_tc)
+	if (!ops->ndo_setup_tc) {
+		NL_SET_ERR_MSG(extack, "Specified device does not support cbs offload");
 		return -EOPNOTSUPP;
+	}
 
 	cbs.queue = q->queue;
 
@@ -237,8 +240,10 @@ static int cbs_enable_offload(struct net_device *dev, struct cbs_sched_data *q,
 	cbs.sendslope = opt->sendslope;
 
 	err = ops->ndo_setup_tc(dev, TC_SETUP_QDISC_CBS, &cbs);
-	if (err < 0)
+	if (err < 0) {
+		NL_SET_ERR_MSG(extack, "Specified device failed to set up cbs hardware offload");
 		return err;
+	}
 
 	q->enqueue = cbs_enqueue_offload;
 	q->dequeue = cbs_dequeue_offload;
@@ -246,7 +251,8 @@ static int cbs_enable_offload(struct net_device *dev, struct cbs_sched_data *q,
 	return 0;
 }
 
-static int cbs_change(struct Qdisc *sch, struct nlattr *opt)
+static int cbs_change(struct Qdisc *sch, struct nlattr *opt,
+		      struct netlink_ext_ack *extack)
 {
 	struct cbs_sched_data *q = qdisc_priv(sch);
 	struct net_device *dev = qdisc_dev(sch);
@@ -254,12 +260,14 @@ static int cbs_change(struct Qdisc *sch, struct nlattr *opt)
 	struct tc_cbs_qopt *qopt;
 	int err;
 
-	err = nla_parse_nested(tb, TCA_CBS_MAX, opt, cbs_policy, NULL);
+	err = nla_parse_nested(tb, TCA_CBS_MAX, opt, cbs_policy, extack);
 	if (err < 0)
 		return err;
 
-	if (!tb[TCA_CBS_PARMS])
+	if (!tb[TCA_CBS_PARMS]) {
+		NL_SET_ERR_MSG(extack, "Missing CBS parameters which are mandatory");
 		return -EINVAL;
+	}
 
 	qopt = nla_data(tb[TCA_CBS_PARMS]);
 
@@ -276,7 +284,7 @@ static int cbs_change(struct Qdisc *sch, struct nlattr *opt)
 
 		cbs_disable_offload(dev, q);
 	} else {
-		err = cbs_enable_offload(dev, q, qopt);
+		err = cbs_enable_offload(dev, q, qopt, extack);
 		if (err < 0)
 			return err;
 	}
@@ -291,13 +299,16 @@ static int cbs_change(struct Qdisc *sch, struct nlattr *opt)
 	return 0;
 }
 
-static int cbs_init(struct Qdisc *sch, struct nlattr *opt)
+static int cbs_init(struct Qdisc *sch, struct nlattr *opt,
+		    struct netlink_ext_ack *extack)
 {
 	struct cbs_sched_data *q = qdisc_priv(sch);
 	struct net_device *dev = qdisc_dev(sch);
 
-	if (!opt)
+	if (!opt) {
+		NL_SET_ERR_MSG(extack, "Missing CBS qdisc options which are mandatory");
 		return -EINVAL;
+	}
 
 	q->queue = sch->dev_queue - netdev_get_tx_queue(dev, 0);
 
@@ -306,7 +317,7 @@ static int cbs_init(struct Qdisc *sch, struct nlattr *opt)
 
 	qdisc_watchdog_init(&q->watchdog, sch);
 
-	return cbs_change(sch, opt);
+	return cbs_change(sch, opt, extack);
 }
 
 static void cbs_destroy(struct Qdisc *sch)
diff --git a/net/sched/sch_choke.c b/net/sched/sch_choke.c
index 531250f..eafc0d1 100644
--- a/net/sched/sch_choke.c
+++ b/net/sched/sch_choke.c
@@ -344,7 +344,8 @@ static void choke_free(void *addr)
 	kvfree(addr);
 }
 
-static int choke_change(struct Qdisc *sch, struct nlattr *opt)
+static int choke_change(struct Qdisc *sch, struct nlattr *opt,
+			struct netlink_ext_ack *extack)
 {
 	struct choke_sched_data *q = qdisc_priv(sch);
 	struct nlattr *tb[TCA_CHOKE_MAX + 1];
@@ -431,9 +432,10 @@ static int choke_change(struct Qdisc *sch, struct nlattr *opt)
 	return 0;
 }
 
-static int choke_init(struct Qdisc *sch, struct nlattr *opt)
+static int choke_init(struct Qdisc *sch, struct nlattr *opt,
+		      struct netlink_ext_ack *extack)
 {
-	return choke_change(sch, opt);
+	return choke_change(sch, opt, extack);
 }
 
 static int choke_dump(struct Qdisc *sch, struct sk_buff *skb)
diff --git a/net/sched/sch_codel.c b/net/sched/sch_codel.c
index c518a1e..17cd81f 100644
--- a/net/sched/sch_codel.c
+++ b/net/sched/sch_codel.c
@@ -130,7 +130,8 @@ static const struct nla_policy codel_policy[TCA_CODEL_MAX + 1] = {
 	[TCA_CODEL_CE_THRESHOLD]= { .type = NLA_U32 },
 };
 
-static int codel_change(struct Qdisc *sch, struct nlattr *opt)
+static int codel_change(struct Qdisc *sch, struct nlattr *opt,
+			struct netlink_ext_ack *extack)
 {
 	struct codel_sched_data *q = qdisc_priv(sch);
 	struct nlattr *tb[TCA_CODEL_MAX + 1];
@@ -184,7 +185,8 @@ static int codel_change(struct Qdisc *sch, struct nlattr *opt)
 	return 0;
 }
 
-static int codel_init(struct Qdisc *sch, struct nlattr *opt)
+static int codel_init(struct Qdisc *sch, struct nlattr *opt,
+		      struct netlink_ext_ack *extack)
 {
 	struct codel_sched_data *q = qdisc_priv(sch);
 
@@ -196,7 +198,7 @@ static int codel_init(struct Qdisc *sch, struct nlattr *opt)
 	q->params.mtu = psched_mtu(qdisc_dev(sch));
 
 	if (opt) {
-		int err = codel_change(sch, opt);
+		int err = codel_change(sch, opt, extack);
 
 		if (err)
 			return err;
diff --git a/net/sched/sch_drr.c b/net/sched/sch_drr.c
index 5bbcef3..e0b0cf8 100644
--- a/net/sched/sch_drr.c
+++ b/net/sched/sch_drr.c
@@ -64,7 +64,8 @@ static const struct nla_policy drr_policy[TCA_DRR_MAX + 1] = {
 };
 
 static int drr_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
-			    struct nlattr **tca, unsigned long *arg)
+			    struct nlattr **tca, unsigned long *arg,
+			    struct netlink_ext_ack *extack)
 {
 	struct drr_sched *q = qdisc_priv(sch);
 	struct drr_class *cl = (struct drr_class *)*arg;
@@ -73,17 +74,21 @@ static int drr_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
 	u32 quantum;
 	int err;
 
-	if (!opt)
+	if (!opt) {
+		NL_SET_ERR_MSG(extack, "DRR options are required for this operation");
 		return -EINVAL;
+	}
 
-	err = nla_parse_nested(tb, TCA_DRR_MAX, opt, drr_policy, NULL);
+	err = nla_parse_nested(tb, TCA_DRR_MAX, opt, drr_policy, extack);
 	if (err < 0)
 		return err;
 
 	if (tb[TCA_DRR_QUANTUM]) {
 		quantum = nla_get_u32(tb[TCA_DRR_QUANTUM]);
-		if (quantum == 0)
+		if (quantum == 0) {
+			NL_SET_ERR_MSG(extack, "Specified DRR quantum cannot be zero");
 			return -EINVAL;
+		}
 	} else
 		quantum = psched_mtu(qdisc_dev(sch));
 
@@ -94,8 +99,10 @@ static int drr_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
 						    NULL,
 						    qdisc_root_sleeping_running(sch),
 						    tca[TCA_RATE]);
-			if (err)
+			if (err) {
+				NL_SET_ERR_MSG(extack, "Failed to replace estimator");
 				return err;
+			}
 		}
 
 		sch_tree_lock(sch);
@@ -113,7 +120,8 @@ static int drr_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
 	cl->common.classid = classid;
 	cl->quantum	   = quantum;
 	cl->qdisc	   = qdisc_create_dflt(sch->dev_queue,
-					       &pfifo_qdisc_ops, classid);
+					       &pfifo_qdisc_ops, classid,
+					       NULL);
 	if (cl->qdisc == NULL)
 		cl->qdisc = &noop_qdisc;
 	else
@@ -125,6 +133,7 @@ static int drr_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
 					    qdisc_root_sleeping_running(sch),
 					    tca[TCA_RATE]);
 		if (err) {
+			NL_SET_ERR_MSG(extack, "Failed to replace estimator");
 			qdisc_destroy(cl->qdisc);
 			kfree(cl);
 			return err;
@@ -172,12 +181,15 @@ static unsigned long drr_search_class(struct Qdisc *sch, u32 classid)
 	return (unsigned long)drr_find_class(sch, classid);
 }
 
-static struct tcf_block *drr_tcf_block(struct Qdisc *sch, unsigned long cl)
+static struct tcf_block *drr_tcf_block(struct Qdisc *sch, unsigned long cl,
+				       struct netlink_ext_ack *extack)
 {
 	struct drr_sched *q = qdisc_priv(sch);
 
-	if (cl)
+	if (cl) {
+		NL_SET_ERR_MSG(extack, "DRR classid must be zero");
 		return NULL;
+	}
 
 	return q->block;
 }
@@ -201,13 +213,14 @@ static void drr_unbind_tcf(struct Qdisc *sch, unsigned long arg)
 }
 
 static int drr_graft_class(struct Qdisc *sch, unsigned long arg,
-			   struct Qdisc *new, struct Qdisc **old)
+			   struct Qdisc *new, struct Qdisc **old,
+			   struct netlink_ext_ack *extack)
 {
 	struct drr_class *cl = (struct drr_class *)arg;
 
 	if (new == NULL) {
-		new = qdisc_create_dflt(sch->dev_queue,
-					&pfifo_qdisc_ops, cl->common.classid);
+		new = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops,
+					cl->common.classid, NULL);
 		if (new == NULL)
 			new = &noop_qdisc;
 	}
@@ -408,12 +421,13 @@ static struct sk_buff *drr_dequeue(struct Qdisc *sch)
 	return NULL;
 }
 
-static int drr_init_qdisc(struct Qdisc *sch, struct nlattr *opt)
+static int drr_init_qdisc(struct Qdisc *sch, struct nlattr *opt,
+			  struct netlink_ext_ack *extack)
 {
 	struct drr_sched *q = qdisc_priv(sch);
 	int err;
 
-	err = tcf_block_get(&q->block, &q->filter_list, sch);
+	err = tcf_block_get(&q->block, &q->filter_list, sch, extack);
 	if (err)
 		return err;
 	err = qdisc_class_hash_init(&q->clhash);
diff --git a/net/sched/sch_dsmark.c b/net/sched/sch_dsmark.c
index fb4fb71..049714c 100644
--- a/net/sched/sch_dsmark.c
+++ b/net/sched/sch_dsmark.c
@@ -61,7 +61,8 @@ static inline int dsmark_valid_index(struct dsmark_qdisc_data *p, u16 index)
 /* ------------------------- Class/flow operations ------------------------- */
 
 static int dsmark_graft(struct Qdisc *sch, unsigned long arg,
-			struct Qdisc *new, struct Qdisc **old)
+			struct Qdisc *new, struct Qdisc **old,
+			struct netlink_ext_ack *extack)
 {
 	struct dsmark_qdisc_data *p = qdisc_priv(sch);
 
@@ -70,7 +71,7 @@ static int dsmark_graft(struct Qdisc *sch, unsigned long arg,
 
 	if (new == NULL) {
 		new = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops,
-					sch->handle);
+					sch->handle, NULL);
 		if (new == NULL)
 			new = &noop_qdisc;
 	}
@@ -112,7 +113,8 @@ static const struct nla_policy dsmark_policy[TCA_DSMARK_MAX + 1] = {
 };
 
 static int dsmark_change(struct Qdisc *sch, u32 classid, u32 parent,
-			 struct nlattr **tca, unsigned long *arg)
+			 struct nlattr **tca, unsigned long *arg,
+			 struct netlink_ext_ack *extack)
 {
 	struct dsmark_qdisc_data *p = qdisc_priv(sch);
 	struct nlattr *opt = tca[TCA_OPTIONS];
@@ -184,7 +186,8 @@ static void dsmark_walk(struct Qdisc *sch, struct qdisc_walker *walker)
 	}
 }
 
-static struct tcf_block *dsmark_tcf_block(struct Qdisc *sch, unsigned long cl)
+static struct tcf_block *dsmark_tcf_block(struct Qdisc *sch, unsigned long cl,
+					  struct netlink_ext_ack *extack)
 {
 	struct dsmark_qdisc_data *p = qdisc_priv(sch);
 
@@ -330,7 +333,8 @@ static struct sk_buff *dsmark_peek(struct Qdisc *sch)
 	return p->q->ops->peek(p->q);
 }
 
-static int dsmark_init(struct Qdisc *sch, struct nlattr *opt)
+static int dsmark_init(struct Qdisc *sch, struct nlattr *opt,
+		       struct netlink_ext_ack *extack)
 {
 	struct dsmark_qdisc_data *p = qdisc_priv(sch);
 	struct nlattr *tb[TCA_DSMARK_MAX + 1];
@@ -344,7 +348,7 @@ static int dsmark_init(struct Qdisc *sch, struct nlattr *opt)
 	if (!opt)
 		goto errout;
 
-	err = tcf_block_get(&p->block, &p->filter_list, sch);
+	err = tcf_block_get(&p->block, &p->filter_list, sch, extack);
 	if (err)
 		return err;
 
@@ -377,7 +381,8 @@ static int dsmark_init(struct Qdisc *sch, struct nlattr *opt)
 	p->default_index = default_index;
 	p->set_tc_index = nla_get_flag(tb[TCA_DSMARK_SET_TC_INDEX]);
 
-	p->q = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops, sch->handle);
+	p->q = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops, sch->handle,
+				 NULL);
 	if (p->q == NULL)
 		p->q = &noop_qdisc;
 	else
diff --git a/net/sched/sch_fifo.c b/net/sched/sch_fifo.c
index 1e37247..24893d3 100644
--- a/net/sched/sch_fifo.c
+++ b/net/sched/sch_fifo.c
@@ -55,7 +55,8 @@ static int pfifo_tail_enqueue(struct sk_buff *skb, struct Qdisc *sch,
 	return NET_XMIT_CN;
 }
 
-static int fifo_init(struct Qdisc *sch, struct nlattr *opt)
+static int fifo_init(struct Qdisc *sch, struct nlattr *opt,
+		     struct netlink_ext_ack *extack)
 {
 	bool bypass;
 	bool is_bfifo = sch->ops == &bfifo_qdisc_ops;
@@ -157,7 +158,7 @@ int fifo_set_limit(struct Qdisc *q, unsigned int limit)
 		nla->nla_len = nla_attr_size(sizeof(struct tc_fifo_qopt));
 		((struct tc_fifo_qopt *)nla_data(nla))->limit = limit;
 
-		ret = q->ops->change(q, nla);
+		ret = q->ops->change(q, nla, NULL);
 		kfree(nla);
 	}
 	return ret;
@@ -165,12 +166,14 @@ int fifo_set_limit(struct Qdisc *q, unsigned int limit)
 EXPORT_SYMBOL(fifo_set_limit);
 
 struct Qdisc *fifo_create_dflt(struct Qdisc *sch, struct Qdisc_ops *ops,
-			       unsigned int limit)
+			       unsigned int limit,
+			       struct netlink_ext_ack *extack)
 {
 	struct Qdisc *q;
 	int err = -ENOMEM;
 
-	q = qdisc_create_dflt(sch->dev_queue, ops, TC_H_MAKE(sch->handle, 1));
+	q = qdisc_create_dflt(sch->dev_queue, ops, TC_H_MAKE(sch->handle, 1),
+			      extack);
 	if (q) {
 		err = fifo_set_limit(q, limit);
 		if (err < 0) {
diff --git a/net/sched/sch_fq.c b/net/sched/sch_fq.c
index 263d16e..a366e4c 100644
--- a/net/sched/sch_fq.c
+++ b/net/sched/sch_fq.c
@@ -685,7 +685,8 @@ static const struct nla_policy fq_policy[TCA_FQ_MAX + 1] = {
 	[TCA_FQ_LOW_RATE_THRESHOLD]	= { .type = NLA_U32 },
 };
 
-static int fq_change(struct Qdisc *sch, struct nlattr *opt)
+static int fq_change(struct Qdisc *sch, struct nlattr *opt,
+		     struct netlink_ext_ack *extack)
 {
 	struct fq_sched_data *q = qdisc_priv(sch);
 	struct nlattr *tb[TCA_FQ_MAX + 1];
@@ -788,7 +789,8 @@ static void fq_destroy(struct Qdisc *sch)
 	qdisc_watchdog_cancel(&q->watchdog);
 }
 
-static int fq_init(struct Qdisc *sch, struct nlattr *opt)
+static int fq_init(struct Qdisc *sch, struct nlattr *opt,
+		   struct netlink_ext_ack *extack)
 {
 	struct fq_sched_data *q = qdisc_priv(sch);
 	int err;
@@ -811,7 +813,7 @@ static int fq_init(struct Qdisc *sch, struct nlattr *opt)
 	qdisc_watchdog_init(&q->watchdog, sch);
 
 	if (opt)
-		err = fq_change(sch, opt);
+		err = fq_change(sch, opt, extack);
 	else
 		err = fq_resize(sch, q->fq_trees_log);
 
diff --git a/net/sched/sch_fq_codel.c b/net/sched/sch_fq_codel.c
index 0305d79..22fa13c 100644
--- a/net/sched/sch_fq_codel.c
+++ b/net/sched/sch_fq_codel.c
@@ -377,7 +377,8 @@ static const struct nla_policy fq_codel_policy[TCA_FQ_CODEL_MAX + 1] = {
 	[TCA_FQ_CODEL_MEMORY_LIMIT] = { .type = NLA_U32 },
 };
 
-static int fq_codel_change(struct Qdisc *sch, struct nlattr *opt)
+static int fq_codel_change(struct Qdisc *sch, struct nlattr *opt,
+			   struct netlink_ext_ack *extack)
 {
 	struct fq_codel_sched_data *q = qdisc_priv(sch);
 	struct nlattr *tb[TCA_FQ_CODEL_MAX + 1];
@@ -458,7 +459,8 @@ static void fq_codel_destroy(struct Qdisc *sch)
 	kvfree(q->flows);
 }
 
-static int fq_codel_init(struct Qdisc *sch, struct nlattr *opt)
+static int fq_codel_init(struct Qdisc *sch, struct nlattr *opt,
+			 struct netlink_ext_ack *extack)
 {
 	struct fq_codel_sched_data *q = qdisc_priv(sch);
 	int i;
@@ -477,12 +479,12 @@ static int fq_codel_init(struct Qdisc *sch, struct nlattr *opt)
 	q->cparams.mtu = psched_mtu(qdisc_dev(sch));
 
 	if (opt) {
-		int err = fq_codel_change(sch, opt);
+		int err = fq_codel_change(sch, opt, extack);
 		if (err)
 			return err;
 	}
 
-	err = tcf_block_get(&q->block, &q->filter_list, sch);
+	err = tcf_block_get(&q->block, &q->filter_list, sch, extack);
 	if (err)
 		return err;
 
@@ -595,7 +597,8 @@ static void fq_codel_unbind(struct Qdisc *q, unsigned long cl)
 {
 }
 
-static struct tcf_block *fq_codel_tcf_block(struct Qdisc *sch, unsigned long cl)
+static struct tcf_block *fq_codel_tcf_block(struct Qdisc *sch, unsigned long cl,
+					    struct netlink_ext_ack *extack)
 {
 	struct fq_codel_sched_data *q = qdisc_priv(sch);
 
diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
index 661c714..a883c50 100644
--- a/net/sched/sch_generic.c
+++ b/net/sched/sch_generic.c
@@ -26,11 +26,13 @@
 #include <linux/list.h>
 #include <linux/slab.h>
 #include <linux/if_vlan.h>
+#include <linux/skb_array.h>
 #include <linux/if_macvlan.h>
 #include <net/sch_generic.h>
 #include <net/pkt_sched.h>
 #include <net/dst.h>
 #include <trace/events/qdisc.h>
+#include <net/xfrm.h>
 
 /* Qdisc to use by default */
 const struct Qdisc_ops *default_qdisc_ops = &pfifo_fast_ops;
@@ -47,17 +49,115 @@ EXPORT_SYMBOL(default_qdisc_ops);
  * - updates to tree and tree walking are only done under the rtnl mutex.
  */
 
-static inline int dev_requeue_skb(struct sk_buff *skb, struct Qdisc *q)
+static inline struct sk_buff *__skb_dequeue_bad_txq(struct Qdisc *q)
 {
-	q->gso_skb = skb;
-	q->qstats.requeues++;
-	qdisc_qstats_backlog_inc(q, skb);
-	q->q.qlen++;	/* it's still part of the queue */
+	const struct netdev_queue *txq = q->dev_queue;
+	spinlock_t *lock = NULL;
+	struct sk_buff *skb;
+
+	if (q->flags & TCQ_F_NOLOCK) {
+		lock = qdisc_lock(q);
+		spin_lock(lock);
+	}
+
+	skb = skb_peek(&q->skb_bad_txq);
+	if (skb) {
+		/* check the reason for requeuing before taking the tx lock */
+		txq = skb_get_tx_queue(txq->dev, skb);
+		if (!netif_xmit_frozen_or_stopped(txq)) {
+			skb = __skb_dequeue(&q->skb_bad_txq);
+			if (qdisc_is_percpu_stats(q)) {
+				qdisc_qstats_cpu_backlog_dec(q, skb);
+				qdisc_qstats_cpu_qlen_dec(q);
+			} else {
+				qdisc_qstats_backlog_dec(q, skb);
+				q->q.qlen--;
+			}
+		} else {
+			skb = NULL;
+		}
+	}
+
+	if (lock)
+		spin_unlock(lock);
+
+	return skb;
+}
+
+static inline struct sk_buff *qdisc_dequeue_skb_bad_txq(struct Qdisc *q)
+{
+	struct sk_buff *skb = skb_peek(&q->skb_bad_txq);
+
+	if (unlikely(skb))
+		skb = __skb_dequeue_bad_txq(q);
+
+	return skb;
+}
+
+static inline void qdisc_enqueue_skb_bad_txq(struct Qdisc *q,
+					     struct sk_buff *skb)
+{
+	spinlock_t *lock = NULL;
+
+	if (q->flags & TCQ_F_NOLOCK) {
+		lock = qdisc_lock(q);
+		spin_lock(lock);
+	}
+
+	__skb_queue_tail(&q->skb_bad_txq, skb);
+
+	if (lock)
+		spin_unlock(lock);
+}
+
+static inline int __dev_requeue_skb(struct sk_buff *skb, struct Qdisc *q)
+{
+	while (skb) {
+		struct sk_buff *next = skb->next;
+
+		__skb_queue_tail(&q->gso_skb, skb);
+		q->qstats.requeues++;
+		qdisc_qstats_backlog_inc(q, skb);
+		q->q.qlen++;	/* it's still part of the queue */
+
+		skb = next;
+	}
 	__netif_schedule(q);
 
 	return 0;
 }
 
+static inline int dev_requeue_skb_locked(struct sk_buff *skb, struct Qdisc *q)
+{
+	spinlock_t *lock = qdisc_lock(q);
+
+	spin_lock(lock);
+	while (skb) {
+		struct sk_buff *next = skb->next;
+
+		__skb_queue_tail(&q->gso_skb, skb);
+
+		qdisc_qstats_cpu_requeues_inc(q);
+		qdisc_qstats_cpu_backlog_inc(q, skb);
+		qdisc_qstats_cpu_qlen_inc(q);
+
+		skb = next;
+	}
+	spin_unlock(lock);
+
+	__netif_schedule(q);
+
+	return 0;
+}
+
+static inline int dev_requeue_skb(struct sk_buff *skb, struct Qdisc *q)
+{
+	if (q->flags & TCQ_F_NOLOCK)
+		return dev_requeue_skb_locked(skb, q);
+	else
+		return __dev_requeue_skb(skb, q);
+}
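
dev_requeue_skb() now dispatches to a locked variant for TCQ_F_NOLOCK
qdiscs: the qdisc spinlock is taken only when the shared gso_skb and
skb_bad_txq lists can be touched by several CPUs at once. A self-contained
userspace sketch of that conditional-locking shape, with pthreads standing in
for the kernel spinlock (struct toy_qdisc and enqueue_one() are
hypothetical):

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct toy_qdisc {
	bool nolock;			/* models q->flags & TCQ_F_NOLOCK */
	pthread_spinlock_t lock;	/* models qdisc_lock(q) */
	int qlen;
};

static void enqueue_one(struct toy_qdisc *q)
{
	pthread_spinlock_t *lock = NULL;

	if (q->nolock) {		/* lock only the lockless variant */
		lock = &q->lock;
		pthread_spin_lock(lock);
	}

	q->qlen++;			/* stand-in for __skb_queue_tail() */

	if (lock)
		pthread_spin_unlock(lock);
}

int main(void)
{
	struct toy_qdisc q = { .nolock = true, .qlen = 0 };

	pthread_spin_init(&q.lock, PTHREAD_PROCESS_PRIVATE);
	enqueue_one(&q);
	printf("qlen = %d\n", q.qlen);
	return 0;
}
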
+
 static void try_bulk_dequeue_skb(struct Qdisc *q,
 				 struct sk_buff *skb,
 				 const struct netdev_queue *txq,
@@ -95,9 +195,15 @@ static void try_bulk_dequeue_skb_slow(struct Qdisc *q,
 		if (!nskb)
 			break;
 		if (unlikely(skb_get_queue_mapping(nskb) != mapping)) {
-			q->skb_bad_txq = nskb;
-			qdisc_qstats_backlog_inc(q, nskb);
-			q->q.qlen++;
+			qdisc_enqueue_skb_bad_txq(q, nskb);
+
+			if (qdisc_is_percpu_stats(q)) {
+				qdisc_qstats_cpu_backlog_inc(q, nskb);
+				qdisc_qstats_cpu_qlen_inc(q);
+			} else {
+				qdisc_qstats_backlog_inc(q, nskb);
+				q->q.qlen++;
+			}
 			break;
 		}
 		skb->next = nskb;
@@ -113,40 +219,62 @@ static void try_bulk_dequeue_skb_slow(struct Qdisc *q,
 static struct sk_buff *dequeue_skb(struct Qdisc *q, bool *validate,
 				   int *packets)
 {
-	struct sk_buff *skb = q->gso_skb;
 	const struct netdev_queue *txq = q->dev_queue;
+	struct sk_buff *skb = NULL;
 
 	*packets = 1;
-	if (unlikely(skb)) {
+	if (unlikely(!skb_queue_empty(&q->gso_skb))) {
+		spinlock_t *lock = NULL;
+
+		if (q->flags & TCQ_F_NOLOCK) {
+			lock = qdisc_lock(q);
+			spin_lock(lock);
+		}
+
+		skb = skb_peek(&q->gso_skb);
+
+		/* skb may be NULL if another CPU pulls gso_skb off the list
+		 * between the empty check and taking the lock.
+		 */
+		if (!skb) {
+			if (lock)
+				spin_unlock(lock);
+			goto validate;
+		}
+
 		/* skb in gso_skb were already validated */
 		*validate = false;
+		if (xfrm_offload(skb))
+			*validate = true;
 		/* check the reason of requeuing without tx lock first */
 		txq = skb_get_tx_queue(txq->dev, skb);
 		if (!netif_xmit_frozen_or_stopped(txq)) {
-			q->gso_skb = NULL;
-			qdisc_qstats_backlog_dec(q, skb);
-			q->q.qlen--;
-		} else
+			skb = __skb_dequeue(&q->gso_skb);
+			if (qdisc_is_percpu_stats(q)) {
+				qdisc_qstats_cpu_backlog_dec(q, skb);
+				qdisc_qstats_cpu_qlen_dec(q);
+			} else {
+				qdisc_qstats_backlog_dec(q, skb);
+				q->q.qlen--;
+			}
+		} else {
 			skb = NULL;
-		goto trace;
-	}
-	*validate = true;
-	skb = q->skb_bad_txq;
-	if (unlikely(skb)) {
-		/* check the reason of requeuing without tx lock first */
-		txq = skb_get_tx_queue(txq->dev, skb);
-		if (!netif_xmit_frozen_or_stopped(txq)) {
-			q->skb_bad_txq = NULL;
-			qdisc_qstats_backlog_dec(q, skb);
-			q->q.qlen--;
-			goto bulk;
 		}
-		skb = NULL;
+		if (lock)
+			spin_unlock(lock);
 		goto trace;
 	}
-	if (!(q->flags & TCQ_F_ONETXQUEUE) ||
-	    !netif_xmit_frozen_or_stopped(txq))
-		skb = q->dequeue(q);
+validate:
+	*validate = true;
+
+	if ((q->flags & TCQ_F_ONETXQUEUE) &&
+	    netif_xmit_frozen_or_stopped(txq))
+		return skb;
+
+	skb = qdisc_dequeue_skb_bad_txq(q);
+	if (unlikely(skb))
+		goto bulk;
+	skb = q->dequeue(q);
 	if (skb) {
 bulk:
 		if (qdisc_may_bulk(q))
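
The reworked dequeue_skb() drains its sources in a fixed order: the requeued
gso_skb list first, then skb_bad_txq, and only then the qdisc's own
->dequeue(). A toy model of that ordering, with all names hypothetical:

#include <stdio.h>

/* Each source yields a "packet id", or 0 when it is empty. */
static int requeued_pkt = 7, bad_txq_pkt = 0, queued_pkt = 3;

static int take(int *slot)
{
	int v = *slot;

	*slot = 0;
	return v;
}

static int toy_dequeue(void)
{
	int pkt;

	if ((pkt = take(&requeued_pkt)))	/* 1: previously requeued skbs */
		return pkt;
	if ((pkt = take(&bad_txq_pkt)))		/* 2: skbs parked on bad_txq */
		return pkt;
	return take(&queued_pkt);		/* 3: the qdisc's own queue */
}

int main(void)
{
	int pkt;

	while ((pkt = toy_dequeue()))
		printf("dequeued %d\n", pkt);	/* prints 7, then 3 */
	return 0;
}
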
@@ -165,21 +293,33 @@ static struct sk_buff *dequeue_skb(struct Qdisc *q, bool *validate,
  * only one CPU can execute this function.
  *
  * Returns to the caller:
- *				0  - queue is empty or throttled.
- *				>0 - queue is not empty.
+ *				false  - hardware queue frozen; caller should back off
+ *				true   - caller may send more packets
  */
-int sch_direct_xmit(struct sk_buff *skb, struct Qdisc *q,
-		    struct net_device *dev, struct netdev_queue *txq,
-		    spinlock_t *root_lock, bool validate)
+bool sch_direct_xmit(struct sk_buff *skb, struct Qdisc *q,
+		     struct net_device *dev, struct netdev_queue *txq,
+		     spinlock_t *root_lock, bool validate)
 {
 	int ret = NETDEV_TX_BUSY;
+	bool again = false;
 
 	/* And release qdisc */
-	spin_unlock(root_lock);
+	if (root_lock)
+		spin_unlock(root_lock);
 
 	/* Note that we validate skb (GSO, checksum, ...) outside of locks */
 	if (validate)
-		skb = validate_xmit_skb_list(skb, dev);
+		skb = validate_xmit_skb_list(skb, dev, &again);
+
+#ifdef CONFIG_XFRM_OFFLOAD
+	if (unlikely(again)) {
+		if (root_lock)
+			spin_lock(root_lock);
+
+		dev_requeue_skb(skb, q);
+		return false;
+	}
+#endif
 
 	if (likely(skb)) {
 		HARD_TX_LOCK(dev, txq, smp_processor_id());
@@ -188,27 +328,28 @@ int sch_direct_xmit(struct sk_buff *skb, struct Qdisc *q,
 
 		HARD_TX_UNLOCK(dev, txq);
 	} else {
-		spin_lock(root_lock);
-		return qdisc_qlen(q);
+		if (root_lock)
+			spin_lock(root_lock);
+		return true;
 	}
-	spin_lock(root_lock);
 
-	if (dev_xmit_complete(ret)) {
-		/* Driver sent out skb successfully or skb was consumed */
-		ret = qdisc_qlen(q);
-	} else {
+	if (root_lock)
+		spin_lock(root_lock);
+
+	if (!dev_xmit_complete(ret)) {
 		/* Driver returned NETDEV_TX_BUSY - requeue skb */
 		if (unlikely(ret != NETDEV_TX_BUSY))
 			net_warn_ratelimited("BUG %s code %d qlen %d\n",
 					     dev->name, ret, q->q.qlen);
 
-		ret = dev_requeue_skb(skb, q);
+		dev_requeue_skb(skb, q);
+		return false;
 	}
 
 	if (ret && netif_xmit_frozen_or_stopped(txq))
-		ret = 0;
+		return false;
 
-	return ret;
+	return true;
 }
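
sch_direct_xmit() now answers a single question, namely whether the caller
may keep transmitting, instead of returning a queue length. A toy model of
the caller loop under that contract (toy_direct_xmit() is a hypothetical
stand-in):

#include <stdbool.h>
#include <stdio.h>

static int budget = 3;

/* Models the new return convention: true means "feel free to send more",
 * false means "stop: the packet was requeued or the hw queue is frozen".
 */
static bool toy_direct_xmit(void)
{
	return --budget > 0;
}

int main(void)
{
	while (toy_direct_xmit())	/* the caller loop, as in qdisc_restart() */
		puts("sent one packet");
	return 0;
}

This matches the updated comment block above: false covers both the frozen
hardware queue and the requeue cases, so the caller backs off in one place.
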
 
 /*
@@ -230,20 +371,22 @@ int sch_direct_xmit(struct sk_buff *skb, struct Qdisc *q,
  *				>0 - queue is not empty.
  *
  */
-static inline int qdisc_restart(struct Qdisc *q, int *packets)
+static inline bool qdisc_restart(struct Qdisc *q, int *packets)
 {
+	spinlock_t *root_lock = NULL;
 	struct netdev_queue *txq;
 	struct net_device *dev;
-	spinlock_t *root_lock;
 	struct sk_buff *skb;
 	bool validate;
 
 	/* Dequeue packet */
 	skb = dequeue_skb(q, &validate, packets);
 	if (unlikely(!skb))
-		return 0;
+		return false;
 
-	root_lock = qdisc_lock(q);
+	if (!(q->flags & TCQ_F_NOLOCK))
+		root_lock = qdisc_lock(q);
+
 	dev = qdisc_dev(q);
 	txq = skb_get_tx_queue(dev, skb);
 
@@ -267,8 +410,6 @@ void __qdisc_run(struct Qdisc *q)
 			break;
 		}
 	}
-
-	qdisc_run_end(q);
 }
 
 unsigned long dev_trans_start(struct net_device *dev)
@@ -437,7 +578,8 @@ struct Qdisc noop_qdisc = {
 };
 EXPORT_SYMBOL(noop_qdisc);
 
-static int noqueue_init(struct Qdisc *qdisc, struct nlattr *opt)
+static int noqueue_init(struct Qdisc *qdisc, struct nlattr *opt,
+			struct netlink_ext_ack *extack)
 {
 	/* register_qdisc() assigns a default of noop_enqueue if unset,
 	 * but __dev_queue_xmit() treats noqueue only as such
@@ -468,93 +610,99 @@ static const u8 prio2band[TC_PRIO_MAX + 1] = {
 
 /*
  * Private data for a pfifo_fast scheduler containing:
- * 	- queues for the three band
- * 	- bitmap indicating which of the bands contain skbs
+ *	- rings for priority bands
  */
 struct pfifo_fast_priv {
-	u32 bitmap;
-	struct qdisc_skb_head q[PFIFO_FAST_BANDS];
+	struct skb_array q[PFIFO_FAST_BANDS];
 };
 
-/*
- * Convert a bitmap to the first band number where an skb is queued, where:
- * 	bitmap=0 means there are no skbs on any band.
- * 	bitmap=1 means there is an skb on band 0.
- *	bitmap=7 means there are skbs on all 3 bands, etc.
- */
-static const int bitmap2band[] = {-1, 0, 1, 0, 2, 0, 1, 0};
-
-static inline struct qdisc_skb_head *band2list(struct pfifo_fast_priv *priv,
-					     int band)
+static inline struct skb_array *band2list(struct pfifo_fast_priv *priv,
+					  int band)
 {
-	return priv->q + band;
+	return &priv->q[band];
 }
 
 static int pfifo_fast_enqueue(struct sk_buff *skb, struct Qdisc *qdisc,
 			      struct sk_buff **to_free)
 {
-	if (qdisc->q.qlen < qdisc_dev(qdisc)->tx_queue_len) {
-		int band = prio2band[skb->priority & TC_PRIO_MAX];
-		struct pfifo_fast_priv *priv = qdisc_priv(qdisc);
-		struct qdisc_skb_head *list = band2list(priv, band);
+	int band = prio2band[skb->priority & TC_PRIO_MAX];
+	struct pfifo_fast_priv *priv = qdisc_priv(qdisc);
+	struct skb_array *q = band2list(priv, band);
+	int err;
 
-		priv->bitmap |= (1 << band);
-		qdisc->q.qlen++;
-		return __qdisc_enqueue_tail(skb, qdisc, list);
-	}
+	err = skb_array_produce(q, skb);
 
-	return qdisc_drop(skb, qdisc, to_free);
+	if (unlikely(err))
+		return qdisc_drop_cpu(skb, qdisc, to_free);
+
+	qdisc_qstats_cpu_qlen_inc(qdisc);
+	qdisc_qstats_cpu_backlog_inc(qdisc, skb);
+	return NET_XMIT_SUCCESS;
 }
 
 static struct sk_buff *pfifo_fast_dequeue(struct Qdisc *qdisc)
 {
 	struct pfifo_fast_priv *priv = qdisc_priv(qdisc);
-	int band = bitmap2band[priv->bitmap];
+	struct sk_buff *skb = NULL;
+	int band;
 
-	if (likely(band >= 0)) {
-		struct qdisc_skb_head *qh = band2list(priv, band);
-		struct sk_buff *skb = __qdisc_dequeue_head(qh);
+	for (band = 0; band < PFIFO_FAST_BANDS && !skb; band++) {
+		struct skb_array *q = band2list(priv, band);
 
-		if (likely(skb != NULL)) {
-			qdisc_qstats_backlog_dec(qdisc, skb);
-			qdisc_bstats_update(qdisc, skb);
-		}
+		if (__skb_array_empty(q))
+			continue;
 
-		qdisc->q.qlen--;
-		if (qh->qlen == 0)
-			priv->bitmap &= ~(1 << band);
-
-		return skb;
+		skb = skb_array_consume_bh(q);
+	}
+	if (likely(skb)) {
+		qdisc_qstats_cpu_backlog_dec(qdisc, skb);
+		qdisc_bstats_cpu_update(qdisc, skb);
+		qdisc_qstats_cpu_qlen_dec(qdisc);
 	}
 
-	return NULL;
+	return skb;
 }
 
 static struct sk_buff *pfifo_fast_peek(struct Qdisc *qdisc)
 {
 	struct pfifo_fast_priv *priv = qdisc_priv(qdisc);
-	int band = bitmap2band[priv->bitmap];
+	struct sk_buff *skb = NULL;
+	int band;
 
-	if (band >= 0) {
-		struct qdisc_skb_head *qh = band2list(priv, band);
+	for (band = 0; band < PFIFO_FAST_BANDS && !skb; band++) {
+		struct skb_array *q = band2list(priv, band);
 
-		return qh->head;
+		skb = __skb_array_peek(q);
 	}
 
-	return NULL;
+	return skb;
 }
 
 static void pfifo_fast_reset(struct Qdisc *qdisc)
 {
-	int prio;
+	int i, band;
 	struct pfifo_fast_priv *priv = qdisc_priv(qdisc);
 
-	for (prio = 0; prio < PFIFO_FAST_BANDS; prio++)
-		__qdisc_reset_queue(band2list(priv, prio));
+	for (band = 0; band < PFIFO_FAST_BANDS; band++) {
+		struct skb_array *q = band2list(priv, band);
+		struct sk_buff *skb;
 
-	priv->bitmap = 0;
-	qdisc->qstats.backlog = 0;
-	qdisc->q.qlen = 0;
+		/* The ring may be NULL if the destroy path runs after a
+		 * failed skb_array_init() in pfifo_fast_init().
+		 */
+		if (!q->ring.queue)
+			continue;
+
+		while ((skb = skb_array_consume_bh(q)) != NULL)
+			kfree_skb(skb);
+	}
+
+	for_each_possible_cpu(i) {
+		struct gnet_stats_queue *q = per_cpu_ptr(qdisc->cpu_qstats, i);
+
+		q->backlog = 0;
+		q->qlen = 0;
+	}
 }
 
 static int pfifo_fast_dump(struct Qdisc *qdisc, struct sk_buff *skb)
@@ -570,19 +718,51 @@ static int pfifo_fast_dump(struct Qdisc *qdisc, struct sk_buff *skb)
 	return -1;
 }
 
-static int pfifo_fast_init(struct Qdisc *qdisc, struct nlattr *opt)
+static int pfifo_fast_init(struct Qdisc *qdisc, struct nlattr *opt,
+			   struct netlink_ext_ack *extack)
 {
-	int prio;
+	unsigned int qlen = qdisc_dev(qdisc)->tx_queue_len;
 	struct pfifo_fast_priv *priv = qdisc_priv(qdisc);
+	int prio;
 
-	for (prio = 0; prio < PFIFO_FAST_BANDS; prio++)
-		qdisc_skb_head_init(band2list(priv, prio));
+	/* guard against zero-length rings */
+	if (!qlen)
+		return -EINVAL;
+
+	for (prio = 0; prio < PFIFO_FAST_BANDS; prio++) {
+		struct skb_array *q = band2list(priv, prio);
+		int err;
+
+		err = skb_array_init(q, qlen, GFP_KERNEL);
+		if (err)
+			return -ENOMEM;
+	}
 
 	/* Can by-pass the queue discipline */
 	qdisc->flags |= TCQ_F_CAN_BYPASS;
 	return 0;
 }
 
+static void pfifo_fast_destroy(struct Qdisc *sch)
+{
+	struct pfifo_fast_priv *priv = qdisc_priv(sch);
+	int prio;
+
+	for (prio = 0; prio < PFIFO_FAST_BANDS; prio++) {
+		struct skb_array *q = band2list(priv, prio);
+
+		/* The ring may be NULL if the destroy path runs after a
+		 * failed skb_array_init() in pfifo_fast_init().
+		 */
+		if (!q->ring.queue)
+			continue;
+		/* Destroy the ring; no kfree_skb() is needed because
+		 * pfifo_fast_reset() has already freed the queued skbs.
+		 */
+		ptr_ring_cleanup(&q->ring, NULL);
+	}
+}
+
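
pfifo_fast now keeps one skb_array ring per priority band: produce fails
when the ring is full and the packet is dropped, consume returns NULL when
the ring is empty. A single-threaded userspace sketch of those ring
semantics follows; the real skb_array is additionally safe for concurrent
producers and consumers.

#include <stdio.h>
#include <stddef.h>

#define RING_SIZE 4

struct toy_ring {
	void *slot[RING_SIZE];
	size_t head, tail, count;
};

static int produce(struct toy_ring *r, void *p)
{
	if (r->count == RING_SIZE)
		return -1;			/* full: the caller drops */
	r->slot[r->tail] = p;
	r->tail = (r->tail + 1) % RING_SIZE;
	r->count++;
	return 0;
}

static void *consume(struct toy_ring *r)
{
	void *p;

	if (r->count == 0)
		return NULL;			/* empty band */
	p = r->slot[r->head];
	r->head = (r->head + 1) % RING_SIZE;
	r->count--;
	return p;
}

int main(void)
{
	struct toy_ring r = { { NULL }, 0, 0, 0 };
	int pkt = 42;

	produce(&r, &pkt);
	printf("got %d\n", *(int *)consume(&r));
	return 0;
}

Dropping on a full ring is what lets pfifo_fast_enqueue() call
qdisc_drop_cpu() instead of checking tx_queue_len at enqueue time; the rings
were already sized from tx_queue_len in pfifo_fast_init().
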
 struct Qdisc_ops pfifo_fast_ops __read_mostly = {
 	.id		=	"pfifo_fast",
 	.priv_size	=	sizeof(struct pfifo_fast_priv),
@@ -590,9 +770,11 @@ struct Qdisc_ops pfifo_fast_ops __read_mostly = {
 	.dequeue	=	pfifo_fast_dequeue,
 	.peek		=	pfifo_fast_peek,
 	.init		=	pfifo_fast_init,
+	.destroy	=	pfifo_fast_destroy,
 	.reset		=	pfifo_fast_reset,
 	.dump		=	pfifo_fast_dump,
 	.owner		=	THIS_MODULE,
+	.static_flags	=	TCQ_F_NOLOCK | TCQ_F_CPUSTATS,
 };
 EXPORT_SYMBOL(pfifo_fast_ops);
 
@@ -600,7 +782,8 @@ static struct lock_class_key qdisc_tx_busylock;
 static struct lock_class_key qdisc_running_key;
 
 struct Qdisc *qdisc_alloc(struct netdev_queue *dev_queue,
-			  const struct Qdisc_ops *ops)
+			  const struct Qdisc_ops *ops,
+			  struct netlink_ext_ack *extack)
 {
 	void *p;
 	struct Qdisc *sch;
@@ -609,6 +792,7 @@ struct Qdisc *qdisc_alloc(struct netdev_queue *dev_queue,
 	struct net_device *dev;
 
 	if (!dev_queue) {
+		NL_SET_ERR_MSG(extack, "No device queue given");
 		err = -EINVAL;
 		goto errout;
 	}
@@ -630,9 +814,24 @@ struct Qdisc *qdisc_alloc(struct netdev_queue *dev_queue,
 		sch = (struct Qdisc *) QDISC_ALIGN((unsigned long) p);
 		sch->padded = (char *) sch - (char *) p;
 	}
+	__skb_queue_head_init(&sch->gso_skb);
+	__skb_queue_head_init(&sch->skb_bad_txq);
 	qdisc_skb_head_init(&sch->q);
 	spin_lock_init(&sch->q.lock);
 
+	if (ops->static_flags & TCQ_F_CPUSTATS) {
+		sch->cpu_bstats =
+			netdev_alloc_pcpu_stats(struct gnet_stats_basic_cpu);
+		if (!sch->cpu_bstats)
+			goto errout1;
+
+		sch->cpu_qstats = alloc_percpu(struct gnet_stats_queue);
+		if (!sch->cpu_qstats) {
+			free_percpu(sch->cpu_bstats);
+			goto errout1;
+		}
+	}
+
 	spin_lock_init(&sch->busylock);
 	lockdep_set_class(&sch->busylock,
 			  dev->qdisc_tx_busylock ?: &qdisc_tx_busylock);
@@ -642,6 +841,7 @@ struct Qdisc *qdisc_alloc(struct netdev_queue *dev_queue,
 			  dev->qdisc_running_key ?: &qdisc_running_key);
 
 	sch->ops = ops;
+	sch->flags = ops->static_flags;
 	sch->enqueue = ops->enqueue;
 	sch->dequeue = ops->dequeue;
 	sch->dev_queue = dev_queue;
@@ -649,27 +849,32 @@ struct Qdisc *qdisc_alloc(struct netdev_queue *dev_queue,
 	refcount_set(&sch->refcnt, 1);
 
 	return sch;
+errout1:
+	kfree(p);
 errout:
 	return ERR_PTR(err);
 }
 
 struct Qdisc *qdisc_create_dflt(struct netdev_queue *dev_queue,
 				const struct Qdisc_ops *ops,
-				unsigned int parentid)
+				unsigned int parentid,
+				struct netlink_ext_ack *extack)
 {
 	struct Qdisc *sch;
 
-	if (!try_module_get(ops->owner))
+	if (!try_module_get(ops->owner)) {
+		NL_SET_ERR_MSG(extack, "Failed to increase module reference counter");
 		return NULL;
+	}
 
-	sch = qdisc_alloc(dev_queue, ops);
+	sch = qdisc_alloc(dev_queue, ops, extack);
 	if (IS_ERR(sch)) {
 		module_put(ops->owner);
 		return NULL;
 	}
 	sch->parent = parentid;
 
-	if (!ops->init || ops->init(sch, NULL) == 0)
+	if (!ops->init || ops->init(sch, NULL, extack) == 0)
 		return sch;
 
 	qdisc_destroy(sch);
@@ -682,17 +887,21 @@ EXPORT_SYMBOL(qdisc_create_dflt);
 void qdisc_reset(struct Qdisc *qdisc)
 {
 	const struct Qdisc_ops *ops = qdisc->ops;
+	struct sk_buff *skb, *tmp;
 
 	if (ops->reset)
 		ops->reset(qdisc);
 
-	kfree_skb(qdisc->skb_bad_txq);
-	qdisc->skb_bad_txq = NULL;
-
-	if (qdisc->gso_skb) {
-		kfree_skb_list(qdisc->gso_skb);
-		qdisc->gso_skb = NULL;
+	skb_queue_walk_safe(&qdisc->gso_skb, skb, tmp) {
+		__skb_unlink(skb, &qdisc->gso_skb);
+		kfree_skb_list(skb);
 	}
+
+	skb_queue_walk_safe(&qdisc->skb_bad_txq, skb, tmp) {
+		__skb_unlink(skb, &qdisc->skb_bad_txq);
+		kfree_skb_list(skb);
+	}
+
 	qdisc->q.qlen = 0;
 	qdisc->qstats.backlog = 0;
 }
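
qdisc_reset() and qdisc_destroy() purge the new gso_skb and skb_bad_txq
lists with skb_queue_walk_safe(), which records the next element before the
current one is unlinked and freed. The same walk-safe purge on a plain
singly-linked list (the node layout is hypothetical):

#include <stdio.h>
#include <stdlib.h>

struct node {
	struct node *next;
};

/* Save "next" before freeing the current node, mirroring
 * skb_queue_walk_safe() + __skb_unlink() + kfree_skb_list().
 */
static void purge(struct node **head)
{
	struct node *n = *head, *tmp;

	while (n) {
		tmp = n->next;		/* saved before n is freed */
		free(n);
		n = tmp;
	}
	*head = NULL;
}

int main(void)
{
	struct node *head = calloc(1, sizeof(*head));

	head->next = calloc(1, sizeof(*head));
	purge(&head);
	printf("head = %p\n", (void *)head);
	return 0;
}
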
@@ -711,6 +920,7 @@ static void qdisc_free(struct Qdisc *qdisc)
 void qdisc_destroy(struct Qdisc *qdisc)
 {
 	const struct Qdisc_ops  *ops = qdisc->ops;
+	struct sk_buff *skb, *tmp;
 
 	if (qdisc->flags & TCQ_F_BUILTIN ||
 	    !refcount_dec_and_test(&qdisc->refcnt))
@@ -730,8 +940,16 @@ void qdisc_destroy(struct Qdisc *qdisc)
 	module_put(ops->owner);
 	dev_put(qdisc_dev(qdisc));
 
-	kfree_skb_list(qdisc->gso_skb);
-	kfree_skb(qdisc->skb_bad_txq);
+	skb_queue_walk_safe(&qdisc->gso_skb, skb, tmp) {
+		__skb_unlink(skb, &qdisc->gso_skb);
+		kfree_skb_list(skb);
+	}
+
+	skb_queue_walk_safe(&qdisc->skb_bad_txq, skb, tmp) {
+		__skb_unlink(skb, &qdisc->skb_bad_txq);
+		kfree_skb_list(skb);
+	}
+
 	qdisc_free(qdisc);
 }
 EXPORT_SYMBOL(qdisc_destroy);
@@ -746,10 +964,6 @@ struct Qdisc *dev_graft_qdisc(struct netdev_queue *dev_queue,
 	root_lock = qdisc_lock(oqdisc);
 	spin_lock_bh(root_lock);
 
-	/* Prune old scheduler */
-	if (oqdisc && refcount_read(&oqdisc->refcnt) <= 1)
-		qdisc_reset(oqdisc);
-
 	/* ... and graft new one */
 	if (qdisc == NULL)
 		qdisc = &noop_qdisc;
@@ -772,7 +986,7 @@ static void attach_one_default_qdisc(struct net_device *dev,
 	if (dev->priv_flags & IFF_NO_QUEUE)
 		ops = &noqueue_qdisc_ops;
 
-	qdisc = qdisc_create_dflt(dev_queue, ops, TC_H_ROOT);
+	qdisc = qdisc_create_dflt(dev_queue, ops, TC_H_ROOT, NULL);
 	if (!qdisc) {
 		netdev_info(dev, "activation failed\n");
 		return;
@@ -795,7 +1009,7 @@ static void attach_default_qdiscs(struct net_device *dev)
 		dev->qdisc = txq->qdisc_sleeping;
 		qdisc_refcount_inc(dev->qdisc);
 	} else {
-		qdisc = qdisc_create_dflt(txq, &mq_qdisc_ops, TC_H_ROOT);
+		qdisc = qdisc_create_dflt(txq, &mq_qdisc_ops, TC_H_ROOT, NULL);
 		if (qdisc) {
 			dev->qdisc = qdisc;
 			qdisc->ops->attach(qdisc);
@@ -885,14 +1099,18 @@ static bool some_qdisc_is_busy(struct net_device *dev)
 
 		dev_queue = netdev_get_tx_queue(dev, i);
 		q = dev_queue->qdisc_sleeping;
-		root_lock = qdisc_lock(q);
 
-		spin_lock_bh(root_lock);
+		if (q->flags & TCQ_F_NOLOCK) {
+			val = test_bit(__QDISC_STATE_SCHED, &q->state);
+		} else {
+			root_lock = qdisc_lock(q);
+			spin_lock_bh(root_lock);
 
-		val = (qdisc_is_running(q) ||
-		       test_bit(__QDISC_STATE_SCHED, &q->state));
+			val = (qdisc_is_running(q) ||
+			       test_bit(__QDISC_STATE_SCHED, &q->state));
 
-		spin_unlock_bh(root_lock);
+			spin_unlock_bh(root_lock);
+		}
 
 		if (val)
 			return true;
@@ -900,6 +1118,16 @@ static bool some_qdisc_is_busy(struct net_device *dev)
 	return false;
 }
 
+static void dev_qdisc_reset(struct net_device *dev,
+			    struct netdev_queue *dev_queue,
+			    void *none)
+{
+	struct Qdisc *qdisc = dev_queue->qdisc_sleeping;
+
+	if (qdisc)
+		qdisc_reset(qdisc);
+}
+
 /**
  * 	dev_deactivate_many - deactivate transmissions on several devices
  * 	@head: list of devices to deactivate
@@ -910,7 +1138,6 @@ static bool some_qdisc_is_busy(struct net_device *dev)
 void dev_deactivate_many(struct list_head *head)
 {
 	struct net_device *dev;
-	bool sync_needed = false;
 
 	list_for_each_entry(dev, head, close_list) {
 		netdev_for_each_tx_queue(dev, dev_deactivate_queue,
@@ -920,20 +1147,25 @@ void dev_deactivate_many(struct list_head *head)
 					     &noop_qdisc);
 
 		dev_watchdog_down(dev);
-		sync_needed |= !dev->dismantle;
 	}
 
 	/* Wait for outstanding qdisc-less dev_queue_xmit calls.
 	 * This is avoided if all devices are in dismantle phase :
 	 * Caller will call synchronize_net() for us
 	 */
-	if (sync_needed)
-		synchronize_net();
+	synchronize_net();
 
 	/* Wait for outstanding qdisc_run calls. */
-	list_for_each_entry(dev, head, close_list)
+	list_for_each_entry(dev, head, close_list) {
 		while (some_qdisc_is_busy(dev))
 			yield();
+		/* The new qdisc is assigned at this point so we can safely
+		 * unwind stale skb lists and qdisc statistics.
+		 */
+		netdev_for_each_tx_queue(dev, dev_qdisc_reset, NULL);
+		if (dev_ingress_queue(dev))
+			dev_qdisc_reset(dev, dev_ingress_queue(dev), NULL);
+	}
 }
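
dev_deactivate_many() now always waits out the RCU grace period and then
spins, yielding, until no qdisc_run() is in flight before resetting the old
qdiscs. A toy of that quiesce-then-reset sequence (qdisc_is_busy() is a
hypothetical stand-in for some_qdisc_is_busy()):

#include <sched.h>
#include <stdio.h>

static int busy_polls = 3;	/* models some_qdisc_is_busy() going idle */

static int qdisc_is_busy(void)
{
	return busy_polls-- > 0;
}

int main(void)
{
	while (qdisc_is_busy())
		sched_yield();	/* let in-flight qdisc_run() calls finish */

	/* quiescent: safe to reset stale skb lists and statistics */
	puts("reset old qdisc");
	return 0;
}
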
 
 void dev_deactivate(struct net_device *dev)
@@ -954,6 +1186,8 @@ static void dev_init_scheduler_queue(struct net_device *dev,
 
 	rcu_assign_pointer(dev_queue->qdisc, qdisc);
 	dev_queue->qdisc_sleeping = qdisc;
+	__skb_queue_head_init(&qdisc->gso_skb);
+	__skb_queue_head_init(&qdisc->skb_bad_txq);
 }
 
 void dev_init_scheduler(struct net_device *dev)
diff --git a/net/sched/sch_gred.c b/net/sched/sch_gred.c
index bc30f91..cbe4831 100644
--- a/net/sched/sch_gred.c
+++ b/net/sched/sch_gred.c
@@ -306,12 +306,13 @@ static inline int gred_change_table_def(struct Qdisc *sch, struct nlattr *dps)
 	struct tc_gred_sopt *sopt;
 	int i;
 
-	if (dps == NULL)
+	if (!dps)
 		return -EINVAL;
 
 	sopt = nla_data(dps);
 
-	if (sopt->DPs > MAX_DPs || sopt->DPs == 0 || sopt->def_DP >= sopt->DPs)
+	if (sopt->DPs > MAX_DPs || sopt->DPs == 0 ||
+	    sopt->def_DP >= sopt->DPs)
 		return -EINVAL;
 
 	sch_tree_lock(sch);
@@ -391,7 +392,8 @@ static const struct nla_policy gred_policy[TCA_GRED_MAX + 1] = {
 	[TCA_GRED_LIMIT]	= { .type = NLA_U32 },
 };
 
-static int gred_change(struct Qdisc *sch, struct nlattr *opt)
+static int gred_change(struct Qdisc *sch, struct nlattr *opt,
+		       struct netlink_ext_ack *extack)
 {
 	struct gred_sched *table = qdisc_priv(sch);
 	struct tc_gred_qopt *ctl;
@@ -465,12 +467,13 @@ static int gred_change(struct Qdisc *sch, struct nlattr *opt)
 	return err;
 }
 
-static int gred_init(struct Qdisc *sch, struct nlattr *opt)
+static int gred_init(struct Qdisc *sch, struct nlattr *opt,
+		     struct netlink_ext_ack *extack)
 {
 	struct nlattr *tb[TCA_GRED_MAX + 1];
 	int err;
 
-	if (opt == NULL)
+	if (!opt)
 		return -EINVAL;
 
 	err = nla_parse_nested(tb, TCA_GRED_MAX, opt, gred_policy, NULL);
diff --git a/net/sched/sch_hfsc.c b/net/sched/sch_hfsc.c
index d04068a..3ae9877 100644
--- a/net/sched/sch_hfsc.c
+++ b/net/sched/sch_hfsc.c
@@ -921,7 +921,8 @@ static const struct nla_policy hfsc_policy[TCA_HFSC_MAX + 1] = {
 
 static int
 hfsc_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
-		  struct nlattr **tca, unsigned long *arg)
+		  struct nlattr **tca, unsigned long *arg,
+		  struct netlink_ext_ack *extack)
 {
 	struct hfsc_sched *q = qdisc_priv(sch);
 	struct hfsc_class *cl = (struct hfsc_class *)*arg;
@@ -1033,7 +1034,7 @@ hfsc_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
 	if (cl == NULL)
 		return -ENOBUFS;
 
-	err = tcf_block_get(&cl->block, &cl->filter_list, sch);
+	err = tcf_block_get(&cl->block, &cl->filter_list, sch, extack);
 	if (err) {
 		kfree(cl);
 		return err;
@@ -1061,8 +1062,8 @@ hfsc_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
 	cl->cl_common.classid = classid;
 	cl->sched     = q;
 	cl->cl_parent = parent;
-	cl->qdisc = qdisc_create_dflt(sch->dev_queue,
-				      &pfifo_qdisc_ops, classid);
+	cl->qdisc = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops,
+				      classid, NULL);
 	if (cl->qdisc == NULL)
 		cl->qdisc = &noop_qdisc;
 	else
@@ -1176,7 +1177,7 @@ hfsc_classify(struct sk_buff *skb, struct Qdisc *sch, int *qerr)
 
 static int
 hfsc_graft_class(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
-		 struct Qdisc **old)
+		 struct Qdisc **old, struct netlink_ext_ack *extack)
 {
 	struct hfsc_class *cl = (struct hfsc_class *)arg;
 
@@ -1184,7 +1185,7 @@ hfsc_graft_class(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
 		return -EINVAL;
 	if (new == NULL) {
 		new = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops,
-					cl->cl_common.classid);
+					cl->cl_common.classid, NULL);
 		if (new == NULL)
 			new = &noop_qdisc;
 	}
@@ -1246,7 +1247,8 @@ hfsc_unbind_tcf(struct Qdisc *sch, unsigned long arg)
 	cl->filter_cnt--;
 }
 
-static struct tcf_block *hfsc_tcf_block(struct Qdisc *sch, unsigned long arg)
+static struct tcf_block *hfsc_tcf_block(struct Qdisc *sch, unsigned long arg,
+					struct netlink_ext_ack *extack)
 {
 	struct hfsc_sched *q = qdisc_priv(sch);
 	struct hfsc_class *cl = (struct hfsc_class *)arg;
@@ -1388,7 +1390,8 @@ hfsc_schedule_watchdog(struct Qdisc *sch)
 }
 
 static int
-hfsc_init_qdisc(struct Qdisc *sch, struct nlattr *opt)
+hfsc_init_qdisc(struct Qdisc *sch, struct nlattr *opt,
+		struct netlink_ext_ack *extack)
 {
 	struct hfsc_sched *q = qdisc_priv(sch);
 	struct tc_hfsc_qopt *qopt;
@@ -1396,7 +1399,7 @@ hfsc_init_qdisc(struct Qdisc *sch, struct nlattr *opt)
 
 	qdisc_watchdog_init(&q->watchdog, sch);
 
-	if (opt == NULL || nla_len(opt) < sizeof(*qopt))
+	if (!opt || nla_len(opt) < sizeof(*qopt))
 		return -EINVAL;
 	qopt = nla_data(opt);
 
@@ -1406,14 +1409,14 @@ hfsc_init_qdisc(struct Qdisc *sch, struct nlattr *opt)
 		return err;
 	q->eligible = RB_ROOT;
 
-	err = tcf_block_get(&q->root.block, &q->root.filter_list, sch);
+	err = tcf_block_get(&q->root.block, &q->root.filter_list, sch, extack);
 	if (err)
 		return err;
 
 	q->root.cl_common.classid = sch->handle;
 	q->root.sched   = q;
 	q->root.qdisc = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops,
-					  sch->handle);
+					  sch->handle, NULL);
 	if (q->root.qdisc == NULL)
 		q->root.qdisc = &noop_qdisc;
 	else
@@ -1429,7 +1432,8 @@ hfsc_init_qdisc(struct Qdisc *sch, struct nlattr *opt)
 }
 
 static int
-hfsc_change_qdisc(struct Qdisc *sch, struct nlattr *opt)
+hfsc_change_qdisc(struct Qdisc *sch, struct nlattr *opt,
+		  struct netlink_ext_ack *extack)
 {
 	struct hfsc_sched *q = qdisc_priv(sch);
 	struct tc_hfsc_qopt *qopt;
diff --git a/net/sched/sch_hhf.c b/net/sched/sch_hhf.c
index 73a53c0..bce2632 100644
--- a/net/sched/sch_hhf.c
+++ b/net/sched/sch_hhf.c
@@ -504,7 +504,8 @@ static const struct nla_policy hhf_policy[TCA_HHF_MAX + 1] = {
 	[TCA_HHF_NON_HH_WEIGHT]	 = { .type = NLA_U32 },
 };
 
-static int hhf_change(struct Qdisc *sch, struct nlattr *opt)
+static int hhf_change(struct Qdisc *sch, struct nlattr *opt,
+		      struct netlink_ext_ack *extack)
 {
 	struct hhf_sched_data *q = qdisc_priv(sch);
 	struct nlattr *tb[TCA_HHF_MAX + 1];
@@ -571,7 +572,8 @@ static int hhf_change(struct Qdisc *sch, struct nlattr *opt)
 	return 0;
 }
 
-static int hhf_init(struct Qdisc *sch, struct nlattr *opt)
+static int hhf_init(struct Qdisc *sch, struct nlattr *opt,
+		    struct netlink_ext_ack *extack)
 {
 	struct hhf_sched_data *q = qdisc_priv(sch);
 	int i;
@@ -589,7 +591,7 @@ static int hhf_init(struct Qdisc *sch, struct nlattr *opt)
 	q->hhf_non_hh_weight = 2;
 
 	if (opt) {
-		int err = hhf_change(sch, opt);
+		int err = hhf_change(sch, opt, extack);
 
 		if (err)
 			return err;
diff --git a/net/sched/sch_htb.c b/net/sched/sch_htb.c
index fa03807..1ea9846 100644
--- a/net/sched/sch_htb.c
+++ b/net/sched/sch_htb.c
@@ -1017,7 +1017,8 @@ static void htb_work_func(struct work_struct *work)
 	rcu_read_unlock();
 }
 
-static int htb_init(struct Qdisc *sch, struct nlattr *opt)
+static int htb_init(struct Qdisc *sch, struct nlattr *opt,
+		    struct netlink_ext_ack *extack)
 {
 	struct htb_sched *q = qdisc_priv(sch);
 	struct nlattr *tb[TCA_HTB_MAX + 1];
@@ -1031,7 +1032,7 @@ static int htb_init(struct Qdisc *sch, struct nlattr *opt)
 	if (!opt)
 		return -EINVAL;
 
-	err = tcf_block_get(&q->block, &q->filter_list, sch);
+	err = tcf_block_get(&q->block, &q->filter_list, sch, extack);
 	if (err)
 		return err;
 
@@ -1171,7 +1172,7 @@ htb_dump_class_stats(struct Qdisc *sch, unsigned long arg, struct gnet_dump *d)
 }
 
 static int htb_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
-		     struct Qdisc **old)
+		     struct Qdisc **old, struct netlink_ext_ack *extack)
 {
 	struct htb_class *cl = (struct htb_class *)arg;
 
@@ -1179,7 +1180,7 @@ static int htb_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
 		return -EINVAL;
 	if (new == NULL &&
 	    (new = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops,
-				     cl->common.classid)) == NULL)
+				     cl->common.classid, extack)) == NULL)
 		return -ENOBUFS;
 
 	*old = qdisc_replace(sch, new, &cl->un.leaf.q);
@@ -1289,7 +1290,8 @@ static int htb_delete(struct Qdisc *sch, unsigned long arg)
 
 	if (!cl->level && htb_parent_last_child(cl)) {
 		new_q = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops,
-					  cl->parent->common.classid);
+					  cl->parent->common.classid,
+					  NULL);
 		last_child = 1;
 	}
 
@@ -1326,7 +1328,7 @@ static int htb_delete(struct Qdisc *sch, unsigned long arg)
 
 static int htb_change_class(struct Qdisc *sch, u32 classid,
 			    u32 parentid, struct nlattr **tca,
-			    unsigned long *arg)
+			    unsigned long *arg, struct netlink_ext_ack *extack)
 {
 	int err = -EINVAL;
 	struct htb_sched *q = qdisc_priv(sch);
@@ -1356,10 +1358,12 @@ static int htb_change_class(struct Qdisc *sch, u32 classid,
 
 	/* Keeping backward compatible with rate_table based iproute2 tc */
 	if (hopt->rate.linklayer == TC_LINKLAYER_UNAWARE)
-		qdisc_put_rtab(qdisc_get_rtab(&hopt->rate, tb[TCA_HTB_RTAB]));
+		qdisc_put_rtab(qdisc_get_rtab(&hopt->rate, tb[TCA_HTB_RTAB],
+					      NULL));
 
 	if (hopt->ceil.linklayer == TC_LINKLAYER_UNAWARE)
-		qdisc_put_rtab(qdisc_get_rtab(&hopt->ceil, tb[TCA_HTB_CTAB]));
+		qdisc_put_rtab(qdisc_get_rtab(&hopt->ceil, tb[TCA_HTB_CTAB],
+					      NULL));
 
 	if (!cl) {		/* new class */
 		struct Qdisc *new_q;
@@ -1394,7 +1398,7 @@ static int htb_change_class(struct Qdisc *sch, u32 classid,
 		if (!cl)
 			goto failure;
 
-		err = tcf_block_get(&cl->block, &cl->filter_list, sch);
+		err = tcf_block_get(&cl->block, &cl->filter_list, sch, extack);
 		if (err) {
 			kfree(cl);
 			goto failure;
@@ -1423,8 +1427,8 @@ static int htb_change_class(struct Qdisc *sch, u32 classid,
 		 * so that can't be used inside of sch_tree_lock
 		 * -- thanks to Karlis Peisenieks
 		 */
-		new_q = qdisc_create_dflt(sch->dev_queue,
-					  &pfifo_qdisc_ops, classid);
+		new_q = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops,
+					  classid, NULL);
 		sch_tree_lock(sch);
 		if (parent && !parent->level) {
 			unsigned int qlen = parent->un.leaf.q->q.qlen;
@@ -1524,7 +1528,8 @@ static int htb_change_class(struct Qdisc *sch, u32 classid,
 	return err;
 }
 
-static struct tcf_block *htb_tcf_block(struct Qdisc *sch, unsigned long arg)
+static struct tcf_block *htb_tcf_block(struct Qdisc *sch, unsigned long arg,
+				       struct netlink_ext_ack *extack)
 {
 	struct htb_sched *q = qdisc_priv(sch);
 	struct htb_class *cl = (struct htb_class *)arg;
diff --git a/net/sched/sch_ingress.c b/net/sched/sch_ingress.c
index fc1286f..7ca2be2 100644
--- a/net/sched/sch_ingress.c
+++ b/net/sched/sch_ingress.c
@@ -48,7 +48,8 @@ static void ingress_walk(struct Qdisc *sch, struct qdisc_walker *walker)
 {
 }
 
-static struct tcf_block *ingress_tcf_block(struct Qdisc *sch, unsigned long cl)
+static struct tcf_block *ingress_tcf_block(struct Qdisc *sch, unsigned long cl,
+					   struct netlink_ext_ack *extack)
 {
 	struct ingress_sched_data *q = qdisc_priv(sch);
 
@@ -62,7 +63,8 @@ static void clsact_chain_head_change(struct tcf_proto *tp_head, void *priv)
 	mini_qdisc_pair_swap(miniqp, tp_head);
 }
 
-static int ingress_init(struct Qdisc *sch, struct nlattr *opt)
+static int ingress_init(struct Qdisc *sch, struct nlattr *opt,
+			struct netlink_ext_ack *extack)
 {
 	struct ingress_sched_data *q = qdisc_priv(sch);
 	struct net_device *dev = qdisc_dev(sch);
@@ -76,7 +78,7 @@ static int ingress_init(struct Qdisc *sch, struct nlattr *opt)
 	q->block_info.chain_head_change = clsact_chain_head_change;
 	q->block_info.chain_head_change_priv = &q->miniqp;
 
-	err = tcf_block_get_ext(&q->block, sch, &q->block_info);
+	err = tcf_block_get_ext(&q->block, sch, &q->block_info, extack);
 	if (err)
 		return err;
 
@@ -153,7 +155,8 @@ static unsigned long clsact_bind_filter(struct Qdisc *sch,
 	return clsact_find(sch, classid);
 }
 
-static struct tcf_block *clsact_tcf_block(struct Qdisc *sch, unsigned long cl)
+static struct tcf_block *clsact_tcf_block(struct Qdisc *sch, unsigned long cl,
+					  struct netlink_ext_ack *extack)
 {
 	struct clsact_sched_data *q = qdisc_priv(sch);
 
@@ -167,7 +170,8 @@ static struct tcf_block *clsact_tcf_block(struct Qdisc *sch, unsigned long cl)
 	}
 }
 
-static int clsact_init(struct Qdisc *sch, struct nlattr *opt)
+static int clsact_init(struct Qdisc *sch, struct nlattr *opt,
+		       struct netlink_ext_ack *extack)
 {
 	struct clsact_sched_data *q = qdisc_priv(sch);
 	struct net_device *dev = qdisc_dev(sch);
@@ -182,7 +186,8 @@ static int clsact_init(struct Qdisc *sch, struct nlattr *opt)
 	q->ingress_block_info.chain_head_change = clsact_chain_head_change;
 	q->ingress_block_info.chain_head_change_priv = &q->miniqp_ingress;
 
-	err = tcf_block_get_ext(&q->ingress_block, sch, &q->ingress_block_info);
+	err = tcf_block_get_ext(&q->ingress_block, sch, &q->ingress_block_info,
+				extack);
 	if (err)
 		return err;
 
@@ -192,7 +197,8 @@ static int clsact_init(struct Qdisc *sch, struct nlattr *opt)
 	q->egress_block_info.chain_head_change = clsact_chain_head_change;
 	q->egress_block_info.chain_head_change_priv = &q->miniqp_egress;
 
-	err = tcf_block_get_ext(&q->egress_block, sch, &q->egress_block_info);
+	err = tcf_block_get_ext(&q->egress_block, sch, &q->egress_block_info,
+				extack);
 	if (err)
 		return err;
 
diff --git a/net/sched/sch_mq.c b/net/sched/sch_mq.c
index 213b586..f062a18 100644
--- a/net/sched/sch_mq.c
+++ b/net/sched/sch_mq.c
@@ -17,6 +17,7 @@
 #include <linux/skbuff.h>
 #include <net/netlink.h>
 #include <net/pkt_sched.h>
+#include <net/sch_generic.h>
 
 struct mq_sched {
 	struct Qdisc		**qdiscs;
@@ -35,7 +36,8 @@ static void mq_destroy(struct Qdisc *sch)
 	kfree(priv->qdiscs);
 }
 
-static int mq_init(struct Qdisc *sch, struct nlattr *opt)
+static int mq_init(struct Qdisc *sch, struct nlattr *opt,
+		   struct netlink_ext_ack *extack)
 {
 	struct net_device *dev = qdisc_dev(sch);
 	struct mq_sched *priv = qdisc_priv(sch);
@@ -59,7 +61,8 @@ static int mq_init(struct Qdisc *sch, struct nlattr *opt)
 		dev_queue = netdev_get_tx_queue(dev, ntx);
 		qdisc = qdisc_create_dflt(dev_queue, get_default_qdisc_ops(dev, ntx),
 					  TC_H_MAKE(TC_H_MAJ(sch->handle),
-						    TC_H_MIN(ntx + 1)));
+						    TC_H_MIN(ntx + 1)),
+					  extack);
 		if (!qdisc)
 			return -ENOMEM;
 		priv->qdiscs[ntx] = qdisc;
@@ -97,23 +100,42 @@ static int mq_dump(struct Qdisc *sch, struct sk_buff *skb)
 	struct net_device *dev = qdisc_dev(sch);
 	struct Qdisc *qdisc;
 	unsigned int ntx;
+	__u32 qlen = 0;
 
 	sch->q.qlen = 0;
 	memset(&sch->bstats, 0, sizeof(sch->bstats));
 	memset(&sch->qstats, 0, sizeof(sch->qstats));
 
+	/* MQ supports lockless qdiscs. However, statistics accounting must
+	 * handle all, none, or a mix of locked and unlocked child qdiscs.
+	 * Percpu stats are added to the counters in-band; locked qdisc
+	 * totals are added at the end.
+	 */
 	for (ntx = 0; ntx < dev->num_tx_queues; ntx++) {
 		qdisc = netdev_get_tx_queue(dev, ntx)->qdisc_sleeping;
 		spin_lock_bh(qdisc_lock(qdisc));
-		sch->q.qlen		+= qdisc->q.qlen;
-		sch->bstats.bytes	+= qdisc->bstats.bytes;
-		sch->bstats.packets	+= qdisc->bstats.packets;
-		sch->qstats.backlog	+= qdisc->qstats.backlog;
-		sch->qstats.drops	+= qdisc->qstats.drops;
-		sch->qstats.requeues	+= qdisc->qstats.requeues;
-		sch->qstats.overlimits	+= qdisc->qstats.overlimits;
+
+		if (qdisc_is_percpu_stats(qdisc)) {
+			qlen = qdisc_qlen_sum(qdisc);
+			__gnet_stats_copy_basic(NULL, &sch->bstats,
+						qdisc->cpu_bstats,
+						&qdisc->bstats);
+			__gnet_stats_copy_queue(&sch->qstats,
+						qdisc->cpu_qstats,
+						&qdisc->qstats, qlen);
+		} else {
+			sch->q.qlen		+= qdisc->q.qlen;
+			sch->bstats.bytes	+= qdisc->bstats.bytes;
+			sch->bstats.packets	+= qdisc->bstats.packets;
+			sch->qstats.backlog	+= qdisc->qstats.backlog;
+			sch->qstats.drops	+= qdisc->qstats.drops;
+			sch->qstats.requeues	+= qdisc->qstats.requeues;
+			sch->qstats.overlimits	+= qdisc->qstats.overlimits;
+		}
+
 		spin_unlock_bh(qdisc_lock(qdisc));
 	}
+
 	return 0;
 }
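
With lockless children in the mix, mq_dump() folds per-CPU counters into the
totals in-band and adds locked-qdisc totals directly. A sketch of the
per-CPU summation step; NR_CPUS and cpu_qlen[] are hypothetical stand-ins
for the percpu counters that qdisc_qlen_sum() reads:

#include <stdio.h>

#define NR_CPUS 4

/* One queue-length counter per CPU, as with alloc_percpu() stats. */
static int cpu_qlen[NR_CPUS] = { 2, 0, 5, 1 };

static int qlen_sum(void)
{
	int cpu, total = 0;

	for (cpu = 0; cpu < NR_CPUS; cpu++)
		total += cpu_qlen[cpu];
	return total;
}

int main(void)
{
	printf("qlen = %d\n", qlen_sum());	/* 8 */
	return 0;
}
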
 
@@ -134,7 +156,7 @@ static struct netdev_queue *mq_select_queue(struct Qdisc *sch,
 }
 
 static int mq_graft(struct Qdisc *sch, unsigned long cl, struct Qdisc *new,
-		    struct Qdisc **old)
+		    struct Qdisc **old, struct netlink_ext_ack *extack)
 {
 	struct netdev_queue *dev_queue = mq_queue_get(sch, cl);
 	struct net_device *dev = qdisc_dev(sch);
diff --git a/net/sched/sch_mqprio.c b/net/sched/sch_mqprio.c
index b85885a9..0e9d761 100644
--- a/net/sched/sch_mqprio.c
+++ b/net/sched/sch_mqprio.c
@@ -132,7 +132,8 @@ static int parse_attr(struct nlattr *tb[], int maxtype, struct nlattr *nla,
 	return 0;
 }
 
-static int mqprio_init(struct Qdisc *sch, struct nlattr *opt)
+static int mqprio_init(struct Qdisc *sch, struct nlattr *opt,
+		       struct netlink_ext_ack *extack)
 {
 	struct net_device *dev = qdisc_dev(sch);
 	struct mqprio_sched *priv = qdisc_priv(sch);
@@ -229,7 +230,7 @@ static int mqprio_init(struct Qdisc *sch, struct nlattr *opt)
 		qdisc = qdisc_create_dflt(dev_queue,
 					  get_default_qdisc_ops(dev, i),
 					  TC_H_MAKE(TC_H_MAJ(sch->handle),
-						    TC_H_MIN(i + 1)));
+						    TC_H_MIN(i + 1)), extack);
 		if (!qdisc)
 			return -ENOMEM;
 
@@ -319,7 +320,7 @@ static struct netdev_queue *mqprio_queue_get(struct Qdisc *sch,
 }
 
 static int mqprio_graft(struct Qdisc *sch, unsigned long cl, struct Qdisc *new,
-		    struct Qdisc **old)
+			struct Qdisc **old, struct netlink_ext_ack *extack)
 {
 	struct net_device *dev = qdisc_dev(sch);
 	struct netdev_queue *dev_queue = mqprio_queue_get(sch, cl);
@@ -388,22 +389,40 @@ static int mqprio_dump(struct Qdisc *sch, struct sk_buff *skb)
 	struct nlattr *nla = (struct nlattr *)skb_tail_pointer(skb);
 	struct tc_mqprio_qopt opt = { 0 };
 	struct Qdisc *qdisc;
-	unsigned int i;
+	unsigned int ntx, tc;
 
 	sch->q.qlen = 0;
 	memset(&sch->bstats, 0, sizeof(sch->bstats));
 	memset(&sch->qstats, 0, sizeof(sch->qstats));
 
-	for (i = 0; i < dev->num_tx_queues; i++) {
-		qdisc = rtnl_dereference(netdev_get_tx_queue(dev, i)->qdisc);
+	/* mqprio supports lockless qdiscs. However, statistics accounting
+	 * must handle all, none, or a mix of locked and unlocked child
+	 * qdiscs. Percpu stats are added to the counters in-band; locked
+	 * qdisc totals are added at the end.
+	 */
+	for (ntx = 0; ntx < dev->num_tx_queues; ntx++) {
+		qdisc = netdev_get_tx_queue(dev, ntx)->qdisc_sleeping;
 		spin_lock_bh(qdisc_lock(qdisc));
-		sch->q.qlen		+= qdisc->q.qlen;
-		sch->bstats.bytes	+= qdisc->bstats.bytes;
-		sch->bstats.packets	+= qdisc->bstats.packets;
-		sch->qstats.backlog	+= qdisc->qstats.backlog;
-		sch->qstats.drops	+= qdisc->qstats.drops;
-		sch->qstats.requeues	+= qdisc->qstats.requeues;
-		sch->qstats.overlimits	+= qdisc->qstats.overlimits;
+
+		if (qdisc_is_percpu_stats(qdisc)) {
+			__u32 qlen = qdisc_qlen_sum(qdisc);
+
+			__gnet_stats_copy_basic(NULL, &sch->bstats,
+						qdisc->cpu_bstats,
+						&qdisc->bstats);
+			__gnet_stats_copy_queue(&sch->qstats,
+						qdisc->cpu_qstats,
+						&qdisc->qstats, qlen);
+		} else {
+			sch->q.qlen		+= qdisc->q.qlen;
+			sch->bstats.bytes	+= qdisc->bstats.bytes;
+			sch->bstats.packets	+= qdisc->bstats.packets;
+			sch->qstats.backlog	+= qdisc->qstats.backlog;
+			sch->qstats.drops	+= qdisc->qstats.drops;
+			sch->qstats.requeues	+= qdisc->qstats.requeues;
+			sch->qstats.overlimits	+= qdisc->qstats.overlimits;
+		}
+
 		spin_unlock_bh(qdisc_lock(qdisc));
 	}
 
@@ -411,9 +430,9 @@ static int mqprio_dump(struct Qdisc *sch, struct sk_buff *skb)
 	memcpy(opt.prio_tc_map, dev->prio_tc_map, sizeof(opt.prio_tc_map));
 	opt.hw = priv->hw_offload;
 
-	for (i = 0; i < netdev_get_num_tc(dev); i++) {
-		opt.count[i] = dev->tc_to_txq[i].count;
-		opt.offset[i] = dev->tc_to_txq[i].offset;
+	for (tc = 0; tc < netdev_get_num_tc(dev); tc++) {
+		opt.count[tc] = dev->tc_to_txq[tc].count;
+		opt.offset[tc] = dev->tc_to_txq[tc].offset;
 	}
 
 	if (nla_put(skb, TCA_OPTIONS, NLA_ALIGN(sizeof(opt)), &opt))
@@ -495,7 +514,6 @@ static int mqprio_dump_class_stats(struct Qdisc *sch, unsigned long cl,
 	if (cl >= TC_H_MIN_PRIORITY) {
 		int i;
 		__u32 qlen = 0;
-		struct Qdisc *qdisc;
 		struct gnet_stats_queue qstats = {0};
 		struct gnet_stats_basic_packed bstats = {0};
 		struct net_device *dev = qdisc_dev(sch);
@@ -511,18 +529,26 @@ static int mqprio_dump_class_stats(struct Qdisc *sch, unsigned long cl,
 
 		for (i = tc.offset; i < tc.offset + tc.count; i++) {
 			struct netdev_queue *q = netdev_get_tx_queue(dev, i);
+			struct Qdisc *qdisc = rtnl_dereference(q->qdisc);
+			struct gnet_stats_basic_cpu __percpu *cpu_bstats = NULL;
+			struct gnet_stats_queue __percpu *cpu_qstats = NULL;
 
-			qdisc = rtnl_dereference(q->qdisc);
 			spin_lock_bh(qdisc_lock(qdisc));
-			qlen		  += qdisc->q.qlen;
-			bstats.bytes      += qdisc->bstats.bytes;
-			bstats.packets    += qdisc->bstats.packets;
-			qstats.backlog    += qdisc->qstats.backlog;
-			qstats.drops      += qdisc->qstats.drops;
-			qstats.requeues   += qdisc->qstats.requeues;
-			qstats.overlimits += qdisc->qstats.overlimits;
+			if (qdisc_is_percpu_stats(qdisc)) {
+				cpu_bstats = qdisc->cpu_bstats;
+				cpu_qstats = qdisc->cpu_qstats;
+			}
+
+			qlen = qdisc_qlen_sum(qdisc);
+			__gnet_stats_copy_basic(NULL, &sch->bstats,
+						cpu_bstats, &qdisc->bstats);
+			__gnet_stats_copy_queue(&sch->qstats,
+						cpu_qstats,
+						&qdisc->qstats,
+						qlen);
 			spin_unlock_bh(qdisc_lock(qdisc));
 		}
+
 		/* Reclaim root sleeping lock before completing stats */
 		if (d->lock)
 			spin_lock_bh(d->lock);
diff --git a/net/sched/sch_multiq.c b/net/sched/sch_multiq.c
index 0122163..1da7ea8 100644
--- a/net/sched/sch_multiq.c
+++ b/net/sched/sch_multiq.c
@@ -180,7 +180,8 @@ multiq_destroy(struct Qdisc *sch)
 	kfree(q->queues);
 }
 
-static int multiq_tune(struct Qdisc *sch, struct nlattr *opt)
+static int multiq_tune(struct Qdisc *sch, struct nlattr *opt,
+		       struct netlink_ext_ack *extack)
 {
 	struct multiq_sched_data *q = qdisc_priv(sch);
 	struct tc_multiq_qopt *qopt;
@@ -215,7 +216,7 @@ static int multiq_tune(struct Qdisc *sch, struct nlattr *opt)
 			child = qdisc_create_dflt(sch->dev_queue,
 						  &pfifo_qdisc_ops,
 						  TC_H_MAKE(sch->handle,
-							    i + 1));
+							    i + 1), extack);
 			if (child) {
 				sch_tree_lock(sch);
 				old = q->queues[i];
@@ -236,17 +237,18 @@ static int multiq_tune(struct Qdisc *sch, struct nlattr *opt)
 	return 0;
 }
 
-static int multiq_init(struct Qdisc *sch, struct nlattr *opt)
+static int multiq_init(struct Qdisc *sch, struct nlattr *opt,
+		       struct netlink_ext_ack *extack)
 {
 	struct multiq_sched_data *q = qdisc_priv(sch);
 	int i, err;
 
 	q->queues = NULL;
 
-	if (opt == NULL)
+	if (!opt)
 		return -EINVAL;
 
-	err = tcf_block_get(&q->block, &q->filter_list, sch);
+	err = tcf_block_get(&q->block, &q->filter_list, sch, extack);
 	if (err)
 		return err;
 
@@ -258,7 +260,7 @@ static int multiq_init(struct Qdisc *sch, struct nlattr *opt)
 	for (i = 0; i < q->max_bands; i++)
 		q->queues[i] = &noop_qdisc;
 
-	return multiq_tune(sch, opt);
+	return multiq_tune(sch, opt, extack);
 }
 
 static int multiq_dump(struct Qdisc *sch, struct sk_buff *skb)
@@ -281,7 +283,7 @@ static int multiq_dump(struct Qdisc *sch, struct sk_buff *skb)
 }
 
 static int multiq_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
-		      struct Qdisc **old)
+			struct Qdisc **old, struct netlink_ext_ack *extack)
 {
 	struct multiq_sched_data *q = qdisc_priv(sch);
 	unsigned long band = arg - 1;
@@ -369,7 +371,8 @@ static void multiq_walk(struct Qdisc *sch, struct qdisc_walker *arg)
 	}
 }
 
-static struct tcf_block *multiq_tcf_block(struct Qdisc *sch, unsigned long cl)
+static struct tcf_block *multiq_tcf_block(struct Qdisc *sch, unsigned long cl,
+					  struct netlink_ext_ack *extack)
 {
 	struct multiq_sched_data *q = qdisc_priv(sch);
 
diff --git a/net/sched/sch_netem.c b/net/sched/sch_netem.c
index dd70924..7bbc13b 100644
--- a/net/sched/sch_netem.c
+++ b/net/sched/sch_netem.c
@@ -893,7 +893,8 @@ static int parse_attr(struct nlattr *tb[], int maxtype, struct nlattr *nla,
 }
 
 /* Parse netlink message to set options */
-static int netem_change(struct Qdisc *sch, struct nlattr *opt)
+static int netem_change(struct Qdisc *sch, struct nlattr *opt,
+			struct netlink_ext_ack *extack)
 {
 	struct netem_sched_data *q = qdisc_priv(sch);
 	struct nlattr *tb[TCA_NETEM_MAX + 1];
@@ -984,7 +985,8 @@ static int netem_change(struct Qdisc *sch, struct nlattr *opt)
 	return ret;
 }
 
-static int netem_init(struct Qdisc *sch, struct nlattr *opt)
+static int netem_init(struct Qdisc *sch, struct nlattr *opt,
+		      struct netlink_ext_ack *extack)
 {
 	struct netem_sched_data *q = qdisc_priv(sch);
 	int ret;
@@ -995,7 +997,7 @@ static int netem_init(struct Qdisc *sch, struct nlattr *opt)
 		return -EINVAL;
 
 	q->loss_model = CLG_RANDOM;
-	ret = netem_change(sch, opt);
+	ret = netem_change(sch, opt, extack);
 	if (ret)
 		pr_info("netem: change failed\n");
 	return ret;
@@ -1157,7 +1159,7 @@ static int netem_dump_class(struct Qdisc *sch, unsigned long cl,
 }
 
 static int netem_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
-		     struct Qdisc **old)
+		     struct Qdisc **old, struct netlink_ext_ack *extack)
 {
 	struct netem_sched_data *q = qdisc_priv(sch);
 
diff --git a/net/sched/sch_pie.c b/net/sched/sch_pie.c
index 776c694..18d30bb 100644
--- a/net/sched/sch_pie.c
+++ b/net/sched/sch_pie.c
@@ -181,7 +181,8 @@ static const struct nla_policy pie_policy[TCA_PIE_MAX + 1] = {
 	[TCA_PIE_BYTEMODE] = {.type = NLA_U32},
 };
 
-static int pie_change(struct Qdisc *sch, struct nlattr *opt)
+static int pie_change(struct Qdisc *sch, struct nlattr *opt,
+		      struct netlink_ext_ack *extack)
 {
 	struct pie_sched_data *q = qdisc_priv(sch);
 	struct nlattr *tb[TCA_PIE_MAX + 1];
@@ -439,7 +440,8 @@ static void pie_timer(struct timer_list *t)
 
 }
 
-static int pie_init(struct Qdisc *sch, struct nlattr *opt)
+static int pie_init(struct Qdisc *sch, struct nlattr *opt,
+		    struct netlink_ext_ack *extack)
 {
 	struct pie_sched_data *q = qdisc_priv(sch);
 
@@ -451,7 +453,7 @@ static int pie_init(struct Qdisc *sch, struct nlattr *opt)
 	timer_setup(&q->adapt_timer, pie_timer, 0);
 
 	if (opt) {
-		int err = pie_change(sch, opt);
+		int err = pie_change(sch, opt, extack);
 
 		if (err)
 			return err;
diff --git a/net/sched/sch_plug.c b/net/sched/sch_plug.c
index 1c6cbab..5619d2e 100644
--- a/net/sched/sch_plug.c
+++ b/net/sched/sch_plug.c
@@ -123,7 +123,8 @@ static struct sk_buff *plug_dequeue(struct Qdisc *sch)
 	return qdisc_dequeue_head(sch);
 }
 
-static int plug_init(struct Qdisc *sch, struct nlattr *opt)
+static int plug_init(struct Qdisc *sch, struct nlattr *opt,
+		     struct netlink_ext_ack *extack)
 {
 	struct plug_sched_data *q = qdisc_priv(sch);
 
@@ -158,7 +159,8 @@ static int plug_init(struct Qdisc *sch, struct nlattr *opt)
  *   command is received (just act as a pass-thru queue).
  * TCQ_PLUG_LIMIT: Increase/decrease queue size
  */
-static int plug_change(struct Qdisc *sch, struct nlattr *opt)
+static int plug_change(struct Qdisc *sch, struct nlattr *opt,
+		       struct netlink_ext_ack *extack)
 {
 	struct plug_sched_data *q = qdisc_priv(sch);
 	struct tc_plug_qopt *msg;
diff --git a/net/sched/sch_prio.c b/net/sched/sch_prio.c
index 2c79559..fe1510e 100644
--- a/net/sched/sch_prio.c
+++ b/net/sched/sch_prio.c
@@ -153,7 +153,8 @@ prio_destroy(struct Qdisc *sch)
 		qdisc_destroy(q->queues[prio]);
 }
 
-static int prio_tune(struct Qdisc *sch, struct nlattr *opt)
+static int prio_tune(struct Qdisc *sch, struct nlattr *opt,
+		     struct netlink_ext_ack *extack)
 {
 	struct prio_sched_data *q = qdisc_priv(sch);
 	struct Qdisc *queues[TCQ_PRIO_BANDS];
@@ -175,7 +176,8 @@ static int prio_tune(struct Qdisc *sch, struct nlattr *opt)
 	/* Before commit, make sure we can allocate all new qdiscs */
 	for (i = oldbands; i < qopt->bands; i++) {
 		queues[i] = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops,
-					      TC_H_MAKE(sch->handle, i + 1));
+					      TC_H_MAKE(sch->handle, i + 1),
+					      extack);
 		if (!queues[i]) {
 			while (i > oldbands)
 				qdisc_destroy(queues[--i]);
@@ -205,7 +207,8 @@ static int prio_tune(struct Qdisc *sch, struct nlattr *opt)
 	return 0;
 }
 
-static int prio_init(struct Qdisc *sch, struct nlattr *opt)
+static int prio_init(struct Qdisc *sch, struct nlattr *opt,
+		     struct netlink_ext_ack *extack)
 {
 	struct prio_sched_data *q = qdisc_priv(sch);
 	int err;
@@ -213,11 +216,11 @@ static int prio_init(struct Qdisc *sch, struct nlattr *opt)
 	if (!opt)
 		return -EINVAL;
 
-	err = tcf_block_get(&q->block, &q->filter_list, sch);
+	err = tcf_block_get(&q->block, &q->filter_list, sch, extack);
 	if (err)
 		return err;
 
-	return prio_tune(sch, opt);
+	return prio_tune(sch, opt, extack);
 }
 
 static int prio_dump(struct Qdisc *sch, struct sk_buff *skb)
@@ -240,7 +243,7 @@ static int prio_dump(struct Qdisc *sch, struct sk_buff *skb)
 }
 
 static int prio_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
-		      struct Qdisc **old)
+		      struct Qdisc **old, struct netlink_ext_ack *extack)
 {
 	struct prio_sched_data *q = qdisc_priv(sch);
 	unsigned long band = arg - 1;
@@ -327,7 +330,8 @@ static void prio_walk(struct Qdisc *sch, struct qdisc_walker *arg)
 	}
 }
 
-static struct tcf_block *prio_tcf_block(struct Qdisc *sch, unsigned long cl)
+static struct tcf_block *prio_tcf_block(struct Qdisc *sch, unsigned long cl,
+					struct netlink_ext_ack *extack)
 {
 	struct prio_sched_data *q = qdisc_priv(sch);
 
diff --git a/net/sched/sch_qfq.c b/net/sched/sch_qfq.c
index 6962b37..bb1a9c1 100644
--- a/net/sched/sch_qfq.c
+++ b/net/sched/sch_qfq.c
@@ -402,7 +402,8 @@ static int qfq_change_agg(struct Qdisc *sch, struct qfq_class *cl, u32 weight,
 }
 
 static int qfq_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
-			    struct nlattr **tca, unsigned long *arg)
+			    struct nlattr **tca, unsigned long *arg,
+			    struct netlink_ext_ack *extack)
 {
 	struct qfq_sched *q = qdisc_priv(sch);
 	struct qfq_class *cl = (struct qfq_class *)*arg;
@@ -479,8 +480,8 @@ static int qfq_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
 	cl->common.classid = classid;
 	cl->deficit = lmax;
 
-	cl->qdisc = qdisc_create_dflt(sch->dev_queue,
-				      &pfifo_qdisc_ops, classid);
+	cl->qdisc = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops,
+				      classid, NULL);
 	if (cl->qdisc == NULL)
 		cl->qdisc = &noop_qdisc;
 
@@ -564,7 +565,8 @@ static unsigned long qfq_search_class(struct Qdisc *sch, u32 classid)
 	return (unsigned long)qfq_find_class(sch, classid);
 }
 
-static struct tcf_block *qfq_tcf_block(struct Qdisc *sch, unsigned long cl)
+static struct tcf_block *qfq_tcf_block(struct Qdisc *sch, unsigned long cl,
+				       struct netlink_ext_ack *extack)
 {
 	struct qfq_sched *q = qdisc_priv(sch);
 
@@ -593,13 +595,14 @@ static void qfq_unbind_tcf(struct Qdisc *sch, unsigned long arg)
 }
 
 static int qfq_graft_class(struct Qdisc *sch, unsigned long arg,
-			   struct Qdisc *new, struct Qdisc **old)
+			   struct Qdisc *new, struct Qdisc **old,
+			   struct netlink_ext_ack *extack)
 {
 	struct qfq_class *cl = (struct qfq_class *)arg;
 
 	if (new == NULL) {
-		new = qdisc_create_dflt(sch->dev_queue,
-					&pfifo_qdisc_ops, cl->common.classid);
+		new = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops,
+					cl->common.classid, NULL);
 		if (new == NULL)
 			new = &noop_qdisc;
 	}
@@ -1413,14 +1416,15 @@ static void qfq_qlen_notify(struct Qdisc *sch, unsigned long arg)
 	qfq_deactivate_class(q, cl);
 }
 
-static int qfq_init_qdisc(struct Qdisc *sch, struct nlattr *opt)
+static int qfq_init_qdisc(struct Qdisc *sch, struct nlattr *opt,
+			  struct netlink_ext_ack *extack)
 {
 	struct qfq_sched *q = qdisc_priv(sch);
 	struct qfq_group *grp;
 	int i, j, err;
 	u32 max_cl_shift, maxbudg_shift, max_classes;
 
-	err = tcf_block_get(&q->block, &q->filter_list, sch);
+	err = tcf_block_get(&q->block, &q->filter_list, sch, extack);
 	if (err)
 		return err;
 
diff --git a/net/sched/sch_red.c b/net/sched/sch_red.c
index f0747eb8..0af1c12 100644
--- a/net/sched/sch_red.c
+++ b/net/sched/sch_red.c
@@ -157,7 +157,6 @@ static int red_offload(struct Qdisc *sch, bool enable)
 		.handle = sch->handle,
 		.parent = sch->parent,
 	};
-	int err;
 
 	if (!tc_can_offload(dev) || !dev->netdev_ops->ndo_setup_tc)
 		return -EOPNOTSUPP;
@@ -172,14 +171,7 @@ static int red_offload(struct Qdisc *sch, bool enable)
 		opt.command = TC_RED_DESTROY;
 	}
 
-	err = dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_QDISC_RED, &opt);
-
-	if (!err && enable)
-		sch->flags |= TCQ_F_OFFLOADED;
-	else
-		sch->flags &= ~TCQ_F_OFFLOADED;
-
-	return err;
+	return dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_QDISC_RED, &opt);
 }
 
 static void red_destroy(struct Qdisc *sch)
@@ -197,7 +189,8 @@ static const struct nla_policy red_policy[TCA_RED_MAX + 1] = {
 	[TCA_RED_MAX_P] = { .type = NLA_U32 },
 };
 
-static int red_change(struct Qdisc *sch, struct nlattr *opt)
+static int red_change(struct Qdisc *sch, struct nlattr *opt,
+		      struct netlink_ext_ack *extack)
 {
 	struct red_sched_data *q = qdisc_priv(sch);
 	struct nlattr *tb[TCA_RED_MAX + 1];
@@ -224,7 +217,8 @@ static int red_change(struct Qdisc *sch, struct nlattr *opt)
 		return -EINVAL;
 
 	if (ctl->limit > 0) {
-		child = fifo_create_dflt(sch, &bfifo_qdisc_ops, ctl->limit);
+		child = fifo_create_dflt(sch, &bfifo_qdisc_ops, ctl->limit,
+					 extack);
 		if (IS_ERR(child))
 			return PTR_ERR(child);
 	}
@@ -272,14 +266,15 @@ static inline void red_adaptative_timer(struct timer_list *t)
 	spin_unlock(root_lock);
 }
 
-static int red_init(struct Qdisc *sch, struct nlattr *opt)
+static int red_init(struct Qdisc *sch, struct nlattr *opt,
+		    struct netlink_ext_ack *extack)
 {
 	struct red_sched_data *q = qdisc_priv(sch);
 
 	q->qdisc = &noop_qdisc;
 	q->sch = sch;
 	timer_setup(&q->adapt_timer, red_adaptative_timer, 0);
-	return red_change(sch, opt);
+	return red_change(sch, opt, extack);
 }
 
 static int red_dump_offload_stats(struct Qdisc *sch, struct tc_red_qopt *opt)
@@ -294,12 +289,22 @@ static int red_dump_offload_stats(struct Qdisc *sch, struct tc_red_qopt *opt)
 			.stats.qstats = &sch->qstats,
 		},
 	};
+	int err;
 
-	if (!(sch->flags & TCQ_F_OFFLOADED))
+	sch->flags &= ~TCQ_F_OFFLOADED;
+
+	if (!tc_can_offload(dev) || !dev->netdev_ops->ndo_setup_tc)
 		return 0;
 
-	return dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_QDISC_RED,
-					     &hw_stats);
+	err = dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_QDISC_RED,
+					    &hw_stats);
+	if (err == -EOPNOTSUPP)
+		return 0;
+
+	if (!err)
+		sch->flags |= TCQ_F_OFFLOADED;
+
+	return err;
 }
 
 static int red_dump(struct Qdisc *sch, struct sk_buff *skb)
@@ -339,32 +344,24 @@ static int red_dump_stats(struct Qdisc *sch, struct gnet_dump *d)
 {
 	struct red_sched_data *q = qdisc_priv(sch);
 	struct net_device *dev = qdisc_dev(sch);
-	struct tc_red_xstats st = {
-		.early	= q->stats.prob_drop + q->stats.forced_drop,
-		.pdrop	= q->stats.pdrop,
-		.other	= q->stats.other,
-		.marked	= q->stats.prob_mark + q->stats.forced_mark,
-	};
+	struct tc_red_xstats st = {0};
 
 	if (sch->flags & TCQ_F_OFFLOADED) {
-		struct red_stats hw_stats = {0};
 		struct tc_red_qopt_offload hw_stats_request = {
 			.command = TC_RED_XSTATS,
 			.handle = sch->handle,
 			.parent = sch->parent,
 			{
-				.xstats = &hw_stats,
+				.xstats = &q->stats,
 			},
 		};
-		if (!dev->netdev_ops->ndo_setup_tc(dev,
-						   TC_SETUP_QDISC_RED,
-						   &hw_stats_request)) {
-			st.early += hw_stats.prob_drop + hw_stats.forced_drop;
-			st.pdrop += hw_stats.pdrop;
-			st.other += hw_stats.other;
-			st.marked += hw_stats.prob_mark + hw_stats.forced_mark;
-		}
+		dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_QDISC_RED,
+					      &hw_stats_request);
 	}
+	st.early = q->stats.prob_drop + q->stats.forced_drop;
+	st.pdrop = q->stats.pdrop;
+	st.other = q->stats.other;
+	st.marked = q->stats.prob_mark + q->stats.forced_mark;
 
 	return gnet_stats_copy_app(d, &st, sizeof(st));
 }
@@ -380,7 +377,7 @@ static int red_dump_class(struct Qdisc *sch, unsigned long cl,
 }
 
 static int red_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
-		     struct Qdisc **old)
+		     struct Qdisc **old, struct netlink_ext_ack *extack)
 {
 	struct red_sched_data *q = qdisc_priv(sch);
 
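The sch_red rework above also changes when TCQ_F_OFFLOADED is decided: instead of caching the driver's answer at configuration time, the flag is recomputed whenever offload stats are dumped, and -EOPNOTSUPP now means "stay in software" rather than failure. Restating the new red_dump_offload_stats() flow with the decision points annotated (a sketch of the logic, not additional kernel code):

    sch->flags &= ~TCQ_F_OFFLOADED;                 /* assume software */

    if (!tc_can_offload(dev) || !dev->netdev_ops->ndo_setup_tc)
            return 0;                               /* no offload path: fine */

    err = dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_QDISC_RED, &hw_stats);
    if (err == -EOPNOTSUPP)
            return 0;                               /* driver declined: fine */
    if (!err)
            sch->flags |= TCQ_F_OFFLOADED;          /* hardware confirmed */
    return err;                                     /* genuine failure */
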
diff --git a/net/sched/sch_sfb.c b/net/sched/sch_sfb.c
index 0678deb..7cbdad8 100644
--- a/net/sched/sch_sfb.c
+++ b/net/sched/sch_sfb.c
@@ -488,7 +488,8 @@ static const struct tc_sfb_qopt sfb_default_ops = {
 	.penalty_burst = 20,
 };
 
-static int sfb_change(struct Qdisc *sch, struct nlattr *opt)
+static int sfb_change(struct Qdisc *sch, struct nlattr *opt,
+		      struct netlink_ext_ack *extack)
 {
 	struct sfb_sched_data *q = qdisc_priv(sch);
 	struct Qdisc *child;
@@ -512,7 +513,7 @@ static int sfb_change(struct Qdisc *sch, struct nlattr *opt)
 	if (limit == 0)
 		limit = qdisc_dev(sch)->tx_queue_len;
 
-	child = fifo_create_dflt(sch, &pfifo_qdisc_ops, limit);
+	child = fifo_create_dflt(sch, &pfifo_qdisc_ops, limit, extack);
 	if (IS_ERR(child))
 		return PTR_ERR(child);
 
@@ -549,17 +550,18 @@ static int sfb_change(struct Qdisc *sch, struct nlattr *opt)
 	return 0;
 }
 
-static int sfb_init(struct Qdisc *sch, struct nlattr *opt)
+static int sfb_init(struct Qdisc *sch, struct nlattr *opt,
+		    struct netlink_ext_ack *extack)
 {
 	struct sfb_sched_data *q = qdisc_priv(sch);
 	int err;
 
-	err = tcf_block_get(&q->block, &q->filter_list, sch);
+	err = tcf_block_get(&q->block, &q->filter_list, sch, extack);
 	if (err)
 		return err;
 
 	q->qdisc = &noop_qdisc;
-	return sfb_change(sch, opt);
+	return sfb_change(sch, opt, extack);
 }
 
 static int sfb_dump(struct Qdisc *sch, struct sk_buff *skb)
@@ -615,7 +617,7 @@ static int sfb_dump_class(struct Qdisc *sch, unsigned long cl,
 }
 
 static int sfb_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
-		     struct Qdisc **old)
+		     struct Qdisc **old, struct netlink_ext_ack *extack)
 {
 	struct sfb_sched_data *q = qdisc_priv(sch);
 
@@ -643,7 +645,8 @@ static void sfb_unbind(struct Qdisc *sch, unsigned long arg)
 }
 
 static int sfb_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
-			    struct nlattr **tca, unsigned long *arg)
+			    struct nlattr **tca, unsigned long *arg,
+			    struct netlink_ext_ack *extack)
 {
 	return -ENOSYS;
 }
@@ -665,7 +668,8 @@ static void sfb_walk(struct Qdisc *sch, struct qdisc_walker *walker)
 	}
 }
 
-static struct tcf_block *sfb_tcf_block(struct Qdisc *sch, unsigned long cl)
+static struct tcf_block *sfb_tcf_block(struct Qdisc *sch, unsigned long cl,
+				       struct netlink_ext_ack *extack)
 {
 	struct sfb_sched_data *q = qdisc_priv(sch);
 
diff --git a/net/sched/sch_sfq.c b/net/sched/sch_sfq.c
index 930e5bd..2f26781 100644
--- a/net/sched/sch_sfq.c
+++ b/net/sched/sch_sfq.c
@@ -721,7 +721,8 @@ static void sfq_destroy(struct Qdisc *sch)
 	kfree(q->red_parms);
 }
 
-static int sfq_init(struct Qdisc *sch, struct nlattr *opt)
+static int sfq_init(struct Qdisc *sch, struct nlattr *opt,
+		    struct netlink_ext_ack *extack)
 {
 	struct sfq_sched_data *q = qdisc_priv(sch);
 	int i;
@@ -730,7 +731,7 @@ static int sfq_init(struct Qdisc *sch, struct nlattr *opt)
 	q->sch = sch;
 	timer_setup(&q->perturb_timer, sfq_perturbation, TIMER_DEFERRABLE);
 
-	err = tcf_block_get(&q->block, &q->filter_list, sch);
+	err = tcf_block_get(&q->block, &q->filter_list, sch, extack);
 	if (err)
 		return err;
 
@@ -836,7 +837,8 @@ static void sfq_unbind(struct Qdisc *q, unsigned long cl)
 {
 }
 
-static struct tcf_block *sfq_tcf_block(struct Qdisc *sch, unsigned long cl)
+static struct tcf_block *sfq_tcf_block(struct Qdisc *sch, unsigned long cl,
+				       struct netlink_ext_ack *extack)
 {
 	struct sfq_sched_data *q = qdisc_priv(sch);
 
diff --git a/net/sched/sch_tbf.c b/net/sched/sch_tbf.c
index 120f4f3..83e76d0 100644
--- a/net/sched/sch_tbf.c
+++ b/net/sched/sch_tbf.c
@@ -302,7 +302,8 @@ static const struct nla_policy tbf_policy[TCA_TBF_MAX + 1] = {
 	[TCA_TBF_PBURST] = { .type = NLA_U32 },
 };
 
-static int tbf_change(struct Qdisc *sch, struct nlattr *opt)
+static int tbf_change(struct Qdisc *sch, struct nlattr *opt,
+		      struct netlink_ext_ack *extack)
 {
 	int err;
 	struct tbf_sched_data *q = qdisc_priv(sch);
@@ -326,11 +327,13 @@ static int tbf_change(struct Qdisc *sch, struct nlattr *opt)
 	qopt = nla_data(tb[TCA_TBF_PARMS]);
 	if (qopt->rate.linklayer == TC_LINKLAYER_UNAWARE)
 		qdisc_put_rtab(qdisc_get_rtab(&qopt->rate,
-					      tb[TCA_TBF_RTAB]));
+					      tb[TCA_TBF_RTAB],
+					      NULL));
 
 	if (qopt->peakrate.linklayer == TC_LINKLAYER_UNAWARE)
 			qdisc_put_rtab(qdisc_get_rtab(&qopt->peakrate,
-						      tb[TCA_TBF_PTAB]));
+						      tb[TCA_TBF_PTAB],
+						      NULL));
 
 	buffer = min_t(u64, PSCHED_TICKS2NS(qopt->buffer), ~0U);
 	mtu = min_t(u64, PSCHED_TICKS2NS(qopt->mtu), ~0U);
@@ -383,7 +386,8 @@ static int tbf_change(struct Qdisc *sch, struct nlattr *opt)
 		if (err)
 			goto done;
 	} else if (qopt->limit > 0) {
-		child = fifo_create_dflt(sch, &bfifo_qdisc_ops, qopt->limit);
+		child = fifo_create_dflt(sch, &bfifo_qdisc_ops, qopt->limit,
+					 extack);
 		if (IS_ERR(child)) {
 			err = PTR_ERR(child);
 			goto done;
@@ -421,19 +425,20 @@ static int tbf_change(struct Qdisc *sch, struct nlattr *opt)
 	return err;
 }
 
-static int tbf_init(struct Qdisc *sch, struct nlattr *opt)
+static int tbf_init(struct Qdisc *sch, struct nlattr *opt,
+		    struct netlink_ext_ack *extack)
 {
 	struct tbf_sched_data *q = qdisc_priv(sch);
 
 	qdisc_watchdog_init(&q->watchdog, sch);
 	q->qdisc = &noop_qdisc;
 
-	if (opt == NULL)
+	if (!opt)
 		return -EINVAL;
 
 	q->t_c = ktime_get_ns();
 
-	return tbf_change(sch, opt);
+	return tbf_change(sch, opt, extack);
 }
 
 static void tbf_destroy(struct Qdisc *sch)
@@ -494,7 +499,7 @@ static int tbf_dump_class(struct Qdisc *sch, unsigned long cl,
 }
 
 static int tbf_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
-		     struct Qdisc **old)
+		     struct Qdisc **old, struct netlink_ext_ack *extack)
 {
 	struct tbf_sched_data *q = qdisc_priv(sch);
 
diff --git a/net/sched/sch_teql.c b/net/sched/sch_teql.c
index 9fe6b42..93f04cf 100644
--- a/net/sched/sch_teql.c
+++ b/net/sched/sch_teql.c
@@ -167,7 +167,8 @@ teql_destroy(struct Qdisc *sch)
 	}
 }
 
-static int teql_qdisc_init(struct Qdisc *sch, struct nlattr *opt)
+static int teql_qdisc_init(struct Qdisc *sch, struct nlattr *opt,
+			   struct netlink_ext_ack *extack)
 {
 	struct net_device *dev = qdisc_dev(sch);
 	struct teql_master *m = (struct teql_master *)sch->ops;
diff --git a/net/sctp/Kconfig b/net/sctp/Kconfig
index d9c04dc..c740b18 100644
--- a/net/sctp/Kconfig
+++ b/net/sctp/Kconfig
@@ -37,18 +37,6 @@
 
 if IP_SCTP
 
-config NET_SCTPPROBE
-	tristate "SCTP: Association probing"
-        depends on PROC_FS && KPROBES
-        ---help---
-        This module allows for capturing the changes to SCTP association
-        state in response to incoming packets. It is used for debugging
-        SCTP congestion control algorithms. If you don't understand
-        what was just said, you don't need it: say N.
-
-        To compile this code as a module, choose M here: the
-        module will be called sctp_probe.
-
 config SCTP_DBG_OBJCNT
 	bool "SCTP: Debug object counts"
 	depends on PROC_FS
diff --git a/net/sctp/Makefile b/net/sctp/Makefile
index 1ca84a2..6776582 100644
--- a/net/sctp/Makefile
+++ b/net/sctp/Makefile
@@ -4,7 +4,6 @@
 #
 
 obj-$(CONFIG_IP_SCTP) += sctp.o
-obj-$(CONFIG_NET_SCTPPROBE) += sctp_probe.o
 obj-$(CONFIG_INET_SCTP_DIAG) += sctp_diag.o
 
 sctp-y := sm_statetable.o sm_statefuns.o sm_sideeffect.o \
@@ -14,9 +13,7 @@
 	  tsnmap.o bind_addr.o socket.o primitive.o \
 	  output.o input.o debug.o stream.o auth.o \
 	  offload.o stream_sched.o stream_sched_prio.o \
-	  stream_sched_rr.o
-
-sctp_probe-y := probe.o
+	  stream_sched_rr.o stream_interleave.o
 
 sctp-$(CONFIG_SCTP_DBG_OBJCNT) += objcnt.o
 sctp-$(CONFIG_PROC_FS) += proc.o
diff --git a/net/sctp/associola.c b/net/sctp/associola.c
index 69394f4..837806d 100644
--- a/net/sctp/associola.c
+++ b/net/sctp/associola.c
@@ -861,7 +861,7 @@ void sctp_assoc_control_transport(struct sctp_association *asoc,
 		event = sctp_ulpevent_make_peer_addr_change(asoc, &addr,
 					0, spc_state, error, GFP_ATOMIC);
 		if (event)
-			sctp_ulpq_tail_event(&asoc->ulpq, event);
+			asoc->stream.si->enqueue_event(&asoc->ulpq, event);
 	}
 
 	/* Select new active and retran paths. */
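The asoc->stream.si-> calls that start here and recur through the rest of the SCTP changes dispatch through a per-association ops table, selected by sctp_stream_interleave_init() according to whether I-DATA was negotiated. A hedged sketch of the table's shape, with field names and prototypes inferred from the call sites in this diff rather than copied from the header:

    struct sctp_stream_interleave {
            __u16   data_chunk_len;         /* DATA vs I-DATA chunk size */
            __u16   ftsn_chunk_len;         /* FWD-TSN vs I-FWD-TSN size */
            /* DATA chunk construction and sequencing (SSN vs MID) */
            struct sctp_chunk *(*make_datafrag)(const struct sctp_association *,
                                                const struct sctp_sndrcvinfo *,
                                                int len, __u8 flags, gfp_t gfp);
            void    (*assign_number)(struct sctp_chunk *);
            bool    (*validate_data)(struct sctp_chunk *);
            /* delivery into the ULP queues */
            int     (*ulpevent_data)(struct sctp_ulpq *,
                                     struct sctp_chunk *, gfp_t);
            int     (*enqueue_event)(struct sctp_ulpq *,
                                     struct sctp_ulpevent *);
            void    (*renege_events)(struct sctp_ulpq *,
                                     struct sctp_chunk *, gfp_t);
            void    (*start_pd)(struct sctp_ulpq *, gfp_t);
            void    (*abort_pd)(struct sctp_ulpq *, gfp_t);
            /* FORWARD-TSN vs I-FORWARD-TSN */
            void    (*generate_ftsn)(struct sctp_outq *, __u32);
            bool    (*validate_ftsn)(struct sctp_chunk *);
            void    (*report_ftsn)(struct sctp_ulpq *, __u32);
            void    (*handle_ftsn)(struct sctp_ulpq *, struct sctp_chunk *);
    };

Common code then makes one call, e.g. asoc->stream.si->enqueue_event(&asoc->ulpq, event), and gets either the RFC 4960 or the RFC 8260 behaviour without branching at every site.
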
diff --git a/net/sctp/chunk.c b/net/sctp/chunk.c
index 7f8baa4..991a530 100644
--- a/net/sctp/chunk.c
+++ b/net/sctp/chunk.c
@@ -124,7 +124,7 @@ static void sctp_datamsg_destroy(struct sctp_datamsg *msg)
 			ev = sctp_ulpevent_make_send_failed(asoc, chunk, sent,
 							    error, GFP_ATOMIC);
 			if (ev)
-				sctp_ulpq_tail_event(&asoc->ulpq, ev);
+				asoc->stream.si->enqueue_event(&asoc->ulpq, ev);
 		}
 
 		sctp_chunk_put(chunk);
@@ -191,7 +191,7 @@ struct sctp_datamsg *sctp_datamsg_from_user(struct sctp_association *asoc,
 	 */
 	max_data = asoc->pathmtu -
 		   sctp_sk(asoc->base.sk)->pf->af->net_header_len -
-		   sizeof(struct sctphdr) - sizeof(struct sctp_data_chunk);
+		   sizeof(struct sctphdr) - sctp_datachk_len(&asoc->stream);
 	max_data = SCTP_TRUNC4(max_data);
 
 	/* If the peer requested that we authenticate DATA chunks
@@ -264,8 +264,8 @@ struct sctp_datamsg *sctp_datamsg_from_user(struct sctp_association *asoc,
 				frag |= SCTP_DATA_SACK_IMM;
 		}
 
-		chunk = sctp_make_datafrag_empty(asoc, sinfo, len, frag,
-						 0, GFP_KERNEL);
+		chunk = asoc->stream.si->make_datafrag(asoc, sinfo, len, frag,
+						       GFP_KERNEL);
 		if (!chunk) {
 			err = -ENOMEM;
 			goto errout;
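The sizeof(struct sctp_data_chunk) -> sctp_datachk_len() substitution above matters because the two chunk headers differ in size: a DATA chunk header is 16 bytes, an I-DATA header 20 (the extra 32-bit MID). A small self-contained userspace illustration of the resulting per-fragment payload ceiling, using an assumed 1500-byte IPv4 path MTU:

    #include <stdio.h>

    int main(void)
    {
            unsigned int pathmtu = 1500, ip_hdr = 20, sctp_hdr = 12;
            unsigned int data_chunk = 16, idata_chunk = 20;

            /* SCTP_TRUNC4: round down to a multiple of four */
            printf("DATA   max payload: %u\n",
                   (pathmtu - ip_hdr - sctp_hdr - data_chunk) & ~3u);
            printf("I-DATA max payload: %u\n",
                   (pathmtu - ip_hdr - sctp_hdr - idata_chunk) & ~3u);
            return 0;
    }
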
diff --git a/net/sctp/endpointola.c b/net/sctp/endpointola.c
index ee1e601..8b31468 100644
--- a/net/sctp/endpointola.c
+++ b/net/sctp/endpointola.c
@@ -232,7 +232,7 @@ void sctp_endpoint_free(struct sctp_endpoint *ep)
 {
 	ep->base.dead = true;
 
-	ep->base.sk->sk_state = SCTP_SS_CLOSED;
+	inet_sk_set_state(ep->base.sk, SCTP_SS_CLOSED);
 
 	/* Unlink this endpoint, so we can't find it again! */
 	sctp_unhash_endpoint(ep);
diff --git a/net/sctp/output.c b/net/sctp/output.c
index 4a865cd..01a26ee0 100644
--- a/net/sctp/output.c
+++ b/net/sctp/output.c
@@ -313,6 +313,7 @@ static enum sctp_xmit __sctp_packet_append_chunk(struct sctp_packet *packet,
 	/* We believe that this chunk is OK to add to the packet */
 	switch (chunk->chunk_hdr->type) {
 	case SCTP_CID_DATA:
+	case SCTP_CID_I_DATA:
 		/* Account for the data being in the packet */
 		sctp_packet_append_data(packet, chunk);
 		/* Disallow SACK bundling after DATA. */
@@ -724,7 +725,7 @@ static enum sctp_xmit sctp_packet_can_append_data(struct sctp_packet *packet,
 	 * or delay in hopes of bundling a full sized packet.
 	 */
 	if (chunk->skb->len + q->out_qlen > transport->pathmtu -
-		packet->overhead - sizeof(struct sctp_data_chunk) - 4)
+	    packet->overhead - sctp_datachk_len(&chunk->asoc->stream) - 4)
 		/* Enough data queued to fill a packet */
 		return SCTP_XMIT_OK;
 
@@ -759,7 +760,7 @@ static void sctp_packet_append_data(struct sctp_packet *packet,
 
 	asoc->peer.rwnd = rwnd;
 	sctp_chunk_assign_tsn(chunk);
-	sctp_chunk_assign_ssn(chunk);
+	asoc->stream.si->assign_number(chunk);
 }
 
 static enum sctp_xmit sctp_packet_will_fit(struct sctp_packet *packet,
diff --git a/net/sctp/outqueue.c b/net/sctp/outqueue.c
index 7d67fee..af9b5eb 100644
--- a/net/sctp/outqueue.c
+++ b/net/sctp/outqueue.c
@@ -67,8 +67,6 @@ static void sctp_mark_missing(struct sctp_outq *q,
 			      __u32 highest_new_tsn,
 			      int count_of_newacks);
 
-static void sctp_generate_fwdtsn(struct sctp_outq *q, __u32 sack_ctsn);
-
 static void sctp_outq_flush(struct sctp_outq *q, int rtx_timeout, gfp_t gfp);
 
 /* Add data to the front of the queue. */
@@ -591,7 +589,7 @@ void sctp_retransmit(struct sctp_outq *q, struct sctp_transport *transport,
 	 * following the procedures outlined in C1 - C5.
 	 */
 	if (reason == SCTP_RTXR_T3_RTX)
-		sctp_generate_fwdtsn(q, q->asoc->ctsn_ack_point);
+		q->asoc->stream.si->generate_ftsn(q, q->asoc->ctsn_ack_point);
 
 	/* Flush the queues only on timeout, since fast_rtx is only
 	 * triggered during sack processing and the queue
@@ -942,6 +940,7 @@ static void sctp_outq_flush(struct sctp_outq *q, int rtx_timeout, gfp_t gfp)
 		case SCTP_CID_ECN_ECNE:
 		case SCTP_CID_ASCONF:
 		case SCTP_CID_FWD_TSN:
+		case SCTP_CID_I_FWD_TSN:
 		case SCTP_CID_RECONF:
 			status = sctp_packet_transmit_chunk(packet, chunk,
 							    one_packet, gfp);
@@ -956,7 +955,8 @@ static void sctp_outq_flush(struct sctp_outq *q, int rtx_timeout, gfp_t gfp)
 			 * sender MUST assure that at least one T3-rtx
 			 * timer is running.
 			 */
-			if (chunk->chunk_hdr->type == SCTP_CID_FWD_TSN) {
+			if (chunk->chunk_hdr->type == SCTP_CID_FWD_TSN ||
+			    chunk->chunk_hdr->type == SCTP_CID_I_FWD_TSN) {
 				sctp_transport_reset_t3_rtx(transport);
 				transport->last_time_sent = jiffies;
 			}
@@ -1372,7 +1372,7 @@ int sctp_outq_sack(struct sctp_outq *q, struct sctp_chunk *chunk)
 
 	asoc->peer.rwnd = sack_a_rwnd;
 
-	sctp_generate_fwdtsn(q, sack_ctsn);
+	asoc->stream.si->generate_ftsn(q, sack_ctsn);
 
 	pr_debug("%s: sack cumulative tsn ack:0x%x\n", __func__, sack_ctsn);
 	pr_debug("%s: cumulative tsn ack of assoc:%p is 0x%x, "
@@ -1795,7 +1795,7 @@ static inline int sctp_get_skip_pos(struct sctp_fwdtsn_skip *skiplist,
 }
 
 /* Create and add a fwdtsn chunk to the outq's control queue if needed. */
-static void sctp_generate_fwdtsn(struct sctp_outq *q, __u32 ctsn)
+void sctp_generate_fwdtsn(struct sctp_outq *q, __u32 ctsn)
 {
 	struct sctp_association *asoc = q->asoc;
 	struct sctp_chunk *ftsn_chunk = NULL;
diff --git a/net/sctp/probe.c b/net/sctp/probe.c
deleted file mode 100644
index 1280f85..0000000
--- a/net/sctp/probe.c
+++ /dev/null
@@ -1,244 +0,0 @@
-/*
- * sctp_probe - Observe the SCTP flow with kprobes.
- *
- * The idea for this came from Werner Almesberger's umlsim
- * Copyright (C) 2004, Stephen Hemminger <shemminger@osdl.org>
- *
- * Modified for SCTP from Stephen Hemminger's code
- * Copyright (C) 2010, Wei Yongjun <yjwei@cn.fujitsu.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- */
-
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#include <linux/kernel.h>
-#include <linux/kprobes.h>
-#include <linux/socket.h>
-#include <linux/sctp.h>
-#include <linux/proc_fs.h>
-#include <linux/vmalloc.h>
-#include <linux/module.h>
-#include <linux/kfifo.h>
-#include <linux/time.h>
-#include <net/net_namespace.h>
-
-#include <net/sctp/sctp.h>
-#include <net/sctp/sm.h>
-
-MODULE_SOFTDEP("pre: sctp");
-MODULE_AUTHOR("Wei Yongjun <yjwei@cn.fujitsu.com>");
-MODULE_DESCRIPTION("SCTP snooper");
-MODULE_LICENSE("GPL");
-
-static int port __read_mostly = 0;
-MODULE_PARM_DESC(port, "Port to match (0=all)");
-module_param(port, int, 0);
-
-static unsigned int fwmark __read_mostly = 0;
-MODULE_PARM_DESC(fwmark, "skb mark to match (0=no mark)");
-module_param(fwmark, uint, 0);
-
-static int bufsize __read_mostly = 64 * 1024;
-MODULE_PARM_DESC(bufsize, "Log buffer size (default 64k)");
-module_param(bufsize, int, 0);
-
-static int full __read_mostly = 1;
-MODULE_PARM_DESC(full, "Full log (1=every ack packet received,  0=only cwnd changes)");
-module_param(full, int, 0);
-
-static const char procname[] = "sctpprobe";
-
-static struct {
-	struct kfifo	  fifo;
-	spinlock_t	  lock;
-	wait_queue_head_t wait;
-	struct timespec64 tstart;
-} sctpw;
-
-static __printf(1, 2) void printl(const char *fmt, ...)
-{
-	va_list args;
-	int len;
-	char tbuf[256];
-
-	va_start(args, fmt);
-	len = vscnprintf(tbuf, sizeof(tbuf), fmt, args);
-	va_end(args);
-
-	kfifo_in_locked(&sctpw.fifo, tbuf, len, &sctpw.lock);
-	wake_up(&sctpw.wait);
-}
-
-static int sctpprobe_open(struct inode *inode, struct file *file)
-{
-	kfifo_reset(&sctpw.fifo);
-	ktime_get_ts64(&sctpw.tstart);
-
-	return 0;
-}
-
-static ssize_t sctpprobe_read(struct file *file, char __user *buf,
-			      size_t len, loff_t *ppos)
-{
-	int error = 0, cnt = 0;
-	unsigned char *tbuf;
-
-	if (!buf)
-		return -EINVAL;
-
-	if (len == 0)
-		return 0;
-
-	tbuf = vmalloc(len);
-	if (!tbuf)
-		return -ENOMEM;
-
-	error = wait_event_interruptible(sctpw.wait,
-					 kfifo_len(&sctpw.fifo) != 0);
-	if (error)
-		goto out_free;
-
-	cnt = kfifo_out_locked(&sctpw.fifo, tbuf, len, &sctpw.lock);
-	error = copy_to_user(buf, tbuf, cnt) ? -EFAULT : 0;
-
-out_free:
-	vfree(tbuf);
-
-	return error ? error : cnt;
-}
-
-static const struct file_operations sctpprobe_fops = {
-	.owner	= THIS_MODULE,
-	.open	= sctpprobe_open,
-	.read	= sctpprobe_read,
-	.llseek = noop_llseek,
-};
-
-static enum sctp_disposition jsctp_sf_eat_sack(
-					struct net *net,
-					const struct sctp_endpoint *ep,
-					const struct sctp_association *asoc,
-					const union sctp_subtype type,
-					void *arg,
-					struct sctp_cmd_seq *commands)
-{
-	struct sctp_chunk *chunk = arg;
-	struct sk_buff *skb = chunk->skb;
-	struct sctp_transport *sp;
-	static __u32 lcwnd = 0;
-	struct timespec64 now;
-
-	sp = asoc->peer.primary_path;
-
-	if (((port == 0 && fwmark == 0) ||
-	     asoc->peer.port == port ||
-	     ep->base.bind_addr.port == port ||
-	     (fwmark > 0 && skb->mark == fwmark)) &&
-	    (full || sp->cwnd != lcwnd)) {
-		lcwnd = sp->cwnd;
-
-		ktime_get_ts64(&now);
-		now = timespec64_sub(now, sctpw.tstart);
-
-		printl("%lu.%06lu ", (unsigned long) now.tv_sec,
-		       (unsigned long) now.tv_nsec / NSEC_PER_USEC);
-
-		printl("%p %5d %5d %5d %8d %5d ", asoc,
-		       ep->base.bind_addr.port, asoc->peer.port,
-		       asoc->pathmtu, asoc->peer.rwnd, asoc->unack_data);
-
-		list_for_each_entry(sp, &asoc->peer.transport_addr_list,
-					transports) {
-			if (sp == asoc->peer.primary_path)
-				printl("*");
-
-			printl("%pISc %2u %8u %8u %8u %8u %8u ",
-			       &sp->ipaddr, sp->state, sp->cwnd, sp->ssthresh,
-			       sp->flight_size, sp->partial_bytes_acked,
-			       sp->pathmtu);
-		}
-		printl("\n");
-	}
-
-	jprobe_return();
-	return 0;
-}
-
-static struct jprobe sctp_recv_probe = {
-	.kp	= {
-		.symbol_name = "sctp_sf_eat_sack_6_2",
-	},
-	.entry	= jsctp_sf_eat_sack,
-};
-
-static __init int sctp_setup_jprobe(void)
-{
-	int ret = register_jprobe(&sctp_recv_probe);
-
-	if (ret) {
-		if (request_module("sctp"))
-			goto out;
-		ret = register_jprobe(&sctp_recv_probe);
-	}
-
-out:
-	return ret;
-}
-
-static __init int sctpprobe_init(void)
-{
-	int ret = -ENOMEM;
-
-	/* Warning: if the function signature of sctp_sf_eat_sack_6_2,
-	 * has been changed, you also have to change the signature of
-	 * jsctp_sf_eat_sack, otherwise you end up right here!
-	 */
-	BUILD_BUG_ON(__same_type(sctp_sf_eat_sack_6_2,
-				 jsctp_sf_eat_sack) == 0);
-
-	init_waitqueue_head(&sctpw.wait);
-	spin_lock_init(&sctpw.lock);
-	if (kfifo_alloc(&sctpw.fifo, bufsize, GFP_KERNEL))
-		return ret;
-
-	if (!proc_create(procname, S_IRUSR, init_net.proc_net,
-			 &sctpprobe_fops))
-		goto free_kfifo;
-
-	ret = sctp_setup_jprobe();
-	if (ret)
-		goto remove_proc;
-
-	pr_info("probe registered (port=%d/fwmark=%u) bufsize=%u\n",
-		port, fwmark, bufsize);
-	return 0;
-
-remove_proc:
-	remove_proc_entry(procname, init_net.proc_net);
-free_kfifo:
-	kfifo_free(&sctpw.fifo);
-	return ret;
-}
-
-static __exit void sctpprobe_exit(void)
-{
-	kfifo_free(&sctpw.fifo);
-	remove_proc_entry(procname, init_net.proc_net);
-	unregister_jprobe(&sctp_recv_probe);
-}
-
-module_init(sctpprobe_init);
-module_exit(sctpprobe_exit);
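The sctp_probe module removed above relied on jprobes, which are on their way out of the kernel; its replacement is the static tracepoint added in sm_statefuns.c below (CREATE_TRACE_POINTS plus a trace_sctp_probe() call in sctp_sf_eat_sack_6_2). For readers unfamiliar with the mechanism, a TRACE_EVENT skeleton has this shape; the fields here are illustrative, not the actual contents of trace/events/sctp.h:

    /* Lives in a trace header and is instantiated exactly once by a
     * compilation unit that defines CREATE_TRACE_POINTS first. */
    #include <linux/tracepoint.h>

    TRACE_EVENT(example_sctp_probe,
            TP_PROTO(__u32 cwnd, __u16 sport),
            TP_ARGS(cwnd, sport),
            TP_STRUCT__entry(
                    __field(__u32, cwnd)
                    __field(__u16, sport)
            ),
            TP_fast_assign(
                    __entry->cwnd  = cwnd;
                    __entry->sport = sport;
            ),
            TP_printk("sport=%u cwnd=%u", __entry->sport, __entry->cwnd)
    );
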
diff --git a/net/sctp/proc.c b/net/sctp/proc.c
index 26b4be6..4545bc2 100644
--- a/net/sctp/proc.c
+++ b/net/sctp/proc.c
@@ -288,12 +288,8 @@ struct sctp_ht_iter {
 static void *sctp_transport_seq_start(struct seq_file *seq, loff_t *pos)
 {
 	struct sctp_ht_iter *iter = seq->private;
-	int err = sctp_transport_walk_start(&iter->hti);
 
-	if (err) {
-		iter->start_fail = 1;
-		return ERR_PTR(err);
-	}
+	sctp_transport_walk_start(&iter->hti);
 
 	iter->start_fail = 0;
 	return sctp_transport_get_idx(seq_file_net(seq), &iter->hti, *pos);
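sctp_transport_walk_start() can lose its return value because rhashtable_walk_start() no longer reports an error: the only thing it could return was -EAGAIN, which every caller already has to handle from rhashtable_walk_next() anyway. A sketch of the canonical walk loop under that contract (illustrative, not taken from this diff):

    #include <linux/err.h>
    #include <linux/rhashtable.h>

    static void example_walk(struct rhltable *ht)
    {
            struct rhashtable_iter iter;
            void *obj;

            rhltable_walk_enter(ht, &iter);
            rhashtable_walk_start(&iter);

            while ((obj = rhashtable_walk_next(&iter)) != NULL) {
                    if (IS_ERR(obj)) {
                            if (PTR_ERR(obj) == -EAGAIN)
                                    continue;   /* table resized mid-walk */
                            break;
                    }
                    /* ... use obj ... */
            }

            rhashtable_walk_stop(&iter);
            rhashtable_walk_exit(&iter);
    }
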
diff --git a/net/sctp/sm_make_chunk.c b/net/sctp/sm_make_chunk.c
index 9bf575f..b9b269c 100644
--- a/net/sctp/sm_make_chunk.c
+++ b/net/sctp/sm_make_chunk.c
@@ -228,7 +228,7 @@ struct sctp_chunk *sctp_make_init(const struct sctp_association *asoc,
 	struct sctp_inithdr init;
 	union sctp_params addrs;
 	struct sctp_sock *sp;
-	__u8 extensions[4];
+	__u8 extensions[5];
 	size_t chunksize;
 	__be16 types[2];
 	int num_ext = 0;
@@ -278,6 +278,11 @@ struct sctp_chunk *sctp_make_init(const struct sctp_association *asoc,
 	if (sp->adaptation_ind)
 		chunksize += sizeof(aiparam);
 
+	if (sp->strm_interleave) {
+		extensions[num_ext] = SCTP_CID_I_DATA;
+		num_ext += 1;
+	}
+
 	chunksize += vparam_len;
 
 	/* Account for AUTH related parameters */
@@ -392,7 +397,7 @@ struct sctp_chunk *sctp_make_init_ack(const struct sctp_association *asoc,
 	struct sctp_inithdr initack;
 	union sctp_params addrs;
 	struct sctp_sock *sp;
-	__u8 extensions[4];
+	__u8 extensions[5];
 	size_t chunksize;
 	int num_ext = 0;
 	int cookie_len;
@@ -442,6 +447,11 @@ struct sctp_chunk *sctp_make_init_ack(const struct sctp_association *asoc,
 	if (sp->adaptation_ind)
 		chunksize += sizeof(aiparam);
 
+	if (asoc->intl_enable) {
+		extensions[num_ext] = SCTP_CID_I_DATA;
+		num_ext += 1;
+	}
+
 	if (asoc->peer.auth_capable) {
 		auth_random = (struct sctp_paramhdr *)asoc->c.auth_random;
 		chunksize += ntohs(auth_random->length);
@@ -711,38 +721,31 @@ struct sctp_chunk *sctp_make_ecne(const struct sctp_association *asoc,
 /* Make a DATA chunk for the given association from the provided
  * parameters.  However, do not populate the data payload.
  */
-struct sctp_chunk *sctp_make_datafrag_empty(struct sctp_association *asoc,
+struct sctp_chunk *sctp_make_datafrag_empty(const struct sctp_association *asoc,
 					    const struct sctp_sndrcvinfo *sinfo,
-					    int data_len, __u8 flags, __u16 ssn,
-					    gfp_t gfp)
+					    int len, __u8 flags, gfp_t gfp)
 {
 	struct sctp_chunk *retval;
 	struct sctp_datahdr dp;
-	int chunk_len;
 
 	/* We assign the TSN as LATE as possible, not here when
 	 * creating the chunk.
 	 */
-	dp.tsn = 0;
+	memset(&dp, 0, sizeof(dp));
+	dp.ppid = sinfo->sinfo_ppid;
 	dp.stream = htons(sinfo->sinfo_stream);
-	dp.ppid   = sinfo->sinfo_ppid;
 
 	/* Set the flags for an unordered send.  */
-	if (sinfo->sinfo_flags & SCTP_UNORDERED) {
+	if (sinfo->sinfo_flags & SCTP_UNORDERED)
 		flags |= SCTP_DATA_UNORDERED;
-		dp.ssn = 0;
-	} else
-		dp.ssn = htons(ssn);
 
-	chunk_len = sizeof(dp) + data_len;
-	retval = sctp_make_data(asoc, flags, chunk_len, gfp);
+	retval = sctp_make_data(asoc, flags, sizeof(dp) + len, gfp);
 	if (!retval)
-		goto nodata;
+		return NULL;
 
 	retval->subh.data_hdr = sctp_addto_chunk(retval, sizeof(dp), &dp);
 	memcpy(&retval->sinfo, sinfo, sizeof(struct sctp_sndrcvinfo));
 
-nodata:
 	return retval;
 }
 
@@ -1415,6 +1418,12 @@ static struct sctp_chunk *sctp_make_data(const struct sctp_association *asoc,
 	return _sctp_make_chunk(asoc, SCTP_CID_DATA, flags, paylen, gfp);
 }
 
+struct sctp_chunk *sctp_make_idata(const struct sctp_association *asoc,
+				   __u8 flags, int paylen, gfp_t gfp)
+{
+	return _sctp_make_chunk(asoc, SCTP_CID_I_DATA, flags, paylen, gfp);
+}
+
 static struct sctp_chunk *sctp_make_control(const struct sctp_association *asoc,
 					    __u8 type, __u8 flags, int paylen,
 					    gfp_t gfp)
@@ -2032,6 +2041,10 @@ static void sctp_process_ext_param(struct sctp_association *asoc,
 			if (net->sctp.addip_enable)
 				asoc->peer.asconf_capable = 1;
 			break;
+		case SCTP_CID_I_DATA:
+			if (sctp_sk(asoc->base.sk)->strm_interleave)
+				asoc->intl_enable = 1;
+			break;
 		default:
 			break;
 		}
@@ -3523,6 +3536,30 @@ struct sctp_chunk *sctp_make_fwdtsn(const struct sctp_association *asoc,
 	return retval;
 }
 
+struct sctp_chunk *sctp_make_ifwdtsn(const struct sctp_association *asoc,
+				     __u32 new_cum_tsn, size_t nstreams,
+				     struct sctp_ifwdtsn_skip *skiplist)
+{
+	struct sctp_chunk *retval = NULL;
+	struct sctp_ifwdtsn_hdr ftsn_hdr;
+	size_t hint;
+
+	hint = (nstreams + 1) * sizeof(__u32);
+
+	retval = sctp_make_control(asoc, SCTP_CID_I_FWD_TSN, 0, hint,
+				   GFP_ATOMIC);
+	if (!retval)
+		return NULL;
+
+	ftsn_hdr.new_cum_tsn = htonl(new_cum_tsn);
+	retval->subh.ifwdtsn_hdr =
+		sctp_addto_chunk(retval, sizeof(ftsn_hdr), &ftsn_hdr);
+
+	sctp_addto_chunk(retval, nstreams * sizeof(skiplist[0]), skiplist);
+
+	return retval;
+}
+
 /* RE-CONFIG 3.1 (RE-CONFIG chunk)
  *   0                   1                   2                   3
  *   0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
diff --git a/net/sctp/sm_sideeffect.c b/net/sctp/sm_sideeffect.c
index df94d77..b71e7fb 100644
--- a/net/sctp/sm_sideeffect.c
+++ b/net/sctp/sm_sideeffect.c
@@ -632,7 +632,7 @@ static void sctp_cmd_assoc_failed(struct sctp_cmd_seq *commands,
 	struct sctp_chunk *abort;
 
 	/* Cancel any partial delivery in progress. */
-	sctp_ulpq_abort_pd(&asoc->ulpq, GFP_ATOMIC);
+	asoc->stream.si->abort_pd(&asoc->ulpq, GFP_ATOMIC);
 
 	if (event_type == SCTP_EVENT_T_CHUNK && subtype.chunk == SCTP_CID_ABORT)
 		event = sctp_ulpevent_make_assoc_change(asoc, 0, SCTP_COMM_LOST,
@@ -878,12 +878,12 @@ static void sctp_cmd_new_state(struct sctp_cmd_seq *cmds,
 		 * successfully completed a connect() call.
 		 */
 		if (sctp_state(asoc, ESTABLISHED) && sctp_sstate(sk, CLOSED))
-			sk->sk_state = SCTP_SS_ESTABLISHED;
+			inet_sk_set_state(sk, SCTP_SS_ESTABLISHED);
 
 		/* Set the RCV_SHUTDOWN flag when a SHUTDOWN is received. */
 		if (sctp_state(asoc, SHUTDOWN_RECEIVED) &&
 		    sctp_sstate(sk, ESTABLISHED)) {
-			sk->sk_state = SCTP_SS_CLOSING;
+			inet_sk_set_state(sk, SCTP_SS_CLOSING);
 			sk->sk_shutdown |= RCV_SHUTDOWN;
 		}
 	}
@@ -972,7 +972,7 @@ static void sctp_cmd_process_operr(struct sctp_cmd_seq *cmds,
 		if (!ev)
 			return;
 
-		sctp_ulpq_tail_event(&asoc->ulpq, ev);
+		asoc->stream.si->enqueue_event(&asoc->ulpq, ev);
 
 		switch (err_hdr->cause) {
 		case SCTP_ERROR_UNKNOWN_CHUNK:
@@ -1007,18 +1007,6 @@ static void sctp_cmd_process_operr(struct sctp_cmd_seq *cmds,
 	}
 }
 
-/* Process variable FWDTSN chunk information. */
-static void sctp_cmd_process_fwdtsn(struct sctp_ulpq *ulpq,
-				    struct sctp_chunk *chunk)
-{
-	struct sctp_fwdtsn_skip *skip;
-
-	/* Walk through all the skipped SSNs */
-	sctp_walk_fwdtsn(skip, chunk) {
-		sctp_ulpq_skip(ulpq, ntohs(skip->stream), ntohs(skip->ssn));
-	}
-}
-
 /* Helper function to remove the association non-primary peer
  * transports.
  */
@@ -1058,7 +1046,7 @@ static void sctp_cmd_assoc_change(struct sctp_cmd_seq *commands,
 					    asoc->c.sinit_max_instreams,
 					    NULL, GFP_ATOMIC);
 	if (ev)
-		sctp_ulpq_tail_event(&asoc->ulpq, ev);
+		asoc->stream.si->enqueue_event(&asoc->ulpq, ev);
 }
 
 /* Helper function to generate an adaptation indication event */
@@ -1070,7 +1058,7 @@ static void sctp_cmd_adaptation_ind(struct sctp_cmd_seq *commands,
 	ev = sctp_ulpevent_make_adaptation_indication(asoc, GFP_ATOMIC);
 
 	if (ev)
-		sctp_ulpq_tail_event(&asoc->ulpq, ev);
+		asoc->stream.si->enqueue_event(&asoc->ulpq, ev);
 }
 
 
@@ -1368,18 +1356,12 @@ static int sctp_cmd_interpreter(enum sctp_event event_type,
 			break;
 
 		case SCTP_CMD_REPORT_FWDTSN:
-			/* Move the Cumulattive TSN Ack ahead. */
-			sctp_tsnmap_skip(&asoc->peer.tsn_map, cmd->obj.u32);
-
-			/* purge the fragmentation queue */
-			sctp_ulpq_reasm_flushtsn(&asoc->ulpq, cmd->obj.u32);
-
-			/* Abort any in progress partial delivery. */
-			sctp_ulpq_abort_pd(&asoc->ulpq, GFP_ATOMIC);
+			asoc->stream.si->report_ftsn(&asoc->ulpq, cmd->obj.u32);
 			break;
 
 		case SCTP_CMD_PROCESS_FWDTSN:
-			sctp_cmd_process_fwdtsn(&asoc->ulpq, cmd->obj.chunk);
+			asoc->stream.si->handle_ftsn(&asoc->ulpq,
+						     cmd->obj.chunk);
 			break;
 
 		case SCTP_CMD_GEN_SACK:
@@ -1483,8 +1465,9 @@ static int sctp_cmd_interpreter(enum sctp_event event_type,
 			pr_debug("%s: sm_sideff: chunk_up:%p, ulpq:%p\n",
 				 __func__, cmd->obj.chunk, &asoc->ulpq);
 
-			sctp_ulpq_tail_data(&asoc->ulpq, cmd->obj.chunk,
-					    GFP_ATOMIC);
+			asoc->stream.si->ulpevent_data(&asoc->ulpq,
+						       cmd->obj.chunk,
+						       GFP_ATOMIC);
 			break;
 
 		case SCTP_CMD_EVENT_ULP:
@@ -1492,7 +1475,8 @@ static int sctp_cmd_interpreter(enum sctp_event event_type,
 			pr_debug("%s: sm_sideff: event_up:%p, ulpq:%p\n",
 				 __func__, cmd->obj.ulpevent, &asoc->ulpq);
 
-			sctp_ulpq_tail_event(&asoc->ulpq, cmd->obj.ulpevent);
+			asoc->stream.si->enqueue_event(&asoc->ulpq,
+						       cmd->obj.ulpevent);
 			break;
 
 		case SCTP_CMD_REPLY:
@@ -1729,12 +1713,13 @@ static int sctp_cmd_interpreter(enum sctp_event event_type,
 			break;
 
 		case SCTP_CMD_PART_DELIVER:
-			sctp_ulpq_partial_delivery(&asoc->ulpq, GFP_ATOMIC);
+			asoc->stream.si->start_pd(&asoc->ulpq, GFP_ATOMIC);
 			break;
 
 		case SCTP_CMD_RENEGE:
-			sctp_ulpq_renege(&asoc->ulpq, cmd->obj.chunk,
-					 GFP_ATOMIC);
+			asoc->stream.si->renege_events(&asoc->ulpq,
+						       cmd->obj.chunk,
+						       GFP_ATOMIC);
 			break;
 
 		case SCTP_CMD_SETUP_T4:
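The three-step SCTP_CMD_REPORT_FWDTSN side effect deleted above does not go away; it becomes the DATA-path implementation behind si->report_ftsn, while the I-DATA path supplies its own. A hedged reconstruction of the classic hook from the removed lines (the function name is assumed):

    static void example_report_ftsn(struct sctp_ulpq *ulpq, __u32 ftsn)
    {
            /* Move the cumulative TSN ack point ahead. */
            sctp_tsnmap_skip(&ulpq->asoc->peer.tsn_map, ftsn);

            /* Purge fragments the skip has made undeliverable. */
            sctp_ulpq_reasm_flushtsn(ulpq, ftsn);

            /* Abort any partial delivery still in progress. */
            sctp_ulpq_abort_pd(ulpq, GFP_ATOMIC);
    }
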
diff --git a/net/sctp/sm_statefuns.c b/net/sctp/sm_statefuns.c
index 8f8ccde..eb7905f 100644
--- a/net/sctp/sm_statefuns.c
+++ b/net/sctp/sm_statefuns.c
@@ -59,6 +59,9 @@
 #include <net/sctp/sm.h>
 #include <net/sctp/structs.h>
 
+#define CREATE_TRACE_POINTS
+#include <trace/events/sctp.h>
+
 static struct sctp_packet *sctp_abort_pkt_new(
 					struct net *net,
 					const struct sctp_endpoint *ep,
@@ -3013,7 +3016,7 @@ enum sctp_disposition sctp_sf_eat_data_6_2(struct net *net,
 		return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
 	}
 
-	if (!sctp_chunk_length_valid(chunk, sizeof(struct sctp_data_chunk)))
+	if (!sctp_chunk_length_valid(chunk, sctp_datachk_len(&asoc->stream)))
 		return sctp_sf_violation_chunklen(net, ep, asoc, type, arg,
 						  commands);
 
@@ -3034,7 +3037,7 @@ enum sctp_disposition sctp_sf_eat_data_6_2(struct net *net,
 	case SCTP_IERROR_PROTO_VIOLATION:
 		return sctp_sf_abort_violation(net, ep, asoc, chunk, commands,
 					       (u8 *)chunk->subh.data_hdr,
-					       sizeof(struct sctp_datahdr));
+					       sctp_datahdr_len(&asoc->stream));
 	default:
 		BUG();
 	}
@@ -3133,7 +3136,7 @@ enum sctp_disposition sctp_sf_eat_data_fast_4_4(
 		return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
 	}
 
-	if (!sctp_chunk_length_valid(chunk, sizeof(struct sctp_data_chunk)))
+	if (!sctp_chunk_length_valid(chunk, sctp_datachk_len(&asoc->stream)))
 		return sctp_sf_violation_chunklen(net, ep, asoc, type, arg,
 						  commands);
 
@@ -3150,7 +3153,7 @@ enum sctp_disposition sctp_sf_eat_data_fast_4_4(
 	case SCTP_IERROR_PROTO_VIOLATION:
 		return sctp_sf_abort_violation(net, ep, asoc, chunk, commands,
 					       (u8 *)chunk->subh.data_hdr,
-					       sizeof(struct sctp_datahdr));
+					       sctp_datahdr_len(&asoc->stream));
 	default:
 		BUG();
 	}
@@ -3219,6 +3222,8 @@ enum sctp_disposition sctp_sf_eat_sack_6_2(struct net *net,
 	struct sctp_sackhdr *sackh;
 	__u32 ctsn;
 
+	trace_sctp_probe(ep, asoc, chunk);
+
 	if (!sctp_vtag_verify(chunk, asoc))
 		return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
 
@@ -3957,7 +3962,6 @@ enum sctp_disposition sctp_sf_eat_fwd_tsn(struct net *net,
 {
 	struct sctp_fwdtsn_hdr *fwdtsn_hdr;
 	struct sctp_chunk *chunk = arg;
-	struct sctp_fwdtsn_skip *skip;
 	__u16 len;
 	__u32 tsn;
 
@@ -3971,7 +3975,7 @@ enum sctp_disposition sctp_sf_eat_fwd_tsn(struct net *net,
 		return sctp_sf_unk_chunk(net, ep, asoc, type, arg, commands);
 
 	/* Make sure that the FORWARD_TSN chunk has valid length.  */
-	if (!sctp_chunk_length_valid(chunk, sizeof(struct sctp_fwdtsn_chunk)))
+	if (!sctp_chunk_length_valid(chunk, sctp_ftsnchk_len(&asoc->stream)))
 		return sctp_sf_violation_chunklen(net, ep, asoc, type, arg,
 						  commands);
 
@@ -3990,14 +3994,11 @@ enum sctp_disposition sctp_sf_eat_fwd_tsn(struct net *net,
 	if (sctp_tsnmap_check(&asoc->peer.tsn_map, tsn) < 0)
 		goto discard_noforce;
 
-	/* Silently discard the chunk if stream-id is not valid */
-	sctp_walk_fwdtsn(skip, chunk) {
-		if (ntohs(skip->stream) >= asoc->stream.incnt)
-			goto discard_noforce;
-	}
+	if (!asoc->stream.si->validate_ftsn(chunk))
+		goto discard_noforce;
 
 	sctp_add_cmd_sf(commands, SCTP_CMD_REPORT_FWDTSN, SCTP_U32(tsn));
-	if (len > sizeof(struct sctp_fwdtsn_hdr))
+	if (len > sctp_ftsnhdr_len(&asoc->stream))
 		sctp_add_cmd_sf(commands, SCTP_CMD_PROCESS_FWDTSN,
 				SCTP_CHUNK(chunk));
 
@@ -4028,7 +4029,6 @@ enum sctp_disposition sctp_sf_eat_fwd_tsn_fast(
 {
 	struct sctp_fwdtsn_hdr *fwdtsn_hdr;
 	struct sctp_chunk *chunk = arg;
-	struct sctp_fwdtsn_skip *skip;
 	__u16 len;
 	__u32 tsn;
 
@@ -4042,7 +4042,7 @@ enum sctp_disposition sctp_sf_eat_fwd_tsn_fast(
 		return sctp_sf_unk_chunk(net, ep, asoc, type, arg, commands);
 
 	/* Make sure that the FORWARD_TSN chunk has a valid length.  */
-	if (!sctp_chunk_length_valid(chunk, sizeof(struct sctp_fwdtsn_chunk)))
+	if (!sctp_chunk_length_valid(chunk, sctp_ftsnchk_len(&asoc->stream)))
 		return sctp_sf_violation_chunklen(net, ep, asoc, type, arg,
 						  commands);
 
@@ -4061,14 +4061,11 @@ enum sctp_disposition sctp_sf_eat_fwd_tsn_fast(
 	if (sctp_tsnmap_check(&asoc->peer.tsn_map, tsn) < 0)
 		goto gen_shutdown;
 
-	/* Silently discard the chunk if stream-id is not valid */
-	sctp_walk_fwdtsn(skip, chunk) {
-		if (ntohs(skip->stream) >= asoc->stream.incnt)
-			goto gen_shutdown;
-	}
+	if (!asoc->stream.si->validate_ftsn(chunk))
+		goto gen_shutdown;
 
 	sctp_add_cmd_sf(commands, SCTP_CMD_REPORT_FWDTSN, SCTP_U32(tsn));
-	if (len > sizeof(struct sctp_fwdtsn_hdr))
+	if (len > sctp_ftsnhdr_len(&asoc->stream))
 		sctp_add_cmd_sf(commands, SCTP_CMD_PROCESS_FWDTSN,
 				SCTP_CHUNK(chunk));
 
@@ -6244,14 +6241,12 @@ static int sctp_eat_data(const struct sctp_association *asoc,
 	struct sctp_chunk *err;
 	enum sctp_verb deliver;
 	size_t datalen;
-	u8 ordered = 0;
-	u16 ssn, sid;
 	__u32 tsn;
 	int tmp;
 
 	data_hdr = (struct sctp_datahdr *)chunk->skb->data;
 	chunk->subh.data_hdr = data_hdr;
-	skb_pull(chunk->skb, sizeof(*data_hdr));
+	skb_pull(chunk->skb, sctp_datahdr_len(&asoc->stream));
 
 	tsn = ntohl(data_hdr->tsn);
 	pr_debug("%s: TSN 0x%x\n", __func__, tsn);
@@ -6299,7 +6294,7 @@ static int sctp_eat_data(const struct sctp_association *asoc,
 	 * Actually, allow a little bit of overflow (up to a MTU).
 	 */
 	datalen = ntohs(chunk->chunk_hdr->length);
-	datalen -= sizeof(struct sctp_data_chunk);
+	datalen -= sctp_datachk_len(&asoc->stream);
 
 	deliver = SCTP_CMD_CHUNK_ULP;
 
@@ -6394,7 +6389,6 @@ static int sctp_eat_data(const struct sctp_association *asoc,
 		SCTP_INC_STATS(net, SCTP_MIB_INORDERCHUNKS);
 		if (chunk->asoc)
 			chunk->asoc->stats.iodchunks++;
-		ordered = 1;
 	}
 
 	/* RFC 2960 6.5 Stream Identifier and Stream Sequence Number
@@ -6405,8 +6399,7 @@ static int sctp_eat_data(const struct sctp_association *asoc,
 	 * with cause set to "Invalid Stream Identifier" (See Section 3.3.10)
 	 * and discard the DATA chunk.
 	 */
-	sid = ntohs(data_hdr->stream);
-	if (sid >= asoc->stream.incnt) {
+	if (ntohs(data_hdr->stream) >= asoc->stream.incnt) {
 		/* Mark tsn as received even though we drop it */
 		sctp_add_cmd_sf(commands, SCTP_CMD_REPORT_TSN, SCTP_U32(tsn));
 
@@ -6427,8 +6420,7 @@ static int sctp_eat_data(const struct sctp_association *asoc,
 	 * SSN is smaller then the next expected one.  If it is, it wrapped
 	 * and is invalid.
 	 */
-	ssn = ntohs(data_hdr->ssn);
-	if (ordered && SSN_lt(ssn, sctp_ssn_peek(&asoc->stream, in, sid)))
+	if (!asoc->stream.si->validate_data(chunk))
 		return SCTP_IERROR_PROTO_VIOLATION;
 
 	/* Send the data up to the user.  Note:  Schedule  the
diff --git a/net/sctp/sm_statetable.c b/net/sctp/sm_statetable.c
index 79b6bee..691d9dc 100644
--- a/net/sctp/sm_statetable.c
+++ b/net/sctp/sm_statetable.c
@@ -985,11 +985,14 @@ static const struct sctp_sm_table_entry *sctp_chunk_event_lookup(
 	if (state > SCTP_STATE_MAX)
 		return &bug;
 
+	if (cid == SCTP_CID_I_DATA)
+		cid = SCTP_CID_DATA;
+
 	if (cid <= SCTP_CID_BASE_MAX)
 		return &chunk_event_table[cid][state];
 
 	if (net->sctp.prsctp_enable) {
-		if (cid == SCTP_CID_FWD_TSN)
+		if (cid == SCTP_CID_FWD_TSN || cid == SCTP_CID_I_FWD_TSN)
 			return &prsctp_chunk_event_table[0][state];
 	}
 
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index 9b01e99..6a54ff0 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -201,6 +201,22 @@ static void sctp_for_each_tx_datachunk(struct sctp_association *asoc,
 		cb(chunk);
 }
 
+static void sctp_for_each_rx_skb(struct sctp_association *asoc, struct sock *sk,
+				 void (*cb)(struct sk_buff *, struct sock *))
+{
+	struct sk_buff *skb, *tmp;
+
+	sctp_skb_for_each(skb, &asoc->ulpq.lobby, tmp)
+		cb(skb, sk);
+
+	sctp_skb_for_each(skb, &asoc->ulpq.reasm, tmp)
+		cb(skb, sk);
+
+	sctp_skb_for_each(skb, &asoc->ulpq.reasm_uo, tmp)
+		cb(skb, sk);
+}
+
 /* Verify that this is a valid address. */
 static inline int sctp_verify_addr(struct sock *sk, union sctp_addr *addr,
 				   int len)
@@ -1528,7 +1544,7 @@ static void sctp_close(struct sock *sk, long timeout)
 
 	lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
 	sk->sk_shutdown = SHUTDOWN_MASK;
-	sk->sk_state = SCTP_SS_CLOSING;
+	inet_sk_set_state(sk, SCTP_SS_CLOSING);
 
 	ep = sctp_sk(sk)->ep;
 
@@ -1554,6 +1570,7 @@ static void sctp_close(struct sock *sk, long timeout)
 
 		if (data_was_unread || !skb_queue_empty(&asoc->ulpq.lobby) ||
 		    !skb_queue_empty(&asoc->ulpq.reasm) ||
+		    !skb_queue_empty(&asoc->ulpq.reasm_uo) ||
 		    (sock_flag(sk, SOCK_LINGER) && !sk->sk_lingertime)) {
 			struct sctp_chunk *chunk;
 
@@ -2002,7 +2019,20 @@ static int sctp_sendmsg(struct sock *sk, struct msghdr *msg, size_t msg_len)
 		if (err < 0)
 			goto out_free;
 
-		wait_connect = true;
+		/* If stream interleave is enabled, the connect wait has to
+		 * happen before the data is enqueued: whether DATA or I-DATA
+		 * chunks are built depends on asoc->intl_enable, which is
+		 * only settled once the association is established.
+		 */
+		if (sctp_sk(asoc->base.sk)->strm_interleave) {
+			timeo = sock_sndtimeo(sk, 0);
+			err = sctp_wait_for_connect(asoc, &timeo);
+			if (err)
+				goto out_unlock;
+		} else {
+			wait_connect = true;
+		}
+
 		pr_debug("%s: we associated primitively\n", __func__);
 	}
 
@@ -2281,7 +2311,7 @@ static int sctp_setsockopt_events(struct sock *sk, char __user *optval,
 			if (!event)
 				return -ENOMEM;
 
-			sctp_ulpq_tail_event(&asoc->ulpq, event);
+			asoc->stream.si->enqueue_event(&asoc->ulpq, event);
 		}
 	}
 
@@ -3180,7 +3210,7 @@ static int sctp_setsockopt_maxseg(struct sock *sk, char __user *optval, unsigned
 		if (val == 0) {
 			val = asoc->pathmtu - sp->pf->af->net_header_len;
 			val -= sizeof(struct sctphdr) +
-			       sizeof(struct sctp_data_chunk);
+			       sctp_datachk_len(&asoc->stream);
 		}
 		asoc->user_frag = val;
 		asoc->frag_point = sctp_frag_point(asoc, asoc->pathmtu);
@@ -3350,7 +3380,10 @@ static int sctp_setsockopt_fragment_interleave(struct sock *sk,
 	if (get_user(val, (int __user *)optval))
 		return -EFAULT;
 
-	sctp_sk(sk)->frag_interleave = (val == 0) ? 0 : 1;
+	sctp_sk(sk)->frag_interleave = !!val;
+
+	if (!sctp_sk(sk)->frag_interleave)
+		sctp_sk(sk)->strm_interleave = 0;
 
 	return 0;
 }
@@ -4033,6 +4066,40 @@ static int sctp_setsockopt_scheduler_value(struct sock *sk,
 	return retval;
 }
 
+static int sctp_setsockopt_interleaving_supported(struct sock *sk,
+						  char __user *optval,
+						  unsigned int optlen)
+{
+	struct sctp_sock *sp = sctp_sk(sk);
+	struct net *net = sock_net(sk);
+	struct sctp_assoc_value params;
+	int retval = -EINVAL;
+
+	if (optlen < sizeof(params))
+		goto out;
+
+	optlen = sizeof(params);
+	if (copy_from_user(&params, optval, optlen)) {
+		retval = -EFAULT;
+		goto out;
+	}
+
+	if (params.assoc_id)
+		goto out;
+
+	if (!net->sctp.intl_enable || !sp->frag_interleave) {
+		retval = -EPERM;
+		goto out;
+	}
+
+	sp->strm_interleave = !!params.assoc_value;
+
+	retval = 0;
+
+out:
+	return retval;
+}
+
 /* API 6.2 setsockopt(), getsockopt()
  *
  * Applications use setsockopt() and getsockopt() to set or retrieve
@@ -4220,6 +4287,10 @@ static int sctp_setsockopt(struct sock *sk, int level, int optname,
 	case SCTP_STREAM_SCHEDULER_VALUE:
 		retval = sctp_setsockopt_scheduler_value(sk, optval, optlen);
 		break;
+	case SCTP_INTERLEAVING_SUPPORTED:
+		retval = sctp_setsockopt_interleaving_supported(sk, optval,
+								optlen);
+		break;
 	default:
 		retval = -ENOPROTOOPT;
 		break;
@@ -4596,7 +4667,7 @@ static void sctp_shutdown(struct sock *sk, int how)
 	if (how & SEND_SHUTDOWN && !list_empty(&ep->asocs)) {
 		struct sctp_association *asoc;
 
-		sk->sk_state = SCTP_SS_CLOSING;
+		inet_sk_set_state(sk, SCTP_SS_CLOSING);
 		asoc = list_entry(ep->asocs.next,
 				  struct sctp_association, asocs);
 		sctp_primitive_SHUTDOWN(net, asoc, NULL);
@@ -4690,20 +4761,11 @@ int sctp_get_sctp_info(struct sock *sk, struct sctp_association *asoc,
 EXPORT_SYMBOL_GPL(sctp_get_sctp_info);
 
 /* use callback to avoid exporting the core structure */
-int sctp_transport_walk_start(struct rhashtable_iter *iter)
+void sctp_transport_walk_start(struct rhashtable_iter *iter)
 {
-	int err;
-
 	rhltable_walk_enter(&sctp_transport_hashtable, iter);
 
-	err = rhashtable_walk_start(iter);
-	if (err && err != -EAGAIN) {
-		rhashtable_walk_stop(iter);
-		rhashtable_walk_exit(iter);
-		return err;
-	}
-
-	return 0;
+	rhashtable_walk_start(iter);
 }
 
 void sctp_transport_walk_stop(struct rhashtable_iter *iter)
@@ -4794,12 +4856,10 @@ int sctp_for_each_transport(int (*cb)(struct sctp_transport *, void *),
 			    struct net *net, int *pos, void *p) {
 	struct rhashtable_iter hti;
 	struct sctp_transport *tsp;
-	int ret;
+	int ret = 0;
 
 again:
-	ret = sctp_transport_walk_start(&hti);
-	if (ret)
-		return ret;
+	sctp_transport_walk_start(&hti);
 
 	tsp = sctp_transport_get_idx(net, &hti, *pos + 1);
 	for (; !IS_ERR_OR_NULL(tsp); tsp = sctp_transport_get_next(net, &hti)) {
@@ -6998,6 +7058,47 @@ static int sctp_getsockopt_scheduler_value(struct sock *sk, int len,
 	return retval;
 }
 
+static int sctp_getsockopt_interleaving_supported(struct sock *sk, int len,
+						  char __user *optval,
+						  int __user *optlen)
+{
+	struct sctp_assoc_value params;
+	struct sctp_association *asoc;
+	int retval = -EFAULT;
+
+	if (len < sizeof(params)) {
+		retval = -EINVAL;
+		goto out;
+	}
+
+	len = sizeof(params);
+	if (copy_from_user(&params, optval, len))
+		goto out;
+
+	asoc = sctp_id2assoc(sk, params.assoc_id);
+	if (asoc) {
+		params.assoc_value = asoc->intl_enable;
+	} else if (!params.assoc_id) {
+		struct sctp_sock *sp = sctp_sk(sk);
+
+		params.assoc_value = sp->strm_interleave;
+	} else {
+		retval = -EINVAL;
+		goto out;
+	}
+
+	if (put_user(len, optlen))
+		goto out;
+
+	if (copy_to_user(optval, &params, len))
+		goto out;
+
+	retval = 0;
+
+out:
+	return retval;
+}
+
 static int sctp_getsockopt(struct sock *sk, int level, int optname,
 			   char __user *optval, int __user *optlen)
 {
@@ -7188,6 +7289,10 @@ static int sctp_getsockopt(struct sock *sk, int level, int optname,
 		retval = sctp_getsockopt_scheduler_value(sk, len, optval,
 							 optlen);
 		break;
+	case SCTP_INTERLEAVING_SUPPORTED:
+		retval = sctp_getsockopt_interleaving_supported(sk, len, optval,
+								optlen);
+		break;
 	default:
 		retval = -ENOPROTOOPT;
 		break;
@@ -7422,13 +7527,13 @@ static int sctp_listen_start(struct sock *sk, int backlog)
 	 * sockets.
 	 *
 	 */
-	sk->sk_state = SCTP_SS_LISTENING;
+	inet_sk_set_state(sk, SCTP_SS_LISTENING);
 	if (!ep->base.bind_addr.port) {
 		if (sctp_autobind(sk))
 			return -EAGAIN;
 	} else {
 		if (sctp_get_port(sk, inet_sk(sk)->inet_num)) {
-			sk->sk_state = SCTP_SS_CLOSED;
+			inet_sk_set_state(sk, SCTP_SS_CLOSED);
 			return -EADDRINUSE;
 		}
 	}
@@ -8425,11 +8530,7 @@ static void sctp_sock_migrate(struct sock *oldsk, struct sock *newsk,
 
 	}
 
-	sctp_skb_for_each(skb, &assoc->ulpq.reasm, tmp)
-		sctp_skb_set_owner_r_frag(skb, newsk);
-
-	sctp_skb_for_each(skb, &assoc->ulpq.lobby, tmp)
-		sctp_skb_set_owner_r_frag(skb, newsk);
+	sctp_for_each_rx_skb(assoc, newsk, sctp_skb_set_owner_r_frag);
 
 	/* Set the type of socket to indicate that it is peeled off from the
 	 * original UDP-style socket or created with the accept() call on a
@@ -8455,10 +8556,10 @@ static void sctp_sock_migrate(struct sock *oldsk, struct sock *newsk,
 	 * is called, set RCV_SHUTDOWN flag.
 	 */
 	if (sctp_state(assoc, CLOSED) && sctp_style(newsk, TCP)) {
-		newsk->sk_state = SCTP_SS_CLOSED;
+		inet_sk_set_state(newsk, SCTP_SS_CLOSED);
 		newsk->sk_shutdown |= RCV_SHUTDOWN;
 	} else {
-		newsk->sk_state = SCTP_SS_ESTABLISHED;
+		inet_sk_set_state(newsk, SCTP_SS_ESTABLISHED);
 	}
 
 	release_sock(newsk);
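From userspace, the new SCTP_INTERLEAVING_SUPPORTED option is gated twice, as the setsockopt handler above shows: the net.sctp.intl_enable sysctl must be 1 and SCTP_FRAGMENT_INTERLEAVE must already be on, otherwise the call fails with EPERM. A sketch of the enabling sequence (socket-level only, so assoc_id stays 0; the new constant requires headers carrying this series' uapi additions):

    #include <netinet/in.h>
    #include <netinet/sctp.h>
    #include <sys/socket.h>

    static int enable_idata(int fd)
    {
            struct sctp_assoc_value av = { .assoc_id = 0, .assoc_value = 1 };
            int level = 2;  /* full fragment interleave, per RFC 6458 */

            if (setsockopt(fd, IPPROTO_SCTP, SCTP_FRAGMENT_INTERLEAVE,
                           &level, sizeof(level)) < 0)
                    return -1;

            return setsockopt(fd, IPPROTO_SCTP, SCTP_INTERLEAVING_SUPPORTED,
                              &av, sizeof(av));
    }
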
diff --git a/net/sctp/stream.c b/net/sctp/stream.c
index 524dfeb..cedf672 100644
--- a/net/sctp/stream.c
+++ b/net/sctp/stream.c
@@ -167,6 +167,7 @@ int sctp_stream_init(struct sctp_stream *stream, __u16 outcnt, __u16 incnt,
 	sched->init(stream);
 
 in:
+	sctp_stream_interleave_init(stream);
 	if (!incnt)
 		goto out;
 
@@ -213,11 +214,13 @@ void sctp_stream_clear(struct sctp_stream *stream)
 {
 	int i;
 
-	for (i = 0; i < stream->outcnt; i++)
-		stream->out[i].ssn = 0;
+	for (i = 0; i < stream->outcnt; i++) {
+		stream->out[i].mid = 0;
+		stream->out[i].mid_uo = 0;
+	}
 
 	for (i = 0; i < stream->incnt; i++)
-		stream->in[i].ssn = 0;
+		stream->in[i].mid = 0;
 }
 
 void sctp_stream_update(struct sctp_stream *stream, struct sctp_stream *new)
@@ -604,10 +607,10 @@ struct sctp_chunk *sctp_process_strreset_outreq(
 		}
 
 		for (i = 0; i < nums; i++)
-			stream->in[ntohs(str_p[i])].ssn = 0;
+			stream->in[ntohs(str_p[i])].mid = 0;
 	} else {
 		for (i = 0; i < stream->incnt; i++)
-			stream->in[i].ssn = 0;
+			stream->in[i].mid = 0;
 	}
 
 	result = SCTP_STRRESET_PERFORMED;
@@ -751,8 +754,7 @@ struct sctp_chunk *sctp_process_strreset_tsnreq(
 	 *     performed.
 	 */
 	max_tsn_seen = sctp_tsnmap_get_max_tsn_seen(&asoc->peer.tsn_map);
-	sctp_ulpq_reasm_flushtsn(&asoc->ulpq, max_tsn_seen);
-	sctp_ulpq_abort_pd(&asoc->ulpq, GFP_ATOMIC);
+	asoc->stream.si->report_ftsn(&asoc->ulpq, max_tsn_seen);
 
 	/* G1: Compute an appropriate value for the Receiver's Next TSN -- the
 	 *     TSN that the peer should use to send the next DATA chunk.  The
@@ -781,10 +783,12 @@ struct sctp_chunk *sctp_process_strreset_tsnreq(
 	/* G5:  The next expected and outgoing SSNs MUST be reset to 0 for all
 	 *      incoming and outgoing streams.
 	 */
-	for (i = 0; i < stream->outcnt; i++)
-		stream->out[i].ssn = 0;
+	for (i = 0; i < stream->outcnt; i++) {
+		stream->out[i].mid = 0;
+		stream->out[i].mid_uo = 0;
+	}
 	for (i = 0; i < stream->incnt; i++)
-		stream->in[i].ssn = 0;
+		stream->in[i].mid = 0;
 
 	result = SCTP_STRRESET_PERFORMED;
 
@@ -974,11 +978,15 @@ struct sctp_chunk *sctp_process_strreset_resp(
 
 		if (result == SCTP_STRRESET_PERFORMED) {
 			if (nums) {
-				for (i = 0; i < nums; i++)
-					stream->out[ntohs(str_p[i])].ssn = 0;
+				for (i = 0; i < nums; i++) {
+					stream->out[ntohs(str_p[i])].mid = 0;
+					stream->out[ntohs(str_p[i])].mid_uo = 0;
+				}
 			} else {
-				for (i = 0; i < stream->outcnt; i++)
-					stream->out[i].ssn = 0;
+				for (i = 0; i < stream->outcnt; i++) {
+					stream->out[i].mid = 0;
+					stream->out[i].mid_uo = 0;
+				}
 			}
 
 			flags = SCTP_STREAM_RESET_OUTGOING_SSN;
@@ -1021,8 +1029,7 @@ struct sctp_chunk *sctp_process_strreset_resp(
 						&asoc->peer.tsn_map);
 			LIST_HEAD(temp);
 
-			sctp_ulpq_reasm_flushtsn(&asoc->ulpq, mtsn);
-			sctp_ulpq_abort_pd(&asoc->ulpq, GFP_ATOMIC);
+			asoc->stream.si->report_ftsn(&asoc->ulpq, mtsn);
 
 			sctp_tsnmap_init(&asoc->peer.tsn_map,
 					 SCTP_TSN_MAP_INITIAL,
@@ -1040,10 +1047,12 @@ struct sctp_chunk *sctp_process_strreset_resp(
 			asoc->ctsn_ack_point = asoc->next_tsn - 1;
 			asoc->adv_peer_ack_point = asoc->ctsn_ack_point;
 
-			for (i = 0; i < stream->outcnt; i++)
-				stream->out[i].ssn = 0;
+			for (i = 0; i < stream->outcnt; i++) {
+				stream->out[i].mid = 0;
+				stream->out[i].mid_uo = 0;
+			}
 			for (i = 0; i < stream->incnt; i++)
-				stream->in[i].ssn = 0;
+				stream->in[i].mid = 0;
 		}
 
 		for (i = 0; i < stream->outcnt; i++)
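Replacing 16-bit SSNs with 32-bit MIDs also changes the comparison rules: MIDs are serial numbers, so ordering is computed modulo 2^32, exactly as TSN_lt/SSN_lt do for their widths. The MID_lt() used in stream_interleave.c below follows the standard idiom; a self-contained illustration (the macro body is the usual serial-arithmetic pattern, not copied from the header):

    #include <assert.h>
    #include <stdint.h>

    /* Serial-number "less than" (RFC 1982 style) for 32-bit MIDs. */
    #define MID_lt(a, b)    ((int32_t)((uint32_t)(a) - (uint32_t)(b)) < 0)

    int main(void)
    {
            assert(MID_lt(1, 2));
            assert(MID_lt(0xffffffffu, 0)); /* wraps: 2^32 - 1 precedes 0 */
            assert(!MID_lt(7, 7));
            return 0;
    }
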
diff --git a/net/sctp/stream_interleave.c b/net/sctp/stream_interleave.c
new file mode 100644
index 0000000..8c7cf8f
--- /dev/null
+++ b/net/sctp/stream_interleave.c
@@ -0,0 +1,1334 @@
+/* SCTP kernel implementation
+ * (C) Copyright Red Hat Inc. 2017
+ *
+ * This file is part of the SCTP kernel implementation
+ *
+ * These functions implement sctp stream message interleaving, mostly
+ * the handling of I-DATA and I-FORWARD-TSN chunks.
+ *
+ * This SCTP implementation is free software;
+ * you can redistribute it and/or modify it under the terms of
+ * the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This SCTP implementation is distributed in the hope that it
+ * will be useful, but WITHOUT ANY WARRANTY; without even the implied
+ *                 ************************
+ * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ * See the GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with GNU CC; see the file COPYING.  If not, see
+ * <http://www.gnu.org/licenses/>.
+ *
+ * Please send any bug reports or fixes you make to the
+ * email address(es):
+ *    lksctp developers <linux-sctp@vger.kernel.org>
+ *
+ * Written or modified by:
+ *    Xin Long <lucien.xin@gmail.com>
+ */
+
+#include <net/busy_poll.h>
+#include <net/sctp/sctp.h>
+#include <net/sctp/sm.h>
+#include <net/sctp/ulpevent.h>
+#include <linux/sctp.h>
+
+static struct sctp_chunk *sctp_make_idatafrag_empty(
+					const struct sctp_association *asoc,
+					const struct sctp_sndrcvinfo *sinfo,
+					int len, __u8 flags, gfp_t gfp)
+{
+	struct sctp_chunk *retval;
+	struct sctp_idatahdr dp;
+
+	memset(&dp, 0, sizeof(dp));
+	dp.stream = htons(sinfo->sinfo_stream);
+
+	if (sinfo->sinfo_flags & SCTP_UNORDERED)
+		flags |= SCTP_DATA_UNORDERED;
+
+	retval = sctp_make_idata(asoc, flags, sizeof(dp) + len, gfp);
+	if (!retval)
+		return NULL;
+
+	retval->subh.idata_hdr = sctp_addto_chunk(retval, sizeof(dp), &dp);
+	memcpy(&retval->sinfo, sinfo, sizeof(struct sctp_sndrcvinfo));
+
+	return retval;
+}
+
+static void sctp_chunk_assign_mid(struct sctp_chunk *chunk)
+{
+	struct sctp_stream *stream;
+	struct sctp_chunk *lchunk;
+	__u32 cfsn = 0;
+	__u16 sid;
+
+	if (chunk->has_mid)
+		return;
+
+	sid = sctp_chunk_stream_no(chunk);
+	stream = &chunk->asoc->stream;
+
+	list_for_each_entry(lchunk, &chunk->msg->chunks, frag_list) {
+		struct sctp_idatahdr *hdr;
+		__u32 mid;
+
+		lchunk->has_mid = 1;
+
+		hdr = lchunk->subh.idata_hdr;
+
+		if (lchunk->chunk_hdr->flags & SCTP_DATA_FIRST_FRAG)
+			hdr->ppid = lchunk->sinfo.sinfo_ppid;
+		else
+			hdr->fsn = htonl(cfsn++);
+
+		if (lchunk->chunk_hdr->flags & SCTP_DATA_UNORDERED) {
+			mid = lchunk->chunk_hdr->flags & SCTP_DATA_LAST_FRAG ?
+				sctp_mid_uo_next(stream, out, sid) :
+				sctp_mid_uo_peek(stream, out, sid);
+		} else {
+			mid = lchunk->chunk_hdr->flags & SCTP_DATA_LAST_FRAG ?
+				sctp_mid_next(stream, out, sid) :
+				sctp_mid_peek(stream, out, sid);
+		}
+		hdr->mid = htonl(mid);
+	}
+}
+
+static bool sctp_validate_data(struct sctp_chunk *chunk)
+{
+	const struct sctp_stream *stream;
+	__u16 sid, ssn;
+
+	if (chunk->chunk_hdr->type != SCTP_CID_DATA)
+		return false;
+
+	if (chunk->chunk_hdr->flags & SCTP_DATA_UNORDERED)
+		return true;
+
+	stream = &chunk->asoc->stream;
+	sid = sctp_chunk_stream_no(chunk);
+	ssn = ntohs(chunk->subh.data_hdr->ssn);
+
+	return !SSN_lt(ssn, sctp_ssn_peek(stream, in, sid));
+}
+
+static bool sctp_validate_idata(struct sctp_chunk *chunk)
+{
+	struct sctp_stream *stream;
+	__u32 mid;
+	__u16 sid;
+
+	if (chunk->chunk_hdr->type != SCTP_CID_I_DATA)
+		return false;
+
+	if (chunk->chunk_hdr->flags & SCTP_DATA_UNORDERED)
+		return true;
+
+	stream = &chunk->asoc->stream;
+	sid = sctp_chunk_stream_no(chunk);
+	mid = ntohl(chunk->subh.idata_hdr->mid);
+
+	return !MID_lt(mid, sctp_mid_peek(stream, in, sid));
+}
+
+static void sctp_intl_store_reasm(struct sctp_ulpq *ulpq,
+				  struct sctp_ulpevent *event)
+{
+	struct sctp_ulpevent *cevent;
+	struct sk_buff *pos;
+
+	pos = skb_peek_tail(&ulpq->reasm);
+	if (!pos) {
+		__skb_queue_tail(&ulpq->reasm, sctp_event2skb(event));
+		return;
+	}
+
+	cevent = sctp_skb2event(pos);
+
+	if (event->stream == cevent->stream &&
+	    event->mid == cevent->mid &&
+	    (cevent->msg_flags & SCTP_DATA_FIRST_FRAG ||
+	     (!(event->msg_flags & SCTP_DATA_FIRST_FRAG) &&
+	      event->fsn > cevent->fsn))) {
+		__skb_queue_tail(&ulpq->reasm, sctp_event2skb(event));
+		return;
+	}
+
+	if ((event->stream == cevent->stream &&
+	     MID_lt(cevent->mid, event->mid)) ||
+	    event->stream > cevent->stream) {
+		__skb_queue_tail(&ulpq->reasm, sctp_event2skb(event));
+		return;
+	}
+
+	skb_queue_walk(&ulpq->reasm, pos) {
+		cevent = sctp_skb2event(pos);
+
+		if (event->stream < cevent->stream ||
+		    (event->stream == cevent->stream &&
+		     MID_lt(event->mid, cevent->mid)))
+			break;
+
+		if (event->stream == cevent->stream &&
+		    event->mid == cevent->mid &&
+		    !(cevent->msg_flags & SCTP_DATA_FIRST_FRAG) &&
+		    (event->msg_flags & SCTP_DATA_FIRST_FRAG ||
+		     event->fsn < cevent->fsn))
+			break;
+	}
+
+	__skb_queue_before(&ulpq->reasm, pos, sctp_event2skb(event));
+}
+
+static struct sctp_ulpevent *sctp_intl_retrieve_partial(
+						struct sctp_ulpq *ulpq,
+						struct sctp_ulpevent *event)
+{
+	struct sk_buff *first_frag = NULL;
+	struct sk_buff *last_frag = NULL;
+	struct sctp_ulpevent *retval;
+	struct sctp_stream_in *sin;
+	struct sk_buff *pos;
+	__u32 next_fsn = 0;
+	int is_last = 0;
+
+	sin = sctp_stream_in(ulpq->asoc, event->stream);
+
+	skb_queue_walk(&ulpq->reasm, pos) {
+		struct sctp_ulpevent *cevent = sctp_skb2event(pos);
+
+		if (cevent->stream < event->stream)
+			continue;
+
+		if (cevent->stream > event->stream ||
+		    cevent->mid != sin->mid)
+			break;
+
+		switch (cevent->msg_flags & SCTP_DATA_FRAG_MASK) {
+		case SCTP_DATA_FIRST_FRAG:
+			goto out;
+		case SCTP_DATA_MIDDLE_FRAG:
+			if (!first_frag) {
+				if (cevent->fsn == sin->fsn) {
+					first_frag = pos;
+					last_frag = pos;
+					next_fsn = cevent->fsn + 1;
+				}
+			} else if (cevent->fsn == next_fsn) {
+				last_frag = pos;
+				next_fsn++;
+			} else {
+				goto out;
+			}
+			break;
+		case SCTP_DATA_LAST_FRAG:
+			if (!first_frag) {
+				if (cevent->fsn == sin->fsn) {
+					first_frag = pos;
+					last_frag = pos;
+					next_fsn = 0;
+					is_last = 1;
+				}
+			} else if (cevent->fsn == next_fsn) {
+				last_frag = pos;
+				next_fsn = 0;
+				is_last = 1;
+			}
+			goto out;
+		default:
+			goto out;
+		}
+	}
+
+out:
+	if (!first_frag)
+		return NULL;
+
+	retval = sctp_make_reassembled_event(sock_net(ulpq->asoc->base.sk),
+					     &ulpq->reasm, first_frag,
+					     last_frag);
+	if (retval) {
+		sin->fsn = next_fsn;
+		if (is_last) {
+			retval->msg_flags |= MSG_EOR;
+			sin->pd_mode = 0;
+		}
+	}
+
+	return retval;
+}
+
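+/* Look for a fully reassembled message (FIRST..LAST with consecutive
+ * FSNs) matching this event's MID.  If the message is incomplete but
+ * the fragments queued for the next expected MID reach the partial
+ * delivery point, start partial delivery instead.
+ */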
+static struct sctp_ulpevent *sctp_intl_retrieve_reassembled(
+						struct sctp_ulpq *ulpq,
+						struct sctp_ulpevent *event)
+{
+	struct sctp_association *asoc = ulpq->asoc;
+	struct sk_buff *pos, *first_frag = NULL;
+	struct sctp_ulpevent *retval = NULL;
+	struct sk_buff *pd_first = NULL;
+	struct sk_buff *pd_last = NULL;
+	struct sctp_stream_in *sin;
+	__u32 next_fsn = 0;
+	__u32 pd_point = 0;
+	__u32 pd_len = 0;
+	__u32 mid = 0;
+
+	sin = sctp_stream_in(ulpq->asoc, event->stream);
+
+	skb_queue_walk(&ulpq->reasm, pos) {
+		struct sctp_ulpevent *cevent = sctp_skb2event(pos);
+
+		if (cevent->stream < event->stream)
+			continue;
+		if (cevent->stream > event->stream)
+			break;
+
+		if (MID_lt(cevent->mid, event->mid))
+			continue;
+		if (MID_lt(event->mid, cevent->mid))
+			break;
+
+		switch (cevent->msg_flags & SCTP_DATA_FRAG_MASK) {
+		case SCTP_DATA_FIRST_FRAG:
+			if (cevent->mid == sin->mid) {
+				pd_first = pos;
+				pd_last = pos;
+				pd_len = pos->len;
+			}
+
+			first_frag = pos;
+			next_fsn = 0;
+			mid = cevent->mid;
+			break;
+
+		case SCTP_DATA_MIDDLE_FRAG:
+			if (first_frag && cevent->mid == mid &&
+			    cevent->fsn == next_fsn) {
+				next_fsn++;
+				if (pd_first) {
+					pd_last = pos;
+					pd_len += pos->len;
+				}
+			} else {
+				first_frag = NULL;
+			}
+			break;
+
+		case SCTP_DATA_LAST_FRAG:
+			if (first_frag && cevent->mid == mid &&
+			    cevent->fsn == next_fsn)
+				goto found;
+			else
+				first_frag = NULL;
+			break;
+		}
+	}
+
+	if (!pd_first)
+		goto out;
+
+	pd_point = sctp_sk(asoc->base.sk)->pd_point;
+	if (pd_point && pd_point <= pd_len) {
+		retval = sctp_make_reassembled_event(sock_net(asoc->base.sk),
+						     &ulpq->reasm,
+						     pd_first, pd_last);
+		if (retval) {
+			sin->fsn = next_fsn;
+			sin->pd_mode = 1;
+		}
+	}
+	goto out;
+
+found:
+	retval = sctp_make_reassembled_event(sock_net(asoc->base.sk),
+					     &ulpq->reasm,
+					     first_frag, pos);
+	if (retval)
+		retval->msg_flags |= MSG_EOR;
+
+out:
+	return retval;
+}
+
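+/* Reassembly entry point for ordered I-DATA: unfragmented messages are
+ * delivered directly, otherwise the event is queued and either partial
+ * delivery is continued or full reassembly is attempted.
+ */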
+static struct sctp_ulpevent *sctp_intl_reasm(struct sctp_ulpq *ulpq,
+					     struct sctp_ulpevent *event)
+{
+	struct sctp_ulpevent *retval = NULL;
+	struct sctp_stream_in *sin;
+
+	if (SCTP_DATA_NOT_FRAG == (event->msg_flags & SCTP_DATA_FRAG_MASK)) {
+		event->msg_flags |= MSG_EOR;
+		return event;
+	}
+
+	sctp_intl_store_reasm(ulpq, event);
+
+	sin = sctp_stream_in(ulpq->asoc, event->stream);
+	if (sin->pd_mode && event->mid == sin->mid &&
+	    event->fsn == sin->fsn)
+		retval = sctp_intl_retrieve_partial(ulpq, event);
+
+	if (!retval)
+		retval = sctp_intl_retrieve_reassembled(ulpq, event);
+
+	return retval;
+}
+
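+/* Insert an out-of-order event into the lobby, kept sorted by stream
+ * id and MID, appending at the tail when possible.
+ */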
+static void sctp_intl_store_ordered(struct sctp_ulpq *ulpq,
+				    struct sctp_ulpevent *event)
+{
+	struct sctp_ulpevent *cevent;
+	struct sk_buff *pos;
+
+	pos = skb_peek_tail(&ulpq->lobby);
+	if (!pos) {
+		__skb_queue_tail(&ulpq->lobby, sctp_event2skb(event));
+		return;
+	}
+
+	cevent = (struct sctp_ulpevent *)pos->cb;
+	if (event->stream == cevent->stream &&
+	    MID_lt(cevent->mid, event->mid)) {
+		__skb_queue_tail(&ulpq->lobby, sctp_event2skb(event));
+		return;
+	}
+
+	if (event->stream > cevent->stream) {
+		__skb_queue_tail(&ulpq->lobby, sctp_event2skb(event));
+		return;
+	}
+
+	skb_queue_walk(&ulpq->lobby, pos) {
+		cevent = (struct sctp_ulpevent *)pos->cb;
+
+		if (cevent->stream > event->stream)
+			break;
+
+		if (cevent->stream == event->stream &&
+		    MID_lt(event->mid, cevent->mid))
+			break;
+	}
+
+	__skb_queue_before(&ulpq->lobby, pos, sctp_event2skb(event));
+}
+
+static void sctp_intl_retrieve_ordered(struct sctp_ulpq *ulpq,
+				       struct sctp_ulpevent *event)
+{
+	struct sk_buff_head *event_list;
+	struct sctp_stream *stream;
+	struct sk_buff *pos, *tmp;
+	__u16 sid = event->stream;
+
+	stream  = &ulpq->asoc->stream;
+	event_list = (struct sk_buff_head *)sctp_event2skb(event)->prev;
+
+	sctp_skb_for_each(pos, &ulpq->lobby, tmp) {
+		struct sctp_ulpevent *cevent = (struct sctp_ulpevent *)pos->cb;
+
+		if (cevent->stream > sid)
+			break;
+
+		if (cevent->stream < sid)
+			continue;
+
+		if (cevent->mid != sctp_mid_peek(stream, in, sid))
+			break;
+
+		sctp_mid_next(stream, in, sid);
+
+		__skb_unlink(pos, &ulpq->lobby);
+
+		__skb_queue_tail(event_list, pos);
+	}
+}
+
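+/* Deliver an ordered event if its MID is the next expected one for its
+ * stream, otherwise park it in the lobby until the gap is filled.
+ */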
+static struct sctp_ulpevent *sctp_intl_order(struct sctp_ulpq *ulpq,
+					     struct sctp_ulpevent *event)
+{
+	struct sctp_stream *stream;
+	__u16 sid;
+
+	stream  = &ulpq->asoc->stream;
+	sid = event->stream;
+
+	if (event->mid != sctp_mid_peek(stream, in, sid)) {
+		sctp_intl_store_ordered(ulpq, event);
+		return NULL;
+	}
+
+	sctp_mid_next(stream, in, sid);
+
+	sctp_intl_retrieve_ordered(ulpq, event);
+
+	return event;
+}
+
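+/* Move an event, or the skb list hanging off it, to the socket receive
+ * queue and wake the reader, honouring shutdown state and the socket's
+ * event subscriptions.
+ */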
+static int sctp_enqueue_event(struct sctp_ulpq *ulpq,
+			      struct sctp_ulpevent *event)
+{
+	struct sk_buff *skb = sctp_event2skb(event);
+	struct sock *sk = ulpq->asoc->base.sk;
+	struct sctp_sock *sp = sctp_sk(sk);
+	struct sk_buff_head *skb_list;
+
+	skb_list = (struct sk_buff_head *)skb->prev;
+
+	if (sk->sk_shutdown & RCV_SHUTDOWN &&
+	    (sk->sk_shutdown & SEND_SHUTDOWN ||
+	     !sctp_ulpevent_is_notification(event)))
+		goto out_free;
+
+	if (!sctp_ulpevent_is_notification(event)) {
+		sk_mark_napi_id(sk, skb);
+		sk_incoming_cpu_update(sk);
+	}
+
+	if (!sctp_ulpevent_is_enabled(event, &sp->subscribe))
+		goto out_free;
+
+	if (skb_list)
+		skb_queue_splice_tail_init(skb_list,
+					   &sk->sk_receive_queue);
+	else
+		__skb_queue_tail(&sk->sk_receive_queue, skb);
+
+	if (!sp->data_ready_signalled) {
+		sp->data_ready_signalled = 1;
+		sk->sk_data_ready(sk);
+	}
+
+	return 1;
+
+out_free:
+	if (skb_list)
+		sctp_queue_purge_ulpevents(skb_list);
+	else
+		sctp_ulpevent_free(event);
+
+	return 0;
+}
+
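+/* Unordered counterpart of sctp_intl_store_reasm(), operating on the
+ * reasm_uo queue.
+ */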
+static void sctp_intl_store_reasm_uo(struct sctp_ulpq *ulpq,
+				     struct sctp_ulpevent *event)
+{
+	struct sctp_ulpevent *cevent;
+	struct sk_buff *pos;
+
+	pos = skb_peek_tail(&ulpq->reasm_uo);
+	if (!pos) {
+		__skb_queue_tail(&ulpq->reasm_uo, sctp_event2skb(event));
+		return;
+	}
+
+	cevent = sctp_skb2event(pos);
+
+	if (event->stream == cevent->stream &&
+	    event->mid == cevent->mid &&
+	    (cevent->msg_flags & SCTP_DATA_FIRST_FRAG ||
+	     (!(event->msg_flags & SCTP_DATA_FIRST_FRAG) &&
+	      event->fsn > cevent->fsn))) {
+		__skb_queue_tail(&ulpq->reasm_uo, sctp_event2skb(event));
+		return;
+	}
+
+	if ((event->stream == cevent->stream &&
+	     MID_lt(cevent->mid, event->mid)) ||
+	    event->stream > cevent->stream) {
+		__skb_queue_tail(&ulpq->reasm_uo, sctp_event2skb(event));
+		return;
+	}
+
+	skb_queue_walk(&ulpq->reasm_uo, pos) {
+		cevent = sctp_skb2event(pos);
+
+		if (event->stream < cevent->stream ||
+		    (event->stream == cevent->stream &&
+		     MID_lt(event->mid, cevent->mid)))
+			break;
+
+		if (event->stream == cevent->stream &&
+		    event->mid == cevent->mid &&
+		    !(cevent->msg_flags & SCTP_DATA_FIRST_FRAG) &&
+		    (event->msg_flags & SCTP_DATA_FIRST_FRAG ||
+		     event->fsn < cevent->fsn))
+			break;
+	}
+
+	__skb_queue_before(&ulpq->reasm_uo, pos, sctp_event2skb(event));
+}
+
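+/* Unordered counterpart of sctp_intl_retrieve_partial(). */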
+static struct sctp_ulpevent *sctp_intl_retrieve_partial_uo(
+						struct sctp_ulpq *ulpq,
+						struct sctp_ulpevent *event)
+{
+	struct sk_buff *first_frag = NULL;
+	struct sk_buff *last_frag = NULL;
+	struct sctp_ulpevent *retval;
+	struct sctp_stream_in *sin;
+	struct sk_buff *pos;
+	__u32 next_fsn = 0;
+	int is_last = 0;
+
+	sin = sctp_stream_in(ulpq->asoc, event->stream);
+
+	skb_queue_walk(&ulpq->reasm_uo, pos) {
+		struct sctp_ulpevent *cevent = sctp_skb2event(pos);
+
+		if (cevent->stream < event->stream)
+			continue;
+		if (cevent->stream > event->stream)
+			break;
+
+		if (MID_lt(cevent->mid, sin->mid_uo))
+			continue;
+		if (MID_lt(sin->mid_uo, cevent->mid))
+			break;
+
+		switch (cevent->msg_flags & SCTP_DATA_FRAG_MASK) {
+		case SCTP_DATA_FIRST_FRAG:
+			goto out;
+		case SCTP_DATA_MIDDLE_FRAG:
+			if (!first_frag) {
+				if (cevent->fsn == sin->fsn_uo) {
+					first_frag = pos;
+					last_frag = pos;
+					next_fsn = cevent->fsn + 1;
+				}
+			} else if (cevent->fsn == next_fsn) {
+				last_frag = pos;
+				next_fsn++;
+			} else {
+				goto out;
+			}
+			break;
+		case SCTP_DATA_LAST_FRAG:
+			if (!first_frag) {
+				if (cevent->fsn == sin->fsn_uo) {
+					first_frag = pos;
+					last_frag = pos;
+					next_fsn = 0;
+					is_last = 1;
+				}
+			} else if (cevent->fsn == next_fsn) {
+				last_frag = pos;
+				next_fsn = 0;
+				is_last = 1;
+			}
+			goto out;
+		default:
+			goto out;
+		}
+	}
+
+out:
+	if (!first_frag)
+		return NULL;
+
+	retval = sctp_make_reassembled_event(sock_net(ulpq->asoc->base.sk),
+					     &ulpq->reasm_uo, first_frag,
+					     last_frag);
+	if (retval) {
+		sin->fsn_uo = next_fsn;
+		if (is_last) {
+			retval->msg_flags |= MSG_EOR;
+			sin->pd_mode_uo = 0;
+		}
+	}
+
+	return retval;
+}
+
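+/* Unordered counterpart of sctp_intl_retrieve_reassembled(). */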
+static struct sctp_ulpevent *sctp_intl_retrieve_reassembled_uo(
+						struct sctp_ulpq *ulpq,
+						struct sctp_ulpevent *event)
+{
+	struct sctp_association *asoc = ulpq->asoc;
+	struct sk_buff *pos, *first_frag = NULL;
+	struct sctp_ulpevent *retval = NULL;
+	struct sk_buff *pd_first = NULL;
+	struct sk_buff *pd_last = NULL;
+	struct sctp_stream_in *sin;
+	__u32 next_fsn = 0;
+	__u32 pd_point = 0;
+	__u32 pd_len = 0;
+	__u32 mid = 0;
+
+	sin = sctp_stream_in(ulpq->asoc, event->stream);
+
+	skb_queue_walk(&ulpq->reasm_uo, pos) {
+		struct sctp_ulpevent *cevent = sctp_skb2event(pos);
+
+		if (cevent->stream < event->stream)
+			continue;
+		if (cevent->stream > event->stream)
+			break;
+
+		if (MID_lt(cevent->mid, event->mid))
+			continue;
+		if (MID_lt(event->mid, cevent->mid))
+			break;
+
+		switch (cevent->msg_flags & SCTP_DATA_FRAG_MASK) {
+		case SCTP_DATA_FIRST_FRAG:
+			if (!sin->pd_mode_uo) {
+				sin->mid_uo = cevent->mid;
+				pd_first = pos;
+				pd_last = pos;
+				pd_len = pos->len;
+			}
+
+			first_frag = pos;
+			next_fsn = 0;
+			mid = cevent->mid;
+			break;
+
+		case SCTP_DATA_MIDDLE_FRAG:
+			if (first_frag && cevent->mid == mid &&
+			    cevent->fsn == next_fsn) {
+				next_fsn++;
+				if (pd_first) {
+					pd_last = pos;
+					pd_len += pos->len;
+				}
+			} else {
+				first_frag = NULL;
+			}
+			break;
+
+		case SCTP_DATA_LAST_FRAG:
+			if (first_frag && cevent->mid == mid &&
+			    cevent->fsn == next_fsn)
+				goto found;
+			else
+				first_frag = NULL;
+			break;
+		}
+	}
+
+	if (!pd_first)
+		goto out;
+
+	pd_point = sctp_sk(asoc->base.sk)->pd_point;
+	if (pd_point && pd_point <= pd_len) {
+		retval = sctp_make_reassembled_event(sock_net(asoc->base.sk),
+						     &ulpq->reasm_uo,
+						     pd_first, pd_last);
+		if (retval) {
+			sin->fsn_uo = next_fsn;
+			sin->pd_mode_uo = 1;
+		}
+	}
+	goto out;
+
+found:
+	retval = sctp_make_reassembled_event(sock_net(asoc->base.sk),
+					     &ulpq->reasm_uo,
+					     first_frag, pos);
+	if (retval)
+		retval->msg_flags |= MSG_EOR;
+
+out:
+	return retval;
+}
+
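+/* Reassembly entry point for unordered I-DATA. */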
+static struct sctp_ulpevent *sctp_intl_reasm_uo(struct sctp_ulpq *ulpq,
+						struct sctp_ulpevent *event)
+{
+	struct sctp_ulpevent *retval = NULL;
+	struct sctp_stream_in *sin;
+
+	if (SCTP_DATA_NOT_FRAG == (event->msg_flags & SCTP_DATA_FRAG_MASK)) {
+		event->msg_flags |= MSG_EOR;
+		return event;
+	}
+
+	sctp_intl_store_reasm_uo(ulpq, event);
+
+	sin = sctp_stream_in(ulpq->asoc, event->stream);
+	if (sin->pd_mode_uo && event->mid == sin->mid_uo &&
+	    event->fsn == sin->fsn_uo)
+		retval = sctp_intl_retrieve_partial_uo(ulpq, event);
+
+	if (!retval)
+		retval = sctp_intl_retrieve_reassembled_uo(ulpq, event);
+
+	return retval;
+}
+
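+/* Pick the first stream that is not already in unordered partial
+ * delivery mode and force partial delivery of its leading run of
+ * fragments.
+ */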
+static struct sctp_ulpevent *sctp_intl_retrieve_first_uo(struct sctp_ulpq *ulpq)
+{
+	struct sctp_stream_in *csin, *sin = NULL;
+	struct sk_buff *first_frag = NULL;
+	struct sk_buff *last_frag = NULL;
+	struct sctp_ulpevent *retval;
+	struct sk_buff *pos;
+	__u32 next_fsn = 0;
+	__u16 sid = 0;
+
+	skb_queue_walk(&ulpq->reasm_uo, pos) {
+		struct sctp_ulpevent *cevent = sctp_skb2event(pos);
+
+		csin = sctp_stream_in(ulpq->asoc, cevent->stream);
+		if (csin->pd_mode_uo)
+			continue;
+
+		switch (cevent->msg_flags & SCTP_DATA_FRAG_MASK) {
+		case SCTP_DATA_FIRST_FRAG:
+			if (first_frag)
+				goto out;
+			first_frag = pos;
+			last_frag = pos;
+			next_fsn = 0;
+			sin = csin;
+			sid = cevent->stream;
+			sin->mid_uo = cevent->mid;
+			break;
+		case SCTP_DATA_MIDDLE_FRAG:
+			if (!first_frag)
+				break;
+			if (cevent->stream == sid &&
+			    cevent->mid == sin->mid_uo &&
+			    cevent->fsn == next_fsn) {
+				next_fsn++;
+				last_frag = pos;
+			} else {
+				goto out;
+			}
+			break;
+		case SCTP_DATA_LAST_FRAG:
+			if (first_frag)
+				goto out;
+			break;
+		default:
+			break;
+		}
+	}
+
+	if (!first_frag)
+		return NULL;
+
+out:
+	retval = sctp_make_reassembled_event(sock_net(ulpq->asoc->base.sk),
+					     &ulpq->reasm_uo, first_frag,
+					     last_frag);
+	if (retval) {
+		sin->fsn_uo = next_fsn;
+		sin->pd_mode_uo = 1;
+	}
+
+	return retval;
+}
+
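+/* Main receive path for I-DATA chunks: build the event, reassemble,
+ * order if required and enqueue.  Returns 1 when a complete message
+ * (MSG_EOR) was delivered, 0 otherwise, or -ENOMEM.
+ */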
+static int sctp_ulpevent_idata(struct sctp_ulpq *ulpq,
+			       struct sctp_chunk *chunk, gfp_t gfp)
+{
+	struct sctp_ulpevent *event;
+	struct sk_buff_head temp;
+	int event_eor = 0;
+
+	event = sctp_ulpevent_make_rcvmsg(chunk->asoc, chunk, gfp);
+	if (!event)
+		return -ENOMEM;
+
+	event->mid = ntohl(chunk->subh.idata_hdr->mid);
+	if (event->msg_flags & SCTP_DATA_FIRST_FRAG)
+		event->ppid = chunk->subh.idata_hdr->ppid;
+	else
+		event->fsn = ntohl(chunk->subh.idata_hdr->fsn);
+
+	if (!(event->msg_flags & SCTP_DATA_UNORDERED)) {
+		event = sctp_intl_reasm(ulpq, event);
+		if (event && event->msg_flags & MSG_EOR) {
+			skb_queue_head_init(&temp);
+			__skb_queue_tail(&temp, sctp_event2skb(event));
+
+			event = sctp_intl_order(ulpq, event);
+		}
+	} else {
+		event = sctp_intl_reasm_uo(ulpq, event);
+	}
+
+	if (event) {
+		event_eor = (event->msg_flags & MSG_EOR) ? 1 : 0;
+		sctp_enqueue_event(ulpq, event);
+	}
+
+	return event_eor;
+}
+
+static struct sctp_ulpevent *sctp_intl_retrieve_first(struct sctp_ulpq *ulpq)
+{
+	struct sctp_stream_in *csin, *sin = NULL;
+	struct sk_buff *first_frag = NULL;
+	struct sk_buff *last_frag = NULL;
+	struct sctp_ulpevent *retval;
+	struct sk_buff *pos;
+	__u32 next_fsn = 0;
+	__u16 sid = 0;
+
+	skb_queue_walk(&ulpq->reasm, pos) {
+		struct sctp_ulpevent *cevent = sctp_skb2event(pos);
+
+		csin = sctp_stream_in(ulpq->asoc, cevent->stream);
+		if (csin->pd_mode)
+			continue;
+
+		switch (cevent->msg_flags & SCTP_DATA_FRAG_MASK) {
+		case SCTP_DATA_FIRST_FRAG:
+			if (first_frag)
+				goto out;
+			if (cevent->mid == csin->mid) {
+				first_frag = pos;
+				last_frag = pos;
+				next_fsn = 0;
+				sin = csin;
+				sid = cevent->stream;
+			}
+			break;
+		case SCTP_DATA_MIDDLE_FRAG:
+			if (!first_frag)
+				break;
+			if (cevent->stream == sid &&
+			    cevent->mid == sin->mid &&
+			    cevent->fsn == next_fsn) {
+				next_fsn++;
+				last_frag = pos;
+			} else {
+				goto out;
+			}
+			break;
+		case SCTP_DATA_LAST_FRAG:
+			if (first_frag)
+				goto out;
+			break;
+		default:
+			break;
+		}
+	}
+
+	if (!first_frag)
+		return NULL;
+
+out:
+	retval = sctp_make_reassembled_event(sock_net(ulpq->asoc->base.sk),
+					     &ulpq->reasm, first_frag,
+					     last_frag);
+	if (retval) {
+		sin->fsn = next_fsn;
+		sin->pd_mode = 1;
+	}
+
+	return retval;
+}
+
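+/* Force partial delivery on both the ordered and the unordered
+ * reassembly queue, draining as many leading fragment runs as possible.
+ */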
+static void sctp_intl_start_pd(struct sctp_ulpq *ulpq, gfp_t gfp)
+{
+	struct sctp_ulpevent *event;
+
+	if (!skb_queue_empty(&ulpq->reasm)) {
+		do {
+			event = sctp_intl_retrieve_first(ulpq);
+			if (event)
+				sctp_enqueue_event(ulpq, event);
+		} while (event);
+	}
+
+	if (!skb_queue_empty(&ulpq->reasm_uo)) {
+		do {
+			event = sctp_intl_retrieve_first_uo(ulpq);
+			if (event)
+				sctp_enqueue_event(ulpq, event);
+		} while (event);
+	}
+}
+
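+/* Renege events from the lobby and reassembly queues to make room for
+ * an incoming chunk, then process it; if no complete message could be
+ * delivered, fall back to partial delivery.
+ */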
+static void sctp_renege_events(struct sctp_ulpq *ulpq, struct sctp_chunk *chunk,
+			       gfp_t gfp)
+{
+	struct sctp_association *asoc = ulpq->asoc;
+	__u32 freed = 0;
+	__u16 needed;
+
+	if (chunk) {
+		needed = ntohs(chunk->chunk_hdr->length);
+		needed -= sizeof(struct sctp_idata_chunk);
+	} else {
+		needed = SCTP_DEFAULT_MAXWINDOW;
+	}
+
+	if (skb_queue_empty(&asoc->base.sk->sk_receive_queue)) {
+		freed = sctp_ulpq_renege_list(ulpq, &ulpq->lobby, needed);
+		if (freed < needed)
+			freed += sctp_ulpq_renege_list(ulpq, &ulpq->reasm,
+						       needed);
+		if (freed < needed)
+			freed += sctp_ulpq_renege_list(ulpq, &ulpq->reasm_uo,
+						       needed);
+	}
+
+	if (chunk && freed >= needed)
+		if (sctp_ulpevent_idata(ulpq, chunk, gfp) <= 0)
+			sctp_intl_start_pd(ulpq, gfp);
+
+	sk_mem_reclaim(asoc->base.sk);
+}
+
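+/* Queue an SCTP_PARTIAL_DELIVERY_ABORTED event for one stream directly
+ * on the socket receive queue, if the user subscribed to it.
+ */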
+static void sctp_intl_stream_abort_pd(struct sctp_ulpq *ulpq, __u16 sid,
+				      __u32 mid, __u16 flags, gfp_t gfp)
+{
+	struct sock *sk = ulpq->asoc->base.sk;
+	struct sctp_ulpevent *ev = NULL;
+
+	if (!sctp_ulpevent_type_enabled(SCTP_PARTIAL_DELIVERY_EVENT,
+					&sctp_sk(sk)->subscribe))
+		return;
+
+	ev = sctp_ulpevent_make_pdapi(ulpq->asoc, SCTP_PARTIAL_DELIVERY_ABORTED,
+				      sid, mid, flags, gfp);
+	if (ev) {
+		__skb_queue_tail(&sk->sk_receive_queue, sctp_event2skb(ev));
+
+		if (!sctp_sk(sk)->data_ready_signalled) {
+			sctp_sk(sk)->data_ready_signalled = 1;
+			sk->sk_data_ready(sk);
+		}
+	}
+}
+
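+/* After MIDs have been skipped, move all lobby events of this stream
+ * that became deliverable over to the socket.
+ */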
+static void sctp_intl_reap_ordered(struct sctp_ulpq *ulpq, __u16 sid)
+{
+	struct sctp_stream *stream = &ulpq->asoc->stream;
+	struct sctp_ulpevent *cevent, *event = NULL;
+	struct sk_buff_head *lobby = &ulpq->lobby;
+	struct sk_buff *pos, *tmp;
+	struct sk_buff_head temp;
+	__u16 csid;
+	__u32 cmid;
+
+	skb_queue_head_init(&temp);
+	sctp_skb_for_each(pos, lobby, tmp) {
+		cevent = (struct sctp_ulpevent *)pos->cb;
+		csid = cevent->stream;
+		cmid = cevent->mid;
+
+		if (csid > sid)
+			break;
+
+		if (csid < sid)
+			continue;
+
+		if (!MID_lt(cmid, sctp_mid_peek(stream, in, csid)))
+			break;
+
+		__skb_unlink(pos, lobby);
+		if (!event)
+			event = sctp_skb2event(pos);
+
+		__skb_queue_tail(&temp, pos);
+	}
+
+	if (!event && pos != (struct sk_buff *)lobby) {
+		cevent = (struct sctp_ulpevent *)pos->cb;
+		csid = cevent->stream;
+		cmid = cevent->mid;
+
+		if (csid == sid && cmid == sctp_mid_peek(stream, in, csid)) {
+			sctp_mid_next(stream, in, csid);
+			__skb_unlink(pos, lobby);
+			__skb_queue_tail(&temp, pos);
+			event = sctp_skb2event(pos);
+		}
+	}
+
+	if (event) {
+		sctp_intl_retrieve_ordered(ulpq, event);
+		sctp_enqueue_event(ulpq, event);
+	}
+}
+
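+/* Abort partial delivery on every stream, ordered and unordered, and
+ * flush all pending queues.
+ */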
+static void sctp_intl_abort_pd(struct sctp_ulpq *ulpq, gfp_t gfp)
+{
+	struct sctp_stream *stream = &ulpq->asoc->stream;
+	__u16 sid;
+
+	for (sid = 0; sid < stream->incnt; sid++) {
+		struct sctp_stream_in *sin = &stream->in[sid];
+		__u32 mid;
+
+		if (sin->pd_mode_uo) {
+			sin->pd_mode_uo = 0;
+
+			mid = sin->mid_uo;
+			sctp_intl_stream_abort_pd(ulpq, sid, mid, 0x1, gfp);
+		}
+
+		if (sin->pd_mode) {
+			sin->pd_mode = 0;
+
+			mid = sin->mid;
+			sctp_intl_stream_abort_pd(ulpq, sid, mid, 0, gfp);
+			sctp_mid_skip(stream, in, sid, mid);
+
+			sctp_intl_reap_ordered(ulpq, sid);
+		}
+	}
+
+	/* Interleaved partial delivery is aborted only when all data needs to be flushed */
+	sctp_ulpq_flush(ulpq);
+}
+
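+/* Find the skip list slot matching (stream, flags), or return nskips
+ * so that a new entry gets appended.
+ */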
+static inline int sctp_get_skip_pos(struct sctp_ifwdtsn_skip *skiplist,
+				    int nskips, __be16 stream, __u8 flags)
+{
+	int i;
+
+	for (i = 0; i < nskips; i++)
+		if (skiplist[i].stream == stream &&
+		    skiplist[i].flags == flags)
+			return i;
+
+	return i;
+}
+
+#define SCTP_FTSN_U_BIT	0x1
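+/* Build an I-FORWARD-TSN chunk from the abandoned chunk list, advancing
+ * the advanced peer ack point and recording at most ten (sid, flags,
+ * mid) skip entries.
+ */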
+static void sctp_generate_iftsn(struct sctp_outq *q, __u32 ctsn)
+{
+	struct sctp_ifwdtsn_skip ftsn_skip_arr[10];
+	struct sctp_association *asoc = q->asoc;
+	struct sctp_chunk *ftsn_chunk = NULL;
+	struct list_head *lchunk, *temp;
+	int nskips = 0, skip_pos;
+	struct sctp_chunk *chunk;
+	__u32 tsn;
+
+	if (!asoc->peer.prsctp_capable)
+		return;
+
+	if (TSN_lt(asoc->adv_peer_ack_point, ctsn))
+		asoc->adv_peer_ack_point = ctsn;
+
+	list_for_each_safe(lchunk, temp, &q->abandoned) {
+		chunk = list_entry(lchunk, struct sctp_chunk, transmitted_list);
+		tsn = ntohl(chunk->subh.data_hdr->tsn);
+
+		if (TSN_lte(tsn, ctsn)) {
+			list_del_init(lchunk);
+			sctp_chunk_free(chunk);
+		} else if (TSN_lte(tsn, asoc->adv_peer_ack_point + 1)) {
+			__be16 sid = chunk->subh.idata_hdr->stream;
+			__be32 mid = chunk->subh.idata_hdr->mid;
+			__u8 flags = 0;
+
+			if (chunk->chunk_hdr->flags & SCTP_DATA_UNORDERED)
+				flags |= SCTP_FTSN_U_BIT;
+
+			asoc->adv_peer_ack_point = tsn;
+			skip_pos = sctp_get_skip_pos(&ftsn_skip_arr[0], nskips,
+						     sid, flags);
+			ftsn_skip_arr[skip_pos].stream = sid;
+			ftsn_skip_arr[skip_pos].reserved = 0;
+			ftsn_skip_arr[skip_pos].flags = flags;
+			ftsn_skip_arr[skip_pos].mid = mid;
+			if (skip_pos == nskips)
+				nskips++;
+			if (nskips == 10)
+				break;
+		} else {
+			break;
+		}
+	}
+
+	if (asoc->adv_peer_ack_point > ctsn)
+		ftsn_chunk = sctp_make_ifwdtsn(asoc, asoc->adv_peer_ack_point,
+					       nskips, &ftsn_skip_arr[0]);
+
+	if (ftsn_chunk) {
+		list_add_tail(&ftsn_chunk->list, &q->control_chunk_list);
+		SCTP_INC_STATS(sock_net(asoc->base.sk), SCTP_MIB_OUTCTRLCHUNKS);
+	}
+}
+
+#define _sctp_walk_ifwdtsn(pos, chunk, end) \
+	for (pos = chunk->subh.ifwdtsn_hdr->skip; \
+	     (void *)pos < (void *)chunk->subh.ifwdtsn_hdr->skip + (end); pos++)
+
+#define sctp_walk_ifwdtsn(pos, ch) \
+	_sctp_walk_ifwdtsn((pos), (ch), ntohs((ch)->chunk_hdr->length) - \
+					sizeof(struct sctp_ifwdtsn_chunk))
+
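+/* Reject FORWARD-TSN chunks referencing a stream id beyond the
+ * association's inbound stream count.
+ */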
+static bool sctp_validate_fwdtsn(struct sctp_chunk *chunk)
+{
+	struct sctp_fwdtsn_skip *skip;
+	__u16 incnt;
+
+	if (chunk->chunk_hdr->type != SCTP_CID_FWD_TSN)
+		return false;
+
+	incnt = chunk->asoc->stream.incnt;
+	sctp_walk_fwdtsn(skip, chunk)
+		if (ntohs(skip->stream) >= incnt)
+			return false;
+
+	return true;
+}
+
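+/* Same validation for I-FORWARD-TSN chunks. */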
+static bool sctp_validate_iftsn(struct sctp_chunk *chunk)
+{
+	struct sctp_ifwdtsn_skip *skip;
+	__u16 incnt;
+
+	if (chunk->chunk_hdr->type != SCTP_CID_I_FWD_TSN)
+		return false;
+
+	incnt = chunk->asoc->stream.incnt;
+	sctp_walk_ifwdtsn(skip, chunk)
+		if (ntohs(skip->stream) >= incnt)
+			return false;
+
+	return true;
+}
+
+static void sctp_report_fwdtsn(struct sctp_ulpq *ulpq, __u32 ftsn)
+{
+	/* Move the Cumulative TSN Ack ahead. */
+	sctp_tsnmap_skip(&ulpq->asoc->peer.tsn_map, ftsn);
+	/* purge the fragmentation queue */
+	sctp_ulpq_reasm_flushtsn(ulpq, ftsn);
+	/* Abort any in progress partial delivery. */
+	sctp_ulpq_abort_pd(ulpq, GFP_ATOMIC);
+}
+
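+/* Free all queued fragments, ordered and unordered, up to and
+ * including the forwarded TSN.
+ */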
+static void sctp_intl_reasm_flushtsn(struct sctp_ulpq *ulpq, __u32 ftsn)
+{
+	struct sk_buff *pos, *tmp;
+
+	skb_queue_walk_safe(&ulpq->reasm, pos, tmp) {
+		struct sctp_ulpevent *event = sctp_skb2event(pos);
+		__u32 tsn = event->tsn;
+
+		if (TSN_lte(tsn, ftsn)) {
+			__skb_unlink(pos, &ulpq->reasm);
+			sctp_ulpevent_free(event);
+		}
+	}
+
+	skb_queue_walk_safe(&ulpq->reasm_uo, pos, tmp) {
+		struct sctp_ulpevent *event = sctp_skb2event(pos);
+		__u32 tsn = event->tsn;
+
+		if (TSN_lte(tsn, ftsn)) {
+			__skb_unlink(pos, &ulpq->reasm_uo);
+			sctp_ulpevent_free(event);
+		}
+	}
+}
+
+static void sctp_report_iftsn(struct sctp_ulpq *ulpq, __u32 ftsn)
+{
+	/* Move the Cumulative TSN Ack ahead. */
+	sctp_tsnmap_skip(&ulpq->asoc->peer.tsn_map, ftsn);
+	/* purge the fragmentation queue */
+	sctp_intl_reasm_flushtsn(ulpq, ftsn);
+	/* abort only when it's for all data */
+	if (ftsn == sctp_tsnmap_get_max_tsn_seen(&ulpq->asoc->peer.tsn_map))
+		sctp_intl_abort_pd(ulpq, GFP_ATOMIC);
+}
+
+static void sctp_handle_fwdtsn(struct sctp_ulpq *ulpq, struct sctp_chunk *chunk)
+{
+	struct sctp_fwdtsn_skip *skip;
+
+	/* Walk through all the skipped SSNs */
+	sctp_walk_fwdtsn(skip, chunk)
+		sctp_ulpq_skip(ulpq, ntohs(skip->stream), ntohs(skip->ssn));
+}
+
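+/* Apply one I-FORWARD-TSN skip entry: abort any affected partial
+ * delivery, advance the stream's expected MID and reap lobby events
+ * that became deliverable.
+ */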
+static void sctp_intl_skip(struct sctp_ulpq *ulpq, __u16 sid, __u32 mid,
+			   __u8 flags)
+{
+	struct sctp_stream_in *sin = sctp_stream_in(ulpq->asoc, sid);
+	struct sctp_stream *stream  = &ulpq->asoc->stream;
+
+	if (flags & SCTP_FTSN_U_BIT) {
+		if (sin->pd_mode_uo && MID_lt(sin->mid_uo, mid)) {
+			sin->pd_mode_uo = 0;
+			sctp_intl_stream_abort_pd(ulpq, sid, mid, 0x1,
+						  GFP_ATOMIC);
+		}
+		return;
+	}
+
+	if (MID_lt(mid, sctp_mid_peek(stream, in, sid)))
+		return;
+
+	if (sin->pd_mode) {
+		sin->pd_mode = 0;
+		sctp_intl_stream_abort_pd(ulpq, sid, mid, 0x0, GFP_ATOMIC);
+	}
+
+	sctp_mid_skip(stream, in, sid, mid);
+
+	sctp_intl_reap_ordered(ulpq, sid);
+}
+
+static void sctp_handle_iftsn(struct sctp_ulpq *ulpq, struct sctp_chunk *chunk)
+{
+	struct sctp_ifwdtsn_skip *skip;
+
+	/* Walk through all the skipped MIDs and abort stream pd if possible */
+	sctp_walk_ifwdtsn(skip, chunk)
+		sctp_intl_skip(ulpq, ntohs(skip->stream),
+			       ntohl(skip->mid), skip->flags);
+}
+
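+/* Operation tables: interleave_0 keeps the classic DATA/FORWARD-TSN
+ * behaviour, while interleave_1 switches the association to I-DATA and
+ * I-FORWARD-TSN processing.
+ */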
+static struct sctp_stream_interleave sctp_stream_interleave_0 = {
+	.data_chunk_len		= sizeof(struct sctp_data_chunk),
+	.ftsn_chunk_len		= sizeof(struct sctp_fwdtsn_chunk),
+	/* DATA process functions */
+	.make_datafrag		= sctp_make_datafrag_empty,
+	.assign_number		= sctp_chunk_assign_ssn,
+	.validate_data		= sctp_validate_data,
+	.ulpevent_data		= sctp_ulpq_tail_data,
+	.enqueue_event		= sctp_ulpq_tail_event,
+	.renege_events		= sctp_ulpq_renege,
+	.start_pd		= sctp_ulpq_partial_delivery,
+	.abort_pd		= sctp_ulpq_abort_pd,
+	/* FORWARD-TSN process functions */
+	.generate_ftsn		= sctp_generate_fwdtsn,
+	.validate_ftsn		= sctp_validate_fwdtsn,
+	.report_ftsn		= sctp_report_fwdtsn,
+	.handle_ftsn		= sctp_handle_fwdtsn,
+};
+
+static struct sctp_stream_interleave sctp_stream_interleave_1 = {
+	.data_chunk_len		= sizeof(struct sctp_idata_chunk),
+	.ftsn_chunk_len		= sizeof(struct sctp_ifwdtsn_chunk),
+	/* I-DATA process functions */
+	.make_datafrag		= sctp_make_idatafrag_empty,
+	.assign_number		= sctp_chunk_assign_mid,
+	.validate_data		= sctp_validate_idata,
+	.ulpevent_data		= sctp_ulpevent_idata,
+	.enqueue_event		= sctp_enqueue_event,
+	.renege_events		= sctp_renege_events,
+	.start_pd		= sctp_intl_start_pd,
+	.abort_pd		= sctp_intl_abort_pd,
+	/* I-FORWARD-TSN process functions */
+	.generate_ftsn		= sctp_generate_iftsn,
+	.validate_ftsn		= sctp_validate_iftsn,
+	.report_ftsn		= sctp_report_iftsn,
+	.handle_ftsn		= sctp_handle_iftsn,
+};
+
+void sctp_stream_interleave_init(struct sctp_stream *stream)
+{
+	struct sctp_association *asoc;
+
+	asoc = container_of(stream, struct sctp_association, stream);
+	stream->si = asoc->intl_enable ? &sctp_stream_interleave_1
+				       : &sctp_stream_interleave_0;
+}
diff --git a/net/sctp/stream_sched.c b/net/sctp/stream_sched.c
index d8c162a..f5fcd42 100644
--- a/net/sctp/stream_sched.c
+++ b/net/sctp/stream_sched.c
@@ -242,7 +242,8 @@ int sctp_sched_get_value(struct sctp_association *asoc, __u16 sid,
 
 void sctp_sched_dequeue_done(struct sctp_outq *q, struct sctp_chunk *ch)
 {
-	if (!list_is_last(&ch->frag_list, &ch->msg->chunks)) {
+	if (!list_is_last(&ch->frag_list, &ch->msg->chunks) &&
+	    !q->asoc->intl_enable) {
 		struct sctp_stream_out *sout;
 		__u16 sid;
 
diff --git a/net/sctp/sysctl.c b/net/sctp/sysctl.c
index ef7ca44..33ca5b7 100644
--- a/net/sctp/sysctl.c
+++ b/net/sctp/sysctl.c
@@ -289,6 +289,13 @@ static struct ctl_table sctp_net_table[] = {
 		.proc_handler	= proc_sctp_do_auth,
 	},
 	{
+		.procname	= "intl_enable",
+		.data		= &init_net.sctp.intl_enable,
+		.maxlen		= sizeof(int),
+		.mode		= 0644,
+		.proc_handler	= proc_dointvec,
+	},
+	{
 		.procname	= "addr_scope_policy",
 		.data		= &init_net.sctp.scope_policy,
 		.maxlen		= sizeof(int),
diff --git a/net/sctp/ulpevent.c b/net/sctp/ulpevent.c
index 5447228..84207ad 100644
--- a/net/sctp/ulpevent.c
+++ b/net/sctp/ulpevent.c
@@ -443,8 +443,8 @@ struct sctp_ulpevent *sctp_ulpevent_make_send_failed(
 		goto fail;
 
 	/* Pull off the common chunk header and DATA header.  */
-	skb_pull(skb, sizeof(struct sctp_data_chunk));
-	len -= sizeof(struct sctp_data_chunk);
+	skb_pull(skb, sctp_datachk_len(&asoc->stream));
+	len -= sctp_datachk_len(&asoc->stream);
 
 	/* Embed the event fields inside the cloned skb.  */
 	event = sctp_skb2event(skb);
@@ -705,8 +705,6 @@ struct sctp_ulpevent *sctp_ulpevent_make_rcvmsg(struct sctp_association *asoc,
 	sctp_ulpevent_receive_data(event, asoc);
 
 	event->stream = ntohs(chunk->subh.data_hdr->stream);
-	event->ssn = ntohs(chunk->subh.data_hdr->ssn);
-	event->ppid = chunk->subh.data_hdr->ppid;
 	if (chunk->chunk_hdr->flags & SCTP_DATA_UNORDERED) {
 		event->flags |= SCTP_UNORDERED;
 		event->cumtsn = sctp_tsnmap_get_ctsn(&asoc->peer.tsn_map);
@@ -732,8 +730,9 @@ struct sctp_ulpevent *sctp_ulpevent_make_rcvmsg(struct sctp_association *asoc,
  *   various events.
  */
 struct sctp_ulpevent *sctp_ulpevent_make_pdapi(
-	const struct sctp_association *asoc, __u32 indication,
-	gfp_t gfp)
+					const struct sctp_association *asoc,
+					__u32 indication, __u32 sid, __u32 seq,
+					__u32 flags, gfp_t gfp)
 {
 	struct sctp_ulpevent *event;
 	struct sctp_pdapi_event *pd;
@@ -754,7 +753,9 @@ struct sctp_ulpevent *sctp_ulpevent_make_pdapi(
 	 *   Currently unused.
 	 */
 	pd->pdapi_type = SCTP_PARTIAL_DELIVERY_EVENT;
-	pd->pdapi_flags = 0;
+	pd->pdapi_flags = flags;
+	pd->pdapi_stream = sid;
+	pd->pdapi_seq = seq;
 
 	/* pdapi_length: 32 bits (unsigned integer)
 	 *
diff --git a/net/sctp/ulpqueue.c b/net/sctp/ulpqueue.c
index e36ec5d..0b42710 100644
--- a/net/sctp/ulpqueue.c
+++ b/net/sctp/ulpqueue.c
@@ -60,6 +60,7 @@ struct sctp_ulpq *sctp_ulpq_init(struct sctp_ulpq *ulpq,
 
 	ulpq->asoc = asoc;
 	skb_queue_head_init(&ulpq->reasm);
+	skb_queue_head_init(&ulpq->reasm_uo);
 	skb_queue_head_init(&ulpq->lobby);
 	ulpq->pd_mode  = 0;
 
@@ -83,6 +84,10 @@ void sctp_ulpq_flush(struct sctp_ulpq *ulpq)
 		sctp_ulpevent_free(event);
 	}
 
+	while ((skb = __skb_dequeue(&ulpq->reasm_uo)) != NULL) {
+		event = sctp_skb2event(skb);
+		sctp_ulpevent_free(event);
+	}
 }
 
 /* Dispose of a ulpqueue.  */
@@ -104,6 +109,9 @@ int sctp_ulpq_tail_data(struct sctp_ulpq *ulpq, struct sctp_chunk *chunk,
 	if (!event)
 		return -ENOMEM;
 
+	event->ssn = ntohs(chunk->subh.data_hdr->ssn);
+	event->ppid = chunk->subh.data_hdr->ppid;
+
 	/* Do reassembly if needed.  */
 	event = sctp_ulpq_reasm(ulpq, event);
 
@@ -328,9 +336,10 @@ static void sctp_ulpq_store_reasm(struct sctp_ulpq *ulpq,
  * payload was fragmented on the way and ip had to reassemble them.
  * We add the rest of skb's to the first skb's fraglist.
  */
-static struct sctp_ulpevent *sctp_make_reassembled_event(struct net *net,
-	struct sk_buff_head *queue, struct sk_buff *f_frag,
-	struct sk_buff *l_frag)
+struct sctp_ulpevent *sctp_make_reassembled_event(struct net *net,
+						  struct sk_buff_head *queue,
+						  struct sk_buff *f_frag,
+						  struct sk_buff *l_frag)
 {
 	struct sk_buff *pos;
 	struct sk_buff *new = NULL;
@@ -853,7 +862,7 @@ static struct sctp_ulpevent *sctp_ulpq_order(struct sctp_ulpq *ulpq,
 	struct sctp_stream *stream;
 
 	/* Check if this message needs ordering.  */
-	if (SCTP_DATA_UNORDERED & event->msg_flags)
+	if (event->msg_flags & SCTP_DATA_UNORDERED)
 		return event;
 
 	/* Note: The stream ID must be verified before this routine.  */
@@ -974,8 +983,8 @@ void sctp_ulpq_skip(struct sctp_ulpq *ulpq, __u16 sid, __u16 ssn)
 	sctp_ulpq_reap_ordered(ulpq, sid);
 }
 
-static __u16 sctp_ulpq_renege_list(struct sctp_ulpq *ulpq,
-		struct sk_buff_head *list, __u16 needed)
+__u16 sctp_ulpq_renege_list(struct sctp_ulpq *ulpq, struct sk_buff_head *list,
+			    __u16 needed)
 {
 	__u16 freed = 0;
 	__u32 tsn, last_tsn;
@@ -1132,7 +1141,7 @@ void sctp_ulpq_abort_pd(struct sctp_ulpq *ulpq, gfp_t gfp)
 				       &sctp_sk(sk)->subscribe))
 		ev = sctp_ulpevent_make_pdapi(ulpq->asoc,
 					      SCTP_PARTIAL_DELIVERY_ABORTED,
-					      gfp);
+					      0, 0, 0, gfp);
 	if (ev)
 		__skb_queue_tail(&sk->sk_receive_queue, sctp_event2skb(ev));
 
diff --git a/net/smc/af_smc.c b/net/smc/af_smc.c
index 6451c50..daf8075 100644
--- a/net/smc/af_smc.c
+++ b/net/smc/af_smc.c
@@ -520,7 +520,7 @@ static int smc_connect_rdma(struct smc_sock *smc)
 	smc->use_fallback = true;
 	if (reason_code && (reason_code != SMC_CLC_DECL_REPLY)) {
 		rc = smc_clc_send_decline(smc, reason_code);
-		if (rc < sizeof(struct smc_clc_msg_decline))
+		if (rc < 0)
 			goto out_err;
 	}
 	goto out_connected;
@@ -751,14 +751,16 @@ static void smc_listen_work(struct work_struct *work)
 {
 	struct smc_sock *new_smc = container_of(work, struct smc_sock,
 						smc_listen_work);
+	struct smc_clc_msg_proposal_prefix *pclc_prfx;
 	struct socket *newclcsock = new_smc->clcsock;
 	struct smc_sock *lsmc = new_smc->listen_smc;
 	struct smc_clc_msg_accept_confirm cclc;
 	int local_contact = SMC_REUSE_CONTACT;
 	struct sock *newsmcsk = &new_smc->sk;
-	struct smc_clc_msg_proposal pclc;
+	struct smc_clc_msg_proposal *pclc;
 	struct smc_ib_device *smcibdev;
 	struct sockaddr_in peeraddr;
+	u8 buf[SMC_CLC_MAX_LEN];
 	struct smc_link *link;
 	int reason_code = 0;
 	int rc = 0, len;
@@ -775,7 +777,7 @@ static void smc_listen_work(struct work_struct *work)
 	/* do inband token exchange -
 	 *wait for and receive SMC Proposal CLC message
 	 */
-	reason_code = smc_clc_wait_msg(new_smc, &pclc, sizeof(pclc),
+	reason_code = smc_clc_wait_msg(new_smc, &buf, sizeof(buf),
 				       SMC_CLC_PROPOSAL);
 	if (reason_code < 0)
 		goto out_err;
@@ -804,8 +806,11 @@ static void smc_listen_work(struct work_struct *work)
 		reason_code = SMC_CLC_DECL_CNFERR; /* configuration error */
 		goto decline_rdma;
 	}
-	if ((pclc.outgoing_subnet != subnet) ||
-	    (pclc.prefix_len != prefix_len)) {
+
+	pclc = (struct smc_clc_msg_proposal *)&buf;
+	pclc_prfx = smc_clc_proposal_get_prefix(pclc);
+	if (pclc_prfx->outgoing_subnet != subnet ||
+	    pclc_prfx->prefix_len != prefix_len) {
 		reason_code = SMC_CLC_DECL_CNFERR; /* configuration error */
 		goto decline_rdma;
 	}
@@ -816,7 +821,7 @@ static void smc_listen_work(struct work_struct *work)
 	/* allocate connection / link group */
 	mutex_lock(&smc_create_lgr_pending);
 	local_contact = smc_conn_create(new_smc, peeraddr.sin_addr.s_addr,
-					smcibdev, ibport, &pclc.lcl, 0);
+					smcibdev, ibport, &pclc->lcl, 0);
 	if (local_contact < 0) {
 		rc = local_contact;
 		if (rc == -ENOMEM)
@@ -879,11 +884,9 @@ static void smc_listen_work(struct work_struct *work)
 		}
 		/* QP confirmation over RoCE fabric */
 		reason_code = smc_serv_conf_first_link(new_smc);
-		if (reason_code < 0) {
+		if (reason_code < 0)
 			/* peer is not aware of a problem */
-			rc = reason_code;
 			goto out_err_unlock;
-		}
 		if (reason_code > 0)
 			goto decline_rdma_unlock;
 	}
@@ -916,8 +919,7 @@ static void smc_listen_work(struct work_struct *work)
 	smc_conn_free(&new_smc->conn);
 	new_smc->use_fallback = true;
 	if (reason_code && (reason_code != SMC_CLC_DECL_REPLY)) {
-		rc = smc_clc_send_decline(new_smc, reason_code);
-		if (rc < sizeof(struct smc_clc_msg_decline))
+		if (smc_clc_send_decline(new_smc, reason_code) < 0)
 			goto out_err;
 	}
 	goto out_connected;
diff --git a/net/smc/smc_cdc.c b/net/smc/smc_cdc.c
index 87f7bed..d4155ff 100644
--- a/net/smc/smc_cdc.c
+++ b/net/smc/smc_cdc.c
@@ -213,6 +213,9 @@ static void smc_cdc_msg_recv_action(struct smc_sock *smc,
 		/* guarantee 0 <= bytes_to_rcv <= rmbe_size */
 		smp_mb__after_atomic();
 		smc->sk.sk_data_ready(&smc->sk);
+	} else if ((conn->local_rx_ctrl.prod_flags.write_blocked) ||
+		   (conn->local_rx_ctrl.prod_flags.cons_curs_upd_req)) {
+		smc->sk.sk_data_ready(&smc->sk);
 	}
 
 	if (conn->local_rx_ctrl.conn_state_flags.peer_conn_abort) {
@@ -234,15 +237,6 @@ static void smc_cdc_msg_recv_action(struct smc_sock *smc,
 		/* trigger socket release if connection closed */
 		smc_close_wake_tx_prepared(smc);
 	}
-
-	/* socket connected but not accepted */
-	if (!smc->sk.sk_socket)
-		return;
-
-	/* data available */
-	if ((conn->local_rx_ctrl.prod_flags.write_blocked) ||
-	    (conn->local_rx_ctrl.prod_flags.cons_curs_upd_req))
-		smc_tx_consumer_update(conn);
 }
 
 /* called under tasklet context */
diff --git a/net/smc/smc_clc.c b/net/smc/smc_clc.c
index 1800e16..abf7ceb 100644
--- a/net/smc/smc_clc.c
+++ b/net/smc/smc_clc.c
@@ -22,6 +22,54 @@
 #include "smc_clc.h"
 #include "smc_ib.h"
 
+/* check if received message has a correct header length and contains valid
+ * heading and trailing eyecatchers
+ */
+static bool smc_clc_msg_hdr_valid(struct smc_clc_msg_hdr *clcm)
+{
+	struct smc_clc_msg_proposal_prefix *pclc_prfx;
+	struct smc_clc_msg_accept_confirm *clc;
+	struct smc_clc_msg_proposal *pclc;
+	struct smc_clc_msg_decline *dclc;
+	struct smc_clc_msg_trail *trl;
+
+	if (memcmp(clcm->eyecatcher, SMC_EYECATCHER, sizeof(SMC_EYECATCHER)))
+		return false;
+	switch (clcm->type) {
+	case SMC_CLC_PROPOSAL:
+		pclc = (struct smc_clc_msg_proposal *)clcm;
+		pclc_prfx = smc_clc_proposal_get_prefix(pclc);
+		if (ntohs(pclc->hdr.length) !=
+			sizeof(*pclc) + ntohs(pclc->iparea_offset) +
+			sizeof(*pclc_prfx) +
+			pclc_prfx->ipv6_prefixes_cnt *
+				sizeof(struct smc_clc_ipv6_prefix) +
+			sizeof(*trl))
+			return false;
+		trl = (struct smc_clc_msg_trail *)
+			((u8 *)pclc + ntohs(pclc->hdr.length) - sizeof(*trl));
+		break;
+	case SMC_CLC_ACCEPT:
+	case SMC_CLC_CONFIRM:
+		clc = (struct smc_clc_msg_accept_confirm *)clcm;
+		if (ntohs(clc->hdr.length) != sizeof(*clc))
+			return false;
+		trl = &clc->trl;
+		break;
+	case SMC_CLC_DECLINE:
+		dclc = (struct smc_clc_msg_decline *)clcm;
+		if (ntohs(dclc->hdr.length) != sizeof(*dclc))
+			return false;
+		trl = &dclc->trl;
+		break;
+	default:
+		return false;
+	}
+	if (memcmp(trl->eyecatcher, SMC_EYECATCHER, sizeof(SMC_EYECATCHER)))
+		return false;
+	return true;
+}
+
 /* Wait for data on the tcp-socket, analyze received data
  * Returns:
  * 0 if success and it was not a decline that we received.
@@ -72,9 +120,7 @@ int smc_clc_wait_msg(struct smc_sock *smc, void *buf, int buflen,
 	}
 	datlen = ntohs(clcm->length);
 	if ((len < sizeof(struct smc_clc_msg_hdr)) ||
-	    (datlen < sizeof(struct smc_clc_msg_decline)) ||
-	    (datlen > sizeof(struct smc_clc_msg_accept_confirm)) ||
-	    memcmp(clcm->eyecatcher, SMC_EYECATCHER, sizeof(SMC_EYECATCHER)) ||
+	    (datlen > buflen) ||
 	    ((clcm->type != SMC_CLC_DECLINE) &&
 	     (clcm->type != expected_type))) {
 		smc->sk.sk_err = EPROTO;
@@ -89,7 +135,7 @@ int smc_clc_wait_msg(struct smc_sock *smc, void *buf, int buflen,
 	krflags = MSG_WAITALL;
 	smc->clcsock->sk->sk_rcvtimeo = CLC_WAIT_TIME;
 	len = kernel_recvmsg(smc->clcsock, &msg, &vec, 1, datlen, krflags);
-	if (len < datlen) {
+	if (len < datlen || !smc_clc_msg_hdr_valid(clcm)) {
 		smc->sk.sk_err = EPROTO;
 		reason_code = -EPROTO;
 		goto out;
@@ -133,7 +179,7 @@ int smc_clc_send_decline(struct smc_sock *smc, u32 peer_diag_info)
 		smc->sk.sk_err = EPROTO;
 	if (len < 0)
 		smc->sk.sk_err = -len;
-	return len;
+	return sock_error(&smc->sk);
 }
 
 /* send CLC PROPOSAL message across internal TCP socket */
@@ -141,33 +187,43 @@ int smc_clc_send_proposal(struct smc_sock *smc,
 			  struct smc_ib_device *smcibdev,
 			  u8 ibport)
 {
+	struct smc_clc_msg_proposal_prefix pclc_prfx;
 	struct smc_clc_msg_proposal pclc;
+	struct smc_clc_msg_trail trl;
 	int reason_code = 0;
+	struct kvec vec[3];
 	struct msghdr msg;
-	struct kvec vec;
-	int len, rc;
+	int len, plen, rc;
 
 	/* send SMC Proposal CLC message */
+	plen = sizeof(pclc) + sizeof(pclc_prfx) + sizeof(trl);
 	memset(&pclc, 0, sizeof(pclc));
 	memcpy(pclc.hdr.eyecatcher, SMC_EYECATCHER, sizeof(SMC_EYECATCHER));
 	pclc.hdr.type = SMC_CLC_PROPOSAL;
-	pclc.hdr.length = htons(sizeof(pclc));
+	pclc.hdr.length = htons(plen);
 	pclc.hdr.version = SMC_CLC_V1;		/* SMC version */
 	memcpy(pclc.lcl.id_for_peer, local_systemid, sizeof(local_systemid));
 	memcpy(&pclc.lcl.gid, &smcibdev->gid[ibport - 1], SMC_GID_SIZE);
 	memcpy(&pclc.lcl.mac, &smcibdev->mac[ibport - 1], ETH_ALEN);
+	pclc.iparea_offset = htons(0);
 
+	memset(&pclc_prfx, 0, sizeof(pclc_prfx));
 	/* determine subnet and mask from internal TCP socket */
-	rc = smc_netinfo_by_tcpsk(smc->clcsock, &pclc.outgoing_subnet,
-				  &pclc.prefix_len);
+	rc = smc_netinfo_by_tcpsk(smc->clcsock, &pclc_prfx.outgoing_subnet,
+				  &pclc_prfx.prefix_len);
 	if (rc)
 		return SMC_CLC_DECL_CNFERR; /* configuration error */
-	memcpy(pclc.trl.eyecatcher, SMC_EYECATCHER, sizeof(SMC_EYECATCHER));
+	pclc_prfx.ipv6_prefixes_cnt = 0;
+	memcpy(trl.eyecatcher, SMC_EYECATCHER, sizeof(SMC_EYECATCHER));
 	memset(&msg, 0, sizeof(msg));
-	vec.iov_base = &pclc;
-	vec.iov_len = sizeof(pclc);
+	vec[0].iov_base = &pclc;
+	vec[0].iov_len = sizeof(pclc);
+	vec[1].iov_base = &pclc_prfx;
+	vec[1].iov_len = sizeof(pclc_prfx);
+	vec[2].iov_base = &trl;
+	vec[2].iov_len = sizeof(trl);
 	/* due to the few bytes needed for clc-handshake this cannot block */
-	len = kernel_sendmsg(smc->clcsock, &msg, &vec, 1, sizeof(pclc));
+	len = kernel_sendmsg(smc->clcsock, &msg, vec, 3, plen);
 	if (len < sizeof(pclc)) {
 		if (len >= 0) {
 			reason_code = -ENETUNREACH;
diff --git a/net/smc/smc_clc.h b/net/smc/smc_clc.h
index 12a9af1..c145a0f 100644
--- a/net/smc/smc_clc.h
+++ b/net/smc/smc_clc.h
@@ -44,7 +44,7 @@ struct smc_clc_msg_hdr {	/* header1 of clc messages */
 #if defined(__BIG_ENDIAN_BITFIELD)
 	u8 version : 4,
 	   flag    : 1,
-	   rsvd	   : 3;
+	   rsvd    : 3;
 #elif defined(__LITTLE_ENDIAN_BITFIELD)
 	u8 rsvd    : 3,
 	   flag    : 1,
@@ -62,17 +62,31 @@ struct smc_clc_msg_local {	/* header2 of clc messages */
 	u8 mac[6];		/* mac of ib_device port */
 };
 
-struct smc_clc_msg_proposal {	/* clc proposal message */
-	struct smc_clc_msg_hdr hdr;
-	struct smc_clc_msg_local lcl;
-	__be16 iparea_offset;	/* offset to IP address information area */
+struct smc_clc_ipv6_prefix {
+	u8 prefix[4];
+	u8 prefix_len;
+} __packed;
+
+struct smc_clc_msg_proposal_prefix {	/* prefix part of clc proposal message*/
 	__be32 outgoing_subnet;	/* subnet mask */
 	u8 prefix_len;		/* number of significant bits in mask */
 	u8 reserved[2];
 	u8 ipv6_prefixes_cnt;	/* number of IPv6 prefixes in prefix array */
-	struct smc_clc_msg_trail trl; /* eye catcher "SMCR" EBCDIC */
 } __aligned(4);
 
+struct smc_clc_msg_proposal {	/* clc proposal message sent by Linux */
+	struct smc_clc_msg_hdr hdr;
+	struct smc_clc_msg_local lcl;
+	__be16 iparea_offset;	/* offset to IP address information area */
+} __aligned(4);
+
+#define SMC_CLC_PROPOSAL_MAX_OFFSET	0x28
+#define SMC_CLC_PROPOSAL_MAX_PREFIX	(8 * sizeof(struct smc_clc_ipv6_prefix))
+#define SMC_CLC_MAX_LEN		(sizeof(struct smc_clc_msg_proposal) + \
+				 SMC_CLC_PROPOSAL_MAX_OFFSET + \
+				 SMC_CLC_PROPOSAL_MAX_PREFIX + \
+				 sizeof(struct smc_clc_msg_trail))
+
 struct smc_clc_msg_accept_confirm {	/* clc accept / confirm message */
 	struct smc_clc_msg_hdr hdr;
 	struct smc_clc_msg_local lcl;
@@ -102,6 +116,14 @@ struct smc_clc_msg_decline {	/* clc decline message */
 	struct smc_clc_msg_trail trl; /* eye catcher "SMCR" EBCDIC */
 } __aligned(4);
 
+/* determine start of the prefix area within the proposal message */
+static inline struct smc_clc_msg_proposal_prefix *
+smc_clc_proposal_get_prefix(struct smc_clc_msg_proposal *pclc)
+{
+	return (struct smc_clc_msg_proposal_prefix *)
+	       ((u8 *)pclc + sizeof(*pclc) + ntohs(pclc->iparea_offset));
+}
+
 struct smc_sock;
 struct smc_ib_device;
 
diff --git a/net/smc/smc_close.c b/net/smc/smc_close.c
index 48615d2..e194c6c 100644
--- a/net/smc/smc_close.c
+++ b/net/smc/smc_close.c
@@ -113,7 +113,7 @@ static int smc_close_abort(struct smc_connection *conn)
 /* terminate smc socket abnormally - active abort
  * RDMA communication no longer possible
  */
-void smc_close_active_abort(struct smc_sock *smc)
+static void smc_close_active_abort(struct smc_sock *smc)
 {
 	struct smc_cdc_conn_state_flags *txflags =
 		&smc->conn.local_tx_ctrl.conn_state_flags;
diff --git a/net/smc/smc_close.h b/net/smc/smc_close.h
index ed82506..8c49888 100644
--- a/net/smc/smc_close.h
+++ b/net/smc/smc_close.h
@@ -20,7 +20,6 @@
 #define SMC_CLOSE_SOCK_PUT_DELAY		HZ
 
 void smc_close_wake_tx_prepared(struct smc_sock *smc);
-void smc_close_active_abort(struct smc_sock *smc);
 int smc_close_active(struct smc_sock *smc);
 void smc_close_sock_put_work(struct work_struct *work);
 int smc_close_shutdown_write(struct smc_sock *smc);
diff --git a/net/smc/smc_rx.c b/net/smc/smc_rx.c
index cbf5863..9dc392c 100644
--- a/net/smc/smc_rx.c
+++ b/net/smc/smc_rx.c
@@ -65,7 +65,6 @@ static int smc_rx_wait_data(struct smc_sock *smc, long *timeo)
 	rc = sk_wait_event(sk, timeo,
 			   sk->sk_err ||
 			   sk->sk_shutdown & RCV_SHUTDOWN ||
-			   sock_flag(sk, SOCK_DONE) ||
 			   atomic_read(&conn->bytes_to_rcv) ||
 			   smc_cdc_rxed_any_close_or_senddone(conn),
 			   &wait);
@@ -116,7 +115,7 @@ int smc_rx_recvmsg(struct smc_sock *smc, struct msghdr *msg, size_t len,
 		if (read_done) {
 			if (sk->sk_err ||
 			    sk->sk_state == SMC_CLOSED ||
-			    (sk->sk_shutdown & RCV_SHUTDOWN) ||
+			    sk->sk_shutdown & RCV_SHUTDOWN ||
 			    !timeo ||
 			    signal_pending(current) ||
 			    smc_cdc_rxed_any_close_or_senddone(conn) ||
@@ -124,8 +123,6 @@ int smc_rx_recvmsg(struct smc_sock *smc, struct msghdr *msg, size_t len,
 			    peer_conn_abort)
 				break;
 		} else {
-			if (sock_flag(sk, SOCK_DONE))
-				break;
 			if (sk->sk_err) {
 				read_done = sock_error(sk);
 				break;
diff --git a/net/smc/smc_tx.c b/net/smc/smc_tx.c
index c48dc2d..2e50fdd 100644
--- a/net/smc/smc_tx.c
+++ b/net/smc/smc_tx.c
@@ -104,14 +104,12 @@ static int smc_tx_wait_memory(struct smc_sock *smc, int flags)
 		if (atomic_read(&conn->sndbuf_space))
 			break; /* at least 1 byte of free space available */
 		set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
-		sk->sk_write_pending++;
 		sk_wait_event(sk, &timeo,
 			      sk->sk_err ||
 			      (sk->sk_shutdown & SEND_SHUTDOWN) ||
 			      smc_cdc_rxed_any_close_or_senddone(conn) ||
 			      atomic_read(&conn->sndbuf_space),
 			      &wait);
-		sk->sk_write_pending--;
 	}
 	remove_wait_queue(sk_sleep(sk), &wait);
 	return rc;
@@ -450,9 +448,7 @@ static void smc_tx_work(struct work_struct *work)
 void smc_tx_consumer_update(struct smc_connection *conn)
 {
 	union smc_host_cursor cfed, cons;
-	struct smc_cdc_tx_pend *pend;
-	struct smc_wr_buf *wr_buf;
-	int to_confirm, rc;
+	int to_confirm;
 
 	smc_curs_write(&cons,
 		       smc_curs_read(&conn->local_tx_ctrl.cons, conn),
@@ -466,10 +462,7 @@ void smc_tx_consumer_update(struct smc_connection *conn)
 	    ((to_confirm > conn->rmbe_update_limit) &&
 	     ((to_confirm > (conn->rmbe_size / 2)) ||
 	      conn->local_rx_ctrl.prod_flags.write_blocked))) {
-		rc = smc_cdc_get_free_slot(conn, &wr_buf, &pend);
-		if (!rc)
-			rc = smc_cdc_msg_send(conn, wr_buf, pend);
-		if (rc < 0) {
+		if (smc_cdc_get_slot_and_msg_send(conn) < 0) {
 			schedule_delayed_work(&conn->tx_work,
 					      SMC_TX_WORK_DELAY);
 			return;
diff --git a/net/socket.c b/net/socket.c
index 6f05d5c..fbfae1e 100644
--- a/net/socket.c
+++ b/net/socket.c
@@ -163,12 +163,6 @@ static DEFINE_SPINLOCK(net_family_lock);
 static const struct net_proto_family __rcu *net_families[NPROTO] __read_mostly;
 
 /*
- *	Statistics counters of the socket lists
- */
-
-static DEFINE_PER_CPU(int, sockets_in_use);
-
-/*
  * Support routines.
  * Move socket addresses back and forth across the kernel/user
  * divide and look after the messy bits.
@@ -580,7 +574,6 @@ struct socket *sock_alloc(void)
 	inode->i_gid = current_fsgid();
 	inode->i_op = &sockfs_inode_ops;
 
-	this_cpu_add(sockets_in_use, 1);
 	return sock;
 }
 EXPORT_SYMBOL(sock_alloc);
@@ -607,7 +600,6 @@ void sock_release(struct socket *sock)
 	if (rcu_dereference_protected(sock->wq, 1)->fasync_list)
 		pr_err("%s: fasync list not empty!\n", __func__);
 
-	this_cpu_sub(sockets_in_use, 1);
 	if (!sock->file) {
 		iput(SOCK_INODE(sock));
 		return;
@@ -2633,17 +2625,8 @@ pure_initcall(jit_init);
 #ifdef CONFIG_PROC_FS
 void socket_seq_show(struct seq_file *seq)
 {
-	int cpu;
-	int counter = 0;
-
-	for_each_possible_cpu(cpu)
-	    counter += per_cpu(sockets_in_use, cpu);
-
-	/* It can be negative, by the way. 8) */
-	if (counter < 0)
-		counter = 0;
-
-	seq_printf(seq, "sockets: used %d\n", counter);
+	seq_printf(seq, "sockets: used %d\n",
+		   sock_inuse_get(seq->private));
 }
 #endif				/* CONFIG_PROC_FS */
 
diff --git a/net/tipc/bcast.c b/net/tipc/bcast.c
index 329325bd..37892b3 100644
--- a/net/tipc/bcast.c
+++ b/net/tipc/bcast.c
@@ -1,7 +1,7 @@
 /*
  * net/tipc/bcast.c: TIPC broadcast code
  *
- * Copyright (c) 2004-2006, 2014-2016, Ericsson AB
+ * Copyright (c) 2004-2006, 2014-2017, Ericsson AB
  * Copyright (c) 2004, Intel Corporation.
  * Copyright (c) 2005, 2010-2011, Wind River Systems
  * All rights reserved.
@@ -42,8 +42,8 @@
 #include "link.h"
 #include "name_table.h"
 
-#define	BCLINK_WIN_DEFAULT	50	/* bcast link window size (default) */
-#define	BCLINK_WIN_MIN	        32	/* bcast minimum link window size */
+#define BCLINK_WIN_DEFAULT  50	/* bcast link window size (default) */
+#define BCLINK_WIN_MIN      32	/* bcast minimum link window size */
 
 const char tipc_bclink_name[] = "broadcast-link";
 
@@ -74,6 +74,10 @@ static struct tipc_bc_base *tipc_bc_base(struct net *net)
 	return tipc_net(net)->bcbase;
 }
 
+/* tipc_bcast_get_mtu(): get the MTU currently used by the broadcast link.
+ * Note: the MTU is decremented to give room for a tunnel header, in
+ * case the message needs to be sent as replicast
+ */
 int tipc_bcast_get_mtu(struct net *net)
 {
 	return tipc_link_mtu(tipc_bc_sndlink(net)) - INT_H_SIZE;
@@ -515,7 +519,7 @@ int tipc_bcast_init(struct net *net)
 	spin_lock_init(&tipc_net(net)->bclock);
 
 	if (!tipc_link_bc_create(net, 0, 0,
-				 U16_MAX,
+				 FB_MTU,
 				 BCLINK_WIN_DEFAULT,
 				 0,
 				 &bb->inputq,
diff --git a/net/tipc/core.h b/net/tipc/core.h
index 9643426..20b21af 100644
--- a/net/tipc/core.h
+++ b/net/tipc/core.h
@@ -49,7 +49,6 @@
 #include <linux/uaccess.h>
 #include <linux/interrupt.h>
 #include <linux/atomic.h>
-#include <asm/hardirq.h>
 #include <linux/netdevice.h>
 #include <linux/in.h>
 #include <linux/list.h>
diff --git a/net/tipc/group.c b/net/tipc/group.c
index 5f4ffae..497ee34 100644
--- a/net/tipc/group.c
+++ b/net/tipc/group.c
@@ -49,8 +49,6 @@
 #define ADV_ACTIVE (ADV_UNIT * 12)
 
 enum mbr_state {
-	MBR_QUARANTINED,
-	MBR_DISCOVERED,
 	MBR_JOINING,
 	MBR_PUBLISHED,
 	MBR_JOINED,
@@ -64,8 +62,7 @@ enum mbr_state {
 struct tipc_member {
 	struct rb_node tree_node;
 	struct list_head list;
-	struct list_head congested;
-	struct sk_buff *event_msg;
+	struct list_head small_win;
 	struct sk_buff_head deferredq;
 	struct tipc_group *group;
 	u32 node;
@@ -77,21 +74,18 @@ struct tipc_member {
 	u16 bc_rcv_nxt;
 	u16 bc_syncpt;
 	u16 bc_acked;
-	bool usr_pending;
 };
 
 struct tipc_group {
 	struct rb_root members;
-	struct list_head congested;
+	struct list_head small_win;
 	struct list_head pending;
 	struct list_head active;
-	struct list_head reclaiming;
 	struct tipc_nlist dests;
 	struct net *net;
 	int subid;
 	u32 type;
 	u32 instance;
-	u32 domain;
 	u32 scope;
 	u32 portid;
 	u16 member_cnt;
@@ -101,11 +95,27 @@ struct tipc_group {
 	u16 bc_ackers;
 	bool loopback;
 	bool events;
+	bool open;
 };
 
 static void tipc_group_proto_xmit(struct tipc_group *grp, struct tipc_member *m,
 				  int mtyp, struct sk_buff_head *xmitq);
 
+bool tipc_group_is_open(struct tipc_group *grp)
+{
+	return grp->open;
+}
+
+static void tipc_group_open(struct tipc_member *m, bool *wakeup)
+{
+	*wakeup = false;
+	if (list_empty(&m->small_win))
+		return;
+	list_del_init(&m->small_win);
+	m->group->open = true;
+	*wakeup = true;
+}
+
 static void tipc_group_decr_active(struct tipc_group *grp,
 				   struct tipc_member *m)
 {
@@ -137,14 +147,14 @@ u16 tipc_group_bc_snd_nxt(struct tipc_group *grp)
 	return grp->bc_snd_nxt;
 }
 
-static bool tipc_group_is_enabled(struct tipc_member *m)
-{
-	return m->state != MBR_QUARANTINED && m->state != MBR_LEAVING;
-}
-
 static bool tipc_group_is_receiver(struct tipc_member *m)
 {
-	return m && m->state >= MBR_JOINED;
+	return m && m->state != MBR_JOINING && m->state != MBR_LEAVING;
+}
+
+static bool tipc_group_is_sender(struct tipc_member *m)
+{
+	return m && m->state != MBR_JOINING && m->state != MBR_PUBLISHED;
 }
 
 u32 tipc_group_exclude(struct tipc_group *grp)
@@ -162,6 +172,8 @@ int tipc_group_size(struct tipc_group *grp)
 struct tipc_group *tipc_group_create(struct net *net, u32 portid,
 				     struct tipc_group_req *mreq)
 {
+	u32 filter = TIPC_SUB_PORTS | TIPC_SUB_NO_STATUS;
+	bool global = mreq->scope != TIPC_NODE_SCOPE;
 	struct tipc_group *grp;
 	u32 type = mreq->type;
 
@@ -169,25 +181,40 @@ struct tipc_group *tipc_group_create(struct net *net, u32 portid,
 	if (!grp)
 		return NULL;
 	tipc_nlist_init(&grp->dests, tipc_own_addr(net));
-	INIT_LIST_HEAD(&grp->congested);
+	INIT_LIST_HEAD(&grp->small_win);
 	INIT_LIST_HEAD(&grp->active);
 	INIT_LIST_HEAD(&grp->pending);
-	INIT_LIST_HEAD(&grp->reclaiming);
 	grp->members = RB_ROOT;
 	grp->net = net;
 	grp->portid = portid;
-	grp->domain = addr_domain(net, mreq->scope);
 	grp->type = type;
 	grp->instance = mreq->instance;
 	grp->scope = mreq->scope;
 	grp->loopback = mreq->flags & TIPC_GROUP_LOOPBACK;
 	grp->events = mreq->flags & TIPC_GROUP_MEMBER_EVTS;
-	if (tipc_topsrv_kern_subscr(net, portid, type, 0, ~0, &grp->subid))
+	filter |= global ? TIPC_SUB_CLUSTER_SCOPE : TIPC_SUB_NODE_SCOPE;
+	if (tipc_topsrv_kern_subscr(net, portid, type, 0, ~0,
+				    filter, &grp->subid))
 		return grp;
 	kfree(grp);
 	return NULL;
 }
 
+void tipc_group_join(struct net *net, struct tipc_group *grp, int *sk_rcvbuf)
+{
+	struct rb_root *tree = &grp->members;
+	struct tipc_member *m, *tmp;
+	struct sk_buff_head xmitq;
+
+	skb_queue_head_init(&xmitq);
+	rbtree_postorder_for_each_entry_safe(m, tmp, tree, tree_node) {
+		tipc_group_proto_xmit(grp, m, GRP_JOIN_MSG, &xmitq);
+		tipc_group_update_member(m, 0);
+	}
+	tipc_node_distr_xmit(net, &xmitq);
+	*sk_rcvbuf = tipc_group_rcvbuf_limit(grp);
+}
+
 void tipc_group_delete(struct net *net, struct tipc_group *grp)
 {
 	struct rb_root *tree = &grp->members;
@@ -233,7 +260,7 @@ static struct tipc_member *tipc_group_find_dest(struct tipc_group *grp,
 	struct tipc_member *m;
 
 	m = tipc_group_find_member(grp, node, port);
-	if (m && tipc_group_is_enabled(m))
+	if (m && tipc_group_is_receiver(m))
 		return m;
 	return NULL;
 }
@@ -278,7 +305,7 @@ static void tipc_group_add_to_tree(struct tipc_group *grp,
 
 static struct tipc_member *tipc_group_create_member(struct tipc_group *grp,
 						    u32 node, u32 port,
-						    int state)
+						    u32 instance, int state)
 {
 	struct tipc_member *m;
 
@@ -286,11 +313,12 @@ static struct tipc_member *tipc_group_create_member(struct tipc_group *grp,
 	if (!m)
 		return NULL;
 	INIT_LIST_HEAD(&m->list);
-	INIT_LIST_HEAD(&m->congested);
+	INIT_LIST_HEAD(&m->small_win);
 	__skb_queue_head_init(&m->deferredq);
 	m->group = grp;
 	m->node = node;
 	m->port = port;
+	m->instance = instance;
 	m->bc_acked = grp->bc_snd_nxt - 1;
 	grp->member_cnt++;
 	tipc_group_add_to_tree(grp, m);
@@ -299,9 +327,10 @@ static struct tipc_member *tipc_group_create_member(struct tipc_group *grp,
 	return m;
 }
 
-void tipc_group_add_member(struct tipc_group *grp, u32 node, u32 port)
+void tipc_group_add_member(struct tipc_group *grp, u32 node,
+			   u32 port, u32 instance)
 {
-	tipc_group_create_member(grp, node, port, MBR_DISCOVERED);
+	tipc_group_create_member(grp, node, port, instance, MBR_PUBLISHED);
 }
 
 static void tipc_group_delete_member(struct tipc_group *grp,
@@ -315,7 +344,7 @@ static void tipc_group_delete_member(struct tipc_group *grp,
 		grp->bc_ackers--;
 
 	list_del_init(&m->list);
-	list_del_init(&m->congested);
+	list_del_init(&m->small_win);
 	tipc_group_decr_active(grp, m);
 
 	/* If last member on a node, remove node from dest list */
@@ -344,7 +373,7 @@ void tipc_group_update_member(struct tipc_member *m, int len)
 	struct tipc_group *grp = m->group;
 	struct tipc_member *_m, *tmp;
 
-	if (!tipc_group_is_enabled(m))
+	if (!tipc_group_is_receiver(m))
 		return;
 
 	m->window -= len;
@@ -352,16 +381,14 @@ void tipc_group_update_member(struct tipc_member *m, int len)
 	if (m->window >= ADV_IDLE)
 		return;
 
-	list_del_init(&m->congested);
+	list_del_init(&m->small_win);
 
-	/* Sort member into congested members' list */
-	list_for_each_entry_safe(_m, tmp, &grp->congested, congested) {
-		if (m->window > _m->window)
-			continue;
-		list_add_tail(&m->congested, &_m->congested);
-		return;
+	/* Sort member into small_window members' list */
+	list_for_each_entry_safe(_m, tmp, &grp->small_win, small_win) {
+		if (_m->window > m->window)
+			break;
 	}
-	list_add_tail(&m->congested, &grp->congested);
+	list_add_tail(&m->small_win, &_m->small_win);
 }
 
 void tipc_group_update_bc_members(struct tipc_group *grp, int len, bool ack)
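
The insert above keeps the small_win list ordered by ascending send window, so the list head is always the member with the least advertised space. A minimal user-space sketch of the same ordered-insert pattern, with the kernel list_head API replaced by plain prev/next pointers (all names here are illustrative):

	struct member {
		int window;
		struct member *prev, *next;	/* circular, with a dummy head */
	};

	/* Insert m before the first entry whose window is larger; if no
	 * such entry exists, m ends up at the tail, mirroring the break
	 * plus list_add_tail() above.
	 */
	static void small_win_insert(struct member *head, struct member *m)
	{
		struct member *pos = head->next;

		while (pos != head && pos->window <= m->window)
			pos = pos->next;

		m->next = pos;
		m->prev = pos->prev;
		pos->prev->next = m;
		pos->prev = m;
	}

With this ordering, tipc_group_bc_cong() below only has to inspect the first entry to decide whether a broadcast would overrun the most congested receiver.
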
@@ -373,7 +400,7 @@ void tipc_group_update_bc_members(struct tipc_group *grp, int len, bool ack)
 
 	for (n = rb_first(&grp->members); n; n = rb_next(n)) {
 		m = container_of(n, struct tipc_member, tree_node);
-		if (tipc_group_is_enabled(m)) {
+		if (tipc_group_is_receiver(m)) {
 			tipc_group_update_member(m, len);
 			m->bc_acked = prev;
 			ackers++;
@@ -394,20 +421,20 @@ bool tipc_group_cong(struct tipc_group *grp, u32 dnode, u32 dport,
 	int adv, state;
 
 	m = tipc_group_find_dest(grp, dnode, dport);
-	*mbr = m;
-	if (!m)
+	if (!tipc_group_is_receiver(m)) {
+		*mbr = NULL;
 		return false;
-	if (m->usr_pending)
-		return true;
+	}
+	*mbr = m;
+
 	if (m->window >= len)
 		return false;
-	m->usr_pending = true;
+
+	grp->open = false;
 
 	/* If not fully advertised, do it now to prevent mutual blocking */
 	adv = m->advertised;
 	state = m->state;
-	if (state < MBR_JOINED)
-		return true;
 	if (state == MBR_JOINED && adv == ADV_IDLE)
 		return true;
 	if (state == MBR_ACTIVE && adv == ADV_ACTIVE)
@@ -425,13 +452,14 @@ bool tipc_group_bc_cong(struct tipc_group *grp, int len)
 	struct tipc_member *m = NULL;
 
 	/* If prev bcast was replicast, reject until all receivers have acked */
-	if (grp->bc_ackers)
+	if (grp->bc_ackers) {
+		grp->open = false;
 		return true;
-
-	if (list_empty(&grp->congested))
+	}
+	if (list_empty(&grp->small_win))
 		return false;
 
-	m = list_first_entry(&grp->congested, struct tipc_member, congested);
+	m = list_first_entry(&grp->small_win, struct tipc_member, small_win);
 	if (m->window >= len)
 		return false;
 
@@ -486,7 +514,7 @@ void tipc_group_filter_msg(struct tipc_group *grp, struct sk_buff_head *inputq,
 		goto drop;
 
 	m = tipc_group_find_member(grp, node, port);
-	if (!tipc_group_is_receiver(m))
+	if (!tipc_group_is_sender(m))
 		goto drop;
 
 	if (less(msg_grp_bc_seqno(hdr), m->bc_rcv_nxt))
@@ -573,24 +601,34 @@ void tipc_group_update_rcv_win(struct tipc_group *grp, int blks, u32 node,
 
 	switch (m->state) {
 	case MBR_JOINED:
-		/* Reclaim advertised space from least active member */
-		if (!list_empty(active) && active_cnt >= reclaim_limit) {
-			rm = list_first_entry(active, struct tipc_member, list);
-			rm->state = MBR_RECLAIMING;
-			list_move_tail(&rm->list, &grp->reclaiming);
-			tipc_group_proto_xmit(grp, rm, GRP_RECLAIM_MSG, xmitq);
-		}
-		/* If max active, become pending and wait for reclaimed space */
-		if (active_cnt >= max_active) {
+		/* First, decide if member can go active */
+		if (active_cnt <= max_active) {
+			m->state = MBR_ACTIVE;
+			list_add_tail(&m->list, active);
+			grp->active_cnt++;
+			tipc_group_proto_xmit(grp, m, GRP_ADV_MSG, xmitq);
+		} else {
 			m->state = MBR_PENDING;
 			list_add_tail(&m->list, &grp->pending);
+		}
+
+		if (active_cnt < reclaim_limit)
+			break;
+
+		/* Reclaim from oldest active member, if possible */
+		if (!list_empty(active)) {
+			rm = list_first_entry(active, struct tipc_member, list);
+			rm->state = MBR_RECLAIMING;
+			list_del_init(&rm->list);
+			tipc_group_proto_xmit(grp, rm, GRP_RECLAIM_MSG, xmitq);
 			break;
 		}
-		/* Otherwise become active */
-		m->state = MBR_ACTIVE;
-		list_add_tail(&m->list, &grp->active);
-		grp->active_cnt++;
-		/* Fall through */
+		/* Nobody to reclaim from; revert oldest pending to JOINED */
+		pm = list_first_entry(&grp->pending, struct tipc_member, list);
+		list_del_init(&pm->list);
+		pm->state = MBR_JOINED;
+		tipc_group_proto_xmit(grp, pm, GRP_ADV_MSG, xmitq);
+		break;
 	case MBR_ACTIVE:
 		if (!list_is_last(&m->list, &grp->active))
 			list_move_tail(&m->list, &grp->active);
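
The reworked MBR_JOINED branch makes the admission decision first and the reclaim decision second, where the old code interleaved them. A hedged sketch of that policy with the kernel structures reduced to plain counters (parameter and flag names are illustrative, not kernel identifiers):

	enum action { GO_ACTIVE, GO_PENDING };

	/* have_active/have_pending flag non-empty lists; the two output
	 * flags say whether to send GRP_RECLAIM_MSG to the oldest active
	 * member or to revert the oldest pending member to JOINED.
	 */
	static enum action joined_advance(int active_cnt, int max_active,
					  int reclaim_limit, int have_active,
					  int have_pending, int *reclaim_oldest,
					  int *revert_pending)
	{
		enum action act;

		act = (active_cnt <= max_active) ? GO_ACTIVE : GO_PENDING;
		*reclaim_oldest = 0;
		*revert_pending = 0;
		if (active_cnt >= reclaim_limit) {
			if (have_active)
				*reclaim_oldest = 1;
			else if (have_pending)
				*revert_pending = 1;
		}
		return act;
	}
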
@@ -602,12 +640,12 @@ void tipc_group_update_rcv_win(struct tipc_group *grp, int blks, u32 node,
 		if (m->advertised > ADV_IDLE)
 			break;
 		m->state = MBR_JOINED;
+		grp->active_cnt--;
 		if (m->advertised < ADV_IDLE) {
 			pr_warn_ratelimited("Rcv unexpected msg after REMIT\n");
 			tipc_group_proto_xmit(grp, m, GRP_ADV_MSG, xmitq);
 		}
-		grp->active_cnt--;
-		list_del_init(&m->list);
+
 		if (list_empty(&grp->pending))
 			return;
 
@@ -619,7 +657,6 @@ void tipc_group_update_rcv_win(struct tipc_group *grp, int blks, u32 node,
 		tipc_group_proto_xmit(grp, pm, GRP_ADV_MSG, xmitq);
 		break;
 	case MBR_RECLAIMING:
-	case MBR_DISCOVERED:
 	case MBR_JOINING:
 	case MBR_LEAVING:
 	default:
@@ -627,6 +664,40 @@ void tipc_group_update_rcv_win(struct tipc_group *grp, int blks, u32 node,
 	}
 }
 
+static void tipc_group_create_event(struct tipc_group *grp,
+				    struct tipc_member *m,
+				    u32 event, u16 seqno,
+				    struct sk_buff_head *inputq)
+{
+	u32 dnode = tipc_own_addr(grp->net);
+	struct tipc_event evt;
+	struct sk_buff *skb;
+	struct tipc_msg *hdr;
+
+	evt.event = event;
+	evt.found_lower = m->instance;
+	evt.found_upper = m->instance;
+	evt.port.ref = m->port;
+	evt.port.node = m->node;
+	evt.s.seq.type = grp->type;
+	evt.s.seq.lower = m->instance;
+	evt.s.seq.upper = m->instance;
+
+	skb = tipc_msg_create(TIPC_CRITICAL_IMPORTANCE, TIPC_GRP_MEMBER_EVT,
+			      GROUP_H_SIZE, sizeof(evt), dnode, m->node,
+			      grp->portid, m->port, 0);
+	if (!skb)
+		return;
+
+	hdr = buf_msg(skb);
+	msg_set_nametype(hdr, grp->type);
+	msg_set_grp_evt(hdr, event);
+	msg_set_dest_droppable(hdr, true);
+	msg_set_grp_bc_seqno(hdr, seqno);
+	memcpy(msg_data(hdr), &evt, sizeof(evt));
+	TIPC_SKB_CB(skb)->orig_member = m->instance;
+	__skb_queue_tail(inputq, skb);
+}
+
 static void tipc_group_proto_xmit(struct tipc_group *grp, struct tipc_member *m,
 				  int mtyp, struct sk_buff_head *xmitq)
 {
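
tipc_group_create_event() above synthesizes the membership event locally, instead of rewriting the received topology message in place as the old tipc_group_member_evt() did. The shape is: fill an event record, allocate a buffer with room for it, copy the record into the payload. A reduced user-space sketch of that shape (struct layout hypothetical, not the on-wire tipc_event format):

	#include <stdlib.h>
	#include <string.h>

	struct event_rec {
		unsigned int event;
		unsigned int instance;
		unsigned int port;
		unsigned int node;
	};

	/* Build a standalone payload carrying one membership event.
	 * Returns NULL on allocation failure, in which case the event is
	 * dropped, matching the silent return in the kernel code.
	 */
	static void *make_member_event(unsigned int event, unsigned int instance,
				       unsigned int port, unsigned int node)
	{
		struct event_rec evt = {
			.event = event,
			.instance = instance,
			.port = port,
			.node = node,
		};
		void *buf = malloc(sizeof(evt));

		if (buf)
			memcpy(buf, &evt, sizeof(evt));
		return buf;
	}
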
@@ -672,83 +743,73 @@ void tipc_group_proto_rcv(struct tipc_group *grp, bool *usr_wakeup,
 	u32 node = msg_orignode(hdr);
 	u32 port = msg_origport(hdr);
 	struct tipc_member *m, *pm;
-	struct tipc_msg *ehdr;
 	u16 remitted, in_flight;
 
 	if (!grp)
 		return;
 
+	if (grp->scope == TIPC_NODE_SCOPE && node != tipc_own_addr(grp->net))
+		return;
+
 	m = tipc_group_find_member(grp, node, port);
 
 	switch (msg_type(hdr)) {
 	case GRP_JOIN_MSG:
 		if (!m)
 			m = tipc_group_create_member(grp, node, port,
-						     MBR_QUARANTINED);
+						     0, MBR_JOINING);
 		if (!m)
 			return;
 		m->bc_syncpt = msg_grp_bc_syncpt(hdr);
 		m->bc_rcv_nxt = m->bc_syncpt;
 		m->window += msg_adv_win(hdr);
 
-		/* Wait until PUBLISH event is received */
-		if (m->state == MBR_DISCOVERED) {
-			m->state = MBR_JOINING;
-		} else if (m->state == MBR_PUBLISHED) {
-			m->state = MBR_JOINED;
-			*usr_wakeup = true;
-			m->usr_pending = false;
-			tipc_group_proto_xmit(grp, m, GRP_ADV_MSG, xmitq);
-			ehdr = buf_msg(m->event_msg);
-			msg_set_grp_bc_seqno(ehdr, m->bc_syncpt);
-			__skb_queue_tail(inputq, m->event_msg);
-		}
-		list_del_init(&m->congested);
+		/* Wait until PUBLISH event is received if necessary */
+		if (m->state != MBR_PUBLISHED)
+			return;
+
+		/* Member can be taken into service */
+		m->state = MBR_JOINED;
+		tipc_group_open(m, usr_wakeup);
 		tipc_group_update_member(m, 0);
+		tipc_group_proto_xmit(grp, m, GRP_ADV_MSG, xmitq);
+		tipc_group_create_event(grp, m, TIPC_PUBLISHED,
+					m->bc_syncpt, inputq);
 		return;
 	case GRP_LEAVE_MSG:
 		if (!m)
 			return;
 		m->bc_syncpt = msg_grp_bc_syncpt(hdr);
 		list_del_init(&m->list);
-		list_del_init(&m->congested);
-		*usr_wakeup = true;
-
-		/* Wait until WITHDRAW event is received */
-		if (m->state != MBR_LEAVING) {
-			tipc_group_decr_active(grp, m);
-			m->state = MBR_LEAVING;
-			return;
-		}
-		/* Otherwise deliver already received WITHDRAW event */
-		ehdr = buf_msg(m->event_msg);
-		msg_set_grp_bc_seqno(ehdr, m->bc_syncpt);
-		__skb_queue_tail(inputq, m->event_msg);
+		tipc_group_open(m, usr_wakeup);
+		tipc_group_decr_active(grp, m);
+		m->state = MBR_LEAVING;
+		tipc_group_create_event(grp, m, TIPC_WITHDRAWN,
+					m->bc_syncpt, inputq);
 		return;
 	case GRP_ADV_MSG:
 		if (!m)
 			return;
 		m->window += msg_adv_win(hdr);
-		*usr_wakeup = m->usr_pending;
-		m->usr_pending = false;
-		list_del_init(&m->congested);
+		tipc_group_open(m, usr_wakeup);
 		return;
 	case GRP_ACK_MSG:
 		if (!m)
 			return;
 		m->bc_acked = msg_grp_bc_acked(hdr);
 		if (--grp->bc_ackers)
-			break;
+			return;
+		list_del_init(&m->small_win);
+		m->group->open = true;
 		*usr_wakeup = true;
-		m->usr_pending = false;
+		tipc_group_update_member(m, 0);
 		return;
 	case GRP_RECLAIM_MSG:
 		if (!m)
 			return;
-		*usr_wakeup = m->usr_pending;
-		m->usr_pending = false;
 		tipc_group_proto_xmit(grp, m, GRP_REMIT_MSG, xmitq);
 		m->window = ADV_IDLE;
+		tipc_group_open(m, usr_wakeup);
 		return;
 	case GRP_REMIT_MSG:
 		if (!m || m->state != MBR_RECLAIMING)
@@ -763,18 +824,14 @@ void tipc_group_proto_rcv(struct tipc_group *grp, bool *usr_wakeup,
 			m->advertised = ADV_IDLE + in_flight;
 			return;
 		}
-		/* All messages preceding the REMIT have been read */
-		if (m->advertised <= remitted) {
-			m->state = MBR_JOINED;
-			in_flight = 0;
-		}
-		/* ..and the REMIT overtaken by more messages => re-advertise */
+		/* This should never happen */
 		if (m->advertised < remitted)
-			tipc_group_proto_xmit(grp, m, GRP_ADV_MSG, xmitq);
+			pr_warn_ratelimited("Unexpected REMIT msg\n");
 
-		m->advertised = ADV_IDLE + in_flight;
+		/* All messages preceding the REMIT have been read */
+		m->state = MBR_JOINED;
 		grp->active_cnt--;
-		list_del_init(&m->list);
+		m->advertised = ADV_IDLE;
 
 		/* Set oldest pending member to active and advertise */
 		if (list_empty(&grp->pending))
@@ -796,11 +853,10 @@ void tipc_group_proto_rcv(struct tipc_group *grp, bool *usr_wakeup,
 void tipc_group_member_evt(struct tipc_group *grp,
 			   bool *usr_wakeup,
 			   int *sk_rcvbuf,
-			   struct sk_buff *skb,
+			   struct tipc_msg *hdr,
 			   struct sk_buff_head *inputq,
 			   struct sk_buff_head *xmitq)
 {
-	struct tipc_msg *hdr = buf_msg(skb);
 	struct tipc_event *evt = (void *)msg_data(hdr);
 	u32 instance = evt->found_lower;
 	u32 node = evt->port.node;
@@ -808,89 +864,59 @@ void tipc_group_member_evt(struct tipc_group *grp,
 	int event = evt->event;
 	struct tipc_member *m;
 	struct net *net;
-	bool node_up;
 	u32 self;
 
 	if (!grp)
-		goto drop;
+		return;
 
 	net = grp->net;
 	self = tipc_own_addr(net);
 	if (!grp->loopback && node == self && port == grp->portid)
-		goto drop;
-
-	/* Convert message before delivery to user */
-	msg_set_hdr_sz(hdr, GROUP_H_SIZE);
-	msg_set_user(hdr, TIPC_CRITICAL_IMPORTANCE);
-	msg_set_type(hdr, TIPC_GRP_MEMBER_EVT);
-	msg_set_origport(hdr, port);
-	msg_set_orignode(hdr, node);
-	msg_set_nametype(hdr, grp->type);
-	msg_set_grp_evt(hdr, event);
+		return;
 
 	m = tipc_group_find_member(grp, node, port);
 
-	if (event == TIPC_PUBLISHED) {
-		if (!m)
-			m = tipc_group_create_member(grp, node, port,
-						     MBR_DISCOVERED);
-		if (!m)
-			goto drop;
-
-		/* Hold back event if JOIN message not yet received */
-		if (m->state == MBR_DISCOVERED) {
-			m->event_msg = skb;
-			m->state = MBR_PUBLISHED;
-		} else {
-			msg_set_grp_bc_seqno(hdr, m->bc_syncpt);
-			__skb_queue_tail(inputq, skb);
-			m->state = MBR_JOINED;
-			*usr_wakeup = true;
-			m->usr_pending = false;
-		}
-		m->instance = instance;
-		TIPC_SKB_CB(skb)->orig_member = m->instance;
-		tipc_group_proto_xmit(grp, m, GRP_JOIN_MSG, xmitq);
-		if (m->window < ADV_IDLE)
+	switch (event) {
+	case TIPC_PUBLISHED:
+		/* Send and wait for arrival of JOIN message if necessary */
+		if (!m) {
+			m = tipc_group_create_member(grp, node, port, instance,
+						     MBR_PUBLISHED);
+			if (!m)
+				break;
 			tipc_group_update_member(m, 0);
-		else
-			list_del_init(&m->congested);
-	} else if (event == TIPC_WITHDRAWN) {
-		if (!m)
-			goto drop;
-
-		TIPC_SKB_CB(skb)->orig_member = m->instance;
-
-		*usr_wakeup = true;
-		m->usr_pending = false;
-		node_up = tipc_node_is_up(net, node);
-		m->event_msg = NULL;
-
-		if (node_up) {
-			/* Hold back event if a LEAVE msg should be expected */
-			if (m->state != MBR_LEAVING) {
-				m->event_msg = skb;
-				tipc_group_decr_active(grp, m);
-				m->state = MBR_LEAVING;
-			} else {
-				msg_set_grp_bc_seqno(hdr, m->bc_syncpt);
-				__skb_queue_tail(inputq, skb);
-			}
-		} else {
-			if (m->state != MBR_LEAVING) {
-				tipc_group_decr_active(grp, m);
-				m->state = MBR_LEAVING;
-				msg_set_grp_bc_seqno(hdr, m->bc_rcv_nxt);
-			} else {
-				msg_set_grp_bc_seqno(hdr, m->bc_syncpt);
-			}
-			__skb_queue_tail(inputq, skb);
+			tipc_group_proto_xmit(grp, m, GRP_JOIN_MSG, xmitq);
+			break;
 		}
+
+		if (m->state != MBR_JOINING)
+			break;
+
+		/* Member can be taken into service */
+		m->instance = instance;
+		m->state = MBR_JOINED;
+		tipc_group_open(m, usr_wakeup);
+		tipc_group_update_member(m, 0);
+		tipc_group_proto_xmit(grp, m, GRP_JOIN_MSG, xmitq);
+		tipc_group_create_event(grp, m, TIPC_PUBLISHED,
+					m->bc_syncpt, inputq);
+		break;
+	case TIPC_WITHDRAWN:
+		if (!m)
+			break;
+
+		tipc_group_decr_active(grp, m);
+		m->state = MBR_LEAVING;
 		list_del_init(&m->list);
-		list_del_init(&m->congested);
+		tipc_group_open(m, usr_wakeup);
+
+		/* Only send event if no LEAVE message can be expected */
+		if (!tipc_node_is_up(net, node))
+			tipc_group_create_event(grp, m, TIPC_WITHDRAWN,
+						m->bc_rcv_nxt, inputq);
+		break;
+	default:
+		break;
 	}
 	*sk_rcvbuf = tipc_group_rcvbuf_limit(grp);
-	return;
-drop:
-	kfree_skb(skb);
 }
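
Taken together, tipc_group_proto_rcv() and tipc_group_member_evt() now implement a small two-input handshake: a member enters service only after both the topology PUBLISHED event and the peer's GRP_JOIN_MSG have arrived, in either order, which is why MBR_DISCOVERED and the held-back event_msg could be dropped. A compact sketch of that state machine (NONE stands for "no member yet"):

	enum state { NONE, PUBLISHED, JOINING, JOINED };

	/* First input creates the member; the second takes it into
	 * service. Arrival order does not matter.
	 */
	static enum state on_published_evt(enum state s)
	{
		if (s == NONE)
			return PUBLISHED;	/* wait for the JOIN message */
		if (s == JOINING)
			return JOINED;		/* both inputs seen */
		return s;
	}

	static enum state on_join_msg(enum state s)
	{
		if (s == NONE)
			return JOINING;		/* wait for the PUBLISHED event */
		if (s == PUBLISHED)
			return JOINED;
		return s;
	}
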
diff --git a/net/tipc/group.h b/net/tipc/group.h
index d525e1c..f4a596e 100644
--- a/net/tipc/group.h
+++ b/net/tipc/group.h
@@ -44,8 +44,10 @@ struct tipc_msg;
 
 struct tipc_group *tipc_group_create(struct net *net, u32 portid,
 				     struct tipc_group_req *mreq);
+void tipc_group_join(struct net *net, struct tipc_group *grp, int *sk_rcv_buf);
 void tipc_group_delete(struct net *net, struct tipc_group *grp);
-void tipc_group_add_member(struct tipc_group *grp, u32 node, u32 port);
+void tipc_group_add_member(struct tipc_group *grp, u32 node,
+			   u32 port, u32 instance);
 struct tipc_nlist *tipc_group_dests(struct tipc_group *grp);
 void tipc_group_self(struct tipc_group *grp, struct tipc_name_seq *seq,
 		     int *scope);
@@ -54,7 +56,7 @@ void tipc_group_filter_msg(struct tipc_group *grp,
 			   struct sk_buff_head *inputq,
 			   struct sk_buff_head *xmitq);
 void tipc_group_member_evt(struct tipc_group *grp, bool *wakeup,
-			   int *sk_rcvbuf, struct sk_buff *skb,
+			   int *sk_rcvbuf, struct tipc_msg *hdr,
 			   struct sk_buff_head *inputq,
 			   struct sk_buff_head *xmitq);
 void tipc_group_proto_rcv(struct tipc_group *grp, bool *wakeup,
@@ -65,9 +67,9 @@ void tipc_group_update_bc_members(struct tipc_group *grp, int len, bool ack);
 bool tipc_group_cong(struct tipc_group *grp, u32 dnode, u32 dport,
 		     int len, struct tipc_member **m);
 bool tipc_group_bc_cong(struct tipc_group *grp, int len);
+bool tipc_group_is_open(struct tipc_group *grp);
 void tipc_group_update_rcv_win(struct tipc_group *grp, int blks, u32 node,
 			       u32 port, struct sk_buff_head *xmitq);
 u16 tipc_group_bc_snd_nxt(struct tipc_group *grp);
 void tipc_group_update_member(struct tipc_member *m, int len);
-int tipc_group_size(struct tipc_group *grp);
 #endif
diff --git a/net/tipc/link.c b/net/tipc/link.c
index 6bce0b1..2d6b2ae 100644
--- a/net/tipc/link.c
+++ b/net/tipc/link.c
@@ -483,7 +483,7 @@ bool tipc_link_create(struct net *net, char *if_name, int bearer_id,
 /**
  * tipc_link_bc_create - create new link to be used for broadcast
  * @n: pointer to associated node
- * @mtu: mtu to be used
+ * @mtu: mtu to be used initially if no peers
  * @window: send window to be used
  * @inputq: queue to put messages ready for delivery
  * @namedq: queue to put binding table update messages ready for delivery
diff --git a/net/tipc/msg.c b/net/tipc/msg.c
index b0d07b3..55d8ba9 100644
--- a/net/tipc/msg.c
+++ b/net/tipc/msg.c
@@ -251,20 +251,23 @@ bool tipc_msg_validate(struct sk_buff **_skb)
  * @pktmax: Max packet size that can be used
  * @list: Buffer or chain of buffers to be returned to caller
  *
+ * Note that the recursive call we are making here is safe, since it can
+ * logically go only one further level down.
+ *
  * Returns message data size or errno: -ENOMEM, -EFAULT
  */
-int tipc_msg_build(struct tipc_msg *mhdr, struct msghdr *m,
-		   int offset, int dsz, int pktmax, struct sk_buff_head *list)
+int tipc_msg_build(struct tipc_msg *mhdr, struct msghdr *m, int offset,
+		   int dsz, int pktmax, struct sk_buff_head *list)
 {
 	int mhsz = msg_hdr_sz(mhdr);
-	int msz = mhsz + dsz;
-	int pktno = 1;
-	int pktsz;
-	int pktrem = pktmax;
-	int drem = dsz;
 	struct tipc_msg pkthdr;
+	int msz = mhsz + dsz;
+	int pktrem = pktmax;
 	struct sk_buff *skb;
+	int drem = dsz;
+	int pktno = 1;
 	char *pktpos;
+	int pktsz;
 	int rc;
 
 	msg_set_size(mhdr, msz);
@@ -272,8 +275,18 @@ int tipc_msg_build(struct tipc_msg *mhdr, struct msghdr *m,
 	/* No fragmentation needed? */
 	if (likely(msz <= pktmax)) {
 		skb = tipc_buf_acquire(msz, GFP_KERNEL);
-		if (unlikely(!skb))
+
+		/* Fall back to smaller MTU if node local message */
+		if (unlikely(!skb)) {
+			if (pktmax != MAX_MSG_SIZE)
+				return -ENOMEM;
+			rc = tipc_msg_build(mhdr, m, offset, dsz, FB_MTU, list);
+			if (rc != dsz)
+				return rc;
+			if (tipc_msg_assemble(list))
+				return dsz;
 			return -ENOMEM;
+		}
 		skb_orphan(skb);
 		__skb_queue_tail(list, skb);
 		skb_copy_to_linear_data(skb, mhdr, mhsz);
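
The fallback above retries a failed single-buffer allocation by rebuilding the message at the smaller FB_MTU (added in msg.h below) and reassembling the fragments into one buffer, trading one large allocation for several small ones. The recursion is bounded exactly as the new comment says: the inner call passes FB_MTU, which is never equal to MAX_MSG_SIZE, so the fallback branch cannot be taken twice. A user-space sketch of that retry shape (constants and allocation stand-ins are illustrative):

	#include <errno.h>
	#include <stdlib.h>

	#define MAX_MSG_SZ	66060	/* stand-in for MAX_MSG_SIZE */
	#define FB_SZ		3744	/* stand-in for FB_MTU */

	static int msg_build(int dsz, int pktmax)
	{
		if (dsz <= pktmax) {
			void *buf = malloc(dsz);

			if (!buf) {
				if (pktmax != MAX_MSG_SZ)
					return -ENOMEM;	/* already the fallback */
				/* one bounded retry with smaller chunks */
				return msg_build(dsz, FB_SZ);
			}
			free(buf);
			return dsz;	/* single buffer built */
		}
		/* fragmentation path: build a chain of pktmax-sized
		 * buffers, which the caller reassembles into one message
		 */
		return dsz;
	}
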
@@ -589,6 +602,30 @@ bool tipc_msg_lookup_dest(struct net *net, struct sk_buff *skb, int *err)
 	return true;
 }
 
+/* tipc_msg_assemble() - assemble chain of fragments into one message
+ */
+bool tipc_msg_assemble(struct sk_buff_head *list)
+{
+	struct sk_buff *skb, *tmp = NULL;
+
+	if (skb_queue_len(list) == 1)
+		return true;
+
+	while ((skb = __skb_dequeue(list))) {
+		skb->next = NULL;
+		if (tipc_buf_append(&tmp, &skb)) {
+			__skb_queue_tail(list, skb);
+			return true;
+		}
+		if (!tmp)
+			break;
+	}
+	__skb_queue_purge(list);
+	__skb_queue_head_init(list);
+	pr_warn("Failed do assemble buffer\n");
+	return false;
+}
+
 /* tipc_msg_reassemble() - clone a buffer chain of fragments and
  *                         reassemble the clones into one message
  */
diff --git a/net/tipc/msg.h b/net/tipc/msg.h
index 3e4384c..b4ba1b4 100644
--- a/net/tipc/msg.h
+++ b/net/tipc/msg.h
@@ -98,7 +98,7 @@ struct plist;
 #define MAX_H_SIZE                60	/* Largest possible TIPC header size */
 
 #define MAX_MSG_SIZE (MAX_H_SIZE + TIPC_MAX_USER_MSG_SIZE)
-
+#define FB_MTU                  3744
 #define TIPC_MEDIA_INFO_OFFSET	5
 
 struct tipc_skb_cb {
@@ -943,6 +943,7 @@ bool tipc_msg_extract(struct sk_buff *skb, struct sk_buff **iskb, int *pos);
 int tipc_msg_build(struct tipc_msg *mhdr, struct msghdr *m,
 		   int offset, int dsz, int mtu, struct sk_buff_head *list);
 bool tipc_msg_lookup_dest(struct net *net, struct sk_buff *skb, int *err);
+bool tipc_msg_assemble(struct sk_buff_head *list);
 bool tipc_msg_reassemble(struct sk_buff_head *list, struct sk_buff_head *rcvq);
 bool tipc_msg_pskb_copy(u32 dst, struct sk_buff_head *msg,
 			struct sk_buff_head *cpy);
diff --git a/net/tipc/name_table.c b/net/tipc/name_table.c
index b3829bc..64cdd3c 100644
--- a/net/tipc/name_table.c
+++ b/net/tipc/name_table.c
@@ -328,7 +328,8 @@ static struct publication *tipc_nameseq_insert_publ(struct net *net,
 	list_for_each_entry_safe(s, st, &nseq->subscriptions, nameseq_list) {
 		tipc_subscrp_report_overlap(s, publ->lower, publ->upper,
 					    TIPC_PUBLISHED, publ->ref,
-					    publ->node, created_subseq);
+					    publ->node, publ->scope,
+					    created_subseq);
 	}
 	return publ;
 }
@@ -398,19 +399,21 @@ static struct publication *tipc_nameseq_remove_publ(struct net *net,
 	list_for_each_entry_safe(s, st, &nseq->subscriptions, nameseq_list) {
 		tipc_subscrp_report_overlap(s, publ->lower, publ->upper,
 					    TIPC_WITHDRAWN, publ->ref,
-					    publ->node, removed_subseq);
+					    publ->node, publ->scope,
+					    removed_subseq);
 	}
 
 	return publ;
 }
 
 /**
- * tipc_nameseq_subscribe - attach a subscription, and issue
- * the prescribed number of events if there is any sub-
+ * tipc_nameseq_subscribe - attach a subscription, and optionally
+ * issue the prescribed number of events if there is any sub-
  * sequence overlapping with the requested sequence
  */
 static void tipc_nameseq_subscribe(struct name_seq *nseq,
-				   struct tipc_subscription *s)
+				   struct tipc_subscription *s,
+				   bool status)
 {
 	struct sub_seq *sseq = nseq->sseqs;
 	struct tipc_name_seq ns;
@@ -420,7 +423,7 @@ static void tipc_nameseq_subscribe(struct name_seq *nseq,
 	tipc_subscrp_get(s);
 	list_add(&s->nameseq_list, &nseq->subscriptions);
 
-	if (!sseq)
+	if (!status || !sseq)
 		return;
 
 	while (sseq != &nseq->sseqs[nseq->first_free]) {
@@ -434,6 +437,7 @@ static void tipc_nameseq_subscribe(struct name_seq *nseq,
 							    sseq->upper,
 							    TIPC_PUBLISHED,
 							    crs->ref, crs->node,
+							    crs->scope,
 							    must_report);
 				must_report = 0;
 			}
@@ -597,7 +601,7 @@ u32 tipc_nametbl_translate(struct net *net, u32 type, u32 instance,
 	return ref;
 }
 
-bool tipc_nametbl_lookup(struct net *net, u32 type, u32 instance, u32 domain,
+bool tipc_nametbl_lookup(struct net *net, u32 type, u32 instance, u32 scope,
 			 struct list_head *dsts, int *dstcnt, u32 exclude,
 			 bool all)
 {
@@ -607,9 +611,6 @@ bool tipc_nametbl_lookup(struct net *net, u32 type, u32 instance, u32 domain,
 	struct name_seq *seq;
 	struct sub_seq *sseq;
 
-	if (!tipc_in_scope(domain, self))
-		return false;
-
 	*dstcnt = 0;
 	rcu_read_lock();
 	seq = nametbl_find_seq(net, type);
@@ -620,7 +621,7 @@ bool tipc_nametbl_lookup(struct net *net, u32 type, u32 instance, u32 domain,
 	if (likely(sseq)) {
 		info = sseq->info;
 		list_for_each_entry(publ, &info->zone_list, zone_list) {
-			if (!tipc_in_scope(domain, publ->node))
+			if (publ->scope != scope)
 				continue;
 			if (publ->ref == exclude && publ->node == self)
 				continue;
@@ -638,13 +639,14 @@ bool tipc_nametbl_lookup(struct net *net, u32 type, u32 instance, u32 domain,
 	return !list_empty(dsts);
 }
 
-int tipc_nametbl_mc_translate(struct net *net, u32 type, u32 lower, u32 upper,
-			      u32 limit, struct list_head *dports)
+int tipc_nametbl_mc_lookup(struct net *net, u32 type, u32 lower, u32 upper,
+			   u32 scope, bool exact, struct list_head *dports)
 {
-	struct name_seq *seq;
-	struct sub_seq *sseq;
 	struct sub_seq *sseq_stop;
 	struct name_info *info;
+	struct publication *p;
+	struct name_seq *seq;
+	struct sub_seq *sseq;
 	int res = 0;
 
 	rcu_read_lock();
@@ -656,15 +658,12 @@ int tipc_nametbl_mc_translate(struct net *net, u32 type, u32 lower, u32 upper,
 	sseq = seq->sseqs + nameseq_locate_subseq(seq, lower);
 	sseq_stop = seq->sseqs + seq->first_free;
 	for (; sseq != sseq_stop; sseq++) {
-		struct publication *publ;
-
 		if (sseq->lower > upper)
 			break;
-
 		info = sseq->info;
-		list_for_each_entry(publ, &info->node_list, node_list) {
-			if (publ->scope <= limit)
-				tipc_dest_push(dports, 0, publ->ref);
+		list_for_each_entry(p, &info->node_list, node_list) {
+			if (p->scope == scope || (!exact && p->scope < scope))
+				tipc_dest_push(dports, 0, p->ref);
 		}
 
 		if (info->cluster_list_size != info->node_list_size)
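
The rewritten match distinguishes exact from inclusive scope lookup: group traffic (exact) requires the publication scope to equal the lookup scope, while ordinary multicast also accepts publications with a numerically smaller scope value, i.e. wider visibility, assuming the usual TIPC encoding where zone < cluster < node. As a standalone predicate:

	/* Sketch of the scope test in tipc_nametbl_mc_lookup() above */
	static int scope_matches(unsigned int pub_scope, unsigned int scope,
				 int exact)
	{
		return pub_scope == scope || (!exact && pub_scope < scope);
	}

With exact set, a node-scope lookup no longer picks up cluster-wide publications, which is what gives group messaging its strict scope semantics.
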
@@ -681,7 +680,7 @@ int tipc_nametbl_mc_translate(struct net *net, u32 type, u32 lower, u32 upper,
  * - Determines if any node local ports overlap
  */
 void tipc_nametbl_lookup_dst_nodes(struct net *net, u32 type, u32 lower,
-				   u32 upper, u32 domain,
+				   u32 upper, u32 scope,
 				   struct tipc_nlist *nodes)
 {
 	struct sub_seq *sseq, *stop;
@@ -700,7 +699,7 @@ void tipc_nametbl_lookup_dst_nodes(struct net *net, u32 type, u32 lower,
 	for (; sseq != stop && sseq->lower <= upper; sseq++) {
 		info = sseq->info;
 		list_for_each_entry(publ, &info->zone_list, zone_list) {
-			if (tipc_in_scope(domain, publ->node))
+			if (publ->scope == scope)
 				tipc_nlist_add(nodes, publ->node);
 		}
 	}
@@ -712,7 +711,7 @@ void tipc_nametbl_lookup_dst_nodes(struct net *net, u32 type, u32 lower,
 /* tipc_nametbl_build_group - build list of communication group members
  */
 void tipc_nametbl_build_group(struct net *net, struct tipc_group *grp,
-			      u32 type, u32 domain)
+			      u32 type, u32 scope)
 {
 	struct sub_seq *sseq, *stop;
 	struct name_info *info;
@@ -730,9 +729,9 @@ void tipc_nametbl_build_group(struct net *net, struct tipc_group *grp,
 	for (; sseq != stop; sseq++) {
 		info = sseq->info;
 		list_for_each_entry(p, &info->zone_list, zone_list) {
-			if (!tipc_in_scope(domain, p->node))
+			if (p->scope != scope)
 				continue;
-			tipc_group_add_member(grp, p->node, p->ref);
+			tipc_group_add_member(grp, p->node, p->ref, p->lower);
 		}
 	}
 	spin_unlock_bh(&seq->lock);
@@ -811,7 +810,7 @@ int tipc_nametbl_withdraw(struct net *net, u32 type, u32 lower, u32 ref,
 /**
  * tipc_nametbl_subscribe - add a subscription object to the name table
  */
-void tipc_nametbl_subscribe(struct tipc_subscription *s)
+void tipc_nametbl_subscribe(struct tipc_subscription *s, bool status)
 {
 	struct tipc_net *tn = net_generic(s->net, tipc_net_id);
 	u32 type = tipc_subscrp_convert_seq_type(s->evt.s.seq.type, s->swap);
@@ -825,7 +824,7 @@ void tipc_nametbl_subscribe(struct tipc_subscription *s)
 		seq = tipc_nameseq_create(type, &tn->nametbl->seq_hlist[index]);
 	if (seq) {
 		spin_lock_bh(&seq->lock);
-		tipc_nameseq_subscribe(seq, s);
+		tipc_nameseq_subscribe(seq, s, status);
 		spin_unlock_bh(&seq->lock);
 	} else {
 		tipc_subscrp_convert_seq(&s->evt.s.seq, s->swap, &ns);
diff --git a/net/tipc/name_table.h b/net/tipc/name_table.h
index 71926e42..b595d8a 100644
--- a/net/tipc/name_table.h
+++ b/net/tipc/name_table.h
@@ -100,8 +100,8 @@ struct name_table {
 int tipc_nl_name_table_dump(struct sk_buff *skb, struct netlink_callback *cb);
 
 u32 tipc_nametbl_translate(struct net *net, u32 type, u32 instance, u32 *node);
-int tipc_nametbl_mc_translate(struct net *net, u32 type, u32 lower, u32 upper,
-			      u32 limit, struct list_head *dports);
+int tipc_nametbl_mc_lookup(struct net *net, u32 type, u32 lower, u32 upper,
+			   u32 scope, bool exact, struct list_head *dports);
 void tipc_nametbl_build_group(struct net *net, struct tipc_group *grp,
 			      u32 type, u32 domain);
 void tipc_nametbl_lookup_dst_nodes(struct net *net, u32 type, u32 lower,
@@ -121,7 +121,7 @@ struct publication *tipc_nametbl_insert_publ(struct net *net, u32 type,
 struct publication *tipc_nametbl_remove_publ(struct net *net, u32 type,
 					     u32 lower, u32 node, u32 ref,
 					     u32 key);
-void tipc_nametbl_subscribe(struct tipc_subscription *s);
+void tipc_nametbl_subscribe(struct tipc_subscription *s, bool status);
 void tipc_nametbl_unsubscribe(struct tipc_subscription *s);
 int tipc_nametbl_init(struct net *net);
 void tipc_nametbl_stop(struct net *net);
diff --git a/net/tipc/server.c b/net/tipc/server.c
index d60c303..8ee5e86 100644
--- a/net/tipc/server.c
+++ b/net/tipc/server.c
@@ -489,8 +489,8 @@ void tipc_conn_terminate(struct tipc_server *s, int conid)
 	}
 }
 
-bool tipc_topsrv_kern_subscr(struct net *net, u32 port, u32 type,
-			     u32 lower, u32 upper, int *conid)
+bool tipc_topsrv_kern_subscr(struct net *net, u32 port, u32 type, u32 lower,
+			     u32 upper, u32 filter, int *conid)
 {
 	struct tipc_subscriber *scbr;
 	struct tipc_subscr sub;
@@ -501,7 +501,7 @@ bool tipc_topsrv_kern_subscr(struct net *net, u32 port, u32 type,
 	sub.seq.lower = lower;
 	sub.seq.upper = upper;
 	sub.timeout = TIPC_WAIT_FOREVER;
-	sub.filter = TIPC_SUB_PORTS;
+	sub.filter = filter;
 	*(u32 *)&sub.usr_handle = port;
 
 	con = tipc_alloc_conn(tipc_topsrv(net));
diff --git a/net/tipc/server.h b/net/tipc/server.h
index 2113c91..17f49ee 100644
--- a/net/tipc/server.h
+++ b/net/tipc/server.h
@@ -41,6 +41,9 @@
 #include <net/net_namespace.h>
 
 #define TIPC_SERVER_NAME_LEN	32
+#define TIPC_SUB_CLUSTER_SCOPE  0x20
+#define TIPC_SUB_NODE_SCOPE     0x40
+#define TIPC_SUB_NO_STATUS      0x80
 
 /**
  * struct tipc_server - TIPC server structure
@@ -83,8 +86,8 @@ struct tipc_server {
 int tipc_conn_sendmsg(struct tipc_server *s, int conid,
 		      struct sockaddr_tipc *addr, void *data, size_t len);
 
-bool tipc_topsrv_kern_subscr(struct net *net, u32 port, u32 type,
-			     u32 lower, u32 upper, int *conid);
+bool tipc_topsrv_kern_subscr(struct net *net, u32 port, u32 type, u32 lower,
+			     u32 upper, u32 filter, int *conid);
 void tipc_topsrv_kern_unsubscr(struct net *net, int conid);
 
 /**
diff --git a/net/tipc/socket.c b/net/tipc/socket.c
index 3b40844..1f23627 100644
--- a/net/tipc/socket.c
+++ b/net/tipc/socket.c
@@ -715,7 +715,7 @@ static unsigned int tipc_poll(struct file *file, struct socket *sock,
 {
 	struct sock *sk = sock->sk;
 	struct tipc_sock *tsk = tipc_sk(sk);
-	struct tipc_group *grp = tsk->group;
+	struct tipc_group *grp;
 	u32 revents = 0;
 
 	sock_poll_wait(file, sk_sleep(sk), wait);
@@ -736,9 +736,9 @@ static unsigned int tipc_poll(struct file *file, struct socket *sock,
 			revents |= POLLIN | POLLRDNORM;
 		break;
 	case TIPC_OPEN:
-		if (!grp || tipc_group_size(grp))
-			if (!tsk->cong_link_cnt)
-				revents |= POLLOUT;
+		grp = tsk->group;
+		if ((!grp || tipc_group_is_open(grp)) && !tsk->cong_link_cnt)
+			revents |= POLLOUT;
 		if (!tipc_sk_type_connectionless(sk))
 			break;
 		if (skb_queue_empty(&sk->sk_receive_queue))
@@ -928,21 +928,22 @@ static int tipc_send_group_anycast(struct socket *sock, struct msghdr *m,
 	struct list_head *cong_links = &tsk->cong_links;
 	int blks = tsk_blocks(GROUP_H_SIZE + dlen);
 	struct tipc_group *grp = tsk->group;
+	struct tipc_msg *hdr = &tsk->phdr;
 	struct tipc_member *first = NULL;
 	struct tipc_member *mbr = NULL;
 	struct net *net = sock_net(sk);
 	u32 node, port, exclude;
-	u32 type, inst, domain;
 	struct list_head dsts;
+	u32 type, inst, scope;
 	int lookups = 0;
 	int dstcnt, rc;
 	bool cong;
 
 	INIT_LIST_HEAD(&dsts);
 
-	type = dest->addr.name.name.type;
+	type = msg_nametype(hdr);
 	inst = dest->addr.name.name.instance;
-	domain = addr_domain(net, dest->scope);
+	scope = msg_lookup_scope(hdr);
 	exclude = tipc_group_exclude(grp);
 
 	while (++lookups < 4) {
@@ -950,7 +951,7 @@ static int tipc_send_group_anycast(struct socket *sock, struct msghdr *m,
 
 		/* Look for a non-congested destination member, if any */
 		while (1) {
-			if (!tipc_nametbl_lookup(net, type, inst, domain, &dsts,
+			if (!tipc_nametbl_lookup(net, type, inst, scope, &dsts,
 						 &dstcnt, exclude, false))
 				return -EHOSTUNREACH;
 			tipc_dest_pop(&dsts, &node, &port);
@@ -1079,22 +1080,23 @@ static int tipc_send_group_mcast(struct socket *sock, struct msghdr *m,
 {
 	struct sock *sk = sock->sk;
 	DECLARE_SOCKADDR(struct sockaddr_tipc *, dest, m->msg_name);
-	struct tipc_name_seq *seq = &dest->addr.nameseq;
 	struct tipc_sock *tsk = tipc_sk(sk);
 	struct tipc_group *grp = tsk->group;
+	struct tipc_msg *hdr = &tsk->phdr;
 	struct net *net = sock_net(sk);
-	u32 domain, exclude, dstcnt;
+	u32 type, inst, scope, exclude;
 	struct list_head dsts;
+	u32 dstcnt;
 
 	INIT_LIST_HEAD(&dsts);
 
-	if (seq->lower != seq->upper)
-		return -ENOTSUPP;
-
-	domain = addr_domain(net, dest->scope);
+	type = msg_nametype(hdr);
+	inst = dest->addr.name.name.instance;
+	scope = msg_lookup_scope(hdr);
 	exclude = tipc_group_exclude(grp);
-	if (!tipc_nametbl_lookup(net, seq->type, seq->lower, domain,
-				 &dsts, &dstcnt, exclude, true))
+
+	if (!tipc_nametbl_lookup(net, type, inst, scope, &dsts,
+				 &dstcnt, exclude, true))
 		return -EHOSTUNREACH;
 
 	if (dstcnt == 1) {
@@ -1116,24 +1118,29 @@ static int tipc_send_group_mcast(struct socket *sock, struct msghdr *m,
 void tipc_sk_mcast_rcv(struct net *net, struct sk_buff_head *arrvq,
 		       struct sk_buff_head *inputq)
 {
-	u32 scope = TIPC_CLUSTER_SCOPE;
 	u32 self = tipc_own_addr(net);
+	u32 type, lower, upper, scope;
 	struct sk_buff *skb, *_skb;
-	u32 lower = 0, upper = ~0;
-	struct sk_buff_head tmpq;
 	u32 portid, oport, onode;
+	struct sk_buff_head tmpq;
 	struct list_head dports;
-	struct tipc_msg *msg;
-	int user, mtyp, hsz;
+	struct tipc_msg *hdr;
+	int user, mtyp, hlen;
+	bool exact;
 
 	__skb_queue_head_init(&tmpq);
 	INIT_LIST_HEAD(&dports);
 
 	skb = tipc_skb_peek(arrvq, &inputq->lock);
 	for (; skb; skb = tipc_skb_peek(arrvq, &inputq->lock)) {
-		msg = buf_msg(skb);
-		user = msg_user(msg);
-		mtyp = msg_type(msg);
+		hdr = buf_msg(skb);
+		user = msg_user(hdr);
+		mtyp = msg_type(hdr);
+		hlen = skb_headroom(skb) + msg_hdr_sz(hdr);
+		oport = msg_origport(hdr);
+		onode = msg_orignode(hdr);
+		type = msg_nametype(hdr);
+
 		if (mtyp == TIPC_GRP_UCAST_MSG || user == GROUP_PROTOCOL) {
 			spin_lock_bh(&inputq->lock);
 			if (skb_peek(arrvq) == skb) {
@@ -1144,21 +1151,31 @@ void tipc_sk_mcast_rcv(struct net *net, struct sk_buff_head *arrvq,
 			spin_unlock_bh(&inputq->lock);
 			continue;
 		}
-		hsz = skb_headroom(skb) + msg_hdr_sz(msg);
-		oport = msg_origport(msg);
-		onode = msg_orignode(msg);
-		if (onode == self)
-			scope = TIPC_NODE_SCOPE;
 
-		/* Create destination port list and message clones: */
-		if (!msg_in_group(msg)) {
-			lower = msg_namelower(msg);
-			upper = msg_nameupper(msg);
+		/* Group messages require exact scope match */
+		if (msg_in_group(hdr)) {
+			lower = 0;
+			upper = ~0;
+			scope = msg_lookup_scope(hdr);
+			exact = true;
+		} else {
+			/* TIPC_NODE_SCOPE means "any scope" in this context */
+			if (onode == self)
+				scope = TIPC_NODE_SCOPE;
+			else
+				scope = TIPC_CLUSTER_SCOPE;
+			exact = false;
+			lower = msg_namelower(hdr);
+			upper = msg_nameupper(hdr);
 		}
-		tipc_nametbl_mc_translate(net, msg_nametype(msg), lower, upper,
-					  scope, &dports);
+
+		/* Create destination port list: */
+		tipc_nametbl_mc_lookup(net, type, lower, upper,
+				       scope, exact, &dports);
+
+		/* Clone message per destination */
 		while (tipc_dest_pop(&dports, NULL, &portid)) {
-			_skb = __pskb_copy(skb, hsz, GFP_ATOMIC);
+			_skb = __pskb_copy(skb, hlen, GFP_ATOMIC);
 			if (_skb) {
 				msg_set_destport(buf_msg(_skb), portid);
 				__skb_queue_tail(&tmpq, _skb);
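
After the lookup, the receive path fans a single multicast buffer out to every matching local port by cloning it once per destination and retargeting each clone. A user-space analog of that clone-and-retarget loop (message layout hypothetical):

	#include <stdlib.h>
	#include <string.h>

	struct msg { unsigned int destport; char payload[64]; };

	/* Clone the message once per destination port; the original is
	 * left untouched so the caller can release it afterwards, like
	 * the peeked skb above. Clones that fail to allocate are simply
	 * skipped, as in the kernel loop.
	 */
	static size_t fan_out(const struct msg *m, const unsigned int *ports,
			      size_t nports, struct msg **out)
	{
		size_t i, n = 0;

		for (i = 0; i < nports; i++) {
			struct msg *c = malloc(sizeof(*c));

			if (!c)
				continue;
			memcpy(c, m, sizeof(*c));
			c->destport = ports[i];
			out[n++] = c;
		}
		return n;
	}
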
@@ -1933,8 +1950,7 @@ static void tipc_sk_proto_rcv(struct sock *sk,
 		break;
 	case TOP_SRV:
 		tipc_group_member_evt(tsk->group, &wakeup, &sk->sk_rcvbuf,
-				      skb, inputq, xmitq);
-		skb = NULL;
+				      hdr, inputq, xmitq);
 		break;
 	default:
 		break;
@@ -2640,9 +2656,7 @@ void tipc_sk_reinit(struct net *net)
 	rhashtable_walk_enter(&tn->sk_rht, &iter);
 
 	do {
-		tsk = ERR_PTR(rhashtable_walk_start(&iter));
-		if (IS_ERR(tsk))
-			goto walk_stop;
+		rhashtable_walk_start(&iter);
 
 		while ((tsk = rhashtable_walk_next(&iter)) && !IS_ERR(tsk)) {
 			spin_lock_bh(&tsk->sk.sk_lock.slock);
@@ -2651,7 +2665,7 @@ void tipc_sk_reinit(struct net *net)
 			msg_set_orignode(msg, tn->own_addr);
 			spin_unlock_bh(&tsk->sk.sk_lock.slock);
 		}
-walk_stop:
+
 		rhashtable_walk_stop(&iter);
 	} while (tsk == ERR_PTR(-EAGAIN));
 }
@@ -2734,7 +2748,6 @@ void tipc_sk_rht_destroy(struct net *net)
 static int tipc_sk_join(struct tipc_sock *tsk, struct tipc_group_req *mreq)
 {
 	struct net *net = sock_net(&tsk->sk);
-	u32 domain = addr_domain(net, mreq->scope);
 	struct tipc_group *grp = tsk->group;
 	struct tipc_msg *hdr = &tsk->phdr;
 	struct tipc_name_seq seq;
@@ -2742,6 +2755,8 @@ static int tipc_sk_join(struct tipc_sock *tsk, struct tipc_group_req *mreq)
 
 	if (mreq->type < TIPC_RESERVED_TYPES)
 		return -EACCES;
+	if (mreq->scope > TIPC_NODE_SCOPE)
+		return -EINVAL;
 	if (grp)
 		return -EACCES;
 	grp = tipc_group_create(net, tsk->portid, mreq);
@@ -2754,16 +2769,16 @@ static int tipc_sk_join(struct tipc_sock *tsk, struct tipc_group_req *mreq)
 	seq.type = mreq->type;
 	seq.lower = mreq->instance;
 	seq.upper = seq.lower;
-	tipc_nametbl_build_group(net, grp, mreq->type, domain);
+	tipc_nametbl_build_group(net, grp, mreq->type, mreq->scope);
 	rc = tipc_sk_publish(tsk, mreq->scope, &seq);
 	if (rc) {
 		tipc_group_delete(net, grp);
 		tsk->group = NULL;
 	}
-
-	/* Eliminate any risk that a broadcast overtakes the sent JOIN */
+	/* Eliminate any risk that a broadcast overtakes sent JOINs */
 	tsk->mc_method.rcast = true;
 	tsk->mc_method.mandatory = true;
+	tipc_group_join(net, grp, &tsk->sk.sk_rcvbuf);
 	return rc;
 }
 
diff --git a/net/tipc/subscr.c b/net/tipc/subscr.c
index 251065d..44df528 100644
--- a/net/tipc/subscr.c
+++ b/net/tipc/subscr.c
@@ -118,15 +118,19 @@ void tipc_subscrp_convert_seq(struct tipc_name_seq *in, int swap,
 
 void tipc_subscrp_report_overlap(struct tipc_subscription *sub, u32 found_lower,
 				 u32 found_upper, u32 event, u32 port_ref,
-				 u32 node, int must)
+				 u32 node, u32 scope, int must)
 {
+	u32 filter = htohl(sub->evt.s.filter, sub->swap);
 	struct tipc_name_seq seq;
 
 	tipc_subscrp_convert_seq(&sub->evt.s.seq, sub->swap, &seq);
 	if (!tipc_subscrp_check_overlap(&seq, found_lower, found_upper))
 		return;
-	if (!must &&
-	    !(htohl(sub->evt.s.filter, sub->swap) & TIPC_SUB_PORTS))
+	if (!must && !(filter & TIPC_SUB_PORTS))
+		return;
+	if (filter & TIPC_SUB_CLUSTER_SCOPE && scope == TIPC_NODE_SCOPE)
+		return;
+	if (filter & TIPC_SUB_NODE_SCOPE && scope != TIPC_NODE_SCOPE)
 		return;
 
 	tipc_subscrp_send_event(sub, found_lower, found_upper, event, port_ref,
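
The two new checks let a subscriber restrict events by publication scope: TIPC_SUB_CLUSTER_SCOPE suppresses node-local publications and TIPC_SUB_NODE_SCOPE suppresses everything else. As a standalone predicate (the TIPC_NODE_SCOPE value is assumed for the sketch):

	#define TIPC_SUB_CLUSTER_SCOPE	0x20
	#define TIPC_SUB_NODE_SCOPE	0x40
	#define TIPC_NODE_SCOPE		3	/* assumed value */

	static int scope_event_allowed(unsigned int filter, unsigned int scope)
	{
		if (filter & TIPC_SUB_CLUSTER_SCOPE && scope == TIPC_NODE_SCOPE)
			return 0;
		if (filter & TIPC_SUB_NODE_SCOPE && scope != TIPC_NODE_SCOPE)
			return 0;
		return 1;
	}
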
@@ -286,7 +290,8 @@ static struct tipc_subscription *tipc_subscrp_create(struct net *net,
 }
 
 static void tipc_subscrp_subscribe(struct net *net, struct tipc_subscr *s,
-				   struct tipc_subscriber *subscriber, int swap)
+				   struct tipc_subscriber *subscriber, int swap,
+				   bool status)
 {
 	struct tipc_net *tn = net_generic(net, tipc_net_id);
 	struct tipc_subscription *sub = NULL;
@@ -299,7 +304,7 @@ static void tipc_subscrp_subscribe(struct net *net, struct tipc_subscr *s,
 	spin_lock_bh(&subscriber->lock);
 	list_add(&sub->subscrp_list, &subscriber->subscrp_list);
 	sub->subscriber = subscriber;
-	tipc_nametbl_subscribe(sub);
+	tipc_nametbl_subscribe(sub, status);
 	tipc_subscrb_get(subscriber);
 	spin_unlock_bh(&subscriber->lock);
 
@@ -323,6 +328,7 @@ static void tipc_subscrb_rcv_cb(struct net *net, int conid,
 {
 	struct tipc_subscriber *subscriber = usr_data;
 	struct tipc_subscr *s = (struct tipc_subscr *)buf;
+	bool status;
 	int swap;
 
 	/* Determine subscriber's endianness */
@@ -334,8 +340,8 @@ static void tipc_subscrb_rcv_cb(struct net *net, int conid,
 		s->filter &= ~htohl(TIPC_SUB_CANCEL, swap);
 		return tipc_subscrp_cancel(s, subscriber);
 	}
-
-	tipc_subscrp_subscribe(net, s, subscriber, swap);
+	status = !(s->filter & htohl(TIPC_SUB_NO_STATUS, swap));
+	tipc_subscrp_subscribe(net, s, subscriber, swap, status);
 }
 
 /* Handle one request to establish a new subscriber */
diff --git a/net/tipc/subscr.h b/net/tipc/subscr.h
index ee52957..f3edca7 100644
--- a/net/tipc/subscr.h
+++ b/net/tipc/subscr.h
@@ -71,7 +71,7 @@ int tipc_subscrp_check_overlap(struct tipc_name_seq *seq, u32 found_lower,
 			       u32 found_upper);
 void tipc_subscrp_report_overlap(struct tipc_subscription *sub,
 				 u32 found_lower, u32 found_upper, u32 event,
-				 u32 port_ref, u32 node, int must);
+				 u32 port_ref, u32 node, u32 scope, int must);
 void tipc_subscrp_convert_seq(struct tipc_name_seq *in, int swap,
 			      struct tipc_name_seq *out);
 u32 tipc_subscrp_convert_seq_type(u32 type, int swap);
diff --git a/net/wireless/ibss.c b/net/wireless/ibss.c
index 413d4f4..a1d1099 100644
--- a/net/wireless/ibss.c
+++ b/net/wireless/ibss.c
@@ -126,6 +126,11 @@ static int __cfg80211_join_ibss(struct cfg80211_registered_device *rdev,
 	wdev->ibss_fixed = params->channel_fixed;
 	wdev->ibss_dfs_possible = params->userspace_handles_dfs;
 	wdev->chandef = params->chandef;
+	if (connkeys) {
+		params->wep_keys = connkeys->params;
+		params->wep_tx_key = connkeys->def;
+	}
+
 #ifdef CONFIG_CFG80211_WEXT
 	wdev->wext.ibss.chandef = params->chandef;
 #endif
diff --git a/net/wireless/mlme.c b/net/wireless/mlme.c
index e7c64a8..bbb9907 100644
--- a/net/wireless/mlme.c
+++ b/net/wireless/mlme.c
@@ -692,7 +692,7 @@ int cfg80211_mlme_mgmt_tx(struct cfg80211_registered_device *rdev,
 	return rdev_mgmt_tx(rdev, wdev, params, cookie);
 }
 
-bool cfg80211_rx_mgmt(struct wireless_dev *wdev, int freq, int sig_mbm,
+bool cfg80211_rx_mgmt(struct wireless_dev *wdev, int freq, int sig_dbm,
 		      const u8 *buf, size_t len, u32 flags)
 {
 	struct wiphy *wiphy = wdev->wiphy;
@@ -708,7 +708,7 @@ bool cfg80211_rx_mgmt(struct wireless_dev *wdev, int freq, int sig_mbm,
 		cpu_to_le16(IEEE80211_FCTL_FTYPE | IEEE80211_FCTL_STYPE);
 	u16 stype;
 
-	trace_cfg80211_rx_mgmt(wdev, freq, sig_mbm);
+	trace_cfg80211_rx_mgmt(wdev, freq, sig_dbm);
 	stype = (le16_to_cpu(mgmt->frame_control) & IEEE80211_FCTL_STYPE) >> 4;
 
 	if (!(stypes->rx & BIT(stype))) {
@@ -735,7 +735,7 @@ bool cfg80211_rx_mgmt(struct wireless_dev *wdev, int freq, int sig_mbm,
 
 		/* Indicate the received Action frame to user space */
 		if (nl80211_send_mgmt(rdev, wdev, reg->nlportid,
-				      freq, sig_mbm,
+				      freq, sig_dbm,
 				      buf, len, flags, GFP_ATOMIC))
 			continue;
 
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
index 2b3dbcd..c084dd2 100644
--- a/net/wireless/nl80211.c
+++ b/net/wireless/nl80211.c
@@ -734,11 +734,12 @@ struct key_parse {
 	bool def_uni, def_multi;
 };
 
-static int nl80211_parse_key_new(struct nlattr *key, struct key_parse *k)
+static int nl80211_parse_key_new(struct genl_info *info, struct nlattr *key,
+				 struct key_parse *k)
 {
 	struct nlattr *tb[NL80211_KEY_MAX + 1];
 	int err = nla_parse_nested(tb, NL80211_KEY_MAX, key,
-				   nl80211_key_policy, NULL);
+				   nl80211_key_policy, info->extack);
 	if (err)
 		return err;
 
@@ -771,7 +772,8 @@ static int nl80211_parse_key_new(struct nlattr *key, struct key_parse *k)
 	if (tb[NL80211_KEY_TYPE]) {
 		k->type = nla_get_u32(tb[NL80211_KEY_TYPE]);
 		if (k->type < 0 || k->type >= NUM_NL80211_KEYTYPES)
-			return -EINVAL;
+			return genl_err_attr(info, -EINVAL,
+					     tb[NL80211_KEY_TYPE]);
 	}
 
 	if (tb[NL80211_KEY_DEFAULT_TYPES]) {
@@ -779,7 +781,8 @@ static int nl80211_parse_key_new(struct nlattr *key, struct key_parse *k)
 
 		err = nla_parse_nested(kdt, NUM_NL80211_KEY_DEFAULT_TYPES - 1,
 				       tb[NL80211_KEY_DEFAULT_TYPES],
-				       nl80211_key_default_policy, NULL);
+				       nl80211_key_default_policy,
+				       info->extack);
 		if (err)
 			return err;
 
@@ -820,8 +823,10 @@ static int nl80211_parse_key_old(struct genl_info *info, struct key_parse *k)
 
 	if (info->attrs[NL80211_ATTR_KEY_TYPE]) {
 		k->type = nla_get_u32(info->attrs[NL80211_ATTR_KEY_TYPE]);
-		if (k->type < 0 || k->type >= NUM_NL80211_KEYTYPES)
+		if (k->type < 0 || k->type >= NUM_NL80211_KEYTYPES) {
+			GENL_SET_ERR_MSG(info, "key type out of range");
 			return -EINVAL;
+		}
 	}
 
 	if (info->attrs[NL80211_ATTR_KEY_DEFAULT_TYPES]) {
@@ -850,31 +855,42 @@ static int nl80211_parse_key(struct genl_info *info, struct key_parse *k)
 	k->type = -1;
 
 	if (info->attrs[NL80211_ATTR_KEY])
-		err = nl80211_parse_key_new(info->attrs[NL80211_ATTR_KEY], k);
+		err = nl80211_parse_key_new(info, info->attrs[NL80211_ATTR_KEY], k);
 	else
 		err = nl80211_parse_key_old(info, k);
 
 	if (err)
 		return err;
 
-	if (k->def && k->defmgmt)
+	if (k->def && k->defmgmt) {
+		GENL_SET_ERR_MSG(info, "key with def && defmgmt is invalid");
 		return -EINVAL;
+	}
 
 	if (k->defmgmt) {
-		if (k->def_uni || !k->def_multi)
+		if (k->def_uni || !k->def_multi) {
+			GENL_SET_ERR_MSG(info, "defmgmt key must be mcast");
 			return -EINVAL;
+		}
 	}
 
 	if (k->idx != -1) {
 		if (k->defmgmt) {
-			if (k->idx < 4 || k->idx > 5)
+			if (k->idx < 4 || k->idx > 5) {
+				GENL_SET_ERR_MSG(info,
+						 "defmgmt key idx not 4 or 5");
 				return -EINVAL;
+			}
 		} else if (k->def) {
-			if (k->idx < 0 || k->idx > 3)
+			if (k->idx < 0 || k->idx > 3) {
+				GENL_SET_ERR_MSG(info, "def key idx not 0-3");
 				return -EINVAL;
+			}
 		} else {
-			if (k->idx < 0 || k->idx > 5)
+			if (k->idx < 0 || k->idx > 5) {
+				GENL_SET_ERR_MSG(info, "key idx not 0-5");
 				return -EINVAL;
+			}
 		}
 	}
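
The pattern throughout this hunk is to pair each -EINVAL with an extended-ACK string so user space learns which check failed, rather than a bare error code. A minimal user-space analog of that pattern (the helper is hypothetical; GENL_SET_ERR_MSG and genl_err_attr are the kernel interfaces used above):

	#include <errno.h>

	/* Return the errno and hand the caller a human-readable reason,
	 * as GENL_SET_ERR_MSG does via the netlink extack.
	 */
	static int check_key_idx(int idx, const char **reason)
	{
		if (idx < 0 || idx > 5) {
			*reason = "key idx not 0-5";
			return -EINVAL;
		}
		return 0;
	}
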
 
@@ -883,8 +899,9 @@ static int nl80211_parse_key(struct genl_info *info, struct key_parse *k)
 
 static struct cfg80211_cached_keys *
 nl80211_parse_connkeys(struct cfg80211_registered_device *rdev,
-		       struct nlattr *keys, bool *no_ht)
+		       struct genl_info *info, bool *no_ht)
 {
+	struct nlattr *keys = info->attrs[NL80211_ATTR_KEYS];
 	struct key_parse parse;
 	struct nlattr *key;
 	struct cfg80211_cached_keys *result;
@@ -909,17 +926,22 @@ nl80211_parse_connkeys(struct cfg80211_registered_device *rdev,
 		memset(&parse, 0, sizeof(parse));
 		parse.idx = -1;
 
-		err = nl80211_parse_key_new(key, &parse);
+		err = nl80211_parse_key_new(info, key, &parse);
 		if (err)
 			goto error;
 		err = -EINVAL;
 		if (!parse.p.key)
 			goto error;
-		if (parse.idx < 0 || parse.idx > 3)
+		if (parse.idx < 0 || parse.idx > 3) {
+			GENL_SET_ERR_MSG(info, "key index out of range [0-3]");
 			goto error;
+		}
 		if (parse.def) {
-			if (def)
+			if (def) {
+				GENL_SET_ERR_MSG(info,
+						 "only one key can be default");
 				goto error;
+			}
 			def = 1;
 			result->def = parse.idx;
 			if (!parse.def_uni || !parse.def_multi)
@@ -932,6 +954,7 @@ nl80211_parse_connkeys(struct cfg80211_registered_device *rdev,
 			goto error;
 		if (parse.p.cipher != WLAN_CIPHER_SUITE_WEP40 &&
 		    parse.p.cipher != WLAN_CIPHER_SUITE_WEP104) {
+			GENL_SET_ERR_MSG(info, "connect key must be WEP");
 			err = -EINVAL;
 			goto error;
 		}
@@ -947,6 +970,7 @@ nl80211_parse_connkeys(struct cfg80211_registered_device *rdev,
 
 	if (result->def < 0) {
 		err = -EINVAL;
+		GENL_SET_ERR_MSG(info, "need a default/TX key");
 		goto error;
 	}
 
@@ -7817,6 +7841,11 @@ static int nl80211_send_bss(struct sk_buff *msg, struct netlink_callback *cb,
 			      intbss->ts_boottime, NL80211_BSS_PAD))
 		goto nla_put_failure;
 
+	if (!nl80211_put_signal(msg, intbss->pub.chains,
+				intbss->pub.chain_signal,
+				NL80211_BSS_CHAIN_SIGNAL))
+		goto nla_put_failure;
+
 	switch (rdev->wiphy.signal_type) {
 	case CFG80211_SIGNAL_TYPE_MBM:
 		if (nla_put_u32(msg, NL80211_BSS_SIGNAL_MBM, res->signal))
@@ -8613,9 +8642,7 @@ static int nl80211_join_ibss(struct sk_buff *skb, struct genl_info *info)
 	if (ibss.privacy && info->attrs[NL80211_ATTR_KEYS]) {
 		bool no_ht = false;
 
-		connkeys = nl80211_parse_connkeys(rdev,
-					  info->attrs[NL80211_ATTR_KEYS],
-					  &no_ht);
+		connkeys = nl80211_parse_connkeys(rdev, info, &no_ht);
 		if (IS_ERR(connkeys))
 			return PTR_ERR(connkeys);
 
@@ -9019,8 +9046,7 @@ static int nl80211_connect(struct sk_buff *skb, struct genl_info *info)
 	}
 
 	if (connect.privacy && info->attrs[NL80211_ATTR_KEYS]) {
-		connkeys = nl80211_parse_connkeys(rdev,
-					  info->attrs[NL80211_ATTR_KEYS], NULL);
+		connkeys = nl80211_parse_connkeys(rdev, info, NULL);
 		if (IS_ERR(connkeys))
 			return PTR_ERR(connkeys);
 	}
@@ -13945,7 +13971,7 @@ void nl80211_send_disconnected(struct cfg80211_registered_device *rdev,
 
 	if (nla_put_u32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx) ||
 	    nla_put_u32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex) ||
-	    (from_ap && reason &&
+	    (reason &&
 	     nla_put_u16(msg, NL80211_ATTR_REASON_CODE, reason)) ||
 	    (from_ap &&
 	     nla_put_flag(msg, NL80211_ATTR_DISCONNECTED_BY_AP)) ||
diff --git a/net/wireless/scan.c b/net/wireless/scan.c
index f6c5fe48..d36c3eb 100644
--- a/net/wireless/scan.c
+++ b/net/wireless/scan.c
@@ -981,6 +981,9 @@ cfg80211_bss_update(struct cfg80211_registered_device *rdev,
 		found->ts = tmp->ts;
 		found->ts_boottime = tmp->ts_boottime;
 		found->parent_tsf = tmp->parent_tsf;
+		found->pub.chains = tmp->pub.chains;
+		memcpy(found->pub.chain_signal, tmp->pub.chain_signal,
+		       IEEE80211_MAX_CHAINS);
 		ether_addr_copy(found->parent_bssid, tmp->parent_bssid);
 	} else {
 		struct cfg80211_internal_bss *new;
@@ -1233,6 +1236,8 @@ cfg80211_inform_bss_frame_data(struct wiphy *wiphy,
 	tmp.pub.capability = le16_to_cpu(mgmt->u.probe_resp.capab_info);
 	tmp.ts_boottime = data->boottime_ns;
 	tmp.parent_tsf = data->parent_tsf;
+	tmp.pub.chains = data->chains;
+	memcpy(tmp.pub.chain_signal, data->chain_signal, IEEE80211_MAX_CHAINS);
 	ether_addr_copy(tmp.parent_bssid, data->parent_bssid);
 
 	signal_valid = abs(data->chan->center_freq - channel->center_freq) <=
diff --git a/net/wireless/trace.h b/net/wireless/trace.h
index f3353fe..bcfedd3 100644
--- a/net/wireless/trace.h
+++ b/net/wireless/trace.h
@@ -2544,20 +2544,20 @@ DEFINE_EVENT(cfg80211_netdev_mac_evt, cfg80211_del_sta,
 );
 
 TRACE_EVENT(cfg80211_rx_mgmt,
-	TP_PROTO(struct wireless_dev *wdev, int freq, int sig_mbm),
-	TP_ARGS(wdev, freq, sig_mbm),
+	TP_PROTO(struct wireless_dev *wdev, int freq, int sig_dbm),
+	TP_ARGS(wdev, freq, sig_dbm),
 	TP_STRUCT__entry(
 		WDEV_ENTRY
 		__field(int, freq)
-		__field(int, sig_mbm)
+		__field(int, sig_dbm)
 	),
 	TP_fast_assign(
 		WDEV_ASSIGN;
 		__entry->freq = freq;
-		__entry->sig_mbm = sig_mbm;
+		__entry->sig_dbm = sig_dbm;
 	),
-	TP_printk(WDEV_PR_FMT ", freq: %d, sig mbm: %d",
-		  WDEV_PR_ARG, __entry->freq, __entry->sig_mbm)
+	TP_printk(WDEV_PR_FMT ", freq: %d, sig dbm: %d",
+		  WDEV_PR_ARG, __entry->freq, __entry->sig_dbm)
 );
 
 TRACE_EVENT(cfg80211_mgmt_tx_status,
diff --git a/net/xfrm/xfrm_device.c b/net/xfrm/xfrm_device.c
index 30e5746..7598250 100644
--- a/net/xfrm/xfrm_device.c
+++ b/net/xfrm/xfrm_device.c
@@ -23,32 +23,114 @@
 #include <linux/notifier.h>
 
 #ifdef CONFIG_XFRM_OFFLOAD
-int validate_xmit_xfrm(struct sk_buff *skb, netdev_features_t features)
+struct sk_buff *validate_xmit_xfrm(struct sk_buff *skb, netdev_features_t features, bool *again)
 {
 	int err;
+	unsigned long flags;
 	struct xfrm_state *x;
+	struct sk_buff *skb2;
+	struct softnet_data *sd;
+	netdev_features_t esp_features = features;
 	struct xfrm_offload *xo = xfrm_offload(skb);
 
-	if (skb_is_gso(skb))
-		return 0;
+	if (!xo)
+		return skb;
 
-	if (xo) {
-		x = skb->sp->xvec[skb->sp->len - 1];
-		if (xo->flags & XFRM_GRO || x->xso.flags & XFRM_OFFLOAD_INBOUND)
-			return 0;
+	if (!(features & NETIF_F_HW_ESP))
+		esp_features = features & ~(NETIF_F_SG | NETIF_F_CSUM_MASK);
 
+	x = skb->sp->xvec[skb->sp->len - 1];
+	if (xo->flags & XFRM_GRO || x->xso.flags & XFRM_OFFLOAD_INBOUND)
+		return skb;
+
+	local_irq_save(flags);
+	sd = this_cpu_ptr(&softnet_data);
+	err = !skb_queue_empty(&sd->xfrm_backlog);
+	local_irq_restore(flags);
+
+	if (err) {
+		*again = true;
+		return skb;
+	}
+
+	if (skb_is_gso(skb)) {
+		struct net_device *dev = skb->dev;
+
+		if (unlikely(!x->xso.offload_handle || (x->xso.dev != dev))) {
+			struct sk_buff *segs;
+
+			/* Packet got rerouted, fixup features and segment it. */
+			esp_features = esp_features & ~(NETIF_F_HW_ESP
+							| NETIF_F_GSO_ESP);
+
+			segs = skb_gso_segment(skb, esp_features);
+			if (IS_ERR(segs)) {
+				kfree_skb(skb);
+				atomic_long_inc(&dev->tx_dropped);
+				return NULL;
+			} else {
+				consume_skb(skb);
+				skb = segs;
+			}
+		}
+	}
+
+	if (!skb->next) {
 		x->outer_mode->xmit(x, skb);
 
-		err = x->type_offload->xmit(x, skb, features);
+		xo->flags |= XFRM_DEV_RESUME;
+
+		err = x->type_offload->xmit(x, skb, esp_features);
 		if (err) {
+			if (err == -EINPROGRESS)
+				return NULL;
+
 			XFRM_INC_STATS(xs_net(x), LINUX_MIB_XFRMOUTSTATEPROTOERROR);
-			return err;
+			kfree_skb(skb);
+			return NULL;
 		}
 
 		skb_push(skb, skb->data - skb_mac_header(skb));
+
+		return skb;
 	}
 
-	return 0;
+	skb2 = skb;
+
+	do {
+		struct sk_buff *nskb = skb2->next;
+		skb2->next = NULL;
+
+		xo = xfrm_offload(skb2);
+		xo->flags |= XFRM_DEV_RESUME;
+
+		x->outer_mode->xmit(x, skb2);
+
+		err = x->type_offload->xmit(x, skb2, esp_features);
+		if (!err) {
+			skb2->next = nskb;
+		} else if (err != -EINPROGRESS) {
+			XFRM_INC_STATS(xs_net(x), LINUX_MIB_XFRMOUTSTATEPROTOERROR);
+			skb2->next = nskb;
+			kfree_skb_list(skb2);
+			return NULL;
+		} else {
+			if (skb == skb2)
+				skb = nskb;
+
+			if (!skb)
+				return NULL;
+
+			goto skip_push;
+		}
+
+		skb_push(skb2, skb2->data - skb_mac_header(skb2));
+
+skip_push:
+		skb2 = nskb;
+	} while (skb2);
+
+	return skb;
 }
 EXPORT_SYMBOL_GPL(validate_xmit_xfrm);
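
The per-segment loop above detaches each skb from the GSO list before handing it to the offload xmit, relinks it on success, and frees the whole remainder on a hard error; -EINPROGRESS means the segment is now owned by the async path and must be skipped without relinking. A user-space analog of the detach/process/relink walk over a next-linked list (return-code convention is the sketch's own):

	#include <stdlib.h>

	struct seg { struct seg *next; };

	static void free_rest(struct seg *s)
	{
		while (s) {
			struct seg *next = s->next;

			free(s);
			s = next;
		}
	}

	/* xmit() returns 0 on success, a negative value on a hard error,
	 * and 1 when the segment was consumed asynchronously (the
	 * -EINPROGRESS case, where the node must not be relinked or
	 * freed).
	 */
	static struct seg *walk_segments(struct seg *head,
					 int (*xmit)(struct seg *))
	{
		struct seg *cur = head;

		while (cur) {
			struct seg *next = cur->next;
			int rc;

			cur->next = NULL;	/* detach before submitting */
			rc = xmit(cur);
			if (rc == 0) {
				cur->next = next;	/* success: relink */
			} else if (rc < 0) {
				cur->next = next;	/* error: drop the rest */
				free_rest(cur);
				return NULL;
			} else if (cur == head) {
				head = next;	/* async: head moves on */
			}
			cur = next;
		}
		return head;
	}
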
 
@@ -67,7 +149,7 @@ int xfrm_dev_state_add(struct net *net, struct xfrm_state *x,
 
 	/* We don't yet support UDP encapsulation, TFC padding and ESN. */
 	if (x->encap || x->tfcpad || (x->props.flags & XFRM_STATE_ESN))
-		return 0;
+		return -EINVAL;
 
 	dev = dev_get_by_index(net, xuo->ifindex);
 	if (!dev) {
@@ -120,8 +202,8 @@ bool xfrm_dev_offload_ok(struct sk_buff *skb, struct xfrm_state *x)
 	if (!x->type_offload || x->encap)
 		return false;
 
-	if ((x->xso.offload_handle && (dev == dst->path->dev)) &&
-	     !dst->child->xfrm && x->type->get_mtu) {
+	if ((!dev || (x->xso.offload_handle && (dev == xfrm_dst_path(dst)->dev))) &&
+	     (!xdst->child->xfrm && x->type->get_mtu)) {
 		mtu = x->type->get_mtu(x, xdst->child_mtu_cached);
 
 		if (skb->len <= mtu)
@@ -140,19 +222,82 @@ bool xfrm_dev_offload_ok(struct sk_buff *skb, struct xfrm_state *x)
 	return true;
 }
 EXPORT_SYMBOL_GPL(xfrm_dev_offload_ok);
+
+void xfrm_dev_resume(struct sk_buff *skb)
+{
+	struct net_device *dev = skb->dev;
+	int ret = NETDEV_TX_BUSY;
+	struct netdev_queue *txq;
+	struct softnet_data *sd;
+	unsigned long flags;
+
+	rcu_read_lock();
+	txq = netdev_pick_tx(dev, skb, NULL);
+
+	HARD_TX_LOCK(dev, txq, smp_processor_id());
+	if (!netif_xmit_frozen_or_stopped(txq))
+		skb = dev_hard_start_xmit(skb, dev, txq, &ret);
+	HARD_TX_UNLOCK(dev, txq);
+
+	if (!dev_xmit_complete(ret)) {
+		local_irq_save(flags);
+		sd = this_cpu_ptr(&softnet_data);
+		skb_queue_tail(&sd->xfrm_backlog, skb);
+		raise_softirq_irqoff(NET_TX_SOFTIRQ);
+		local_irq_restore(flags);
+	}
+	rcu_read_unlock();
+}
+EXPORT_SYMBOL_GPL(xfrm_dev_resume);
+
+void xfrm_dev_backlog(struct softnet_data *sd)
+{
+	struct sk_buff_head *xfrm_backlog = &sd->xfrm_backlog;
+	struct sk_buff_head list;
+	struct sk_buff *skb;
+
+	if (skb_queue_empty(xfrm_backlog))
+		return;
+
+	__skb_queue_head_init(&list);
+
+	spin_lock(&xfrm_backlog->lock);
+	skb_queue_splice_init(xfrm_backlog, &list);
+	spin_unlock(&xfrm_backlog->lock);
+
+	while (!skb_queue_empty(&list)) {
+		skb = __skb_dequeue(&list);
+		xfrm_dev_resume(skb);
+	}
+}
 #endif
 
-static int xfrm_dev_register(struct net_device *dev)
+static int xfrm_api_check(struct net_device *dev)
 {
-	if ((dev->features & NETIF_F_HW_ESP) && !dev->xfrmdev_ops)
-		return NOTIFY_BAD;
+#ifdef CONFIG_XFRM_OFFLOAD
 	if ((dev->features & NETIF_F_HW_ESP_TX_CSUM) &&
 	    !(dev->features & NETIF_F_HW_ESP))
 		return NOTIFY_BAD;
 
+	if ((dev->features & NETIF_F_HW_ESP) &&
+	    (!(dev->xfrmdev_ops &&
+	       dev->xfrmdev_ops->xdo_dev_state_add &&
+	       dev->xfrmdev_ops->xdo_dev_state_delete)))
+		return NOTIFY_BAD;
+#else
+	if (dev->features & (NETIF_F_HW_ESP | NETIF_F_HW_ESP_TX_CSUM))
+		return NOTIFY_BAD;
+#endif
+
 	return NOTIFY_DONE;
 }
 
+static int xfrm_dev_register(struct net_device *dev)
+{
+	return xfrm_api_check(dev);
+}
+
 static int xfrm_dev_unregister(struct net_device *dev)
 {
 	xfrm_policy_cache_flush();
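
xfrm_dev_backlog() above uses the classic splice-and-drain shape: hold the shared queue's lock only long enough to move everything onto a private list, then resume transmission with the lock released, so xfrm_dev_resume() can requeue concurrently if the device is still busy. A user-space sketch of the same pattern with a mutex standing in for the queue lock:

	#include <pthread.h>

	struct item { struct item *next; };

	struct queue {
		pthread_mutex_t lock;
		struct item *head;
	};

	static void drain(struct queue *q, void (*resume)(struct item *))
	{
		struct item *list;

		/* Splice under the lock... */
		pthread_mutex_lock(&q->lock);
		list = q->head;
		q->head = NULL;
		pthread_mutex_unlock(&q->lock);

		/* ...and process without it held */
		while (list) {
			struct item *next = list->next;

			resume(list);
			list = next;
		}
	}
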
@@ -161,16 +306,7 @@ static int xfrm_dev_unregister(struct net_device *dev)
 
 static int xfrm_dev_feat_change(struct net_device *dev)
 {
-	if ((dev->features & NETIF_F_HW_ESP) && !dev->xfrmdev_ops)
-		return NOTIFY_BAD;
-	else if (!(dev->features & NETIF_F_HW_ESP))
-		dev->xfrmdev_ops = NULL;
-
-	if ((dev->features & NETIF_F_HW_ESP_TX_CSUM) &&
-	    !(dev->features & NETIF_F_HW_ESP))
-		return NOTIFY_BAD;
-
-	return NOTIFY_DONE;
+	return xfrm_api_check(dev);
 }
 
 static int xfrm_dev_down(struct net_device *dev)
diff --git a/net/xfrm/xfrm_input.c b/net/xfrm/xfrm_input.c
index 3f6f6f8..26b10eb 100644
--- a/net/xfrm/xfrm_input.c
+++ b/net/xfrm/xfrm_input.c
@@ -257,7 +257,6 @@ int xfrm_input(struct sk_buff *skb, int nexthdr, __be32 spi, int encap_type)
 
 		if (xo && (xo->flags & CRYPTO_DONE)) {
 			crypto_done = true;
-			x = xfrm_input_state(skb);
 			family = XFRM_SPI_SKB_CB(skb)->family;
 
 			if (!(xo->status & CRYPTO_SUCCESS)) {
diff --git a/net/xfrm/xfrm_output.c b/net/xfrm/xfrm_output.c
index 73ad8c8..2346867 100644
--- a/net/xfrm/xfrm_output.c
+++ b/net/xfrm/xfrm_output.c
@@ -44,7 +44,7 @@ static int xfrm_skb_check_space(struct sk_buff *skb)
 
 static struct dst_entry *skb_dst_pop(struct sk_buff *skb)
 {
-	struct dst_entry *child = dst_clone(skb_dst(skb)->child);
+	struct dst_entry *child = dst_clone(xfrm_dst_child(skb_dst(skb)));
 
 	skb_dst_drop(skb);
 	return child;
diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
index 70aa5cb..d8a8129 100644
--- a/net/xfrm/xfrm_policy.c
+++ b/net/xfrm/xfrm_policy.c
@@ -54,7 +54,7 @@ static struct xfrm_policy_afinfo const __rcu *xfrm_policy_afinfo[AF_INET6 + 1]
 static struct kmem_cache *xfrm_dst_cache __read_mostly;
 static __read_mostly seqcount_t xfrm_policy_hash_generation;
 
-static void xfrm_init_pmtu(struct dst_entry *dst);
+static void xfrm_init_pmtu(struct xfrm_dst **bundle, int nr);
 static int stale_bundle(struct dst_entry *dst);
 static int xfrm_bundle_ok(struct xfrm_dst *xdst);
 static void xfrm_policy_queue_process(struct timer_list *t);
@@ -1257,7 +1257,7 @@ EXPORT_SYMBOL(xfrm_policy_delete);
 
 int xfrm_sk_policy_insert(struct sock *sk, int dir, struct xfrm_policy *pol)
 {
-	struct net *net = xp_net(pol);
+	struct net *net = sock_net(sk);
 	struct xfrm_policy *old_pol;
 
 #ifdef CONFIG_XFRM_SUB_POLICY
@@ -1544,7 +1544,9 @@ static inline int xfrm_fill_dst(struct xfrm_dst *xdst, struct net_device *dev,
  */
 
 static struct dst_entry *xfrm_bundle_create(struct xfrm_policy *policy,
-					    struct xfrm_state **xfrm, int nx,
+					    struct xfrm_state **xfrm,
+					    struct xfrm_dst **bundle,
+					    int nx,
 					    const struct flowi *fl,
 					    struct dst_entry *dst)
 {
@@ -1552,8 +1554,8 @@ static struct dst_entry *xfrm_bundle_create(struct xfrm_policy *policy,
 	unsigned long now = jiffies;
 	struct net_device *dev;
 	struct xfrm_mode *inner_mode;
-	struct dst_entry *dst_prev = NULL;
-	struct dst_entry *dst0 = NULL;
+	struct xfrm_dst *xdst_prev = NULL;
+	struct xfrm_dst *xdst0 = NULL;
 	int i = 0;
 	int err;
 	int header_len = 0;
@@ -1579,13 +1581,14 @@ static struct dst_entry *xfrm_bundle_create(struct xfrm_policy *policy,
 			goto put_states;
 		}
 
-		if (!dst_prev)
-			dst0 = dst1;
+		bundle[i] = xdst;
+		if (!xdst_prev)
+			xdst0 = xdst;
 		else
 			/* Ref count is taken during xfrm_alloc_dst()
 			 * No need to do dst_clone() on dst1
 			 */
-			dst_prev->child = dst1;
+			xfrm_dst_set_child(xdst_prev, &xdst->u.dst);
 
 		if (xfrm[i]->sel.family == AF_UNSPEC) {
 			inner_mode = xfrm_ip2inner_mode(xfrm[i],
@@ -1622,8 +1625,7 @@ static struct dst_entry *xfrm_bundle_create(struct xfrm_policy *policy,
 		dst1->input = dst_discard;
 		dst1->output = inner_mode->afinfo->output;
 
-		dst1->next = dst_prev;
-		dst_prev = dst1;
+		xdst_prev = xdst;
 
 		header_len += xfrm[i]->props.header_len;
 		if (xfrm[i]->type->flags & XFRM_TYPE_NON_FRAGMENT)
@@ -1631,40 +1633,39 @@ static struct dst_entry *xfrm_bundle_create(struct xfrm_policy *policy,
 		trailer_len += xfrm[i]->props.trailer_len;
 	}
 
-	dst_prev->child = dst;
-	dst0->path = dst;
+	xfrm_dst_set_child(xdst_prev, dst);
+	xdst0->path = dst;
 
 	err = -ENODEV;
 	dev = dst->dev;
 	if (!dev)
 		goto free_dst;
 
-	xfrm_init_path((struct xfrm_dst *)dst0, dst, nfheader_len);
-	xfrm_init_pmtu(dst_prev);
+	xfrm_init_path(xdst0, dst, nfheader_len);
+	xfrm_init_pmtu(bundle, nx);
 
-	for (dst_prev = dst0; dst_prev != dst; dst_prev = dst_prev->child) {
-		struct xfrm_dst *xdst = (struct xfrm_dst *)dst_prev;
-
-		err = xfrm_fill_dst(xdst, dev, fl);
+	for (xdst_prev = xdst0; xdst_prev != (struct xfrm_dst *)dst;
+	     xdst_prev = (struct xfrm_dst *) xfrm_dst_child(&xdst_prev->u.dst)) {
+		err = xfrm_fill_dst(xdst_prev, dev, fl);
 		if (err)
 			goto free_dst;
 
-		dst_prev->header_len = header_len;
-		dst_prev->trailer_len = trailer_len;
-		header_len -= xdst->u.dst.xfrm->props.header_len;
-		trailer_len -= xdst->u.dst.xfrm->props.trailer_len;
+		xdst_prev->u.dst.header_len = header_len;
+		xdst_prev->u.dst.trailer_len = trailer_len;
+		header_len -= xdst_prev->u.dst.xfrm->props.header_len;
+		trailer_len -= xdst_prev->u.dst.xfrm->props.trailer_len;
 	}
 
 out:
-	return dst0;
+	return &xdst0->u.dst;
 
 put_states:
 	for (; i < nx; i++)
 		xfrm_state_put(xfrm[i]);
 free_dst:
-	if (dst0)
-		dst_release_immediate(dst0);
-	dst0 = ERR_PTR(err);
+	if (xdst0)
+		dst_release_immediate(&xdst0->u.dst);
+	xdst0 = ERR_PTR(err);
 	goto out;
 }
 
@@ -1806,7 +1807,7 @@ static bool xfrm_xdst_can_reuse(struct xfrm_dst *xdst,
 	for (i = 0; i < num; i++) {
 		if (!dst || dst->xfrm != xfrm[i])
 			return false;
-		dst = dst->child;
+		dst = xfrm_dst_child(dst);
 	}
 
 	return xfrm_bundle_ok(xdst);
@@ -1819,6 +1820,7 @@ xfrm_resolve_and_create_bundle(struct xfrm_policy **pols, int num_pols,
 {
 	struct net *net = xp_net(pols[0]);
 	struct xfrm_state *xfrm[XFRM_MAX_DEPTH];
+	struct xfrm_dst *bundle[XFRM_MAX_DEPTH];
 	struct xfrm_dst *xdst, *old;
 	struct dst_entry *dst;
 	int err;
@@ -1847,7 +1849,7 @@ xfrm_resolve_and_create_bundle(struct xfrm_policy **pols, int num_pols,
 
 	old = xdst;
 
-	dst = xfrm_bundle_create(pols[0], xfrm, err, fl, dst_orig);
+	dst = xfrm_bundle_create(pols[0], xfrm, bundle, err, fl, dst_orig);
 	if (IS_ERR(dst)) {
 		XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTBUNDLEGENERROR);
 		return ERR_CAST(dst);
@@ -1887,8 +1889,8 @@ static void xfrm_policy_queue_process(struct timer_list *t)
 	xfrm_decode_session(skb, &fl, dst->ops->family);
 	spin_unlock(&pq->hold_queue.lock);
 
-	dst_hold(dst->path);
-	dst = xfrm_lookup(net, dst->path, &fl, sk, 0);
+	dst_hold(xfrm_dst_path(dst));
+	dst = xfrm_lookup(net, xfrm_dst_path(dst), &fl, sk, 0);
 	if (IS_ERR(dst))
 		goto purge_queue;
 
@@ -1917,8 +1919,8 @@ static void xfrm_policy_queue_process(struct timer_list *t)
 		skb = __skb_dequeue(&list);
 
 		xfrm_decode_session(skb, &fl, skb_dst(skb)->ops->family);
-		dst_hold(skb_dst(skb)->path);
-		dst = xfrm_lookup(net, skb_dst(skb)->path, &fl, skb->sk, 0);
+		dst_hold(xfrm_dst_path(skb_dst(skb)));
+		dst = xfrm_lookup(net, xfrm_dst_path(skb_dst(skb)), &fl, skb->sk, 0);
 		if (IS_ERR(dst)) {
 			kfree_skb(skb);
 			continue;
@@ -2019,8 +2021,8 @@ static struct xfrm_dst *xfrm_create_dummy_bundle(struct net *net,
 	dst1->output = xdst_queue_output;
 
 	dst_hold(dst);
-	dst1->child = dst;
-	dst1->path = dst;
+	xfrm_dst_set_child(xdst, dst);
+	xdst->path = dst;
 
 	xfrm_init_path((struct xfrm_dst *)dst1, dst, 0);
 
@@ -2583,7 +2585,7 @@ static int stale_bundle(struct dst_entry *dst)
 
 void xfrm_dst_ifdown(struct dst_entry *dst, struct net_device *dev)
 {
-	while ((dst = dst->child) && dst->xfrm && dst->dev == dev) {
+	while ((dst = xfrm_dst_child(dst)) && dst->xfrm && dst->dev == dev) {
 		dst->dev = dev_net(dev)->loopback_dev;
 		dev_hold(dst->dev);
 		dev_put(dev);
@@ -2607,13 +2609,15 @@ static struct dst_entry *xfrm_negative_advice(struct dst_entry *dst)
 	return dst;
 }
 
-static void xfrm_init_pmtu(struct dst_entry *dst)
+static void xfrm_init_pmtu(struct xfrm_dst **bundle, int nr)
 {
-	do {
-		struct xfrm_dst *xdst = (struct xfrm_dst *)dst;
+	while (nr--) {
+		struct xfrm_dst *xdst = bundle[nr];
 		u32 pmtu, route_mtu_cached;
+		struct dst_entry *dst;
 
-		pmtu = dst_mtu(dst->child);
+		dst = &xdst->u.dst;
+		pmtu = dst_mtu(xfrm_dst_child(dst));
 		xdst->child_mtu_cached = pmtu;
 
 		pmtu = xfrm_state_mtu(dst->xfrm, pmtu);
@@ -2625,7 +2629,7 @@ static void xfrm_init_pmtu(struct dst_entry *dst)
 			pmtu = route_mtu_cached;
 
 		dst_metric_set(dst, RTAX_MTU, pmtu);
-	} while ((dst = dst->next));
+	}
 }
 
 /* Check that the bundle accepts the flow and its components are
@@ -2634,19 +2638,20 @@ static void xfrm_init_pmtu(struct dst_entry *dst)
 
 static int xfrm_bundle_ok(struct xfrm_dst *first)
 {
+	struct xfrm_dst *bundle[XFRM_MAX_DEPTH];
 	struct dst_entry *dst = &first->u.dst;
-	struct xfrm_dst *last;
+	struct xfrm_dst *xdst;
+	int start_from, nr;
 	u32 mtu;
 
-	if (!dst_check(dst->path, ((struct xfrm_dst *)dst)->path_cookie) ||
+	if (!dst_check(xfrm_dst_path(dst), ((struct xfrm_dst *)dst)->path_cookie) ||
 	    (dst->dev && !netif_running(dst->dev)))
 		return 0;
 
 	if (dst->flags & DST_XFRM_QUEUE)
 		return 1;
 
-	last = NULL;
-
+	start_from = nr = 0;
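+	/* Record the bundle members in bundle[] so that changed MTUs can
+	 * be propagated back up without a parent pointer.
+	 */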
 	do {
 		struct xfrm_dst *xdst = (struct xfrm_dst *)dst;
 
@@ -2658,9 +2663,11 @@ static int xfrm_bundle_ok(struct xfrm_dst *first)
 		    xdst->policy_genid != atomic_read(&xdst->pols[0]->genid))
 			return 0;
 
-		mtu = dst_mtu(dst->child);
+		bundle[nr++] = xdst;
+
+		mtu = dst_mtu(xfrm_dst_child(dst));
 		if (xdst->child_mtu_cached != mtu) {
-			last = xdst;
+			start_from = nr;
 			xdst->child_mtu_cached = mtu;
 		}
 
@@ -2668,30 +2675,30 @@ static int xfrm_bundle_ok(struct xfrm_dst *first)
 			return 0;
 		mtu = dst_mtu(xdst->route);
 		if (xdst->route_mtu_cached != mtu) {
-			last = xdst;
+			start_from = nr;
 			xdst->route_mtu_cached = mtu;
 		}
 
-		dst = dst->child;
+		dst = xfrm_dst_child(dst);
 	} while (dst->xfrm);
 
-	if (likely(!last))
+	if (likely(!start_from))
 		return 1;
 
-	mtu = last->child_mtu_cached;
-	for (;;) {
-		dst = &last->u.dst;
+	xdst = bundle[start_from - 1];
+	mtu = xdst->child_mtu_cached;
+	while (start_from--) {
+		dst = &xdst->u.dst;
 
 		mtu = xfrm_state_mtu(dst->xfrm, mtu);
-		if (mtu > last->route_mtu_cached)
-			mtu = last->route_mtu_cached;
+		if (mtu > xdst->route_mtu_cached)
+			mtu = xdst->route_mtu_cached;
 		dst_metric_set(dst, RTAX_MTU, mtu);
-
-		if (last == first)
+		if (!start_from)
 			break;
 
-		last = (struct xfrm_dst *)last->u.dst.next;
-		last->child_mtu_cached = mtu;
+		xdst = bundle[start_from - 1];
+		xdst->child_mtu_cached = mtu;
 	}
 
 	return 1;
@@ -2699,22 +2706,20 @@ static int xfrm_bundle_ok(struct xfrm_dst *first)
 
 static unsigned int xfrm_default_advmss(const struct dst_entry *dst)
 {
-	return dst_metric_advmss(dst->path);
+	return dst_metric_advmss(xfrm_dst_path(dst));
 }
 
 static unsigned int xfrm_mtu(const struct dst_entry *dst)
 {
 	unsigned int mtu = dst_metric_raw(dst, RTAX_MTU);
 
-	return mtu ? : dst_mtu(dst->path);
+	return mtu ? : dst_mtu(xfrm_dst_path(dst));
 }
 
 static const void *xfrm_get_dst_nexthop(const struct dst_entry *dst,
 					const void *daddr)
 {
-	const struct dst_entry *path = dst->path;
-
-	for (; dst != path; dst = dst->child) {
+	while (dst->xfrm) {
 		const struct xfrm_state *xfrm = dst->xfrm;
 
 		if (xfrm->props.mode == XFRM_MODE_TRANSPORT)
@@ -2723,6 +2728,8 @@ static const void *xfrm_get_dst_nexthop(const struct dst_entry *dst,
 			daddr = xfrm->coaddr;
 		else if (!(xfrm->type->flags & XFRM_TYPE_LOCAL_COADDR))
 			daddr = &xfrm->id.daddr;
+
+		dst = xfrm_dst_child(dst);
 	}
 	return daddr;
 }
@@ -2731,7 +2738,7 @@ static struct neighbour *xfrm_neigh_lookup(const struct dst_entry *dst,
 					   struct sk_buff *skb,
 					   const void *daddr)
 {
-	const struct dst_entry *path = dst->path;
+	const struct dst_entry *path = xfrm_dst_path(dst);
 
 	if (!skb)
 		daddr = xfrm_get_dst_nexthop(dst, daddr);
@@ -2740,7 +2747,7 @@ static struct neighbour *xfrm_neigh_lookup(const struct dst_entry *dst,
 
 static void xfrm_confirm_neigh(const struct dst_entry *dst, const void *daddr)
 {
-	const struct dst_entry *path = dst->path;
+	const struct dst_entry *path = xfrm_dst_path(dst);
 
 	daddr = xfrm_get_dst_nexthop(dst, daddr);
 	path->ops->confirm_neigh(path, daddr);
diff --git a/net/xfrm/xfrm_replay.c b/net/xfrm/xfrm_replay.c
index 8b23c5b..0250181 100644
--- a/net/xfrm/xfrm_replay.c
+++ b/net/xfrm/xfrm_replay.c
@@ -666,7 +666,7 @@ static int xfrm_replay_overflow_offload_esn(struct xfrm_state *x, struct sk_buff
 		if (unlikely(oseq < replay_esn->oseq)) {
 			XFRM_SKB_CB(skb)->seq.output.hi = ++oseq_hi;
 			xo->seq.hi = oseq_hi;
-
+			replay_esn->oseq_hi = oseq_hi;
 			if (replay_esn->oseq_hi == 0) {
 				replay_esn->oseq--;
 				replay_esn->oseq_hi--;
@@ -678,7 +678,6 @@ static int xfrm_replay_overflow_offload_esn(struct xfrm_state *x, struct sk_buff
 		}
 
 		replay_esn->oseq = oseq;
-		replay_esn->oseq_hi = oseq_hi;
 
 		if (xfrm_aevent_is_on(net))
 			x->repl->notify(x, XFRM_REPLAY_UPDATE);
diff --git a/net/xfrm/xfrm_state.c b/net/xfrm/xfrm_state.c
index 500b339..cc4c519 100644
--- a/net/xfrm/xfrm_state.c
+++ b/net/xfrm/xfrm_state.c
@@ -2049,6 +2049,13 @@ int xfrm_user_policy(struct sock *sk, int optname, u8 __user *optval, int optlen
 	struct xfrm_mgr *km;
 	struct xfrm_policy *pol = NULL;
 
+	if (!optval && !optlen) {
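+		/* Clear the per-socket policies and flush the cached route */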
+		xfrm_sk_policy_insert(sk, XFRM_POLICY_IN, NULL);
+		xfrm_sk_policy_insert(sk, XFRM_POLICY_OUT, NULL);
+		__sk_dst_reset(sk);
+		return 0;
+	}
+
 	if (optlen <= 0 || optlen > PAGE_SIZE)
 		return -EMSGSIZE;
 
diff --git a/samples/bpf/Makefile b/samples/bpf/Makefile
index adeaa13..7f61a3d 100644
--- a/samples/bpf/Makefile
+++ b/samples/bpf/Makefile
@@ -12,6 +12,7 @@
 hostprogs-y += tracex4
 hostprogs-y += tracex5
 hostprogs-y += tracex6
+hostprogs-y += tracex7
 hostprogs-y += test_probe_write_user
 hostprogs-y += trace_output
 hostprogs-y += lathist
@@ -40,6 +41,7 @@
 hostprogs-y += xdp_redirect_map
 hostprogs-y += xdp_redirect_cpu
 hostprogs-y += xdp_monitor
+hostprogs-y += xdp_rxq_info
 hostprogs-y += syscall_tp
 
 # Libbpf dependencies
@@ -58,6 +60,7 @@
 tracex4-objs := bpf_load.o $(LIBBPF) tracex4_user.o
 tracex5-objs := bpf_load.o $(LIBBPF) tracex5_user.o
 tracex6-objs := bpf_load.o $(LIBBPF) tracex6_user.o
+tracex7-objs := bpf_load.o $(LIBBPF) tracex7_user.o
 load_sock_ops-objs := bpf_load.o $(LIBBPF) load_sock_ops.o
 test_probe_write_user-objs := bpf_load.o $(LIBBPF) test_probe_write_user_user.o
 trace_output-objs := bpf_load.o $(LIBBPF) trace_output_user.o
@@ -88,6 +91,7 @@
 xdp_redirect_map-objs := bpf_load.o $(LIBBPF) xdp_redirect_map_user.o
 xdp_redirect_cpu-objs := bpf_load.o $(LIBBPF) xdp_redirect_cpu_user.o
 xdp_monitor-objs := bpf_load.o $(LIBBPF) xdp_monitor_user.o
+xdp_rxq_info-objs := bpf_load.o $(LIBBPF) xdp_rxq_info_user.o
 syscall_tp-objs := bpf_load.o $(LIBBPF) syscall_tp_user.o
 
 # Tell kbuild to always build the programs
@@ -101,6 +105,7 @@
 always += tracex4_kern.o
 always += tracex5_kern.o
 always += tracex6_kern.o
+always += tracex7_kern.o
 always += sock_flags_kern.o
 always += test_probe_write_user_kern.o
 always += trace_output_kern.o
@@ -136,6 +141,8 @@
 always += xdp_redirect_map_kern.o
 always += xdp_redirect_cpu_kern.o
 always += xdp_monitor_kern.o
+always += xdp_rxq_info_kern.o
+always += xdp2skb_meta_kern.o
 always += syscall_tp_kern.o
 
 HOSTCFLAGS += -I$(objtree)/usr/include
@@ -155,6 +162,7 @@
 HOSTLOADLIBES_tracex4 += -lelf -lrt
 HOSTLOADLIBES_tracex5 += -lelf
 HOSTLOADLIBES_tracex6 += -lelf
+HOSTLOADLIBES_tracex7 += -lelf
 HOSTLOADLIBES_test_cgrp2_sock2 += -lelf
 HOSTLOADLIBES_load_sock_ops += -lelf
 HOSTLOADLIBES_test_probe_write_user += -lelf
@@ -178,6 +186,7 @@
 HOSTLOADLIBES_xdp_redirect_map += -lelf
 HOSTLOADLIBES_xdp_redirect_cpu += -lelf
 HOSTLOADLIBES_xdp_monitor += -lelf
+HOSTLOADLIBES_xdp_rxq_info += -lelf
 HOSTLOADLIBES_syscall_tp += -lelf
 
 # Allows pointing LLC/CLANG to a LLVM backend with bpf support, redefine on cmdline:
diff --git a/samples/bpf/tcbpf2_kern.c b/samples/bpf/tcbpf2_kern.c
index 370b749..f6bbf8f 100644
--- a/samples/bpf/tcbpf2_kern.c
+++ b/samples/bpf/tcbpf2_kern.c
@@ -35,12 +35,22 @@ struct geneve_opt {
 	u8	opt_data[8]; /* hard-coded to 8 bytes */
 };
 
+struct erspan_md2 {
+	__be32 timestamp;
+	__be16 sgt;
+	__be16 flags;
+};
+
 struct vxlan_metadata {
 	u32     gbp;
 };
 
 struct erspan_metadata {
-	__be32 index;
+	union {
+		__be32 index;
+		struct erspan_md2 md2;
+	} u;
+	int version;
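+	/* version selects the union member: 1 -> u.index, 2 -> u.md2 */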
 };
 
 SEC("gre_set_tunnel")
@@ -81,6 +91,49 @@ int _gre_get_tunnel(struct __sk_buff *skb)
 	return TC_ACT_OK;
 }
 
+SEC("ip6gretap_set_tunnel")
+int _ip6gretap_set_tunnel(struct __sk_buff *skb)
+{
+	struct bpf_tunnel_key key;
+	int ret;
+
+	__builtin_memset(&key, 0x0, sizeof(key));
+	key.remote_ipv6[3] = _htonl(0x11); /* ::11 */
+	key.tunnel_id = 2;
+	key.tunnel_tos = 0;
+	key.tunnel_ttl = 64;
+	key.tunnel_label = 0xabcde;
+
+	ret = bpf_skb_set_tunnel_key(skb, &key, sizeof(key),
+				     BPF_F_TUNINFO_IPV6 | BPF_F_ZERO_CSUM_TX);
+	if (ret < 0) {
+		ERROR(ret);
+		return TC_ACT_SHOT;
+	}
+
+	return TC_ACT_OK;
+}
+
+SEC("ip6gretap_get_tunnel")
+int _ip6gretap_get_tunnel(struct __sk_buff *skb)
+{
+	char fmt[] = "key %d remote ip6 ::%x label %x\n";
+	struct bpf_tunnel_key key;
+	int ret;
+
+	ret = bpf_skb_get_tunnel_key(skb, &key, sizeof(key),
+				     BPF_F_TUNINFO_IPV6);
+	if (ret < 0) {
+		ERROR(ret);
+		return TC_ACT_SHOT;
+	}
+
+	bpf_trace_printk(fmt, sizeof(fmt),
+			 key.tunnel_id, key.remote_ipv6[3], key.tunnel_label);
+
+	return TC_ACT_OK;
+}
+
 SEC("erspan_set_tunnel")
 int _erspan_set_tunnel(struct __sk_buff *skb)
 {
@@ -100,7 +153,18 @@ int _erspan_set_tunnel(struct __sk_buff *skb)
 		return TC_ACT_SHOT;
 	}
 
-	md.index = htonl(123);
+	__builtin_memset(&md, 0, sizeof(md));
+#ifdef ERSPAN_V1
+	md.version = 1;
+	md.u.index = htonl(123);
+#else
+	u8 direction = 1;
+	u16 hwid = 7;
+
+	md.version = 2;
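+	/* md2.flags carries the direction in bit 3 and the hwid in bits 4-9 */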
+	md.u.md2.flags = htons((direction << 3) | (hwid << 4));
+#endif
+
 	ret = bpf_skb_set_tunnel_opt(skb, &md, sizeof(md));
 	if (ret < 0) {
 		ERROR(ret);
@@ -113,7 +177,7 @@ int _erspan_set_tunnel(struct __sk_buff *skb)
 SEC("erspan_get_tunnel")
 int _erspan_get_tunnel(struct __sk_buff *skb)
 {
-	char fmt[] = "key %d remote ip 0x%x erspan index 0x%x\n";
+	char fmt[] = "key %d remote ip 0x%x erspan version %d\n";
 	struct bpf_tunnel_key key;
 	struct erspan_metadata md;
 	u32 index;
@@ -131,9 +195,105 @@ int _erspan_get_tunnel(struct __sk_buff *skb)
 		return TC_ACT_SHOT;
 	}
 
-	index = bpf_ntohl(md.index);
 	bpf_trace_printk(fmt, sizeof(fmt),
-			key.tunnel_id, key.remote_ipv4, index);
+			key.tunnel_id, key.remote_ipv4, md.version);
+
+#ifdef ERSPAN_V1
+	char fmt2[] = "\tindex %x\n";
+
+	index = bpf_ntohl(md.u.index);
+	bpf_trace_printk(fmt2, sizeof(fmt2), index);
+#else
+	char fmt2[] = "\tdirection %d hwid %x timestamp %u\n";
+
+	bpf_trace_printk(fmt2, sizeof(fmt2),
+		(ntohs(md.u.md2.flags) >> 3) & 0x1,
+		(ntohs(md.u.md2.flags) >> 4) & 0x3f,
+		bpf_ntohl(md.u.md2.timestamp));
+#endif
+
+	return TC_ACT_OK;
+}
+
+SEC("ip4ip6erspan_set_tunnel")
+int _ip4ip6erspan_set_tunnel(struct __sk_buff *skb)
+{
+	struct bpf_tunnel_key key;
+	struct erspan_metadata md;
+	int ret;
+
+	__builtin_memset(&key, 0x0, sizeof(key));
+	key.remote_ipv6[3] = _htonl(0x11);
+	key.tunnel_id = 2;
+	key.tunnel_tos = 0;
+	key.tunnel_ttl = 64;
+
+	ret = bpf_skb_set_tunnel_key(skb, &key, sizeof(key),
+				     BPF_F_TUNINFO_IPV6);
+	if (ret < 0) {
+		ERROR(ret);
+		return TC_ACT_SHOT;
+	}
+
+	__builtin_memset(&md, 0, sizeof(md));
+
+#ifdef ERSPAN_V1
+	md.u.index = htonl(123);
+	md.version = 1;
+#else
+	u8 direction = 0;
+	u16 hwid = 17;
+
+	md.version = 2;
+	md.u.md2.flags = htons((direction << 3) | (hwid << 4));
+#endif
+
+	ret = bpf_skb_set_tunnel_opt(skb, &md, sizeof(md));
+	if (ret < 0) {
+		ERROR(ret);
+		return TC_ACT_SHOT;
+	}
+
+	return TC_ACT_OK;
+}
+
+SEC("ip4ip6erspan_get_tunnel")
+int _ip4ip6erspan_get_tunnel(struct __sk_buff *skb)
+{
+	char fmt[] = "ip6erspan get key %d remote ip6 ::%x erspan version %d\n";
+	struct bpf_tunnel_key key;
+	struct erspan_metadata md;
+	u32 index;
+	int ret;
+
+	ret = bpf_skb_get_tunnel_key(skb, &key, sizeof(key), BPF_F_TUNINFO_IPV6);
+	if (ret < 0) {
+		ERROR(ret);
+		return TC_ACT_SHOT;
+	}
+
+	ret = bpf_skb_get_tunnel_opt(skb, &md, sizeof(md));
+	if (ret < 0) {
+		ERROR(ret);
+		return TC_ACT_SHOT;
+	}
+
+	bpf_trace_printk(fmt, sizeof(fmt),
+			key.tunnel_id, key.remote_ipv6[3], md.version);
+
+#ifdef ERSPAN_V1
+	char fmt2[] = "\tindex %x\n";
+
+	index = bpf_ntohl(md.u.index);
+	bpf_trace_printk(fmt2, sizeof(fmt2), index);
+#else
+	char fmt2[] = "\tdirection %d hwid %x timestamp %u\n";
+
+	bpf_trace_printk(fmt2, sizeof(fmt2),
+		(ntohs(md.u.md2.flags) >> 3) & 0x1,
+		(ntohs(md.u.md2.flags) >> 4) & 0x3f,
+		bpf_ntohl(md.u.md2.timestamp));
+#endif
 
 	return TC_ACT_OK;
 }
diff --git a/samples/bpf/test_cgrp2_attach2.c b/samples/bpf/test_cgrp2_attach2.c
index 3e8232c..1af412e 100644
--- a/samples/bpf/test_cgrp2_attach2.c
+++ b/samples/bpf/test_cgrp2_attach2.c
@@ -78,7 +78,8 @@ static int test_foo_bar(void)
 	if (join_cgroup(FOO))
 		goto err;
 
-	if (bpf_prog_attach(drop_prog, foo, BPF_CGROUP_INET_EGRESS, 1)) {
+	if (bpf_prog_attach(drop_prog, foo, BPF_CGROUP_INET_EGRESS,
+			    BPF_F_ALLOW_OVERRIDE)) {
 		log_err("Attaching prog to /foo");
 		goto err;
 	}
@@ -97,7 +98,8 @@ static int test_foo_bar(void)
 	printf("Attached DROP prog. This ping in cgroup /foo/bar should fail...\n");
 	assert(system(PING_CMD) != 0);
 
-	if (bpf_prog_attach(allow_prog, bar, BPF_CGROUP_INET_EGRESS, 1)) {
+	if (bpf_prog_attach(allow_prog, bar, BPF_CGROUP_INET_EGRESS,
+			    BPF_F_ALLOW_OVERRIDE)) {
 		log_err("Attaching prog to /foo/bar");
 		goto err;
 	}
@@ -114,7 +116,8 @@ static int test_foo_bar(void)
 	       "This ping in cgroup /foo/bar should fail...\n");
 	assert(system(PING_CMD) != 0);
 
-	if (bpf_prog_attach(allow_prog, bar, BPF_CGROUP_INET_EGRESS, 1)) {
+	if (bpf_prog_attach(allow_prog, bar, BPF_CGROUP_INET_EGRESS,
+			    BPF_F_ALLOW_OVERRIDE)) {
 		log_err("Attaching prog to /foo/bar");
 		goto err;
 	}
@@ -128,7 +131,8 @@ static int test_foo_bar(void)
 	       "This ping in cgroup /foo/bar should pass...\n");
 	assert(system(PING_CMD) == 0);
 
-	if (bpf_prog_attach(allow_prog, bar, BPF_CGROUP_INET_EGRESS, 1)) {
+	if (bpf_prog_attach(allow_prog, bar, BPF_CGROUP_INET_EGRESS,
+			    BPF_F_ALLOW_OVERRIDE)) {
 		log_err("Attaching prog to /foo/bar");
 		goto err;
 	}
@@ -161,13 +165,15 @@ static int test_foo_bar(void)
 		goto err;
 	}
 
-	if (!bpf_prog_attach(allow_prog, bar, BPF_CGROUP_INET_EGRESS, 1)) {
+	if (!bpf_prog_attach(allow_prog, bar, BPF_CGROUP_INET_EGRESS,
+			     BPF_F_ALLOW_OVERRIDE)) {
 		errno = 0;
 		log_err("Unexpected success attaching overridable prog to /foo/bar");
 		goto err;
 	}
 
-	if (!bpf_prog_attach(allow_prog, foo, BPF_CGROUP_INET_EGRESS, 1)) {
+	if (!bpf_prog_attach(allow_prog, foo, BPF_CGROUP_INET_EGRESS,
+			     BPF_F_ALLOW_OVERRIDE)) {
 		errno = 0;
 		log_err("Unexpected success attaching overridable prog to /foo");
 		goto err;
@@ -273,27 +279,33 @@ static int test_multiprog(void)
 	if (join_cgroup("/cg1/cg2/cg3/cg4/cg5"))
 		goto err;
 
-	if (bpf_prog_attach(allow_prog[0], cg1, BPF_CGROUP_INET_EGRESS, 2)) {
+	if (bpf_prog_attach(allow_prog[0], cg1, BPF_CGROUP_INET_EGRESS,
+			    BPF_F_ALLOW_MULTI)) {
 		log_err("Attaching prog to cg1");
 		goto err;
 	}
-	if (!bpf_prog_attach(allow_prog[0], cg1, BPF_CGROUP_INET_EGRESS, 2)) {
+	if (!bpf_prog_attach(allow_prog[0], cg1, BPF_CGROUP_INET_EGRESS,
+			     BPF_F_ALLOW_MULTI)) {
 		log_err("Unexpected success attaching the same prog to cg1");
 		goto err;
 	}
-	if (bpf_prog_attach(allow_prog[1], cg1, BPF_CGROUP_INET_EGRESS, 2)) {
+	if (bpf_prog_attach(allow_prog[1], cg1, BPF_CGROUP_INET_EGRESS,
+			    BPF_F_ALLOW_MULTI)) {
 		log_err("Attaching prog2 to cg1");
 		goto err;
 	}
-	if (bpf_prog_attach(allow_prog[2], cg2, BPF_CGROUP_INET_EGRESS, 1)) {
+	if (bpf_prog_attach(allow_prog[2], cg2, BPF_CGROUP_INET_EGRESS,
+			    BPF_F_ALLOW_OVERRIDE)) {
 		log_err("Attaching prog to cg2");
 		goto err;
 	}
-	if (bpf_prog_attach(allow_prog[3], cg3, BPF_CGROUP_INET_EGRESS, 2)) {
+	if (bpf_prog_attach(allow_prog[3], cg3, BPF_CGROUP_INET_EGRESS,
+			    BPF_F_ALLOW_MULTI)) {
 		log_err("Attaching prog to cg3");
 		goto err;
 	}
-	if (bpf_prog_attach(allow_prog[4], cg4, BPF_CGROUP_INET_EGRESS, 1)) {
+	if (bpf_prog_attach(allow_prog[4], cg4, BPF_CGROUP_INET_EGRESS,
+			    BPF_F_ALLOW_OVERRIDE)) {
 		log_err("Attaching prog to cg4");
 		goto err;
 	}
diff --git a/samples/bpf/test_override_return.sh b/samples/bpf/test_override_return.sh
new file mode 100755
index 0000000..e68b9ee
--- /dev/null
+++ b/samples/bpf/test_override_return.sh
@@ -0,0 +1,15 @@
+#!/bin/bash
+
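+# tracex7 attaches a kprobe to open_ctree() and forces it to fail via
+# bpf_override_return(), so mounting the btrfs image is expected to
+# fail; tracex7 exits with 0 when the mount failed as expected.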
+rm -f testfile.img
+dd if=/dev/zero of=testfile.img bs=1M seek=1000 count=1
+DEVICE=$(losetup --show -f testfile.img)
+mkfs.btrfs -f $DEVICE
+mkdir tmpmnt
+./tracex7 $DEVICE
+if [ $? -eq 0 ]
+then
+	echo "SUCCESS!"
+else
+	echo "FAILED!"
+fi
+losetup -d $DEVICE
diff --git a/samples/bpf/test_tunnel_bpf.sh b/samples/bpf/test_tunnel_bpf.sh
index 312e172..ae7f7c3 100755
--- a/samples/bpf/test_tunnel_bpf.sh
+++ b/samples/bpf/test_tunnel_bpf.sh
@@ -33,10 +33,43 @@
 	ip addr add dev $DEV 10.1.1.200/24
 }
 
-function add_erspan_tunnel {
+function add_ip6gretap_tunnel {
+
+	# assign ipv6 address
+	ip netns exec at_ns0 ip addr add ::11/96 dev veth0
+	ip netns exec at_ns0 ip link set dev veth0 up
+	ip addr add dev veth1 ::22/96
+	ip link set dev veth1 up
+
 	# in namespace
 	ip netns exec at_ns0 \
-		ip link add dev $DEV_NS type $TYPE seq key 2 local 172.16.1.100 remote 172.16.1.200 erspan 123
+		ip link add dev $DEV_NS type $TYPE flowlabel 0xbcdef key 2 \
+		local ::11 remote ::22
+
+	ip netns exec at_ns0 ip addr add dev $DEV_NS 10.1.1.100/24
+	ip netns exec at_ns0 ip addr add dev $DEV_NS fc80::100/96
+	ip netns exec at_ns0 ip link set dev $DEV_NS up
+
+	# out of namespace
+	ip link add dev $DEV type $TYPE external
+	ip addr add dev $DEV 10.1.1.200/24
+	ip addr add dev $DEV fc80::200/96
+	ip link set dev $DEV up
+}
+
+function add_erspan_tunnel {
+	# in namespace
+	if [ "$1" == "v1" ]; then
+		ip netns exec at_ns0 \
+		ip link add dev $DEV_NS type $TYPE seq key 2 \
+		local 172.16.1.100 remote 172.16.1.200 \
+		erspan_ver 1 erspan 123
+	else
+		ip netns exec at_ns0 \
+		ip link add dev $DEV_NS type $TYPE seq key 2 \
+		local 172.16.1.100 remote 172.16.1.200 \
+		erspan_ver 2 erspan_dir 1 erspan_hwid 3
+	fi
 	ip netns exec at_ns0 ip link set dev $DEV_NS up
 	ip netns exec at_ns0 ip addr add dev $DEV_NS 10.1.1.100/24
 
@@ -46,6 +79,35 @@
 	ip addr add dev $DEV 10.1.1.200/24
 }
 
+function add_ip6erspan_tunnel {
+
+	# assign ipv6 address
+	ip netns exec at_ns0 ip addr add ::11/96 dev veth0
+	ip netns exec at_ns0 ip link set dev veth0 up
+	ip addr add dev veth1 ::22/96
+	ip link set dev veth1 up
+
+	# in namespace
+	if [ "$1" == "v1" ]; then
+		ip netns exec at_ns0 \
+		ip link add dev $DEV_NS type $TYPE seq key 2 \
+		local ::11 remote ::22 \
+		erspan_ver 1 erspan 123
+	else
+		ip netns exec at_ns0 \
+		ip link add dev $DEV_NS type $TYPE seq key 2 \
+		local ::11 remote ::22 \
+		erspan_ver 2 erspan_dir 1 erspan_hwid 7
+	fi
+	ip netns exec at_ns0 ip addr add dev $DEV_NS 10.1.1.100/24
+	ip netns exec at_ns0 ip link set dev $DEV_NS up
+
+	# out of namespace
+	ip link add dev $DEV type $TYPE external
+	ip addr add dev $DEV 10.1.1.200/24
+	ip link set dev $DEV up
+}
+
 function add_vxlan_tunnel {
 	# Set static ARP entry here because iptables set-mark works
 	# on L3 packet, as a result not applying to ARP packets,
@@ -113,18 +175,65 @@
 	cleanup
 }
 
+function test_ip6gre {
+	TYPE=ip6gre
+	DEV_NS=ip6gre00
+	DEV=ip6gre11
+	config_device
+	# reuse the ip6gretap function
+	add_ip6gretap_tunnel
+	attach_bpf $DEV ip6gretap_set_tunnel ip6gretap_get_tunnel
+	# underlay
+	ping6 -c 4 ::11
+	# overlay: ipv4 over ipv6
+	ip netns exec at_ns0 ping -c 1 10.1.1.200
+	ping -c 1 10.1.1.100
+	# overlay: ipv6 over ipv6
+	ip netns exec at_ns0 ping6 -c 1 fc80::200
+	cleanup
+}
+
+function test_ip6gretap {
+	TYPE=ip6gretap
+	DEV_NS=ip6gretap00
+	DEV=ip6gretap11
+	config_device
+	add_ip6gretap_tunnel
+	attach_bpf $DEV ip6gretap_set_tunnel ip6gretap_get_tunnel
+	# underlay
+	ping6 -c 4 ::11
+	# overlay: ipv4 over ipv6
+	ip netns exec at_ns0 ping -i .2 -c 1 10.1.1.200
+	ping -c 1 10.1.1.100
+	# overlay: ipv6 over ipv6
+	ip netns exec at_ns0 ping6 -c 1 fc80::200
+	cleanup
+}
+
 function test_erspan {
 	TYPE=erspan
 	DEV_NS=erspan00
 	DEV=erspan11
 	config_device
-	add_erspan_tunnel
+	add_erspan_tunnel $1
 	attach_bpf $DEV erspan_set_tunnel erspan_get_tunnel
 	ping -c 1 10.1.1.100
 	ip netns exec at_ns0 ping -c 1 10.1.1.200
 	cleanup
 }
 
+function test_ip6erspan {
+	TYPE=ip6erspan
+	DEV_NS=ip6erspan00
+	DEV=ip6erspan11
+	config_device
+	add_ip6erspan_tunnel $1
+	attach_bpf $DEV ip4ip6erspan_set_tunnel ip4ip6erspan_get_tunnel
+	ping6 -c 3 ::11
+	ip netns exec at_ns0 ping -c 1 10.1.1.200
+	cleanup
+}
+
 function test_vxlan {
 	TYPE=vxlan
 	DEV_NS=vxlan00
@@ -175,9 +284,12 @@
 	ip link del veth1
 	ip link del ipip11
 	ip link del gretap11
+	ip link del ip6gre11
+	ip link del ip6gretap11
 	ip link del vxlan11
 	ip link del geneve11
 	ip link del erspan11
+	ip link del ip6erspan11
 	pkill tcpdump
 	pkill cat
 	set -ex
@@ -187,8 +299,16 @@
 cleanup
 echo "Testing GRE tunnel..."
 test_gre
+echo "Testing IP6GRE tunnel..."
+test_ip6gre
+echo "Testing IP6GRETAP tunnel..."
+test_ip6gretap
 echo "Testing ERSPAN tunnel..."
-test_erspan
+test_erspan v1
+test_erspan v2
+echo "Testing IP6ERSPAN tunnel..."
+test_ip6erspan v1
+test_ip6erspan v2
 echo "Testing VXLAN tunnel..."
 test_vxlan
 echo "Testing GENEVE tunnel..."
diff --git a/samples/bpf/tracex7_kern.c b/samples/bpf/tracex7_kern.c
new file mode 100644
index 0000000..1ab308a
--- /dev/null
+++ b/samples/bpf/tracex7_kern.c
@@ -0,0 +1,16 @@
+#include <uapi/linux/ptrace.h>
+#include <uapi/linux/bpf.h>
+#include <linux/version.h>
+#include "bpf_helpers.h"
+
+SEC("kprobe/open_ctree")
+int bpf_prog1(struct pt_regs *ctx)
+{
+	unsigned long rc = -12;
+
+	bpf_override_return(ctx, rc);
+	return 0;
+}
+
+char _license[] SEC("license") = "GPL";
+u32 _version SEC("version") = LINUX_VERSION_CODE;
diff --git a/samples/bpf/tracex7_user.c b/samples/bpf/tracex7_user.c
new file mode 100644
index 0000000..8a52ac4
--- /dev/null
+++ b/samples/bpf/tracex7_user.c
@@ -0,0 +1,28 @@
+#define _GNU_SOURCE
+
+#include <stdio.h>
+#include <linux/bpf.h>
+#include <unistd.h>
+#include "libbpf.h"
+#include "bpf_load.h"
+
+int main(int argc, char **argv)
+{
+	FILE *f;
+	char filename[256];
+	char command[256];
+	int ret;
+
+	snprintf(filename, sizeof(filename), "%s_kern.o", argv[0]);
+
+	if (load_bpf_file(filename)) {
+		printf("%s", bpf_log_buf);
+		return 1;
+	}
+
+	snprintf(command, 256, "mount %s tmpmnt/", argv[1]);
+	f = popen(command, "r");
+	ret = pclose(f);
+
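+	/* The mount is expected to fail: tracex7_kern.o overrides the
+	 * return value of open_ctree() via bpf_override_return().
+	 */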
+	return ret ? 0 : 1;
+}
diff --git a/samples/bpf/xdp2skb_meta.sh b/samples/bpf/xdp2skb_meta.sh
new file mode 100755
index 0000000..b9c9549
--- /dev/null
+++ b/samples/bpf/xdp2skb_meta.sh
@@ -0,0 +1,220 @@
+#!/bin/bash
+#
+# SPDX-License-Identifier: GPL-2.0
+# Copyright (c) 2018 Jesper Dangaard Brouer, Red Hat Inc.
+#
+# Bash-shell example on using iproute2 tools 'tc' and 'ip' to load
+# eBPF programs, both for XDP and clsbpf.  Shell script function
+# wrappers and even long options parsing is illustrated, for ease of
+# use.
+#
+# Related to sample/bpf/xdp2skb_meta_kern.c, which contains BPF-progs
+# that need to collaborate between XDP and TC hooks.  Thus, it is
+# convenient that the same tool load both programs that need to work
+# together.
+#
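+# Example usage ("eth0" is a placeholder device name):
+#   ./xdp2skb_meta.sh --dev eth0 --verbose
+#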
+BPF_FILE=xdp2skb_meta_kern.o
+DIR=$(dirname $0)
+
+export TC=/usr/sbin/tc
+export IP=/usr/sbin/ip
+
+function usage() {
+    echo ""
+    echo "Usage: $0 [-vfh] --dev ethX"
+    echo "  -d | --dev     :             Network device (required)"
+    echo "  --flush        :             Cleanup flush TC and XDP progs"
+    echo "  --list         : (\$LIST)     List TC and XDP progs"
+    echo "  -v | --verbose : (\$VERBOSE)  Verbose"
+    echo "  --dry-run      : (\$DRYRUN)   Dry-run only (echo commands)"
+    echo ""
+}
+
+## -- General shell logging cmds --
+function err() {
+    local exitcode=$1
+    shift
+    echo "ERROR: $@" >&2
+    exit $exitcode
+}
+
+function info() {
+    if [[ -n "$VERBOSE" ]]; then
+	echo "# $@"
+    fi
+}
+
+## -- Helper function calls --
+
+# Wrapper call for TC and IP
+# - Will display the offending command on failure
+function _call_cmd() {
+    local cmd="$1"
+    local allow_fail="$2"
+    shift 2
+    if [[ -n "$VERBOSE" ]]; then
+	echo "$(basename $cmd) $@"
+    fi
+    if [[ -n "$DRYRUN" ]]; then
+	return
+    fi
+    $cmd "$@"
+    local status=$?
+    if (( $status != 0 )); then
+	if [[ "$allow_fail" == "" ]]; then
+	    err 2 "Exec error($status) occurred cmd: \"$cmd $@\""
+	fi
+    fi
+}
+function call_tc() {
+    _call_cmd "$TC" "" "$@"
+}
+function call_tc_allow_fail() {
+    _call_cmd "$TC" "allow_fail" "$@"
+}
+function call_ip() {
+    _call_cmd "$IP" "" "$@"
+}
+
+##  --- Parse command line arguments / parameters ---
+# Using external program "getopt" to get --long-options
+OPTIONS=$(getopt -o vfhd: \
+    --long verbose,flush,help,list,dev:,dry-run -- "$@")
+if (( $? != 0 )); then
+    err 4 "Error calling getopt"
+fi
+eval set -- "$OPTIONS"
+
+unset DEV
+unset FLUSH
+while true; do
+    case "$1" in
+	-d | --dev ) # device
+	    DEV=$2
+	    info "Device set to: DEV=$DEV" >&2
+	    shift 2
+	    ;;
+	-v | --verbose)
+	    VERBOSE=yes
+	    # info "Verbose mode: VERBOSE=$VERBOSE" >&2
+	    shift
+	    ;;
+	--dry-run )
+	    DRYRUN=yes
+	    VERBOSE=yes
+	    info "Dry-run mode: enable VERBOSE and don't call TC+IP" >&2
+	    shift
+            ;;
+	-f | --flush )
+	    FLUSH=yes
+	    shift
+	    ;;
+	--list )
+	    LIST=yes
+	    shift
+	    ;;
+	-- )
+	    shift
+	    break
+	    ;;
+	-h | --help )
+	    usage;
+	    exit 0
+	    ;;
+	* )
+	    shift
+	    break
+	    ;;
+    esac
+done
+
+FILE="$DIR/$BPF_FILE"
+if [[ ! -e $FILE ]]; then
+    err 3 "Missing BPF object file ($FILE)"
+fi
+
+if [[ -z $DEV ]]; then
+    usage
+    err 2 "Please specify network device -- required option --dev"
+fi
+
+## -- Function calls --
+
+function list_tc()
+{
+    local device="$1"
+    shift
+    info "Listing current TC ingress rules"
+    call_tc filter show dev $device ingress
+}
+
+function list_xdp()
+{
+    local device="$1"
+    shift
+    info "Listing current XDP device($device) setting"
+    call_ip link show dev $device | grep --color=auto xdp
+}
+
+function flush_tc()
+{
+    local device="$1"
+    shift
+    info "Flush TC on device: $device"
+    call_tc_allow_fail filter del dev $device ingress
+    call_tc_allow_fail qdisc del dev $device clsact
+}
+
+function flush_xdp()
+{
+    local device="$1"
+    shift
+    info "Flush XDP on device: $device"
+    call_ip link set dev $device xdp off
+}
+
+function attach_tc_mark()
+{
+    local device="$1"
+    local file="$2"
+    local prog="tc_mark"
+    shift 2
+
+    # Re-attach clsact to clear/flush existing rules
+    call_tc_allow_fail qdisc del dev $device clsact 2> /dev/null
+    call_tc            qdisc add dev $device clsact
+
+    # Attach BPF prog
+    call_tc filter add dev $device ingress \
+	    prio 1 handle 1 bpf da obj $file sec $prog
+}
+
+function attach_xdp_mark()
+{
+    local device="$1"
+    local file="$2"
+    local prog="xdp_mark"
+    shift 2
+
+    # Remove XDP prog in case it's already loaded
+    # TODO: Need ip-link option to override/replace existing XDP prog
+    flush_xdp $device
+
+    # Attach XDP/BPF prog
+    call_ip link set dev $device xdp obj $file sec $prog
+}
+
+if [[ -n $FLUSH ]]; then
+    flush_tc  $DEV
+    flush_xdp $DEV
+    exit 0
+fi
+
+if [[ -n $LIST ]]; then
+    list_tc  $DEV
+    list_xdp $DEV
+    exit 0
+fi
+
+attach_tc_mark  $DEV $FILE
+attach_xdp_mark $DEV $FILE
diff --git a/samples/bpf/xdp2skb_meta_kern.c b/samples/bpf/xdp2skb_meta_kern.c
new file mode 100644
index 0000000..12e1024
--- /dev/null
+++ b/samples/bpf/xdp2skb_meta_kern.c
@@ -0,0 +1,103 @@
+/* SPDX-License-Identifier: GPL-2.0
+ * Copyright (c) 2018 Jesper Dangaard Brouer, Red Hat Inc.
+ *
+ * Example of how to transfer info from XDP to SKB, e.g. skb->mark
+ * -----------------------------------------------------------
+ * This uses the XDP data_meta infrastructure, and is a cooperation
+ * between two bpf-programs (1) XDP and (2) clsact at TC-ingress hook.
+ *
+ * Notice: This example does not use the BPF C-loader (bpf_load.c),
+ * but instead relies on the iproute2 TC tool for loading BPF objects.
+ */
+#include <uapi/linux/bpf.h>
+#include <uapi/linux/pkt_cls.h>
+
+#include "bpf_helpers.h"
+
+/*
+ * This struct is stored in the XDP 'data_meta' area, which is located
+ * just in front of the raw packet payload data.  The meaning is
+ * specific to these two BPF programs that use it as a communication
+ * channel.  XDP adjusts/increases the area via a bpf helper, and TC
+ * uses boundary checks to see if data has been provided.
+ *
+ * The struct must be 4 byte aligned, which here is enforced by the
+ * struct __attribute__((aligned(4))).
+ */
+struct meta_info {
+	__u32 mark;
+} __attribute__((aligned(4)));
+
+SEC("xdp_mark")
+int _xdp_mark(struct xdp_md *ctx)
+{
+	struct meta_info *meta;
+	void *data, *data_end;
+	int ret;
+
+	/* Reserve space in front of the data pointer for our meta info.
+	 * (Notice drivers not supporting data_meta will fail here!)
+	 */
+	ret = bpf_xdp_adjust_meta(ctx, -(int)sizeof(*meta));
+	if (ret < 0)
+		return XDP_ABORTED;
+
+	/* These ctx pointers must be (re)read after bpf_xdp_adjust_meta,
+	 * which invalidates them, else the verifier will reject the prog.
+	 */
+	data = (void *)(unsigned long)ctx->data;
+
+	/* Check that data_meta has room for the meta_info struct */
+	meta = (void *)(unsigned long)ctx->data_meta;
+	if (meta + 1 > data)
+		return XDP_ABORTED;
+
+	meta->mark = 42;
+
+	return XDP_PASS;
+}
+
+SEC("tc_mark")
+int _tc_mark(struct __sk_buff *ctx)
+{
+	void *data      = (void *)(unsigned long)ctx->data;
+	void *data_end  = (void *)(unsigned long)ctx->data_end;
+	void *data_meta = (void *)(unsigned long)ctx->data_meta;
+	struct meta_info *meta = data_meta;
+
+	/* Check XDP gave us some data_meta */
+	if (meta + 1 > data) {
+		ctx->mark = 41;
+		/* Skip "accept" if no data_meta is available */
+		return TC_ACT_OK;
+	}
+
+	/* Hint: See func tc_cls_act_is_valid_access() for BPF_WRITE access */
+	ctx->mark = meta->mark; /* Transfer XDP-mark to SKB-mark */
+
+	return TC_ACT_OK;
+}
+
+/* Manually attaching these programs:
+export DEV=ixgbe2
+export FILE=xdp2skb_meta_kern.o
+
+# via TC command
+tc qdisc del dev $DEV clsact 2> /dev/null
+tc qdisc add dev $DEV clsact
+tc filter  add dev $DEV ingress prio 1 handle 1 bpf da obj $FILE sec tc_mark
+tc filter show dev $DEV ingress
+
+# XDP via IP command:
+ip link set dev $DEV xdp off
+ip link set dev $DEV xdp obj $FILE sec xdp_mark
+
+# Use iptables to "see" if SKBs are marked
+iptables -I INPUT -p icmp -m mark --mark 41  # == 0x29
+iptables -I INPUT -p icmp -m mark --mark 42  # == 0x2a
+
+# Hint: catch XDP_ABORTED errors via
+perf record -e xdp:*
+perf script
+
+*/
diff --git a/samples/bpf/xdp_rxq_info_kern.c b/samples/bpf/xdp_rxq_info_kern.c
new file mode 100644
index 0000000..3fd2092
--- /dev/null
+++ b/samples/bpf/xdp_rxq_info_kern.c
@@ -0,0 +1,96 @@
+/* SPDX-License-Identifier: GPL-2.0
+ * Copyright (c) 2017 Jesper Dangaard Brouer, Red Hat Inc.
+ *
+ *  Example of how to extract XDP RX-queue info
+ */
+#include <uapi/linux/bpf.h>
+#include "bpf_helpers.h"
+
+/* Config setup shared with userspace
+ *
+ * Userspace stores the ifindex in config_map, so we can verify that
+ * ctx->ingress_ifindex is correct (against the configured ifindex)
+ */
+struct config {
+	__u32 action;
+	int ifindex;
+};
+struct bpf_map_def SEC("maps") config_map = {
+	.type		= BPF_MAP_TYPE_ARRAY,
+	.key_size	= sizeof(int),
+	.value_size	= sizeof(struct config),
+	.max_entries	= 1,
+};
+
+/* Common stats data record (shared with userspace) */
+struct datarec {
+	__u64 processed;
+	__u64 issue;
+};
+
+struct bpf_map_def SEC("maps") stats_global_map = {
+	.type		= BPF_MAP_TYPE_PERCPU_ARRAY,
+	.key_size	= sizeof(u32),
+	.value_size	= sizeof(struct datarec),
+	.max_entries	= 1,
+};
+
+#define MAX_RXQs 64
+
+/* Stats per rx_queue_index (per CPU) */
+struct bpf_map_def SEC("maps") rx_queue_index_map = {
+	.type		= BPF_MAP_TYPE_PERCPU_ARRAY,
+	.key_size	= sizeof(u32),
+	.value_size	= sizeof(struct datarec),
+	.max_entries	= MAX_RXQs + 1,
+};
+
+SEC("xdp_prog0")
+int  xdp_prognum0(struct xdp_md *ctx)
+{
+	void *data_end = (void *)(long)ctx->data_end;
+	void *data     = (void *)(long)ctx->data;
+	struct datarec *rec, *rxq_rec;
+	int ingress_ifindex;
+	struct config *config;
+	u32 key = 0;
+
+	/* Global stats record */
+	rec = bpf_map_lookup_elem(&stats_global_map, &key);
+	if (!rec)
+		return XDP_ABORTED;
+	rec->processed++;
+
+	/* Accessing ctx->ingress_ifindex causes the kernel to rewrite the
+	 * BPF instructions to access xdp_rxq->dev->ifindex
+	 */
+	ingress_ifindex = ctx->ingress_ifindex;
+
+	config = bpf_map_lookup_elem(&config_map, &key);
+	if (!config)
+		return XDP_ABORTED;
+
+	/* Simple test: check ctx provided ifindex is as expected */
+	if (ingress_ifindex != config->ifindex) {
+		/* count this error case */
+		rec->issue++;
+		return XDP_ABORTED;
+	}
+
+	/* Update stats per rx_queue_index. Handle the case where
+	 * rx_queue_index is larger than the stats map can hold.
+	 */
+	key = ctx->rx_queue_index;
+	if (key >= MAX_RXQs)
+		key = MAX_RXQs;
+	rxq_rec = bpf_map_lookup_elem(&rx_queue_index_map, &key);
+	if (!rxq_rec)
+		return XDP_ABORTED;
+	rxq_rec->processed++;
+	if (key == MAX_RXQs)
+		rxq_rec->issue++;
+
+	return config->action;
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/samples/bpf/xdp_rxq_info_user.c b/samples/bpf/xdp_rxq_info_user.c
new file mode 100644
index 0000000..32430e8
--- /dev/null
+++ b/samples/bpf/xdp_rxq_info_user.c
@@ -0,0 +1,531 @@
+/* SPDX-License-Identifier: GPL-2.0
+ * Copyright (c) 2017 Jesper Dangaard Brouer, Red Hat Inc.
+ */
+static const char *__doc__ = " XDP RX-queue info extract example\n\n"
+	"Monitor how many packets per sec (pps) are received\n"
+	"per NIC RX queue index and which CPU processed the packet\n"
+	;
+
+#include <errno.h>
+#include <signal.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <stdbool.h>
+#include <string.h>
+#include <unistd.h>
+#include <locale.h>
+#include <sys/resource.h>
+#include <getopt.h>
+#include <net/if.h>
+#include <time.h>
+
+#include <arpa/inet.h>
+#include <linux/if_link.h>
+
+#include "libbpf.h"
+#include "bpf_load.h"
+#include "bpf_util.h"
+
+static int ifindex = -1;
+static char ifname_buf[IF_NAMESIZE];
+static char *ifname;
+
+static __u32 xdp_flags;
+
+/* Exit return codes */
+#define EXIT_OK		0
+#define EXIT_FAIL		1
+#define EXIT_FAIL_OPTION	2
+#define EXIT_FAIL_XDP		3
+#define EXIT_FAIL_BPF		4
+#define EXIT_FAIL_MEM		5
+
+static const struct option long_options[] = {
+	{"help",	no_argument,		NULL, 'h' },
+	{"dev",		required_argument,	NULL, 'd' },
+	{"skb-mode",	no_argument,		NULL, 'S' },
+	{"sec",		required_argument,	NULL, 's' },
+	{"no-separators", no_argument,		NULL, 'z' },
+	{"action",	required_argument,	NULL, 'a' },
+	{0, 0, NULL,  0 }
+};
+
+static void int_exit(int sig)
+{
+	fprintf(stderr,
+		"Interrupted: Removing XDP program on ifindex:%d device:%s\n",
+		ifindex, ifname);
+	if (ifindex > -1)
+		set_link_xdp_fd(ifindex, -1, xdp_flags);
+	exit(EXIT_OK);
+}
+
+struct config {
+	__u32 action;
+	int ifindex;
+};
+#define XDP_ACTION_MAX (XDP_TX + 1)
+#define XDP_ACTION_MAX_STRLEN 11
+static const char *xdp_action_names[XDP_ACTION_MAX] = {
+	[XDP_ABORTED]	= "XDP_ABORTED",
+	[XDP_DROP]	= "XDP_DROP",
+	[XDP_PASS]	= "XDP_PASS",
+	[XDP_TX]	= "XDP_TX",
+};
+
+static const char *action2str(int action)
+{
+	if (action < XDP_ACTION_MAX)
+		return xdp_action_names[action];
+	return NULL;
+}
+
+static int parse_xdp_action(char *action_str)
+{
+	size_t maxlen;
+	__u64 action = -1;
+	int i;
+
+	for (i = 0; i < XDP_ACTION_MAX; i++) {
+		maxlen = XDP_ACTION_MAX_STRLEN;
+		if (strncmp(xdp_action_names[i], action_str, maxlen) == 0) {
+			action = i;
+			break;
+		}
+	}
+	return action;
+}
+
+static void list_xdp_actions(void)
+{
+	int i;
+
+	printf("Available XDP --action <options>\n");
+	for (i = 0; i < XDP_ACTION_MAX; i++)
+		printf("\t%s\n", xdp_action_names[i]);
+	printf("\n");
+}
+
+static void usage(char *argv[])
+{
+	int i;
+
+	printf("\nDOCUMENTATION:\n%s\n", __doc__);
+	printf(" Usage: %s (options-see-below)\n", argv[0]);
+	printf(" Listing options:\n");
+	for (i = 0; long_options[i].name != 0; i++) {
+		printf(" --%-12s", long_options[i].name);
+		if (long_options[i].flag != NULL)
+			printf(" flag (internal value:%d)",
+				*long_options[i].flag);
+		else
+			printf(" short-option: -%c",
+				long_options[i].val);
+		printf("\n");
+	}
+	printf("\n");
+	list_xdp_actions();
+}
+
+#define NANOSEC_PER_SEC 1000000000 /* 10^9 */
+static __u64 gettime(void)
+{
+	struct timespec t;
+	int res;
+
+	res = clock_gettime(CLOCK_MONOTONIC, &t);
+	if (res < 0) {
+		fprintf(stderr, "Error with gettimeofday! (%i)\n", res);
+		exit(EXIT_FAIL);
+	}
+	return (__u64) t.tv_sec * NANOSEC_PER_SEC + t.tv_nsec;
+}
+
+/* Common stats data record shared with _kern.c */
+struct datarec {
+	__u64 processed;
+	__u64 issue;
+};
+struct record {
+	__u64 timestamp;
+	struct datarec total;
+	struct datarec *cpu;
+};
+struct stats_record {
+	struct record stats;
+	struct record *rxq;
+};
+
+static struct datarec *alloc_record_per_cpu(void)
+{
+	unsigned int nr_cpus = bpf_num_possible_cpus();
+	struct datarec *array;
+	size_t size;
+
+	size = sizeof(struct datarec) * nr_cpus;
+	array = malloc(size);
+	if (!array) {
+		fprintf(stderr, "Mem alloc error (nr_cpus:%u)\n", nr_cpus);
+		exit(EXIT_FAIL_MEM);
+	}
+	memset(array, 0, size);
+	return array;
+}
+
+static struct record *alloc_record_per_rxq(void)
+{
+	unsigned int nr_rxqs = map_data[2].def.max_entries;
+	struct record *array;
+	size_t size;
+
+	size = sizeof(struct record) * nr_rxqs;
+	array = malloc(size);
+	if (!array) {
+		fprintf(stderr, "Mem alloc error (nr_rxqs:%u)\n", nr_rxqs);
+		exit(EXIT_FAIL_MEM);
+	}
+	memset(array, 0, size);
+	return array;
+}
+
+static struct stats_record *alloc_stats_record(void)
+{
+	unsigned int nr_rxqs = map_data[2].def.max_entries;
+	struct stats_record *rec;
+	int i;
+
+	rec = malloc(sizeof(*rec));
+	if (!rec) {
+		fprintf(stderr, "Mem alloc error\n");
+		exit(EXIT_FAIL_MEM);
+	}
+	memset(rec, 0, sizeof(*rec));
+	rec->rxq = alloc_record_per_rxq();
+	for (i = 0; i < nr_rxqs; i++)
+		rec->rxq[i].cpu = alloc_record_per_cpu();
+
+	rec->stats.cpu = alloc_record_per_cpu();
+	return rec;
+}
+
+static void free_stats_record(struct stats_record *r)
+{
+	unsigned int nr_rxqs = map_data[2].def.max_entries;
+	int i;
+
+	for (i = 0; i < nr_rxqs; i++)
+		free(r->rxq[i].cpu);
+
+	free(r->rxq);
+	free(r->stats.cpu);
+	free(r);
+}
+
+static bool map_collect_percpu(int fd, __u32 key, struct record *rec)
+{
+	/* For percpu maps, userspace gets a value per possible CPU */
+	unsigned int nr_cpus = bpf_num_possible_cpus();
+	struct datarec values[nr_cpus];
+	__u64 sum_processed = 0;
+	__u64 sum_issue = 0;
+	int i;
+
+	if ((bpf_map_lookup_elem(fd, &key, values)) != 0) {
+		fprintf(stderr,
+			"ERR: bpf_map_lookup_elem failed key:0x%X\n", key);
+		return false;
+	}
+	/* Get time as close as possible to reading map contents */
+	rec->timestamp = gettime();
+
+	/* Record and sum values from each CPU */
+	for (i = 0; i < nr_cpus; i++) {
+		rec->cpu[i].processed = values[i].processed;
+		sum_processed        += values[i].processed;
+		rec->cpu[i].issue = values[i].issue;
+		sum_issue        += values[i].issue;
+	}
+	rec->total.processed = sum_processed;
+	rec->total.issue     = sum_issue;
+	return true;
+}
+
+static void stats_collect(struct stats_record *rec)
+{
+	int fd, i, max_rxqs;
+
+	fd = map_data[1].fd; /* map: stats_global_map */
+	map_collect_percpu(fd, 0, &rec->stats);
+
+	fd = map_data[2].fd; /* map: rx_queue_index_map */
+	max_rxqs = map_data[2].def.max_entries;
+	for (i = 0; i < max_rxqs; i++)
+		map_collect_percpu(fd, i, &rec->rxq[i]);
+}
+
+static double calc_period(struct record *r, struct record *p)
+{
+	double period_ = 0;
+	__u64 period = 0;
+
+	period = r->timestamp - p->timestamp;
+	if (period > 0)
+		period_ = ((double) period / NANOSEC_PER_SEC);
+
+	return period_;
+}
+
+static __u64 calc_pps(struct datarec *r, struct datarec *p, double period_)
+{
+	__u64 packets = 0;
+	__u64 pps = 0;
+
+	if (period_ > 0) {
+		packets = r->processed - p->processed;
+		pps = packets / period_;
+	}
+	return pps;
+}
+
+static __u64 calc_errs_pps(struct datarec *r,
+			    struct datarec *p, double period_)
+{
+	__u64 packets = 0;
+	__u64 pps = 0;
+
+	if (period_ > 0) {
+		packets = r->issue - p->issue;
+		pps = packets / period_;
+	}
+	return pps;
+}
+
+static void stats_print(struct stats_record *stats_rec,
+			struct stats_record *stats_prev,
+			int action)
+{
+	unsigned int nr_cpus = bpf_num_possible_cpus();
+	unsigned int nr_rxqs = map_data[2].def.max_entries;
+	double pps = 0, err = 0;
+	struct record *rec, *prev;
+	double t;
+	int rxq;
+	int i;
+
+	/* Header */
+	printf("\nRunning XDP on dev:%s (ifindex:%d) action:%s\n",
+	       ifname, ifindex, action2str(action));
+
+	/* stats_global_map */
+	{
+		char *fmt_rx = "%-15s %-7d %'-11.0f %'-10.0f %s\n";
+		char *fm2_rx = "%-15s %-7s %'-11.0f\n";
+		char *errstr = "";
+
+		printf("%-15s %-7s %-11s %-11s\n",
+		       "XDP stats", "CPU", "pps", "issue-pps");
+
+		rec  =  &stats_rec->stats;
+		prev = &stats_prev->stats;
+		t = calc_period(rec, prev);
+		for (i = 0; i < nr_cpus; i++) {
+			struct datarec *r = &rec->cpu[i];
+			struct datarec *p = &prev->cpu[i];
+
+			pps = calc_pps     (r, p, t);
+			err = calc_errs_pps(r, p, t);
+			if (err > 0)
+				errstr = "invalid-ifindex";
+			if (pps > 0)
+				printf(fmt_rx, "XDP-RX CPU",
+					i, pps, err, errstr);
+		}
+		pps  = calc_pps     (&rec->total, &prev->total, t);
+		err  = calc_errs_pps(&rec->total, &prev->total, t);
+		printf(fm2_rx, "XDP-RX CPU", "total", pps, err);
+	}
+
+	/* rx_queue_index_map */
+	printf("\n%-15s %-7s %-11s %-11s\n",
+	       "RXQ stats", "RXQ:CPU", "pps", "issue-pps");
+
+	for (rxq = 0; rxq < nr_rxqs; rxq++) {
+		char *fmt_rx = "%-15s %3d:%-3d %'-11.0f %'-10.0f %s\n";
+		char *fm2_rx = "%-15s %3d:%-3s %'-11.0f\n";
+		char *errstr = "";
+		int rxq_ = rxq;
+
+		/* Last RXQ in map catches overflows */
+		if (rxq_ == nr_rxqs - 1)
+			rxq_ = -1;
+
+		rec  =  &stats_rec->rxq[rxq];
+		prev = &stats_prev->rxq[rxq];
+		t = calc_period(rec, prev);
+		for (i = 0; i < nr_cpus; i++) {
+			struct datarec *r = &rec->cpu[i];
+			struct datarec *p = &prev->cpu[i];
+
+			pps = calc_pps     (r, p, t);
+			err = calc_errs_pps(r, p, t);
+			if (err > 0) {
+				if (rxq_ == -1)
+					errstr = "map-overflow-RXQ";
+				else
+					errstr = "err";
+			}
+			if (pps > 0)
+				printf(fmt_rx, "rx_queue_index",
+				       rxq_, i, pps, err, errstr);
+		}
+		pps  = calc_pps     (&rec->total, &prev->total, t);
+		err  = calc_errs_pps(&rec->total, &prev->total, t);
+		if (pps || err)
+			printf(fm2_rx, "rx_queue_index", rxq_, "sum", pps, err);
+	}
+}
+
+
+/* Pointer swap trick */
+static inline void swap(struct stats_record **a, struct stats_record **b)
+{
+	struct stats_record *tmp;
+
+	tmp = *a;
+	*a = *b;
+	*b = tmp;
+}
+
+static void stats_poll(int interval, int action)
+{
+	struct stats_record *record, *prev;
+
+	record = alloc_stats_record();
+	prev   = alloc_stats_record();
+	stats_collect(record);
+
+	while (1) {
+		swap(&prev, &record);
+		stats_collect(record);
+		stats_print(record, prev, action);
+		sleep(interval);
+	}
+
+	free_stats_record(record);
+	free_stats_record(prev);
+}
+
+
+int main(int argc, char **argv)
+{
+	struct rlimit r = {10 * 1024 * 1024, RLIM_INFINITY};
+	bool use_separators = true;
+	struct config cfg = { 0 };
+	char filename[256];
+	int longindex = 0;
+	int interval = 2;
+	__u32 key = 0;
+	int opt, err;
+
+	char action_str_buf[XDP_ACTION_MAX_STRLEN + 1 /* for \0 */] = { 0 };
+	int action = XDP_PASS; /* Default action */
+	char *action_str = NULL;
+
+	snprintf(filename, sizeof(filename), "%s_kern.o", argv[0]);
+
+	if (setrlimit(RLIMIT_MEMLOCK, &r)) {
+		perror("setrlimit(RLIMIT_MEMLOCK)");
+		return 1;
+	}
+
+	if (load_bpf_file(filename)) {
+		fprintf(stderr, "ERR in load_bpf_file(): %s", bpf_log_buf);
+		return EXIT_FAIL;
+	}
+
+	if (!prog_fd[0]) {
+		fprintf(stderr, "ERR: load_bpf_file: %s\n", strerror(errno));
+		return EXIT_FAIL;
+	}
+
+	/* Parse command line args */
+	while ((opt = getopt_long(argc, argv, "hSd:",
+				  long_options, &longindex)) != -1) {
+		switch (opt) {
+		case 'd':
+			if (strlen(optarg) >= IF_NAMESIZE) {
+				fprintf(stderr, "ERR: --dev name too long\n");
+				goto error;
+			}
+			ifname = (char *)&ifname_buf;
+			strncpy(ifname, optarg, IF_NAMESIZE);
+			ifindex = if_nametoindex(ifname);
+			if (ifindex == 0) {
+				fprintf(stderr,
+					"ERR: --dev name unknown err(%d):%s\n",
+					errno, strerror(errno));
+				goto error;
+			}
+			break;
+		case 's':
+			interval = atoi(optarg);
+			break;
+		case 'S':
+			xdp_flags |= XDP_FLAGS_SKB_MODE;
+			break;
+		case 'z':
+			use_separators = false;
+			break;
+		case 'a':
+			action_str = (char *)&action_str_buf;
+			strncpy(action_str, optarg, XDP_ACTION_MAX_STRLEN);
+			break;
+		case 'h':
+		error:
+		default:
+			usage(argv);
+			return EXIT_FAIL_OPTION;
+		}
+	}
+	/* Required option */
+	if (ifindex == -1) {
+		fprintf(stderr, "ERR: required option --dev missing\n");
+		usage(argv);
+		return EXIT_FAIL_OPTION;
+	}
+	cfg.ifindex = ifindex;
+
+	/* Parse action string */
+	if (action_str) {
+		action = parse_xdp_action(action_str);
+		if (action < 0) {
+			fprintf(stderr, "ERR: Invalid XDP --action: %s\n",
+				action_str);
+			list_xdp_actions();
+			return EXIT_FAIL_OPTION;
+		}
+	}
+	cfg.action = action;
+
+	/* Trick to pretty-print with thousands separators: use %' */
+	if (use_separators)
+		setlocale(LC_NUMERIC, "en_US");
+
+	/* User-side setup ifindex in config_map */
+	err = bpf_map_update_elem(map_fd[0], &key, &cfg, 0);
+	if (err) {
+		fprintf(stderr, "Store config failed (err:%d)\n", err);
+		exit(EXIT_FAIL_BPF);
+	}
+
+	/* Remove XDP program when program is interrupted */
+	signal(SIGINT, int_exit);
+
+	if (set_link_xdp_fd(ifindex, prog_fd[0], xdp_flags) < 0) {
+		fprintf(stderr, "link set xdp fd failed\n");
+		return EXIT_FAIL_XDP;
+	}
+
+	stats_poll(interval, action);
+	return EXIT_OK;
+}
diff --git a/security/selinux/xfrm.c b/security/selinux/xfrm.c
index 56e354f..92818890 100644
--- a/security/selinux/xfrm.c
+++ b/security/selinux/xfrm.c
@@ -452,7 +452,7 @@ int selinux_xfrm_postroute_last(u32 sk_sid, struct sk_buff *skb,
 	if (dst) {
 		struct dst_entry *iter;
 
-		for (iter = dst; iter != NULL; iter = iter->child) {
+		for (iter = dst; iter != NULL; iter = xfrm_dst_child(iter)) {
 			struct xfrm_state *x = iter->xfrm;
 
 			if (x && selinux_authorizable_xfrm(x))
diff --git a/tools/bpf/Makefile b/tools/bpf/Makefile
index 07a66974..c8ec0ae 100644
--- a/tools/bpf/Makefile
+++ b/tools/bpf/Makefile
@@ -9,6 +9,35 @@
 CFLAGS += -Wall -O2
 CFLAGS += -D__EXPORTED_HEADERS__ -I../../include/uapi -I../../include
 
+ifeq ($(srctree),)
+srctree := $(patsubst %/,%,$(dir $(CURDIR)))
+srctree := $(patsubst %/,%,$(dir $(srctree)))
+endif
+
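+# Probe the toolchain: bpf_jit_disasm needs libbfd, and newer binutils
+# changed disassembler() to take four arguments (see the
+# DISASM_FOUR_ARGS_SIGNATURE define used by bpf_jit_disasm.c).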
+FEATURE_USER = .bpf
+FEATURE_TESTS = libbfd disassembler-four-args
+FEATURE_DISPLAY = libbfd disassembler-four-args
+
+check_feat := 1
+NON_CHECK_FEAT_TARGETS := clean bpftool_clean
+ifdef MAKECMDGOALS
+ifeq ($(filter-out $(NON_CHECK_FEAT_TARGETS),$(MAKECMDGOALS)),)
+  check_feat := 0
+endif
+endif
+
+ifeq ($(check_feat),1)
+ifeq ($(FEATURES_DUMP),)
+include $(srctree)/tools/build/Makefile.feature
+else
+include $(FEATURES_DUMP)
+endif
+endif
+
+ifeq ($(feature-disassembler-four-args), 1)
+CFLAGS += -DDISASM_FOUR_ARGS_SIGNATURE
+endif
+
 %.yacc.c: %.y
 	$(YACC) -o $@ -d $<
 
diff --git a/tools/bpf/bpf_jit_disasm.c b/tools/bpf/bpf_jit_disasm.c
index 75bf526..30044bc 100644
--- a/tools/bpf/bpf_jit_disasm.c
+++ b/tools/bpf/bpf_jit_disasm.c
@@ -72,7 +72,14 @@ static void get_asm_insns(uint8_t *image, size_t len, int opcodes)
 
 	disassemble_init_for_target(&info);
 
+#ifdef DISASM_FOUR_ARGS_SIGNATURE
+	disassemble = disassembler(info.arch,
+				   bfd_big_endian(bfdf),
+				   info.mach,
+				   bfdf);
+#else
 	disassemble = disassembler(bfdf);
+#endif
 	assert(disassemble);
 
 	do {
diff --git a/tools/bpf/bpftool/Documentation/Makefile b/tools/bpf/bpftool/Documentation/Makefile
index 37292bb..c462a92 100644
--- a/tools/bpf/bpftool/Documentation/Makefile
+++ b/tools/bpf/bpftool/Documentation/Makefile
@@ -3,12 +3,16 @@
 
 INSTALL ?= install
 RM ?= rm -f
+RMDIR ?= rmdir --ignore-fail-on-non-empty
 
-# Make the path relative to DESTDIR, not prefix
-ifndef DESTDIR
-prefix ?= /usr/local
+ifeq ($(V),1)
+  Q =
+else
+  Q = @
 endif
-mandir ?= $(prefix)/share/man
+
+prefix ?= /usr/local
+mandir ?= $(prefix)/man
 man8dir = $(mandir)/man8
 
 MAN8_RST = $(wildcard *.rst)
@@ -20,15 +24,21 @@
 man8: $(DOC_MAN8)
 
 $(OUTPUT)%.8: %.rst
-	rst2man $< > $@
+	$(QUIET_GEN)rst2man $< > $@
 
 clean:
-	$(call QUIET_CLEAN, Documentation) $(RM) $(DOC_MAN8)
+	$(call QUIET_CLEAN, Documentation)
+	$(Q)$(RM) $(DOC_MAN8)
 
 install: man
-	$(call QUIET_INSTALL, Documentation-man) \
-		$(INSTALL) -d -m 755 $(DESTDIR)$(man8dir); \
-		$(INSTALL) -m 644 $(DOC_MAN8) $(DESTDIR)$(man8dir);
+	$(call QUIET_INSTALL, Documentation-man)
+	$(Q)$(INSTALL) -d -m 755 $(DESTDIR)$(man8dir)
+	$(Q)$(INSTALL) -m 644 $(DOC_MAN8) $(DESTDIR)$(man8dir)
 
-.PHONY: man man8 clean install
+uninstall:
+	$(call QUIET_UNINST, Documentation-man)
+	$(Q)$(RM) $(addprefix $(DESTDIR)$(man8dir)/,$(_DOC_MAN8))
+	$(Q)$(RMDIR) $(DESTDIR)$(man8dir)
+
+.PHONY: man man8 clean install uninstall
 .DEFAULT_GOAL := man
diff --git a/tools/bpf/bpftool/Documentation/bpftool-cgroup.rst b/tools/bpf/bpftool/Documentation/bpftool-cgroup.rst
new file mode 100644
index 0000000..2fe2a1b
--- /dev/null
+++ b/tools/bpf/bpftool/Documentation/bpftool-cgroup.rst
@@ -0,0 +1,118 @@
+================
+bpftool-cgroup
+================
+-------------------------------------------------------------------------------
+tool for inspection and simple manipulation of eBPF progs
+-------------------------------------------------------------------------------
+
+:Manual section: 8
+
+SYNOPSIS
+========
+
+	**bpftool** [*OPTIONS*] **cgroup** *COMMAND*
+
+	*OPTIONS* := { { **-j** | **--json** } [{ **-p** | **--pretty** }] | { **-f** | **--bpffs** } }
+
+	*COMMANDS* :=
+	{ **show** | **list** | **attach** | **detach** | **help** }
+
+CGROUP COMMANDS
+===============
+
+|	**bpftool** **cgroup { show | list }** *CGROUP*
+|	**bpftool** **cgroup attach** *CGROUP* *ATTACH_TYPE* *PROG* [*ATTACH_FLAGS*]
+|	**bpftool** **cgroup detach** *CGROUP* *ATTACH_TYPE* *PROG*
+|	**bpftool** **cgroup help**
+|
+|	*PROG* := { **id** *PROG_ID* | **pinned** *FILE* | **tag** *PROG_TAG* }
+|	*ATTACH_TYPE* := { *ingress* | *egress* | *sock_create* | *sock_ops* | *device* }
+|	*ATTACH_FLAGS* := { *multi* | *override* }
+
+DESCRIPTION
+===========
+	**bpftool cgroup { show | list }** *CGROUP*
+		  List all programs attached to the cgroup *CGROUP*.
+
+		  Output will start with program ID followed by attach type,
+		  attach flags and program name.
+
+	**bpftool cgroup attach** *CGROUP* *ATTACH_TYPE* *PROG* [*ATTACH_FLAGS*]
+		  Attach program *PROG* to the cgroup *CGROUP* with attach type
+		  *ATTACH_TYPE* and optional *ATTACH_FLAGS*.
+
+		  *ATTACH_FLAGS* can be one of: **override**, meaning that if a
+		  sub-cgroup installs some bpf program, the program in this
+		  cgroup yields to the sub-cgroup program; or **multi**, meaning
+		  that if a sub-cgroup installs some bpf program, that cgroup
+		  program gets run in addition to the program in this cgroup.
+
+		  Only one program is allowed to be attached to a cgroup with
+		  no attach flags or the **override** flag. Attaching another
+		  program will release the old program and attach the new one.
+
+		  Multiple programs are allowed to be attached to a cgroup with
+		  **multi**. They are executed in FIFO order (those that were
+		  attached first, run first).
+
+		  Non-default *ATTACH_FLAGS* are supported by kernel version 4.14
+		  and later.
+
+		  *ATTACH_TYPE* can be one of:
+		  **ingress** ingress path of the inet socket (since 4.10);
+		  **egress** egress path of the inet socket (since 4.10);
+		  **sock_create** opening of an inet socket (since 4.10);
+		  **sock_ops** various socket operations (since 4.12);
+		  **device** device access (since 4.15).
+
+	**bpftool cgroup detach** *CGROUP* *ATTACH_TYPE* *PROG*
+		  Detach *PROG* from the cgroup *CGROUP* and attach type
+		  *ATTACH_TYPE*.
+
+	**bpftool cgroup help**
+		  Print short help message.
+
+OPTIONS
+=======
+	-h, --help
+		  Print short generic help message (similar to **bpftool help**).
+
+	-v, --version
+		  Print version number (similar to **bpftool version**).
+
+	-j, --json
+		  Generate JSON output. For commands that cannot produce JSON, this
+		  option has no effect.
+
+	-p, --pretty
+		  Generate human-readable JSON output. Implies **-j**.
+
+	-f, --bpffs
+		  Show file names of pinned programs.
+
+EXAMPLES
+========
+|
+| **# mount -t bpf none /sys/fs/bpf/**
+| **# mkdir /sys/fs/cgroup/test.slice**
+| **# bpftool prog load ./device_cgroup.o /sys/fs/bpf/prog**
+| **# bpftool cgroup attach /sys/fs/cgroup/test.slice/ device id 1 multi**
+
+**# bpftool cgroup list /sys/fs/cgroup/test.slice/**
+
+::
+
+    ID       AttachType      AttachFlags     Name
+    1        device          multi           bpf_prog1
+
+|
+| **# bpftool cgroup detach /sys/fs/cgroup/test.slice/ device id 1**
+| **# bpftool cgroup list /sys/fs/cgroup/test.slice/**
+
+::
+
+    ID       AttachType      AttachFlags     Name
+
+SEE ALSO
+========
+	**bpftool**\ (8), **bpftool-prog**\ (8), **bpftool-map**\ (8)
diff --git a/tools/bpf/bpftool/Documentation/bpftool-map.rst b/tools/bpf/bpftool/Documentation/bpftool-map.rst
index 9f51a26..0ab32b3 100644
--- a/tools/bpf/bpftool/Documentation/bpftool-map.rst
+++ b/tools/bpf/bpftool/Documentation/bpftool-map.rst
@@ -15,13 +15,13 @@
 	*OPTIONS* := { { **-j** | **--json** } [{ **-p** | **--pretty** }] | { **-f** | **--bpffs** } }
 
 	*COMMANDS* :=
-	{ **show** | **dump** | **update** | **lookup** | **getnext** | **delete**
+	{ **show** | **list** | **dump** | **update** | **lookup** | **getnext** | **delete**
 	| **pin** | **help** }
 
 MAP COMMANDS
 =============
 
-|	**bpftool** **map show**   [*MAP*]
+|	**bpftool** **map { show | list }**   [*MAP*]
 |	**bpftool** **map dump**    *MAP*
 |	**bpftool** **map update**  *MAP*  **key** *BYTES*   **value** *VALUE* [*UPDATE_FLAGS*]
 |	**bpftool** **map lookup**  *MAP*  **key** *BYTES*
@@ -36,7 +36,7 @@
 
 DESCRIPTION
 ===========
-	**bpftool map show**   [*MAP*]
+	**bpftool map { show | list }**   [*MAP*]
 		  Show information about loaded maps.  If *MAP* is specified
 		  show information only about given map, otherwise list all
 		  maps currently loaded on the system.
@@ -128,4 +128,4 @@
 
 SEE ALSO
 ========
-	**bpftool**\ (8), **bpftool-prog**\ (8)
+	**bpftool**\ (8), **bpftool-prog**\ (8), **bpftool-cgroup**\ (8)
diff --git a/tools/bpf/bpftool/Documentation/bpftool-prog.rst b/tools/bpf/bpftool/Documentation/bpftool-prog.rst
index 36e8d1c..e4ceee7 100644
--- a/tools/bpf/bpftool/Documentation/bpftool-prog.rst
+++ b/tools/bpf/bpftool/Documentation/bpftool-prog.rst
@@ -15,22 +15,23 @@
 	*OPTIONS* := { { **-j** | **--json** } [{ **-p** | **--pretty** }] | { **-f** | **--bpffs** } }
 
 	*COMMANDS* :=
-	{ **show** | **dump xlated** | **dump jited** | **pin** | **help** }
+	{ **show** | **list** | **dump xlated** | **dump jited** | **pin** | **load** | **help** }
 
 MAP COMMANDS
 =============
 
-|	**bpftool** **prog show** [*PROG*]
+|	**bpftool** **prog { show | list }** [*PROG*]
 |	**bpftool** **prog dump xlated** *PROG* [{**file** *FILE* | **opcodes**}]
 |	**bpftool** **prog dump jited**  *PROG* [{**file** *FILE* | **opcodes**}]
 |	**bpftool** **prog pin** *PROG* *FILE*
+|	**bpftool** **prog load** *OBJ* *FILE*
 |	**bpftool** **prog help**
 |
 |	*PROG* := { **id** *PROG_ID* | **pinned** *FILE* | **tag** *PROG_TAG* }
 
 DESCRIPTION
 ===========
-	**bpftool prog show** [*PROG*]
+	**bpftool prog { show | list }** [*PROG*]
 		  Show information about loaded programs.  If *PROG* is
 		  specified show information only about given program, otherwise
 		  list all programs currently loaded on the system.
@@ -57,6 +58,11 @@
 
 		  Note: *FILE* must be located in *bpffs* mount.
 
+	**bpftool prog load** *OBJ* *FILE*
+		  Load bpf program from binary *OBJ* and pin as *FILE*.
+
+		  Note: *FILE* must be located in *bpffs* mount.
+
 	**bpftool prog help**
 		  Print short help message.
 
@@ -126,8 +132,10 @@
 |
 | **# mount -t bpf none /sys/fs/bpf/**
 | **# bpftool prog pin id 10 /sys/fs/bpf/prog**
+| **# bpftool prog load ./my_prog.o /sys/fs/bpf/prog2**
 | **# ls -l /sys/fs/bpf/**
 |   -rw------- 1 root root 0 Jul 22 01:43 prog
+|   -rw------- 1 root root 0 Jul 22 01:44 prog2
 
 **# bpftool prog dump jited pinned /sys/fs/bpf/prog opcodes**
 
@@ -147,4 +155,4 @@
 
 SEE ALSO
 ========
-	**bpftool**\ (8), **bpftool-map**\ (8)
+	**bpftool**\ (8), **bpftool-map**\ (8), **bpftool-cgroup**\ (8)
diff --git a/tools/bpf/bpftool/Documentation/bpftool.rst b/tools/bpf/bpftool/Documentation/bpftool.rst
index 926c03d..20689a3 100644
--- a/tools/bpf/bpftool/Documentation/bpftool.rst
+++ b/tools/bpf/bpftool/Documentation/bpftool.rst
@@ -16,17 +16,19 @@
 
 	**bpftool** **version**
 
-	*OBJECT* := { **map** | **program** }
+	*OBJECT* := { **map** | **program** | **cgroup** }
 
 	*OPTIONS* := { { **-V** | **--version** } | { **-h** | **--help** }
 	| { **-j** | **--json** } [{ **-p** | **--pretty** }] }
 
 	*MAP-COMMANDS* :=
-	{ **show** | **dump** | **update** | **lookup** | **getnext** | **delete**
+	{ **show** | **list** | **dump** | **update** | **lookup** | **getnext** | **delete**
 	| **pin** | **help** }
 
-	*PROG-COMMANDS* := { **show** | **dump jited** | **dump xlated** | **pin**
-	| **help** }
+	*PROG-COMMANDS* := { **show** | **list** | **dump jited** | **dump xlated** | **pin**
+	| **load** | **help** }
+
+	*CGROUP-COMMANDS* := { **show** | **list** | **attach** | **detach** | **help** }
 
 DESCRIPTION
 ===========
@@ -53,4 +55,4 @@
 
 SEE ALSO
 ========
-	**bpftool-map**\ (8), **bpftool-prog**\ (8)
+	**bpftool-map**\ (8), **bpftool-prog**\ (8), **bpftool-cgroup**\ (8)
diff --git a/tools/bpf/bpftool/Makefile b/tools/bpf/bpftool/Makefile
index ec3052c..2237bc4 100644
--- a/tools/bpf/bpftool/Makefile
+++ b/tools/bpf/bpftool/Makefile
@@ -1,25 +1,10 @@
 include ../../scripts/Makefile.include
-
 include ../../scripts/utilities.mak
 
 ifeq ($(srctree),)
 srctree := $(patsubst %/,%,$(dir $(CURDIR)))
 srctree := $(patsubst %/,%,$(dir $(srctree)))
 srctree := $(patsubst %/,%,$(dir $(srctree)))
-#$(info Determined 'srctree' to be $(srctree))
-endif
-
-ifneq ($(objtree),)
-#$(info Determined 'objtree' to be $(objtree))
-endif
-
-ifneq ($(OUTPUT),)
-#$(info Determined 'OUTPUT' to be $(OUTPUT))
-# Adding $(OUTPUT) as a directory to look for source files,
-# because use generated output files as sources dependency
-# for flex/bison parsers.
-VPATH += $(OUTPUT)
-export VPATH
 endif
 
 ifeq ($(V),1)
@@ -28,16 +13,18 @@
   Q = @
 endif
 
-BPF_DIR	= $(srctree)/tools/lib/bpf/
+BPF_DIR = $(srctree)/tools/lib/bpf/
 
 ifneq ($(OUTPUT),)
-  BPF_PATH=$(OUTPUT)
+  BPF_PATH = $(OUTPUT)
 else
-  BPF_PATH=$(BPF_DIR)
+  BPF_PATH = $(BPF_DIR)
 endif
 
 LIBBPF = $(BPF_PATH)libbpf.a
 
+BPFTOOL_VERSION=$(shell make --no-print-directory -sC ../../.. kernelversion)
+
 $(LIBBPF): FORCE
 	$(Q)$(MAKE) -C $(BPF_DIR) OUTPUT=$(OUTPUT) $(OUTPUT)libbpf.a FEATURES_DUMP=$(FEATURE_DUMP_EXPORT)
 
@@ -45,7 +32,7 @@
 	$(call QUIET_CLEAN, libbpf)
 	$(Q)$(MAKE) -C $(BPF_DIR) OUTPUT=$(OUTPUT) clean >/dev/null
 
-prefix = /usr/local
+prefix ?= /usr/local
 bash_compdir ?= /usr/share/bash-completion/completions
 
 CC = gcc
@@ -53,14 +40,42 @@
 CFLAGS += -O2
 CFLAGS += -W -Wall -Wextra -Wno-unused-parameter -Wshadow
 CFLAGS += -D__EXPORTED_HEADERS__ -I$(srctree)/tools/include/uapi -I$(srctree)/tools/include -I$(srctree)/tools/lib/bpf -I$(srctree)/kernel/bpf/
+CFLAGS += -DBPFTOOL_VERSION='"$(BPFTOOL_VERSION)"'
 LIBS = -lelf -lbfd -lopcodes $(LIBBPF)
 
+INSTALL ?= install
+RM ?= rm -f
+
+FEATURE_USER = .bpftool
+FEATURE_TESTS = libbfd disassembler-four-args
+FEATURE_DISPLAY = libbfd disassembler-four-args
+
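+# Feature checks are skipped for targets that do not compile bpftool.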
+check_feat := 1
+NON_CHECK_FEAT_TARGETS := clean uninstall doc doc-clean doc-install doc-uninstall
+ifdef MAKECMDGOALS
+ifeq ($(filter-out $(NON_CHECK_FEAT_TARGETS),$(MAKECMDGOALS)),)
+  check_feat := 0
+endif
+endif
+
+ifeq ($(check_feat),1)
+ifeq ($(FEATURES_DUMP),)
+include $(srctree)/tools/build/Makefile.feature
+else
+include $(FEATURES_DUMP)
+endif
+endif
+
+ifeq ($(feature-disassembler-four-args), 1)
+CFLAGS += -DDISASM_FOUR_ARGS_SIGNATURE
+endif
+
 include $(wildcard *.d)
 
 all: $(OUTPUT)bpftool
 
-SRCS=$(wildcard *.c)
-OBJS=$(patsubst %.c,$(OUTPUT)%.o,$(SRCS)) $(OUTPUT)disasm.o
+SRCS = $(wildcard *.c)
+OBJS = $(patsubst %.c,$(OUTPUT)%.o,$(SRCS)) $(OUTPUT)disasm.o
 
 $(OUTPUT)disasm.o: $(srctree)/kernel/bpf/disasm.c
 	$(QUIET_CC)$(COMPILE.c) -MMD -o $@ $<
@@ -73,21 +88,34 @@
 
 clean: $(LIBBPF)-clean
 	$(call QUIET_CLEAN, bpftool)
-	$(Q)rm -rf $(OUTPUT)bpftool $(OUTPUT)*.o $(OUTPUT)*.d
+	$(Q)$(RM) $(OUTPUT)bpftool $(OUTPUT)*.o $(OUTPUT)*.d
 
-install:
-	install -m 0755 -d $(prefix)/sbin
-	install $(OUTPUT)bpftool $(prefix)/sbin/bpftool
-	install -m 0755 -d $(bash_compdir)
-	install -m 0644 bash-completion/bpftool $(bash_compdir)
+install: $(OUTPUT)bpftool
+	$(call QUIET_INSTALL, bpftool)
+	$(Q)$(INSTALL) -m 0755 -d $(DESTDIR)$(prefix)/sbin
+	$(Q)$(INSTALL) $(OUTPUT)bpftool $(DESTDIR)$(prefix)/sbin/bpftool
+	$(Q)$(INSTALL) -m 0755 -d $(DESTDIR)$(bash_compdir)
+	$(Q)$(INSTALL) -m 0644 bash-completion/bpftool $(DESTDIR)$(bash_compdir)
+
+uninstall:
+	$(call QUIET_UNINST, bpftool)
+	$(Q)$(RM) $(DESTDIR)$(prefix)/sbin/bpftool
+	$(Q)$(RM) $(DESTDIR)$(bash_compdir)/bpftool
 
 doc:
-	$(Q)$(MAKE) -C Documentation/
+	$(call descend,Documentation)
+
+doc-clean:
+	$(call descend,Documentation,clean)
 
 doc-install:
-	$(Q)$(MAKE) -C Documentation/ install
+	$(call descend,Documentation,install)
+
+doc-uninstall:
+	$(call descend,Documentation,uninstall)
 
 FORCE:
 
-.PHONY: all clean FORCE install doc doc-install
+.PHONY: all FORCE clean install uninstall
+.PHONY: doc doc-clean doc-install doc-uninstall
 .DEFAULT_GOAL := all
diff --git a/tools/bpf/bpftool/bash-completion/bpftool b/tools/bpf/bpftool/bash-completion/bpftool
index 7febee0..0137866 100644
--- a/tools/bpf/bpftool/bash-completion/bpftool
+++ b/tools/bpf/bpftool/bash-completion/bpftool
@@ -197,7 +197,7 @@
 
             local PROG_TYPE='id pinned tag'
             case $command in
-                show)
+                show|list)
                     [[ $prev != "$command" ]] && return 0
                     COMPREPLY=( $( compgen -W "$PROG_TYPE" -- "$cur" ) )
                     return 0
@@ -232,7 +232,7 @@
                     ;;
                 *)
                     [[ $prev == $object ]] && \
-                        COMPREPLY=( $( compgen -W 'dump help pin show' -- \
+                        COMPREPLY=( $( compgen -W 'dump help pin show list' -- \
                             "$cur" ) )
                     ;;
             esac
@@ -240,7 +240,7 @@
         map)
             local MAP_TYPE='id pinned'
             case $command in
-                show|dump)
+                show|list|dump)
                     case $prev in
                         $command)
                             COMPREPLY=( $( compgen -W "$MAP_TYPE" -- "$cur" ) )
@@ -343,7 +343,7 @@
                 *)
                     [[ $prev == $object ]] && \
                         COMPREPLY=( $( compgen -W 'delete dump getnext help \
-                            lookup pin show update' -- "$cur" ) )
+                            lookup pin show list update' -- "$cur" ) )
                     ;;
             esac
             ;;
diff --git a/tools/bpf/bpftool/cgroup.c b/tools/bpf/bpftool/cgroup.c
new file mode 100644
index 0000000..cae32a6
--- /dev/null
+++ b/tools/bpf/bpftool/cgroup.c
@@ -0,0 +1,308 @@
+// SPDX-License-Identifier: GPL-2.0+
+// Copyright (C) 2017 Facebook
+// Author: Roman Gushchin <guro@fb.com>
+
+#include <fcntl.h>
+#include <stdlib.h>
+#include <string.h>
+#include <sys/stat.h>
+#include <sys/types.h>
+#include <unistd.h>
+
+#include <bpf.h>
+
+#include "main.h"
+
+#define HELP_SPEC_ATTACH_FLAGS						\
+	"ATTACH_FLAGS := { multi | override }"
+
+#define HELP_SPEC_ATTACH_TYPES						\
+	"ATTACH_TYPE := { ingress | egress | sock_create | sock_ops | device }"
+
+static const char * const attach_type_strings[] = {
+	[BPF_CGROUP_INET_INGRESS] = "ingress",
+	[BPF_CGROUP_INET_EGRESS] = "egress",
+	[BPF_CGROUP_INET_SOCK_CREATE] = "sock_create",
+	[BPF_CGROUP_SOCK_OPS] = "sock_ops",
+	[BPF_CGROUP_DEVICE] = "device",
+	[__MAX_BPF_ATTACH_TYPE] = NULL,
+};
+
+static enum bpf_attach_type parse_attach_type(const char *str)
+{
+	enum bpf_attach_type type;
+
+	for (type = 0; type < __MAX_BPF_ATTACH_TYPE; type++) {
+		if (attach_type_strings[type] &&
+		    is_prefix(str, attach_type_strings[type]))
+			return type;
+	}
+
+	return __MAX_BPF_ATTACH_TYPE;
+}
+
+static int show_bpf_prog(int id, const char *attach_type_str,
+			 const char *attach_flags_str)
+{
+	struct bpf_prog_info info = {};
+	__u32 info_len = sizeof(info);
+	int prog_fd;
+
+	prog_fd = bpf_prog_get_fd_by_id(id);
+	if (prog_fd < 0)
+		return -1;
+
+	if (bpf_obj_get_info_by_fd(prog_fd, &info, &info_len)) {
+		close(prog_fd);
+		return -1;
+	}
+
+	if (json_output) {
+		jsonw_start_object(json_wtr);
+		jsonw_uint_field(json_wtr, "id", info.id);
+		jsonw_string_field(json_wtr, "attach_type",
+				   attach_type_str);
+		jsonw_string_field(json_wtr, "attach_flags",
+				   attach_flags_str);
+		jsonw_string_field(json_wtr, "name", info.name);
+		jsonw_end_object(json_wtr);
+	} else {
+		printf("%-8u %-15s %-15s %-15s\n", info.id,
+		       attach_type_str,
+		       attach_flags_str,
+		       info.name);
+	}
+
+	close(prog_fd);
+	return 0;
+}
+
+static int show_attached_bpf_progs(int cgroup_fd, enum bpf_attach_type type)
+{
+	__u32 prog_ids[1024] = {0};
+	char *attach_flags_str;
+	__u32 prog_cnt, iter;
+	__u32 attach_flags;
+	char buf[32];
+	int ret;
+
+	prog_cnt = ARRAY_SIZE(prog_ids);
+	ret = bpf_prog_query(cgroup_fd, type, 0, &attach_flags, prog_ids,
+			     &prog_cnt);
+	if (ret)
+		return ret;
+
+	if (prog_cnt == 0)
+		return 0;
+
+	switch (attach_flags) {
+	case BPF_F_ALLOW_MULTI:
+		attach_flags_str = "multi";
+		break;
+	case BPF_F_ALLOW_OVERRIDE:
+		attach_flags_str = "override";
+		break;
+	case 0:
+		attach_flags_str = "";
+		break;
+	default:
+		snprintf(buf, sizeof(buf), "unknown(%x)", attach_flags);
+		attach_flags_str = buf;
+	}
+
+	for (iter = 0; iter < prog_cnt; iter++)
+		show_bpf_prog(prog_ids[iter], attach_type_strings[type],
+			      attach_flags_str);
+
+	return 0;
+}
+
+static int do_show(int argc, char **argv)
+{
+	enum bpf_attach_type type;
+	int cgroup_fd;
+	int ret = -1;
+
+	if (argc < 1) {
+		p_err("too few parameters for cgroup show");
+		goto exit;
+	} else if (argc > 1) {
+		p_err("too many parameters for cgroup show");
+		goto exit;
+	}
+
+	cgroup_fd = open(argv[0], O_RDONLY);
+	if (cgroup_fd < 0) {
+		p_err("can't open cgroup %s", argv[0]);
+		goto exit;
+	}
+
+	if (json_output)
+		jsonw_start_array(json_wtr);
+	else
+		printf("%-8s %-15s %-15s %-15s\n", "ID", "AttachType",
+		       "AttachFlags", "Name");
+
+	for (type = 0; type < __MAX_BPF_ATTACH_TYPE; type++) {
+		/*
+		 * Not all attach types may be supported, so some of the
+		 * requests are expected to fail.
+		 * If we were able to show at least one attach type,
+		 * return 0.
+		 */
+		if (show_attached_bpf_progs(cgroup_fd, type) == 0)
+			ret = 0;
+	}
+
+	if (json_output)
+		jsonw_end_array(json_wtr);
+
+	close(cgroup_fd);
+exit:
+	return ret;
+}
+
+static int do_attach(int argc, char **argv)
+{
+	enum bpf_attach_type attach_type;
+	int cgroup_fd, prog_fd;
+	int attach_flags = 0;
+	int ret = -1;
+	int i;
+
+	if (argc < 4) {
+		p_err("too few parameters for cgroup attach");
+		goto exit;
+	}
+
+	cgroup_fd = open(argv[0], O_RDONLY);
+	if (cgroup_fd < 0) {
+		p_err("can't open cgroup %s", argv[0]);
+		goto exit;
+	}
+
+	attach_type = parse_attach_type(argv[1]);
+	if (attach_type == __MAX_BPF_ATTACH_TYPE) {
+		p_err("invalid attach type");
+		goto exit_cgroup;
+	}
+
+	argc -= 2;
+	argv = &argv[2];
+	prog_fd = prog_parse_fd(&argc, &argv);
+	if (prog_fd < 0)
+		goto exit_cgroup;
+
+	for (i = 0; i < argc; i++) {
+		if (is_prefix(argv[i], "multi")) {
+			attach_flags |= BPF_F_ALLOW_MULTI;
+		} else if (is_prefix(argv[i], "override")) {
+			attach_flags |= BPF_F_ALLOW_OVERRIDE;
+		} else {
+			p_err("unknown option: %s", argv[i]);
+			goto exit_prog;
+		}
+	}
+
+	if (bpf_prog_attach(prog_fd, cgroup_fd, attach_type, attach_flags)) {
+		p_err("failed to attach program");
+		goto exit_prog;
+	}
+
+	if (json_output)
+		jsonw_null(json_wtr);
+
+	ret = 0;
+
+exit_prog:
+	close(prog_fd);
+exit_cgroup:
+	close(cgroup_fd);
+exit:
+	return ret;
+}
+
+static int do_detach(int argc, char **argv)
+{
+	enum bpf_attach_type attach_type;
+	int prog_fd, cgroup_fd;
+	int ret = -1;
+
+	if (argc < 4) {
+		p_err("too few parameters for cgroup detach");
+		goto exit;
+	}
+
+	cgroup_fd = open(argv[0], O_RDONLY);
+	if (cgroup_fd < 0) {
+		p_err("can't open cgroup %s", argv[0]);
+		goto exit;
+	}
+
+	attach_type = parse_attach_type(argv[1]);
+	if (attach_type == __MAX_BPF_ATTACH_TYPE) {
+		p_err("invalid attach type");
+		goto exit_cgroup;
+	}
+
+	argc -= 2;
+	argv = &argv[2];
+	prog_fd = prog_parse_fd(&argc, &argv);
+	if (prog_fd < 0)
+		goto exit_cgroup;
+
+	if (bpf_prog_detach2(prog_fd, cgroup_fd, attach_type)) {
+		p_err("failed to detach program");
+		goto exit_prog;
+	}
+
+	if (json_output)
+		jsonw_null(json_wtr);
+
+	ret = 0;
+
+exit_prog:
+	close(prog_fd);
+exit_cgroup:
+	close(cgroup_fd);
+exit:
+	return ret;
+}
+
+static int do_help(int argc, char **argv)
+{
+	if (json_output) {
+		jsonw_null(json_wtr);
+		return 0;
+	}
+
+	fprintf(stderr,
+		"Usage: %s %s { show | list } CGROUP\n"
+		"       %s %s attach CGROUP ATTACH_TYPE PROG [ATTACH_FLAGS]\n"
+		"       %s %s detach CGROUP ATTACH_TYPE PROG\n"
+		"       %s %s help\n"
+		"\n"
+		"       " HELP_SPEC_ATTACH_TYPES "\n"
+		"       " HELP_SPEC_ATTACH_FLAGS "\n"
+		"       " HELP_SPEC_PROGRAM "\n"
+		"       " HELP_SPEC_OPTIONS "\n"
+		"",
+		bin_name, argv[-2], bin_name, argv[-2],
+		bin_name, argv[-2], bin_name, argv[-2]);
+
+	return 0;
+}
+
+static const struct cmd cmds[] = {
+	{ "show",	do_show },
+	{ "list",	do_show },
+	{ "attach",	do_attach },
+	{ "detach",	do_detach },
+	{ "help",	do_help },
+	{ 0 }
+};
+
+int do_cgroup(int argc, char **argv)
+{
+	return cmd_select(cmds, argc, argv, do_help);
+}
diff --git a/tools/bpf/bpftool/common.c b/tools/bpf/bpftool/common.c
index 2bd3b28..6601c95 100644
--- a/tools/bpf/bpftool/common.c
+++ b/tools/bpf/bpftool/common.c
@@ -44,7 +44,9 @@
 #include <unistd.h>
 #include <linux/limits.h>
 #include <linux/magic.h>
+#include <net/if.h>
 #include <sys/mount.h>
+#include <sys/stat.h>
 #include <sys/types.h>
 #include <sys/vfs.h>
 
@@ -163,13 +165,49 @@ int open_obj_pinned_any(char *path, enum bpf_obj_type exp_type)
 	return fd;
 }
 
-int do_pin_any(int argc, char **argv, int (*get_fd_by_id)(__u32))
+int do_pin_fd(int fd, const char *name)
 {
 	char err_str[ERR_MAX_LEN];
-	unsigned int id;
-	char *endptr;
 	char *file;
 	char *dir;
+	int err = 0;
+
+	err = bpf_obj_pin(fd, name);
+	if (!err)
+		goto out;
+
+	file = malloc(strlen(name) + 1);
+	strcpy(file, name);
+	dir = dirname(file);
+
+	if (errno != EPERM || is_bpffs(dir)) {
+		p_err("can't pin the object (%s): %s", name, strerror(errno));
+		goto out_free;
+	}
+
+	/* Attempt to mount bpffs, then retry pinning. */
+	err = mnt_bpffs(dir, err_str, ERR_MAX_LEN);
+	if (!err) {
+		err = bpf_obj_pin(fd, name);
+		if (err)
+			p_err("can't pin the object (%s): %s", name,
+			      strerror(errno));
+	} else {
+		err_str[ERR_MAX_LEN - 1] = '\0';
+		p_err("can't mount BPF file system to pin the object (%s): %s",
+		      name, err_str);
+	}
+
+out_free:
+	free(file);
+out:
+	return err;
+}
+
+int do_pin_any(int argc, char **argv, int (*get_fd_by_id)(__u32))
+{
+	unsigned int id;
+	char *endptr;
 	int err;
 	int fd;
 
@@ -195,35 +233,8 @@ int do_pin_any(int argc, char **argv, int (*get_fd_by_id)(__u32))
 		return -1;
 	}
 
-	err = bpf_obj_pin(fd, *argv);
-	if (!err)
-		goto out_close;
+	err = do_pin_fd(fd, *argv);
 
-	file = malloc(strlen(*argv) + 1);
-	strcpy(file, *argv);
-	dir = dirname(file);
-
-	if (errno != EPERM || is_bpffs(dir)) {
-		p_err("can't pin the object (%s): %s", *argv, strerror(errno));
-		goto out_free;
-	}
-
-	/* Attempt to mount bpffs, then retry pinning. */
-	err = mnt_bpffs(dir, err_str, ERR_MAX_LEN);
-	if (!err) {
-		err = bpf_obj_pin(fd, *argv);
-		if (err)
-			p_err("can't pin the object (%s): %s", *argv,
-			      strerror(errno));
-	} else {
-		err_str[ERR_MAX_LEN - 1] = '\0';
-		p_err("can't mount BPF file system to pin the object (%s): %s",
-		      *argv, err_str);
-	}
-
-out_free:
-	free(file);
-out_close:
 	close(fd);
 	return err;
 }
@@ -403,3 +414,53 @@ void delete_pinned_obj_table(struct pinned_obj_table *tab)
 		free(obj);
 	}
 }
+
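+/* Resolve ifindex to a name only if the given netns matches our own. */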
+static char *
+ifindex_to_name_ns(__u32 ifindex, __u32 ns_dev, __u32 ns_ino, char *buf)
+{
+	struct stat st;
+	int err;
+
+	err = stat("/proc/self/ns/net", &st);
+	if (err) {
+		p_err("Can't stat /proc/self/ns/net: %s", strerror(errno));
+		return NULL;
+	}
+
+	if (st.st_dev != ns_dev || st.st_ino != ns_ino)
+		return NULL;
+
+	return if_indextoname(ifindex, buf);
+}
+
+void print_dev_plain(__u32 ifindex, __u64 ns_dev, __u64 ns_inode)
+{
+	char name[IF_NAMESIZE];
+
+	if (!ifindex)
+		return;
+
+	printf(" dev ");
+	if (ifindex_to_name_ns(ifindex, ns_dev, ns_inode, name))
+		printf("%s", name);
+	else
+		printf("ifindex %u ns_dev %llu ns_ino %llu",
+		       ifindex, ns_dev, ns_inode);
+}
+
+void print_dev_json(__u32 ifindex, __u64 ns_dev, __u64 ns_inode)
+{
+	char name[IF_NAMESIZE];
+
+	if (!ifindex)
+		return;
+
+	jsonw_name(json_wtr, "dev");
+	jsonw_start_object(json_wtr);
+	jsonw_uint_field(json_wtr, "ifindex", ifindex);
+	jsonw_uint_field(json_wtr, "ns_dev", ns_dev);
+	jsonw_uint_field(json_wtr, "ns_inode", ns_inode);
+	if (ifindex_to_name_ns(ifindex, ns_dev, ns_inode, name))
+		jsonw_string_field(json_wtr, "ifname", name);
+	jsonw_end_object(json_wtr);
+}
diff --git a/tools/bpf/bpftool/jit_disasm.c b/tools/bpf/bpftool/jit_disasm.c
index 1551d39..57d32e8 100644
--- a/tools/bpf/bpftool/jit_disasm.c
+++ b/tools/bpf/bpftool/jit_disasm.c
@@ -107,7 +107,14 @@ void disasm_print_insn(unsigned char *image, ssize_t len, int opcodes)
 
 	disassemble_init_for_target(&info);
 
+#ifdef DISASM_FOUR_ARGS_SIGNATURE
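+	/* newer binutils take a four-argument disassembler() */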
+	disassemble = disassembler(info.arch,
+				   bfd_big_endian(bfdf),
+				   info.mach,
+				   bfdf);
+#else
 	disassemble = disassembler(bfdf);
+#endif
 	assert(disassemble);
 
 	if (json_output)
diff --git a/tools/bpf/bpftool/main.c b/tools/bpf/bpftool/main.c
index d294bc8..3a0396d 100644
--- a/tools/bpf/bpftool/main.c
+++ b/tools/bpf/bpftool/main.c
@@ -38,7 +38,6 @@
 #include <errno.h>
 #include <getopt.h>
 #include <linux/bpf.h>
-#include <linux/version.h>
 #include <stdio.h>
 #include <stdlib.h>
 #include <string.h>
@@ -85,7 +84,7 @@ static int do_help(int argc, char **argv)
 		"       %s batch file FILE\n"
 		"       %s version\n"
 		"\n"
-		"       OBJECT := { prog | map }\n"
+		"       OBJECT := { prog | map | cgroup }\n"
 		"       " HELP_SPEC_OPTIONS "\n"
 		"",
 		bin_name, bin_name, bin_name);
@@ -95,21 +94,13 @@ static int do_help(int argc, char **argv)
 
 static int do_version(int argc, char **argv)
 {
-	unsigned int version[3];
-
-	version[0] = LINUX_VERSION_CODE >> 16;
-	version[1] = LINUX_VERSION_CODE >> 8 & 0xf;
-	version[2] = LINUX_VERSION_CODE & 0xf;
-
 	if (json_output) {
 		jsonw_start_object(json_wtr);
 		jsonw_name(json_wtr, "version");
-		jsonw_printf(json_wtr, "\"%u.%u.%u\"",
-			     version[0], version[1], version[2]);
+		jsonw_printf(json_wtr, "\"%s\"", BPFTOOL_VERSION);
 		jsonw_end_object(json_wtr);
 	} else {
-		printf("%s v%u.%u.%u\n", bin_name,
-		       version[0], version[1], version[2]);
+		printf("%s v%s\n", bin_name, BPFTOOL_VERSION);
 	}
 	return 0;
 }
@@ -173,6 +164,7 @@ static const struct cmd cmds[] = {
 	{ "batch",	do_batch },
 	{ "prog",	do_prog },
 	{ "map",	do_map },
+	{ "cgroup",	do_cgroup },
 	{ "version",	do_version },
 	{ 0 }
 };
diff --git a/tools/bpf/bpftool/main.h b/tools/bpf/bpftool/main.h
index bff330b..65b526f 100644
--- a/tools/bpf/bpftool/main.h
+++ b/tools/bpf/bpftool/main.h
@@ -96,6 +96,8 @@ struct pinned_obj {
 int build_pinned_obj_table(struct pinned_obj_table *table,
 			   enum bpf_obj_type type);
 void delete_pinned_obj_table(struct pinned_obj_table *tab);
+void print_dev_plain(__u32 ifindex, __u64 ns_dev, __u64 ns_inode);
+void print_dev_json(__u32 ifindex, __u64 ns_dev, __u64 ns_inode);
 
 struct cmd {
 	const char *cmd;
@@ -111,9 +113,11 @@ char *get_fdinfo(int fd, const char *key);
 int open_obj_pinned(char *path);
 int open_obj_pinned_any(char *path, enum bpf_obj_type exp_type);
 int do_pin_any(int argc, char **argv, int (*get_fd_by_id)(__u32));
+int do_pin_fd(int fd, const char *name);
 
 int do_prog(int argc, char **arg);
 int do_map(int argc, char **arg);
+int do_cgroup(int argc, char **arg);
 
 int prog_parse_fd(int *argc, char ***argv);
 
diff --git a/tools/bpf/bpftool/map.c b/tools/bpf/bpftool/map.c
index a8c3a33..8d7db9d 100644
--- a/tools/bpf/bpftool/map.c
+++ b/tools/bpf/bpftool/map.c
@@ -861,7 +861,7 @@ static int do_help(int argc, char **argv)
 	}
 
 	fprintf(stderr,
-		"Usage: %s %s show   [MAP]\n"
+		"Usage: %s %s { show | list }   [MAP]\n"
 		"       %s %s dump    MAP\n"
 		"       %s %s update  MAP  key BYTES value VALUE [UPDATE_FLAGS]\n"
 		"       %s %s lookup  MAP  key BYTES\n"
@@ -885,6 +885,7 @@ static int do_help(int argc, char **argv)
 
 static const struct cmd cmds[] = {
 	{ "show",	do_show },
+	{ "list",	do_show },
 	{ "help",	do_help },
 	{ "dump",	do_dump },
 	{ "update",	do_update },
diff --git a/tools/bpf/bpftool/prog.c b/tools/bpf/bpftool/prog.c
index dded773..c6a28be 100644
--- a/tools/bpf/bpftool/prog.c
+++ b/tools/bpf/bpftool/prog.c
@@ -45,6 +45,7 @@
 #include <sys/stat.h>
 
 #include <bpf.h>
+#include <libbpf.h>
 
 #include "main.h"
 #include "disasm.h"
@@ -229,6 +230,8 @@ static void print_prog_json(struct bpf_prog_info *info, int fd)
 		     info->tag[0], info->tag[1], info->tag[2], info->tag[3],
 		     info->tag[4], info->tag[5], info->tag[6], info->tag[7]);
 
+	print_dev_json(info->ifindex, info->netns_dev, info->netns_ino);
+
 	if (info->load_time) {
 		char buf[32];
 
@@ -286,6 +289,7 @@ static void print_prog_plain(struct bpf_prog_info *info, int fd)
 
 	printf("tag ");
 	fprint_hex(stdout, info->tag, BPF_TAG_SIZE, "");
+	print_dev_plain(info->ifindex, info->netns_dev, info->netns_ino);
 	printf("\n");
 
 	if (info->load_time) {
@@ -402,6 +406,88 @@ static int do_show(int argc, char **argv)
 	return err;
 }
 
+#define SYM_MAX_NAME	256
+
+struct kernel_sym {
+	unsigned long address;
+	char name[SYM_MAX_NAME];
+};
+
+struct dump_data {
+	unsigned long address_call_base;
+	struct kernel_sym *sym_mapping;
+	__u32 sym_count;
+	char scratch_buff[SYM_MAX_NAME];
+};
+
+static int kernel_syms_cmp(const void *sym_a, const void *sym_b)
+{
+	return ((struct kernel_sym *)sym_a)->address -
+	       ((struct kernel_sym *)sym_b)->address;
+}
+
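+/* Load /proc/kallsyms ("<address> <type> <name>" per line) into an
+ * address-sorted array so that call targets can be resolved via bsearch().
+ */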
+static void kernel_syms_load(struct dump_data *dd)
+{
+	struct kernel_sym *sym;
+	char buff[256];
+	void *tmp, *address;
+	FILE *fp;
+
+	fp = fopen("/proc/kallsyms", "r");
+	if (!fp)
+		return;
+
+	while (!feof(fp)) {
+		if (!fgets(buff, sizeof(buff), fp))
+			break;
+		tmp = realloc(dd->sym_mapping,
+			      (dd->sym_count + 1) *
+			      sizeof(*dd->sym_mapping));
+		if (!tmp) {
+out:
+			free(dd->sym_mapping);
+			dd->sym_mapping = NULL;
+			fclose(fp);
+			return;
+		}
+		dd->sym_mapping = tmp;
+		sym = &dd->sym_mapping[dd->sym_count];
+		if (sscanf(buff, "%p %*c %s", &address, sym->name) != 2)
+			continue;
+		sym->address = (unsigned long)address;
+		if (!strcmp(sym->name, "__bpf_call_base")) {
+			dd->address_call_base = sym->address;
+			/* sysctl kernel.kptr_restrict was set */
+			if (!sym->address)
+				goto out;
+		}
+		if (sym->address)
+			dd->sym_count++;
+	}
+
+	fclose(fp);
+
+	qsort(dd->sym_mapping, dd->sym_count,
+	      sizeof(*dd->sym_mapping), kernel_syms_cmp);
+}
+
+static void kernel_syms_destroy(struct dump_data *dd)
+{
+	free(dd->sym_mapping);
+}
+
+static struct kernel_sym *kernel_syms_search(struct dump_data *dd,
+					     unsigned long key)
+{
+	struct kernel_sym sym = {
+		.address = key,
+	};
+
+	return dd->sym_mapping ?
+	       bsearch(&sym, dd->sym_mapping, dd->sym_count,
+		       sizeof(*dd->sym_mapping), kernel_syms_cmp) : NULL;
+}
+
 static void print_insn(struct bpf_verifier_env *env, const char *fmt, ...)
 {
 	va_list args;
@@ -411,8 +497,71 @@ static void print_insn(struct bpf_verifier_env *env, const char *fmt, ...)
 	va_end(args);
 }
 
-static void dump_xlated_plain(void *buf, unsigned int len, bool opcodes)
+static const char *print_call_pcrel(struct dump_data *dd,
+				    struct kernel_sym *sym,
+				    unsigned long address,
+				    const struct bpf_insn *insn)
 {
+	if (sym)
+		snprintf(dd->scratch_buff, sizeof(dd->scratch_buff),
+			 "%+d#%s", insn->off, sym->name);
+	else
+		snprintf(dd->scratch_buff, sizeof(dd->scratch_buff),
+			 "%+d#0x%lx", insn->off, address);
+	return dd->scratch_buff;
+}
+
+static const char *print_call_helper(struct dump_data *dd,
+				     struct kernel_sym *sym,
+				     unsigned long address)
+{
+	if (sym)
+		snprintf(dd->scratch_buff, sizeof(dd->scratch_buff),
+			 "%s", sym->name);
+	else
+		snprintf(dd->scratch_buff, sizeof(dd->scratch_buff),
+			 "0x%lx", address);
+	return dd->scratch_buff;
+}
+
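+/* Print a call target, resolved to a kallsyms name when possible. */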
+static const char *print_call(void *private_data,
+			      const struct bpf_insn *insn)
+{
+	struct dump_data *dd = private_data;
+	unsigned long address = dd->address_call_base + insn->imm;
+	struct kernel_sym *sym;
+
+	sym = kernel_syms_search(dd, address);
+	if (insn->src_reg == BPF_PSEUDO_CALL)
+		return print_call_pcrel(dd, sym, address, insn);
+	else
+		return print_call_helper(dd, sym, address);
+}
+
+static const char *print_imm(void *private_data,
+			     const struct bpf_insn *insn,
+			     __u64 full_imm)
+{
+	struct dump_data *dd = private_data;
+
+	if (insn->src_reg == BPF_PSEUDO_MAP_FD)
+		snprintf(dd->scratch_buff, sizeof(dd->scratch_buff),
+			 "map[id:%u]", insn->imm);
+	else
+		snprintf(dd->scratch_buff, sizeof(dd->scratch_buff),
+			 "0x%llx", (unsigned long long)full_imm);
+	return dd->scratch_buff;
+}
+
+static void dump_xlated_plain(struct dump_data *dd, void *buf,
+			      unsigned int len, bool opcodes)
+{
+	const struct bpf_insn_cbs cbs = {
+		.cb_print	= print_insn,
+		.cb_call	= print_call,
+		.cb_imm		= print_imm,
+		.private_data	= dd,
+	};
 	struct bpf_insn *insn = buf;
 	bool double_insn = false;
 	unsigned int i;
@@ -426,7 +575,7 @@ static void dump_xlated_plain(void *buf, unsigned int len, bool opcodes)
 		double_insn = insn[i].code == (BPF_LD | BPF_IMM | BPF_DW);
 
 		printf("% 4d: ", i);
-		print_bpf_insn(print_insn, NULL, insn + i, true);
+		print_bpf_insn(&cbs, NULL, insn + i, true);
 
 		if (opcodes) {
 			printf("       ");
@@ -455,8 +604,15 @@ static void print_insn_json(struct bpf_verifier_env *env, const char *fmt, ...)
 	va_end(args);
 }
 
-static void dump_xlated_json(void *buf, unsigned int len, bool opcodes)
+static void dump_xlated_json(struct dump_data *dd, void *buf,
+			     unsigned int len, bool opcodes)
 {
+	const struct bpf_insn_cbs cbs = {
+		.cb_print	= print_insn_json,
+		.cb_call	= print_call,
+		.cb_imm		= print_imm,
+		.private_data	= dd,
+	};
 	struct bpf_insn *insn = buf;
 	bool double_insn = false;
 	unsigned int i;
@@ -471,7 +627,7 @@ static void dump_xlated_json(void *buf, unsigned int len, bool opcodes)
 
 		jsonw_start_object(json_wtr);
 		jsonw_name(json_wtr, "disasm");
-		print_bpf_insn(print_insn_json, NULL, insn + i, true);
+		print_bpf_insn(&cbs, NULL, insn + i, true);
 
 		if (opcodes) {
 			jsonw_name(json_wtr, "opcodes");
@@ -506,6 +662,7 @@ static void dump_xlated_json(void *buf, unsigned int len, bool opcodes)
 static int do_dump(int argc, char **argv)
 {
 	struct bpf_prog_info info = {};
+	struct dump_data dd = {};
 	__u32 len = sizeof(info);
 	unsigned int buf_size;
 	char *filepath = NULL;
@@ -593,6 +750,14 @@ static int do_dump(int argc, char **argv)
 		goto err_free;
 	}
 
+	if ((member_len == &info.jited_prog_len &&
+	     info.jited_prog_insns == 0) ||
+	    (member_len == &info.xlated_prog_len &&
+	     info.xlated_prog_insns == 0)) {
+		p_err("error retrieving insn dump: kernel.kptr_restrict set?");
+		goto err_free;
+	}
+
 	if (filepath) {
 		fd = open(filepath, O_WRONLY | O_CREAT | O_TRUNC, 0600);
 		if (fd < 0) {
@@ -609,17 +774,19 @@ static int do_dump(int argc, char **argv)
 			goto err_free;
 		}
 	} else {
-		if (member_len == &info.jited_prog_len)
+		if (member_len == &info.jited_prog_len) {
 			disasm_print_insn(buf, *member_len, opcodes);
-		else
+		} else {
+			kernel_syms_load(&dd);
 			if (json_output)
-				dump_xlated_json(buf, *member_len, opcodes);
+				dump_xlated_json(&dd, buf, *member_len, opcodes);
 			else
-				dump_xlated_plain(buf, *member_len, opcodes);
+				dump_xlated_plain(&dd, buf, *member_len, opcodes);
+			kernel_syms_destroy(&dd);
+		}
 	}
 
 	free(buf);
-
 	return 0;
 
 err_free:
@@ -637,6 +804,30 @@ static int do_pin(int argc, char **argv)
 	return err;
 }
 
+static int do_load(int argc, char **argv)
+{
+	struct bpf_object *obj;
+	int prog_fd;
+
+	if (argc != 2)
+		usage();
+
+	if (bpf_prog_load(argv[0], BPF_PROG_TYPE_UNSPEC, &obj, &prog_fd)) {
+		p_err("failed to load program");
+		return -1;
+	}
+
+	if (do_pin_fd(prog_fd, argv[1])) {
+		p_err("failed to pin program");
+		return -1;
+	}
+
+	if (json_output)
+		jsonw_null(json_wtr);
+
+	return 0;
+}
+
 static int do_help(int argc, char **argv)
 {
 	if (json_output) {
@@ -645,26 +836,29 @@ static int do_help(int argc, char **argv)
 	}
 
 	fprintf(stderr,
-		"Usage: %s %s show [PROG]\n"
+		"Usage: %s %s { show | list } [PROG]\n"
 		"       %s %s dump xlated PROG [{ file FILE | opcodes }]\n"
 		"       %s %s dump jited  PROG [{ file FILE | opcodes }]\n"
 		"       %s %s pin   PROG FILE\n"
+		"       %s %s load  OBJ  FILE\n"
 		"       %s %s help\n"
 		"\n"
 		"       " HELP_SPEC_PROGRAM "\n"
 		"       " HELP_SPEC_OPTIONS "\n"
 		"",
 		bin_name, argv[-2], bin_name, argv[-2], bin_name, argv[-2],
-		bin_name, argv[-2], bin_name, argv[-2]);
+		bin_name, argv[-2], bin_name, argv[-2], bin_name, argv[-2]);
 
 	return 0;
 }
 
 static const struct cmd cmds[] = {
 	{ "show",	do_show },
+	{ "list",	do_show },
 	{ "help",	do_help },
 	{ "dump",	do_dump },
 	{ "pin",	do_pin },
+	{ "load",	do_load },
 	{ 0 }
 };
 
diff --git a/tools/build/feature/Makefile b/tools/build/feature/Makefile
index 9698264..17f2c73 100644
--- a/tools/build/feature/Makefile
+++ b/tools/build/feature/Makefile
@@ -13,6 +13,7 @@
          test-hello.bin                         \
          test-libaudit.bin                      \
          test-libbfd.bin                        \
+         test-disassembler-four-args.bin        \
          test-liberty.bin                       \
          test-liberty-z.bin                     \
          test-cplus-demangle.bin                \
@@ -188,6 +189,9 @@
 $(OUTPUT)test-libbfd.bin:
 	$(BUILD) -DPACKAGE='"perf"' -lbfd -lz -liberty -ldl
 
+$(OUTPUT)test-disassembler-four-args.bin:
+	$(BUILD) -lbfd -lopcodes
+
 $(OUTPUT)test-liberty.bin:
 	$(CC) $(CFLAGS) -Wall -Werror -o $@ test-libbfd.c -DPACKAGE='"perf"' $(LDFLAGS) -lbfd -ldl -liberty
 
diff --git a/tools/build/feature/test-disassembler-four-args.c b/tools/build/feature/test-disassembler-four-args.c
new file mode 100644
index 0000000..45ce65c
--- /dev/null
+++ b/tools/build/feature/test-disassembler-four-args.c
@@ -0,0 +1,15 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <bfd.h>
+#include <dis-asm.h>
+
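+/* Compiles only where disassembler() takes four arguments (newer binutils). */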
+int main(void)
+{
+	bfd *abfd = bfd_openr(NULL, NULL);
+
+	disassembler(bfd_get_arch(abfd),
+		     bfd_big_endian(abfd),
+		     bfd_get_mach(abfd),
+		     abfd);
+
+	return 0;
+}
diff --git a/tools/include/uapi/linux/bpf.h b/tools/include/uapi/linux/bpf.h
index 4c223ab..4e8c60a 100644
--- a/tools/include/uapi/linux/bpf.h
+++ b/tools/include/uapi/linux/bpf.h
@@ -197,8 +197,14 @@ enum bpf_attach_type {
  */
 #define BPF_F_STRICT_ALIGNMENT	(1U << 0)
 
+/* when bpf_ldimm64->src_reg == BPF_PSEUDO_MAP_FD, bpf_ldimm64->imm == fd */
 #define BPF_PSEUDO_MAP_FD	1
 
+/* when bpf_call->src_reg == BPF_PSEUDO_CALL, bpf_call->imm == pc-relative
+ * offset to another bpf function
+ */
+#define BPF_PSEUDO_CALL		1
+
 /* flags for BPF_MAP_UPDATE_ELEM command */
 #define BPF_ANY		0 /* create new element or update existing */
 #define BPF_NOEXIST	1 /* create new element if it didn't exist */
@@ -677,6 +683,10 @@ union bpf_attr {
  *     @buf: buf to fill
  *     @buf_size: size of the buf
  *     Return : 0 on success or negative error code
+ *
+ * int bpf_override_return(pt_regs, rc)
+ *	@pt_regs: pointer to struct pt_regs
+ *	@rc: the return value to set
  */
 #define __BPF_FUNC_MAPPER(FN)		\
 	FN(unspec),			\
@@ -736,7 +746,8 @@ union bpf_attr {
 	FN(xdp_adjust_meta),		\
 	FN(perf_event_read_value),	\
 	FN(perf_prog_read_value),	\
-	FN(getsockopt),
+	FN(getsockopt),			\
+	FN(override_return),
 
 /* integer value in 'imm' field of BPF_CALL instruction selects which helper
  * function eBPF program intends to call
@@ -910,6 +921,9 @@ struct bpf_prog_info {
 	__u32 nr_map_ids;
 	__aligned_u64 map_ids;
 	char name[BPF_OBJ_NAME_LEN];
+	__u32 ifindex;
+	__u64 netns_dev;
+	__u64 netns_ino;
 } __attribute__((aligned(8)));
 
 struct bpf_map_info {
diff --git a/tools/include/uapi/linux/perf_event.h b/tools/include/uapi/linux/perf_event.h
index b9a4953..7695336 100644
--- a/tools/include/uapi/linux/perf_event.h
+++ b/tools/include/uapi/linux/perf_event.h
@@ -418,6 +418,27 @@ struct perf_event_attr {
 	__u16	__reserved_2;	/* align to __u64 */
 };
 
+/*
+ * Structure used by below PERF_EVENT_IOC_QUERY_BPF command
+ * to query bpf programs attached to the same perf tracepoint
+ * as the given perf event.
+ */
+struct perf_event_query_bpf {
+	/*
+	 * Length of the ids array below
+	 */
+	__u32	ids_len;
+	/*
+	 * Set by the kernel to indicate the number of
+	 * available programs
+	 */
+	__u32	prog_cnt;
+	/*
+	 * User provided buffer to store program ids
+	 */
+	__u32	ids[0];
+};
+
 #define perf_flags(attr)	(*(&(attr)->read_format + 1))
 
 /*
@@ -433,6 +454,7 @@ struct perf_event_attr {
 #define PERF_EVENT_IOC_ID		_IOR('$', 7, __u64 *)
 #define PERF_EVENT_IOC_SET_BPF		_IOW('$', 8, __u32)
 #define PERF_EVENT_IOC_PAUSE_OUTPUT	_IOW('$', 9, __u32)
+#define PERF_EVENT_IOC_QUERY_BPF	_IOWR('$', 10, struct perf_event_query_bpf *)
 
 enum perf_event_ioc_flags {
 	PERF_IOC_FLAG_GROUP		= 1U << 0,
diff --git a/tools/lib/bpf/Makefile b/tools/lib/bpf/Makefile
index 4555304..8ed43ae 100644
--- a/tools/lib/bpf/Makefile
+++ b/tools/lib/bpf/Makefile
@@ -213,10 +213,10 @@
 force:
 
 elfdep:
-	@if [ "$(feature-libelf)" != "1" ]; then echo "No libelf found"; exit -1 ; fi
+	@if [ "$(feature-libelf)" != "1" ]; then echo "No libelf found"; exit 1 ; fi
 
 bpfdep:
-	@if [ "$(feature-bpf)" != "1" ]; then echo "BPF API too old"; exit -1 ; fi
+	@if [ "$(feature-bpf)" != "1" ]; then echo "BPF API too old"; exit 1 ; fi
 
 # Declare the contents of the .PHONY variable as phony.  We keep that
 # information in a variable so we can use it in if_changed and friends.
diff --git a/tools/lib/bpf/bpf.h b/tools/lib/bpf/bpf.h
index 6534889..9f44c196 100644
--- a/tools/lib/bpf/bpf.h
+++ b/tools/lib/bpf/bpf.h
@@ -40,7 +40,7 @@ int bpf_create_map_in_map(enum bpf_map_type map_type, const char *name,
 			  __u32 map_flags);
 
 /* Recommend log buffer size */
-#define BPF_LOG_BUF_SIZE 65536
+#define BPF_LOG_BUF_SIZE (256 * 1024)
 int bpf_load_program_name(enum bpf_prog_type type, const char *name,
 			  const struct bpf_insn *insns,
 			  size_t insns_cnt, const char *license,
diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c
index 5aa45f8..e9c4b7c 100644
--- a/tools/lib/bpf/libbpf.c
+++ b/tools/lib/bpf/libbpf.c
@@ -174,12 +174,19 @@ struct bpf_program {
 	char *name;
 	char *section_name;
 	struct bpf_insn *insns;
-	size_t insns_cnt;
+	size_t insns_cnt, main_prog_cnt;
 	enum bpf_prog_type type;
 
-	struct {
+	struct reloc_desc {
+		enum {
+			RELO_LD64,
+			RELO_CALL,
+		} type;
 		int insn_idx;
-		int map_idx;
+		union {
+			int map_idx;
+			int text_off;
+		};
 	} *reloc_desc;
 	int nr_reloc;
 
@@ -234,6 +241,7 @@ struct bpf_object {
 		} *reloc;
 		int nr_reloc;
 		int maps_shndx;
+		int text_shndx;
 	} efile;
 	/*
 	 * All loaded bpf_object is linked in a list, which is
@@ -375,9 +383,13 @@ bpf_object__init_prog_names(struct bpf_object *obj)
 	size_t pi, si;
 
 	for (pi = 0; pi < obj->nr_programs; pi++) {
-		char *name = NULL;
+		const char *name = NULL;
 
 		prog = &obj->programs[pi];
+		if (prog->idx == obj->efile.text_shndx) {
+			name = ".text";
+			goto skip_search;
+		}
 
 		for (si = 0; si < symbols->d_size / sizeof(GElf_Sym) && !name;
 		     si++) {
@@ -387,6 +399,8 @@ bpf_object__init_prog_names(struct bpf_object *obj)
 				continue;
 			if (sym.st_shndx != prog->idx)
 				continue;
+			if (GELF_ST_BIND(sym.st_info) != STB_GLOBAL)
+				continue;
 
 			name = elf_strptr(obj->efile.elf,
 					  obj->efile.strtabidx,
@@ -403,7 +417,7 @@ bpf_object__init_prog_names(struct bpf_object *obj)
 				   prog->section_name);
 			return -EINVAL;
 		}
-
+skip_search:
 		prog->name = strdup(name);
 		if (!prog->name) {
 			pr_warning("failed to allocate memory for prog sym %s\n",
@@ -793,6 +807,8 @@ static int bpf_object__elf_collect(struct bpf_object *obj)
 		} else if ((sh.sh_type == SHT_PROGBITS) &&
 			   (sh.sh_flags & SHF_EXECINSTR) &&
 			   (data->d_size > 0)) {
+			if (strcmp(name, ".text") == 0)
+				obj->efile.text_shndx = idx;
 			err = bpf_object__add_program(obj, data->d_buf,
 						      data->d_size, name, idx);
 			if (err) {
@@ -854,11 +870,14 @@ bpf_object__find_prog_by_idx(struct bpf_object *obj, int idx)
 }
 
 static int
-bpf_program__collect_reloc(struct bpf_program *prog,
-			   size_t nr_maps, GElf_Shdr *shdr,
-			   Elf_Data *data, Elf_Data *symbols,
-			   int maps_shndx, struct bpf_map *maps)
+bpf_program__collect_reloc(struct bpf_program *prog, GElf_Shdr *shdr,
+			   Elf_Data *data, struct bpf_object *obj)
 {
+	Elf_Data *symbols = obj->efile.symbols;
+	int text_shndx = obj->efile.text_shndx;
+	int maps_shndx = obj->efile.maps_shndx;
+	struct bpf_map *maps = obj->maps;
+	size_t nr_maps = obj->nr_maps;
 	int i, nrels;
 
 	pr_debug("collecting relocating info for: '%s'\n",
@@ -891,8 +910,11 @@ bpf_program__collect_reloc(struct bpf_program *prog,
 				   GELF_R_SYM(rel.r_info));
 			return -LIBBPF_ERRNO__FORMAT;
 		}
+		pr_debug("relo for %lld value %lld name %d\n",
+			 (long long) (rel.r_info >> 32),
+			 (long long) sym.st_value, sym.st_name);
 
-		if (sym.st_shndx != maps_shndx) {
+		if (sym.st_shndx != maps_shndx && sym.st_shndx != text_shndx) {
 			pr_warning("Program '%s' contains non-map related relo data pointing to section %u\n",
 				   prog->section_name, sym.st_shndx);
 			return -LIBBPF_ERRNO__RELOC;
@@ -901,6 +923,17 @@ bpf_program__collect_reloc(struct bpf_program *prog,
 		insn_idx = rel.r_offset / sizeof(struct bpf_insn);
 		pr_debug("relocation: insn_idx=%u\n", insn_idx);
 
+		if (insns[insn_idx].code == (BPF_JMP | BPF_CALL)) {
+			if (insns[insn_idx].src_reg != BPF_PSEUDO_CALL) {
+				pr_warning("incorrect bpf_call opcode\n");
+				return -LIBBPF_ERRNO__RELOC;
+			}
+			prog->reloc_desc[i].type = RELO_CALL;
+			prog->reloc_desc[i].insn_idx = insn_idx;
+			prog->reloc_desc[i].text_off = sym.st_value;
+			continue;
+		}
+
 		if (insns[insn_idx].code != (BPF_LD | BPF_IMM | BPF_DW)) {
 			pr_warning("bpf: relocation: invalid relo for insns[%d].code 0x%x\n",
 				   insn_idx, insns[insn_idx].code);
@@ -922,6 +955,7 @@ bpf_program__collect_reloc(struct bpf_program *prog,
 			return -LIBBPF_ERRNO__RELOC;
 		}
 
+		prog->reloc_desc[i].type = RELO_LD64;
 		prog->reloc_desc[i].insn_idx = insn_idx;
 		prog->reloc_desc[i].map_idx = map_idx;
 	}
@@ -961,27 +995,76 @@ bpf_object__create_maps(struct bpf_object *obj)
 }
 
 static int
+bpf_program__reloc_text(struct bpf_program *prog, struct bpf_object *obj,
+			struct reloc_desc *relo)
+{
+	struct bpf_insn *insn, *new_insn;
+	struct bpf_program *text;
+	size_t new_cnt;
+
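+	/*
+	 * A bpf-to-bpf call relocation: on first use, append the whole
+	 * .text section to this program's instructions, then rewrite the
+	 * call's imm field as an offset into the appended copy.
+	 */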
+	if (relo->type != RELO_CALL)
+		return -LIBBPF_ERRNO__RELOC;
+
+	if (prog->idx == obj->efile.text_shndx) {
+		pr_warning("relo in .text insn %d into off %d\n",
+			   relo->insn_idx, relo->text_off);
+		return -LIBBPF_ERRNO__RELOC;
+	}
+
+	if (prog->main_prog_cnt == 0) {
+		text = bpf_object__find_prog_by_idx(obj, obj->efile.text_shndx);
+		if (!text) {
+			pr_warning("no .text section found yet relo into text exists\n");
+			return -LIBBPF_ERRNO__RELOC;
+		}
+		new_cnt = prog->insns_cnt + text->insns_cnt;
+		new_insn = realloc(prog->insns, new_cnt * sizeof(*insn));
+		if (!new_insn) {
+			pr_warning("oom in prog realloc\n");
+			return -ENOMEM;
+		}
+		memcpy(new_insn + prog->insns_cnt, text->insns,
+		       text->insns_cnt * sizeof(*insn));
+		prog->insns = new_insn;
+		prog->main_prog_cnt = prog->insns_cnt;
+		prog->insns_cnt = new_cnt;
+		pr_debug("added %zd insn from %s to prog %s\n",
+			 text->insns_cnt, text->section_name, prog->section_name);
+	}
+	insn = &prog->insns[relo->insn_idx];
+	insn->imm += prog->main_prog_cnt - relo->insn_idx;
+	return 0;
+}
+
+static int
 bpf_program__relocate(struct bpf_program *prog, struct bpf_object *obj)
 {
-	int i;
+	int i, err;
 
 	if (!prog || !prog->reloc_desc)
 		return 0;
 
 	for (i = 0; i < prog->nr_reloc; i++) {
-		int insn_idx, map_idx;
-		struct bpf_insn *insns = prog->insns;
+		if (prog->reloc_desc[i].type == RELO_LD64) {
+			struct bpf_insn *insns = prog->insns;
+			int insn_idx, map_idx;
 
-		insn_idx = prog->reloc_desc[i].insn_idx;
-		map_idx = prog->reloc_desc[i].map_idx;
+			insn_idx = prog->reloc_desc[i].insn_idx;
+			map_idx = prog->reloc_desc[i].map_idx;
 
-		if (insn_idx >= (int)prog->insns_cnt) {
-			pr_warning("relocation out of range: '%s'\n",
-				   prog->section_name);
-			return -LIBBPF_ERRNO__RELOC;
+			if (insn_idx >= (int)prog->insns_cnt) {
+				pr_warning("relocation out of range: '%s'\n",
+					   prog->section_name);
+				return -LIBBPF_ERRNO__RELOC;
+			}
+			insns[insn_idx].src_reg = BPF_PSEUDO_MAP_FD;
+			insns[insn_idx].imm = obj->maps[map_idx].fd;
+		} else {
+			err = bpf_program__reloc_text(prog, obj,
+						      &prog->reloc_desc[i]);
+			if (err)
+				return err;
 		}
-		insns[insn_idx].src_reg = BPF_PSEUDO_MAP_FD;
-		insns[insn_idx].imm = obj->maps[map_idx].fd;
 	}
 
 	zfree(&prog->reloc_desc);
@@ -1024,7 +1107,6 @@ static int bpf_object__collect_reloc(struct bpf_object *obj)
 		Elf_Data *data = obj->efile.reloc[i].data;
 		int idx = shdr->sh_info;
 		struct bpf_program *prog;
-		size_t nr_maps = obj->nr_maps;
 
 		if (shdr->sh_type != SHT_REL) {
 			pr_warning("internal error at %d\n", __LINE__);
@@ -1038,11 +1120,9 @@ static int bpf_object__collect_reloc(struct bpf_object *obj)
 			return -LIBBPF_ERRNO__RELOC;
 		}
 
-		err = bpf_program__collect_reloc(prog, nr_maps,
+		err = bpf_program__collect_reloc(prog,
 						 shdr, data,
-						 obj->efile.symbols,
-						 obj->efile.maps_shndx,
-						 obj->maps);
+						 obj);
 		if (err)
 			return err;
 	}
@@ -1195,6 +1275,8 @@ bpf_object__load_progs(struct bpf_object *obj)
 	int err;
 
 	for (i = 0; i < obj->nr_programs; i++) {
+		if (obj->programs[i].idx == obj->efile.text_shndx)
+			continue;
 		err = bpf_program__load(&obj->programs[i],
 					obj->license,
 					obj->kern_version);
@@ -1721,6 +1803,45 @@ BPF_PROG_TYPE_FNS(tracepoint, BPF_PROG_TYPE_TRACEPOINT);
 BPF_PROG_TYPE_FNS(xdp, BPF_PROG_TYPE_XDP);
 BPF_PROG_TYPE_FNS(perf_event, BPF_PROG_TYPE_PERF_EVENT);
 
+#define BPF_PROG_SEC(string, type) { string, sizeof(string) - 1, type }
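+/* Map of ELF section name prefixes to program types, used to guess the
+ * type when a caller passes BPF_PROG_TYPE_UNSPEC.
+ */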
+static const struct {
+	const char *sec;
+	size_t len;
+	enum bpf_prog_type prog_type;
+} section_names[] = {
+	BPF_PROG_SEC("socket",		BPF_PROG_TYPE_SOCKET_FILTER),
+	BPF_PROG_SEC("kprobe/",		BPF_PROG_TYPE_KPROBE),
+	BPF_PROG_SEC("kretprobe/",	BPF_PROG_TYPE_KPROBE),
+	BPF_PROG_SEC("tracepoint/",	BPF_PROG_TYPE_TRACEPOINT),
+	BPF_PROG_SEC("xdp",		BPF_PROG_TYPE_XDP),
+	BPF_PROG_SEC("perf_event",	BPF_PROG_TYPE_PERF_EVENT),
+	BPF_PROG_SEC("cgroup/skb",	BPF_PROG_TYPE_CGROUP_SKB),
+	BPF_PROG_SEC("cgroup/sock",	BPF_PROG_TYPE_CGROUP_SOCK),
+	BPF_PROG_SEC("cgroup/dev",	BPF_PROG_TYPE_CGROUP_DEVICE),
+	BPF_PROG_SEC("sockops",		BPF_PROG_TYPE_SOCK_OPS),
+	BPF_PROG_SEC("sk_skb",		BPF_PROG_TYPE_SK_SKB),
+};
+#undef BPF_PROG_SEC
+
+static enum bpf_prog_type bpf_program__guess_type(struct bpf_program *prog)
+{
+	int i;
+
+	if (!prog->section_name)
+		goto err;
+
+	for (i = 0; i < ARRAY_SIZE(section_names); i++)
+		if (strncmp(prog->section_name, section_names[i].sec,
+			    section_names[i].len) == 0)
+			return section_names[i].prog_type;
+
+err:
+	pr_warning("failed to guess program type based on section name %s\n",
+		   prog->section_name);
+
+	return BPF_PROG_TYPE_UNSPEC;
+}
+
 int bpf_map__fd(struct bpf_map *map)
 {
 	return map ? map->fd : -EINVAL;
@@ -1818,7 +1939,7 @@ long libbpf_get_error(const void *ptr)
 int bpf_prog_load(const char *file, enum bpf_prog_type type,
 		  struct bpf_object **pobj, int *prog_fd)
 {
-	struct bpf_program *prog;
+	struct bpf_program *prog, *first_prog = NULL;
 	struct bpf_object *obj;
 	int err;
 
@@ -1826,13 +1947,30 @@ int bpf_prog_load(const char *file, enum bpf_prog_type type,
 	if (IS_ERR(obj))
 		return -ENOENT;
 
-	prog = bpf_program__next(NULL, obj);
-	if (!prog) {
+	bpf_object__for_each_program(prog, obj) {
+		/*
+		 * If type is not specified, try to guess it based on
+		 * section name.
+		 */
+		if (type == BPF_PROG_TYPE_UNSPEC) {
+			type = bpf_program__guess_type(prog);
+			if (type == BPF_PROG_TYPE_UNSPEC) {
+				bpf_object__close(obj);
+				return -EINVAL;
+			}
+		}
+
+		bpf_program__set_type(prog, type);
+		if (prog->idx != obj->efile.text_shndx && !first_prog)
+			first_prog = prog;
+	}
+
+	if (!first_prog) {
+		pr_warning("object file doesn't contain bpf program\n");
 		bpf_object__close(obj);
 		return -ENOENT;
 	}
 
-	bpf_program__set_type(prog, type);
 	err = bpf_object__load(obj);
 	if (err) {
 		bpf_object__close(obj);
@@ -1840,6 +1978,6 @@ int bpf_prog_load(const char *file, enum bpf_prog_type type,
 	}
 
 	*pobj = obj;
-	*prog_fd = bpf_program__fd(prog);
+	*prog_fd = bpf_program__fd(first_prog);
 	return 0;
 }
diff --git a/tools/scripts/Makefile.include b/tools/scripts/Makefile.include
index 3fab179..fcb3ed0 100644
--- a/tools/scripts/Makefile.include
+++ b/tools/scripts/Makefile.include
@@ -99,5 +99,6 @@
 
 	QUIET_CLEAN    = @printf '  CLEAN    %s\n' $1;
 	QUIET_INSTALL  = @printf '  INSTALL  %s\n' $1;
+	QUIET_UNINST   = @printf '  UNINST   %s\n' $1;
   endif
 endif
diff --git a/tools/testing/selftests/bpf/Makefile b/tools/testing/selftests/bpf/Makefile
index 9316e64..a8aa7e2 100644
--- a/tools/testing/selftests/bpf/Makefile
+++ b/tools/testing/selftests/bpf/Makefile
@@ -18,9 +18,11 @@
 
 TEST_GEN_FILES = test_pkt_access.o test_xdp.o test_l4lb.o test_tcp_estats.o test_obj_id.o \
 	test_pkt_md_access.o test_xdp_redirect.o test_xdp_meta.o sockmap_parse_prog.o     \
-	sockmap_verdict_prog.o dev_cgroup.o
+	sockmap_verdict_prog.o dev_cgroup.o sample_ret0.o test_tracepoint.o \
+	test_l4lb_noinline.o test_xdp_noinline.o test_stacktrace_map.o
 
-TEST_PROGS := test_kmod.sh test_xdp_redirect.sh test_xdp_meta.sh
+TEST_PROGS := test_kmod.sh test_xdp_redirect.sh test_xdp_meta.sh \
+	test_offload.py
 
 include ../lib.mk
 
@@ -49,8 +51,13 @@
   CPU ?= generic
 endif
 
+CLANG_FLAGS = -I. -I./include/uapi -I../../../include/uapi \
+	      -Wno-compare-distinct-pointer-types
+
+$(OUTPUT)/test_l4lb_noinline.o: CLANG_FLAGS += -fno-inline
+$(OUTPUT)/test_xdp_noinline.o: CLANG_FLAGS += -fno-inline
+
 %.o: %.c
-	$(CLANG) -I. -I./include/uapi -I../../../include/uapi \
-		 -Wno-compare-distinct-pointer-types          \
+	$(CLANG) $(CLANG_FLAGS) \
 		 -O2 -target bpf -emit-llvm -c $< -o - |      \
 	$(LLC) -march=bpf -mcpu=$(CPU) -filetype=obj -o $@
diff --git a/tools/testing/selftests/bpf/bpf_helpers.h b/tools/testing/selftests/bpf/bpf_helpers.h
index fd9a17f..33cb00e 100644
--- a/tools/testing/selftests/bpf/bpf_helpers.h
+++ b/tools/testing/selftests/bpf/bpf_helpers.h
@@ -82,7 +82,8 @@ static int (*bpf_perf_event_read_value)(void *map, unsigned long long flags,
 static int (*bpf_perf_prog_read_value)(void *ctx, void *buf,
 				       unsigned int buf_size) =
 	(void *) BPF_FUNC_perf_prog_read_value;
-
+static int (*bpf_override_return)(void *ctx, unsigned long rc) =
+	(void *) BPF_FUNC_override_return;
 
 /* llvm builtin functions that eBPF C program may use to
  * emit BPF_LD_ABS and BPF_LD_IND instructions
diff --git a/tools/testing/selftests/bpf/config b/tools/testing/selftests/bpf/config
index 52d53ed..983dd25 100644
--- a/tools/testing/selftests/bpf/config
+++ b/tools/testing/selftests/bpf/config
@@ -3,3 +3,5 @@
 CONFIG_NET_CLS_BPF=m
 CONFIG_BPF_EVENTS=y
 CONFIG_TEST_BPF=m
+CONFIG_CGROUP_BPF=y
+CONFIG_NETDEVSIM=m
diff --git a/tools/testing/selftests/bpf/sample_ret0.c b/tools/testing/selftests/bpf/sample_ret0.c
new file mode 100644
index 0000000..fec9975
--- /dev/null
+++ b/tools/testing/selftests/bpf/sample_ret0.c
@@ -0,0 +1,7 @@
+/* SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause) */
+
+/* Sample program which should always load for testing control paths. */
+int func()
+{
+	return 0;
+}
diff --git a/tools/testing/selftests/bpf/test_align.c b/tools/testing/selftests/bpf/test_align.c
index 471bbbd..e19b410 100644
--- a/tools/testing/selftests/bpf/test_align.c
+++ b/tools/testing/selftests/bpf/test_align.c
@@ -64,11 +64,11 @@ static struct bpf_align_test tests[] = {
 		.matches = {
 			{1, "R1=ctx(id=0,off=0,imm=0)"},
 			{1, "R10=fp0"},
-			{1, "R3=inv2"},
-			{2, "R3=inv4"},
-			{3, "R3=inv8"},
-			{4, "R3=inv16"},
-			{5, "R3=inv32"},
+			{1, "R3_w=inv2"},
+			{2, "R3_w=inv4"},
+			{3, "R3_w=inv8"},
+			{4, "R3_w=inv16"},
+			{5, "R3_w=inv32"},
 		},
 	},
 	{
@@ -92,17 +92,17 @@ static struct bpf_align_test tests[] = {
 		.matches = {
 			{1, "R1=ctx(id=0,off=0,imm=0)"},
 			{1, "R10=fp0"},
-			{1, "R3=inv1"},
-			{2, "R3=inv2"},
-			{3, "R3=inv4"},
-			{4, "R3=inv8"},
-			{5, "R3=inv16"},
-			{6, "R3=inv1"},
-			{7, "R4=inv32"},
-			{8, "R4=inv16"},
-			{9, "R4=inv8"},
-			{10, "R4=inv4"},
-			{11, "R4=inv2"},
+			{1, "R3_w=inv1"},
+			{2, "R3_w=inv2"},
+			{3, "R3_w=inv4"},
+			{4, "R3_w=inv8"},
+			{5, "R3_w=inv16"},
+			{6, "R3_w=inv1"},
+			{7, "R4_w=inv32"},
+			{8, "R4_w=inv16"},
+			{9, "R4_w=inv8"},
+			{10, "R4_w=inv4"},
+			{11, "R4_w=inv2"},
 		},
 	},
 	{
@@ -121,12 +121,12 @@ static struct bpf_align_test tests[] = {
 		.matches = {
 			{1, "R1=ctx(id=0,off=0,imm=0)"},
 			{1, "R10=fp0"},
-			{1, "R3=inv4"},
-			{2, "R3=inv8"},
-			{3, "R3=inv10"},
-			{4, "R4=inv8"},
-			{5, "R4=inv12"},
-			{6, "R4=inv14"},
+			{1, "R3_w=inv4"},
+			{2, "R3_w=inv8"},
+			{3, "R3_w=inv10"},
+			{4, "R4_w=inv8"},
+			{5, "R4_w=inv12"},
+			{6, "R4_w=inv14"},
 		},
 	},
 	{
@@ -143,10 +143,10 @@ static struct bpf_align_test tests[] = {
 		.matches = {
 			{1, "R1=ctx(id=0,off=0,imm=0)"},
 			{1, "R10=fp0"},
-			{1, "R3=inv7"},
-			{2, "R3=inv7"},
-			{3, "R3=inv14"},
-			{4, "R3=inv56"},
+			{1, "R3_w=inv7"},
+			{2, "R3_w=inv7"},
+			{3, "R3_w=inv14"},
+			{4, "R3_w=inv56"},
 		},
 	},
 
@@ -185,18 +185,18 @@ static struct bpf_align_test tests[] = {
 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
 		.matches = {
 			{7, "R0=pkt(id=0,off=8,r=8,imm=0)"},
-			{7, "R3=inv(id=0,umax_value=255,var_off=(0x0; 0xff))"},
-			{8, "R3=inv(id=0,umax_value=510,var_off=(0x0; 0x1fe))"},
-			{9, "R3=inv(id=0,umax_value=1020,var_off=(0x0; 0x3fc))"},
-			{10, "R3=inv(id=0,umax_value=2040,var_off=(0x0; 0x7f8))"},
-			{11, "R3=inv(id=0,umax_value=4080,var_off=(0x0; 0xff0))"},
+			{7, "R3_w=inv(id=0,umax_value=255,var_off=(0x0; 0xff))"},
+			{8, "R3_w=inv(id=0,umax_value=510,var_off=(0x0; 0x1fe))"},
+			{9, "R3_w=inv(id=0,umax_value=1020,var_off=(0x0; 0x3fc))"},
+			{10, "R3_w=inv(id=0,umax_value=2040,var_off=(0x0; 0x7f8))"},
+			{11, "R3_w=inv(id=0,umax_value=4080,var_off=(0x0; 0xff0))"},
 			{18, "R3=pkt_end(id=0,off=0,imm=0)"},
-			{18, "R4=inv(id=0,umax_value=255,var_off=(0x0; 0xff))"},
-			{19, "R4=inv(id=0,umax_value=8160,var_off=(0x0; 0x1fe0))"},
-			{20, "R4=inv(id=0,umax_value=4080,var_off=(0x0; 0xff0))"},
-			{21, "R4=inv(id=0,umax_value=2040,var_off=(0x0; 0x7f8))"},
-			{22, "R4=inv(id=0,umax_value=1020,var_off=(0x0; 0x3fc))"},
-			{23, "R4=inv(id=0,umax_value=510,var_off=(0x0; 0x1fe))"},
+			{18, "R4_w=inv(id=0,umax_value=255,var_off=(0x0; 0xff))"},
+			{19, "R4_w=inv(id=0,umax_value=8160,var_off=(0x0; 0x1fe0))"},
+			{20, "R4_w=inv(id=0,umax_value=4080,var_off=(0x0; 0xff0))"},
+			{21, "R4_w=inv(id=0,umax_value=2040,var_off=(0x0; 0x7f8))"},
+			{22, "R4_w=inv(id=0,umax_value=1020,var_off=(0x0; 0x3fc))"},
+			{23, "R4_w=inv(id=0,umax_value=510,var_off=(0x0; 0x1fe))"},
 		},
 	},
 	{
@@ -217,16 +217,16 @@ static struct bpf_align_test tests[] = {
 		},
 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
 		.matches = {
-			{7, "R3=inv(id=0,umax_value=255,var_off=(0x0; 0xff))"},
-			{8, "R4=inv(id=0,umax_value=255,var_off=(0x0; 0xff))"},
-			{9, "R4=inv(id=0,umax_value=255,var_off=(0x0; 0xff))"},
-			{10, "R4=inv(id=0,umax_value=255,var_off=(0x0; 0xff))"},
-			{11, "R4=inv(id=0,umax_value=510,var_off=(0x0; 0x1fe))"},
-			{12, "R4=inv(id=0,umax_value=255,var_off=(0x0; 0xff))"},
-			{13, "R4=inv(id=0,umax_value=1020,var_off=(0x0; 0x3fc))"},
-			{14, "R4=inv(id=0,umax_value=255,var_off=(0x0; 0xff))"},
-			{15, "R4=inv(id=0,umax_value=2040,var_off=(0x0; 0x7f8))"},
-			{16, "R4=inv(id=0,umax_value=4080,var_off=(0x0; 0xff0))"},
+			{7, "R3_w=inv(id=0,umax_value=255,var_off=(0x0; 0xff))"},
+			{8, "R4_w=inv(id=0,umax_value=255,var_off=(0x0; 0xff))"},
+			{9, "R4_w=inv(id=0,umax_value=255,var_off=(0x0; 0xff))"},
+			{10, "R4_w=inv(id=0,umax_value=255,var_off=(0x0; 0xff))"},
+			{11, "R4_w=inv(id=0,umax_value=510,var_off=(0x0; 0x1fe))"},
+			{12, "R4_w=inv(id=0,umax_value=255,var_off=(0x0; 0xff))"},
+			{13, "R4_w=inv(id=0,umax_value=1020,var_off=(0x0; 0x3fc))"},
+			{14, "R4_w=inv(id=0,umax_value=255,var_off=(0x0; 0xff))"},
+			{15, "R4_w=inv(id=0,umax_value=2040,var_off=(0x0; 0x7f8))"},
+			{16, "R4_w=inv(id=0,umax_value=4080,var_off=(0x0; 0xff0))"},
 		},
 	},
 	{
@@ -257,14 +257,14 @@ static struct bpf_align_test tests[] = {
 		},
 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
 		.matches = {
-			{4, "R5=pkt(id=0,off=0,r=0,imm=0)"},
-			{5, "R5=pkt(id=0,off=14,r=0,imm=0)"},
-			{6, "R4=pkt(id=0,off=14,r=0,imm=0)"},
+			{4, "R5_w=pkt(id=0,off=0,r=0,imm=0)"},
+			{5, "R5_w=pkt(id=0,off=14,r=0,imm=0)"},
+			{6, "R4_w=pkt(id=0,off=14,r=0,imm=0)"},
 			{10, "R2=pkt(id=0,off=0,r=18,imm=0)"},
 			{10, "R5=pkt(id=0,off=14,r=18,imm=0)"},
-			{10, "R4=inv(id=0,umax_value=255,var_off=(0x0; 0xff))"},
-			{14, "R4=inv(id=0,umax_value=65535,var_off=(0x0; 0xffff))"},
-			{15, "R4=inv(id=0,umax_value=65535,var_off=(0x0; 0xffff))"},
+			{10, "R4_w=inv(id=0,umax_value=255,var_off=(0x0; 0xff))"},
+			{14, "R4_w=inv(id=0,umax_value=65535,var_off=(0x0; 0xffff))"},
+			{15, "R4_w=inv(id=0,umax_value=65535,var_off=(0x0; 0xffff))"},
 		},
 	},
 	{
@@ -320,11 +320,11 @@ static struct bpf_align_test tests[] = {
 			 * alignment of 4.
 			 */
 			{8, "R2=pkt(id=0,off=0,r=8,imm=0)"},
-			{8, "R6=inv(id=0,umax_value=1020,var_off=(0x0; 0x3fc))"},
+			{8, "R6_w=inv(id=0,umax_value=1020,var_off=(0x0; 0x3fc))"},
 			/* Offset is added to packet pointer R5, resulting in
 			 * known fixed offset, and variable offset from R6.
 			 */
-			{11, "R5=pkt(id=1,off=14,r=0,umax_value=1020,var_off=(0x0; 0x3fc))"},
+			{11, "R5_w=pkt(id=1,off=14,r=0,umax_value=1020,var_off=(0x0; 0x3fc))"},
 			/* At the time the word size load is performed from R5,
 			 * it's total offset is NET_IP_ALIGN + reg->off (0) +
 			 * reg->aux_off (14) which is 16.  Then the variable
@@ -336,11 +336,11 @@ static struct bpf_align_test tests[] = {
 			/* Variable offset is added to R5 packet pointer,
 			 * resulting in auxiliary alignment of 4.
 			 */
-			{18, "R5=pkt(id=2,off=0,r=0,umax_value=1020,var_off=(0x0; 0x3fc))"},
+			{18, "R5_w=pkt(id=2,off=0,r=0,umax_value=1020,var_off=(0x0; 0x3fc))"},
 			/* Constant offset is added to R5, resulting in
 			 * reg->off of 14.
 			 */
-			{19, "R5=pkt(id=2,off=14,r=0,umax_value=1020,var_off=(0x0; 0x3fc))"},
+			{19, "R5_w=pkt(id=2,off=14,r=0,umax_value=1020,var_off=(0x0; 0x3fc))"},
 			/* At the time the word size load is performed from R5,
 			 * its total fixed offset is NET_IP_ALIGN + reg->off
 			 * (14) which is 16.  Then the variable offset is 4-byte
@@ -352,18 +352,18 @@ static struct bpf_align_test tests[] = {
 			/* Constant offset is added to R5 packet pointer,
 			 * resulting in reg->off value of 14.
 			 */
-			{26, "R5=pkt(id=0,off=14,r=8"},
+			{26, "R5_w=pkt(id=0,off=14,r=8"},
 			/* Variable offset is added to R5, resulting in a
 			 * variable offset of (4n).
 			 */
-			{27, "R5=pkt(id=3,off=14,r=0,umax_value=1020,var_off=(0x0; 0x3fc))"},
+			{27, "R5_w=pkt(id=3,off=14,r=0,umax_value=1020,var_off=(0x0; 0x3fc))"},
 			/* Constant is added to R5 again, setting reg->off to 18. */
-			{28, "R5=pkt(id=3,off=18,r=0,umax_value=1020,var_off=(0x0; 0x3fc))"},
+			{28, "R5_w=pkt(id=3,off=18,r=0,umax_value=1020,var_off=(0x0; 0x3fc))"},
 			/* And once more we add a variable; resulting var_off
 			 * is still (4n), fixed offset is not changed.
 			 * Also, we create a new reg->id.
 			 */
-			{29, "R5=pkt(id=4,off=18,r=0,umax_value=2040,var_off=(0x0; 0x7fc))"},
+			{29, "R5_w=pkt(id=4,off=18,r=0,umax_value=2040,var_off=(0x0; 0x7fc))"},
 			/* At the time the word size load is performed from R5,
 			 * its total fixed offset is NET_IP_ALIGN + reg->off (18)
 			 * which is 20.  Then the variable offset is (4n), so
@@ -410,11 +410,11 @@ static struct bpf_align_test tests[] = {
 			 * alignment of 4.
 			 */
 			{8, "R2=pkt(id=0,off=0,r=8,imm=0)"},
-			{8, "R6=inv(id=0,umax_value=1020,var_off=(0x0; 0x3fc))"},
+			{8, "R6_w=inv(id=0,umax_value=1020,var_off=(0x0; 0x3fc))"},
 			/* Adding 14 makes R6 be (4n+2) */
-			{9, "R6=inv(id=0,umin_value=14,umax_value=1034,var_off=(0x2; 0x7fc))"},
+			{9, "R6_w=inv(id=0,umin_value=14,umax_value=1034,var_off=(0x2; 0x7fc))"},
 			/* Packet pointer has (4n+2) offset */
-			{11, "R5=pkt(id=1,off=0,r=0,umin_value=14,umax_value=1034,var_off=(0x2; 0x7fc))"},
+			{11, "R5_w=pkt(id=1,off=0,r=0,umin_value=14,umax_value=1034,var_off=(0x2; 0x7fc))"},
 			{13, "R4=pkt(id=1,off=4,r=0,umin_value=14,umax_value=1034,var_off=(0x2; 0x7fc))"},
 			/* At the time the word size load is performed from R5,
 			 * its total fixed offset is NET_IP_ALIGN + reg->off (0)
@@ -426,11 +426,11 @@ static struct bpf_align_test tests[] = {
 			/* Newly read value in R6 was shifted left by 2, so has
 			 * known alignment of 4.
 			 */
-			{18, "R6=inv(id=0,umax_value=1020,var_off=(0x0; 0x3fc))"},
+			{18, "R6_w=inv(id=0,umax_value=1020,var_off=(0x0; 0x3fc))"},
 			/* Added (4n) to packet pointer's (4n+2) var_off, giving
 			 * another (4n+2).
 			 */
-			{19, "R5=pkt(id=2,off=0,r=0,umin_value=14,umax_value=2054,var_off=(0x2; 0xffc))"},
+			{19, "R5_w=pkt(id=2,off=0,r=0,umin_value=14,umax_value=2054,var_off=(0x2; 0xffc))"},
 			{21, "R4=pkt(id=2,off=4,r=0,umin_value=14,umax_value=2054,var_off=(0x2; 0xffc))"},
 			/* At the time the word size load is performed from R5,
 			 * its total fixed offset is NET_IP_ALIGN + reg->off (0)
@@ -473,7 +473,7 @@ static struct bpf_align_test tests[] = {
 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
 		.result = REJECT,
 		.matches = {
-			{4, "R5=pkt(id=0,off=0,r=0,imm=0)"},
+			{4, "R5_w=pkt(id=0,off=0,r=0,imm=0)"},
 			/* R5 bitwise operator &= on pointer prohibited */
 		}
 	},
@@ -510,11 +510,11 @@ static struct bpf_align_test tests[] = {
 			 * alignment of 4.
 			 */
 			{7, "R2=pkt(id=0,off=0,r=8,imm=0)"},
-			{9, "R6=inv(id=0,umax_value=1020,var_off=(0x0; 0x3fc))"},
+			{9, "R6_w=inv(id=0,umax_value=1020,var_off=(0x0; 0x3fc))"},
 			/* Adding 14 makes R6 be (4n+2) */
-			{10, "R6=inv(id=0,umin_value=14,umax_value=1034,var_off=(0x2; 0x7fc))"},
+			{10, "R6_w=inv(id=0,umin_value=14,umax_value=1034,var_off=(0x2; 0x7fc))"},
 			/* New unknown value in R7 is (4n) */
-			{11, "R7=inv(id=0,umax_value=1020,var_off=(0x0; 0x3fc))"},
+			{11, "R7_w=inv(id=0,umax_value=1020,var_off=(0x0; 0x3fc))"},
 			/* Subtracting it from R6 blows our unsigned bounds */
 			{12, "R6=inv(id=0,smin_value=-1006,smax_value=1034,var_off=(0x2; 0xfffffffffffffffc))"},
 			/* Checked s>= 0 */
@@ -563,15 +563,15 @@ static struct bpf_align_test tests[] = {
 			 * alignment of 4.
 			 */
 			{7, "R2=pkt(id=0,off=0,r=8,imm=0)"},
-			{10, "R6=inv(id=0,umax_value=60,var_off=(0x0; 0x3c))"},
+			{10, "R6_w=inv(id=0,umax_value=60,var_off=(0x0; 0x3c))"},
 			/* Adding 14 makes R6 be (4n+2) */
-			{11, "R6=inv(id=0,umin_value=14,umax_value=74,var_off=(0x2; 0x7c))"},
+			{11, "R6_w=inv(id=0,umin_value=14,umax_value=74,var_off=(0x2; 0x7c))"},
 			/* Subtracting from packet pointer overflows ubounds */
-			{13, "R5=pkt(id=1,off=0,r=8,umin_value=18446744073709551542,umax_value=18446744073709551602,var_off=(0xffffffffffffff82; 0x7c))"},
+			{13, "R5_w=pkt(id=1,off=0,r=8,umin_value=18446744073709551542,umax_value=18446744073709551602,var_off=(0xffffffffffffff82; 0x7c))"},
 			/* New unknown value in R7 is (4n), >= 76 */
-			{15, "R7=inv(id=0,umin_value=76,umax_value=1096,var_off=(0x0; 0x7fc))"},
+			{15, "R7_w=inv(id=0,umin_value=76,umax_value=1096,var_off=(0x0; 0x7fc))"},
 			/* Adding it to packet pointer gives nice bounds again */
-			{16, "R5=pkt(id=2,off=0,r=0,umin_value=2,umax_value=1082,var_off=(0x2; 0x7fc))"},
+			{16, "R5_w=pkt(id=2,off=0,r=0,umin_value=2,umax_value=1082,var_off=(0x2; 0x7fc))"},
 			/* At the time the word size load is performed from R5,
 			 * its total fixed offset is NET_IP_ALIGN + reg->off (0)
 			 * which is 2.  Then the variable offset is (4n+2), so
diff --git a/tools/testing/selftests/bpf/test_dev_cgroup.c b/tools/testing/selftests/bpf/test_dev_cgroup.c
index 02c85d6..c1535b3 100644
--- a/tools/testing/selftests/bpf/test_dev_cgroup.c
+++ b/tools/testing/selftests/bpf/test_dev_cgroup.c
@@ -10,6 +10,8 @@
 #include <string.h>
 #include <errno.h>
 #include <assert.h>
+#include <sys/time.h>
+#include <sys/resource.h>
 
 #include <linux/bpf.h>
 #include <bpf/bpf.h>
@@ -23,15 +25,19 @@
 
 int main(int argc, char **argv)
 {
+	struct rlimit limit  = { RLIM_INFINITY, RLIM_INFINITY };
 	struct bpf_object *obj;
 	int error = EXIT_FAILURE;
 	int prog_fd, cgroup_fd;
 	__u32 prog_cnt;
 
+	if (setrlimit(RLIMIT_MEMLOCK, &limit) < 0)
+		perror("Unable to lift memlock rlimit");
+
 	if (bpf_prog_load(DEV_CGROUP_PROG, BPF_PROG_TYPE_CGROUP_DEVICE,
 			  &obj, &prog_fd)) {
 		printf("Failed to load DEV_CGROUP program\n");
-		goto err;
+		goto out;
 	}
 
 	if (setup_cgroup_environment()) {
@@ -89,5 +95,6 @@ int main(int argc, char **argv)
 err:
 	cleanup_cgroup_environment();
 
+out:
 	return error;
 }
diff --git a/tools/testing/selftests/bpf/test_l4lb_noinline.c b/tools/testing/selftests/bpf/test_l4lb_noinline.c
new file mode 100644
index 0000000..ba44a14
--- /dev/null
+++ b/tools/testing/selftests/bpf/test_l4lb_noinline.c
@@ -0,0 +1,473 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) 2017 Facebook
+#include <stddef.h>
+#include <stdbool.h>
+#include <string.h>
+#include <linux/pkt_cls.h>
+#include <linux/bpf.h>
+#include <linux/in.h>
+#include <linux/if_ether.h>
+#include <linux/ip.h>
+#include <linux/ipv6.h>
+#include <linux/icmp.h>
+#include <linux/icmpv6.h>
+#include <linux/tcp.h>
+#include <linux/udp.h>
+#include "bpf_helpers.h"
+#include "test_iptunnel_common.h"
+#include "bpf_endian.h"
+
+int _version SEC("version") = 1;
+
+static __u32 rol32(__u32 word, unsigned int shift)
+{
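+	/* (-shift) & 31 keeps the right-shift count in [0, 31], even for shift == 0 */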
+	return (word << shift) | (word >> ((-shift) & 31));
+}
+
+/* copy/paste of jhash from kernel sources, to make sure llvm
+ * can compile it into a valid sequence of BPF instructions
+ */
+#define __jhash_mix(a, b, c)			\
+{						\
+	a -= c;  a ^= rol32(c, 4);  c += b;	\
+	b -= a;  b ^= rol32(a, 6);  a += c;	\
+	c -= b;  c ^= rol32(b, 8);  b += a;	\
+	a -= c;  a ^= rol32(c, 16); c += b;	\
+	b -= a;  b ^= rol32(a, 19); a += c;	\
+	c -= b;  c ^= rol32(b, 4);  b += a;	\
+}
+
+#define __jhash_final(a, b, c)			\
+{						\
+	c ^= b; c -= rol32(b, 14);		\
+	a ^= c; a -= rol32(c, 11);		\
+	b ^= a; b -= rol32(a, 25);		\
+	c ^= b; c -= rol32(b, 16);		\
+	a ^= c; a -= rol32(c, 4);		\
+	b ^= a; b -= rol32(a, 14);		\
+	c ^= b; c -= rol32(b, 24);		\
+}
+
+#define JHASH_INITVAL		0xdeadbeef
+
+typedef unsigned int u32;
+
+static u32 jhash(const void *key, u32 length, u32 initval)
+{
+	u32 a, b, c;
+	const unsigned char *k = key;
+
+	a = b = c = JHASH_INITVAL + length + initval;
+
+	while (length > 12) {
+		a += *(u32 *)(k);
+		b += *(u32 *)(k + 4);
+		c += *(u32 *)(k + 8);
+		__jhash_mix(a, b, c);
+		length -= 12;
+		k += 12;
+	}
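+	/* the cases below deliberately fall through to accumulate the tail bytes */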
+	switch (length) {
+	case 12: c += (u32)k[11]<<24;
+	case 11: c += (u32)k[10]<<16;
+	case 10: c += (u32)k[9]<<8;
+	case 9:  c += k[8];
+	case 8:  b += (u32)k[7]<<24;
+	case 7:  b += (u32)k[6]<<16;
+	case 6:  b += (u32)k[5]<<8;
+	case 5:  b += k[4];
+	case 4:  a += (u32)k[3]<<24;
+	case 3:  a += (u32)k[2]<<16;
+	case 2:  a += (u32)k[1]<<8;
+	case 1:  a += k[0];
+		 __jhash_final(a, b, c);
+	case 0: /* Nothing left to add */
+		break;
+	}
+
+	return c;
+}
+
+static u32 __jhash_nwords(u32 a, u32 b, u32 c, u32 initval)
+{
+	a += initval;
+	b += initval;
+	c += initval;
+	__jhash_final(a, b, c);
+	return c;
+}
+
+static u32 jhash_2words(u32 a, u32 b, u32 initval)
+{
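+	/* (2 << 2) is the byte length of the two input words, as in the kernel's jhash_2words() */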
+	return __jhash_nwords(a, b, 0, initval + JHASH_INITVAL + (2 << 2));
+}
+
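+/* 0xff3f is IP_MF | IP_OFFSET in network byte order, as read on a little-endian host */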
+#define PCKT_FRAGMENTED 65343
+#define IPV4_HDR_LEN_NO_OPT 20
+#define IPV4_PLUS_ICMP_HDR 28
+#define IPV6_PLUS_ICMP_HDR 48
+#define RING_SIZE 2
+#define MAX_VIPS 12
+#define MAX_REALS 5
+#define CTL_MAP_SIZE 16
+#define CH_RINGS_SIZE (MAX_VIPS * RING_SIZE)
+#define F_IPV6 (1 << 0)
+#define F_HASH_NO_SRC_PORT (1 << 0)
+#define F_ICMP (1 << 0)
+#define F_SYN_SET (1 << 1)
+
+struct packet_description {
+	union {
+		__be32 src;
+		__be32 srcv6[4];
+	};
+	union {
+		__be32 dst;
+		__be32 dstv6[4];
+	};
+	union {
+		__u32 ports;
+		__u16 port16[2];
+	};
+	__u8 proto;
+	__u8 flags;
+};
+
+struct ctl_value {
+	union {
+		__u64 value;
+		__u32 ifindex;
+		__u8 mac[6];
+	};
+};
+
+struct vip_meta {
+	__u32 flags;
+	__u32 vip_num;
+};
+
+struct real_definition {
+	union {
+		__be32 dst;
+		__be32 dstv6[4];
+	};
+	__u8 flags;
+};
+
+struct vip_stats {
+	__u64 bytes;
+	__u64 pkts;
+};
+
+struct eth_hdr {
+	unsigned char eth_dest[ETH_ALEN];
+	unsigned char eth_source[ETH_ALEN];
+	unsigned short eth_proto;
+};
+
+struct bpf_map_def SEC("maps") vip_map = {
+	.type = BPF_MAP_TYPE_HASH,
+	.key_size = sizeof(struct vip),
+	.value_size = sizeof(struct vip_meta),
+	.max_entries = MAX_VIPS,
+};
+
+struct bpf_map_def SEC("maps") ch_rings = {
+	.type = BPF_MAP_TYPE_ARRAY,
+	.key_size = sizeof(__u32),
+	.value_size = sizeof(__u32),
+	.max_entries = CH_RINGS_SIZE,
+};
+
+struct bpf_map_def SEC("maps") reals = {
+	.type = BPF_MAP_TYPE_ARRAY,
+	.key_size = sizeof(__u32),
+	.value_size = sizeof(struct real_definition),
+	.max_entries = MAX_REALS,
+};
+
+struct bpf_map_def SEC("maps") stats = {
+	.type = BPF_MAP_TYPE_PERCPU_ARRAY,
+	.key_size = sizeof(__u32),
+	.value_size = sizeof(struct vip_stats),
+	.max_entries = MAX_VIPS,
+};
+
+struct bpf_map_def SEC("maps") ctl_array = {
+	.type = BPF_MAP_TYPE_ARRAY,
+	.key_size = sizeof(__u32),
+	.value_size = sizeof(struct ctl_value),
+	.max_entries = CTL_MAP_SIZE,
+};
+
+static __u32 get_packet_hash(struct packet_description *pckt,
+			     bool ipv6)
+{
+	if (ipv6)
+		return jhash_2words(jhash(pckt->srcv6, 16, MAX_VIPS),
+				    pckt->ports, CH_RINGS_SIZE);
+	else
+		return jhash_2words(pckt->src, pckt->ports, CH_RINGS_SIZE);
+}
+
+static bool get_packet_dst(struct real_definition **real,
+			   struct packet_description *pckt,
+			   struct vip_meta *vip_info,
+			   bool is_ipv6)
+{
+	__u32 hash = get_packet_hash(pckt, is_ipv6);
+	__u32 key = RING_SIZE * vip_info->vip_num + hash % RING_SIZE;
+	__u32 *real_pos;
+
+	if (hash != 0x358459b7 /* jhash of ipv4 packet */  &&
+	    hash != 0x2f4bc6bb /* jhash of ipv6 packet */)
+		return false;
+
+	real_pos = bpf_map_lookup_elem(&ch_rings, &key);
+	if (!real_pos)
+		return false;
+	key = *real_pos;
+	*real = bpf_map_lookup_elem(&reals, &key);
+	if (!(*real))
+		return false;
+	return true;
+}
+
+static int parse_icmpv6(void *data, void *data_end, __u64 off,
+			struct packet_description *pckt)
+{
+	struct icmp6hdr *icmp_hdr;
+	struct ipv6hdr *ip6h;
+
+	icmp_hdr = data + off;
+	if (icmp_hdr + 1 > data_end)
+		return TC_ACT_SHOT;
+	if (icmp_hdr->icmp6_type != ICMPV6_PKT_TOOBIG)
+		return TC_ACT_OK;
+	off += sizeof(struct icmp6hdr);
+	ip6h = data + off;
+	if (ip6h + 1 > data_end)
+		return TC_ACT_SHOT;
+	pckt->proto = ip6h->nexthdr;
+	pckt->flags |= F_ICMP;
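+	/* an ICMP error embeds the original packet, so its addresses are swapped */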
+	memcpy(pckt->srcv6, ip6h->daddr.s6_addr32, 16);
+	memcpy(pckt->dstv6, ip6h->saddr.s6_addr32, 16);
+	return TC_ACT_UNSPEC;
+}
+
+static int parse_icmp(void *data, void *data_end, __u64 off,
+		      struct packet_description *pckt)
+{
+	struct icmphdr *icmp_hdr;
+	struct iphdr *iph;
+
+	icmp_hdr = data + off;
+	if (icmp_hdr + 1 > data_end)
+		return TC_ACT_SHOT;
+	if (icmp_hdr->type != ICMP_DEST_UNREACH ||
+	    icmp_hdr->code != ICMP_FRAG_NEEDED)
+		return TC_ACT_OK;
+	off += sizeof(struct icmphdr);
+	iph = data + off;
+	if (iph + 1 > data_end)
+		return TC_ACT_SHOT;
+	if (iph->ihl != 5)
+		return TC_ACT_SHOT;
+	pckt->proto = iph->protocol;
+	pckt->flags |= F_ICMP;
+	pckt->src = iph->daddr;
+	pckt->dst = iph->saddr;
+	return TC_ACT_UNSPEC;
+}
+
+static bool parse_udp(void *data, __u64 off, void *data_end,
+		      struct packet_description *pckt)
+{
+	struct udphdr *udp;
+
+	udp = data + off;
+
+	if (udp + 1 > data_end)
+		return false;
+
+	if (!(pckt->flags & F_ICMP)) {
+		pckt->port16[0] = udp->source;
+		pckt->port16[1] = udp->dest;
+	} else {
+		pckt->port16[0] = udp->dest;
+		pckt->port16[1] = udp->source;
+	}
+	return true;
+}
+
+static bool parse_tcp(void *data, __u64 off, void *data_end,
+		      struct packet_description *pckt)
+{
+	struct tcphdr *tcp;
+
+	tcp = data + off;
+	if (tcp + 1 > data_end)
+		return false;
+
+	if (tcp->syn)
+		pckt->flags |= F_SYN_SET;
+
+	if (!(pckt->flags & F_ICMP)) {
+		pckt->port16[0] = tcp->source;
+		pckt->port16[1] = tcp->dest;
+	} else {
+		pckt->port16[0] = tcp->dest;
+		pckt->port16[1] = tcp->source;
+	}
+	return true;
+}
+
+static int process_packet(void *data, __u64 off, void *data_end,
+			  bool is_ipv6, struct __sk_buff *skb)
+{
+	void *pkt_start = (void *)(long)skb->data;
+	struct packet_description pckt = {};
+	struct eth_hdr *eth = pkt_start;
+	struct bpf_tunnel_key tkey = {};
+	struct vip_stats *data_stats;
+	struct real_definition *dst;
+	struct vip_meta *vip_info;
+	struct ctl_value *cval;
+	__u32 v4_intf_pos = 1;
+	__u32 v6_intf_pos = 2;
+	struct ipv6hdr *ip6h;
+	struct vip vip = {};
+	struct iphdr *iph;
+	int tun_flag = 0;
+	__u16 pkt_bytes;
+	__u64 iph_len;
+	__u32 ifindex;
+	__u8 protocol;
+	__u32 vip_num;
+	int action;
+
+	tkey.tunnel_ttl = 64;
+	if (is_ipv6) {
+		ip6h = data + off;
+		if (ip6h + 1 > data_end)
+			return TC_ACT_SHOT;
+
+		iph_len = sizeof(struct ipv6hdr);
+		protocol = ip6h->nexthdr;
+		pckt.proto = protocol;
+		pkt_bytes = bpf_ntohs(ip6h->payload_len);
+		off += iph_len;
+		if (protocol == IPPROTO_FRAGMENT) {
+			return TC_ACT_SHOT;
+		} else if (protocol == IPPROTO_ICMPV6) {
+			action = parse_icmpv6(data, data_end, off, &pckt);
+			if (action >= 0)
+				return action;
+			off += IPV6_PLUS_ICMP_HDR;
+		} else {
+			memcpy(pckt.srcv6, ip6h->saddr.s6_addr32, 16);
+			memcpy(pckt.dstv6, ip6h->daddr.s6_addr32, 16);
+		}
+	} else {
+		iph = data + off;
+		if (iph + 1 > data_end)
+			return TC_ACT_SHOT;
+		if (iph->ihl != 5)
+			return TC_ACT_SHOT;
+
+		protocol = iph->protocol;
+		pckt.proto = protocol;
+		pkt_bytes = bpf_ntohs(iph->tot_len);
+		off += IPV4_HDR_LEN_NO_OPT;
+
+		if (iph->frag_off & PCKT_FRAGMENTED)
+			return TC_ACT_SHOT;
+		if (protocol == IPPROTO_ICMP) {
+			action = parse_icmp(data, data_end, off, &pckt);
+			if (action >= 0)
+				return action;
+			off += IPV4_PLUS_ICMP_HDR;
+		} else {
+			pckt.src = iph->saddr;
+			pckt.dst = iph->daddr;
+		}
+	}
+	protocol = pckt.proto;
+
+	if (protocol == IPPROTO_TCP) {
+		if (!parse_tcp(data, off, data_end, &pckt))
+			return TC_ACT_SHOT;
+	} else if (protocol == IPPROTO_UDP) {
+		if (!parse_udp(data, off, data_end, &pckt))
+			return TC_ACT_SHOT;
+	} else {
+		return TC_ACT_SHOT;
+	}
+
+	if (is_ipv6)
+		memcpy(vip.daddr.v6, pckt.dstv6, 16);
+	else
+		vip.daddr.v4 = pckt.dst;
+
+	vip.dport = pckt.port16[1];
+	vip.protocol = pckt.proto;
+	vip_info = bpf_map_lookup_elem(&vip_map, &vip);
+	if (!vip_info) {
+		vip.dport = 0;
+		vip_info = bpf_map_lookup_elem(&vip_map, &vip);
+		if (!vip_info)
+			return TC_ACT_SHOT;
+		pckt.port16[1] = 0;
+	}
+
+	if (vip_info->flags & F_HASH_NO_SRC_PORT)
+		pckt.port16[0] = 0;
+
+	if (!get_packet_dst(&dst, &pckt, vip_info, is_ipv6))
+		return TC_ACT_SHOT;
+
+	if (dst->flags & F_IPV6) {
+		cval = bpf_map_lookup_elem(&ctl_array, &v6_intf_pos);
+		if (!cval)
+			return TC_ACT_SHOT;
+		ifindex = cval->ifindex;
+		memcpy(tkey.remote_ipv6, dst->dstv6, 16);
+		tun_flag = BPF_F_TUNINFO_IPV6;
+	} else {
+		cval = bpf_map_lookup_elem(&ctl_array, &v4_intf_pos);
+		if (!cval)
+			return TC_ACT_SHOT;
+		ifindex = cval->ifindex;
+		tkey.remote_ipv4 = dst->dst;
+	}
+	vip_num = vip_info->vip_num;
+	data_stats = bpf_map_lookup_elem(&stats, &vip_num);
+	if (!data_stats)
+		return TC_ACT_SHOT;
+	data_stats->pkts++;
+	data_stats->bytes += pkt_bytes;
+	bpf_skb_set_tunnel_key(skb, &tkey, sizeof(tkey), tun_flag);
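+	/* stash the tunnel destination in the dst MAC so user space can verify it */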
+	*(u32 *)eth->eth_dest = tkey.remote_ipv4;
+	return bpf_redirect(ifindex, 0);
+}
+
+SEC("l4lb-demo")
+int balancer_ingress(struct __sk_buff *ctx)
+{
+	void *data_end = (void *)(long)ctx->data_end;
+	void *data = (void *)(long)ctx->data;
+	struct eth_hdr *eth = data;
+	__u32 eth_proto;
+	__u32 nh_off;
+
+	nh_off = sizeof(struct eth_hdr);
+	if (data + nh_off > data_end)
+		return TC_ACT_SHOT;
+	eth_proto = eth->eth_proto;
+	if (eth_proto == bpf_htons(ETH_P_IP))
+		return process_packet(data, nh_off, data_end, false, ctx);
+	else if (eth_proto == bpf_htons(ETH_P_IPV6))
+		return process_packet(data, nh_off, data_end, true, ctx);
+	else
+		return TC_ACT_SHOT;
+}
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/test_offload.py b/tools/testing/selftests/bpf/test_offload.py
new file mode 100755
index 0000000..e3c750f
--- /dev/null
+++ b/tools/testing/selftests/bpf/test_offload.py
@@ -0,0 +1,771 @@
+#!/usr/bin/python3
+
+# Copyright (C) 2017 Netronome Systems, Inc.
+#
+# This software is licensed under the GNU General Public License Version 2,
+# June 1991 as shown in the file COPYING in the top-level directory of this
+# source tree.
+#
+# THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS"
+# WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING,
+# BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+# FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE
+# OF THE PROGRAM IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME
+# THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
+
+from datetime import datetime
+import argparse
+import json
+import os
+import pprint
+import random
+import string
+import subprocess
+import time
+
+logfile = None
+log_level = 1
+bpf_test_dir = os.path.dirname(os.path.realpath(__file__))
+pp = pprint.PrettyPrinter()
+devs = [] # devices we created for clean up
+files = [] # files to be removed
+netns = [] # net namespaces to be removed
+
+def log_get_sec(level=0):
+    return "*" * (log_level + level)
+
+def log_level_inc(add=1):
+    global log_level
+    log_level += add
+
+def log_level_dec(sub=1):
+    global log_level
+    log_level -= sub
+
+def log_level_set(level):
+    global log_level
+    log_level = level
+
+def log(header, data, level=None):
+    """
+    Output to an optional log.
+    """
+    if logfile is None:
+        return
+    if level is not None:
+        log_level_set(level)
+
+    if not isinstance(data, str):
+        data = pp.pformat(data)
+
+    if len(header):
+        logfile.write("\n" + log_get_sec() + " ")
+        logfile.write(header)
+    if len(header) and len(data.strip()):
+        logfile.write("\n")
+    logfile.write(data)
+
+def skip(cond, msg):
+    if not cond:
+        return
+    print("SKIP: " + msg)
+    log("SKIP: " + msg, "", level=1)
+    os.sys.exit(0)
+
+def fail(cond, msg):
+    if not cond:
+        return
+    print("FAIL: " + msg)
+    log("FAIL: " + msg, "", level=1)
+    os.sys.exit(1)
+
+def start_test(msg):
+    log(msg, "", level=1)
+    log_level_inc()
+    print(msg)
+
+def cmd(cmd, shell=True, include_stderr=False, background=False, fail=True):
+    """
+    Run a command in subprocess and return tuple of (retval, stdout);
+    optionally return stderr as well as third value.
+    """
+    proc = subprocess.Popen(cmd, shell=shell, stdout=subprocess.PIPE,
+                            stderr=subprocess.PIPE)
+    if background:
+        msg = "%s START: %s" % (log_get_sec(1),
+                                datetime.now().strftime("%H:%M:%S.%f"))
+        log("BKG " + proc.args, msg)
+        return proc
+
+    return cmd_result(proc, include_stderr=include_stderr, fail=fail)
+
+def cmd_result(proc, include_stderr=False, fail=False):
+    stdout, stderr = proc.communicate()
+    stdout = stdout.decode("utf-8")
+    stderr = stderr.decode("utf-8")
+    proc.stdout.close()
+    proc.stderr.close()
+
+    stderr = "\n" + stderr
+    if stderr[-1] == "\n":
+        stderr = stderr[:-1]
+
+    sec = log_get_sec(1)
+    log("CMD " + proc.args,
+        "RETCODE: %d\n%s STDOUT:\n%s%s STDERR:%s\n%s END: %s" %
+        (proc.returncode, sec, stdout, sec, stderr,
+         sec, datetime.now().strftime("%H:%M:%S.%f")))
+
+    if proc.returncode != 0 and fail:
+        if len(stderr) > 0 and stderr[-1] == "\n":
+            stderr = stderr[:-1]
+        raise Exception("Command failed: %s\n%s" % (proc.args, stderr))
+
+    if include_stderr:
+        return proc.returncode, stdout, stderr
+    else:
+        return proc.returncode, stdout
+
+def rm(f):
+    cmd("rm -f %s" % (f))
+    if f in files:
+        files.remove(f)
+
+def tool(name, args, flags, JSON=True, ns="", fail=True):
+    params = ""
+    if JSON:
+        params += "%s " % (flags["json"])
+
+    if ns != "":
+        ns = "ip netns exec %s " % (ns)
+
+    ret, out = cmd(ns + name + " " + params + args, fail=fail)
+    if JSON and len(out.strip()) != 0:
+        return ret, json.loads(out)
+    else:
+        return ret, out
+
+def bpftool(args, JSON=True, ns="", fail=True):
+    return tool("bpftool", args, {"json":"-p"}, JSON=JSON, ns=ns, fail=fail)
+
+def bpftool_prog_list(expected=None, ns=""):
+    _, progs = bpftool("prog show", JSON=True, ns=ns, fail=True)
+    if expected is not None:
+        if len(progs) != expected:
+            fail(True, "%d BPF programs loaded, expected %d" %
+                 (len(progs), expected))
+    return progs
+
+def bpftool_prog_list_wait(expected=0, n_retry=20):
+    for i in range(n_retry):
+        nprogs = len(bpftool_prog_list())
+        if nprogs == expected:
+            return
+        time.sleep(0.05)
+    raise Exception("Timed out waiting for program count to stabilize: want %d, have %d" % (expected, nprogs))
+
+def ip(args, force=False, JSON=True, ns="", fail=True):
+    if force:
+        args = "-force " + args
+    return tool("ip", args, {"json":"-j"}, JSON=JSON, ns=ns, fail=fail)
+
+def tc(args, JSON=True, ns="", fail=True):
+    return tool("tc", args, {"json":"-p"}, JSON=JSON, ns=ns, fail=fail)
+
+def ethtool(dev, opt, args, fail=True):
+    return cmd("ethtool %s %s %s" % (opt, dev["ifname"], args), fail=fail)
+
+def bpf_obj(name, sec=".text", path=bpf_test_dir):
+    return "obj %s sec %s" % (os.path.join(path, name), sec)
+
+def bpf_pinned(name):
+    return "pinned %s" % (name)
+
+def bpf_bytecode(bytecode):
+    return "bytecode \"%s\"" % (bytecode)
+
+def mknetns(n_retry=10):
+    for i in range(n_retry):
+        name = ''.join([random.choice(string.ascii_letters) for i in range(8)])
+        ret, _ = ip("netns add %s" % (name), fail=False)
+        if ret == 0:
+            netns.append(name)
+            return name
+    return None
+
+class DebugfsDir:
+    """
+    Class for accessing DebugFS directories as a dictionary.
+    """
+
+    def __init__(self, path):
+        self.path = path
+        self._dict = self._debugfs_dir_read(path)
+
+    def __len__(self):
+        return len(self._dict.keys())
+
+    def __getitem__(self, key):
+        if type(key) is int:
+            key = list(self._dict.keys())[key]
+        return self._dict[key]
+
+    def __setitem__(self, key, value):
+        log("DebugFS set %s = %s" % (key, value), "")
+        log_level_inc()
+
+        cmd("echo '%s' > %s/%s" % (value, self.path, key))
+        log_level_dec()
+
+        _, out = cmd('cat %s/%s' % (self.path, key))
+        self._dict[key] = out.strip()
+
+    def _debugfs_dir_read(self, path):
+        dfs = {}
+
+        log("DebugFS state for %s" % (path), "")
+        log_level_inc(add=2)
+
+        _, out = cmd('ls ' + path)
+        for f in out.split():
+            p = os.path.join(path, f)
+            if os.path.isfile(p):
+                _, out = cmd('cat %s/%s' % (path, f))
+                dfs[f] = out.strip()
+            elif os.path.isdir(p):
+                dfs[f] = DebugfsDir(p)
+            else:
+                raise Exception("%s is neither file nor directory" % (p))
+
+        log_level_dec()
+        log("DebugFS state", dfs)
+        log_level_dec()
+
+        return dfs
+
+class NetdevSim:
+    """
+    Class for netdevsim netdevice and its attributes.
+    """
+
+    def __init__(self):
+        self.dev = self._netdevsim_create()
+        devs.append(self)
+
+        self.ns = ""
+
+        self.dfs_dir = '/sys/kernel/debug/netdevsim/%s' % (self.dev['ifname'])
+        self.dfs_refresh()
+
+    def __getitem__(self, key):
+        return self.dev[key]
+
+    def _netdevsim_create(self):
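+        # "sim%d" lets the kernel pick the name; diff "ip link" output
+        # before and after to find the new device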
+        _, old  = ip("link show")
+        ip("link add sim%d type netdevsim")
+        _, new  = ip("link show")
+
+        for dev in new:
+            f = filter(lambda x: x["ifname"] == dev["ifname"], old)
+            if len(list(f)) == 0:
+                return dev
+
+        raise Exception("failed to create netdevsim device")
+
+    def remove(self):
+        devs.remove(self)
+        ip("link del dev %s" % (self.dev["ifname"]), ns=self.ns)
+
+    def dfs_refresh(self):
+        self.dfs = DebugfsDir(self.dfs_dir)
+        return self.dfs
+
+    def dfs_num_bound_progs(self):
+        path = os.path.join(self.dfs_dir, "bpf_bound_progs")
+        _, progs = cmd('ls %s' % (path))
+        return len(progs.split())
+
+    def dfs_get_bound_progs(self, expected):
+        progs = DebugfsDir(os.path.join(self.dfs_dir, "bpf_bound_progs"))
+        if expected is not None:
+            if len(progs) != expected:
+                fail(True, "%d BPF programs bound, expected %d" %
+                     (len(progs), expected))
+        return progs
+
+    def wait_for_flush(self, bound=0, total=0, n_retry=20):
+        for i in range(n_retry):
+            nbound = self.dfs_num_bound_progs()
+            nprogs = len(bpftool_prog_list())
+            if nbound == bound and nprogs == total:
+                return
+            time.sleep(0.05)
+        raise Exception("Timed out waiting for program counts to stabilize: want %d/%d, have %d bound, %d loaded" % (bound, total, nbound, nprogs))
+
+    def set_ns(self, ns):
+        name = "1" if ns == "" else ns
+        ip("link set dev %s netns %s" % (self.dev["ifname"], name), ns=self.ns)
+        self.ns = ns
+
+    def set_mtu(self, mtu, fail=True):
+        return ip("link set dev %s mtu %d" % (self.dev["ifname"], mtu),
+                  fail=fail)
+
+    def set_xdp(self, bpf, mode, force=False, fail=True):
+        return ip("link set dev %s xdp%s %s" % (self.dev["ifname"], mode, bpf),
+                  force=force, fail=fail)
+
+    def unset_xdp(self, mode, force=False, fail=True):
+        return ip("link set dev %s xdp%s off" % (self.dev["ifname"], mode),
+                  force=force, fail=fail)
+
+    def ip_link_show(self, xdp):
+        _, link = ip("link show dev %s" % (self['ifname']))
+        if len(link) > 1:
+            raise Exception("Multiple objects on ip link show")
+        if len(link) < 1:
+            return {}
+        fail(xdp != ("xdp" in link[0]),
+             "XDP program not reporting in iplink (reported %s, expected %s)" %
+             ("xdp" in link[0], xdp))
+        return link[0]
+
+    def tc_add_ingress(self):
+        tc("qdisc add dev %s ingress" % (self['ifname']))
+
+    def tc_del_ingress(self):
+        tc("qdisc del dev %s ingress" % (self['ifname']))
+
+    def tc_flush_filters(self, bound=0, total=0):
+        self.tc_del_ingress()
+        self.tc_add_ingress()
+        self.wait_for_flush(bound=bound, total=total)
+
+    def tc_show_ingress(self, expected=None):
+        # No JSON support, oh well...
+        flags = ["skip_sw", "skip_hw", "in_hw"]
+        named = ["protocol", "pref", "chain", "handle", "id", "tag"]
+
+        args = "-s filter show dev %s ingress" % (self['ifname'])
+        _, out = tc(args, JSON=False)
+
+        filters = []
+        lines = out.split('\n')
+        for line in lines:
+            words = line.split()
+            if "handle" not in words:
+                continue
+            fltr = {}
+            for flag in flags:
+                fltr[flag] = flag in words
+            for name in named:
+                try:
+                    idx = words.index(name)
+                    fltr[name] = words[idx + 1]
+                except ValueError:
+                    pass
+            filters.append(fltr)
+
+        if expected is not None:
+            fail(len(filters) != expected,
+                 "%d ingress filters loaded, expected %d" %
+                 (len(filters), expected))
+        return filters
+
+    def cls_bpf_add_filter(self, bpf, da=False, skip_sw=False, skip_hw=False,
+                           fail=True):
+        params = ""
+        if da:
+            params += " da"
+        if skip_sw:
+            params += " skip_sw"
+        if skip_hw:
+            params += " skip_hw"
+        return tc("filter add dev %s ingress bpf %s %s" %
+                  (self['ifname'], bpf, params), fail=fail)
+
+    def set_ethtool_tc_offloads(self, enable, fail=True):
+        args = "hw-tc-offload %s" % ("on" if enable else "off")
+        return ethtool(self, "-K", args, fail=fail)
+
+################################################################################
+def clean_up():
+    for dev in devs:
+        dev.remove()
+    for f in files:
+        cmd("rm -f %s" % (f))
+    for ns in netns:
+        cmd("ip netns delete %s" % (ns))
+
+def pin_prog(file_name, idx=0):
+    progs = bpftool_prog_list(expected=(idx + 1))
+    prog = progs[idx]
+    bpftool("prog pin id %d %s" % (prog["id"], file_name))
+    files.append(file_name)
+
+    return file_name, bpf_pinned(file_name)
+
+def check_dev_info(other_ns, ns, pin_file=None, removed=False):
+    if removed:
+        bpftool_prog_list(expected=0)
+        ret, err = bpftool("prog show pin %s" % (pin_file), fail=False)
+        fail(ret == 0, "Showing prog with removed device did not fail")
+        fail(err["error"].find("No such device") == -1,
+             "Showing prog with removed device expected ENODEV, error is %s" %
+             (err["error"]))
+        return
+    progs = bpftool_prog_list(expected=int(not removed), ns=ns)
+    prog = progs[0]
+
+    fail("dev" not in prog.keys(), "Device parameters not reported")
+    dev = prog["dev"]
+    fail("ifindex" not in dev.keys(), "Device parameters not reported")
+    fail("ns_dev" not in dev.keys(), "Device parameters not reported")
+    fail("ns_inode" not in dev.keys(), "Device parameters not reported")
+
+    if not removed and not other_ns:
+        fail("ifname" not in dev.keys(), "Ifname not reported")
+        fail(dev["ifname"] != sim["ifname"],
+             "Ifname incorrect %s vs %s" % (dev["ifname"], sim["ifname"]))
+    else:
+        fail("ifname" in dev.keys(), "Ifname is reported for other ns")
+        if removed:
+            fail(dev["ifindex"] != 0, "Device parameters not zero on removed")
+            fail(dev["ns_dev"] != 0, "Device parameters not zero on removed")
+            fail(dev["ns_inode"] != 0, "Device parameters not zero on removed")
+
+# Parse command line
+parser = argparse.ArgumentParser()
+parser.add_argument("--log", help="output verbose log to given file")
+args = parser.parse_args()
+if args.log:
+    logfile = open(args.log, 'w+')
+    logfile.write("# -*-Org-*-")
+
+log("Prepare...", "", level=1)
+log_level_inc()
+
+# Check permissions
+skip(os.getuid() != 0, "test must be run as root")
+
+# Check tools
+ret, progs = bpftool("prog", fail=False)
+skip(ret != 0, "bpftool not installed")
+# Check no BPF programs are loaded
+skip(len(progs) != 0, "BPF programs already loaded on the system")
+
+# Check netdevsim
+ret, out = cmd("modprobe netdevsim", fail=False)
+skip(ret != 0, "netdevsim module could not be loaded")
+
+# Check debugfs
+_, out = cmd("mount")
+if out.find("/sys/kernel/debug type debugfs") == -1:
+    cmd("mount -t debugfs none /sys/kernel/debug")
+
+# Check samples are compiled
+samples = ["sample_ret0.o"]
+for s in samples:
+    ret, out = cmd("ls %s/%s" % (bpf_test_dir, s), fail=False)
+    skip(ret != 0, "sample %s/%s not found, please compile it" %
+         (bpf_test_dir, s))
+
+# Check if net namespaces seem to work
+ns = mknetns()
+skip(ns is None, "Could not create a net namespace")
+cmd("ip netns delete %s" % (ns))
+netns = []
+
+try:
+    obj = bpf_obj("sample_ret0.o")
+    bytecode = bpf_bytecode("1,6 0 0 4294967295,")
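+    # a single classic BPF insn: BPF_RET | BPF_K with k = 0xffffffff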
+
+    start_test("Test destruction of generic XDP...")
+    sim = NetdevSim()
+    sim.set_xdp(obj, "generic")
+    sim.remove()
+    bpftool_prog_list_wait(expected=0)
+
+    sim = NetdevSim()
+    sim.tc_add_ingress()
+
+    start_test("Test TC non-offloaded...")
+    ret, _ = sim.cls_bpf_add_filter(obj, skip_hw=True, fail=False)
+    fail(ret != 0, "Software TC filter did not load")
+
+    start_test("Test TC non-offloaded isn't getting bound...")
+    ret, _ = sim.cls_bpf_add_filter(obj, fail=False)
+    fail(ret != 0, "Software TC filter did not load")
+    sim.dfs_get_bound_progs(expected=0)
+
+    sim.tc_flush_filters()
+
+    start_test("Test TC offloads are off by default...")
+    ret, _ = sim.cls_bpf_add_filter(obj, skip_sw=True, fail=False)
+    fail(ret == 0, "TC filter loaded without enabling TC offloads")
+    sim.wait_for_flush()
+
+    sim.set_ethtool_tc_offloads(True)
+    sim.dfs["bpf_tc_non_bound_accept"] = "Y"
+
+    start_test("Test TC offload by default...")
+    ret, _ = sim.cls_bpf_add_filter(obj, fail=False)
+    fail(ret != 0, "Software TC filter did not load")
+    sim.dfs_get_bound_progs(expected=0)
+    ingress = sim.tc_show_ingress(expected=1)
+    fltr = ingress[0]
+    fail(not fltr["in_hw"], "Filter not offloaded by default")
+
+    sim.tc_flush_filters()
+
+    start_test("Test TC cBPF bytcode tries offload by default...")
+    ret, _ = sim.cls_bpf_add_filter(bytecode, fail=False)
+    fail(ret != 0, "Software TC filter did not load")
+    sim.dfs_get_bound_progs(expected=0)
+    ingress = sim.tc_show_ingress(expected=1)
+    fltr = ingress[0]
+    fail(not fltr["in_hw"], "Bytecode not offloaded by default")
+
+    sim.tc_flush_filters()
+    sim.dfs["bpf_tc_non_bound_accept"] = "N"
+
+    start_test("Test TC cBPF unbound bytecode doesn't offload...")
+    ret, _ = sim.cls_bpf_add_filter(bytecode, skip_sw=True, fail=False)
+    fail(ret == 0, "TC bytecode loaded for offload")
+    sim.wait_for_flush()
+
+    start_test("Test TC offloads work...")
+    ret, _ = sim.cls_bpf_add_filter(obj, skip_sw=True, fail=False)
+    fail(ret != 0, "TC filter did not load with TC offloads enabled")
+
+    start_test("Test TC offload basics...")
+    dfs = sim.dfs_get_bound_progs(expected=1)
+    progs = bpftool_prog_list(expected=1)
+    ingress = sim.tc_show_ingress(expected=1)
+
+    dprog = dfs[0]
+    prog = progs[0]
+    fltr = ingress[0]
+    fail(fltr["skip_hw"], "TC reports 'skip_hw' on offloaded filter")
+    fail(not fltr["in_hw"], "TC does not report 'in_hw' for offloaded filter")
+    fail(not fltr["skip_sw"], "TC does not report 'skip_sw' back")
+
+    start_test("Test TC offload is device-bound...")
+    fail(str(prog["id"]) != fltr["id"], "Program IDs don't match")
+    fail(prog["tag"] != fltr["tag"], "Program tags don't match")
+    fail(fltr["id"] != dprog["id"], "Program IDs don't match")
+    fail(dprog["state"] != "xlated", "Offloaded program state not translated")
+    fail(dprog["loaded"] != "Y", "Offloaded program is not loaded")
+
+    start_test("Test disabling TC offloads is rejected while filters installed...")
+    ret, _ = sim.set_ethtool_tc_offloads(False, fail=False)
+    fail(ret == 0, "Driver should refuse to disable TC offloads with filters installed...")
+
+    start_test("Test qdisc removal frees things...")
+    sim.tc_flush_filters()
+    sim.tc_show_ingress(expected=0)
+
+    start_test("Test disabling TC offloads is OK without filters...")
+    ret, _ = sim.set_ethtool_tc_offloads(False, fail=False)
+    fail(ret != 0,
+         "Driver refused to disable TC offloads without filters installed...")
+
+    sim.set_ethtool_tc_offloads(True)
+
+    start_test("Test destroying device gets rid of TC filters...")
+    sim.cls_bpf_add_filter(obj, skip_sw=True)
+    sim.remove()
+    bpftool_prog_list_wait(expected=0)
+
+    sim = NetdevSim()
+    sim.set_ethtool_tc_offloads(True)
+
+    start_test("Test destroying device gets rid of XDP...")
+    sim.set_xdp(obj, "offload")
+    sim.remove()
+    bpftool_prog_list_wait(expected=0)
+
+    sim = NetdevSim()
+    sim.set_ethtool_tc_offloads(True)
+
+    start_test("Test XDP prog reporting...")
+    sim.set_xdp(obj, "drv")
+    ipl = sim.ip_link_show(xdp=True)
+    progs = bpftool_prog_list(expected=1)
+    fail(ipl["xdp"]["prog"]["id"] != progs[0]["id"],
+         "Loaded program has wrong ID")
+
+    start_test("Test XDP prog replace without force...")
+    ret, _ = sim.set_xdp(obj, "drv", fail=False)
+    fail(ret == 0, "Replaced XDP program without -force")
+    sim.wait_for_flush(total=1)
+
+    start_test("Test XDP prog replace with force...")
+    ret, _ = sim.set_xdp(obj, "drv", force=True, fail=False)
+    fail(ret != 0, "Could not replace XDP program with -force")
+    bpftool_prog_list_wait(expected=1)
+    ipl = sim.ip_link_show(xdp=True)
+    progs = bpftool_prog_list(expected=1)
+    fail(ipl["xdp"]["prog"]["id"] != progs[0]["id"],
+         "Loaded program has wrong ID")
+    fail("dev" in progs[0].keys(),
+         "Device parameters reported for non-offloaded program")
+
+    start_test("Test XDP prog replace with bad flags...")
+    ret, _ = sim.set_xdp(obj, "offload", force=True, fail=False)
+    fail(ret == 0, "Replaced XDP program with a program in different mode")
+    ret, _ = sim.set_xdp(obj, "", force=True, fail=False)
+    fail(ret == 0, "Replaced XDP program with a program in different mode")
+
+    start_test("Test XDP prog remove with bad flags...")
+    ret, _ = sim.unset_xdp("offload", force=True, fail=False)
+    fail(ret == 0, "Removed program with a bad mode mode")
+    ret, _ = sim.unset_xdp("", force=True, fail=False)
+    fail(ret == 0, "Removed program with a bad mode mode")
+
+    start_test("Test MTU restrictions...")
+    ret, _ = sim.set_mtu(9000, fail=False)
+    fail(ret == 0,
+         "Driver should refuse to increase MTU to 9000 with XDP loaded...")
+    sim.unset_xdp("drv")
+    bpftool_prog_list_wait(expected=0)
+    sim.set_mtu(9000)
+    ret, _ = sim.set_xdp(obj, "drv", fail=False)
+    fail(ret == 0, "Driver should refuse to load program with MTU of 9000...")
+    sim.set_mtu(1500)
+
+    sim.wait_for_flush()
+    start_test("Test XDP offload...")
+    sim.set_xdp(obj, "offload")
+    ipl = sim.ip_link_show(xdp=True)
+    link_xdp = ipl["xdp"]["prog"]
+    progs = bpftool_prog_list(expected=1)
+    prog = progs[0]
+    fail(link_xdp["id"] != prog["id"], "Loaded program has wrong ID")
+
+    start_test("Test XDP offload is device bound...")
+    dfs = sim.dfs_get_bound_progs(expected=1)
+    dprog = dfs[0]
+
+    fail(prog["id"] != link_xdp["id"], "Program IDs don't match")
+    fail(prog["tag"] != link_xdp["tag"], "Program tags don't match")
+    fail(str(link_xdp["id"]) != dprog["id"], "Program IDs don't match")
+    fail(dprog["state"] != "xlated", "Offloaded program state not translated")
+    fail(dprog["loaded"] != "Y", "Offloaded program is not loaded")
+
+    start_test("Test removing XDP program many times...")
+    sim.unset_xdp("offload")
+    sim.unset_xdp("offload")
+    sim.unset_xdp("drv")
+    sim.unset_xdp("drv")
+    sim.unset_xdp("")
+    sim.unset_xdp("")
+    bpftool_prog_list_wait(expected=0)
+
+    start_test("Test attempt to use a program for a wrong device...")
+    sim2 = NetdevSim()
+    sim2.set_xdp(obj, "offload")
+    pin_file, pinned = pin_prog("/sys/fs/bpf/tmp")
+
+    ret, _ = sim.set_xdp(pinned, "offload", fail=False)
+    fail(ret == 0, "Pinned program loaded for a different device accepted")
+    sim2.remove()
+    ret, _ = sim.set_xdp(pinned, "offload", fail=False)
+    fail(ret == 0, "Pinned program loaded for a removed device accepted")
+    rm(pin_file)
+    bpftool_prog_list_wait(expected=0)
+
+    start_test("Test mixing of TC and XDP...")
+    sim.tc_add_ingress()
+    sim.set_xdp(obj, "offload")
+    ret, _ = sim.cls_bpf_add_filter(obj, skip_sw=True, fail=False)
+    fail(ret == 0, "Loading TC when XDP active should fail")
+    sim.unset_xdp("offload")
+    sim.wait_for_flush()
+
+    sim.cls_bpf_add_filter(obj, skip_sw=True)
+    ret, _ = sim.set_xdp(obj, "offload", fail=False)
+    fail(ret == 0, "Loading XDP when TC active should fail")
+
+    start_test("Test binding TC from pinned...")
+    pin_file, pinned = pin_prog("/sys/fs/bpf/tmp")
+    sim.tc_flush_filters(bound=1, total=1)
+    sim.cls_bpf_add_filter(pinned, da=True, skip_sw=True)
+    sim.tc_flush_filters(bound=1, total=1)
+
+    start_test("Test binding XDP from pinned...")
+    sim.set_xdp(obj, "offload")
+    pin_file, pinned = pin_prog("/sys/fs/bpf/tmp2", idx=1)
+
+    sim.set_xdp(pinned, "offload", force=True)
+    sim.unset_xdp("offload")
+    sim.set_xdp(pinned, "offload", force=True)
+    sim.unset_xdp("offload")
+
+    start_test("Test offload of wrong type fails...")
+    ret, _ = sim.cls_bpf_add_filter(pinned, da=True, skip_sw=True, fail=False)
+    fail(ret == 0, "Managed to attach XDP program to TC")
+
+    start_test("Test asking for TC offload of two filters...")
+    sim.cls_bpf_add_filter(obj, da=True, skip_sw=True)
+    ret, _ = sim.cls_bpf_add_filter(obj, da=True, skip_sw=True, fail=False)
+    fail(ret == 0, "Managed to offload two TC filters at the same time")
+
+    sim.tc_flush_filters(bound=2, total=2)
+
+    start_test("Test if netdev removal waits for translation...")
+    delay_msec = 500
+    sim.dfs["bpf_bind_verifier_delay"] = delay_msec
+    start = time.time()
+    cmd_line = "tc filter add dev %s ingress bpf %s da skip_sw" % \
+               (sim['ifname'], obj)
+    tc_proc = cmd(cmd_line, background=True, fail=False)
+    # Wait for the verifier to start
+    while sim.dfs_num_bound_progs() <= 2:
+        pass
+    sim.remove()
+    end = time.time()
+    ret, _ = cmd_result(tc_proc, fail=False)
+    time_diff = end - start
+    log("Time", "start:\t%s\nend:\t%s\ndiff:\t%s" % (start, end, time_diff))
+
+    fail(ret == 0, "Managed to load TC filter on a unregistering device")
+    delay_sec = delay_msec * 0.001
+    fail(time_diff < delay_sec, "Removal process took %s, expected %s" %
+         (time_diff, delay_sec))
+
+    # Remove all pinned files and reinstantiate the netdev
+    clean_up()
+    bpftool_prog_list_wait(expected=0)
+
+    sim = NetdevSim()
+    sim.set_ethtool_tc_offloads(True)
+    sim.set_xdp(obj, "offload")
+
+    start_test("Test bpftool bound info reporting (own ns)...")
+    check_dev_info(False, "")
+
+    start_test("Test bpftool bound info reporting (other ns)...")
+    ns = mknetns()
+    sim.set_ns(ns)
+    check_dev_info(True, "")
+
+    start_test("Test bpftool bound info reporting (remote ns)...")
+    check_dev_info(False, ns)
+
+    start_test("Test bpftool bound info reporting (back to own ns)...")
+    sim.set_ns("")
+    check_dev_info(False, "")
+
+    pin_file, _ = pin_prog("/sys/fs/bpf/tmp")
+    sim.remove()
+
+    start_test("Test bpftool bound info reporting (removed dev)...")
+    check_dev_info(True, "", pin_file=pin_file, removed=True)
+
+    print("%s: OK" % (os.path.basename(__file__)))
+
+finally:
+    log("Clean up...", "", level=1)
+    log_level_inc()
+    clean_up()
diff --git a/tools/testing/selftests/bpf/test_progs.c b/tools/testing/selftests/bpf/test_progs.c
index 6761be1..b549308 100644
--- a/tools/testing/selftests/bpf/test_progs.c
+++ b/tools/testing/selftests/bpf/test_progs.c
@@ -21,8 +21,10 @@ typedef __u16 __sum16;
 #include <linux/ipv6.h>
 #include <linux/tcp.h>
 #include <linux/filter.h>
+#include <linux/perf_event.h>
 #include <linux/unistd.h>
 
+#include <sys/ioctl.h>
 #include <sys/wait.h>
 #include <sys/resource.h>
 #include <sys/types.h>
@@ -167,10 +169,9 @@ static void test_xdp(void)
 #define NUM_ITER 100000
 #define VIP_NUM 5
 
-static void test_l4lb(void)
+static void test_l4lb(const char *file)
 {
 	unsigned int nr_cpus = bpf_num_possible_cpus();
-	const char *file = "./test_l4lb.o";
 	struct vip key = {.protocol = 6};
 	struct vip_meta {
 		__u32 flags;
@@ -247,6 +248,95 @@ static void test_l4lb(void)
 	bpf_object__close(obj);
 }
 
+static void test_l4lb_all(void)
+{
+	const char *file1 = "./test_l4lb.o";
+	const char *file2 = "./test_l4lb_noinline.o";
+
+	test_l4lb(file1);
+	test_l4lb(file2);
+}
+
+static void test_xdp_noinline(void)
+{
+	const char *file = "./test_xdp_noinline.o";
+	unsigned int nr_cpus = bpf_num_possible_cpus();
+	struct vip key = {.protocol = 6};
+	struct vip_meta {
+		__u32 flags;
+		__u32 vip_num;
+	} value = {.vip_num = VIP_NUM};
+	__u32 stats_key = VIP_NUM;
+	struct vip_stats {
+		__u64 bytes;
+		__u64 pkts;
+	} stats[nr_cpus];
+	struct real_definition {
+		union {
+			__be32 dst;
+			__be32 dstv6[4];
+		};
+		__u8 flags;
+	} real_def = {.dst = MAGIC_VAL};
+	__u32 ch_key = 11, real_num = 3;
+	__u32 duration, retval, size;
+	int err, i, prog_fd, map_fd;
+	__u64 bytes = 0, pkts = 0;
+	struct bpf_object *obj;
+	char buf[128];
+	u32 *magic = (u32 *)buf;
+
+	err = bpf_prog_load(file, BPF_PROG_TYPE_XDP, &obj, &prog_fd);
+	if (err) {
+		error_cnt++;
+		return;
+	}
+
+	map_fd = bpf_find_map(__func__, obj, "vip_map");
+	if (map_fd < 0)
+		goto out;
+	bpf_map_update_elem(map_fd, &key, &value, 0);
+
+	map_fd = bpf_find_map(__func__, obj, "ch_rings");
+	if (map_fd < 0)
+		goto out;
+	bpf_map_update_elem(map_fd, &ch_key, &real_num, 0);
+
+	map_fd = bpf_find_map(__func__, obj, "reals");
+	if (map_fd < 0)
+		goto out;
+	bpf_map_update_elem(map_fd, &real_num, &real_def, 0);
+
+	err = bpf_prog_test_run(prog_fd, NUM_ITER, &pkt_v4, sizeof(pkt_v4),
+				buf, &size, &retval, &duration);
+	CHECK(err || errno || retval != 1 || size != 54 ||
+	      *magic != MAGIC_VAL, "ipv4",
+	      "err %d errno %d retval %d size %d magic %x\n",
+	      err, errno, retval, size, *magic);
+
+	err = bpf_prog_test_run(prog_fd, NUM_ITER, &pkt_v6, sizeof(pkt_v6),
+				buf, &size, &retval, &duration);
+	CHECK(err || errno || retval != 1 || size != 74 ||
+	      *magic != MAGIC_VAL, "ipv6",
+	      "err %d errno %d retval %d size %d magic %x\n",
+	      err, errno, retval, size, *magic);
+
+	map_fd = bpf_find_map(__func__, obj, "stats");
+	if (map_fd < 0)
+		goto out;
+	bpf_map_lookup_elem(map_fd, &stats_key, stats);
+	for (i = 0; i < nr_cpus; i++) {
+		bytes += stats[i].bytes;
+		pkts += stats[i].pkts;
+	}
+	if (bytes != MAGIC_BYTES * NUM_ITER * 2 || pkts != NUM_ITER * 2) {
+		error_cnt++;
+		printf("test_xdp_noinline:FAIL:stats %lld %lld\n", bytes, pkts);
+	}
+out:
+	bpf_object__close(obj);
+}
+
 static void test_tcp_estats(void)
 {
 	const char *file = "./test_tcp_estats.o";
@@ -617,6 +707,262 @@ static void test_obj_name(void)
 	}
 }
 
+static void test_tp_attach_query(void)
+{
+	const int num_progs = 3;
+	int i, j, bytes, efd, err, prog_fd[num_progs], pmu_fd[num_progs];
+	__u32 duration = 0, info_len, saved_prog_ids[num_progs];
+	const char *file = "./test_tracepoint.o";
+	struct perf_event_query_bpf *query;
+	struct perf_event_attr attr = {};
+	struct bpf_object *obj[num_progs];
+	struct bpf_prog_info prog_info;
+	char buf[256];
+
+	snprintf(buf, sizeof(buf),
+		 "/sys/kernel/debug/tracing/events/sched/sched_switch/id");
+	efd = open(buf, O_RDONLY, 0);
+	if (CHECK(efd < 0, "open", "err %d errno %d\n", efd, errno))
+		return;
+	bytes = read(efd, buf, sizeof(buf));
+	close(efd);
+	if (CHECK(bytes <= 0 || bytes >= sizeof(buf),
+		  "read", "bytes %d errno %d\n", bytes, errno))
+		return;
+
+	attr.config = strtol(buf, NULL, 0);
+	attr.type = PERF_TYPE_TRACEPOINT;
+	attr.sample_type = PERF_SAMPLE_RAW | PERF_SAMPLE_CALLCHAIN;
+	attr.sample_period = 1;
+	attr.wakeup_events = 1;
+
+	query = malloc(sizeof(*query) + sizeof(__u32) * num_progs);
+	for (i = 0; i < num_progs; i++) {
+		err = bpf_prog_load(file, BPF_PROG_TYPE_TRACEPOINT, &obj[i],
+				    &prog_fd[i]);
+		if (CHECK(err, "prog_load", "err %d errno %d\n", err, errno))
+			goto cleanup1;
+
+		bzero(&prog_info, sizeof(prog_info));
+		prog_info.jited_prog_len = 0;
+		prog_info.xlated_prog_len = 0;
+		prog_info.nr_map_ids = 0;
+		info_len = sizeof(prog_info);
+		err = bpf_obj_get_info_by_fd(prog_fd[i], &prog_info, &info_len);
+		if (CHECK(err, "bpf_obj_get_info_by_fd", "err %d errno %d\n",
+			  err, errno))
+			goto cleanup1;
+		saved_prog_ids[i] = prog_info.id;
+
+		pmu_fd[i] = syscall(__NR_perf_event_open, &attr, -1 /* pid */,
+				    0 /* cpu 0 */, -1 /* group id */,
+				    0 /* flags */);
+		if (CHECK(pmu_fd[i] < 0, "perf_event_open", "err %d errno %d\n",
+			  pmu_fd[i], errno))
+			goto cleanup2;
+		err = ioctl(pmu_fd[i], PERF_EVENT_IOC_ENABLE, 0);
+		if (CHECK(err, "perf_event_ioc_enable", "err %d errno %d\n",
+			  err, errno))
+			goto cleanup3;
+
+		if (i == 0) {
+			/* check NULL prog array query */
+			query->ids_len = num_progs;
+			err = ioctl(pmu_fd[i], PERF_EVENT_IOC_QUERY_BPF, query);
+			if (CHECK(err || query->prog_cnt != 0,
+				  "perf_event_ioc_query_bpf",
+				  "err %d errno %d query->prog_cnt %u\n",
+				  err, errno, query->prog_cnt))
+				goto cleanup3;
+		}
+
+		err = ioctl(pmu_fd[i], PERF_EVENT_IOC_SET_BPF, prog_fd[i]);
+		if (CHECK(err, "perf_event_ioc_set_bpf", "err %d errno %d\n",
+			  err, errno))
+			goto cleanup3;
+
+		if (i == 1) {
+			/* try to get # of programs only */
+			query->ids_len = 0;
+			err = ioctl(pmu_fd[i], PERF_EVENT_IOC_QUERY_BPF, query);
+			if (CHECK(err || query->prog_cnt != 2,
+				  "perf_event_ioc_query_bpf",
+				  "err %d errno %d query->prog_cnt %u\n",
+				  err, errno, query->prog_cnt))
+				goto cleanup3;
+
+			/* try a few negative tests */
+			/* invalid query pointer */
+			err = ioctl(pmu_fd[i], PERF_EVENT_IOC_QUERY_BPF,
+				    (struct perf_event_query_bpf *)0x1);
+			if (CHECK(!err || errno != EFAULT,
+				  "perf_event_ioc_query_bpf",
+				  "err %d errno %d\n", err, errno))
+				goto cleanup3;
+
+			/* no enough space */
+			query->ids_len = 1;
+			err = ioctl(pmu_fd[i], PERF_EVENT_IOC_QUERY_BPF, query);
+			if (CHECK(!err || errno != ENOSPC || query->prog_cnt != 2,
+				  "perf_event_ioc_query_bpf",
+				  "err %d errno %d query->prog_cnt %u\n",
+				  err, errno, query->prog_cnt))
+				goto cleanup3;
+		}
+
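+		/* with a large enough ids buffer, all progs attached so far
+		 * must be reported in attach order
+		 */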
+		query->ids_len = num_progs;
+		err = ioctl(pmu_fd[i], PERF_EVENT_IOC_QUERY_BPF, query);
+		if (CHECK(err || query->prog_cnt != (i + 1),
+			  "perf_event_ioc_query_bpf",
+			  "err %d errno %d query->prog_cnt %u\n",
+			  err, errno, query->prog_cnt))
+			goto cleanup3;
+		for (j = 0; j < i + 1; j++)
+			if (CHECK(saved_prog_ids[j] != query->ids[j],
+				  "perf_event_ioc_query_bpf",
+				  "#%d saved_prog_id %x query prog_id %x\n",
+				  j, saved_prog_ids[j], query->ids[j]))
+				goto cleanup3;
+	}
+
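+	/* normal completion falls through into this cleanup loop; on error
+	 * the gotos above enter it at the label matching how far iteration
+	 * i got, and the loop then fully tears down all earlier iterations
+	 */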
+	i = num_progs - 1;
+	for (; i >= 0; i--) {
+ cleanup3:
+		ioctl(pmu_fd[i], PERF_EVENT_IOC_DISABLE);
+ cleanup2:
+		close(pmu_fd[i]);
+ cleanup1:
+		bpf_object__close(obj[i]);
+	}
+	free(query);
+}
+
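+/* Check that every key of map1 can be looked up in map2. Iteration via
+ * bpf_map_get_next_key() is expected to end with errno == ENOENT once the
+ * last key has been visited.
+ */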
+static int compare_map_keys(int map1_fd, int map2_fd)
+{
+	__u32 key, next_key;
+	char val_buf[PERF_MAX_STACK_DEPTH * sizeof(__u64)];
+	int err;
+
+	err = bpf_map_get_next_key(map1_fd, NULL, &key);
+	if (err)
+		return err;
+	err = bpf_map_lookup_elem(map2_fd, &key, val_buf);
+	if (err)
+		return err;
+
+	while (bpf_map_get_next_key(map1_fd, &key, &next_key) == 0) {
+		err = bpf_map_lookup_elem(map2_fd, &next_key, val_buf);
+		if (err)
+			return err;
+
+		key = next_key;
+	}
+	if (errno != ENOENT)
+		return -1;
+
+	return 0;
+}
+
+static void test_stacktrace_map(void)
+{
+	int control_map_fd, stackid_hmap_fd, stackmap_fd;
+	const char *file = "./test_stacktrace_map.o";
+	int bytes, efd, err, pmu_fd, prog_fd;
+	struct perf_event_attr attr = {};
+	__u32 key, val, duration = 0;
+	struct bpf_object *obj;
+	char buf[256];
+
+	err = bpf_prog_load(file, BPF_PROG_TYPE_TRACEPOINT, &obj, &prog_fd);
+	if (CHECK(err, "prog_load", "err %d errno %d\n", err, errno))
+		goto out;
+
+	/* Get the ID for the sched/sched_switch tracepoint */
+	snprintf(buf, sizeof(buf),
+		 "/sys/kernel/debug/tracing/events/sched/sched_switch/id");
+	efd = open(buf, O_RDONLY, 0);
+	if (CHECK(efd < 0, "open", "err %d errno %d\n", efd, errno))
+		goto close_prog;
+
+	bytes = read(efd, buf, sizeof(buf));
+	close(efd);
+	if (CHECK(bytes <= 0 || bytes >= sizeof(buf),
+		  "read", "bytes %d errno %d\n", bytes, errno))
+		goto close_prog;
+	buf[bytes] = '\0'; /* terminate the id string for strtol() below */
+
+	/* Open the perf event and attach the BPF program */
+	attr.config = strtol(buf, NULL, 0);
+	attr.type = PERF_TYPE_TRACEPOINT;
+	attr.sample_type = PERF_SAMPLE_RAW | PERF_SAMPLE_CALLCHAIN;
+	attr.sample_period = 1;
+	attr.wakeup_events = 1;
+	pmu_fd = syscall(__NR_perf_event_open, &attr, -1 /* pid */,
+			 0 /* cpu 0 */, -1 /* group id */,
+			 0 /* flags */);
+	if (CHECK(pmu_fd < 0, "perf_event_open", "err %d errno %d\n",
+		  pmu_fd, errno))
+		goto close_prog;
+
+	err = ioctl(pmu_fd, PERF_EVENT_IOC_ENABLE, 0);
+	if (CHECK(err, "perf_event_ioc_enable", "err %d errno %d\n",
+		  err, errno))
+		goto close_pmu;
+
+	err = ioctl(pmu_fd, PERF_EVENT_IOC_SET_BPF, prog_fd);
+	if (CHECK(err, "perf_event_ioc_set_bpf", "err %d errno %d\n",
+		  err, errno))
+		goto disable_pmu;
+
+	/* find map fds */
+	control_map_fd = bpf_find_map(__func__, obj, "control_map");
+	if (CHECK(control_map_fd < 0, "bpf_find_map control_map",
+		  "err %d errno %d\n", err, errno))
+		goto disable_pmu;
+
+	stackid_hmap_fd = bpf_find_map(__func__, obj, "stackid_hmap");
+	if (CHECK(stackid_hmap_fd < 0, "bpf_find_map stackid_hmap",
+		  "err %d errno %d\n", err, errno))
+		goto disable_pmu;
+
+	stackmap_fd = bpf_find_map(__func__, obj, "stackmap");
+	if (CHECK(stackmap_fd < 0, "bpf_find_map stackmap", "err %d errno %d\n",
+		  err, errno))
+		goto disable_pmu;
+
+	/* give the BPF program some time to run */
+	sleep(1);
+
+	/* disable stack trace collection */
+	key = 0;
+	val = 1;
+	bpf_map_update_elem(control_map_fd, &key, &val, 0);
+
+	/* every element in stackid_hmap should have a corresponding one in
+	 * stackmap, and vice versa
+	 */
+	err = compare_map_keys(stackid_hmap_fd, stackmap_fd);
+	if (CHECK(err, "compare_map_keys stackid_hmap vs. stackmap",
+		  "err %d errno %d\n", err, errno))
+		goto disable_pmu;
+
+	err = compare_map_keys(stackmap_fd, stackid_hmap_fd);
+	if (CHECK(err, "compare_map_keys stackmap vs. stackid_hmap",
+		  "err %d errno %d\n", err, errno))
+		; /* fall through */
+
+disable_pmu:
+	ioctl(pmu_fd, PERF_EVENT_IOC_DISABLE);
+
+close_pmu:
+	close(pmu_fd);
+
+close_prog:
+	bpf_object__close(obj);
+
+out:
+	return;
+}
+
 int main(void)
 {
 	struct rlimit rinf = { RLIM_INFINITY, RLIM_INFINITY };
@@ -625,11 +971,14 @@ int main(void)
 
 	test_pkt_access();
 	test_xdp();
-	test_l4lb();
+	test_l4lb_all();
+	test_xdp_noinline();
 	test_tcp_estats();
 	test_bpf_obj_id();
 	test_pkt_md_access();
 	test_obj_name();
+	test_tp_attach_query();
+	test_stacktrace_map();
 
 	printf("Summary: %d PASSED, %d FAILED\n", pass_cnt, error_cnt);
 	return error_cnt ? EXIT_FAILURE : EXIT_SUCCESS;
diff --git a/tools/testing/selftests/bpf/test_stacktrace_map.c b/tools/testing/selftests/bpf/test_stacktrace_map.c
new file mode 100644
index 0000000..76d85c5d
--- /dev/null
+++ b/tools/testing/selftests/bpf/test_stacktrace_map.c
@@ -0,0 +1,62 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) 2018 Facebook
+
+#include <linux/bpf.h>
+#include "bpf_helpers.h"
+
+#ifndef PERF_MAX_STACK_DEPTH
+#define PERF_MAX_STACK_DEPTH         127
+#endif
+
+struct bpf_map_def SEC("maps") control_map = {
+	.type = BPF_MAP_TYPE_ARRAY,
+	.key_size = sizeof(__u32),
+	.value_size = sizeof(__u32),
+	.max_entries = 1,
+};
+
+struct bpf_map_def SEC("maps") stackid_hmap = {
+	.type = BPF_MAP_TYPE_HASH,
+	.key_size = sizeof(__u32),
+	.value_size = sizeof(__u32),
+	.max_entries = 10000,
+};
+
+struct bpf_map_def SEC("maps") stackmap = {
+	.type = BPF_MAP_TYPE_STACK_TRACE,
+	.key_size = sizeof(__u32),
+	.value_size = sizeof(__u64) * PERF_MAX_STACK_DEPTH,
+	.max_entries = 10000,
+};
+
+/* taken from /sys/kernel/debug/tracing/events/sched/sched_switch/format */
+struct sched_switch_args {
+	unsigned long long pad;
+	char prev_comm[16];
+	int prev_pid;
+	int prev_prio;
+	long long prev_state;
+	char next_comm[16];
+	int next_pid;
+	int next_prio;
+};
+
+SEC("tracepoint/sched/sched_switch")
+int oncpu(struct sched_switch_args *ctx)
+{
+	__u32 key = 0, val = 0, *value_p;
+
+	value_p = bpf_map_lookup_elem(&control_map, &key);
+	if (value_p && *value_p)
+		return 0; /* skip if non-zero *value_p */
+
+	/* The size of stackmap and stackid_hmap should be the same */
+	key = bpf_get_stackid(ctx, &stackmap, 0);
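+	/* bpf_get_stackid() returns a negative error code on failure */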
+	if ((int)key >= 0)
+		bpf_map_update_elem(&stackid_hmap, &key, &val, 0);
+
+	return 0;
+}
+
+char _license[] SEC("license") = "GPL";
+__u32 _version SEC("version") = 1; /* ignored by tracepoints, required by libbpf.a */
diff --git a/tools/testing/selftests/bpf/test_tracepoint.c b/tools/testing/selftests/bpf/test_tracepoint.c
new file mode 100644
index 0000000..04bf084
--- /dev/null
+++ b/tools/testing/selftests/bpf/test_tracepoint.c
@@ -0,0 +1,26 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) 2017 Facebook
+
+#include <linux/bpf.h>
+#include "bpf_helpers.h"
+
+/* taken from /sys/kernel/debug/tracing/events/sched/sched_switch/format */
+struct sched_switch_args {
+	unsigned long long pad;
+	char prev_comm[16];
+	int prev_pid;
+	int prev_prio;
+	long long prev_state;
+	char next_comm[16];
+	int next_pid;
+	int next_prio;
+};
+
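+/* no-op tracepoint program; attach/query tests just need something to load */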
+SEC("tracepoint/sched/sched_switch")
+int oncpu(struct sched_switch_args *ctx)
+{
+	return 0;
+}
+
+char _license[] SEC("license") = "GPL";
+__u32 _version SEC("version") = 1; /* ignored by tracepoints, required by libbpf.a */
diff --git a/tools/testing/selftests/bpf/test_verifier.c b/tools/testing/selftests/bpf/test_verifier.c
index b510174..5438479 100644
--- a/tools/testing/selftests/bpf/test_verifier.c
+++ b/tools/testing/selftests/bpf/test_verifier.c
@@ -2,6 +2,7 @@
  * Testsuite for eBPF verifier
  *
  * Copyright (c) 2014 PLUMgrid, http://plumgrid.com
+ * Copyright (c) 2017 Facebook
  *
  * This program is free software; you can redistribute it and/or
  * modify it under the terms of version 2 of the GNU General Public
@@ -277,7 +278,7 @@ static struct bpf_test tests[] = {
 		.insns = {
 			BPF_ALU64_REG(BPF_MOV, BPF_REG_0, BPF_REG_2),
 		},
-		.errstr = "jump out of range",
+		.errstr = "not an exit",
 		.result = REJECT,
 	},
 	{
@@ -5635,7 +5636,7 @@ static struct bpf_test tests[] = {
 		"helper access to variable memory: size > 0 not allowed on NULL (ARG_PTR_TO_MEM_OR_NULL)",
 		.insns = {
 			BPF_MOV64_IMM(BPF_REG_1, 0),
-			BPF_MOV64_IMM(BPF_REG_2, 0),
+			BPF_MOV64_IMM(BPF_REG_2, 1),
 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -128),
 			BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -128),
 			BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 64),
@@ -5870,7 +5871,7 @@ static struct bpf_test tests[] = {
 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -24),
 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -16),
 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
-			BPF_MOV64_IMM(BPF_REG_2, 0),
+			BPF_MOV64_IMM(BPF_REG_2, 1),
 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -128),
 			BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -128),
 			BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 63),
@@ -8640,6 +8641,1864 @@ static struct bpf_test tests[] = {
 		.result = REJECT,
 		.prog_type = BPF_PROG_TYPE_CGROUP_SOCK,
 	},
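+	/* The "calls:" tests below exercise bpf-to-bpf function calls.
+	 * BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, imm) encodes a pseudo
+	 * call: src_reg == BPF_PSEUDO_CALL and imm is the offset of the
+	 * callee relative to the instruction following the call.
+	 */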
+	{
+		"calls: basic sanity",
+		.insns = {
+			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
+			BPF_MOV64_IMM(BPF_REG_0, 1),
+			BPF_EXIT_INSN(),
+			BPF_MOV64_IMM(BPF_REG_0, 2),
+			BPF_EXIT_INSN(),
+		},
+		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
+		.result = ACCEPT,
+	},
+	{
+		"calls: not on unprivileged",
+		.insns = {
+			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
+			BPF_MOV64_IMM(BPF_REG_0, 1),
+			BPF_EXIT_INSN(),
+			BPF_MOV64_IMM(BPF_REG_0, 2),
+			BPF_EXIT_INSN(),
+		},
+		.errstr_unpriv = "function calls to other bpf functions are allowed for root only",
+		.result_unpriv = REJECT,
+		.result = ACCEPT,
+	},
+	{
+		"calls: overlapping caller/callee",
+		.insns = {
+			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 0),
+			BPF_MOV64_IMM(BPF_REG_0, 1),
+			BPF_EXIT_INSN(),
+		},
+		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
+		.errstr = "last insn is not an exit or jmp",
+		.result = REJECT,
+	},
+	{
+		"calls: wrong recursive calls",
+		.insns = {
+			BPF_JMP_IMM(BPF_JA, 0, 0, 4),
+			BPF_JMP_IMM(BPF_JA, 0, 0, 4),
+			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -2),
+			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -2),
+			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -2),
+			BPF_MOV64_IMM(BPF_REG_0, 1),
+			BPF_EXIT_INSN(),
+		},
+		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
+		.errstr = "jump out of range",
+		.result = REJECT,
+	},
+	{
+		"calls: wrong src reg",
+		.insns = {
+			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 2, 0, 0),
+			BPF_MOV64_IMM(BPF_REG_0, 1),
+			BPF_EXIT_INSN(),
+		},
+		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
+		.errstr = "BPF_CALL uses reserved fields",
+		.result = REJECT,
+	},
+	{
+		"calls: wrong off value",
+		.insns = {
+			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, -1, 2),
+			BPF_MOV64_IMM(BPF_REG_0, 1),
+			BPF_EXIT_INSN(),
+			BPF_MOV64_IMM(BPF_REG_0, 2),
+			BPF_EXIT_INSN(),
+		},
+		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
+		.errstr = "BPF_CALL uses reserved fields",
+		.result = REJECT,
+	},
+	{
+		"calls: jump back loop",
+		.insns = {
+			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -1),
+			BPF_MOV64_IMM(BPF_REG_0, 1),
+			BPF_EXIT_INSN(),
+		},
+		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
+		.errstr = "back-edge from insn 0 to 0",
+		.result = REJECT,
+	},
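+	/* branches in a caller must not cross into a callee body: the
+	 * verifier flags them as jumps out of range or as back-edges
+	 */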
+	{
+		"calls: conditional call",
+		.insns = {
+			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+				    offsetof(struct __sk_buff, mark)),
+			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
+			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
+			BPF_MOV64_IMM(BPF_REG_0, 1),
+			BPF_EXIT_INSN(),
+			BPF_MOV64_IMM(BPF_REG_0, 2),
+			BPF_EXIT_INSN(),
+		},
+		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
+		.errstr = "jump out of range",
+		.result = REJECT,
+	},
+	{
+		"calls: conditional call 2",
+		.insns = {
+			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+				    offsetof(struct __sk_buff, mark)),
+			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
+			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
+			BPF_MOV64_IMM(BPF_REG_0, 1),
+			BPF_EXIT_INSN(),
+			BPF_MOV64_IMM(BPF_REG_0, 2),
+			BPF_EXIT_INSN(),
+			BPF_MOV64_IMM(BPF_REG_0, 3),
+			BPF_EXIT_INSN(),
+		},
+		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
+		.result = ACCEPT,
+	},
+	{
+		"calls: conditional call 3",
+		.insns = {
+			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+				    offsetof(struct __sk_buff, mark)),
+			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
+			BPF_JMP_IMM(BPF_JA, 0, 0, 4),
+			BPF_MOV64_IMM(BPF_REG_0, 1),
+			BPF_EXIT_INSN(),
+			BPF_MOV64_IMM(BPF_REG_0, 1),
+			BPF_JMP_IMM(BPF_JA, 0, 0, -6),
+			BPF_MOV64_IMM(BPF_REG_0, 3),
+			BPF_JMP_IMM(BPF_JA, 0, 0, -6),
+		},
+		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
+		.errstr = "back-edge from insn",
+		.result = REJECT,
+	},
+	{
+		"calls: conditional call 4",
+		.insns = {
+			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+				    offsetof(struct __sk_buff, mark)),
+			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
+			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
+			BPF_MOV64_IMM(BPF_REG_0, 1),
+			BPF_EXIT_INSN(),
+			BPF_MOV64_IMM(BPF_REG_0, 1),
+			BPF_JMP_IMM(BPF_JA, 0, 0, -5),
+			BPF_MOV64_IMM(BPF_REG_0, 3),
+			BPF_EXIT_INSN(),
+		},
+		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
+		.result = ACCEPT,
+	},
+	{
+		"calls: conditional call 5",
+		.insns = {
+			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+				    offsetof(struct __sk_buff, mark)),
+			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
+			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
+			BPF_MOV64_IMM(BPF_REG_0, 1),
+			BPF_EXIT_INSN(),
+			BPF_MOV64_IMM(BPF_REG_0, 1),
+			BPF_JMP_IMM(BPF_JA, 0, 0, -6),
+			BPF_MOV64_IMM(BPF_REG_0, 3),
+			BPF_EXIT_INSN(),
+		},
+		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
+		.errstr = "back-edge from insn",
+		.result = REJECT,
+	},
+	{
+		"calls: conditional call 6",
+		.insns = {
+			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
+			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, -2),
+			BPF_EXIT_INSN(),
+			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+				    offsetof(struct __sk_buff, mark)),
+			BPF_EXIT_INSN(),
+		},
+		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
+		.errstr = "back-edge from insn",
+		.result = REJECT,
+	},
+	{
+		"calls: using r0 returned by callee",
+		.insns = {
+			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
+			BPF_EXIT_INSN(),
+			BPF_MOV64_IMM(BPF_REG_0, 2),
+			BPF_EXIT_INSN(),
+		},
+		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
+		.result = ACCEPT,
+	},
+	{
+		"calls: using uninit r0 from callee",
+		.insns = {
+			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
+			BPF_EXIT_INSN(),
+			BPF_EXIT_INSN(),
+		},
+		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
+		.errstr = "!read_ok",
+		.result = REJECT,
+	},
+	{
+		"calls: callee is using r1",
+		.insns = {
+			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
+			BPF_EXIT_INSN(),
+			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+				    offsetof(struct __sk_buff, len)),
+			BPF_EXIT_INSN(),
+		},
+		.prog_type = BPF_PROG_TYPE_SCHED_ACT,
+		.result = ACCEPT,
+	},
+	{
+		"calls: callee using args1",
+		.insns = {
+			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
+			BPF_EXIT_INSN(),
+			BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
+			BPF_EXIT_INSN(),
+		},
+		.errstr_unpriv = "allowed for root only",
+		.result_unpriv = REJECT,
+		.result = ACCEPT,
+	},
+	{
+		"calls: callee using wrong args2",
+		.insns = {
+			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
+			BPF_EXIT_INSN(),
+			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
+			BPF_EXIT_INSN(),
+		},
+		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
+		.errstr = "R2 !read_ok",
+		.result = REJECT,
+	},
+	{
+		"calls: callee using two args",
+		.insns = {
+			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+			BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_6,
+				    offsetof(struct __sk_buff, len)),
+			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_6,
+				    offsetof(struct __sk_buff, len)),
+			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
+			BPF_EXIT_INSN(),
+			BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
+			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
+			BPF_EXIT_INSN(),
+		},
+		.errstr_unpriv = "allowed for root only",
+		.result_unpriv = REJECT,
+		.result = ACCEPT,
+	},
+	{
+		"calls: callee changing pkt pointers",
+		.insns = {
+			BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
+				    offsetof(struct xdp_md, data)),
+			BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
+				    offsetof(struct xdp_md, data_end)),
+			BPF_MOV64_REG(BPF_REG_8, BPF_REG_6),
+			BPF_ALU64_IMM(BPF_ADD, BPF_REG_8, 8),
+			BPF_JMP_REG(BPF_JGT, BPF_REG_8, BPF_REG_7, 2),
+			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
+			/* clear_all_pkt_pointers() has to walk all frames
+			 * to make sure that pkt pointers in the caller
+			 * are cleared when callee is calling a helper that
+			 * adjusts packet size
+			 */
+			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
+			BPF_MOV32_IMM(BPF_REG_0, 0),
+			BPF_EXIT_INSN(),
+			BPF_MOV64_IMM(BPF_REG_2, 0),
+			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+				     BPF_FUNC_xdp_adjust_head),
+			BPF_EXIT_INSN(),
+		},
+		.result = REJECT,
+		.errstr = "R6 invalid mem access 'inv'",
+		.prog_type = BPF_PROG_TYPE_XDP,
+	},
+	{
+		"calls: two calls with args",
+		.insns = {
+			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
+			BPF_EXIT_INSN(),
+			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 6),
+			BPF_MOV64_REG(BPF_REG_7, BPF_REG_0),
+			BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
+			BPF_ALU64_REG(BPF_ADD, BPF_REG_7, BPF_REG_0),
+			BPF_MOV64_REG(BPF_REG_0, BPF_REG_7),
+			BPF_EXIT_INSN(),
+			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+				    offsetof(struct __sk_buff, len)),
+			BPF_EXIT_INSN(),
+		},
+		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
+		.result = ACCEPT,
+	},
+	{
+		"calls: calls with stack arith",
+		.insns = {
+			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -64),
+			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
+			BPF_EXIT_INSN(),
+			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -64),
+			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
+			BPF_EXIT_INSN(),
+			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -64),
+			BPF_MOV64_IMM(BPF_REG_0, 42),
+			BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_0, 0),
+			BPF_EXIT_INSN(),
+		},
+		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
+		.result = ACCEPT,
+	},
+	{
+		"calls: calls with misaligned stack access",
+		.insns = {
+			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -63),
+			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
+			BPF_EXIT_INSN(),
+			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -61),
+			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
+			BPF_EXIT_INSN(),
+			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -63),
+			BPF_MOV64_IMM(BPF_REG_0, 42),
+			BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_0, 0),
+			BPF_EXIT_INSN(),
+		},
+		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
+		.flags = F_LOAD_WITH_STRICT_ALIGNMENT,
+		.errstr = "misaligned stack access",
+		.result = REJECT,
+	},
+	{
+		"calls: calls control flow, jump test",
+		.insns = {
+			BPF_MOV64_IMM(BPF_REG_0, 42),
+			BPF_JMP_IMM(BPF_JA, 0, 0, 2),
+			BPF_MOV64_IMM(BPF_REG_0, 43),
+			BPF_JMP_IMM(BPF_JA, 0, 0, 1),
+			BPF_JMP_IMM(BPF_JA, 0, 0, -3),
+			BPF_EXIT_INSN(),
+		},
+		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
+		.result = ACCEPT,
+	},
+	{
+		"calls: calls control flow, jump test 2",
+		.insns = {
+			BPF_MOV64_IMM(BPF_REG_0, 42),
+			BPF_JMP_IMM(BPF_JA, 0, 0, 2),
+			BPF_MOV64_IMM(BPF_REG_0, 43),
+			BPF_JMP_IMM(BPF_JA, 0, 0, 1),
+			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -3),
+			BPF_EXIT_INSN(),
+		},
+		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
+		.errstr = "jump out of range from insn 1 to 4",
+		.result = REJECT,
+	},
+	{
+		"calls: two calls with bad jump",
+		.insns = {
+			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
+			BPF_EXIT_INSN(),
+			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 6),
+			BPF_MOV64_REG(BPF_REG_7, BPF_REG_0),
+			BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
+			BPF_ALU64_REG(BPF_ADD, BPF_REG_7, BPF_REG_0),
+			BPF_MOV64_REG(BPF_REG_0, BPF_REG_7),
+			BPF_EXIT_INSN(),
+			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+				    offsetof(struct __sk_buff, len)),
+			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, -3),
+			BPF_EXIT_INSN(),
+		},
+		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
+		.errstr = "jump out of range from insn 11 to 9",
+		.result = REJECT,
+	},
+	{
+		"calls: recursive call. test1",
+		.insns = {
+			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
+			BPF_EXIT_INSN(),
+			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -1),
+			BPF_EXIT_INSN(),
+		},
+		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
+		.errstr = "back-edge",
+		.result = REJECT,
+	},
+	{
+		"calls: recursive call. test2",
+		.insns = {
+			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
+			BPF_EXIT_INSN(),
+			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -3),
+			BPF_EXIT_INSN(),
+		},
+		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
+		.errstr = "back-edge",
+		.result = REJECT,
+	},
+	{
+		"calls: unreachable code",
+		.insns = {
+			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
+			BPF_EXIT_INSN(),
+			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
+			BPF_EXIT_INSN(),
+			BPF_MOV64_IMM(BPF_REG_0, 0),
+			BPF_EXIT_INSN(),
+			BPF_MOV64_IMM(BPF_REG_0, 0),
+			BPF_EXIT_INSN(),
+		},
+		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
+		.errstr = "unreachable insn 6",
+		.result = REJECT,
+	},
+	{
+		"calls: invalid call",
+		.insns = {
+			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
+			BPF_EXIT_INSN(),
+			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -4),
+			BPF_EXIT_INSN(),
+		},
+		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
+		.errstr = "invalid destination",
+		.result = REJECT,
+	},
+	{
+		"calls: invalid call 2",
+		.insns = {
+			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
+			BPF_EXIT_INSN(),
+			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 0x7fffffff),
+			BPF_EXIT_INSN(),
+		},
+		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
+		.errstr = "invalid destination",
+		.result = REJECT,
+	},
+	{
+		"calls: jumping across function bodies. test1",
+		.insns = {
+			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
+			BPF_MOV64_IMM(BPF_REG_0, 0),
+			BPF_EXIT_INSN(),
+			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, -3),
+			BPF_EXIT_INSN(),
+		},
+		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
+		.errstr = "jump out of range",
+		.result = REJECT,
+	},
+	{
+		"calls: jumping across function bodies. test2",
+		.insns = {
+			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 3),
+			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
+			BPF_MOV64_IMM(BPF_REG_0, 0),
+			BPF_EXIT_INSN(),
+			BPF_EXIT_INSN(),
+		},
+		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
+		.errstr = "jump out of range",
+		.result = REJECT,
+	},
+	{
+		"calls: call without exit",
+		.insns = {
+			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
+			BPF_EXIT_INSN(),
+			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
+			BPF_EXIT_INSN(),
+			BPF_MOV64_IMM(BPF_REG_0, 0),
+			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, -2),
+		},
+		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
+		.errstr = "not an exit",
+		.result = REJECT,
+	},
+	{
+		"calls: call into middle of ld_imm64",
+		.insns = {
+			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
+			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
+			BPF_MOV64_IMM(BPF_REG_0, 0),
+			BPF_EXIT_INSN(),
+			BPF_LD_IMM64(BPF_REG_0, 0),
+			BPF_EXIT_INSN(),
+		},
+		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
+		.errstr = "last insn",
+		.result = REJECT,
+	},
+	{
+		"calls: call into middle of other call",
+		.insns = {
+			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
+			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
+			BPF_MOV64_IMM(BPF_REG_0, 0),
+			BPF_EXIT_INSN(),
+			BPF_MOV64_IMM(BPF_REG_0, 0),
+			BPF_MOV64_IMM(BPF_REG_0, 0),
+			BPF_EXIT_INSN(),
+		},
+		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
+		.errstr = "last insn",
+		.result = REJECT,
+	},
+	{
+		"calls: ld_abs with changing ctx data in callee",
+		.insns = {
+			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+			BPF_LD_ABS(BPF_B, 0),
+			BPF_LD_ABS(BPF_H, 0),
+			BPF_LD_ABS(BPF_W, 0),
+			BPF_MOV64_REG(BPF_REG_7, BPF_REG_6),
+			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 5),
+			BPF_MOV64_REG(BPF_REG_6, BPF_REG_7),
+			BPF_LD_ABS(BPF_B, 0),
+			BPF_LD_ABS(BPF_H, 0),
+			BPF_LD_ABS(BPF_W, 0),
+			BPF_EXIT_INSN(),
+			BPF_MOV64_IMM(BPF_REG_2, 1),
+			BPF_MOV64_IMM(BPF_REG_3, 2),
+			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+				     BPF_FUNC_skb_vlan_push),
+			BPF_EXIT_INSN(),
+		},
+		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
+		.errstr = "BPF_LD_[ABS|IND] instructions cannot be mixed",
+		.result = REJECT,
+	},
+	{
+		"calls: two calls with bad fallthrough",
+		.insns = {
+			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
+			BPF_EXIT_INSN(),
+			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 6),
+			BPF_MOV64_REG(BPF_REG_7, BPF_REG_0),
+			BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
+			BPF_ALU64_REG(BPF_ADD, BPF_REG_7, BPF_REG_0),
+			BPF_MOV64_REG(BPF_REG_0, BPF_REG_7),
+			BPF_MOV64_REG(BPF_REG_0, BPF_REG_0),
+			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+				    offsetof(struct __sk_buff, len)),
+			BPF_EXIT_INSN(),
+		},
+		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
+		.errstr = "not an exit",
+		.result = REJECT,
+	},
+	{
+		"calls: two calls with stack read",
+		.insns = {
+			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
+			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
+			BPF_EXIT_INSN(),
+			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 6),
+			BPF_MOV64_REG(BPF_REG_7, BPF_REG_0),
+			BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
+			BPF_ALU64_REG(BPF_ADD, BPF_REG_7, BPF_REG_0),
+			BPF_MOV64_REG(BPF_REG_0, BPF_REG_7),
+			BPF_EXIT_INSN(),
+			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 0),
+			BPF_EXIT_INSN(),
+		},
+		.prog_type = BPF_PROG_TYPE_XDP,
+		.result = ACCEPT,
+	},
+	{
+		"calls: two calls with stack write",
+		.insns = {
+			/* main prog */
+			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
+			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
+			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
+			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -16),
+			BPF_EXIT_INSN(),
+
+			/* subprog 1 */
+			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+			BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
+			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 7),
+			BPF_MOV64_REG(BPF_REG_8, BPF_REG_0),
+			BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
+			BPF_ALU64_REG(BPF_ADD, BPF_REG_8, BPF_REG_0),
+			BPF_MOV64_REG(BPF_REG_0, BPF_REG_8),
+			/* write into stack frame of main prog */
+			BPF_STX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
+			BPF_EXIT_INSN(),
+
+			/* subprog 2 */
+			/* read from stack frame of main prog */
+			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 0),
+			BPF_EXIT_INSN(),
+		},
+		.prog_type = BPF_PROG_TYPE_XDP,
+		.result = ACCEPT,
+	},
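+	/* combined stack usage of all frames in a call chain is capped at
+	 * MAX_BPF_STACK (512) bytes; the tests below probe that limit
+	 */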
+	{
+		"calls: stack overflow using two frames (pre-call access)",
+		.insns = {
+			/* prog 1 */
+			BPF_ST_MEM(BPF_B, BPF_REG_10, -300, 0),
+			BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1),
+			BPF_EXIT_INSN(),
+
+			/* prog 2 */
+			BPF_ST_MEM(BPF_B, BPF_REG_10, -300, 0),
+			BPF_MOV64_IMM(BPF_REG_0, 0),
+			BPF_EXIT_INSN(),
+		},
+		.prog_type = BPF_PROG_TYPE_XDP,
+		.errstr = "combined stack size",
+		.result = REJECT,
+	},
+	{
+		"calls: stack overflow using two frames (post-call access)",
+		.insns = {
+			/* prog 1 */
+			BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 2),
+			BPF_ST_MEM(BPF_B, BPF_REG_10, -300, 0),
+			BPF_EXIT_INSN(),
+
+			/* prog 2 */
+			BPF_ST_MEM(BPF_B, BPF_REG_10, -300, 0),
+			BPF_MOV64_IMM(BPF_REG_0, 0),
+			BPF_EXIT_INSN(),
+		},
+		.prog_type = BPF_PROG_TYPE_XDP,
+		.errstr = "combined stack size",
+		.result = REJECT,
+	},
+	{
+		"calls: stack depth check using three frames. test1",
+		.insns = {
+			/* main */
+			BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 4), /* call A */
+			BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 5), /* call B */
+			BPF_ST_MEM(BPF_B, BPF_REG_10, -32, 0),
+			BPF_MOV64_IMM(BPF_REG_0, 0),
+			BPF_EXIT_INSN(),
+			/* A */
+			BPF_ST_MEM(BPF_B, BPF_REG_10, -256, 0),
+			BPF_EXIT_INSN(),
+			/* B */
+			BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, -3), /* call A */
+			BPF_ST_MEM(BPF_B, BPF_REG_10, -64, 0),
+			BPF_EXIT_INSN(),
+		},
+		.prog_type = BPF_PROG_TYPE_XDP,
+		/* stack_main=32, stack_A=256, stack_B=64
+		 * and max(main+A, main+A+B) < 512
+		 */
+		.result = ACCEPT,
+	},
+	{
+		"calls: stack depth check using three frames. test2",
+		.insns = {
+			/* main */
+			BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 4), /* call A */
+			BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 5), /* call B */
+			BPF_ST_MEM(BPF_B, BPF_REG_10, -32, 0),
+			BPF_MOV64_IMM(BPF_REG_0, 0),
+			BPF_EXIT_INSN(),
+			/* A */
+			BPF_ST_MEM(BPF_B, BPF_REG_10, -64, 0),
+			BPF_EXIT_INSN(),
+			/* B */
+			BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, -3), /* call A */
+			BPF_ST_MEM(BPF_B, BPF_REG_10, -256, 0),
+			BPF_EXIT_INSN(),
+		},
+		.prog_type = BPF_PROG_TYPE_XDP,
+		/* stack_main=32, stack_A=64, stack_B=256
+		 * and max(main+A, main+A+B) < 512
+		 */
+		.result = ACCEPT,
+	},
+	{
+		"calls: stack depth check using three frames. test3",
+		.insns = {
+			/* main */
+			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+			BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 6), /* call A */
+			BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+			BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 8), /* call B */
+			BPF_JMP_IMM(BPF_JGE, BPF_REG_6, 0, 1),
+			BPF_ST_MEM(BPF_B, BPF_REG_10, -64, 0),
+			BPF_MOV64_IMM(BPF_REG_0, 0),
+			BPF_EXIT_INSN(),
+			/* A */
+			BPF_JMP_IMM(BPF_JLT, BPF_REG_1, 10, 1),
+			BPF_EXIT_INSN(),
+			BPF_ST_MEM(BPF_B, BPF_REG_10, -224, 0),
+			BPF_JMP_IMM(BPF_JA, 0, 0, -3),
+			/* B */
+			BPF_JMP_IMM(BPF_JGT, BPF_REG_1, 2, 1),
+			BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, -6), /* call A */
+			BPF_ST_MEM(BPF_B, BPF_REG_10, -256, 0),
+			BPF_EXIT_INSN(),
+		},
+		.prog_type = BPF_PROG_TYPE_XDP,
+		/* stack_main=64, stack_A=224, stack_B=256
+		 * and max(main+A, main+A+B) > 512
+		 */
+		.errstr = "combined stack",
+		.result = REJECT,
+	},
+	{
+		"calls: stack depth check using three frames. test4",
+		/* void main(void) {
+		 *   func1(0);
+		 *   func1(1);
+		 *   func2(1);
+		 * }
+		 * void func1(int alloc_or_recurse) {
+		 *   if (alloc_or_recurse) {
+		 *     frame_pointer[-300] = 1;
+		 *   } else {
+		 *     func2(alloc_or_recurse);
+		 *   }
+		 * }
+		 * void func2(int alloc_or_recurse) {
+		 *   if (alloc_or_recurse) {
+		 *     frame_pointer[-300] = 1;
+		 *   }
+		 * }
+		 */
+		.insns = {
+			/* main */
+			BPF_MOV64_IMM(BPF_REG_1, 0),
+			BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 6), /* call A */
+			BPF_MOV64_IMM(BPF_REG_1, 1),
+			BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 4), /* call A */
+			BPF_MOV64_IMM(BPF_REG_1, 1),
+			BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 7), /* call B */
+			BPF_MOV64_IMM(BPF_REG_0, 0),
+			BPF_EXIT_INSN(),
+			/* A */
+			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 2),
+			BPF_ST_MEM(BPF_B, BPF_REG_10, -300, 0),
+			BPF_EXIT_INSN(),
+			BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call B */
+			BPF_EXIT_INSN(),
+			/* B */
+			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
+			BPF_ST_MEM(BPF_B, BPF_REG_10, -300, 0),
+			BPF_EXIT_INSN(),
+		},
+		.prog_type = BPF_PROG_TYPE_XDP,
+		.result = REJECT,
+		.errstr = "combined stack",
+	},
+	{
+		"calls: stack depth check using three frames. test5",
+		.insns = {
+			/* main */
+			BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call A */
+			BPF_EXIT_INSN(),
+			/* A */
+			BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call B */
+			BPF_EXIT_INSN(),
+			/* B */
+			BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call C */
+			BPF_EXIT_INSN(),
+			/* C */
+			BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call D */
+			BPF_EXIT_INSN(),
+			/* D */
+			BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call E */
+			BPF_EXIT_INSN(),
+			/* E */
+			BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call F */
+			BPF_EXIT_INSN(),
+			/* F */
+			BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call G */
+			BPF_EXIT_INSN(),
+			/* G */
+			BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call H */
+			BPF_EXIT_INSN(),
+			/* H */
+			BPF_MOV64_IMM(BPF_REG_0, 0),
+			BPF_EXIT_INSN(),
+		},
+		.prog_type = BPF_PROG_TYPE_XDP,
+		.errstr = "call stack",
+		.result = REJECT,
+	},
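+	/* writing scalars through a caller stack pointer is allowed, but
+	 * spilling a pointer into the caller's frame is not
+	 */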
+	{
+		"calls: spill into caller stack frame",
+		.insns = {
+			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
+			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
+			BPF_EXIT_INSN(),
+			BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1, 0),
+			BPF_MOV64_IMM(BPF_REG_0, 0),
+			BPF_EXIT_INSN(),
+		},
+		.prog_type = BPF_PROG_TYPE_XDP,
+		.errstr = "cannot spill",
+		.result = REJECT,
+	},
+	{
+		"calls: write into caller stack frame",
+		.insns = {
+			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
+			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
+			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
+			BPF_EXIT_INSN(),
+			BPF_ST_MEM(BPF_DW, BPF_REG_1, 0, 42),
+			BPF_MOV64_IMM(BPF_REG_0, 0),
+			BPF_EXIT_INSN(),
+		},
+		.prog_type = BPF_PROG_TYPE_XDP,
+		.result = ACCEPT,
+	},
+	{
+		"calls: write into callee stack frame",
+		.insns = {
+			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
+			BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 42),
+			BPF_EXIT_INSN(),
+			BPF_MOV64_REG(BPF_REG_0, BPF_REG_10),
+			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, -8),
+			BPF_EXIT_INSN(),
+		},
+		.prog_type = BPF_PROG_TYPE_XDP,
+		.errstr = "cannot return stack pointer",
+		.result = REJECT,
+	},
+	{
+		"calls: two calls with stack write and void return",
+		.insns = {
+			/* main prog */
+			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
+			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
+			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
+			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -16),
+			BPF_EXIT_INSN(),
+
+			/* subprog 1 */
+			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+			BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
+			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
+			BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
+			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
+			BPF_EXIT_INSN(),
+
+			/* subprog 2 */
+			/* write into stack frame of main prog */
+			BPF_ST_MEM(BPF_DW, BPF_REG_1, 0, 0),
+			BPF_EXIT_INSN(), /* void return */
+		},
+		.prog_type = BPF_PROG_TYPE_XDP,
+		.result = ACCEPT,
+	},
+	{
+		"calls: ambiguous return value",
+		.insns = {
+			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 5),
+			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+			BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
+			BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+			BPF_EXIT_INSN(),
+			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
+			BPF_MOV64_IMM(BPF_REG_0, 0),
+			BPF_EXIT_INSN(),
+		},
+		.errstr_unpriv = "allowed for root only",
+		.result_unpriv = REJECT,
+		.errstr = "R0 !read_ok",
+		.result = REJECT,
+	},
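+	/* a subprog may store a map value pointer into the caller's stack
+	 * frame; the caller can then dereference it after a NULL check
+	 */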
+	{
+		"calls: two calls that return map_value",
+		.insns = {
+			/* main prog */
+			/* pass fp-16, fp-8 into a function */
+			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
+			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
+			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 8),
+
+			/* fetch map_value_ptr from the stack of this function */
+			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
+			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
+			/* write into map value */
+			BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
+			/* fetch second map_value_ptr from the stack */
+			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -16),
+			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
+			/* write into map value */
+			BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
+			BPF_MOV64_IMM(BPF_REG_0, 0),
+			BPF_EXIT_INSN(),
+
+			/* subprog 1 */
+			/* call 3rd function twice */
+			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+			BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
+			/* first time with fp-8 */
+			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
+			BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
+			/* second time with fp-16 */
+			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
+			BPF_EXIT_INSN(),
+
+			/* subprog 2 */
+			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+			/* lookup from map */
+			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+			BPF_LD_MAP_FD(BPF_REG_1, 0),
+			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+				     BPF_FUNC_map_lookup_elem),
+			/* write map_value_ptr into stack frame of main prog */
+			BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
+			BPF_MOV64_IMM(BPF_REG_0, 0),
+			BPF_EXIT_INSN(), /* return 0 */
+		},
+		.prog_type = BPF_PROG_TYPE_XDP,
+		.fixup_map1 = { 23 },
+		.result = ACCEPT,
+	},
+	{
+		"calls: two calls that return map_value with bool condition",
+		.insns = {
+			/* main prog */
+			/* pass fp-16, fp-8 into a function */
+			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
+			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
+			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
+			BPF_MOV64_IMM(BPF_REG_0, 0),
+			BPF_EXIT_INSN(),
+
+			/* subprog 1 */
+			/* call 3rd function twice */
+			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+			BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
+			/* first time with fp-8 */
+			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 9),
+			BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 1, 2),
+			/* fetch map_value_ptr from the stack of this function */
+			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
+			/* write into map value */
+			BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
+			BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
+			/* second time with fp-16 */
+			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
+			BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 1, 2),
+			/* fetch second map_value_ptr from the stack */
+			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_7, 0),
+			/* write into map value */
+			BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
+			BPF_EXIT_INSN(),
+
+			/* subprog 2 */
+			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+			/* lookup from map */
+			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+			BPF_LD_MAP_FD(BPF_REG_1, 0),
+			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+				     BPF_FUNC_map_lookup_elem),
+			BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
+			BPF_MOV64_IMM(BPF_REG_0, 0),
+			BPF_EXIT_INSN(), /* return 0 */
+			/* write map_value_ptr into stack frame of main prog */
+			BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
+			BPF_MOV64_IMM(BPF_REG_0, 1),
+			BPF_EXIT_INSN(), /* return 1 */
+		},
+		.prog_type = BPF_PROG_TYPE_XDP,
+		.fixup_map1 = { 23 },
+		.result = ACCEPT,
+	},
+	{
+		"calls: two calls that return map_value with incorrect bool check",
+		.insns = {
+			/* main prog */
+			/* pass fp-16, fp-8 into a function */
+			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
+			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
+			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
+			BPF_MOV64_IMM(BPF_REG_0, 0),
+			BPF_EXIT_INSN(),
+
+			/* subprog 1 */
+			/* call 3rd function twice */
+			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+			BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
+			/* first time with fp-8 */
+			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 9),
+			BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 1, 2),
+			/* fetch map_value_ptr from the stack of this function */
+			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
+			/* write into map value */
+			BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
+			BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
+			/* second time with fp-16 */
+			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
+			BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
+			/* fetch second map_value_ptr from the stack */
+			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_7, 0),
+			/* write into map value */
+			BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
+			BPF_EXIT_INSN(),
+
+			/* subprog 2 */
+			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+			/* lookup from map */
+			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+			BPF_LD_MAP_FD(BPF_REG_1, 0),
+			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+				     BPF_FUNC_map_lookup_elem),
+			BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
+			BPF_MOV64_IMM(BPF_REG_0, 0),
+			BPF_EXIT_INSN(), /* return 0 */
+			/* write map_value_ptr into stack frame of main prog */
+			BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
+			BPF_MOV64_IMM(BPF_REG_0, 1),
+			BPF_EXIT_INSN(), /* return 1 */
+		},
+		.prog_type = BPF_PROG_TYPE_XDP,
+		.fixup_map1 = { 23 },
+		.result = REJECT,
+		.errstr = "invalid read from stack off -16+0 size 8",
+	},
+	{
+		"calls: two calls that receive map_value via arg=ptr_stack_of_caller. test1",
+		.insns = {
+			/* main prog */
+			/* pass fp-16, fp-8 into a function */
+			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
+			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
+			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
+			BPF_MOV64_IMM(BPF_REG_0, 0),
+			BPF_EXIT_INSN(),
+
+			/* subprog 1 */
+			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+			BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
+			/* 1st lookup from map */
+			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+			BPF_LD_MAP_FD(BPF_REG_1, 0),
+			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+				     BPF_FUNC_map_lookup_elem),
+			BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
+			BPF_MOV64_IMM(BPF_REG_8, 0),
+			BPF_JMP_IMM(BPF_JA, 0, 0, 2),
+			/* write map_value_ptr into stack frame of main prog at fp-8 */
+			BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
+			BPF_MOV64_IMM(BPF_REG_8, 1),
+
+			/* 2nd lookup from map */
+			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), /* 20 */
+			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+			BPF_LD_MAP_FD(BPF_REG_1, 0),
+			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, /* 24 */
+				     BPF_FUNC_map_lookup_elem),
+			BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
+			BPF_MOV64_IMM(BPF_REG_9, 0),
+			BPF_JMP_IMM(BPF_JA, 0, 0, 2),
+			/* write map_value_ptr into stack frame of main prog at fp-16 */
+			BPF_STX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
+			BPF_MOV64_IMM(BPF_REG_9, 1),
+
+			/* call 3rd func with fp-8, 0|1, fp-16, 0|1 */
+			BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), /* 30 */
+			BPF_MOV64_REG(BPF_REG_2, BPF_REG_8),
+			BPF_MOV64_REG(BPF_REG_3, BPF_REG_7),
+			BPF_MOV64_REG(BPF_REG_4, BPF_REG_9),
+			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),  /* 34 */
+			BPF_EXIT_INSN(),
+
+			/* subprog 2 */
+			/* if arg2 == 1 do *arg1 = 0 */
+			BPF_JMP_IMM(BPF_JNE, BPF_REG_2, 1, 2),
+			/* fetch map_value_ptr from the stack of this function */
+			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 0),
+			/* write into map value */
+			BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
+
+			/* if arg4 == 1 do *arg3 = 0 */
+			BPF_JMP_IMM(BPF_JNE, BPF_REG_4, 1, 2),
+			/* fetch map_value_ptr from the stack of this function */
+			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_3, 0),
+			/* write into map value */
+			BPF_ST_MEM(BPF_DW, BPF_REG_0, 2, 0),
+			BPF_EXIT_INSN(),
+		},
+		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
+		.fixup_map1 = { 12, 22 },
+		.result = REJECT,
+		.errstr = "invalid access to map value, value_size=8 off=2 size=8",
+	},
+	{
+		"calls: two calls that receive map_value via arg=ptr_stack_of_caller. test2",
+		.insns = {
+			/* main prog */
+			/* pass fp-16, fp-8 into a function */
+			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
+			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
+			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
+			BPF_MOV64_IMM(BPF_REG_0, 0),
+			BPF_EXIT_INSN(),
+
+			/* subprog 1 */
+			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+			BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
+			/* 1st lookup from map */
+			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+			BPF_LD_MAP_FD(BPF_REG_1, 0),
+			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+				     BPF_FUNC_map_lookup_elem),
+			BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
+			BPF_MOV64_IMM(BPF_REG_8, 0),
+			BPF_JMP_IMM(BPF_JA, 0, 0, 2),
+			/* write map_value_ptr into stack frame of main prog at fp-8 */
+			BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
+			BPF_MOV64_IMM(BPF_REG_8, 1),
+
+			/* 2nd lookup from map */
+			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), /* 20 */
+			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+			BPF_LD_MAP_FD(BPF_REG_1, 0),
+			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, /* 24 */
+				     BPF_FUNC_map_lookup_elem),
+			BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
+			BPF_MOV64_IMM(BPF_REG_9, 0),
+			BPF_JMP_IMM(BPF_JA, 0, 0, 2),
+			/* write map_value_ptr into stack frame of main prog at fp-16 */
+			BPF_STX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
+			BPF_MOV64_IMM(BPF_REG_9, 1),
+
+			/* call 3rd func with fp-8, 0|1, fp-16, 0|1 */
+			BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), /* 30 */
+			BPF_MOV64_REG(BPF_REG_2, BPF_REG_8),
+			BPF_MOV64_REG(BPF_REG_3, BPF_REG_7),
+			BPF_MOV64_REG(BPF_REG_4, BPF_REG_9),
+			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),  /* 34 */
+			BPF_EXIT_INSN(),
+
+			/* subprog 2 */
+			/* if arg2 == 1 do *arg1 = 0 */
+			BPF_JMP_IMM(BPF_JNE, BPF_REG_2, 1, 2),
+			/* fetch map_value_ptr from the stack of this function */
+			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 0),
+			/* write into map value */
+			BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
+
+			/* if arg4 == 1 do *arg3 = 0 */
+			BPF_JMP_IMM(BPF_JNE, BPF_REG_4, 1, 2),
+			/* fetch map_value_ptr from the stack of this function */
+			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_3, 0),
+			/* write into map value */
+			BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
+			BPF_EXIT_INSN(),
+		},
+		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
+		.fixup_map1 = { 12, 22 },
+		.result = ACCEPT,
+	},
+	{
+		"calls: two jumps that receive map_value via arg=ptr_stack_of_jumper. test3",
+		.insns = {
+			/* main prog */
+			/* pass fp-16, fp-8 into a function */
+			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
+			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
+			BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 2),
+			BPF_MOV64_IMM(BPF_REG_0, 0),
+			BPF_EXIT_INSN(),
+
+			/* subprog 1 */
+			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+			BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
+			/* 1st lookup from map */
+			BPF_ST_MEM(BPF_DW, BPF_REG_10, -24, 0),
+			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -24),
+			BPF_LD_MAP_FD(BPF_REG_1, 0),
+			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+				     BPF_FUNC_map_lookup_elem),
+			BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
+			BPF_MOV64_IMM(BPF_REG_8, 0),
+			BPF_JMP_IMM(BPF_JA, 0, 0, 2),
+			/* write map_value_ptr into stack frame of main prog at fp-8 */
+			BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
+			BPF_MOV64_IMM(BPF_REG_8, 1),
+
+			/* 2nd lookup from map */
+			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -24),
+			BPF_LD_MAP_FD(BPF_REG_1, 0),
+			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+				     BPF_FUNC_map_lookup_elem),
+			BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
+			BPF_MOV64_IMM(BPF_REG_9, 0), /* 26 */
+			BPF_JMP_IMM(BPF_JA, 0, 0, 2),
+			/* write map_value_ptr into stack frame of main prog at fp-16 */
+			BPF_STX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
+			BPF_MOV64_IMM(BPF_REG_9, 1),
+
+			/* call 3rd func with fp-8, 0|1, fp-16, 0|1 */
+			BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), /* 30 */
+			BPF_MOV64_REG(BPF_REG_2, BPF_REG_8),
+			BPF_MOV64_REG(BPF_REG_3, BPF_REG_7),
+			BPF_MOV64_REG(BPF_REG_4, BPF_REG_9),
+			BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 1), /* 34 */
+			BPF_JMP_IMM(BPF_JA, 0, 0, -30),
+
+			/* subprog 2 */
+			/* if arg2 == 1 do *arg1 = 0 */
+			BPF_JMP_IMM(BPF_JNE, BPF_REG_2, 1, 2),
+			/* fetch map_value_ptr from the stack of this function */
+			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 0),
+			/* write into map value */
+			BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
+
+			/* if arg4 == 1 do *arg3 = 0 */
+			BPF_JMP_IMM(BPF_JNE, BPF_REG_4, 1, 2),
+			/* fetch map_value_ptr from the stack of this function */
+			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_3, 0),
+			/* write into map value */
+			BPF_ST_MEM(BPF_DW, BPF_REG_0, 2, 0),
+			BPF_JMP_IMM(BPF_JA, 0, 0, -8),
+		},
+		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
+		.fixup_map1 = { 12, 22 },
+		.result = REJECT,
+		.errstr = "invalid access to map value, value_size=8 off=2 size=8",
+	},
+	{
+		"calls: two calls that receive map_value_ptr_or_null via arg. test1",
+		.insns = {
+			/* main prog */
+			/* pass fp-16, fp-8 into a function */
+			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
+			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
+			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
+			BPF_MOV64_IMM(BPF_REG_0, 0),
+			BPF_EXIT_INSN(),
+
+			/* subprog 1 */
+			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+			BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
+			/* 1st lookup from map */
+			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+			BPF_LD_MAP_FD(BPF_REG_1, 0),
+			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+				     BPF_FUNC_map_lookup_elem),
+			/* write map_value_ptr_or_null into stack frame of main prog at fp-8 */
+			BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
+			BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
+			BPF_MOV64_IMM(BPF_REG_8, 0),
+			BPF_JMP_IMM(BPF_JA, 0, 0, 1),
+			BPF_MOV64_IMM(BPF_REG_8, 1),
+
+			/* 2nd lookup from map */
+			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+			BPF_LD_MAP_FD(BPF_REG_1, 0),
+			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+				     BPF_FUNC_map_lookup_elem),
+			/* write map_value_ptr_or_null into stack frame of main prog at fp-16 */
+			BPF_STX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
+			BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
+			BPF_MOV64_IMM(BPF_REG_9, 0),
+			BPF_JMP_IMM(BPF_JA, 0, 0, 1),
+			BPF_MOV64_IMM(BPF_REG_9, 1),
+
+			/* call 3rd func with fp-8, 0|1, fp-16, 0|1 */
+			BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+			BPF_MOV64_REG(BPF_REG_2, BPF_REG_8),
+			BPF_MOV64_REG(BPF_REG_3, BPF_REG_7),
+			BPF_MOV64_REG(BPF_REG_4, BPF_REG_9),
+			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
+			BPF_EXIT_INSN(),
+
+			/* subprog 2 */
+			/* if arg2 == 1 do *arg1 = 0 */
+			BPF_JMP_IMM(BPF_JNE, BPF_REG_2, 1, 2),
+			/* fetch map_value_ptr from the stack of this function */
+			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 0),
+			/* write into map value */
+			BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
+
+			/* if arg4 == 1 do *arg3 = 0 */
+			BPF_JMP_IMM(BPF_JNE, BPF_REG_4, 1, 2),
+			/* fetch map_value_ptr from the stack of this function */
+			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_3, 0),
+			/* write into map value */
+			BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
+			BPF_EXIT_INSN(),
+		},
+		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
+		.fixup_map1 = { 12, 22 },
+		.result = ACCEPT,
+	},
+	{
+		"calls: two calls that receive map_value_ptr_or_null via arg. test2",
+		.insns = {
+			/* main prog */
+			/* pass fp-16, fp-8 into a function */
+			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
+			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
+			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
+			BPF_MOV64_IMM(BPF_REG_0, 0),
+			BPF_EXIT_INSN(),
+
+			/* subprog 1 */
+			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+			BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
+			/* 1st lookup from map */
+			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+			BPF_LD_MAP_FD(BPF_REG_1, 0),
+			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+				     BPF_FUNC_map_lookup_elem),
+			/* write map_value_ptr_or_null into stack frame of main prog at fp-8 */
+			BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
+			BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
+			BPF_MOV64_IMM(BPF_REG_8, 0),
+			BPF_JMP_IMM(BPF_JA, 0, 0, 1),
+			BPF_MOV64_IMM(BPF_REG_8, 1),
+
+			/* 2nd lookup from map */
+			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+			BPF_LD_MAP_FD(BPF_REG_1, 0),
+			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+				     BPF_FUNC_map_lookup_elem),
+			/* write map_value_ptr_or_null into stack frame of main prog at fp-16 */
+			BPF_STX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
+			BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
+			BPF_MOV64_IMM(BPF_REG_9, 0),
+			BPF_JMP_IMM(BPF_JA, 0, 0, 1),
+			BPF_MOV64_IMM(BPF_REG_9, 1),
+
+			/* call 3rd func with fp-8, 0|1, fp-16, 0|1 */
+			BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+			BPF_MOV64_REG(BPF_REG_2, BPF_REG_8),
+			BPF_MOV64_REG(BPF_REG_3, BPF_REG_7),
+			BPF_MOV64_REG(BPF_REG_4, BPF_REG_9),
+			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
+			BPF_EXIT_INSN(),
+
+			/* subprog 2 */
+			/* if arg2 == 1 do *arg1 = 0 */
+			BPF_JMP_IMM(BPF_JNE, BPF_REG_2, 1, 2),
+			/* fetch map_value_ptr from the stack of this function */
+			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 0),
+			/* write into map value */
+			BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
+
+			/* if arg4 == 0 do *arg3 = 0 */
+			BPF_JMP_IMM(BPF_JNE, BPF_REG_4, 0, 2),
+			/* fetch map_value_ptr from the stack of this function */
+			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_3, 0),
+			/* write into map value */
+			BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
+			BPF_EXIT_INSN(),
+		},
+		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
+		.fixup_map1 = { 12, 22 },
+		.result = REJECT,
+		.errstr = "R0 invalid mem access 'inv'",
+	},
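+	/* a callee may spill a pkt_ptr into the caller's stack; reading it
+	 * back is only valid on paths where the range check is known to
+	 * have succeeded
+	 */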
+	{
+		"calls: pkt_ptr spill into caller stack",
+		.insns = {
+			BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
+			BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
+			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
+			BPF_EXIT_INSN(),
+
+			/* subprog 1 */
+			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+				    offsetof(struct __sk_buff, data)),
+			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+				    offsetof(struct __sk_buff, data_end)),
+			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
+			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
+			/* spill unchecked pkt_ptr into stack of caller */
+			BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
+			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 2),
+			/* now the pkt range is verified, read pkt_ptr from stack */
+			BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_4, 0),
+			/* write 4 bytes into packet */
+			BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
+			BPF_EXIT_INSN(),
+		},
+		.result = ACCEPT,
+		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
+	},
+	{
+		"calls: pkt_ptr spill into caller stack 2",
+		.insns = {
+			BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
+			BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
+			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
+			/* Marking is still kept, but not in all cases safe. */
+			BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
+			BPF_ST_MEM(BPF_W, BPF_REG_4, 0, 0),
+			BPF_EXIT_INSN(),
+
+			/* subprog 1 */
+			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+				    offsetof(struct __sk_buff, data)),
+			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+				    offsetof(struct __sk_buff, data_end)),
+			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
+			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
+			/* spill unchecked pkt_ptr into stack of caller */
+			BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
+			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 2),
+			/* now the pkt range is verified, read pkt_ptr from stack */
+			BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_4, 0),
+			/* write 4 bytes into packet */
+			BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
+			BPF_EXIT_INSN(),
+		},
+		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
+		.errstr = "invalid access to packet",
+		.result = REJECT,
+	},
+	{
+		"calls: pkt_ptr spill into caller stack 3",
+		.insns = {
+			BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
+			BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
+			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
+			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
+			/* Marking is still kept and safe here. */
+			BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
+			BPF_ST_MEM(BPF_W, BPF_REG_4, 0, 0),
+			BPF_EXIT_INSN(),
+
+			/* subprog 1 */
+			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+				    offsetof(struct __sk_buff, data)),
+			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+				    offsetof(struct __sk_buff, data_end)),
+			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
+			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
+			/* spill unchecked pkt_ptr into stack of caller */
+			BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
+			BPF_MOV64_IMM(BPF_REG_5, 0),
+			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 3),
+			BPF_MOV64_IMM(BPF_REG_5, 1),
+			/* now the pkt range is verified, read pkt_ptr from stack */
+			BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_4, 0),
+			/* write 4 bytes into packet */
+			BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
+			BPF_MOV64_REG(BPF_REG_0, BPF_REG_5),
+			BPF_EXIT_INSN(),
+		},
+		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
+		.result = ACCEPT,
+	},
+	{
+		"calls: pkt_ptr spill into caller stack 4",
+		.insns = {
+			BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
+			BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
+			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
+			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
+			/* Check marking propagated. */
+			BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
+			BPF_ST_MEM(BPF_W, BPF_REG_4, 0, 0),
+			BPF_EXIT_INSN(),
+
+			/* subprog 1 */
+			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+				    offsetof(struct __sk_buff, data)),
+			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+				    offsetof(struct __sk_buff, data_end)),
+			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
+			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
+			/* spill unchecked pkt_ptr into stack of caller */
+			BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
+			BPF_MOV64_IMM(BPF_REG_5, 0),
+			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 2),
+			BPF_MOV64_IMM(BPF_REG_5, 1),
+			/* don't read back pkt_ptr from stack here */
+			/* write 4 bytes into packet */
+			BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
+			BPF_MOV64_REG(BPF_REG_0, BPF_REG_5),
+			BPF_EXIT_INSN(),
+		},
+		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
+		.result = ACCEPT,
+	},
+	{
+		"calls: pkt_ptr spill into caller stack 5",
+		.insns = {
+			BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
+			BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
+			BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_1, 0),
+			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
+			BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
+			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_4, 0),
+			BPF_EXIT_INSN(),
+
+			/* subprog 1 */
+			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+				    offsetof(struct __sk_buff, data)),
+			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+				    offsetof(struct __sk_buff, data_end)),
+			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
+			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
+			BPF_MOV64_IMM(BPF_REG_5, 0),
+			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 3),
+			/* spill checked pkt_ptr into stack of caller */
+			BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
+			BPF_MOV64_IMM(BPF_REG_5, 1),
+			/* don't read back pkt_ptr from stack here */
+			/* write 4 bytes into packet */
+			BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
+			BPF_MOV64_REG(BPF_REG_0, BPF_REG_5),
+			BPF_EXIT_INSN(),
+		},
+		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
+		.errstr = "same insn cannot be used with different",
+		.result = REJECT,
+	},
+	{
+		"calls: pkt_ptr spill into caller stack 6",
+		.insns = {
+			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+				    offsetof(struct __sk_buff, data_end)),
+			BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
+			BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
+			BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
+			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
+			BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
+			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_4, 0),
+			BPF_EXIT_INSN(),
+
+			/* subprog 1 */
+			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+				    offsetof(struct __sk_buff, data)),
+			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+				    offsetof(struct __sk_buff, data_end)),
+			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
+			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
+			BPF_MOV64_IMM(BPF_REG_5, 0),
+			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 3),
+			/* spill checked pkt_ptr into stack of caller */
+			BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
+			BPF_MOV64_IMM(BPF_REG_5, 1),
+			/* don't read back pkt_ptr from stack here */
+			/* write 4 bytes into packet */
+			BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
+			BPF_MOV64_REG(BPF_REG_0, BPF_REG_5),
+			BPF_EXIT_INSN(),
+		},
+		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
+		.errstr = "R4 invalid mem access",
+		.result = REJECT,
+	},
+	{
+		"calls: pkt_ptr spill into caller stack 7",
+		.insns = {
+			BPF_MOV64_IMM(BPF_REG_2, 0),
+			BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
+			BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
+			BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
+			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
+			BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
+			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_4, 0),
+			BPF_EXIT_INSN(),
+
+			/* subprog 1 */
+			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+				    offsetof(struct __sk_buff, data)),
+			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+				    offsetof(struct __sk_buff, data_end)),
+			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
+			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
+			BPF_MOV64_IMM(BPF_REG_5, 0),
+			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 3),
+			/* spill checked pkt_ptr into stack of caller */
+			BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
+			BPF_MOV64_IMM(BPF_REG_5, 1),
+			/* don't read back pkt_ptr from stack here */
+			/* write 4 bytes into packet */
+			BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
+			BPF_MOV64_REG(BPF_REG_0, BPF_REG_5),
+			BPF_EXIT_INSN(),
+		},
+		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
+		.errstr = "R4 invalid mem access",
+		.result = REJECT,
+	},
+	{
+		"calls: pkt_ptr spill into caller stack 8",
+		.insns = {
+			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+				    offsetof(struct __sk_buff, data)),
+			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+				    offsetof(struct __sk_buff, data_end)),
+			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
+			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
+			BPF_JMP_REG(BPF_JLE, BPF_REG_0, BPF_REG_3, 1),
+			BPF_EXIT_INSN(),
+			BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
+			BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
+			BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
+			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
+			BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
+			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_4, 0),
+			BPF_EXIT_INSN(),
+
+			/* subprog 1 */
+			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+				    offsetof(struct __sk_buff, data)),
+			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+				    offsetof(struct __sk_buff, data_end)),
+			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
+			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
+			BPF_MOV64_IMM(BPF_REG_5, 0),
+			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 3),
+			/* spill checked pkt_ptr into stack of caller */
+			BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
+			BPF_MOV64_IMM(BPF_REG_5, 1),
+			/* don't read back pkt_ptr from stack here */
+			/* write 4 bytes into packet */
+			BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
+			BPF_MOV64_REG(BPF_REG_0, BPF_REG_5),
+			BPF_EXIT_INSN(),
+		},
+		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
+		.result = ACCEPT,
+	},
+	{
+		"calls: pkt_ptr spill into caller stack 9",
+		.insns = {
+			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+				    offsetof(struct __sk_buff, data)),
+			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+				    offsetof(struct __sk_buff, data_end)),
+			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
+			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
+			BPF_JMP_REG(BPF_JLE, BPF_REG_0, BPF_REG_3, 1),
+			BPF_EXIT_INSN(),
+			BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
+			BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
+			BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
+			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
+			BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
+			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_4, 0),
+			BPF_EXIT_INSN(),
+
+			/* subprog 1 */
+			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+				    offsetof(struct __sk_buff, data)),
+			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+				    offsetof(struct __sk_buff, data_end)),
+			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
+			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
+			BPF_MOV64_IMM(BPF_REG_5, 0),
+			/* spill unchecked pkt_ptr into stack of caller */
+			BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
+			BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 2),
+			BPF_MOV64_IMM(BPF_REG_5, 1),
+			/* don't read back pkt_ptr from stack here */
+			/* write 4 bytes into packet */
+			BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
+			BPF_MOV64_REG(BPF_REG_0, BPF_REG_5),
+			BPF_EXIT_INSN(),
+		},
+		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
+		.errstr = "invalid access to packet",
+		.result = REJECT,
+	},
+	{
+		"calls: caller stack init to zero or map_value_or_null",
+		.insns = {
+			BPF_MOV64_IMM(BPF_REG_0, 0),
+			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
+			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
+			/* fetch map_value_or_null or const_zero from stack */
+			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
+			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
+			/* store into map_value */
+			BPF_ST_MEM(BPF_W, BPF_REG_0, 0, 0),
+			BPF_EXIT_INSN(),
+
+			/* subprog 1 */
+			/* if (ctx == 0) return; */
+			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 8),
+			/* else bpf_map_lookup() and *(fp - 8) = r0 */
+			BPF_MOV64_REG(BPF_REG_6, BPF_REG_2),
+			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+			BPF_LD_MAP_FD(BPF_REG_1, 0),
+			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+				     BPF_FUNC_map_lookup_elem),
+			/* write map_value_ptr_or_null into stack frame of main prog at fp-8 */
+			BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
+			BPF_EXIT_INSN(),
+		},
+		.fixup_map1 = { 13 },
+		.result = ACCEPT,
+		.prog_type = BPF_PROG_TYPE_XDP,
+	},
+	{
+		"calls: stack init to zero and pruning",
+		.insns = {
+			/* first make allocated_stack 16 bytes */
+			BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, 0),
+			/* now fork the execution such that the false branch
+			 * of JGT insn will be verified second and it skips zero
+			 * init of fp-8 stack slot. If stack liveness marking
+			 * is missing live_read marks from call map_lookup
+			 * processing then pruning will incorrectly assume
+			 * that fp-8 stack slot was unused in the fall-through
+			 * branch and will accept the program incorrectly
+			 */
+			BPF_JMP_IMM(BPF_JGT, BPF_REG_1, 2, 2),
+			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+			BPF_JMP_IMM(BPF_JA, 0, 0, 0),
+			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+			BPF_LD_MAP_FD(BPF_REG_1, 0),
+			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+				     BPF_FUNC_map_lookup_elem),
+			BPF_EXIT_INSN(),
+		},
+		.fixup_map2 = { 6 },
+		.errstr = "invalid indirect read from stack off -8+0 size 8",
+		.result = REJECT,
+		.prog_type = BPF_PROG_TYPE_XDP,
+	},
+	{
+		"search pruning: all branches should be verified (nop operation)",
+		.insns = {
+			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
+			BPF_LD_MAP_FD(BPF_REG_1, 0),
+			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 11),
+			BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_0, 0),
+			BPF_JMP_IMM(BPF_JEQ, BPF_REG_3, 0xbeef, 2),
+			BPF_MOV64_IMM(BPF_REG_4, 0),
+			BPF_JMP_A(1),
+			BPF_MOV64_IMM(BPF_REG_4, 1),
+			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_4, -16),
+			BPF_EMIT_CALL(BPF_FUNC_ktime_get_ns),
+			BPF_LDX_MEM(BPF_DW, BPF_REG_5, BPF_REG_10, -16),
+			BPF_JMP_IMM(BPF_JEQ, BPF_REG_5, 0, 2),
+			BPF_MOV64_IMM(BPF_REG_6, 0),
+			BPF_ST_MEM(BPF_DW, BPF_REG_6, 0, 0xdead),
+			BPF_EXIT_INSN(),
+		},
+		.fixup_map1 = { 3 },
+		.errstr = "R6 invalid mem access 'inv'",
+		.result = REJECT,
+		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
+	},
+	{
+		"search pruning: all branches should be verified (invalid stack access)",
+		.insns = {
+			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
+			BPF_LD_MAP_FD(BPF_REG_1, 0),
+			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 8),
+			BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_0, 0),
+			BPF_MOV64_IMM(BPF_REG_4, 0),
+			BPF_JMP_IMM(BPF_JEQ, BPF_REG_3, 0xbeef, 2),
+			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_4, -16),
+			BPF_JMP_A(1),
+			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_4, -24),
+			BPF_EMIT_CALL(BPF_FUNC_ktime_get_ns),
+			BPF_LDX_MEM(BPF_DW, BPF_REG_5, BPF_REG_10, -16),
+			BPF_EXIT_INSN(),
+		},
+		.fixup_map1 = { 3 },
+		.errstr = "invalid read from stack off -16+0 size 8",
+		.result = REJECT,
+		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
+	},
 };
 
 static int probe_filter_length(const struct bpf_insn *fp)
diff --git a/tools/testing/selftests/bpf/test_xdp_noinline.c b/tools/testing/selftests/bpf/test_xdp_noinline.c
new file mode 100644
index 0000000..5e4aac7
--- /dev/null
+++ b/tools/testing/selftests/bpf/test_xdp_noinline.c
@@ -0,0 +1,833 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) 2017 Facebook
+#include <stddef.h>
+#include <stdbool.h>
+#include <string.h>
+#include <linux/pkt_cls.h>
+#include <linux/bpf.h>
+#include <linux/in.h>
+#include <linux/if_ether.h>
+#include <linux/ip.h>
+#include <linux/ipv6.h>
+#include <linux/icmp.h>
+#include <linux/icmpv6.h>
+#include <linux/tcp.h>
+#include <linux/udp.h>
+#include "bpf_helpers.h"
+
+#define bpf_printk(fmt, ...)				\
+({							\
+	char ____fmt[] = fmt;				\
+	bpf_trace_printk(____fmt, sizeof(____fmt),	\
+			##__VA_ARGS__);			\
+})
+
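+/* rotate left: masking the right-shift count with 31 avoids an
+ * undefined 32-bit shift when shift == 0
+ */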
+static __u32 rol32(__u32 word, unsigned int shift)
+{
+	return (word << shift) | (word >> ((-shift) & 31));
+}
+
+/* copy-paste of jhash from the kernel sources, to make sure llvm
+ * can compile it into a valid sequence of bpf instructions
+ */
+#define __jhash_mix(a, b, c)			\
+{						\
+	a -= c;  a ^= rol32(c, 4);  c += b;	\
+	b -= a;  b ^= rol32(a, 6);  a += c;	\
+	c -= b;  c ^= rol32(b, 8);  b += a;	\
+	a -= c;  a ^= rol32(c, 16); c += b;	\
+	b -= a;  b ^= rol32(a, 19); a += c;	\
+	c -= b;  c ^= rol32(b, 4);  b += a;	\
+}
+
+#define __jhash_final(a, b, c)			\
+{						\
+	c ^= b; c -= rol32(b, 14);		\
+	a ^= c; a -= rol32(c, 11);		\
+	b ^= a; b -= rol32(a, 25);		\
+	c ^= b; c -= rol32(b, 16);		\
+	a ^= c; a -= rol32(c, 4);		\
+	b ^= a; b -= rol32(a, 14);		\
+	c ^= b; c -= rol32(b, 24);		\
+}
+
+#define JHASH_INITVAL		0xdeadbeef
+
+typedef unsigned int u32;
+
+static __attribute__ ((noinline))
+u32 jhash(const void *key, u32 length, u32 initval)
+{
+	u32 a, b, c;
+	const unsigned char *k = key;
+
+	a = b = c = JHASH_INITVAL + length + initval;
+
+	while (length > 12) {
+		a += *(u32 *)(k);
+		b += *(u32 *)(k + 4);
+		c += *(u32 *)(k + 8);
+		__jhash_mix(a, b, c);
+		length -= 12;
+		k += 12;
+	}
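+	/* handle the trailing bytes; all cases deliberately fall through */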
+	switch (length) {
+	case 12: c += (u32)k[11]<<24;
+	case 11: c += (u32)k[10]<<16;
+	case 10: c += (u32)k[9]<<8;
+	case 9:  c += k[8];
+	case 8:  b += (u32)k[7]<<24;
+	case 7:  b += (u32)k[6]<<16;
+	case 6:  b += (u32)k[5]<<8;
+	case 5:  b += k[4];
+	case 4:  a += (u32)k[3]<<24;
+	case 3:  a += (u32)k[2]<<16;
+	case 2:  a += (u32)k[1]<<8;
+	case 1:  a += k[0];
+		 __jhash_final(a, b, c);
+	case 0: /* Nothing left to add */
+		break;
+	}
+
+	return c;
+}
+
+static __attribute__ ((noinline))
+u32 __jhash_nwords(u32 a, u32 b, u32 c, u32 initval)
+{
+	a += initval;
+	b += initval;
+	c += initval;
+	__jhash_final(a, b, c);
+	return c;
+}
+
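+/* as in the kernel's jhash_2words(): the (2 << 2) term folds the input
+ * length in bytes (two 32-bit words) into the initval
+ */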
+static __attribute__ ((noinline))
+u32 jhash_2words(u32 a, u32 b, u32 initval)
+{
+	return __jhash_nwords(a, b, 0, initval + JHASH_INITVAL + (2 << 2));
+}
+
+struct flow_key {
+	union {
+		__be32 src;
+		__be32 srcv6[4];
+	};
+	union {
+		__be32 dst;
+		__be32 dstv6[4];
+	};
+	union {
+		__u32 ports;
+		__u16 port16[2];
+	};
+	__u8 proto;
+};
+
+struct packet_description {
+	struct flow_key flow;
+	__u8 flags;
+};
+
+struct ctl_value {
+	union {
+		__u64 value;
+		__u32 ifindex;
+		__u8 mac[6];
+	};
+};
+
+struct vip_definition {
+	union {
+		__be32 vip;
+		__be32 vipv6[4];
+	};
+	__u16 port;
+	__u16 family;
+	__u8 proto;
+};
+
+struct vip_meta {
+	__u32 flags;
+	__u32 vip_num;
+};
+
+struct real_pos_lru {
+	__u32 pos;
+	__u64 atime;
+};
+
+struct real_definition {
+	union {
+		__be32 dst;
+		__be32 dstv6[4];
+	};
+	__u8 flags;
+};
+
+struct lb_stats {
+	__u64 v2;
+	__u64 v1;
+};
+
+struct bpf_map_def __attribute__ ((section("maps"), used)) vip_map = {
+	.type = BPF_MAP_TYPE_HASH,
+	.key_size = sizeof(struct vip_definition),
+	.value_size = sizeof(struct vip_meta),
+	.max_entries = 512,
+	.map_flags = 0,
+};
+
+struct bpf_map_def __attribute__ ((section("maps"), used)) lru_cache = {
+	.type = BPF_MAP_TYPE_LRU_HASH,
+	.key_size = sizeof(struct flow_key),
+	.value_size = sizeof(struct real_pos_lru),
+	.max_entries = 300,
+	.map_flags = 1U << 1,
+};
+
+struct bpf_map_def __attribute__ ((section("maps"), used)) ch_rings = {
+	.type = BPF_MAP_TYPE_ARRAY,
+	.key_size = sizeof(__u32),
+	.value_size = sizeof(__u32),
+	.max_entries = 12 * 655,
+	.map_flags = 0,
+};
+
+struct bpf_map_def __attribute__ ((section("maps"), used)) reals = {
+	.type = BPF_MAP_TYPE_ARRAY,
+	.key_size = sizeof(__u32),
+	.value_size = sizeof(struct real_definition),
+	.max_entries = 40,
+	.map_flags = 0,
+};
+
+struct bpf_map_def __attribute__ ((section("maps"), used)) stats = {
+	.type = BPF_MAP_TYPE_PERCPU_ARRAY,
+	.key_size = sizeof(__u32),
+	.value_size = sizeof(struct lb_stats),
+	.max_entries = 515,
+	.map_flags = 0,
+};
+
+struct bpf_map_def __attribute__ ((section("maps"), used)) ctl_array = {
+	.type = BPF_MAP_TYPE_ARRAY,
+	.key_size = sizeof(__u32),
+	.value_size = sizeof(struct ctl_value),
+	.max_entries = 16,
+	.map_flags = 0,
+};
+
+struct eth_hdr {
+	unsigned char eth_dest[6];
+	unsigned char eth_source[6];
+	unsigned short eth_proto;
+};
+
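+/* ethertype values are compared in network byte order throughout this
+ * file: 8 is htons(ETH_P_IP) and 56710 is htons(ETH_P_IPV6) on a
+ * little-endian host
+ */
+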
+static inline __u64 calc_offset(bool is_ipv6, bool is_icmp)
+{
+	__u64 off = sizeof(struct eth_hdr);
+	if (is_ipv6) {
+		off += sizeof(struct ipv6hdr);
+		if (is_icmp)
+			off += sizeof(struct icmp6hdr) + sizeof(struct ipv6hdr);
+	} else {
+		off += sizeof(struct iphdr);
+		if (is_icmp)
+			off += sizeof(struct icmphdr) + sizeof(struct iphdr);
+	}
+	return off;
+}
+
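+/* parse_udp()/parse_tcp() classify ICMP error packets by the embedded
+ * (inner) header: calc_offset() skips an extra ICMP header plus inner
+ * IP header when is_icmp is set
+ */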
+static __attribute__ ((noinline))
+bool parse_udp(void *data, void *data_end,
+	       bool is_ipv6, struct packet_description *pckt)
+{
+
+	bool is_icmp = !((pckt->flags & (1 << 0)) == 0);
+	__u64 off = calc_offset(is_ipv6, is_icmp);
+	struct udphdr *udp;
+	udp = data + off;
+
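+	/* bounds check the full UDP header against data_end so the
+	 * verifier can prove the loads below are safe
+	 */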
+	if (udp + 1 > data_end)
+		return 0;
+	if (!is_icmp) {
+		pckt->flow.port16[0] = udp->source;
+		pckt->flow.port16[1] = udp->dest;
+	} else {
+		pckt->flow.port16[0] = udp->dest;
+		pckt->flow.port16[1] = udp->source;
+	}
+	return 1;
+}
+
+static __attribute__ ((noinline))
+bool parse_tcp(void *data, void *data_end,
+	       bool is_ipv6, struct packet_description *pckt)
+{
+
+	bool is_icmp = !((pckt->flags & (1 << 0)) == 0);
+	__u64 off = calc_offset(is_ipv6, is_icmp);
+	struct tcphdr *tcp;
+
+	tcp = data + off;
+	if (tcp + 1 > data_end)
+		return 0;
+	if (tcp->syn)
+		pckt->flags |= (1 << 1);
+	if (!is_icmp) {
+		pckt->flow.port16[0] = tcp->source;
+		pckt->flow.port16[1] = tcp->dest;
+	} else {
+		pckt->flow.port16[0] = tcp->dest;
+		pckt->flow.port16[1] = tcp->source;
+	}
+	return 1;
+}
+
+static __attribute__ ((noinline))
+bool encap_v6(struct xdp_md *xdp, struct ctl_value *cval,
+	      struct packet_description *pckt,
+	      struct real_definition *dst, __u32 pkt_bytes)
+{
+	struct eth_hdr *new_eth;
+	struct eth_hdr *old_eth;
+	struct ipv6hdr *ip6h;
+	__u32 ip_suffix;
+	void *data_end;
+	void *data;
+
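+	/* grow packet headroom by one IPv6 header: the new outer
+	 * ethernet + IPv6 headers are then written in front of the
+	 * original packet
+	 */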
+	if (bpf_xdp_adjust_head(xdp, 0 - (int)sizeof(struct ipv6hdr)))
+		return 0;
+	data = (void *)(long)xdp->data;
+	data_end = (void *)(long)xdp->data_end;
+	new_eth = data;
+	ip6h = data + sizeof(struct eth_hdr);
+	old_eth = data + sizeof(struct ipv6hdr);
+	if (new_eth + 1 > data_end ||
+	    old_eth + 1 > data_end || ip6h + 1 > data_end)
+		return 0;
+	memcpy(new_eth->eth_dest, cval->mac, 6);
+	memcpy(new_eth->eth_source, old_eth->eth_dest, 6);
+	new_eth->eth_proto = 56710;
+	ip6h->version = 6;
+	ip6h->priority = 0;
+	memset(ip6h->flow_lbl, 0, sizeof(ip6h->flow_lbl));
+
+	ip6h->nexthdr = IPPROTO_IPV6;
+	ip_suffix = pckt->flow.srcv6[3] ^ pckt->flow.port16[0];
+	ip6h->payload_len =
+	    __builtin_bswap16(pkt_bytes + sizeof(struct ipv6hdr));
+	ip6h->hop_limit = 4;
+
+	ip6h->saddr.in6_u.u6_addr32[0] = 1;
+	ip6h->saddr.in6_u.u6_addr32[1] = 2;
+	ip6h->saddr.in6_u.u6_addr32[2] = 3;
+	ip6h->saddr.in6_u.u6_addr32[3] = ip_suffix;
+	memcpy(ip6h->daddr.in6_u.u6_addr32, dst->dstv6, 16);
+	return 1;
+}
+
+static __attribute__ ((noinline))
+bool encap_v4(struct xdp_md *xdp, struct ctl_value *cval,
+	      struct packet_description *pckt,
+	      struct real_definition *dst, __u32 pkt_bytes)
+{
+
+	__u32 ip_suffix = __builtin_bswap16(pckt->flow.port16[0]);
+	struct eth_hdr *new_eth;
+	struct eth_hdr *old_eth;
+	__u16 *next_iph_u16;
+	struct iphdr *iph;
+	__u32 csum = 0;
+	void *data_end;
+	void *data;
+
+	ip_suffix <<= 15;
+	ip_suffix ^= pckt->flow.src;
+	if (bpf_xdp_adjust_head(xdp, 0 - (int)sizeof(struct iphdr)))
+		return 0;
+	data = (void *)(long)xdp->data;
+	data_end = (void *)(long)xdp->data_end;
+	new_eth = data;
+	iph = data + sizeof(struct eth_hdr);
+	old_eth = data + sizeof(struct iphdr);
+	if (new_eth + 1 > data_end ||
+	    old_eth + 1 > data_end || iph + 1 > data_end)
+		return 0;
+	memcpy(new_eth->eth_dest, cval->mac, 6);
+	memcpy(new_eth->eth_source, old_eth->eth_dest, 6);
+	new_eth->eth_proto = 8;
+	iph->version = 4;
+	iph->ihl = 5;
+	iph->frag_off = 0;
+	iph->protocol = IPPROTO_IPIP;
+	iph->check = 0;
+	iph->tos = 1;
+	iph->tot_len = __builtin_bswap16(pkt_bytes + sizeof(struct iphdr));
+	/* don't update iph->daddr, since it will overwrite old eth_proto
+	 * and multiple iterations of bpf_prog_run() will fail
+	 */
+
+	iph->saddr = ((0xFFFF0000 & ip_suffix) | 4268) ^ dst->dst;
+	iph->ttl = 4;
+
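+	/* RFC 1071 style checksum: sum the header as 16-bit words, fold
+	 * the carries once, and store the one's complement
+	 */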
+	next_iph_u16 = (__u16 *) iph;
+#pragma clang loop unroll(full)
+	for (int i = 0; i < sizeof(struct iphdr) >> 1; i++)
+		csum += *next_iph_u16++;
+	iph->check = ~((csum & 0xffff) + (csum >> 16));
+	if (bpf_xdp_adjust_head(xdp, (int)sizeof(struct iphdr)))
+		return 0;
+	return 1;
+}
+
+static __attribute__ ((noinline))
+bool decap_v6(struct xdp_md *xdp, void **data, void **data_end, bool inner_v4)
+{
+	struct eth_hdr *new_eth;
+	struct eth_hdr *old_eth;
+
+	old_eth = *data;
+	new_eth = *data + sizeof(struct ipv6hdr);
+	memcpy(new_eth->eth_source, old_eth->eth_source, 6);
+	memcpy(new_eth->eth_dest, old_eth->eth_dest, 6);
+	if (inner_v4)
+		new_eth->eth_proto = 8;
+	else
+		new_eth->eth_proto = 56710;
+	if (bpf_xdp_adjust_head(xdp, (int)sizeof(struct ipv6hdr)))
+		return 0;
+	*data = (void *)(long)xdp->data;
+	*data_end = (void *)(long)xdp->data_end;
+	return 1;
+}
+
+static __attribute__ ((noinline))
+bool decap_v4(struct xdp_md *xdp, void **data, void **data_end)
+{
+	struct eth_hdr *new_eth;
+	struct eth_hdr *old_eth;
+
+	old_eth = *data;
+	new_eth = *data + sizeof(struct iphdr);
+	memcpy(new_eth->eth_source, old_eth->eth_source, 6);
+	memcpy(new_eth->eth_dest, old_eth->eth_dest, 6);
+	new_eth->eth_proto = 8;
+	if (bpf_xdp_adjust_head(xdp, (int)sizeof(struct iphdr)))
+		return 0;
+	*data = (void *)(long)xdp->data;
+	*data_end = (void *)(long)xdp->data_end;
+	return 1;
+}
+
+static __attribute__ ((noinline))
+int swap_mac_and_send(void *data, void *data_end)
+{
+	unsigned char tmp_mac[6];
+	struct eth_hdr *eth;
+
+	eth = data;
+	memcpy(tmp_mac, eth->eth_source, 6);
+	memcpy(eth->eth_source, eth->eth_dest, 6);
+	memcpy(eth->eth_dest, tmp_mac, 6);
+	return XDP_TX;
+}
+
+static __attribute__ ((noinline))
+int send_icmp_reply(void *data, void *data_end)
+{
+	struct icmphdr *icmp_hdr;
+	__u16 *next_iph_u16;
+	__u32 tmp_addr = 0;
+	struct iphdr *iph;
+	__u32 csum1 = 0;
+	__u32 csum = 0;
+	__u64 off = 0;
+
+	if (data + sizeof(struct eth_hdr)
+	     + sizeof(struct iphdr) + sizeof(struct icmphdr) > data_end)
+		return XDP_DROP;
+	off += sizeof(struct eth_hdr);
+	iph = data + off;
+	off += sizeof(struct iphdr);
+	icmp_hdr = data + off;
+	icmp_hdr->type = 0;
+	icmp_hdr->checksum += 0x0007;
+	iph->ttl = 4;
+	tmp_addr = iph->daddr;
+	iph->daddr = iph->saddr;
+	iph->saddr = tmp_addr;
+	iph->check = 0;
+	next_iph_u16 = (__u16 *) iph;
+#pragma clang loop unroll(full)
+	for (int i = 0; i < sizeof(struct iphdr) >> 1; i++)
+		csum += *next_iph_u16++;
+	iph->check = ~((csum & 0xffff) + (csum >> 16));
+	return swap_mac_and_send(data, data_end);
+}
+
+static __attribute__ ((noinline))
+int send_icmp6_reply(void *data, void *data_end)
+{
+	struct icmp6hdr *icmp_hdr;
+	struct ipv6hdr *ip6h;
+	__be32 tmp_addr[4];
+	__u64 off = 0;
+
+	if (data + sizeof(struct eth_hdr)
+	     + sizeof(struct ipv6hdr) + sizeof(struct icmp6hdr) > data_end)
+		return XDP_DROP;
+	off += sizeof(struct eth_hdr);
+	ip6h = data + off;
+	off += sizeof(struct ipv6hdr);
+	icmp_hdr = data + off;
+	icmp_hdr->icmp6_type = 129;
+	icmp_hdr->icmp6_cksum -= 0x0001;
+	ip6h->hop_limit = 4;
+	memcpy(tmp_addr, ip6h->saddr.in6_u.u6_addr32, 16);
+	memcpy(ip6h->saddr.in6_u.u6_addr32, ip6h->daddr.in6_u.u6_addr32, 16);
+	memcpy(ip6h->daddr.in6_u.u6_addr32, tmp_addr, 16);
+	return swap_mac_and_send(data, data_end);
+}
+
+static __attribute__ ((noinline))
+int parse_icmpv6(void *data, void *data_end, __u64 off,
+		 struct packet_description *pckt)
+{
+	struct icmp6hdr *icmp_hdr;
+	struct ipv6hdr *ip6h;
+
+	icmp_hdr = data + off;
+	if (icmp_hdr + 1 > data_end)
+		return XDP_DROP;
+	if (icmp_hdr->icmp6_type == 128)
+		return send_icmp6_reply(data, data_end);
+	if (icmp_hdr->icmp6_type != 3)
+		return XDP_PASS;
+	off += sizeof(struct icmp6hdr);
+	ip6h = data + off;
+	if (ip6h + 1 > data_end)
+		return XDP_DROP;
+	pckt->flow.proto = ip6h->nexthdr;
+	pckt->flags |= (1 << 0);
+	memcpy(pckt->flow.srcv6, ip6h->daddr.in6_u.u6_addr32, 16);
+	memcpy(pckt->flow.dstv6, ip6h->saddr.in6_u.u6_addr32, 16);
+	return -1;
+}
+
+static __attribute__ ((noinline))
+int parse_icmp(void *data, void *data_end, __u64 off,
+	       struct packet_description *pckt)
+{
+	struct icmphdr *icmp_hdr;
+	struct iphdr *iph;
+
+	icmp_hdr = data + off;
+	if (icmp_hdr + 1 > data_end)
+		return XDP_DROP;
+	if (icmp_hdr->type == 8)
+		return send_icmp_reply(data, data_end);
+	if ((icmp_hdr->type != 3) || (icmp_hdr->code != 4))
+		return XDP_PASS;
+	off += sizeof(struct icmphdr);
+	iph = data + off;
+	if (iph + 1 > data_end)
+		return XDP_DROP;
+	if (iph->ihl != 5)
+		return XDP_DROP;
+	pckt->flow.proto = iph->protocol;
+	pckt->flags |= (1 << 0);
+	pckt->flow.src = iph->daddr;
+	pckt->flow.dst = iph->saddr;
+	return -1;
+}
+
+static __attribute__ ((noinline))
+__u32 get_packet_hash(struct packet_description *pckt,
+		      bool hash_16bytes)
+{
+	if (hash_16bytes)
+		return jhash_2words(jhash(pckt->flow.srcv6, 16, 12),
+				    pckt->flow.ports, 24);
+	else
+		return jhash_2words(pckt->flow.src, pckt->flow.ports,
+				    24);
+}
+
+__attribute__ ((noinline))
+static bool get_packet_dst(struct real_definition **real,
+			   struct packet_description *pckt,
+			   struct vip_meta *vip_info,
+			   bool is_ipv6, void *lru_map)
+{
+	struct real_pos_lru new_dst_lru = { };
+	bool hash_16bytes = is_ipv6;
+	__u32 *real_pos, hash, key;
+	__u64 cur_time;
+
+	if (vip_info->flags & (1 << 2))
+		hash_16bytes = 1;
+	if (vip_info->flags & (1 << 3)) {
+		pckt->flow.port16[0] = pckt->flow.port16[1];
+		memset(pckt->flow.srcv6, 0, 16);
+	}
+	hash = get_packet_hash(pckt, hash_16bytes);
+	if (hash != 0x358459b7 /* jhash of ipv4 packet */  &&
+	    hash != 0x2f4bc6bb /* jhash of ipv6 packet */)
+		return 0;
+	key = 2 * vip_info->vip_num + hash % 2;
+	real_pos = bpf_map_lookup_elem(&ch_rings, &key);
+	if (!real_pos)
+		return 0;
+	key = *real_pos;
+	*real = bpf_map_lookup_elem(&reals, &key);
+	if (!(*real))
+		return 0;
+	if (!(vip_info->flags & (1 << 1))) {
+		__u32 conn_rate_key = 512 + 2;
+		struct lb_stats *conn_rate_stats =
+		    bpf_map_lookup_elem(&stats, &conn_rate_key);
+
+		if (!conn_rate_stats)
+			return 1;
+		cur_time = bpf_ktime_get_ns();
+		if ((cur_time - conn_rate_stats->v2) >> 32 > 0xffFFFF) {
+			conn_rate_stats->v1 = 1;
+			conn_rate_stats->v2 = cur_time;
+		} else {
+			conn_rate_stats->v1 += 1;
+			if (conn_rate_stats->v1 >= 1)
+				return 1;
+		}
+		if (pckt->flow.proto == IPPROTO_UDP)
+			new_dst_lru.atime = cur_time;
+		new_dst_lru.pos = key;
+		bpf_map_update_elem(lru_map, &pckt->flow, &new_dst_lru, 0);
+	}
+	return 1;
+}
+
+__attribute__ ((noinline))
+static void connection_table_lookup(struct real_definition **real,
+				    struct packet_description *pckt,
+				    void *lru_map)
+{
+
+	struct real_pos_lru *dst_lru;
+	__u64 cur_time;
+	__u32 key;
+
+	dst_lru = bpf_map_lookup_elem(lru_map, &pckt->flow);
+	if (!dst_lru)
+		return;
+	if (pckt->flow.proto == IPPROTO_UDP) {
+		cur_time = bpf_ktime_get_ns();
+		if (cur_time - dst_lru->atime > 300000)
+			return;
+		dst_lru->atime = cur_time;
+	}
+	key = dst_lru->pos;
+	*real = bpf_map_lookup_elem(&reals, &key);
+}
+
+/* don't believe your eyes!
+ * the function below has 6 arguments, whereas bpf and llvm allow a
+ * maximum of 5, but since it is _static_, llvm can optimize one
+ * argument away
+ */
+__attribute__ ((noinline))
+static int process_l3_headers_v6(struct packet_description *pckt,
+				 __u8 *protocol, __u64 off,
+				 __u16 *pkt_bytes, void *data,
+				 void *data_end)
+{
+	struct ipv6hdr *ip6h;
+	__u64 iph_len;
+	int action;
+
+	ip6h = data + off;
+	if (ip6h + 1 > data_end)
+		return XDP_DROP;
+	iph_len = sizeof(struct ipv6hdr);
+	*protocol = ip6h->nexthdr;
+	pckt->flow.proto = *protocol;
+	*pkt_bytes = __builtin_bswap16(ip6h->payload_len);
+	off += iph_len;
+	if (*protocol == 45) {
+		return XDP_DROP;
+	} else if (*protocol == 59) {
+		action = parse_icmpv6(data, data_end, off, pckt);
+		if (action >= 0)
+			return action;
+	} else {
+		memcpy(pckt->flow.srcv6, ip6h->saddr.in6_u.u6_addr32, 16);
+		memcpy(pckt->flow.dstv6, ip6h->daddr.in6_u.u6_addr32, 16);
+	}
+	return -1;
+}
+
+__attribute__ ((noinline))
+static int process_l3_headers_v4(struct packet_description *pckt,
+				 __u8 *protocol, __u64 off,
+				 __u16 *pkt_bytes, void *data,
+				 void *data_end)
+{
+	struct iphdr *iph;
+	__u64 iph_len;
+	int action;
+
+	iph = data + off;
+	if (iph + 1 > data_end)
+		return XDP_DROP;
+	if (iph->ihl != 5)
+		return XDP_DROP;
+	*protocol = iph->protocol;
+	pckt->flow.proto = *protocol;
+	*pkt_bytes = __builtin_bswap16(iph->tot_len);
+	off += 20;
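+	/* 65343 is htons(0x3fff): drop fragments (MF flag set or a
+	 * non-zero fragment offset)
+	 */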
+	if (iph->frag_off & 65343)
+		return XDP_DROP;
+	if (*protocol == IPPROTO_ICMP) {
+		action = parse_icmp(data, data_end, off, pckt);
+		if (action >= 0)
+			return action;
+	} else {
+		pckt->flow.src = iph->saddr;
+		pckt->flow.dst = iph->daddr;
+	}
+	return -1;
+}
+
+__attribute__ ((noinline))
+static int process_packet(void *data, __u64 off, void *data_end,
+			  bool is_ipv6, struct xdp_md *xdp)
+{
+
+	struct real_definition *dst = NULL;
+	struct packet_description pckt = { };
+	struct vip_definition vip = { };
+	struct lb_stats *data_stats;
+	struct eth_hdr *eth = data;
+	void *lru_map = &lru_cache;
+	struct vip_meta *vip_info;
+	__u32 lru_stats_key = 513;
+	__u32 mac_addr_pos = 0;
+	__u32 stats_key = 512;
+	struct ctl_value *cval;
+	__u16 pkt_bytes;
+	__u64 iph_len;
+	__u8 protocol;
+	__u32 vip_num;
+	int action;
+
+	if (is_ipv6)
+		action = process_l3_headers_v6(&pckt, &protocol, off,
+					       &pkt_bytes, data, data_end);
+	else
+		action = process_l3_headers_v4(&pckt, &protocol, off,
+					       &pkt_bytes, data, data_end);
+	if (action >= 0)
+		return action;
+	protocol = pckt.flow.proto;
+	if (protocol == IPPROTO_TCP) {
+		if (!parse_tcp(data, data_end, is_ipv6, &pckt))
+			return XDP_DROP;
+	} else if (protocol == IPPROTO_UDP) {
+		if (!parse_udp(data, data_end, is_ipv6, &pckt))
+			return XDP_DROP;
+	} else {
+		return XDP_TX;
+	}
+
+	if (is_ipv6)
+		memcpy(vip.vipv6, pckt.flow.dstv6, 16);
+	else
+		vip.vip = pckt.flow.dst;
+	vip.port = pckt.flow.port16[1];
+	vip.proto = pckt.flow.proto;
+	vip_info = bpf_map_lookup_elem(&vip_map, &vip);
+	if (!vip_info) {
+		vip.port = 0;
+		vip_info = bpf_map_lookup_elem(&vip_map, &vip);
+		if (!vip_info)
+			return XDP_PASS;
+		if (!(vip_info->flags & (1 << 4)))
+			pckt.flow.port16[1] = 0;
+	}
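+	/* presumably drops oversized packets so that the encap header
+	 * added below still fits within a 1500 byte MTU
+	 */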
+	if (data_end - data > 1400)
+		return XDP_DROP;
+	data_stats = bpf_map_lookup_elem(&stats, &stats_key);
+	if (!data_stats)
+		return XDP_DROP;
+	data_stats->v1 += 1;
+	if (!dst) {
+		if (vip_info->flags & (1 << 0))
+			pckt.flow.port16[0] = 0;
+		if (!(pckt.flags & (1 << 1)) && !(vip_info->flags & (1 << 1)))
+			connection_table_lookup(&dst, &pckt, lru_map);
+		if (dst)
+			goto out;
+		if (pckt.flow.proto == IPPROTO_TCP) {
+			struct lb_stats *lru_stats =
+			    bpf_map_lookup_elem(&stats, &lru_stats_key);
+
+			if (!lru_stats)
+				return XDP_DROP;
+			if (pckt.flags & (1 << 1))
+				lru_stats->v1 += 1;
+			else
+				lru_stats->v2 += 1;
+		}
+		if (!get_packet_dst(&dst, &pckt, vip_info, is_ipv6, lru_map))
+			return XDP_DROP;
+		data_stats->v2 += 1;
+	}
+out:
+	cval = bpf_map_lookup_elem(&ctl_array, &mac_addr_pos);
+	if (!cval)
+		return XDP_DROP;
+	if (dst->flags & (1 << 0)) {
+		if (!encap_v6(xdp, cval, &pckt, dst, pkt_bytes))
+			return XDP_DROP;
+	} else {
+		if (!encap_v4(xdp, cval, &pckt, dst, pkt_bytes))
+			return XDP_DROP;
+	}
+	vip_num = vip_info->vip_num;
+	data_stats = bpf_map_lookup_elem(&stats, &vip_num);
+	if (!data_stats)
+		return XDP_DROP;
+	data_stats->v1 += 1;
+	data_stats->v2 += pkt_bytes;
+
+	data = (void *)(long)xdp->data;
+	data_end = (void *)(long)xdp->data_end;
+	if (data + 4 > data_end)
+		return XDP_DROP;
+	*(u32 *)data = dst->dst;
+	return XDP_DROP;
+}
+
+__attribute__ ((section("xdp-test"), used))
+int balancer_ingress(struct xdp_md *ctx)
+{
+	void *data = (void *)(long)ctx->data;
+	void *data_end = (void *)(long)ctx->data_end;
+	struct eth_hdr *eth = data;
+	__u32 eth_proto;
+	__u32 nh_off;
+
+	nh_off = sizeof(struct eth_hdr);
+	if (data + nh_off > data_end)
+		return XDP_DROP;
+	eth_proto = eth->eth_proto;
+	if (eth_proto == 8)
+		return process_packet(data, nh_off, data_end, 0, ctx);
+	else if (eth_proto == 56710)
+		return process_packet(data, nh_off, data_end, 1, ctx);
+	else
+		return XDP_DROP;
+}
+
+char _license[] __attribute__ ((section("license"), used)) = "GPL";
+int _version __attribute__ ((section("version"), used)) = 1;
diff --git a/tools/testing/selftests/net/Makefile b/tools/testing/selftests/net/Makefile
index 500c74d..d7c30d3 100644
--- a/tools/testing/selftests/net/Makefile
+++ b/tools/testing/selftests/net/Makefile
@@ -5,6 +5,7 @@
 CFLAGS += -I../../../../usr/include/
 
 TEST_PROGS := run_netsocktests run_afpackettests test_bpf.sh netdevice.sh rtnetlink.sh
+TEST_PROGS += fib_tests.sh
 TEST_GEN_FILES =  socket
 TEST_GEN_FILES += psock_fanout psock_tpacket msg_zerocopy
 TEST_GEN_PROGS = reuseport_bpf reuseport_bpf_cpu reuseport_bpf_numa
diff --git a/tools/testing/selftests/net/fib_tests.sh b/tools/testing/selftests/net/fib_tests.sh
new file mode 100755
index 0000000..a9154ee
--- /dev/null
+++ b/tools/testing/selftests/net/fib_tests.sh
@@ -0,0 +1,429 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+
+# This test is for checking IPv4 and IPv6 FIB behavior in response to
+# different events.
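+#
+# The tests use "ip route get fibmatch", which returns the matching FIB
+# entry itself rather than the fully resolved route.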
+
+ret=0
+
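+# check_err() latches the first failing exit code into $ret;
+# check_fail() sets $ret when a command unexpectedly succeeds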
+check_err()
+{
+	if [ $ret -eq 0 ]; then
+		ret=$1
+	fi
+}
+
+check_fail()
+{
+	if [ $1 -eq 0 ]; then
+		ret=1
+	fi
+}
+
+netns_create()
+{
+	local testns=$1
+
+	ip netns add $testns
+	ip netns exec $testns ip link set dev lo up
+}
+
+fib_unreg_unicast_test()
+{
+	ret=0
+
+	netns_create "testns"
+
+	ip netns exec testns ip link add dummy0 type dummy
+	ip netns exec testns ip link set dev dummy0 up
+
+	ip netns exec testns ip address add 198.51.100.1/24 dev dummy0
+	ip netns exec testns ip -6 address add 2001:db8:1::1/64 dev dummy0
+
+	ip netns exec testns ip route get fibmatch 198.51.100.2 &> /dev/null
+	check_err $?
+	ip netns exec testns ip -6 route get fibmatch 2001:db8:1::2 &> /dev/null
+	check_err $?
+
+	ip netns exec testns ip link del dev dummy0
+	check_err $?
+
+	ip netns exec testns ip route get fibmatch 198.51.100.2 &> /dev/null
+	check_fail $?
+	ip netns exec testns ip -6 route get fibmatch 2001:db8:1::2 &> /dev/null
+	check_fail $?
+
+	ip netns del testns
+
+	if [ $ret -ne 0 ]; then
+		echo "FAIL: unicast route test"
+		return 1
+	fi
+	echo "PASS: unicast route test"
+}
+
+fib_unreg_multipath_test()
+{
+	ret=0
+
+	netns_create "testns"
+
+	ip netns exec testns ip link add dummy0 type dummy
+	ip netns exec testns ip link set dev dummy0 up
+
+	ip netns exec testns ip link add dummy1 type dummy
+	ip netns exec testns ip link set dev dummy1 up
+
+	ip netns exec testns ip address add 198.51.100.1/24 dev dummy0
+	ip netns exec testns ip -6 address add 2001:db8:1::1/64 dev dummy0
+
+	ip netns exec testns ip address add 192.0.2.1/24 dev dummy1
+	ip netns exec testns ip -6 address add 2001:db8:2::1/64 dev dummy1
+
+	ip netns exec testns ip route add 203.0.113.0/24 \
+		nexthop via 198.51.100.2 dev dummy0 \
+		nexthop via 192.0.2.2 dev dummy1
+	ip netns exec testns ip -6 route add 2001:db8:3::/64 \
+		nexthop via 2001:db8:1::2 dev dummy0 \
+		nexthop via 2001:db8:2::2 dev dummy1
+
+	ip netns exec testns ip route get fibmatch 203.0.113.1 &> /dev/null
+	check_err $?
+	ip netns exec testns ip -6 route get fibmatch 2001:db8:3::1 &> /dev/null
+	check_err $?
+
+	ip netns exec testns ip link del dev dummy0
+	check_err $?
+
+	ip netns exec testns ip route get fibmatch 203.0.113.1 &> /dev/null
+	check_fail $?
+	ip netns exec testns ip -6 route get fibmatch 2001:db8:3::1 &> /dev/null
+	# In IPv6 we do not flush the entire multipath route.
+	check_err $?
+
+	ip netns exec testns ip link del dev dummy1
+
+	ip netns del testns
+
+	if [ $ret -ne 0 ]; then
+		echo "FAIL: multipath route test"
+		return 1
+	fi
+	echo "PASS: multipath route test"
+}
+
+fib_unreg_test()
+{
+	echo "Running netdev unregister tests"
+
+	fib_unreg_unicast_test
+	fib_unreg_multipath_test
+}
+
+fib_down_unicast_test()
+{
+	ret=0
+
+	netns_create "testns"
+
+	ip netns exec testns ip link add dummy0 type dummy
+	ip netns exec testns ip link set dev dummy0 up
+
+	ip netns exec testns ip address add 198.51.100.1/24 dev dummy0
+	ip netns exec testns ip -6 address add 2001:db8:1::1/64 dev dummy0
+
+	ip netns exec testns ip route get fibmatch 198.51.100.2 &> /dev/null
+	check_err $?
+	ip netns exec testns ip -6 route get fibmatch 2001:db8:1::2 &> /dev/null
+	check_err $?
+
+	ip netns exec testns ip link set dev dummy0 down
+	check_err $?
+
+	ip netns exec testns ip route get fibmatch 198.51.100.2 &> /dev/null
+	check_fail $?
+	ip netns exec testns ip -6 route get fibmatch 2001:db8:1::2 &> /dev/null
+	check_fail $?
+
+	ip netns exec testns ip link del dev dummy0
+
+	ip netns del testns
+
+	if [ $ret -ne 0 ]; then
+		echo "FAIL: unicast route test"
+		return 1
+	fi
+	echo "PASS: unicast route test"
+}
+
+fib_down_multipath_test_do()
+{
+	local down_dev=$1
+	local up_dev=$2
+
+	ip netns exec testns ip route get fibmatch 203.0.113.1 \
+		oif $down_dev &> /dev/null
+	check_fail $?
+	ip netns exec testns ip -6 route get fibmatch 2001:db8:3::1 \
+		oif $down_dev &> /dev/null
+	check_fail $?
+
+	ip netns exec testns ip route get fibmatch 203.0.113.1 \
+		oif $up_dev &> /dev/null
+	check_err $?
+	ip netns exec testns ip -6 route get fibmatch 2001:db8:3::1 \
+		oif $up_dev &> /dev/null
+	check_err $?
+
+	ip netns exec testns ip route get fibmatch 203.0.113.1 | \
+		grep $down_dev | grep -q "dead linkdown"
+	check_err $?
+	ip netns exec testns ip -6 route get fibmatch 2001:db8:3::1 | \
+		grep $down_dev | grep -q "dead linkdown"
+	check_err $?
+
+	ip netns exec testns ip route get fibmatch 203.0.113.1 | \
+		grep $up_dev | grep -q "dead linkdown"
+	check_fail $?
+	ip netns exec testns ip -6 route get fibmatch 2001:db8:3::1 | \
+		grep $up_dev | grep -q "dead linkdown"
+	check_fail $?
+}
+
+fib_down_multipath_test()
+{
+	ret=0
+
+	netns_create "testns"
+
+	ip netns exec testns ip link add dummy0 type dummy
+	ip netns exec testns ip link set dev dummy0 up
+
+	ip netns exec testns ip link add dummy1 type dummy
+	ip netns exec testns ip link set dev dummy1 up
+
+	ip netns exec testns ip address add 198.51.100.1/24 dev dummy0
+	ip netns exec testns ip -6 address add 2001:db8:1::1/64 dev dummy0
+
+	ip netns exec testns ip address add 192.0.2.1/24 dev dummy1
+	ip netns exec testns ip -6 address add 2001:db8:2::1/64 dev dummy1
+
+	ip netns exec testns ip route add 203.0.113.0/24 \
+		nexthop via 198.51.100.2 dev dummy0 \
+		nexthop via 192.0.2.2 dev dummy1
+	ip netns exec testns ip -6 route add 2001:db8:3::/64 \
+		nexthop via 2001:db8:1::2 dev dummy0 \
+		nexthop via 2001:db8:2::2 dev dummy1
+
+	ip netns exec testns ip route get fibmatch 203.0.113.1 &> /dev/null
+	check_err $?
+	ip netns exec testns ip -6 route get fibmatch 2001:db8:3::1 &> /dev/null
+	check_err $?
+
+	ip netns exec testns ip link set dev dummy0 down
+	check_err $?
+
+	fib_down_multipath_test_do "dummy0" "dummy1"
+
+	ip netns exec testns ip link set dev dummy0 up
+	check_err $?
+	ip netns exec testns ip link set dev dummy1 down
+	check_err $?
+
+	fib_down_multipath_test_do "dummy1" "dummy0"
+
+	ip netns exec testns ip link set dev dummy0 down
+	check_err $?
+
+	ip netns exec testns ip route get fibmatch 203.0.113.1 &> /dev/null
+	check_fail $?
+	ip netns exec testns ip -6 route get fibmatch 2001:db8:3::1 &> /dev/null
+	check_fail $?
+
+	ip netns exec testns ip link del dev dummy1
+	ip netns exec testns ip link del dev dummy0
+
+	ip netns del testns
+
+	if [ $ret -ne 0 ]; then
+		echo "FAIL: multipath route test"
+		return 1
+	fi
+	echo "PASS: multipath route test"
+}
+
+fib_down_test()
+{
+	echo "Running netdev down tests"
+
+	fib_down_unicast_test
+	fib_down_multipath_test
+}
+
+fib_carrier_local_test()
+{
+	ret=0
+
+	# Local routes should not be affected when carrier changes.
+	netns_create "testns"
+
+	ip netns exec testns ip link add dummy0 type dummy
+	ip netns exec testns ip link set dev dummy0 up
+
+	ip netns exec testns ip link set dev dummy0 carrier on
+
+	ip netns exec testns ip address add 198.51.100.1/24 dev dummy0
+	ip netns exec testns ip -6 address add 2001:db8:1::1/64 dev dummy0
+
+	ip netns exec testns ip route get fibmatch 198.51.100.1 &> /dev/null
+	check_err $?
+	ip netns exec testns ip -6 route get fibmatch 2001:db8:1::1 &> /dev/null
+	check_err $?
+
+	ip netns exec testns ip route get fibmatch 198.51.100.1 | \
+		grep -q "linkdown"
+	check_fail $?
+	ip netns exec testns ip -6 route get fibmatch 2001:db8:1::1 | \
+		grep -q "linkdown"
+	check_fail $?
+
+	ip netns exec testns ip link set dev dummy0 carrier off
+
+	ip netns exec testns ip route get fibmatch 198.51.100.1 &> /dev/null
+	check_err $?
+	ip netns exec testns ip -6 route get fibmatch 2001:db8:1::1 &> /dev/null
+	check_err $?
+
+	ip netns exec testns ip route get fibmatch 198.51.100.1 | \
+		grep -q "linkdown"
+	check_fail $?
+	ip netns exec testns ip -6 route get fibmatch 2001:db8:1::1 | \
+		grep -q "linkdown"
+	check_fail $?
+
+	ip netns exec testns ip address add 192.0.2.1/24 dev dummy0
+	ip netns exec testns ip -6 address add 2001:db8:2::1/64 dev dummy0
+
+	ip netns exec testns ip route get fibmatch 192.0.2.1 &> /dev/null
+	check_err $?
+	ip netns exec testns ip -6 route get fibmatch 2001:db8:2::1 &> /dev/null
+	check_err $?
+
+	ip netns exec testns ip route get fibmatch 192.0.2.1 | \
+		grep -q "linkdown"
+	check_fail $?
+	ip netns exec testns ip -6 route get fibmatch 2001:db8:2::1 | \
+		grep -q "linkdown"
+	check_fail $?
+
+	ip netns exec testns ip link del dev dummy0
+
+	ip netns del testns
+
+	if [ $ret -ne 0 ]; then
+		echo "FAIL: local route carrier test"
+		return 1
+	fi
+	echo "PASS: local route carrier test"
+}
+
+fib_carrier_unicast_test()
+{
+	ret=0
+
+	netns_create "testns"
+
+	ip netns exec testns ip link add dummy0 type dummy
+	ip netns exec testns ip link set dev dummy0 up
+
+	ip netns exec testns ip link set dev dummy0 carrier on
+
+	ip netns exec testns ip address add 198.51.100.1/24 dev dummy0
+	ip netns exec testns ip -6 address add 2001:db8:1::1/64 dev dummy0
+
+	ip netns exec testns ip route get fibmatch 198.51.100.2 &> /dev/null
+	check_err $?
+	ip netns exec testns ip -6 route get fibmatch 2001:db8:1::2 &> /dev/null
+	check_err $?
+
+	ip netns exec testns ip route get fibmatch 198.51.100.2 | \
+		grep -q "linkdown"
+	check_fail $?
+	ip netns exec testns ip -6 route get fibmatch 2001:db8:1::2 | \
+		grep -q "linkdown"
+	check_fail $?
+
+	ip netns exec testns ip link set dev dummy0 carrier off
+
+	ip netns exec testns ip route get fibmatch 198.51.100.2 &> /dev/null
+	check_err $?
+	ip netns exec testns ip -6 route get fibmatch 2001:db8:1::2 &> /dev/null
+	check_err $?
+
+	ip netns exec testns ip route get fibmatch 198.51.100.2 | \
+		grep -q "linkdown"
+	check_err $?
+	ip netns exec testns ip -6 route get fibmatch 2001:db8:1::2 | \
+		grep -q "linkdown"
+	check_err $?
+
+	ip netns exec testns ip address add 192.0.2.1/24 dev dummy0
+	ip netns exec testns ip -6 address add 2001:db8:2::1/64 dev dummy0
+
+	ip netns exec testns ip route get fibmatch 192.0.2.2 &> /dev/null
+	check_err $?
+	ip netns exec testns ip -6 route get fibmatch 2001:db8:2::2 &> /dev/null
+	check_err $?
+
+	ip netns exec testns ip route get fibmatch 192.0.2.2 | \
+		grep -q "linkdown"
+	check_err $?
+	ip netns exec testns ip -6 route get fibmatch 2001:db8:2::2 | \
+		grep -q "linkdown"
+	check_err $?
+
+	ip netns exec testns ip link del dev dummy0
+
+	ip netns del testns
+
+	if [ $ret -ne 0 ]; then
+		echo "FAIL: unicast route carrier test"
+		return 1
+	fi
+	echo "PASS: unicast route carrier test"
+}
+
+fib_carrier_test()
+{
+	echo "Running netdev carrier change tests"
+
+	fib_carrier_local_test
+	fib_carrier_unicast_test
+}
+
+fib_test()
+{
+	fib_unreg_test
+	fib_down_test
+	fib_carrier_test
+}
+
+if [ "$(id -u)" -ne 0 ]; then
+	echo "SKIP: Need root privileges"
+	exit 0
+fi
+
+if [ ! -x "$(command -v ip)" ]; then
+	echo "SKIP: Could not run test without ip tool"
+	exit 0
+fi
+
+ip route help 2>&1 | grep -q fibmatch
+if [ $? -ne 0 ]; then
+	echo "SKIP: iproute2 too old, missing fibmatch"
+	exit 0
+fi
+
+fib_test
+
+exit $ret
diff --git a/tools/testing/selftests/net/msg_zerocopy.c b/tools/testing/selftests/net/msg_zerocopy.c
index 3ab6ec4..e11fe84 100644
--- a/tools/testing/selftests/net/msg_zerocopy.c
+++ b/tools/testing/selftests/net/msg_zerocopy.c
@@ -259,22 +259,28 @@ static int setup_ip6h(struct ipv6hdr *ip6h, uint16_t payload_len)
 	return sizeof(*ip6h);
 }
 
-static void setup_sockaddr(int domain, const char *str_addr, void *sockaddr)
+
+static void setup_sockaddr(int domain, const char *str_addr,
+			   struct sockaddr_storage *sockaddr)
 {
 	struct sockaddr_in6 *addr6 = (void *) sockaddr;
 	struct sockaddr_in *addr4 = (void *) sockaddr;
 
 	switch (domain) {
 	case PF_INET:
+		memset(addr4, 0, sizeof(*addr4));
 		addr4->sin_family = AF_INET;
 		addr4->sin_port = htons(cfg_port);
-		if (inet_pton(AF_INET, str_addr, &(addr4->sin_addr)) != 1)
+		if (str_addr &&
+		    inet_pton(AF_INET, str_addr, &(addr4->sin_addr)) != 1)
 			error(1, 0, "ipv4 parse error: %s", str_addr);
 		break;
 	case PF_INET6:
+		memset(addr6, 0, sizeof(*addr6));
 		addr6->sin6_family = AF_INET6;
 		addr6->sin6_port = htons(cfg_port);
-		if (inet_pton(AF_INET6, str_addr, &(addr6->sin6_addr)) != 1)
+		if (str_addr &&
+		    inet_pton(AF_INET6, str_addr, &(addr6->sin6_addr)) != 1)
 			error(1, 0, "ipv6 parse error: %s", str_addr);
 		break;
 	default:
@@ -603,6 +609,7 @@ static void parse_opts(int argc, char **argv)
 				    sizeof(struct tcphdr) -
 				    40 /* max tcp options */;
 	int c;
+	char *daddr = NULL, *saddr = NULL;
 
 	cfg_payload_len = max_payload_len;
 
@@ -627,7 +634,7 @@ static void parse_opts(int argc, char **argv)
 			cfg_cpu = strtol(optarg, NULL, 0);
 			break;
 		case 'D':
-			setup_sockaddr(cfg_family, optarg, &cfg_dst_addr);
+			daddr = optarg;
 			break;
 		case 'i':
 			cfg_ifindex = if_nametoindex(optarg);
@@ -638,7 +645,7 @@ static void parse_opts(int argc, char **argv)
 			cfg_cork_mixed = true;
 			break;
 		case 'p':
-			cfg_port = htons(strtoul(optarg, NULL, 0));
+			cfg_port = strtoul(optarg, NULL, 0);
 			break;
 		case 'r':
 			cfg_rx = true;
@@ -647,7 +654,7 @@ static void parse_opts(int argc, char **argv)
 			cfg_payload_len = strtoul(optarg, NULL, 0);
 			break;
 		case 'S':
-			setup_sockaddr(cfg_family, optarg, &cfg_src_addr);
+			saddr = optarg;
 			break;
 		case 't':
 			cfg_runtime_ms = 200 + strtoul(optarg, NULL, 10) * 1000;
@@ -660,6 +667,8 @@ static void parse_opts(int argc, char **argv)
 			break;
 		}
 	}
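+	/* set up addresses only after all options are parsed, so that -p
+	 * (port) takes effect regardless of argument order
+	 */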
+	setup_sockaddr(cfg_family, daddr, &cfg_dst_addr);
+	setup_sockaddr(cfg_family, saddr, &cfg_src_addr);
 
 	if (cfg_payload_len > max_payload_len)
 		error(1, 0, "-s: payload exceeds max (%d)", max_payload_len);
diff --git a/tools/testing/selftests/net/rtnetlink.sh b/tools/testing/selftests/net/rtnetlink.sh
index 5215493..a622eee 100755
--- a/tools/testing/selftests/net/rtnetlink.sh
+++ b/tools/testing/selftests/net/rtnetlink.sh
@@ -502,6 +502,231 @@
 	echo "PASS: macsec"
 }
 
+kci_test_gretap()
+{
+	testns="testns"
+	DEV_NS=gretap00
+	ret=0
+
+	ip link help gretap 2>&1 | grep -q "^Usage:"
+	if [ $? -ne 0 ]; then
+		echo "SKIP: gretap: iproute2 too old"
+		return 1
+	fi
+
+	ip netns add "$testns"
+	if [ $? -ne 0 ]; then
+		echo "SKIP gretap tests: cannot add net namespace $testns"
+		return 1
+	fi
+
+	# test native tunnel
+	ip netns exec "$testns" ip link add dev "$DEV_NS" type gretap seq \
+		key 102 local 172.16.1.100 remote 172.16.1.200
+	check_err $?
+
+	ip netns exec "$testns" ip addr add dev "$DEV_NS" 10.1.1.100/24
+	check_err $?
+
+	ip netns exec "$testns" ip link set dev $DEV_NS up
+	check_err $?
+
+	ip netns exec "$testns" ip link del "$DEV_NS"
+	check_err $?
+
+	# test external mode
+	ip netns exec "$testns" ip link add dev "$DEV_NS" type gretap external
+	check_err $?
+
+	ip netns exec "$testns" ip link del "$DEV_NS"
+	check_err $?
+
+	if [ $ret -ne 0 ]; then
+		echo "FAIL: gretap"
+		return 1
+	fi
+	echo "PASS: gretap"
+
+	ip netns del "$testns"
+}
+
+kci_test_ip6gretap()
+{
+	testns="testns"
+	DEV_NS=ip6gretap00
+	ret=0
+
+	ip link help ip6gretap 2>&1 | grep -q "^Usage:"
+	if [ $? -ne 0 ]; then
+		echo "SKIP: ip6gretap: iproute2 too old"
+		return 1
+	fi
+
+	ip netns add "$testns"
+	if [ $? -ne 0 ]; then
+		echo "SKIP ip6gretap tests: cannot add net namespace $testns"
+		return 1
+	fi
+
+	# test native tunnel
+	ip netns exec "$testns" ip link add dev "$DEV_NS" type ip6gretap seq \
+		key 102 local fc00:100::1 remote fc00:100::2
+	check_err $?
+
+	ip netns exec "$testns" ip addr add dev "$DEV_NS" fc00:200::1/96
+	check_err $?
+
+	ip netns exec "$testns" ip link set dev $DEV_NS up
+	check_err $?
+
+	ip netns exec "$testns" ip link del "$DEV_NS"
+	check_err $?
+
+	# test external mode
+	ip netns exec "$testns" ip link add dev "$DEV_NS" type ip6gretap external
+	check_err $?
+
+	ip netns exec "$testns" ip link del "$DEV_NS"
+	check_err $?
+
+	if [ $ret -ne 0 ]; then
+		echo "FAIL: ip6gretap"
+		return 1
+	fi
+	echo "PASS: ip6gretap"
+
+	ip netns del "$testns"
+}
+
+kci_test_erspan()
+{
+	testns="testns"
+	DEV_NS=erspan00
+	ret=0
+
+	ip link help erspan 2>&1 | grep -q "^Usage:"
+	if [ $? -ne 0 ]; then
+		echo "SKIP: erspan: iproute2 too old"
+		return 1
+	fi
+
+	ip netns add "$testns"
+	if [ $? -ne 0 ]; then
+		echo "SKIP erspan tests: cannot add net namespace $testns"
+		return 1
+	fi
+
+	# test native tunnel erspan v1
+	ip netns exec "$testns" ip link add dev "$DEV_NS" type erspan seq \
+		key 102 local 172.16.1.100 remote 172.16.1.200 \
+		erspan_ver 1 erspan 488
+	check_err $?
+
+	ip netns exec "$testns" ip addr add dev "$DEV_NS" 10.1.1.100/24
+	check_err $?
+
+	ip netns exec "$testns" ip link set dev $DEV_NS up
+	check_err $?
+
+	ip netns exec "$testns" ip link del "$DEV_NS"
+	check_err $?
+
+	# test native tunnel erspan v2
+	ip netns exec "$testns" ip link add dev "$DEV_NS" type erspan seq \
+		key 102 local 172.16.1.100 remote 172.16.1.200 \
+		erspan_ver 2 erspan_dir ingress erspan_hwid 7
+	check_err $?
+
+	ip netns exec "$testns" ip addr add dev "$DEV_NS" 10.1.1.100/24
+	check_err $?
+
+	ip netns exec "$testns" ip link set dev $DEV_NS up
+	check_err $?
+
+	ip netns exec "$testns" ip link del "$DEV_NS"
+	check_err $?
+
+	# test external mode
+	ip netns exec "$testns" ip link add dev "$DEV_NS" type erspan external
+	check_err $?
+
+	ip netns exec "$testns" ip link del "$DEV_NS"
+	check_err $?
+
+	if [ $ret -ne 0 ]; then
+		echo "FAIL: erspan"
+		return 1
+	fi
+	echo "PASS: erspan"
+
+	ip netns del "$testns"
+}
+
+kci_test_ip6erspan()
+{
+	testns="testns"
+	DEV_NS=ip6erspan00
+	ret=0
+
+	ip link help ip6erspan 2>&1 | grep -q "^Usage:"
+	if [ $? -ne 0 ]; then
+		echo "SKIP: ip6erspan: iproute2 too old"
+		return 1
+	fi
+
+	ip netns add "$testns"
+	if [ $? -ne 0 ]; then
+		echo "SKIP ip6erspan tests: cannot add net namespace $testns"
+		return 1
+	fi
+
+	# test native tunnel ip6erspan v1
+	ip netns exec "$testns" ip link add dev "$DEV_NS" type ip6erspan seq \
+		key 102 local fc00:100::1 remote fc00:100::2 \
+		erspan_ver 1 erspan 488
+	check_err $?
+
+	ip netns exec "$testns" ip addr add dev "$DEV_NS" 10.1.1.100/24
+	check_err $?
+
+	ip netns exec "$testns" ip link set dev $DEV_NS up
+	check_err $?
+
+	ip netns exec "$testns" ip link del "$DEV_NS"
+	check_err $?
+
+	# test native tunnel ip6erspan v2
+	ip netns exec "$testns" ip link add dev "$DEV_NS" type ip6erspan seq \
+		key 102 local fc00:100::1 remote fc00:100::2 \
+		erspan_ver 2 erspan_dir ingress erspan_hwid 7
+	check_err $?
+
+	ip netns exec "$testns" ip addr add dev "$DEV_NS" 10.1.1.100/24
+	check_err $?
+
+	ip netns exec "$testns" ip link set dev $DEV_NS up
+	check_err $?
+
+	ip netns exec "$testns" ip link del "$DEV_NS"
+	check_err $?
+
+	# test external mode
+	ip netns exec "$testns" ip link add dev "$DEV_NS" \
+		type ip6erspan external
+	check_err $?
+
+	ip netns exec "$testns" ip link del "$DEV_NS"
+	check_err $?
+
+	if [ $ret -ne 0 ]; then
+		echo "FAIL: ip6erspan"
+		return 1
+	fi
+	echo "PASS: ip6erspan"
+
+	ip netns del "$testns"
+}
+
 kci_test_rtnl()
 {
 	kci_add_dummy
@@ -514,6 +739,10 @@
 	kci_test_route_get
 	kci_test_tc
 	kci_test_gre
+	kci_test_gretap
+	kci_test_ip6gretap
+	kci_test_erspan
+	kci_test_ip6erspan
 	kci_test_bridge
 	kci_test_addrlabel
 	kci_test_ifalias