Merge tag 'char-misc-4.3-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/char-misc
Pull char/misc driver patches from Greg KH:
"Here's the "big" char/misc driver update for 4.3-rc1.
Not much really interesting here, just a number of little changes all
over the place, and some nice consolidation of the nvmem drivers to a
common framework. As usual, the mei drivers stand out as the largest
"churn" to handle new devices and features in their hardware.
All have been in linux-next for a while with no issues"
* tag 'char-misc-4.3-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/char-misc: (136 commits)
auxdisplay: ks0108: initialize local parport variable
extcon: palmas: Fix build break due to devm_gpiod_get_optional API change
extcon: palmas: Support GPIO based USB ID detection
extcon: Fix signedness bugs about break error handling
extcon: Drop owner assignment from i2c_driver
extcon: arizona: Simplify pdata semantics for micd_dbtime
extcon: arizona: Declare 3-pole jack if we detect open circuit on mic
extcon: Add exception handling to prevent the NULL pointer access
extcon: arizona: Ensure variables are set for headphone detection
extcon: arizona: Use gpiod interface to handle micd_pol_gpio gpio
extcon: arizona: Add basic microphone detection DT/ACPI bindings
extcon: arizona: Update to use the new device properties API
extcon: palmas: Remove the mutually_exclusive array
extcon: Remove optional print_state() function pointer of struct extcon_dev
extcon: Remove duplicate header file in extcon.h
extcon: max77843: Clear IRQ bits state before request IRQ
toshiba laptop: replace ioremap_cache with ioremap
misc: eeprom: max6875: clean up max6875_read()
misc: eeprom: clean up eeprom_read()
misc: eeprom: 93xx46: clean up eeprom_93xx46_bin_read/write
...
diff --git a/.get_maintainer.ignore b/.get_maintainer.ignore
new file mode 100644
index 0000000..cca6d87
--- /dev/null
+++ b/.get_maintainer.ignore
@@ -0,0 +1 @@
+Christoph Hellwig <hch@lst.de>
diff --git a/.mailmap b/.mailmap
index b4091b7..4b31af5 100644
--- a/.mailmap
+++ b/.mailmap
@@ -17,6 +17,7 @@
Al Viro <viro@ftp.linux.org.uk>
Al Viro <viro@zenIV.linux.org.uk>
Andreas Herrmann <aherrman@de.ibm.com>
+Andrey Ryabinin <ryabinin.a.a@gmail.com> <a.ryabinin@samsung.com>
Andrew Morton <akpm@linux-foundation.org>
Andrew Vasquez <andrew.vasquez@qlogic.com>
Andy Adamson <andros@citi.umich.edu>
diff --git a/Documentation/devicetree/bindings/arm/cpus.txt b/Documentation/devicetree/bindings/arm/cpus.txt
index d6b794c..91e6e5c 100644
--- a/Documentation/devicetree/bindings/arm/cpus.txt
+++ b/Documentation/devicetree/bindings/arm/cpus.txt
@@ -199,6 +199,7 @@
"qcom,kpss-acc-v1"
"qcom,kpss-acc-v2"
"rockchip,rk3066-smp"
+ "ste,dbx500-smp"
- cpu-release-addr
Usage: required for systems that have an "enable-method"
diff --git a/Documentation/s390/00-INDEX b/Documentation/s390/00-INDEX
index 10c874eb..9189535 100644
--- a/Documentation/s390/00-INDEX
+++ b/Documentation/s390/00-INDEX
@@ -16,8 +16,6 @@
- hints for debugging on s390 systems.
driver-model.txt
- information on s390 devices and the driver model.
-kvm.txt
- - ioctl calls to /dev/kvm on s390.
monreader.txt
- information on accessing the z/VM monitor stream from Linux.
qeth.txt
diff --git a/Documentation/s390/kvm.txt b/Documentation/s390/kvm.txt
deleted file mode 100644
index 85f3280..0000000
--- a/Documentation/s390/kvm.txt
+++ /dev/null
@@ -1,125 +0,0 @@
-*** BIG FAT WARNING ***
-The kvm module is currently in EXPERIMENTAL state for s390. This means that
-the interface to the module is not yet considered to remain stable. Thus, be
-prepared that we keep breaking your userspace application and guest
-compatibility over and over again until we feel happy with the result. Make sure
-your guest kernel, your host kernel, and your userspace launcher are in a
-consistent state.
-
-This Documentation describes the unique ioctl calls to /dev/kvm, the resulting
-kvm-vm file descriptors, and the kvm-vcpu file descriptors that differ from x86.
-
-1. ioctl calls to /dev/kvm
-KVM does support the following ioctls on s390 that are common with other
-architectures and do behave the same:
-KVM_GET_API_VERSION
-KVM_CREATE_VM (*) see note
-KVM_CHECK_EXTENSION
-KVM_GET_VCPU_MMAP_SIZE
-
-Notes:
-* KVM_CREATE_VM may fail on s390, if the calling process has multiple
-threads and has not called KVM_S390_ENABLE_SIE before.
-
-In addition, on s390 the following architecture specific ioctls are supported:
-ioctl: KVM_S390_ENABLE_SIE
-args: none
-see also: include/linux/kvm.h
-This call causes the kernel to switch on PGSTE in the user page table. This
-operation is needed in order to run a virtual machine, and it requires the
-calling process to be single-threaded. Note that the first call to KVM_CREATE_VM
-will implicitly try to switch on PGSTE if the user process has not called
-KVM_S390_ENABLE_SIE before. User processes that want to launch multiple threads
-before creating a virtual machine have to call KVM_S390_ENABLE_SIE, or will
-observe an error calling KVM_CREATE_VM. Switching on PGSTE is a one-time
-operation, is not reversible, and will persist over the entire lifetime of
-the calling process. It does not have any user-visible effect other than a small
-performance penalty.
-
-2. ioctl calls to the kvm-vm file descriptor
-KVM does support the following ioctls on s390 that are common with other
-architectures and do behave the same:
-KVM_CREATE_VCPU
-KVM_SET_USER_MEMORY_REGION (*) see note
-KVM_GET_DIRTY_LOG (**) see note
-
-Notes:
-* kvm does only allow exactly one memory slot on s390, which has to start
- at guest absolute address zero and at a user address that is aligned on any
- page boundary. This hardware "limitation" allows us to have a few unique
- optimizations. The memory slot doesn't have to be filled
- with memory actually, it may contain sparse holes. That said, with different
- user memory layout this does still allow a large flexibility when
- doing the guest memory setup.
-** KVM_GET_DIRTY_LOG doesn't work properly yet. The user will receive an empty
-log. This ioctl call is only needed for guest migration, and we intend to
-implement this one in the future.
-
-In addition, on s390 the following architecture specific ioctls for the kvm-vm
-file descriptor are supported:
-ioctl: KVM_S390_INTERRUPT
-args: struct kvm_s390_interrupt *
-see also: include/linux/kvm.h
-This ioctl is used to submit a floating interrupt for a virtual machine.
-Floating interrupts may be delivered to any virtual cpu in the configuration.
-Only some interrupt types defined in include/linux/kvm.h make sense when
-submitted as floating interrupts. The following interrupts are not considered
-to be useful as floating interrupts, and a call to inject them will result in
--EINVAL error code: program interrupts and interprocessor signals. Valid
-floating interrupts are:
-KVM_S390_INT_VIRTIO
-KVM_S390_INT_SERVICE
-
-3. ioctl calls to the kvm-vcpu file descriptor
-KVM does support the following ioctls on s390 that are common with other
-architectures and do behave the same:
-KVM_RUN
-KVM_GET_REGS
-KVM_SET_REGS
-KVM_GET_SREGS
-KVM_SET_SREGS
-KVM_GET_FPU
-KVM_SET_FPU
-
-In addition, on s390 the following architecture specific ioctls for the
-kvm-vcpu file descriptor are supported:
-ioctl: KVM_S390_INTERRUPT
-args: struct kvm_s390_interrupt *
-see also: include/linux/kvm.h
-This ioctl is used to submit an interrupt for a specific virtual cpu.
-Only some interrupt types defined in include/linux/kvm.h make sense when
-submitted for a specific cpu. The following interrupts are not considered
-to be useful, and a call to inject them will result in -EINVAL error code:
-service processor calls and virtio interrupts. Valid interrupt types are:
-KVM_S390_PROGRAM_INT
-KVM_S390_SIGP_STOP
-KVM_S390_RESTART
-KVM_S390_SIGP_SET_PREFIX
-KVM_S390_INT_EMERGENCY
-
-ioctl: KVM_S390_STORE_STATUS
-args: unsigned long
-see also: include/linux/kvm.h
-This ioctl stores the state of the cpu at the guest real address given as
-argument, unless one of the following values defined in include/linux/kvm.h
-is given as argument:
-KVM_S390_STORE_STATUS_NOADDR - the CPU stores its status to the save area in
-absolute lowcore as defined by the principles of operation
-KVM_S390_STORE_STATUS_PREFIXED - the CPU stores its status to the save area in
-its prefix page just like the dump tool that comes with zipl. This is useful
-to create a system dump for use with lkcdutils or crash.
-
-ioctl: KVM_S390_SET_INITIAL_PSW
-args: struct kvm_s390_psw *
-see also: include/linux/kvm.h
-This ioctl can be used to set the processor status word (psw) of a stopped cpu
-prior to running it with KVM_RUN. Note that this call is not required to modify
-the psw during sie intercepts that fall back to userspace because struct kvm_run
-does contain the psw, and this value is evaluated during reentry of KVM_RUN
-after the intercept exit was recognized.
-
-ioctl: KVM_S390_INITIAL_RESET
-args: none
-see also: include/linux/kvm.h
-This ioctl can be used to perform an initial cpu reset as defined by the
-principles of operation. The target cpu has to be in stopped state.
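The deleted text above was the only place the one-time KVM_S390_ENABLE_SIE sequence was spelled out; as a minimal userspace sketch (error handling illustrative only, assuming <linux/kvm.h> provides the ioctl numbers):

    #include <fcntl.h>
    #include <stdio.h>
    #include <sys/ioctl.h>
    #include <linux/kvm.h>

    int main(void)
    {
        int kvm_fd, vm_fd;

        kvm_fd = open("/dev/kvm", O_RDWR);
        if (kvm_fd < 0) {
            perror("open /dev/kvm");
            return 1;
        }
        /* Must run while the process is still single-threaded; it
         * switches on PGSTE in the user page table and cannot be
         * undone for the lifetime of the process. */
        if (ioctl(kvm_fd, KVM_S390_ENABLE_SIE, 0) < 0)
            perror("KVM_S390_ENABLE_SIE");
        /* Only now is it safe to spawn threads and create the VM. */
        vm_fd = ioctl(kvm_fd, KVM_CREATE_VM, 0);
        if (vm_fd < 0)
            perror("KVM_CREATE_VM");
        return 0;
    }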
diff --git a/Documentation/virtual/kvm/api.txt b/Documentation/virtual/kvm/api.txt
index a7926a9..a4ebcb7 100644
--- a/Documentation/virtual/kvm/api.txt
+++ b/Documentation/virtual/kvm/api.txt
@@ -3277,6 +3277,7 @@
struct {
#define KVM_SYSTEM_EVENT_SHUTDOWN 1
#define KVM_SYSTEM_EVENT_RESET 2
+#define KVM_SYSTEM_EVENT_CRASH 3
__u32 type;
__u64 flags;
} system_event;
@@ -3296,6 +3297,10 @@
KVM_SYSTEM_EVENT_RESET -- the guest has requested a reset of the VM.
As with SHUTDOWN, userspace can choose to ignore the request, or
to schedule the reset to occur in the future and may call KVM_RUN again.
+ KVM_SYSTEM_EVENT_CRASH -- the guest has crashed and requested
+ crash condition handling. Userspace can choose to ignore the
+ request, or to gather a VM memory core dump and/or to
+ reset/shutdown the VM.
/* Fix the size of the union. */
char padding[256];
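A launcher sees the new event through the same kvm_run union shown above; a hedged dispatch sketch, where dump_guest_core() stands in for whatever core-dump path userspace implements (it is not a KVM API):

    #include <linux/kvm.h>

    extern void dump_guest_core(struct kvm_run *run); /* hypothetical helper */

    static void handle_system_event(struct kvm_run *run)
    {
        if (run->exit_reason != KVM_EXIT_SYSTEM_EVENT)
            return;
        switch (run->system_event.type) {
        case KVM_SYSTEM_EVENT_SHUTDOWN:
            /* guest requested shutdown of the VM */
            break;
        case KVM_SYSTEM_EVENT_RESET:
            /* may schedule a reset and call KVM_RUN again */
            break;
        case KVM_SYSTEM_EVENT_CRASH:
            /* may gather a core dump and/or reset/shutdown the VM */
            dump_guest_core(run);
            break;
        }
    }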
diff --git a/MAINTAINERS b/MAINTAINERS
index d954927..7899b3f 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -3587,6 +3587,15 @@
F: drivers/gpu/drm/rockchip/
F: Documentation/devicetree/bindings/video/rockchip*
+DRM DRIVERS FOR STI
+M: Benjamin Gaignard <benjamin.gaignard@linaro.org>
+M: Vincent Abriou <vincent.abriou@st.com>
+L: dri-devel@lists.freedesktop.org
+T: git http://git.linaro.org/people/benjamin.gaignard/kernel.git
+S: Maintained
+F: drivers/gpu/drm/sti
+F: Documentation/devicetree/bindings/gpu/st,stih4xx.txt
+
DSBR100 USB FM RADIO DRIVER
M: Alexey Klimov <klimov.linux@gmail.com>
L: linux-media@vger.kernel.org
@@ -5841,6 +5850,7 @@
KERNEL NFSD, SUNRPC, AND LOCKD SERVERS
M: "J. Bruce Fields" <bfields@fieldses.org>
+M: Jeff Layton <jlayton@poochiereds.net>
L: linux-nfs@vger.kernel.org
W: http://nfs.sourceforge.net/
S: Supported
diff --git a/Makefile b/Makefile
index 35b4c19..c361593 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
VERSION = 4
PATCHLEVEL = 2
SUBLEVEL = 0
-EXTRAVERSION = -rc6
+EXTRAVERSION =
NAME = Hurr durr I'ma sheep
# *DOCUMENTATION*
diff --git a/arch/arm/Makefile b/arch/arm/Makefile
index 07ab3d2..7451b44 100644
--- a/arch/arm/Makefile
+++ b/arch/arm/Makefile
@@ -312,6 +312,9 @@
PHONY += bzImage $(BOOT_TARGETS) $(INSTALL_TARGETS)
+bootpImage uImage: zImage
+zImage: Image
+
$(BOOT_TARGETS): vmlinux
$(Q)$(MAKE) $(build)=$(boot) MACHINE=$(MACHINE) $(boot)/$@
diff --git a/arch/arm/boot/dts/dra7.dtsi b/arch/arm/boot/dts/dra7.dtsi
index 4a0718c..1e29ccf 100644
--- a/arch/arm/boot/dts/dra7.dtsi
+++ b/arch/arm/boot/dts/dra7.dtsi
@@ -116,7 +116,7 @@
ranges = <0 0x2000 0x2000>;
scm_conf: scm_conf@0 {
- compatible = "syscon";
+ compatible = "syscon", "simple-bus";
reg = <0x0 0x1400>;
#address-cells = <1>;
#size-cells = <1>;
diff --git a/arch/arm/boot/dts/imx6qdl.dtsi b/arch/arm/boot/dts/imx6qdl.dtsi
index e6d1359..b57033e 100644
--- a/arch/arm/boot/dts/imx6qdl.dtsi
+++ b/arch/arm/boot/dts/imx6qdl.dtsi
@@ -181,10 +181,10 @@
interrupt-names = "msi";
#interrupt-cells = <1>;
interrupt-map-mask = <0 0 0 0x7>;
- interrupt-map = <0 0 0 1 &intc GIC_SPI 123 IRQ_TYPE_LEVEL_HIGH>,
- <0 0 0 2 &intc GIC_SPI 122 IRQ_TYPE_LEVEL_HIGH>,
- <0 0 0 3 &intc GIC_SPI 121 IRQ_TYPE_LEVEL_HIGH>,
- <0 0 0 4 &intc GIC_SPI 120 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-map = <0 0 0 1 &gpc GIC_SPI 123 IRQ_TYPE_LEVEL_HIGH>,
+ <0 0 0 2 &gpc GIC_SPI 122 IRQ_TYPE_LEVEL_HIGH>,
+ <0 0 0 3 &gpc GIC_SPI 121 IRQ_TYPE_LEVEL_HIGH>,
+ <0 0 0 4 &gpc GIC_SPI 120 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&clks IMX6QDL_CLK_PCIE_AXI>,
<&clks IMX6QDL_CLK_LVDS1_GATE>,
<&clks IMX6QDL_CLK_PCIE_REF_125M>;
diff --git a/arch/arm/boot/dts/k2e.dtsi b/arch/arm/boot/dts/k2e.dtsi
index 1b6494f..675fb8e 100644
--- a/arch/arm/boot/dts/k2e.dtsi
+++ b/arch/arm/boot/dts/k2e.dtsi
@@ -131,10 +131,17 @@
<GIC_SPI 376 IRQ_TYPE_EDGE_RISING>;
};
};
+
+ mdio: mdio@24200f00 {
+ compatible = "ti,keystone_mdio", "ti,davinci_mdio";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0x24200f00 0x100>;
+ status = "disabled";
+ clocks = <&clkcpgmac>;
+ clock-names = "fck";
+ bus_freq = <2500000>;
+ };
/include/ "k2e-netcp.dtsi"
};
};
-
-&mdio {
- reg = <0x24200f00 0x100>;
-};
diff --git a/arch/arm/boot/dts/k2hk.dtsi b/arch/arm/boot/dts/k2hk.dtsi
index ae64724..d0810a5 100644
--- a/arch/arm/boot/dts/k2hk.dtsi
+++ b/arch/arm/boot/dts/k2hk.dtsi
@@ -98,6 +98,17 @@
#gpio-cells = <2>;
gpio,syscon-dev = <&devctrl 0x25c>;
};
+
+ mdio: mdio@02090300 {
+ compatible = "ti,keystone_mdio", "ti,davinci_mdio";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0x02090300 0x100>;
+ status = "disabled";
+ clocks = <&clkcpgmac>;
+ clock-names = "fck";
+ bus_freq = <2500000>;
+ };
/include/ "k2hk-netcp.dtsi"
};
};
diff --git a/arch/arm/boot/dts/k2l.dtsi b/arch/arm/boot/dts/k2l.dtsi
index 0e00748..49fd414 100644
--- a/arch/arm/boot/dts/k2l.dtsi
+++ b/arch/arm/boot/dts/k2l.dtsi
@@ -29,7 +29,6 @@
};
soc {
-
/include/ "k2l-clocks.dtsi"
uart2: serial@02348400 {
@@ -79,6 +78,17 @@
#gpio-cells = <2>;
gpio,syscon-dev = <&devctrl 0x24c>;
};
+
+ mdio: mdio@26200f00 {
+ compatible = "ti,keystone_mdio", "ti,davinci_mdio";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0x26200f00 0x100>;
+ status = "disabled";
+ clocks = <&clkcpgmac>;
+ clock-names = "fck";
+ bus_freq = <2500000>;
+ };
/include/ "k2l-netcp.dtsi"
};
};
@@ -96,7 +106,3 @@
/* Pin muxed. Enabled and configured by Bootloader */
status = "disabled";
};
-
-&mdio {
- reg = <0x26200f00 0x100>;
-};
diff --git a/arch/arm/boot/dts/keystone.dtsi b/arch/arm/boot/dts/keystone.dtsi
index e7a6f6d..72816d6 100644
--- a/arch/arm/boot/dts/keystone.dtsi
+++ b/arch/arm/boot/dts/keystone.dtsi
@@ -267,17 +267,6 @@
1 0 0x21000A00 0x00000100>;
};
- mdio: mdio@02090300 {
- compatible = "ti,keystone_mdio", "ti,davinci_mdio";
- #address-cells = <1>;
- #size-cells = <0>;
- reg = <0x02090300 0x100>;
- status = "disabled";
- clocks = <&clkpa>;
- clock-names = "fck";
- bus_freq = <2500000>;
- };
-
kirq0: keystone_irq@26202a0 {
compatible = "ti,keystone-irq";
interrupts = <GIC_SPI 4 IRQ_TYPE_EDGE_RISING>;
diff --git a/arch/arm/boot/dts/omap2430.dtsi b/arch/arm/boot/dts/omap2430.dtsi
index 11a7963b..2390f38 100644
--- a/arch/arm/boot/dts/omap2430.dtsi
+++ b/arch/arm/boot/dts/omap2430.dtsi
@@ -51,7 +51,8 @@
};
scm_conf: scm_conf@270 {
- compatible = "syscon";
+ compatible = "syscon",
+ "simple-bus";
reg = <0x270 0x240>;
#address-cells = <1>;
#size-cells = <1>;
diff --git a/arch/arm/boot/dts/omap4.dtsi b/arch/arm/boot/dts/omap4.dtsi
index 7d31c6f..abc4473 100644
--- a/arch/arm/boot/dts/omap4.dtsi
+++ b/arch/arm/boot/dts/omap4.dtsi
@@ -191,7 +191,8 @@
};
omap4_padconf_global: omap4_padconf_global@5a0 {
- compatible = "syscon";
+ compatible = "syscon",
+ "simple-bus";
reg = <0x5a0 0x170>;
#address-cells = <1>;
#size-cells = <1>;
diff --git a/arch/arm/boot/dts/omap5.dtsi b/arch/arm/boot/dts/omap5.dtsi
index c8fd648..b1a1263 100644
--- a/arch/arm/boot/dts/omap5.dtsi
+++ b/arch/arm/boot/dts/omap5.dtsi
@@ -180,7 +180,8 @@
};
omap5_padconf_global: omap5_padconf_global@5a0 {
- compatible = "syscon";
+ compatible = "syscon",
+ "simple-bus";
reg = <0x5a0 0xec>;
#address-cells = <1>;
#size-cells = <1>;
diff --git a/arch/arm/boot/dts/ste-dbx5x0.dtsi b/arch/arm/boot/dts/ste-dbx5x0.dtsi
index a75f328..b8f81fb 100644
--- a/arch/arm/boot/dts/ste-dbx5x0.dtsi
+++ b/arch/arm/boot/dts/ste-dbx5x0.dtsi
@@ -15,6 +15,33 @@
#include "skeleton.dtsi"
/ {
+ cpus {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ enable-method = "ste,dbx500-smp";
+
+ cpu-map {
+ cluster0 {
+ core0 {
+ cpu = <&CPU0>;
+ };
+ core1 {
+ cpu = <&CPU1>;
+ };
+ };
+ };
+ CPU0: cpu@300 {
+ device_type = "cpu";
+ compatible = "arm,cortex-a9";
+ reg = <0x300>;
+ };
+ CPU1: cpu@301 {
+ device_type = "cpu";
+ compatible = "arm,cortex-a9";
+ reg = <0x301>;
+ };
+ };
+
soc {
#address-cells = <1>;
#size-cells = <1>;
@@ -22,32 +49,6 @@
interrupt-parent = <&intc>;
ranges;
- cpus {
- #address-cells = <1>;
- #size-cells = <0>;
-
- cpu-map {
- cluster0 {
- core0 {
- cpu = <&CPU0>;
- };
- core1 {
- cpu = <&CPU1>;
- };
- };
- };
- CPU0: cpu@0 {
- device_type = "cpu";
- compatible = "arm,cortex-a9";
- reg = <0>;
- };
- CPU1: cpu@1 {
- device_type = "cpu";
- compatible = "arm,cortex-a9";
- reg = <1>;
- };
- };
-
ptm@801ae000 {
compatible = "arm,coresight-etm3x", "arm,primecell";
reg = <0x801ae000 0x1000>;
diff --git a/arch/arm/kernel/entry-common.S b/arch/arm/kernel/entry-common.S
index 92828a1..b48dd4f 100644
--- a/arch/arm/kernel/entry-common.S
+++ b/arch/arm/kernel/entry-common.S
@@ -61,6 +61,7 @@
movlt scno, #(__NR_restart_syscall - __NR_SYSCALL_BASE)
ldmia sp, {r0 - r6} @ have to reload r0 - r6
b local_restart @ ... and off we go
+ENDPROC(ret_fast_syscall)
/*
* "slow" syscall return path. "why" tells us if this was a real syscall.
diff --git a/arch/arm/kernel/head.S b/arch/arm/kernel/head.S
index bd755d9..29e2991 100644
--- a/arch/arm/kernel/head.S
+++ b/arch/arm/kernel/head.S
@@ -399,6 +399,9 @@
sub lr, r4, r5 @ mmu has been enabled
add r3, r7, lr
ldrd r4, [r3, #0] @ get secondary_data.pgdir
+ARM_BE8(eor r4, r4, r5) @ Swap r5 and r4 in BE:
+ARM_BE8(eor r5, r4, r5) @ it can be done in 3 steps
+ARM_BE8(eor r4, r4, r5) @ without using a temp reg.
ldr r8, [r3, #8] @ get secondary_data.swapper_pg_dir
badr lr, __enable_mmu @ return address
mov r13, r12 @ __secondary_switched address
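The three ARM_BE8 eor instructions above are the classic XOR swap; in C the same exchange reads (a sketch, not kernel code):

    /* XOR swap: exchange *a and *b without a temporary register. */
    static inline void xor_swap(unsigned long *a, unsigned long *b)
    {
        *a ^= *b;   /* a = a ^ b                   */
        *b ^= *a;   /* b = b ^ (a ^ b) = old a     */
        *a ^= *b;   /* a = (a ^ b) ^ old a = old b */
    }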
diff --git a/arch/arm/kernel/vdso.c b/arch/arm/kernel/vdso.c
index efe17dd..54a5aea 100644
--- a/arch/arm/kernel/vdso.c
+++ b/arch/arm/kernel/vdso.c
@@ -296,7 +296,6 @@
*/
void update_vsyscall(struct timekeeper *tk)
{
- struct timespec xtime_coarse;
struct timespec64 *wtm = &tk->wall_to_monotonic;
if (!cntvct_ok) {
@@ -308,10 +307,10 @@
vdso_write_begin(vdso_data);
- xtime_coarse = __current_kernel_time();
vdso_data->tk_is_cntvct = tk_is_cntvct(tk);
- vdso_data->xtime_coarse_sec = xtime_coarse.tv_sec;
- vdso_data->xtime_coarse_nsec = xtime_coarse.tv_nsec;
+ vdso_data->xtime_coarse_sec = tk->xtime_sec;
+ vdso_data->xtime_coarse_nsec = (u32)(tk->tkr_mono.xtime_nsec >>
+ tk->tkr_mono.shift);
vdso_data->wtm_clock_sec = wtm->tv_sec;
vdso_data->wtm_clock_nsec = wtm->tv_nsec;
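This hunk and its arm64 counterpart below publish the coarse time straight from the timekeeper instead of snapshotting __current_kernel_time(); since tkr_mono.xtime_nsec holds nanoseconds left-shifted by tkr_mono.shift, plain nanoseconds come from a right shift (a sketch assuming kernel timekeeping context):

    /* Sketch: the timekeeper stores shifted nanoseconds, so e.g.
     * xtime_nsec == (3 << shift) represents 3 ns of coarse time. */
    static inline u32 coarse_nsec(const struct timekeeper *tk)
    {
        return (u32)(tk->tkr_mono.xtime_nsec >> tk->tkr_mono.shift);
    }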
diff --git a/arch/arm/lib/uaccess_with_memcpy.c b/arch/arm/lib/uaccess_with_memcpy.c
index 3e58d71..4b39af2 100644
--- a/arch/arm/lib/uaccess_with_memcpy.c
+++ b/arch/arm/lib/uaccess_with_memcpy.c
@@ -96,7 +96,7 @@
}
/* the mmap semaphore is taken only if not in an atomic context */
- atomic = in_atomic();
+ atomic = faulthandler_disabled();
if (!atomic)
down_read(&current->mm->mmap_sem);
diff --git a/arch/arm/mach-exynos/pm_domains.c b/arch/arm/mach-exynos/pm_domains.c
index 6001f1c..4a87e86 100644
--- a/arch/arm/mach-exynos/pm_domains.c
+++ b/arch/arm/mach-exynos/pm_domains.c
@@ -146,9 +146,8 @@
pd->base = of_iomap(np, 0);
if (!pd->base) {
pr_warn("%s: failed to map memory\n", __func__);
- kfree(pd->pd.name);
+ kfree_const(pd->pd.name);
kfree(pd);
- of_node_put(np);
continue;
}
diff --git a/arch/arm/mach-omap2/omap-wakeupgen.c b/arch/arm/mach-omap2/omap-wakeupgen.c
index 8e52621..e1d2e99 100644
--- a/arch/arm/mach-omap2/omap-wakeupgen.c
+++ b/arch/arm/mach-omap2/omap-wakeupgen.c
@@ -392,6 +392,7 @@
.irq_mask = wakeupgen_mask,
.irq_unmask = wakeupgen_unmask,
.irq_retrigger = irq_chip_retrigger_hierarchy,
+ .irq_set_type = irq_chip_set_type_parent,
.flags = IRQCHIP_SKIP_SET_WAKE | IRQCHIP_MASK_ON_SUSPEND,
#ifdef CONFIG_SMP
.irq_set_affinity = irq_chip_set_affinity_parent,
diff --git a/arch/arm/vdso/Makefile b/arch/arm/vdso/Makefile
index 9d259d9..1160434 100644
--- a/arch/arm/vdso/Makefile
+++ b/arch/arm/vdso/Makefile
@@ -14,7 +14,7 @@
VDSO_LDFLAGS += -nostdlib -shared
VDSO_LDFLAGS += $(call cc-ldoption, -Wl$(comma)--hash-style=sysv)
VDSO_LDFLAGS += $(call cc-ldoption, -Wl$(comma)--build-id)
-VDSO_LDFLAGS += $(call cc-option, -fuse-ld=bfd)
+VDSO_LDFLAGS += $(call cc-ldoption, -fuse-ld=bfd)
obj-$(CONFIG_VDSO) += vdso.o
extra-$(CONFIG_VDSO) += vdso.lds
diff --git a/arch/arm64/kernel/vdso.c b/arch/arm64/kernel/vdso.c
index ec37ab3..97bc68f 100644
--- a/arch/arm64/kernel/vdso.c
+++ b/arch/arm64/kernel/vdso.c
@@ -199,16 +199,15 @@
*/
void update_vsyscall(struct timekeeper *tk)
{
- struct timespec xtime_coarse;
u32 use_syscall = strcmp(tk->tkr_mono.clock->name, "arch_sys_counter");
++vdso_data->tb_seq_count;
smp_wmb();
- xtime_coarse = __current_kernel_time();
vdso_data->use_syscall = use_syscall;
- vdso_data->xtime_coarse_sec = xtime_coarse.tv_sec;
- vdso_data->xtime_coarse_nsec = xtime_coarse.tv_nsec;
+ vdso_data->xtime_coarse_sec = tk->xtime_sec;
+ vdso_data->xtime_coarse_nsec = tk->tkr_mono.xtime_nsec >>
+ tk->tkr_mono.shift;
vdso_data->wtm_clock_sec = tk->wall_to_monotonic.tv_sec;
vdso_data->wtm_clock_nsec = tk->wall_to_monotonic.tv_nsec;
diff --git a/arch/arm64/kvm/inject_fault.c b/arch/arm64/kvm/inject_fault.c
index f02530e..85c5715 100644
--- a/arch/arm64/kvm/inject_fault.c
+++ b/arch/arm64/kvm/inject_fault.c
@@ -168,8 +168,8 @@
{
if (!(vcpu->arch.hcr_el2 & HCR_RW))
inject_abt32(vcpu, false, addr);
-
- inject_abt64(vcpu, false, addr);
+ else
+ inject_abt64(vcpu, false, addr);
}
/**
@@ -184,8 +184,8 @@
{
if (!(vcpu->arch.hcr_el2 & HCR_RW))
inject_abt32(vcpu, true, addr);
-
- inject_abt64(vcpu, true, addr);
+ else
+ inject_abt64(vcpu, true, addr);
}
/**
@@ -198,6 +198,6 @@
{
if (!(vcpu->arch.hcr_el2 & HCR_RW))
inject_undef32(vcpu);
-
- inject_undef64(vcpu);
+ else
+ inject_undef64(vcpu);
}
diff --git a/arch/mips/kernel/genex.S b/arch/mips/kernel/genex.S
index af42e70..baa7b6f 100644
--- a/arch/mips/kernel/genex.S
+++ b/arch/mips/kernel/genex.S
@@ -407,7 +407,7 @@
.set noat
SAVE_ALL
FEXPORT(handle_\exception\ext)
- __BUILD_clear_\clear
+ __build_clear_\clear
.set at
__BUILD_\verbose \exception
move a0, sp
diff --git a/arch/mips/kernel/scall64-64.S b/arch/mips/kernel/scall64-64.S
index ad4d4463..a6f6b76 100644
--- a/arch/mips/kernel/scall64-64.S
+++ b/arch/mips/kernel/scall64-64.S
@@ -80,7 +80,7 @@
SAVE_STATIC
move s0, t2
move a0, sp
- daddiu a1, v0, __NR_64_Linux
+ move a1, v0
jal syscall_trace_enter
bltz v0, 2f # seccomp failed? Skip syscall
diff --git a/arch/mips/kernel/scall64-n32.S b/arch/mips/kernel/scall64-n32.S
index 446cc65..4b20106 100644
--- a/arch/mips/kernel/scall64-n32.S
+++ b/arch/mips/kernel/scall64-n32.S
@@ -72,7 +72,7 @@
SAVE_STATIC
move s0, t2
move a0, sp
- daddiu a1, v0, __NR_N32_Linux
+ move a1, v0
jal syscall_trace_enter
bltz v0, 2f # seccomp failed? Skip syscall
diff --git a/arch/powerpc/kernel/pci_of_scan.c b/arch/powerpc/kernel/pci_of_scan.c
index 42e02a2..efc3fa5 100644
--- a/arch/powerpc/kernel/pci_of_scan.c
+++ b/arch/powerpc/kernel/pci_of_scan.c
@@ -191,6 +191,9 @@
pci_device_add(dev, bus);
+ /* Setup MSI caps & disable MSI/MSI-X interrupts */
+ pci_msi_setup_pci_dev(dev);
+
return dev;
}
EXPORT_SYMBOL(of_create_pci_dev);
diff --git a/arch/s390/include/asm/etr.h b/arch/s390/include/asm/etr.h
index 629b79a..f7e5c36 100644
--- a/arch/s390/include/asm/etr.h
+++ b/arch/s390/include/asm/etr.h
@@ -214,6 +214,9 @@
void etr_switch_to_local(void);
void etr_sync_check(void);
+/* notifier for syncs */
+extern struct atomic_notifier_head s390_epoch_delta_notifier;
+
/* STP interruption parameter */
struct stp_irq_parm {
unsigned int _pad0 : 14;
diff --git a/arch/s390/include/asm/kvm_host.h b/arch/s390/include/asm/kvm_host.h
index 3024acb..df4db81 100644
--- a/arch/s390/include/asm/kvm_host.h
+++ b/arch/s390/include/asm/kvm_host.h
@@ -258,6 +258,9 @@
u32 diagnose_10;
u32 diagnose_44;
u32 diagnose_9c;
+ u32 diagnose_258;
+ u32 diagnose_308;
+ u32 diagnose_500;
};
#define PGM_OPERATION 0x01
@@ -630,7 +633,6 @@
static inline void kvm_arch_hardware_disable(void) {}
static inline void kvm_arch_check_processor_compat(void *rtn) {}
-static inline void kvm_arch_exit(void) {}
static inline void kvm_arch_sync_events(struct kvm *kvm) {}
static inline void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu) {}
static inline void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu) {}
diff --git a/arch/s390/kernel/time.c b/arch/s390/kernel/time.c
index 9e733d9..627887b 100644
--- a/arch/s390/kernel/time.c
+++ b/arch/s390/kernel/time.c
@@ -58,6 +58,9 @@
static DEFINE_PER_CPU(struct clock_event_device, comparators);
+ATOMIC_NOTIFIER_HEAD(s390_epoch_delta_notifier);
+EXPORT_SYMBOL(s390_epoch_delta_notifier);
+
/*
* Scheduler clock - returns current time in nanosec units.
*/
@@ -752,7 +755,7 @@
static int etr_sync_clock(void *data)
{
static int first;
- unsigned long long clock, old_clock, delay, delta;
+ unsigned long long clock, old_clock, clock_delta, delay, delta;
struct clock_sync_data *etr_sync;
struct etr_aib *sync_port, *aib;
int port;
@@ -789,6 +792,9 @@
delay = (unsigned long long)
(aib->edf2.etv - sync_port->edf2.etv) << 32;
delta = adjust_time(old_clock, clock, delay);
+ clock_delta = clock - old_clock;
+ atomic_notifier_call_chain(&s390_epoch_delta_notifier, 0,
+ &clock_delta);
etr_sync->fixup_cc = delta;
fixup_clock_comparator(delta);
/* Verify that the clock is properly set. */
@@ -1526,7 +1532,7 @@
static int stp_sync_clock(void *data)
{
static int first;
- unsigned long long old_clock, delta;
+ unsigned long long old_clock, delta, new_clock, clock_delta;
struct clock_sync_data *stp_sync;
int rc;
@@ -1551,7 +1557,11 @@
old_clock = get_tod_clock();
rc = chsc_sstpc(stp_page, STP_OP_SYNC, 0);
if (rc == 0) {
- delta = adjust_time(old_clock, get_tod_clock(), 0);
+ new_clock = get_tod_clock();
+ delta = adjust_time(old_clock, new_clock, 0);
+ clock_delta = new_clock - old_clock;
+ atomic_notifier_call_chain(&s390_epoch_delta_notifier,
+ 0, &clock_delta);
fixup_clock_comparator(delta);
rc = chsc_sstpi(stp_page, &stp_info,
sizeof(struct stp_sstpi));
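Both sync paths now publish the raw TOD delta through an ordinary atomic notifier chain; a consumer sketch (handler and block names are illustrative, not kernel code):

    static int my_epoch_handler(struct notifier_block *nb,
                                unsigned long val, void *v)
    {
        unsigned long long *delta = v;  /* new TOD - old TOD */

        /* adjust any cached epoch values by *delta here */
        return NOTIFY_OK;
    }

    static struct notifier_block my_epoch_nb = {
        .notifier_call = my_epoch_handler,
    };

    static void my_register(void)
    {
        atomic_notifier_chain_register(&s390_epoch_delta_notifier,
                                       &my_epoch_nb);
    }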
diff --git a/arch/s390/kvm/diag.c b/arch/s390/kvm/diag.c
index fc7ec95..5fbfb88 100644
--- a/arch/s390/kvm/diag.c
+++ b/arch/s390/kvm/diag.c
@@ -27,13 +27,13 @@
start = vcpu->run->s.regs.gprs[(vcpu->arch.sie_block->ipa & 0xf0) >> 4];
end = vcpu->run->s.regs.gprs[vcpu->arch.sie_block->ipa & 0xf] + 4096;
+ vcpu->stat.diagnose_10++;
if (start & ~PAGE_MASK || end & ~PAGE_MASK || start >= end
|| start < 2 * PAGE_SIZE)
return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
VCPU_EVENT(vcpu, 5, "diag release pages %lX %lX", start, end);
- vcpu->stat.diagnose_10++;
/*
* We checked for start >= end above, so lets check for the
@@ -75,6 +75,9 @@
u16 rx = (vcpu->arch.sie_block->ipa & 0xf0) >> 4;
u16 ry = (vcpu->arch.sie_block->ipa & 0x0f);
+ VCPU_EVENT(vcpu, 3, "diag page reference parameter block at 0x%llx",
+ vcpu->run->s.regs.gprs[rx]);
+ vcpu->stat.diagnose_258++;
if (vcpu->run->s.regs.gprs[rx] & 7)
return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
rc = read_guest(vcpu, vcpu->run->s.regs.gprs[rx], rx, &parm, sizeof(parm));
@@ -85,6 +88,9 @@
switch (parm.subcode) {
case 0: /* TOKEN */
+ VCPU_EVENT(vcpu, 3, "pageref token addr 0x%llx "
+ "select mask 0x%llx compare mask 0x%llx",
+ parm.token_addr, parm.select_mask, parm.compare_mask);
if (vcpu->arch.pfault_token != KVM_S390_PFAULT_TOKEN_INVALID) {
/*
* If the pagefault handshake is already activated,
@@ -114,6 +120,7 @@
* the cancel, therefore to reduce code complexity, we assume
* all outstanding tokens are already pending.
*/
+ VCPU_EVENT(vcpu, 3, "pageref cancel addr 0x%llx", parm.token_addr);
if (parm.token_addr || parm.select_mask ||
parm.compare_mask || parm.zarch)
return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
@@ -174,7 +181,8 @@
unsigned int reg = vcpu->arch.sie_block->ipa & 0xf;
unsigned long subcode = vcpu->run->s.regs.gprs[reg] & 0xffff;
- VCPU_EVENT(vcpu, 5, "diag ipl functions, subcode %lx", subcode);
+ VCPU_EVENT(vcpu, 3, "diag ipl functions, subcode %lx", subcode);
+ vcpu->stat.diagnose_308++;
switch (subcode) {
case 3:
vcpu->run->s390_reset_flags = KVM_S390_RESET_CLEAR;
@@ -202,6 +210,7 @@
{
int ret;
+ vcpu->stat.diagnose_500++;
/* No virtio-ccw notification? Get out quickly. */
if (!vcpu->kvm->arch.css_support ||
(vcpu->run->s.regs.gprs[1] != KVM_S390_VIRTIO_CCW_NOTIFY))
diff --git a/arch/s390/kvm/guestdbg.c b/arch/s390/kvm/guestdbg.c
index e97b345..47518a3 100644
--- a/arch/s390/kvm/guestdbg.c
+++ b/arch/s390/kvm/guestdbg.c
@@ -473,10 +473,45 @@
vcpu->arch.sie_block->iprcc &= ~PGM_PER;
}
+#define pssec(vcpu) (vcpu->arch.sie_block->gcr[1] & _ASCE_SPACE_SWITCH)
+#define hssec(vcpu) (vcpu->arch.sie_block->gcr[13] & _ASCE_SPACE_SWITCH)
+#define old_ssec(vcpu) ((vcpu->arch.sie_block->tecmc >> 31) & 0x1)
+#define old_as_is_home(vcpu) !(vcpu->arch.sie_block->tecmc & 0xffff)
+
void kvm_s390_handle_per_event(struct kvm_vcpu *vcpu)
{
+ int new_as;
+
if (debug_exit_required(vcpu))
vcpu->guest_debug |= KVM_GUESTDBG_EXIT_PENDING;
filter_guest_per_event(vcpu);
+
+ /*
+ * Only RP, SAC, SACF, PT, PTI, PR, PC instructions can trigger
+ * a space-switch event. PER events enforce space-switch events
+ * for these instructions. So if no PER event for the guest is left,
+ * we might have to filter the space-switch element out, too.
+ */
+ if (vcpu->arch.sie_block->iprcc == PGM_SPACE_SWITCH) {
+ vcpu->arch.sie_block->iprcc = 0;
+ new_as = psw_bits(vcpu->arch.sie_block->gpsw).as;
+
+ /*
+ * If the AS changed from / to home, we had RP, SAC or SACF
+ * instruction. Check primary and home space-switch-event
+ * controls. (theoretically home -> home produced no event)
+ */
+ if (((new_as == PSW_AS_HOME) ^ old_as_is_home(vcpu)) &&
+ (pssec(vcpu) || hssec(vcpu)))
+ vcpu->arch.sie_block->iprcc = PGM_SPACE_SWITCH;
+
+ /*
+ * PT, PTI, PR, PC instruction operate on primary AS only. Check
+ * if the primary-space-switch-event control was or got set.
+ */
+ if (new_as == PSW_AS_PRIMARY && !old_as_is_home(vcpu) &&
+ (pssec(vcpu) || old_ssec(vcpu)))
+ vcpu->arch.sie_block->iprcc = PGM_SPACE_SWITCH;
+ }
}
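Condensed, the filtering above keeps PGM_SPACE_SWITCH in exactly two cases; the same decision as a standalone predicate (a restated sketch of the code above):

    /* Sketch: does the space-switch event survive filtering? */
    static bool keep_space_switch(int new_as, bool old_as_home,
                                  bool pssec, bool hssec, bool old_ssec)
    {
        /* RP/SAC/SACF: the AS changed from or to home */
        if (((new_as == PSW_AS_HOME) ^ old_as_home) && (pssec || hssec))
            return true;
        /* PT/PTI/PR/PC: primary AS, and a primary space-switch-event
         * control was or got set */
        if (new_as == PSW_AS_PRIMARY && !old_as_home &&
            (pssec || old_ssec))
            return true;
        return false;
    }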
diff --git a/arch/s390/kvm/interrupt.c b/arch/s390/kvm/interrupt.c
index c98d897..b277d50 100644
--- a/arch/s390/kvm/interrupt.c
+++ b/arch/s390/kvm/interrupt.c
@@ -30,7 +30,6 @@
#define IOINT_SCHID_MASK 0x0000ffff
#define IOINT_SSID_MASK 0x00030000
#define IOINT_CSSID_MASK 0x03fc0000
-#define IOINT_AI_MASK 0x04000000
#define PFAULT_INIT 0x0600
#define PFAULT_DONE 0x0680
#define VIRTIO_PARAM 0x0d00
@@ -72,9 +71,13 @@
static int ckc_irq_pending(struct kvm_vcpu *vcpu)
{
+ preempt_disable();
if (!(vcpu->arch.sie_block->ckc <
- get_tod_clock_fast() + vcpu->arch.sie_block->epoch))
+ get_tod_clock_fast() + vcpu->arch.sie_block->epoch)) {
+ preempt_enable();
return 0;
+ }
+ preempt_enable();
return ckc_interrupts_enabled(vcpu);
}
@@ -311,8 +314,8 @@
li->irq.ext.ext_params2 = 0;
spin_unlock(&li->lock);
- VCPU_EVENT(vcpu, 4, "interrupt: pfault init parm:%x,parm64:%llx",
- 0, ext.ext_params2);
+ VCPU_EVENT(vcpu, 4, "deliver: pfault init token 0x%llx",
+ ext.ext_params2);
trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id,
KVM_S390_INT_PFAULT_INIT,
0, ext.ext_params2);
@@ -368,7 +371,7 @@
spin_unlock(&fi->lock);
if (deliver) {
- VCPU_EVENT(vcpu, 4, "interrupt: machine check mcic=%llx",
+ VCPU_EVENT(vcpu, 3, "deliver: machine check mcic 0x%llx",
mchk.mcic);
trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id,
KVM_S390_MCHK,
@@ -403,7 +406,7 @@
struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
int rc;
- VCPU_EVENT(vcpu, 4, "%s", "interrupt: cpu restart");
+ VCPU_EVENT(vcpu, 3, "%s", "deliver: cpu restart");
vcpu->stat.deliver_restart_signal++;
trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, KVM_S390_RESTART, 0, 0);
@@ -427,7 +430,6 @@
clear_bit(IRQ_PEND_SET_PREFIX, &li->pending_irqs);
spin_unlock(&li->lock);
- VCPU_EVENT(vcpu, 4, "interrupt: set prefix to %x", prefix.address);
vcpu->stat.deliver_prefix_signal++;
trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id,
KVM_S390_SIGP_SET_PREFIX,
@@ -450,7 +452,7 @@
clear_bit(IRQ_PEND_EXT_EMERGENCY, &li->pending_irqs);
spin_unlock(&li->lock);
- VCPU_EVENT(vcpu, 4, "%s", "interrupt: sigp emerg");
+ VCPU_EVENT(vcpu, 4, "%s", "deliver: sigp emerg");
vcpu->stat.deliver_emergency_signal++;
trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, KVM_S390_INT_EMERGENCY,
cpu_addr, 0);
@@ -477,7 +479,7 @@
clear_bit(IRQ_PEND_EXT_EXTERNAL, &li->pending_irqs);
spin_unlock(&li->lock);
- VCPU_EVENT(vcpu, 4, "%s", "interrupt: sigp ext call");
+ VCPU_EVENT(vcpu, 4, "%s", "deliver: sigp ext call");
vcpu->stat.deliver_external_call++;
trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id,
KVM_S390_INT_EXTERNAL_CALL,
@@ -506,7 +508,7 @@
memset(&li->irq.pgm, 0, sizeof(pgm_info));
spin_unlock(&li->lock);
- VCPU_EVENT(vcpu, 4, "interrupt: pgm check code:%x, ilc:%x",
+ VCPU_EVENT(vcpu, 3, "deliver: program irq code 0x%x, ilc:%d",
pgm_info.code, ilc);
vcpu->stat.deliver_program_int++;
trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, KVM_S390_PROGRAM_INT,
@@ -622,7 +624,7 @@
clear_bit(IRQ_PEND_EXT_SERVICE, &fi->pending_irqs);
spin_unlock(&fi->lock);
- VCPU_EVENT(vcpu, 4, "interrupt: sclp parm:%x",
+ VCPU_EVENT(vcpu, 4, "deliver: sclp parameter 0x%x",
ext.ext_params);
vcpu->stat.deliver_service_signal++;
trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, KVM_S390_INT_SERVICE,
@@ -651,9 +653,6 @@
struct kvm_s390_interrupt_info,
list);
if (inti) {
- trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id,
- KVM_S390_INT_PFAULT_DONE, 0,
- inti->ext.ext_params2);
list_del(&inti->list);
fi->counters[FIRQ_CNTR_PFAULT] -= 1;
}
@@ -662,6 +661,12 @@
spin_unlock(&fi->lock);
if (inti) {
+ trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id,
+ KVM_S390_INT_PFAULT_DONE, 0,
+ inti->ext.ext_params2);
+ VCPU_EVENT(vcpu, 4, "deliver: pfault done token 0x%llx",
+ inti->ext.ext_params2);
+
rc = put_guest_lc(vcpu, EXT_IRQ_CP_SERVICE,
(u16 *)__LC_EXT_INT_CODE);
rc |= put_guest_lc(vcpu, PFAULT_DONE,
@@ -691,7 +696,7 @@
list);
if (inti) {
VCPU_EVENT(vcpu, 4,
- "interrupt: virtio parm:%x,parm64:%llx",
+ "deliver: virtio parm: 0x%x,parm64: 0x%llx",
inti->ext.ext_params, inti->ext.ext_params2);
vcpu->stat.deliver_virtio_interrupt++;
trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id,
@@ -741,7 +746,7 @@
struct kvm_s390_interrupt_info,
list);
if (inti) {
- VCPU_EVENT(vcpu, 4, "interrupt: I/O %llx", inti->type);
+ VCPU_EVENT(vcpu, 4, "deliver: I/O 0x%llx", inti->type);
vcpu->stat.deliver_io_int++;
trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id,
inti->type,
@@ -855,7 +860,9 @@
goto no_timer;
}
+ preempt_disable();
now = get_tod_clock_fast() + vcpu->arch.sie_block->epoch;
+ preempt_enable();
sltime = tod_to_ns(vcpu->arch.sie_block->ckc - now);
/* underflow */
@@ -864,7 +871,7 @@
__set_cpu_idle(vcpu);
hrtimer_start(&vcpu->arch.ckc_timer, ktime_set (0, sltime) , HRTIMER_MODE_REL);
- VCPU_EVENT(vcpu, 5, "enabled wait via clock comparator: %llx ns", sltime);
+ VCPU_EVENT(vcpu, 4, "enabled wait via clock comparator: %llu ns", sltime);
no_timer:
srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
kvm_vcpu_block(vcpu);
@@ -894,7 +901,9 @@
u64 now, sltime;
vcpu = container_of(timer, struct kvm_vcpu, arch.ckc_timer);
+ preempt_disable();
now = get_tod_clock_fast() + vcpu->arch.sie_block->epoch;
+ preempt_enable();
sltime = tod_to_ns(vcpu->arch.sie_block->ckc - now);
/*
@@ -968,6 +977,10 @@
{
struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
+ VCPU_EVENT(vcpu, 3, "inject: program irq code 0x%x", irq->u.pgm.code);
+ trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_PROGRAM_INT,
+ irq->u.pgm.code, 0);
+
li->irq.pgm = irq->u.pgm;
set_bit(IRQ_PEND_PROG, &li->pending_irqs);
return 0;
@@ -978,9 +991,6 @@
struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
struct kvm_s390_irq irq;
- VCPU_EVENT(vcpu, 3, "inject: program check %d (from kernel)", code);
- trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_PROGRAM_INT, code,
- 0, 1);
spin_lock(&li->lock);
irq.u.pgm.code = code;
__inject_prog(vcpu, &irq);
@@ -996,10 +1006,6 @@
struct kvm_s390_irq irq;
int rc;
- VCPU_EVENT(vcpu, 3, "inject: prog irq %d (from kernel)",
- pgm_info->code);
- trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_PROGRAM_INT,
- pgm_info->code, 0, 1);
spin_lock(&li->lock);
irq.u.pgm = *pgm_info;
rc = __inject_prog(vcpu, &irq);
@@ -1012,11 +1018,11 @@
{
struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
- VCPU_EVENT(vcpu, 3, "inject: external irq params:%x, params2:%llx",
- irq->u.ext.ext_params, irq->u.ext.ext_params2);
+ VCPU_EVENT(vcpu, 4, "inject: pfault init parameter block at 0x%llx",
+ irq->u.ext.ext_params2);
trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_INT_PFAULT_INIT,
irq->u.ext.ext_params,
- irq->u.ext.ext_params2, 2);
+ irq->u.ext.ext_params2);
li->irq.ext = irq->u.ext;
set_bit(IRQ_PEND_PFAULT_INIT, &li->pending_irqs);
@@ -1045,10 +1051,10 @@
struct kvm_s390_extcall_info *extcall = &li->irq.extcall;
uint16_t src_id = irq->u.extcall.code;
- VCPU_EVENT(vcpu, 3, "inject: external call source-cpu:%u",
+ VCPU_EVENT(vcpu, 4, "inject: external call source-cpu:%u",
src_id);
trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_INT_EXTERNAL_CALL,
- src_id, 0, 2);
+ src_id, 0);
/* sending vcpu invalid */
if (src_id >= KVM_MAX_VCPUS ||
@@ -1070,10 +1076,10 @@
struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
struct kvm_s390_prefix_info *prefix = &li->irq.prefix;
- VCPU_EVENT(vcpu, 3, "inject: set prefix to %x (from user)",
+ VCPU_EVENT(vcpu, 3, "inject: set prefix to %x",
irq->u.prefix.address);
trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_SIGP_SET_PREFIX,
- irq->u.prefix.address, 0, 2);
+ irq->u.prefix.address, 0);
if (!is_vcpu_stopped(vcpu))
return -EBUSY;
@@ -1090,7 +1096,7 @@
struct kvm_s390_stop_info *stop = &li->irq.stop;
int rc = 0;
- trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_SIGP_STOP, 0, 0, 2);
+ trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_SIGP_STOP, 0, 0);
if (irq->u.stop.flags & ~KVM_S390_STOP_SUPP_FLAGS)
return -EINVAL;
@@ -1114,8 +1120,8 @@
{
struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
- VCPU_EVENT(vcpu, 3, "inject: restart type %llx", irq->type);
- trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_RESTART, 0, 0, 2);
+ VCPU_EVENT(vcpu, 3, "%s", "inject: restart int");
+ trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_RESTART, 0, 0);
set_bit(IRQ_PEND_RESTART, &li->pending_irqs);
return 0;
@@ -1126,10 +1132,10 @@
{
struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
- VCPU_EVENT(vcpu, 3, "inject: emergency %u\n",
+ VCPU_EVENT(vcpu, 4, "inject: emergency from cpu %u",
irq->u.emerg.code);
trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_INT_EMERGENCY,
- irq->u.emerg.code, 0, 2);
+ irq->u.emerg.code, 0);
set_bit(irq->u.emerg.code, li->sigp_emerg_pending);
set_bit(IRQ_PEND_EXT_EMERGENCY, &li->pending_irqs);
@@ -1142,10 +1148,10 @@
struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
struct kvm_s390_mchk_info *mchk = &li->irq.mchk;
- VCPU_EVENT(vcpu, 5, "inject: machine check parm64:%llx",
+ VCPU_EVENT(vcpu, 3, "inject: machine check mcic 0x%llx",
irq->u.mchk.mcic);
trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_MCHK, 0,
- irq->u.mchk.mcic, 2);
+ irq->u.mchk.mcic);
/*
* Because repressible machine checks can be indicated along with
@@ -1172,9 +1178,9 @@
{
struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
- VCPU_EVENT(vcpu, 3, "inject: type %x", KVM_S390_INT_CLOCK_COMP);
+ VCPU_EVENT(vcpu, 3, "%s", "inject: clock comparator external");
trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_INT_CLOCK_COMP,
- 0, 0, 2);
+ 0, 0);
set_bit(IRQ_PEND_EXT_CLOCK_COMP, &li->pending_irqs);
atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags);
@@ -1185,9 +1191,9 @@
{
struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
- VCPU_EVENT(vcpu, 3, "inject: type %x", KVM_S390_INT_CPU_TIMER);
+ VCPU_EVENT(vcpu, 3, "%s", "inject: cpu timer external");
trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_INT_CPU_TIMER,
- 0, 0, 2);
+ 0, 0);
set_bit(IRQ_PEND_EXT_CPU_TIMER, &li->pending_irqs);
atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags);
@@ -1435,20 +1441,20 @@
inti->ext.ext_params2 = s390int->parm64;
break;
case KVM_S390_INT_SERVICE:
- VM_EVENT(kvm, 5, "inject: sclp parm:%x", s390int->parm);
+ VM_EVENT(kvm, 4, "inject: sclp parm:%x", s390int->parm);
inti->ext.ext_params = s390int->parm;
break;
case KVM_S390_INT_PFAULT_DONE:
inti->ext.ext_params2 = s390int->parm64;
break;
case KVM_S390_MCHK:
- VM_EVENT(kvm, 5, "inject: machine check parm64:%llx",
+ VM_EVENT(kvm, 3, "inject: machine check mcic 0x%llx",
s390int->parm64);
inti->mchk.cr14 = s390int->parm; /* upper bits are not used */
inti->mchk.mcic = s390int->parm64;
break;
case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX:
- if (inti->type & IOINT_AI_MASK)
+ if (inti->type & KVM_S390_INT_IO_AI_MASK)
VM_EVENT(kvm, 5, "%s", "inject: I/O (AI)");
else
VM_EVENT(kvm, 5, "inject: I/O css %x ss %x schid %04x",
@@ -1535,8 +1541,6 @@
switch (irq->type) {
case KVM_S390_PROGRAM_INT:
- VCPU_EVENT(vcpu, 3, "inject: program check %d (from user)",
- irq->u.pgm.code);
rc = __inject_prog(vcpu, irq);
break;
case KVM_S390_SIGP_SET_PREFIX:
diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
index f32f843..6861b74 100644
--- a/arch/s390/kvm/kvm-s390.c
+++ b/arch/s390/kvm/kvm-s390.c
@@ -28,6 +28,7 @@
#include <linux/vmalloc.h>
#include <asm/asm-offsets.h>
#include <asm/lowcore.h>
+#include <asm/etr.h>
#include <asm/pgtable.h>
#include <asm/nmi.h>
#include <asm/switch_to.h>
@@ -108,6 +109,9 @@
{ "diagnose_10", VCPU_STAT(diagnose_10) },
{ "diagnose_44", VCPU_STAT(diagnose_44) },
{ "diagnose_9c", VCPU_STAT(diagnose_9c) },
+ { "diagnose_258", VCPU_STAT(diagnose_258) },
+ { "diagnose_308", VCPU_STAT(diagnose_308) },
+ { "diagnose_500", VCPU_STAT(diagnose_500) },
{ NULL }
};
@@ -124,6 +128,7 @@
}
static struct gmap_notifier gmap_notifier;
+debug_info_t *kvm_s390_dbf;
/* Section: not file related */
int kvm_arch_hardware_enable(void)
@@ -134,24 +139,69 @@
static void kvm_gmap_notifier(struct gmap *gmap, unsigned long address);
+/*
+ * This callback is executed during stop_machine(). All CPUs are therefore
+ * temporarily stopped. In order not to change guest behavior, we have to
+ * disable preemption whenever we touch the epoch of kvm and the VCPUs,
+ * so a CPU won't be stopped while calculating with the epoch.
+ */
+static int kvm_clock_sync(struct notifier_block *notifier, unsigned long val,
+ void *v)
+{
+ struct kvm *kvm;
+ struct kvm_vcpu *vcpu;
+ int i;
+ unsigned long long *delta = v;
+
+ list_for_each_entry(kvm, &vm_list, vm_list) {
+ kvm->arch.epoch -= *delta;
+ kvm_for_each_vcpu(i, vcpu, kvm) {
+ vcpu->arch.sie_block->epoch -= *delta;
+ }
+ }
+ return NOTIFY_OK;
+}
+
+static struct notifier_block kvm_clock_notifier = {
+ .notifier_call = kvm_clock_sync,
+};
+
int kvm_arch_hardware_setup(void)
{
gmap_notifier.notifier_call = kvm_gmap_notifier;
gmap_register_ipte_notifier(&gmap_notifier);
+ atomic_notifier_chain_register(&s390_epoch_delta_notifier,
+ &kvm_clock_notifier);
return 0;
}
void kvm_arch_hardware_unsetup(void)
{
gmap_unregister_ipte_notifier(&gmap_notifier);
+ atomic_notifier_chain_unregister(&s390_epoch_delta_notifier,
+ &kvm_clock_notifier);
}
int kvm_arch_init(void *opaque)
{
+ kvm_s390_dbf = debug_register("kvm-trace", 32, 1, 7 * sizeof(long));
+ if (!kvm_s390_dbf)
+ return -ENOMEM;
+
+ if (debug_register_view(kvm_s390_dbf, &debug_sprintf_view)) {
+ debug_unregister(kvm_s390_dbf);
+ return -ENOMEM;
+ }
+
/* Register floating interrupt controller interface. */
return kvm_register_device_ops(&kvm_flic_ops, KVM_DEV_TYPE_FLIC);
}
+void kvm_arch_exit(void)
+{
+ debug_unregister(kvm_s390_dbf);
+}
+
/* Section: device related */
long kvm_arch_dev_ioctl(struct file *filp,
unsigned int ioctl, unsigned long arg)
@@ -281,10 +331,12 @@
switch (cap->cap) {
case KVM_CAP_S390_IRQCHIP:
+ VM_EVENT(kvm, 3, "%s", "ENABLE: CAP_S390_IRQCHIP");
kvm->arch.use_irqchip = 1;
r = 0;
break;
case KVM_CAP_S390_USER_SIGP:
+ VM_EVENT(kvm, 3, "%s", "ENABLE: CAP_S390_USER_SIGP");
kvm->arch.user_sigp = 1;
r = 0;
break;
@@ -295,8 +347,11 @@
r = 0;
} else
r = -EINVAL;
+ VM_EVENT(kvm, 3, "ENABLE: CAP_S390_VECTOR_REGISTERS %s",
+ r ? "(not available)" : "(success)");
break;
case KVM_CAP_S390_USER_STSI:
+ VM_EVENT(kvm, 3, "%s", "ENABLE: CAP_S390_USER_STSI");
kvm->arch.user_stsi = 1;
r = 0;
break;
@@ -314,6 +369,8 @@
switch (attr->attr) {
case KVM_S390_VM_MEM_LIMIT_SIZE:
ret = 0;
+ VM_EVENT(kvm, 3, "QUERY: max guest memory: %lu bytes",
+ kvm->arch.gmap->asce_end);
if (put_user(kvm->arch.gmap->asce_end, (u64 __user *)attr->addr))
ret = -EFAULT;
break;
@@ -330,7 +387,13 @@
unsigned int idx;
switch (attr->attr) {
case KVM_S390_VM_MEM_ENABLE_CMMA:
+ /* enable CMMA only for z10 and later (EDAT_1) */
+ ret = -EINVAL;
+ if (!MACHINE_IS_LPAR || !MACHINE_HAS_EDAT1)
+ break;
+
ret = -EBUSY;
+ VM_EVENT(kvm, 3, "%s", "ENABLE: CMMA support");
mutex_lock(&kvm->lock);
if (atomic_read(&kvm->online_vcpus) == 0) {
kvm->arch.use_cmma = 1;
@@ -339,6 +402,11 @@
mutex_unlock(&kvm->lock);
break;
case KVM_S390_VM_MEM_CLR_CMMA:
+ ret = -EINVAL;
+ if (!kvm->arch.use_cmma)
+ break;
+
+ VM_EVENT(kvm, 3, "%s", "RESET: CMMA states");
mutex_lock(&kvm->lock);
idx = srcu_read_lock(&kvm->srcu);
s390_reset_cmma(kvm->arch.gmap->mm);
@@ -374,6 +442,7 @@
}
}
mutex_unlock(&kvm->lock);
+ VM_EVENT(kvm, 3, "SET: max guest memory: %lu bytes", new_limit);
break;
}
default:
@@ -400,22 +469,26 @@
kvm->arch.crypto.crycb->aes_wrapping_key_mask,
sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask));
kvm->arch.crypto.aes_kw = 1;
+ VM_EVENT(kvm, 3, "%s", "ENABLE: AES keywrapping support");
break;
case KVM_S390_VM_CRYPTO_ENABLE_DEA_KW:
get_random_bytes(
kvm->arch.crypto.crycb->dea_wrapping_key_mask,
sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask));
kvm->arch.crypto.dea_kw = 1;
+ VM_EVENT(kvm, 3, "%s", "ENABLE: DEA keywrapping support");
break;
case KVM_S390_VM_CRYPTO_DISABLE_AES_KW:
kvm->arch.crypto.aes_kw = 0;
memset(kvm->arch.crypto.crycb->aes_wrapping_key_mask, 0,
sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask));
+ VM_EVENT(kvm, 3, "%s", "DISABLE: AES keywrapping support");
break;
case KVM_S390_VM_CRYPTO_DISABLE_DEA_KW:
kvm->arch.crypto.dea_kw = 0;
memset(kvm->arch.crypto.crycb->dea_wrapping_key_mask, 0,
sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask));
+ VM_EVENT(kvm, 3, "%s", "DISABLE: DEA keywrapping support");
break;
default:
mutex_unlock(&kvm->lock);
@@ -440,6 +513,7 @@
if (gtod_high != 0)
return -EINVAL;
+ VM_EVENT(kvm, 3, "SET: TOD extension: 0x%x\n", gtod_high);
return 0;
}
@@ -459,12 +533,15 @@
return r;
mutex_lock(&kvm->lock);
+ preempt_disable();
kvm->arch.epoch = gtod - host_tod;
kvm_s390_vcpu_block_all(kvm);
kvm_for_each_vcpu(vcpu_idx, cur_vcpu, kvm)
cur_vcpu->arch.sie_block->epoch = kvm->arch.epoch;
kvm_s390_vcpu_unblock_all(kvm);
+ preempt_enable();
mutex_unlock(&kvm->lock);
+ VM_EVENT(kvm, 3, "SET: TOD base: 0x%llx\n", gtod);
return 0;
}
@@ -496,6 +573,7 @@
if (copy_to_user((void __user *)attr->addr, &gtod_high,
sizeof(gtod_high)))
return -EFAULT;
+ VM_EVENT(kvm, 3, "QUERY: TOD extension: 0x%x\n", gtod_high);
return 0;
}
@@ -509,9 +587,12 @@
if (r)
return r;
+ preempt_disable();
gtod = host_tod + kvm->arch.epoch;
+ preempt_enable();
if (copy_to_user((void __user *)attr->addr, &gtod, sizeof(gtod)))
return -EFAULT;
+ VM_EVENT(kvm, 3, "QUERY: TOD base: 0x%llx\n", gtod);
return 0;
}
@@ -821,7 +902,9 @@
}
/* Enable storage key handling for the guest */
- s390_enable_skey();
+ r = s390_enable_skey();
+ if (r)
+ goto out;
for (i = 0; i < args->count; i++) {
hva = gfn_to_hva(kvm, args->start_gfn + i);
@@ -879,8 +962,7 @@
if (kvm->arch.use_irqchip) {
/* Set up dummy routing. */
memset(&routing, 0, sizeof(routing));
- kvm_set_irq_routing(kvm, &routing, 0, 0);
- r = 0;
+ r = kvm_set_irq_routing(kvm, &routing, 0, 0);
}
break;
}
@@ -1043,7 +1125,7 @@
sprintf(debug_name, "kvm-%u", current->pid);
- kvm->arch.dbf = debug_register(debug_name, 8, 2, 8 * sizeof(long));
+ kvm->arch.dbf = debug_register(debug_name, 32, 1, 7 * sizeof(long));
if (!kvm->arch.dbf)
goto out_err;
@@ -1086,7 +1168,7 @@
mutex_init(&kvm->arch.ipte_mutex);
debug_register_view(kvm->arch.dbf, &debug_sprintf_view);
- VM_EVENT(kvm, 3, "%s", "vm created");
+ VM_EVENT(kvm, 3, "vm created with type %lu", type);
if (type & KVM_VM_S390_UCONTROL) {
kvm->arch.gmap = NULL;
@@ -1103,6 +1185,7 @@
kvm->arch.epoch = 0;
spin_lock_init(&kvm->arch.start_stop_lock);
+ KVM_EVENT(3, "vm 0x%p created by pid %u", kvm, current->pid);
return 0;
out_err:
@@ -1110,6 +1193,7 @@
free_page((unsigned long)kvm->arch.model.fac);
debug_unregister(kvm->arch.dbf);
free_page((unsigned long)(kvm->arch.sca));
+ KVM_EVENT(3, "creation of vm failed: %d", rc);
return rc;
}
@@ -1131,7 +1215,7 @@
if (kvm_is_ucontrol(vcpu->kvm))
gmap_free(vcpu->arch.gmap);
- if (kvm_s390_cmma_enabled(vcpu->kvm))
+ if (vcpu->kvm->arch.use_cmma)
kvm_s390_vcpu_unsetup_cmma(vcpu);
free_page((unsigned long)(vcpu->arch.sie_block));
@@ -1166,6 +1250,7 @@
gmap_free(kvm->arch.gmap);
kvm_s390_destroy_adapters(kvm);
kvm_s390_clear_float_irqs(kvm);
+ KVM_EVENT(3, "vm 0x%p destroyed", kvm);
}
/* Section: vcpu related */
@@ -1264,7 +1349,9 @@
void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
{
mutex_lock(&vcpu->kvm->lock);
+ preempt_disable();
vcpu->arch.sie_block->epoch = vcpu->kvm->arch.epoch;
+ preempt_enable();
mutex_unlock(&vcpu->kvm->lock);
if (!kvm_is_ucontrol(vcpu->kvm))
vcpu->arch.gmap = vcpu->kvm->arch.gmap;
@@ -1342,7 +1429,7 @@
}
vcpu->arch.sie_block->ictl |= ICTL_ISKE | ICTL_SSKE | ICTL_RRBE;
- if (kvm_s390_cmma_enabled(vcpu->kvm)) {
+ if (vcpu->kvm->arch.use_cmma) {
rc = kvm_s390_vcpu_setup_cmma(vcpu);
if (rc)
return rc;
@@ -1723,18 +1810,6 @@
return rc;
}
-bool kvm_s390_cmma_enabled(struct kvm *kvm)
-{
- if (!MACHINE_IS_LPAR)
- return false;
- /* only enable for z10 and later */
- if (!MACHINE_HAS_EDAT1)
- return false;
- if (!kvm->arch.use_cmma)
- return false;
- return true;
-}
-
static bool ibs_enabled(struct kvm_vcpu *vcpu)
{
return atomic_read(&vcpu->arch.sie_block->cpuflags) & CPUSTAT_IBS;
@@ -2340,6 +2415,7 @@
case KVM_CAP_S390_CSS_SUPPORT:
if (!vcpu->kvm->arch.css_support) {
vcpu->kvm->arch.css_support = 1;
+ VM_EVENT(vcpu->kvm, 3, "%s", "ENABLE: CSS support");
trace_kvm_s390_enable_css(vcpu->kvm);
}
r = 0;
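The preempt_disable()/preempt_enable() pairs added throughout this file all guard the same pattern: the TOD clock and an epoch are read as a pair, so the stop_machine() adjustment described in the kvm_clock_sync comment cannot run between the two reads. As a sketch (helper name illustrative):

    static u64 guest_tod_now(struct kvm_vcpu *vcpu)
    {
        u64 now;

        preempt_disable();
        now = get_tod_clock_fast() + vcpu->arch.sie_block->epoch;
        preempt_enable();
        return now;
    }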
diff --git a/arch/s390/kvm/kvm-s390.h b/arch/s390/kvm/kvm-s390.h
index c570478..c446aab 100644
--- a/arch/s390/kvm/kvm-s390.h
+++ b/arch/s390/kvm/kvm-s390.h
@@ -27,6 +27,13 @@
#define TDB_FORMAT1 1
#define IS_ITDB_VALID(vcpu) ((*(char *)vcpu->arch.sie_block->itdba == TDB_FORMAT1))
+extern debug_info_t *kvm_s390_dbf;
+#define KVM_EVENT(d_loglevel, d_string, d_args...)\
+do { \
+ debug_sprintf_event(kvm_s390_dbf, d_loglevel, d_string "\n", \
+ d_args); \
+} while (0)
+
#define VM_EVENT(d_kvm, d_loglevel, d_string, d_args...)\
do { \
debug_sprintf_event(d_kvm->arch.dbf, d_loglevel, d_string "\n", \
@@ -65,6 +72,8 @@
static inline void kvm_s390_set_prefix(struct kvm_vcpu *vcpu, u32 prefix)
{
+ VCPU_EVENT(vcpu, 3, "set prefix of cpu %03u to 0x%x", vcpu->vcpu_id,
+ prefix);
vcpu->arch.sie_block->prefix = prefix >> GUEST_PREFIX_SHIFT;
kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
kvm_make_request(KVM_REQ_MMU_RELOAD, vcpu);
@@ -217,8 +226,6 @@
void kvm_s390_sync_request(int req, struct kvm_vcpu *vcpu);
int kvm_s390_vcpu_setup_cmma(struct kvm_vcpu *vcpu);
void kvm_s390_vcpu_unsetup_cmma(struct kvm_vcpu *vcpu);
-/* is cmma enabled */
-bool kvm_s390_cmma_enabled(struct kvm *kvm);
unsigned long kvm_s390_fac_list_mask_size(void);
extern unsigned long kvm_s390_fac_list_mask[];
diff --git a/arch/s390/kvm/priv.c b/arch/s390/kvm/priv.c
index ad42422..4d21dc4 100644
--- a/arch/s390/kvm/priv.c
+++ b/arch/s390/kvm/priv.c
@@ -53,11 +53,14 @@
kvm_s390_set_psw_cc(vcpu, 3);
return 0;
}
+ VCPU_EVENT(vcpu, 3, "SCK: setting guest TOD to 0x%llx", val);
val = (val - hostclk) & ~0x3fUL;
mutex_lock(&vcpu->kvm->lock);
+ preempt_disable();
kvm_for_each_vcpu(i, cpup, vcpu->kvm)
cpup->arch.sie_block->epoch = val;
+ preempt_enable();
mutex_unlock(&vcpu->kvm->lock);
kvm_s390_set_psw_cc(vcpu, 0);
@@ -98,8 +101,6 @@
return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
kvm_s390_set_prefix(vcpu, address);
-
- VCPU_EVENT(vcpu, 5, "setting prefix to %x", address);
trace_kvm_s390_handle_prefix(vcpu, 1, address);
return 0;
}
@@ -129,7 +130,7 @@
if (rc)
return kvm_s390_inject_prog_cond(vcpu, rc);
- VCPU_EVENT(vcpu, 5, "storing prefix to %x", address);
+ VCPU_EVENT(vcpu, 3, "STPX: storing prefix 0x%x into 0x%llx", address, operand2);
trace_kvm_s390_handle_prefix(vcpu, 0, address);
return 0;
}
@@ -155,7 +156,7 @@
if (rc)
return kvm_s390_inject_prog_cond(vcpu, rc);
- VCPU_EVENT(vcpu, 5, "storing cpu address to %llx", ga);
+ VCPU_EVENT(vcpu, 3, "STAP: storing cpu address (%u) to 0x%llx", vcpu_id, ga);
trace_kvm_s390_handle_stap(vcpu, ga);
return 0;
}
@@ -167,6 +168,7 @@
return rc;
rc = s390_enable_skey();
+ VCPU_EVENT(vcpu, 3, "%s", "enabling storage keys for guest");
trace_kvm_s390_skey_related_inst(vcpu);
vcpu->arch.sie_block->ictl &= ~(ICTL_ISKE | ICTL_SSKE | ICTL_RRBE);
return rc;
@@ -370,7 +372,7 @@
&fac, sizeof(fac));
if (rc)
return rc;
- VCPU_EVENT(vcpu, 5, "store facility list value %x", fac);
+ VCPU_EVENT(vcpu, 3, "STFL: store facility list 0x%x", fac);
trace_kvm_s390_handle_stfl(vcpu, fac);
return 0;
}
@@ -468,7 +470,7 @@
if (rc)
return kvm_s390_inject_prog_cond(vcpu, rc);
- VCPU_EVENT(vcpu, 5, "%s", "store cpu id");
+ VCPU_EVENT(vcpu, 3, "STIDP: store cpu id 0x%llx", stidp_data);
return 0;
}
@@ -521,7 +523,7 @@
ar_t ar;
vcpu->stat.instruction_stsi++;
- VCPU_EVENT(vcpu, 4, "stsi: fc: %x sel1: %x sel2: %x", fc, sel1, sel2);
+ VCPU_EVENT(vcpu, 3, "STSI: fc: %u sel1: %u sel2: %u", fc, sel1, sel2);
if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
@@ -758,10 +760,10 @@
struct gmap *gmap;
int i;
- VCPU_EVENT(vcpu, 5, "cmma release %d pages", entries);
+ VCPU_EVENT(vcpu, 4, "ESSA: release %d pages", entries);
gmap = vcpu->arch.gmap;
vcpu->stat.instruction_essa++;
- if (!kvm_s390_cmma_enabled(vcpu->kvm))
+ if (!vcpu->kvm->arch.use_cmma)
return kvm_s390_inject_program_int(vcpu, PGM_OPERATION);
if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
@@ -829,7 +831,7 @@
if (ga & 3)
return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
- VCPU_EVENT(vcpu, 5, "lctl r1:%x, r3:%x, addr:%llx", reg1, reg3, ga);
+ VCPU_EVENT(vcpu, 4, "LCTL: r1:%d, r3:%d, addr: 0x%llx", reg1, reg3, ga);
trace_kvm_s390_handle_lctl(vcpu, 0, reg1, reg3, ga);
nr_regs = ((reg3 - reg1) & 0xf) + 1;
@@ -868,7 +870,7 @@
if (ga & 3)
return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
- VCPU_EVENT(vcpu, 5, "stctl r1:%x, r3:%x, addr:%llx", reg1, reg3, ga);
+ VCPU_EVENT(vcpu, 4, "STCTL r1:%d, r3:%d, addr: 0x%llx", reg1, reg3, ga);
trace_kvm_s390_handle_stctl(vcpu, 0, reg1, reg3, ga);
reg = reg1;
@@ -902,7 +904,7 @@
if (ga & 7)
return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
- VCPU_EVENT(vcpu, 5, "lctlg r1:%x, r3:%x, addr:%llx", reg1, reg3, ga);
+ VCPU_EVENT(vcpu, 4, "LCTLG: r1:%d, r3:%d, addr: 0x%llx", reg1, reg3, ga);
trace_kvm_s390_handle_lctl(vcpu, 1, reg1, reg3, ga);
nr_regs = ((reg3 - reg1) & 0xf) + 1;
@@ -940,7 +942,7 @@
if (ga & 7)
return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
- VCPU_EVENT(vcpu, 5, "stctg r1:%x, r3:%x, addr:%llx", reg1, reg3, ga);
+ VCPU_EVENT(vcpu, 4, "STCTG r1:%d, r3:%d, addr: 0x%llx", reg1, reg3, ga);
trace_kvm_s390_handle_stctl(vcpu, 1, reg1, reg3, ga);
reg = reg1;
diff --git a/arch/s390/kvm/sigp.c b/arch/s390/kvm/sigp.c
index 72e58bd..da690b6 100644
--- a/arch/s390/kvm/sigp.c
+++ b/arch/s390/kvm/sigp.c
@@ -205,9 +205,6 @@
*reg &= 0xffffffff00000000UL;
*reg |= SIGP_STATUS_INCORRECT_STATE;
return SIGP_CC_STATUS_STORED;
- } else if (rc == 0) {
- VCPU_EVENT(vcpu, 4, "set prefix of cpu %02x to %x",
- dst_vcpu->vcpu_id, irq.u.prefix.address);
}
return rc;
@@ -371,7 +368,8 @@
return rc;
}
-static int handle_sigp_order_in_user_space(struct kvm_vcpu *vcpu, u8 order_code)
+static int handle_sigp_order_in_user_space(struct kvm_vcpu *vcpu, u8 order_code,
+ u16 cpu_addr)
{
if (!vcpu->kvm->arch.user_sigp)
return 0;
@@ -414,9 +412,8 @@
default:
vcpu->stat.instruction_sigp_unknown++;
}
-
- VCPU_EVENT(vcpu, 4, "sigp order %u: completely handled in user space",
- order_code);
+ VCPU_EVENT(vcpu, 3, "SIGP: order %u for CPU %d handled in userspace",
+ order_code, cpu_addr);
return 1;
}
@@ -435,7 +432,7 @@
return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
order_code = kvm_s390_get_base_disp_rs(vcpu, NULL);
- if (handle_sigp_order_in_user_space(vcpu, order_code))
+ if (handle_sigp_order_in_user_space(vcpu, order_code, cpu_addr))
return -EOPNOTSUPP;
if (r1 % 2)
diff --git a/arch/s390/kvm/trace-s390.h b/arch/s390/kvm/trace-s390.h
index 3208d33..cc1d6c6 100644
--- a/arch/s390/kvm/trace-s390.h
+++ b/arch/s390/kvm/trace-s390.h
@@ -105,11 +105,22 @@
{KVM_S390_PROGRAM_INT, "program interrupt"}, \
{KVM_S390_SIGP_SET_PREFIX, "sigp set prefix"}, \
{KVM_S390_RESTART, "sigp restart"}, \
+ {KVM_S390_INT_PFAULT_INIT, "pfault init"}, \
+ {KVM_S390_INT_PFAULT_DONE, "pfault done"}, \
+ {KVM_S390_MCHK, "machine check"}, \
+ {KVM_S390_INT_CLOCK_COMP, "clock comparator"}, \
+ {KVM_S390_INT_CPU_TIMER, "cpu timer"}, \
{KVM_S390_INT_VIRTIO, "virtio interrupt"}, \
{KVM_S390_INT_SERVICE, "sclp interrupt"}, \
{KVM_S390_INT_EMERGENCY, "sigp emergency"}, \
{KVM_S390_INT_EXTERNAL_CALL, "sigp ext call"}
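+
+/*
+ * I/O interrupts occupy a range of type values rather than discrete
+ * constants, so they cannot be listed in kvm_s390_int_type; derive their
+ * name from the adapter-interrupt bit instead.
+ */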
+#define get_irq_name(__type) \
+ (__type > KVM_S390_INT_IO_MAX ? \
+ __print_symbolic(__type, kvm_s390_int_type) : \
+ (__type & KVM_S390_INT_IO_AI_MASK ? \
+ "adapter I/O interrupt" : "subchannel I/O interrupt"))
+
TRACE_EVENT(kvm_s390_inject_vm,
TP_PROTO(__u64 type, __u32 parm, __u64 parm64, int who),
TP_ARGS(type, parm, parm64, who),
@@ -131,22 +142,19 @@
TP_printk("inject%s: type:%x (%s) parm:%x parm64:%llx",
(__entry->who == 1) ? " (from kernel)" :
(__entry->who == 2) ? " (from user)" : "",
- __entry->inttype,
- __print_symbolic(__entry->inttype, kvm_s390_int_type),
+ __entry->inttype, get_irq_name(__entry->inttype),
__entry->parm, __entry->parm64)
);
TRACE_EVENT(kvm_s390_inject_vcpu,
- TP_PROTO(unsigned int id, __u64 type, __u32 parm, __u64 parm64, \
- int who),
- TP_ARGS(id, type, parm, parm64, who),
+ TP_PROTO(unsigned int id, __u64 type, __u32 parm, __u64 parm64),
+ TP_ARGS(id, type, parm, parm64),
TP_STRUCT__entry(
__field(int, id)
__field(__u32, inttype)
__field(__u32, parm)
__field(__u64, parm64)
- __field(int, who)
),
TP_fast_assign(
@@ -154,15 +162,12 @@
__entry->inttype = type & 0x00000000ffffffff;
__entry->parm = parm;
__entry->parm64 = parm64;
- __entry->who = who;
),
- TP_printk("inject%s (vcpu %d): type:%x (%s) parm:%x parm64:%llx",
- (__entry->who == 1) ? " (from kernel)" :
- (__entry->who == 2) ? " (from user)" : "",
+ TP_printk("inject (vcpu %d): type:%x (%s) parm:%x parm64:%llx",
__entry->id, __entry->inttype,
- __print_symbolic(__entry->inttype, kvm_s390_int_type),
- __entry->parm, __entry->parm64)
+ get_irq_name(__entry->inttype), __entry->parm,
+ __entry->parm64)
);
/*
@@ -189,8 +194,8 @@
TP_printk("deliver interrupt (vcpu %d): type:%x (%s) " \
"data:%08llx %016llx",
__entry->id, __entry->inttype,
- __print_symbolic(__entry->inttype, kvm_s390_int_type),
- __entry->data0, __entry->data1)
+ get_irq_name(__entry->inttype), __entry->data0,
+ __entry->data1)
);
/*
diff --git a/arch/x86/entry/entry_64_compat.S b/arch/x86/entry/entry_64_compat.S
index 5a18447..a7e257d 100644
--- a/arch/x86/entry/entry_64_compat.S
+++ b/arch/x86/entry/entry_64_compat.S
@@ -140,6 +140,7 @@
*/
andl $~TS_COMPAT, ASM_THREAD_INFO(TI_status, %rsp, SIZEOF_PTREGS)
movl RIP(%rsp), %ecx /* User %eip */
+ movq RAX(%rsp), %rax
RESTORE_RSI_RDI
xorl %edx, %edx /* Do not leak kernel information */
xorq %r8, %r8
@@ -219,7 +220,6 @@
1: setbe %al /* 1 if error, 0 if not */
movzbl %al, %edi /* zero-extend that into %edi */
call __audit_syscall_exit
- movq RAX(%rsp), %rax /* reload syscall return value */
movl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT), %edi
DISABLE_INTERRUPTS(CLBR_NONE)
TRACE_IRQS_OFF
@@ -368,6 +368,7 @@
RESTORE_RSI_RDI_RDX
movl RIP(%rsp), %ecx
movl EFLAGS(%rsp), %r11d
+ movq RAX(%rsp), %rax
xorq %r10, %r10
xorq %r9, %r9
xorq %r8, %r8
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 49ec903..c12e845 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -252,6 +252,11 @@
int size;
};
+struct rsvd_bits_validate {
+ u64 rsvd_bits_mask[2][4];
+ u64 bad_mt_xwr;
+};
+
/*
* x86 supports 3 paging modes (4-level 64-bit, 3-level 64-bit, and 2-level
* 32-bit). The kvm_mmu structure abstracts the details of the current mmu
@@ -289,8 +294,15 @@
u64 *pae_root;
u64 *lm_root;
- u64 rsvd_bits_mask[2][4];
- u64 bad_mt_xwr;
+
+	/*
+	 * Check for zero bits on shadow page table entries. These
+	 * bits include not only hardware-reserved bits but also
+	 * bits that the spte never uses.
+	 */
+ struct rsvd_bits_validate shadow_zero_check;
+
+ struct rsvd_bits_validate guest_rsvd_check;
/*
* Bitmap: bit set = last pte in walk
@@ -358,6 +370,11 @@
struct list_head head;
};
+/* Hyper-V per vcpu emulation context */
+struct kvm_vcpu_hv {
+ u64 hv_vapic;
+};
+
struct kvm_vcpu_arch {
/*
* rip and regs accesses must go through
@@ -514,8 +531,7 @@
/* used for guest single stepping over the given code position */
unsigned long singlestep_rip;
- /* fields used by HYPER-V emulation */
- u64 hv_vapic;
+ struct kvm_vcpu_hv hyperv;
cpumask_var_t wbinvd_dirty_mask;
@@ -586,6 +602,17 @@
struct kvm_lapic *logical_map[16][16];
};
+/* Hyper-V emulation context */
+struct kvm_hv {
+ u64 hv_guest_os_id;
+ u64 hv_hypercall;
+ u64 hv_tsc_page;
+
+ /* Hyper-v based guest crash (NT kernel bugcheck) parameters */
+ u64 hv_crash_param[HV_X64_MSR_CRASH_PARAMS];
+ u64 hv_crash_ctl;
+};
+
struct kvm_arch {
unsigned int n_used_mmu_pages;
unsigned int n_requested_mmu_pages;
@@ -645,16 +672,14 @@
/* reads protected by irq_srcu, writes by irq_lock */
struct hlist_head mask_notifier_list;
- /* fields used by HYPER-V emulation */
- u64 hv_guest_os_id;
- u64 hv_hypercall;
- u64 hv_tsc_page;
+ struct kvm_hv hyperv;
#ifdef CONFIG_KVM_MMU_AUDIT
int audit_point;
#endif
bool boot_vcpu_runs_old_kvmclock;
+ u32 bsp_vcpu_id;
u64 disabled_quirks;
};
@@ -1203,5 +1228,7 @@
const struct kvm_userspace_memory_region *mem);
int x86_set_memory_region(struct kvm *kvm,
const struct kvm_userspace_memory_region *mem);
+bool kvm_vcpu_is_reset_bsp(struct kvm_vcpu *vcpu);
+bool kvm_vcpu_is_bsp(struct kvm_vcpu *vcpu);
#endif /* _ASM_X86_KVM_HOST_H */
diff --git a/arch/x86/include/asm/sigcontext.h b/arch/x86/include/asm/sigcontext.h
index 6fe6b18..9dfce4e 100644
--- a/arch/x86/include/asm/sigcontext.h
+++ b/arch/x86/include/asm/sigcontext.h
@@ -57,9 +57,9 @@
unsigned long ip;
unsigned long flags;
unsigned short cs;
- unsigned short __pad2; /* Was called gs, but was always zero. */
- unsigned short __pad1; /* Was called fs, but was always zero. */
- unsigned short ss;
+ unsigned short gs;
+ unsigned short fs;
+ unsigned short __pad0;
unsigned long err;
unsigned long trapno;
unsigned long oldmask;
diff --git a/arch/x86/include/asm/switch_to.h b/arch/x86/include/asm/switch_to.h
index 751bf4b..d7f3b3b 100644
--- a/arch/x86/include/asm/switch_to.h
+++ b/arch/x86/include/asm/switch_to.h
@@ -79,12 +79,12 @@
#else /* CONFIG_X86_32 */
/* frame pointer must be last for get_wchan */
-#define SAVE_CONTEXT "pushq %%rbp ; movq %%rsi,%%rbp\n\t"
-#define RESTORE_CONTEXT "movq %%rbp,%%rsi ; popq %%rbp\t"
+#define SAVE_CONTEXT "pushf ; pushq %%rbp ; movq %%rsi,%%rbp\n\t"
+#define RESTORE_CONTEXT "movq %%rbp,%%rsi ; popq %%rbp ; popf\t"
#define __EXTRA_CLOBBER \
, "rcx", "rbx", "rdx", "r8", "r9", "r10", "r11", \
- "r12", "r13", "r14", "r15", "flags"
+ "r12", "r13", "r14", "r15"
#ifdef CONFIG_CC_STACKPROTECTOR
#define __switch_canary \
@@ -100,11 +100,7 @@
#define __switch_canary_iparam
#endif /* CC_STACKPROTECTOR */
-/*
- * There is no need to save or restore flags, because flags are always
- * clean in kernel mode, with the possible exception of IOPL. Kernel IOPL
- * has no effect.
- */
+/* Save and restore flags across the switch to handle a leaking NT flag */
#define switch_to(prev, next, last) \
asm volatile(SAVE_CONTEXT \
"movq %%rsp,%P[threadrsp](%[prev])\n\t" /* save RSP */ \
diff --git a/arch/x86/include/asm/vmx.h b/arch/x86/include/asm/vmx.h
index da772ed..448b7ca 100644
--- a/arch/x86/include/asm/vmx.h
+++ b/arch/x86/include/asm/vmx.h
@@ -47,6 +47,7 @@
#define CPU_BASED_MOV_DR_EXITING 0x00800000
#define CPU_BASED_UNCOND_IO_EXITING 0x01000000
#define CPU_BASED_USE_IO_BITMAPS 0x02000000
+#define CPU_BASED_MONITOR_TRAP_FLAG 0x08000000
#define CPU_BASED_USE_MSR_BITMAPS 0x10000000
#define CPU_BASED_MONITOR_EXITING 0x20000000
#define CPU_BASED_PAUSE_EXITING 0x40000000
@@ -367,29 +368,29 @@
#define TYPE_PHYSICAL_APIC_EVENT (10 << 12)
#define TYPE_PHYSICAL_APIC_INST (15 << 12)
-/* segment AR */
-#define SEGMENT_AR_L_MASK (1 << 13)
+/* segment AR in VMCS -- these are different from what LAR reports */
+#define VMX_SEGMENT_AR_L_MASK (1 << 13)
-#define AR_TYPE_ACCESSES_MASK 1
-#define AR_TYPE_READABLE_MASK (1 << 1)
-#define AR_TYPE_WRITEABLE_MASK (1 << 2)
-#define AR_TYPE_CODE_MASK (1 << 3)
-#define AR_TYPE_MASK 0x0f
-#define AR_TYPE_BUSY_64_TSS 11
-#define AR_TYPE_BUSY_32_TSS 11
-#define AR_TYPE_BUSY_16_TSS 3
-#define AR_TYPE_LDT 2
+#define VMX_AR_TYPE_ACCESSES_MASK 1
+#define VMX_AR_TYPE_READABLE_MASK (1 << 1)
+#define VMX_AR_TYPE_WRITEABLE_MASK (1 << 2)
+#define VMX_AR_TYPE_CODE_MASK (1 << 3)
+#define VMX_AR_TYPE_MASK 0x0f
+#define VMX_AR_TYPE_BUSY_64_TSS 11
+#define VMX_AR_TYPE_BUSY_32_TSS 11
+#define VMX_AR_TYPE_BUSY_16_TSS 3
+#define VMX_AR_TYPE_LDT 2
-#define AR_UNUSABLE_MASK (1 << 16)
-#define AR_S_MASK (1 << 4)
-#define AR_P_MASK (1 << 7)
-#define AR_L_MASK (1 << 13)
-#define AR_DB_MASK (1 << 14)
-#define AR_G_MASK (1 << 15)
-#define AR_DPL_SHIFT 5
-#define AR_DPL(ar) (((ar) >> AR_DPL_SHIFT) & 3)
+#define VMX_AR_UNUSABLE_MASK (1 << 16)
+#define VMX_AR_S_MASK (1 << 4)
+#define VMX_AR_P_MASK (1 << 7)
+#define VMX_AR_L_MASK (1 << 13)
+#define VMX_AR_DB_MASK (1 << 14)
+#define VMX_AR_G_MASK (1 << 15)
+#define VMX_AR_DPL_SHIFT 5
+#define VMX_AR_DPL(ar) (((ar) >> VMX_AR_DPL_SHIFT) & 3)
-#define AR_RESERVD_MASK 0xfffe0f00
+#define VMX_AR_RESERVD_MASK 0xfffe0f00
#define TSS_PRIVATE_MEMSLOT (KVM_USER_MEM_SLOTS + 0)
#define APIC_ACCESS_PAGE_PRIVATE_MEMSLOT (KVM_USER_MEM_SLOTS + 1)
diff --git a/arch/x86/include/uapi/asm/sigcontext.h b/arch/x86/include/uapi/asm/sigcontext.h
index 0e8a973..40836a9 100644
--- a/arch/x86/include/uapi/asm/sigcontext.h
+++ b/arch/x86/include/uapi/asm/sigcontext.h
@@ -177,24 +177,9 @@
__u64 rip;
__u64 eflags; /* RFLAGS */
__u16 cs;
-
- /*
- * Prior to 2.5.64 ("[PATCH] x86-64 updates for 2.5.64-bk3"),
- * Linux saved and restored fs and gs in these slots. This
- * was counterproductive, as fsbase and gsbase were never
- * saved, so arch_prctl was presumably unreliable.
- *
- * If these slots are ever needed for any other purpose, there
- * is some risk that very old 64-bit binaries could get
- * confused. I doubt that many such binaries still work,
- * though, since the same patch in 2.5.64 also removed the
- * 64-bit set_thread_area syscall, so it appears that there is
- * no TLS API that works in both pre- and post-2.5.64 kernels.
- */
- __u16 __pad2; /* Was gs. */
- __u16 __pad1; /* Was fs. */
-
- __u16 ss;
+ __u16 gs;
+ __u16 fs;
+ __u16 __pad0;
__u64 err;
__u64 trapno;
__u64 oldmask;
diff --git a/arch/x86/include/uapi/asm/vmx.h b/arch/x86/include/uapi/asm/vmx.h
index 1fe9218..37fee27 100644
--- a/arch/x86/include/uapi/asm/vmx.h
+++ b/arch/x86/include/uapi/asm/vmx.h
@@ -58,6 +58,7 @@
#define EXIT_REASON_INVALID_STATE 33
#define EXIT_REASON_MSR_LOAD_FAIL 34
#define EXIT_REASON_MWAIT_INSTRUCTION 36
+#define EXIT_REASON_MONITOR_TRAP_FLAG 37
#define EXIT_REASON_MONITOR_INSTRUCTION 39
#define EXIT_REASON_PAUSE_INSTRUCTION 40
#define EXIT_REASON_MCE_DURING_VMENTRY 41
@@ -106,6 +107,7 @@
{ EXIT_REASON_MSR_READ, "MSR_READ" }, \
{ EXIT_REASON_MSR_WRITE, "MSR_WRITE" }, \
{ EXIT_REASON_MWAIT_INSTRUCTION, "MWAIT_INSTRUCTION" }, \
+ { EXIT_REASON_MONITOR_TRAP_FLAG, "MONITOR_TRAP_FLAG" }, \
{ EXIT_REASON_MONITOR_INSTRUCTION, "MONITOR_INSTRUCTION" }, \
{ EXIT_REASON_PAUSE_INSTRUCTION, "PAUSE_INSTRUCTION" }, \
{ EXIT_REASON_MCE_DURING_VMENTRY, "MCE_DURING_VMENTRY" }, \
diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
index dcb5285..cde732c 100644
--- a/arch/x86/kernel/apic/apic.c
+++ b/arch/x86/kernel/apic/apic.c
@@ -1424,7 +1424,7 @@
{
u64 msr;
- if (cpu_has_apic)
+ if (!cpu_has_apic)
return;
rdmsrl(MSR_IA32_APICBASE, msr);
@@ -1483,10 +1483,13 @@
static __init void x2apic_disable(void)
{
- u32 x2apic_id;
+ u32 x2apic_id, state = x2apic_state;
- if (x2apic_state != X2APIC_ON)
- goto out;
+ x2apic_mode = 0;
+ x2apic_state = X2APIC_DISABLED;
+
+ if (state != X2APIC_ON)
+ return;
x2apic_id = read_apic_id();
if (x2apic_id >= 255)
@@ -1494,9 +1497,6 @@
__x2apic_disable();
register_lapic_address(mp_lapic_addr);
-out:
- x2apic_state = X2APIC_DISABLED;
- x2apic_mode = 0;
}
static __init void x2apic_enable(void)
diff --git a/arch/x86/kernel/apic/vector.c b/arch/x86/kernel/apic/vector.c
index f813261..2683f36 100644
--- a/arch/x86/kernel/apic/vector.c
+++ b/arch/x86/kernel/apic/vector.c
@@ -322,7 +322,7 @@
irq_data->chip = &lapic_controller;
irq_data->chip_data = data;
irq_data->hwirq = virq + i;
- err = assign_irq_vector_policy(virq, irq_data->node, data,
+ err = assign_irq_vector_policy(virq + i, irq_data->node, data,
info);
if (err)
goto error;
diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c
index b9826a9..6326ae2 100644
--- a/arch/x86/kernel/cpu/perf_event_intel.c
+++ b/arch/x86/kernel/cpu/perf_event_intel.c
@@ -2534,7 +2534,7 @@
if (x86_pmu.extra_regs || x86_pmu.lbr_sel_map) {
cpuc->shared_regs = allocate_shared_regs(cpu);
if (!cpuc->shared_regs)
- return NOTIFY_BAD;
+ goto err;
}
if (x86_pmu.flags & PMU_FL_EXCL_CNTRS) {
@@ -2542,18 +2542,27 @@
cpuc->constraint_list = kzalloc(sz, GFP_KERNEL);
if (!cpuc->constraint_list)
- return NOTIFY_BAD;
+ goto err_shared_regs;
cpuc->excl_cntrs = allocate_excl_cntrs(cpu);
- if (!cpuc->excl_cntrs) {
- kfree(cpuc->constraint_list);
- kfree(cpuc->shared_regs);
- return NOTIFY_BAD;
- }
+ if (!cpuc->excl_cntrs)
+ goto err_constraint_list;
+
cpuc->excl_thread_id = 0;
}
return NOTIFY_OK;
+
+err_constraint_list:
+ kfree(cpuc->constraint_list);
+ cpuc->constraint_list = NULL;
+
+err_shared_regs:
+ kfree(cpuc->shared_regs);
+ cpuc->shared_regs = NULL;
+
+err:
+ return NOTIFY_BAD;
}
static void intel_pmu_cpu_starting(int cpu)
diff --git a/arch/x86/kernel/cpu/perf_event_intel_cqm.c b/arch/x86/kernel/cpu/perf_event_intel_cqm.c
index 63eb68b..377e8f8 100644
--- a/arch/x86/kernel/cpu/perf_event_intel_cqm.c
+++ b/arch/x86/kernel/cpu/perf_event_intel_cqm.c
@@ -1255,7 +1255,7 @@
cpumask_set_cpu(cpu, &cqm_cpumask);
}
-static void intel_cqm_cpu_prepare(unsigned int cpu)
+static void intel_cqm_cpu_starting(unsigned int cpu)
{
struct intel_pqr_state *state = &per_cpu(pqr_state, cpu);
struct cpuinfo_x86 *c = &cpu_data(cpu);
@@ -1296,13 +1296,11 @@
unsigned int cpu = (unsigned long)hcpu;
switch (action & ~CPU_TASKS_FROZEN) {
- case CPU_UP_PREPARE:
- intel_cqm_cpu_prepare(cpu);
- break;
case CPU_DOWN_PREPARE:
intel_cqm_cpu_exit(cpu);
break;
case CPU_STARTING:
+ intel_cqm_cpu_starting(cpu);
cqm_pick_event_reader(cpu);
break;
}
@@ -1373,7 +1371,7 @@
goto out;
for_each_online_cpu(i) {
- intel_cqm_cpu_prepare(i);
+ intel_cqm_cpu_starting(i);
cqm_pick_event_reader(i);
}
diff --git a/arch/x86/kernel/fpu/core.c b/arch/x86/kernel/fpu/core.c
index 79de954..d25097c 100644
--- a/arch/x86/kernel/fpu/core.c
+++ b/arch/x86/kernel/fpu/core.c
@@ -270,7 +270,7 @@
dst_fpu->fpregs_active = 0;
dst_fpu->last_cpu = -1;
- if (src_fpu->fpstate_active)
+ if (src_fpu->fpstate_active && cpu_has_fpu)
fpu_copy(dst_fpu, src_fpu);
return 0;
diff --git a/arch/x86/kernel/fpu/init.c b/arch/x86/kernel/fpu/init.c
index 1e173f6..d14e9ac 100644
--- a/arch/x86/kernel/fpu/init.c
+++ b/arch/x86/kernel/fpu/init.c
@@ -40,7 +40,12 @@
write_cr0(cr0);
/* Flush out any pending x87 state: */
- asm volatile ("fninit");
+#ifdef CONFIG_MATH_EMULATION
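+	/* Without an FPU, set up the soft-FPU state used by the math emulator instead. */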
+ if (!cpu_has_fpu)
+		fpstate_init_soft(&current->thread.fpu.state.soft);
+ else
+#endif
+ asm volatile ("fninit");
}
/*
diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
index 397688b..c27cad7 100644
--- a/arch/x86/kernel/process.c
+++ b/arch/x86/kernel/process.c
@@ -408,6 +408,7 @@
static void mwait_idle(void)
{
if (!current_set_polling_and_test()) {
+ trace_cpu_idle_rcuidle(1, smp_processor_id());
if (this_cpu_has(X86_BUG_CLFLUSH_MONITOR)) {
smp_mb(); /* quirk */
clflush((void *)&current_thread_info()->flags);
@@ -419,6 +420,7 @@
__sti_mwait(0, 0);
else
local_irq_enable();
+ trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, smp_processor_id());
} else {
local_irq_enable();
}
diff --git a/arch/x86/kernel/signal.c b/arch/x86/kernel/signal.c
index 206996c..71820c4 100644
--- a/arch/x86/kernel/signal.c
+++ b/arch/x86/kernel/signal.c
@@ -93,8 +93,15 @@
COPY(r15);
#endif /* CONFIG_X86_64 */
+#ifdef CONFIG_X86_32
COPY_SEG_CPL3(cs);
COPY_SEG_CPL3(ss);
+#else /* !CONFIG_X86_32 */
+	/*
+	 * The kernel saves and restores only the CS segment register on
+	 * signals, which is the bare minimum needed to allow mixed 32/64-bit
+	 * code. The application's signal handler can save/restore other
+	 * segments if needed.
+	 */
+ COPY_SEG_CPL3(cs);
+#endif /* CONFIG_X86_32 */
get_user_ex(tmpflags, &sc->flags);
regs->flags = (regs->flags & ~FIX_EFLAGS) | (tmpflags & FIX_EFLAGS);
@@ -154,9 +161,8 @@
#else /* !CONFIG_X86_32 */
put_user_ex(regs->flags, &sc->flags);
put_user_ex(regs->cs, &sc->cs);
- put_user_ex(0, &sc->__pad2);
- put_user_ex(0, &sc->__pad1);
- put_user_ex(regs->ss, &sc->ss);
+ put_user_ex(0, &sc->gs);
+ put_user_ex(0, &sc->fs);
#endif /* CONFIG_X86_32 */
put_user_ex(fpstate, &sc->fpstate);
@@ -451,19 +457,9 @@
regs->sp = (unsigned long)frame;
- /*
- * Set up the CS and SS registers to run signal handlers in
- * 64-bit mode, even if the handler happens to be interrupting
- * 32-bit or 16-bit code.
- *
- * SS is subtle. In 64-bit mode, we don't need any particular
- * SS descriptor, but we do need SS to be valid. It's possible
- * that the old SS is entirely bogus -- this can happen if the
- * signal we're trying to deliver is #GP or #SS caused by a bad
- * SS value.
- */
+	/*
+	 * Set up the CS register to run signal handlers in 64-bit mode,
+	 * even if the handler happens to be interrupting 32-bit code.
+	 */
regs->cs = __USER_CS;
- regs->ss = __USER_DS;
return 0;
}
diff --git a/arch/x86/kernel/step.c b/arch/x86/kernel/step.c
index 6273324..0ccb53a 100644
--- a/arch/x86/kernel/step.c
+++ b/arch/x86/kernel/step.c
@@ -28,11 +28,11 @@
struct desc_struct *desc;
unsigned long base;
- seg &= ~7UL;
+ seg >>= 3;
mutex_lock(&child->mm->context.lock);
if (unlikely(!child->mm->context.ldt ||
- (seg >> 3) >= child->mm->context.ldt->size))
+ seg >= child->mm->context.ldt->size))
addr = -1L; /* bogus selector, access would fault */
else {
desc = &child->mm->context.ldt->entries[seg];
diff --git a/arch/x86/kvm/Makefile b/arch/x86/kvm/Makefile
index 67d215c..a1ff508 100644
--- a/arch/x86/kvm/Makefile
+++ b/arch/x86/kvm/Makefile
@@ -12,7 +12,9 @@
kvm-$(CONFIG_KVM_ASYNC_PF) += $(KVM)/async_pf.o
kvm-y += x86.o mmu.o emulate.o i8259.o irq.o lapic.o \
- i8254.o ioapic.o irq_comm.o cpuid.o pmu.o mtrr.o
+ i8254.o ioapic.o irq_comm.o cpuid.o pmu.o mtrr.o \
+ hyperv.o
+
kvm-$(CONFIG_KVM_DEVICE_ASSIGNMENT) += assigned-dev.o iommu.o
kvm-intel-y += vmx.o pmu_intel.o
kvm-amd-y += svm.o pmu_amd.o
diff --git a/arch/x86/kvm/hyperv.c b/arch/x86/kvm/hyperv.c
new file mode 100644
index 0000000..a8160d2
--- /dev/null
+++ b/arch/x86/kvm/hyperv.c
@@ -0,0 +1,377 @@
+/*
+ * KVM Microsoft Hyper-V emulation
+ *
+ * derived from arch/x86/kvm/x86.c
+ *
+ * Copyright (C) 2006 Qumranet, Inc.
+ * Copyright (C) 2008 Qumranet, Inc.
+ * Copyright IBM Corporation, 2008
+ * Copyright 2010 Red Hat, Inc. and/or its affiliates.
+ * Copyright (C) 2015 Andrey Smetanin <asmetanin@virtuozzo.com>
+ *
+ * Authors:
+ * Avi Kivity <avi@qumranet.com>
+ * Yaniv Kamay <yaniv@qumranet.com>
+ * Amit Shah <amit.shah@qumranet.com>
+ * Ben-Ami Yassour <benami@il.ibm.com>
+ * Andrey Smetanin <asmetanin@virtuozzo.com>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2. See
+ * the COPYING file in the top-level directory.
+ *
+ */
+
+#include "x86.h"
+#include "lapic.h"
+#include "hyperv.h"
+
+#include <linux/kvm_host.h>
+#include <trace/events/kvm.h>
+
+#include "trace.h"
+
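+/*
+ * Partition-wide MSRs are kept in struct kvm_hv and shared by all vCPUs,
+ * as opposed to the per-vCPU MSRs kept in struct kvm_vcpu_hv.
+ */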
+static bool kvm_hv_msr_partition_wide(u32 msr)
+{
+ bool r = false;
+
+ switch (msr) {
+ case HV_X64_MSR_GUEST_OS_ID:
+ case HV_X64_MSR_HYPERCALL:
+ case HV_X64_MSR_REFERENCE_TSC:
+ case HV_X64_MSR_TIME_REF_COUNT:
+ case HV_X64_MSR_CRASH_CTL:
+ case HV_X64_MSR_CRASH_P0 ... HV_X64_MSR_CRASH_P4:
+ r = true;
+ break;
+ }
+
+ return r;
+}
+
+static int kvm_hv_msr_get_crash_data(struct kvm_vcpu *vcpu,
+ u32 index, u64 *pdata)
+{
+ struct kvm_hv *hv = &vcpu->kvm->arch.hyperv;
+
+ if (WARN_ON_ONCE(index >= ARRAY_SIZE(hv->hv_crash_param)))
+ return -EINVAL;
+
+ *pdata = hv->hv_crash_param[index];
+ return 0;
+}
+
+static int kvm_hv_msr_get_crash_ctl(struct kvm_vcpu *vcpu, u64 *pdata)
+{
+ struct kvm_hv *hv = &vcpu->kvm->arch.hyperv;
+
+ *pdata = hv->hv_crash_ctl;
+ return 0;
+}
+
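+/*
+ * A guest write with HV_X64_MSR_CRASH_CTL_NOTIFY set reports a guest crash;
+ * the crash parameters are logged and user space is notified via
+ * KVM_REQ_HV_CRASH.
+ */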
+static int kvm_hv_msr_set_crash_ctl(struct kvm_vcpu *vcpu, u64 data, bool host)
+{
+ struct kvm_hv *hv = &vcpu->kvm->arch.hyperv;
+
+ if (host)
+ hv->hv_crash_ctl = data & HV_X64_MSR_CRASH_CTL_NOTIFY;
+
+ if (!host && (data & HV_X64_MSR_CRASH_CTL_NOTIFY)) {
+
+ vcpu_debug(vcpu, "hv crash (0x%llx 0x%llx 0x%llx 0x%llx 0x%llx)\n",
+ hv->hv_crash_param[0],
+ hv->hv_crash_param[1],
+ hv->hv_crash_param[2],
+ hv->hv_crash_param[3],
+ hv->hv_crash_param[4]);
+
+ /* Send notification about crash to user space */
+ kvm_make_request(KVM_REQ_HV_CRASH, vcpu);
+ }
+
+ return 0;
+}
+
+static int kvm_hv_msr_set_crash_data(struct kvm_vcpu *vcpu,
+ u32 index, u64 data)
+{
+ struct kvm_hv *hv = &vcpu->kvm->arch.hyperv;
+
+ if (WARN_ON_ONCE(index >= ARRAY_SIZE(hv->hv_crash_param)))
+ return -EINVAL;
+
+ hv->hv_crash_param[index] = data;
+ return 0;
+}
+
+static int kvm_hv_set_msr_pw(struct kvm_vcpu *vcpu, u32 msr, u64 data,
+ bool host)
+{
+ struct kvm *kvm = vcpu->kvm;
+ struct kvm_hv *hv = &kvm->arch.hyperv;
+
+ switch (msr) {
+ case HV_X64_MSR_GUEST_OS_ID:
+ hv->hv_guest_os_id = data;
+ /* setting guest os id to zero disables hypercall page */
+ if (!hv->hv_guest_os_id)
+ hv->hv_hypercall &= ~HV_X64_MSR_HYPERCALL_ENABLE;
+ break;
+ case HV_X64_MSR_HYPERCALL: {
+ u64 gfn;
+ unsigned long addr;
+ u8 instructions[4];
+
+ /* if guest os id is not set hypercall should remain disabled */
+ if (!hv->hv_guest_os_id)
+ break;
+ if (!(data & HV_X64_MSR_HYPERCALL_ENABLE)) {
+ hv->hv_hypercall = data;
+ break;
+ }
+ gfn = data >> HV_X64_MSR_HYPERCALL_PAGE_ADDRESS_SHIFT;
+ addr = gfn_to_hva(kvm, gfn);
+ if (kvm_is_error_hva(addr))
+ return 1;
+ kvm_x86_ops->patch_hypercall(vcpu, instructions);
+ ((unsigned char *)instructions)[3] = 0xc3; /* ret */
+ if (__copy_to_user((void __user *)addr, instructions, 4))
+ return 1;
+ hv->hv_hypercall = data;
+ mark_page_dirty(kvm, gfn);
+ break;
+ }
+ case HV_X64_MSR_REFERENCE_TSC: {
+ u64 gfn;
+ HV_REFERENCE_TSC_PAGE tsc_ref;
+
+ memset(&tsc_ref, 0, sizeof(tsc_ref));
+ hv->hv_tsc_page = data;
+ if (!(data & HV_X64_MSR_TSC_REFERENCE_ENABLE))
+ break;
+ gfn = data >> HV_X64_MSR_TSC_REFERENCE_ADDRESS_SHIFT;
+ if (kvm_write_guest(
+ kvm,
+ gfn << HV_X64_MSR_TSC_REFERENCE_ADDRESS_SHIFT,
+ &tsc_ref, sizeof(tsc_ref)))
+ return 1;
+ mark_page_dirty(kvm, gfn);
+ break;
+ }
+ case HV_X64_MSR_CRASH_P0 ... HV_X64_MSR_CRASH_P4:
+ return kvm_hv_msr_set_crash_data(vcpu,
+ msr - HV_X64_MSR_CRASH_P0,
+ data);
+ case HV_X64_MSR_CRASH_CTL:
+ return kvm_hv_msr_set_crash_ctl(vcpu, data, host);
+ default:
+		vcpu_unimpl(vcpu, "Hyper-V unhandled wrmsr: 0x%x data 0x%llx\n",
+ msr, data);
+ return 1;
+ }
+ return 0;
+}
+
+static int kvm_hv_set_msr(struct kvm_vcpu *vcpu, u32 msr, u64 data)
+{
+ struct kvm_vcpu_hv *hv = &vcpu->arch.hyperv;
+
+ switch (msr) {
+ case HV_X64_MSR_APIC_ASSIST_PAGE: {
+ u64 gfn;
+ unsigned long addr;
+
+ if (!(data & HV_X64_MSR_APIC_ASSIST_PAGE_ENABLE)) {
+ hv->hv_vapic = data;
+ if (kvm_lapic_enable_pv_eoi(vcpu, 0))
+ return 1;
+ break;
+ }
+ gfn = data >> HV_X64_MSR_APIC_ASSIST_PAGE_ADDRESS_SHIFT;
+ addr = kvm_vcpu_gfn_to_hva(vcpu, gfn);
+ if (kvm_is_error_hva(addr))
+ return 1;
+ if (__clear_user((void __user *)addr, PAGE_SIZE))
+ return 1;
+ hv->hv_vapic = data;
+ kvm_vcpu_mark_page_dirty(vcpu, gfn);
+ if (kvm_lapic_enable_pv_eoi(vcpu,
+ gfn_to_gpa(gfn) | KVM_MSR_ENABLED))
+ return 1;
+ break;
+ }
+ case HV_X64_MSR_EOI:
+ return kvm_hv_vapic_msr_write(vcpu, APIC_EOI, data);
+ case HV_X64_MSR_ICR:
+ return kvm_hv_vapic_msr_write(vcpu, APIC_ICR, data);
+ case HV_X64_MSR_TPR:
+ return kvm_hv_vapic_msr_write(vcpu, APIC_TASKPRI, data);
+ default:
+		vcpu_unimpl(vcpu, "Hyper-V unhandled wrmsr: 0x%x data 0x%llx\n",
+ msr, data);
+ return 1;
+ }
+
+ return 0;
+}
+
+static int kvm_hv_get_msr_pw(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
+{
+ u64 data = 0;
+ struct kvm *kvm = vcpu->kvm;
+ struct kvm_hv *hv = &kvm->arch.hyperv;
+
+ switch (msr) {
+ case HV_X64_MSR_GUEST_OS_ID:
+ data = hv->hv_guest_os_id;
+ break;
+ case HV_X64_MSR_HYPERCALL:
+ data = hv->hv_hypercall;
+ break;
+ case HV_X64_MSR_TIME_REF_COUNT: {
+		data = div_u64(get_kernel_ns() + kvm->arch.kvmclock_offset, 100);
+ break;
+ }
+ case HV_X64_MSR_REFERENCE_TSC:
+ data = hv->hv_tsc_page;
+ break;
+ case HV_X64_MSR_CRASH_P0 ... HV_X64_MSR_CRASH_P4:
+ return kvm_hv_msr_get_crash_data(vcpu,
+ msr - HV_X64_MSR_CRASH_P0,
+ pdata);
+ case HV_X64_MSR_CRASH_CTL:
+ return kvm_hv_msr_get_crash_ctl(vcpu, pdata);
+ default:
+ vcpu_unimpl(vcpu, "Hyper-V unhandled rdmsr: 0x%x\n", msr);
+ return 1;
+ }
+
+ *pdata = data;
+ return 0;
+}
+
+static int kvm_hv_get_msr(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
+{
+ u64 data = 0;
+ struct kvm_vcpu_hv *hv = &vcpu->arch.hyperv;
+
+ switch (msr) {
+ case HV_X64_MSR_VP_INDEX: {
+ int r;
+ struct kvm_vcpu *v;
+
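+		/* The VP index is simply this vcpu's position in the kvm->vcpus array. */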
+ kvm_for_each_vcpu(r, v, vcpu->kvm) {
+ if (v == vcpu) {
+ data = r;
+ break;
+ }
+ }
+ break;
+ }
+ case HV_X64_MSR_EOI:
+ return kvm_hv_vapic_msr_read(vcpu, APIC_EOI, pdata);
+ case HV_X64_MSR_ICR:
+ return kvm_hv_vapic_msr_read(vcpu, APIC_ICR, pdata);
+ case HV_X64_MSR_TPR:
+ return kvm_hv_vapic_msr_read(vcpu, APIC_TASKPRI, pdata);
+ case HV_X64_MSR_APIC_ASSIST_PAGE:
+ data = hv->hv_vapic;
+ break;
+ default:
+ vcpu_unimpl(vcpu, "Hyper-V unhandled rdmsr: 0x%x\n", msr);
+ return 1;
+ }
+ *pdata = data;
+ return 0;
+}
+
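+/* Accesses to partition-wide MSRs are serialized with kvm->lock. */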
+int kvm_hv_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data, bool host)
+{
+ if (kvm_hv_msr_partition_wide(msr)) {
+ int r;
+
+ mutex_lock(&vcpu->kvm->lock);
+ r = kvm_hv_set_msr_pw(vcpu, msr, data, host);
+ mutex_unlock(&vcpu->kvm->lock);
+ return r;
+ } else
+ return kvm_hv_set_msr(vcpu, msr, data);
+}
+
+int kvm_hv_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
+{
+ if (kvm_hv_msr_partition_wide(msr)) {
+ int r;
+
+ mutex_lock(&vcpu->kvm->lock);
+ r = kvm_hv_get_msr_pw(vcpu, msr, pdata);
+ mutex_unlock(&vcpu->kvm->lock);
+ return r;
+ } else
+ return kvm_hv_get_msr(vcpu, msr, pdata);
+}
+
+bool kvm_hv_hypercall_enabled(struct kvm *kvm)
+{
+ return kvm->arch.hyperv.hv_hypercall & HV_X64_MSR_HYPERCALL_ENABLE;
+}
+
+int kvm_hv_hypercall(struct kvm_vcpu *vcpu)
+{
+ u64 param, ingpa, outgpa, ret;
+ uint16_t code, rep_idx, rep_cnt, res = HV_STATUS_SUCCESS, rep_done = 0;
+ bool fast, longmode;
+
+	/*
+	 * Per the Hyper-V spec, a hypercall generates #UD when issued
+	 * from non-zero CPL or from real mode.
+	 */
+ if (kvm_x86_ops->get_cpl(vcpu) != 0 || !is_protmode(vcpu)) {
+ kvm_queue_exception(vcpu, UD_VECTOR);
+ return 0;
+ }
+
+ longmode = is_64_bit_mode(vcpu);
+
+ if (!longmode) {
+ param = ((u64)kvm_register_read(vcpu, VCPU_REGS_RDX) << 32) |
+ (kvm_register_read(vcpu, VCPU_REGS_RAX) & 0xffffffff);
+ ingpa = ((u64)kvm_register_read(vcpu, VCPU_REGS_RBX) << 32) |
+ (kvm_register_read(vcpu, VCPU_REGS_RCX) & 0xffffffff);
+ outgpa = ((u64)kvm_register_read(vcpu, VCPU_REGS_RDI) << 32) |
+ (kvm_register_read(vcpu, VCPU_REGS_RSI) & 0xffffffff);
+ }
+#ifdef CONFIG_X86_64
+ else {
+ param = kvm_register_read(vcpu, VCPU_REGS_RCX);
+ ingpa = kvm_register_read(vcpu, VCPU_REGS_RDX);
+ outgpa = kvm_register_read(vcpu, VCPU_REGS_R8);
+ }
+#endif
+
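+	/*
+	 * Hypercall input value layout: call code in bits 15:0, fast-call
+	 * flag in bit 16, rep count in bits 43:32, rep start index in
+	 * bits 59:48.
+	 */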
+ code = param & 0xffff;
+ fast = (param >> 16) & 0x1;
+ rep_cnt = (param >> 32) & 0xfff;
+ rep_idx = (param >> 48) & 0xfff;
+
+ trace_kvm_hv_hypercall(code, fast, rep_cnt, rep_idx, ingpa, outgpa);
+
+ switch (code) {
+ case HV_X64_HV_NOTIFY_LONG_SPIN_WAIT:
+ kvm_vcpu_on_spin(vcpu);
+ break;
+ default:
+ res = HV_STATUS_INVALID_HYPERCALL_CODE;
+ break;
+ }
+
+ ret = res | (((u64)rep_done & 0xfff) << 32);
+ if (longmode) {
+ kvm_register_write(vcpu, VCPU_REGS_RAX, ret);
+ } else {
+ kvm_register_write(vcpu, VCPU_REGS_RDX, ret >> 32);
+ kvm_register_write(vcpu, VCPU_REGS_RAX, ret & 0xffffffff);
+ }
+
+ return 1;
+}
diff --git a/arch/x86/kvm/hyperv.h b/arch/x86/kvm/hyperv.h
new file mode 100644
index 0000000..c7bce55
--- /dev/null
+++ b/arch/x86/kvm/hyperv.h
@@ -0,0 +1,32 @@
+/*
+ * KVM Microsoft Hyper-V emulation
+ *
+ * derived from arch/x86/kvm/x86.c
+ *
+ * Copyright (C) 2006 Qumranet, Inc.
+ * Copyright (C) 2008 Qumranet, Inc.
+ * Copyright IBM Corporation, 2008
+ * Copyright 2010 Red Hat, Inc. and/or its affiliates.
+ * Copyright (C) 2015 Andrey Smetanin <asmetanin@virtuozzo.com>
+ *
+ * Authors:
+ * Avi Kivity <avi@qumranet.com>
+ * Yaniv Kamay <yaniv@qumranet.com>
+ * Amit Shah <amit.shah@qumranet.com>
+ * Ben-Ami Yassour <benami@il.ibm.com>
+ * Andrey Smetanin <asmetanin@virtuozzo.com>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2. See
+ * the COPYING file in the top-level directory.
+ *
+ */
+
+#ifndef __ARCH_X86_KVM_HYPERV_H__
+#define __ARCH_X86_KVM_HYPERV_H__
+
+int kvm_hv_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data, bool host);
+int kvm_hv_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata);
+bool kvm_hv_hypercall_enabled(struct kvm *kvm);
+int kvm_hv_hypercall(struct kvm_vcpu *vcpu);
+
+#endif
diff --git a/arch/x86/kvm/i8259.c b/arch/x86/kvm/i8259.c
index fef922f..7cc2360 100644
--- a/arch/x86/kvm/i8259.c
+++ b/arch/x86/kvm/i8259.c
@@ -651,15 +651,10 @@
return NULL;
}
-void kvm_destroy_pic(struct kvm *kvm)
+void kvm_destroy_pic(struct kvm_pic *vpic)
{
- struct kvm_pic *vpic = kvm->arch.vpic;
-
- if (vpic) {
- kvm_io_bus_unregister_dev(kvm, KVM_PIO_BUS, &vpic->dev_master);
- kvm_io_bus_unregister_dev(kvm, KVM_PIO_BUS, &vpic->dev_slave);
- kvm_io_bus_unregister_dev(kvm, KVM_PIO_BUS, &vpic->dev_eclr);
- kvm->arch.vpic = NULL;
- kfree(vpic);
- }
+ kvm_io_bus_unregister_dev(vpic->kvm, KVM_PIO_BUS, &vpic->dev_master);
+ kvm_io_bus_unregister_dev(vpic->kvm, KVM_PIO_BUS, &vpic->dev_slave);
+ kvm_io_bus_unregister_dev(vpic->kvm, KVM_PIO_BUS, &vpic->dev_eclr);
+ kfree(vpic);
}
diff --git a/arch/x86/kvm/irq.h b/arch/x86/kvm/irq.h
index ad68c73..3d782a2 100644
--- a/arch/x86/kvm/irq.h
+++ b/arch/x86/kvm/irq.h
@@ -74,7 +74,7 @@
};
struct kvm_pic *kvm_create_pic(struct kvm *kvm);
-void kvm_destroy_pic(struct kvm *kvm);
+void kvm_destroy_pic(struct kvm_pic *vpic);
int kvm_pic_read_irq(struct kvm *kvm);
void kvm_pic_update_irq(struct kvm_pic *s);
@@ -85,11 +85,11 @@
static inline int irqchip_in_kernel(struct kvm *kvm)
{
- int ret;
+ struct kvm_pic *vpic = pic_irqchip(kvm);
- ret = (pic_irqchip(kvm) != NULL);
+ /* Read vpic before kvm->irq_routing. */
smp_rmb();
- return ret;
+ return vpic != NULL;
}
void kvm_pic_reset(struct kvm_kpic_state *s);
diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
index 2a5ca97c..9a3e342 100644
--- a/arch/x86/kvm/lapic.c
+++ b/arch/x86/kvm/lapic.c
@@ -1900,8 +1900,9 @@
if (!test_bit(KVM_APIC_CHECK_VAPIC, &vcpu->arch.apic_attention))
return;
- kvm_read_guest_cached(vcpu->kvm, &vcpu->arch.apic->vapic_cache, &data,
- sizeof(u32));
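+	/* If the vapic page cannot be read, leave the TPR unchanged. */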
+ if (kvm_read_guest_cached(vcpu->kvm, &vcpu->arch.apic->vapic_cache, &data,
+ sizeof(u32)))
+ return;
apic_set_tpr(vcpu->arch.apic, data & 0xff);
}
diff --git a/arch/x86/kvm/lapic.h b/arch/x86/kvm/lapic.h
index 7195274..7640379 100644
--- a/arch/x86/kvm/lapic.h
+++ b/arch/x86/kvm/lapic.h
@@ -91,7 +91,7 @@
static inline bool kvm_hv_vapic_assist_page_enabled(struct kvm_vcpu *vcpu)
{
- return vcpu->arch.hv_vapic & HV_X64_MSR_APIC_ASSIST_PAGE_ENABLE;
+ return vcpu->arch.hyperv.hv_vapic & HV_X64_MSR_APIC_ASSIST_PAGE_ENABLE;
}
int kvm_lapic_enable_pv_eoi(struct kvm_vcpu *vcpu, u64 data);
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 4417146..fb16a8e 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -357,12 +357,6 @@
{
return ACCESS_ONCE(*sptep);
}
-
-static bool __check_direct_spte_mmio_pf(u64 spte)
-{
- /* It is valid if the spte is zapped. */
- return spte == 0ull;
-}
#else
union split_spte {
struct {
@@ -478,23 +472,6 @@
return spte.spte;
}
-
-static bool __check_direct_spte_mmio_pf(u64 spte)
-{
- union split_spte sspte = (union split_spte)spte;
- u32 high_mmio_mask = shadow_mmio_mask >> 32;
-
- /* It is valid if the spte is zapped. */
- if (spte == 0ull)
- return true;
-
- /* It is valid if the spte is being zapped. */
- if (sspte.spte_low == 0ull &&
- (sspte.spte_high & high_mmio_mask) == high_mmio_mask)
- return true;
-
- return false;
-}
#endif
static bool spte_is_locklessly_modifiable(u64 spte)
@@ -3291,6 +3268,25 @@
return vcpu->arch.nested_mmu.translate_gpa(vcpu, vaddr, access, exception);
}
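+/*
+ * Return true if @pte sets any bit that is required to be zero at @level,
+ * according to the given reserved-bit masks and bad memory-type/XWR bitmap.
+ */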
+static bool
+__is_rsvd_bits_set(struct rsvd_bits_validate *rsvd_check, u64 pte, int level)
+{
+ int bit7 = (pte >> 7) & 1, low6 = pte & 0x3f;
+
+ return (pte & rsvd_check->rsvd_bits_mask[bit7][level-1]) |
+ ((rsvd_check->bad_mt_xwr & (1ull << low6)) != 0);
+}
+
+static bool is_rsvd_bits_set(struct kvm_mmu *mmu, u64 gpte, int level)
+{
+ return __is_rsvd_bits_set(&mmu->guest_rsvd_check, gpte, level);
+}
+
+static bool is_shadow_zero_bits_set(struct kvm_mmu *mmu, u64 spte, int level)
+{
+ return __is_rsvd_bits_set(&mmu->shadow_zero_check, spte, level);
+}
+
static bool quickly_check_mmio_pf(struct kvm_vcpu *vcpu, u64 addr, bool direct)
{
if (direct)
@@ -3299,46 +3295,62 @@
return vcpu_match_mmio_gva(vcpu, addr);
}
-
-/*
- * On direct hosts, the last spte is only allows two states
- * for mmio page fault:
- * - It is the mmio spte
- * - It is zapped or it is being zapped.
- *
- * This function completely checks the spte when the last spte
- * is not the mmio spte.
- */
-static bool check_direct_spte_mmio_pf(u64 spte)
-{
- return __check_direct_spte_mmio_pf(spte);
-}
-
-static u64 walk_shadow_page_get_mmio_spte(struct kvm_vcpu *vcpu, u64 addr)
+/* Return true if a reserved bit is detected in an spte along the walk. */
+static bool
+walk_shadow_page_get_mmio_spte(struct kvm_vcpu *vcpu, u64 addr, u64 *sptep)
{
struct kvm_shadow_walk_iterator iterator;
- u64 spte = 0ull;
+ u64 sptes[PT64_ROOT_LEVEL], spte = 0ull;
+ int root, leaf;
+ bool reserved = false;
if (!VALID_PAGE(vcpu->arch.mmu.root_hpa))
- return spte;
+ goto exit;
walk_shadow_page_lockless_begin(vcpu);
- for_each_shadow_entry_lockless(vcpu, addr, iterator, spte)
+
+ for (shadow_walk_init(&iterator, vcpu, addr), root = iterator.level;
+ shadow_walk_okay(&iterator);
+ __shadow_walk_next(&iterator, spte)) {
+ leaf = iterator.level;
+ spte = mmu_spte_get_lockless(iterator.sptep);
+
+ sptes[leaf - 1] = spte;
+
if (!is_shadow_present_pte(spte))
break;
+
+ reserved |= is_shadow_zero_bits_set(&vcpu->arch.mmu, spte,
+ leaf);
+ }
+
walk_shadow_page_lockless_end(vcpu);
- return spte;
+ if (reserved) {
+		pr_err("%s: reserved bits set on spte, addr 0x%llx, dump hierarchy:\n",
+ __func__, addr);
+ while (root >= leaf) {
+ pr_err("------ spte 0x%llx level %d.\n",
+ sptes[root - 1], root);
+ root--;
+ }
+ }
+exit:
+ *sptep = spte;
+ return reserved;
}
int handle_mmio_page_fault_common(struct kvm_vcpu *vcpu, u64 addr, bool direct)
{
u64 spte;
+ bool reserved;
if (quickly_check_mmio_pf(vcpu, addr, direct))
return RET_MMIO_PF_EMULATE;
- spte = walk_shadow_page_get_mmio_spte(vcpu, addr);
+ reserved = walk_shadow_page_get_mmio_spte(vcpu, addr, &spte);
+ if (unlikely(reserved))
+ return RET_MMIO_PF_BUG;
if (is_mmio_spte(spte)) {
gfn_t gfn = get_mmio_spte_gfn(spte);
@@ -3356,13 +3368,6 @@
}
/*
- * It's ok if the gva is remapped by other cpus on shadow guest,
- * it's a BUG if the gfn is not a mmio page.
- */
- if (direct && !check_direct_spte_mmio_pf(spte))
- return RET_MMIO_PF_BUG;
-
- /*
* If the page table is zapped by other cpus, let CPU fault again on
* the address.
*/
@@ -3604,19 +3609,21 @@
#include "paging_tmpl.h"
#undef PTTYPE
-static void reset_rsvds_bits_mask(struct kvm_vcpu *vcpu,
- struct kvm_mmu *context)
+static void
+__reset_rsvds_bits_mask(struct kvm_vcpu *vcpu,
+ struct rsvd_bits_validate *rsvd_check,
+ int maxphyaddr, int level, bool nx, bool gbpages,
+ bool pse)
{
- int maxphyaddr = cpuid_maxphyaddr(vcpu);
u64 exb_bit_rsvd = 0;
u64 gbpages_bit_rsvd = 0;
u64 nonleaf_bit8_rsvd = 0;
- context->bad_mt_xwr = 0;
+ rsvd_check->bad_mt_xwr = 0;
- if (!context->nx)
+ if (!nx)
exb_bit_rsvd = rsvd_bits(63, 63);
- if (!guest_cpuid_has_gbpages(vcpu))
+ if (!gbpages)
gbpages_bit_rsvd = rsvd_bits(7, 7);
/*
@@ -3626,80 +3633,95 @@
if (guest_cpuid_is_amd(vcpu))
nonleaf_bit8_rsvd = rsvd_bits(8, 8);
- switch (context->root_level) {
+ switch (level) {
case PT32_ROOT_LEVEL:
/* no rsvd bits for 2 level 4K page table entries */
- context->rsvd_bits_mask[0][1] = 0;
- context->rsvd_bits_mask[0][0] = 0;
- context->rsvd_bits_mask[1][0] = context->rsvd_bits_mask[0][0];
+ rsvd_check->rsvd_bits_mask[0][1] = 0;
+ rsvd_check->rsvd_bits_mask[0][0] = 0;
+ rsvd_check->rsvd_bits_mask[1][0] =
+ rsvd_check->rsvd_bits_mask[0][0];
- if (!is_pse(vcpu)) {
- context->rsvd_bits_mask[1][1] = 0;
+ if (!pse) {
+ rsvd_check->rsvd_bits_mask[1][1] = 0;
break;
}
if (is_cpuid_PSE36())
/* 36bits PSE 4MB page */
- context->rsvd_bits_mask[1][1] = rsvd_bits(17, 21);
+ rsvd_check->rsvd_bits_mask[1][1] = rsvd_bits(17, 21);
else
/* 32 bits PSE 4MB page */
- context->rsvd_bits_mask[1][1] = rsvd_bits(13, 21);
+ rsvd_check->rsvd_bits_mask[1][1] = rsvd_bits(13, 21);
break;
case PT32E_ROOT_LEVEL:
- context->rsvd_bits_mask[0][2] =
+ rsvd_check->rsvd_bits_mask[0][2] =
rsvd_bits(maxphyaddr, 63) |
rsvd_bits(5, 8) | rsvd_bits(1, 2); /* PDPTE */
- context->rsvd_bits_mask[0][1] = exb_bit_rsvd |
+ rsvd_check->rsvd_bits_mask[0][1] = exb_bit_rsvd |
rsvd_bits(maxphyaddr, 62); /* PDE */
- context->rsvd_bits_mask[0][0] = exb_bit_rsvd |
+ rsvd_check->rsvd_bits_mask[0][0] = exb_bit_rsvd |
rsvd_bits(maxphyaddr, 62); /* PTE */
- context->rsvd_bits_mask[1][1] = exb_bit_rsvd |
+ rsvd_check->rsvd_bits_mask[1][1] = exb_bit_rsvd |
rsvd_bits(maxphyaddr, 62) |
rsvd_bits(13, 20); /* large page */
- context->rsvd_bits_mask[1][0] = context->rsvd_bits_mask[0][0];
+ rsvd_check->rsvd_bits_mask[1][0] =
+ rsvd_check->rsvd_bits_mask[0][0];
break;
case PT64_ROOT_LEVEL:
- context->rsvd_bits_mask[0][3] = exb_bit_rsvd |
- nonleaf_bit8_rsvd | rsvd_bits(7, 7) | rsvd_bits(maxphyaddr, 51);
- context->rsvd_bits_mask[0][2] = exb_bit_rsvd |
- nonleaf_bit8_rsvd | gbpages_bit_rsvd | rsvd_bits(maxphyaddr, 51);
- context->rsvd_bits_mask[0][1] = exb_bit_rsvd |
+ rsvd_check->rsvd_bits_mask[0][3] = exb_bit_rsvd |
+ nonleaf_bit8_rsvd | rsvd_bits(7, 7) |
rsvd_bits(maxphyaddr, 51);
- context->rsvd_bits_mask[0][0] = exb_bit_rsvd |
+ rsvd_check->rsvd_bits_mask[0][2] = exb_bit_rsvd |
+ nonleaf_bit8_rsvd | gbpages_bit_rsvd |
rsvd_bits(maxphyaddr, 51);
- context->rsvd_bits_mask[1][3] = context->rsvd_bits_mask[0][3];
- context->rsvd_bits_mask[1][2] = exb_bit_rsvd |
+ rsvd_check->rsvd_bits_mask[0][1] = exb_bit_rsvd |
+ rsvd_bits(maxphyaddr, 51);
+ rsvd_check->rsvd_bits_mask[0][0] = exb_bit_rsvd |
+ rsvd_bits(maxphyaddr, 51);
+ rsvd_check->rsvd_bits_mask[1][3] =
+ rsvd_check->rsvd_bits_mask[0][3];
+ rsvd_check->rsvd_bits_mask[1][2] = exb_bit_rsvd |
gbpages_bit_rsvd | rsvd_bits(maxphyaddr, 51) |
rsvd_bits(13, 29);
- context->rsvd_bits_mask[1][1] = exb_bit_rsvd |
+ rsvd_check->rsvd_bits_mask[1][1] = exb_bit_rsvd |
rsvd_bits(maxphyaddr, 51) |
rsvd_bits(13, 20); /* large page */
- context->rsvd_bits_mask[1][0] = context->rsvd_bits_mask[0][0];
+ rsvd_check->rsvd_bits_mask[1][0] =
+ rsvd_check->rsvd_bits_mask[0][0];
break;
}
}
-static void reset_rsvds_bits_mask_ept(struct kvm_vcpu *vcpu,
- struct kvm_mmu *context, bool execonly)
+static void reset_rsvds_bits_mask(struct kvm_vcpu *vcpu,
+ struct kvm_mmu *context)
{
- int maxphyaddr = cpuid_maxphyaddr(vcpu);
+ __reset_rsvds_bits_mask(vcpu, &context->guest_rsvd_check,
+ cpuid_maxphyaddr(vcpu), context->root_level,
+ context->nx, guest_cpuid_has_gbpages(vcpu),
+ is_pse(vcpu));
+}
+
+static void
+__reset_rsvds_bits_mask_ept(struct rsvd_bits_validate *rsvd_check,
+ int maxphyaddr, bool execonly)
+{
int pte;
- context->rsvd_bits_mask[0][3] =
+ rsvd_check->rsvd_bits_mask[0][3] =
rsvd_bits(maxphyaddr, 51) | rsvd_bits(3, 7);
- context->rsvd_bits_mask[0][2] =
+ rsvd_check->rsvd_bits_mask[0][2] =
rsvd_bits(maxphyaddr, 51) | rsvd_bits(3, 6);
- context->rsvd_bits_mask[0][1] =
+ rsvd_check->rsvd_bits_mask[0][1] =
rsvd_bits(maxphyaddr, 51) | rsvd_bits(3, 6);
- context->rsvd_bits_mask[0][0] = rsvd_bits(maxphyaddr, 51);
+ rsvd_check->rsvd_bits_mask[0][0] = rsvd_bits(maxphyaddr, 51);
/* large page */
- context->rsvd_bits_mask[1][3] = context->rsvd_bits_mask[0][3];
- context->rsvd_bits_mask[1][2] =
+ rsvd_check->rsvd_bits_mask[1][3] = rsvd_check->rsvd_bits_mask[0][3];
+ rsvd_check->rsvd_bits_mask[1][2] =
rsvd_bits(maxphyaddr, 51) | rsvd_bits(12, 29);
- context->rsvd_bits_mask[1][1] =
+ rsvd_check->rsvd_bits_mask[1][1] =
rsvd_bits(maxphyaddr, 51) | rsvd_bits(12, 20);
- context->rsvd_bits_mask[1][0] = context->rsvd_bits_mask[0][0];
+ rsvd_check->rsvd_bits_mask[1][0] = rsvd_check->rsvd_bits_mask[0][0];
for (pte = 0; pte < 64; pte++) {
int rwx_bits = pte & 7;
@@ -3707,10 +3729,64 @@
if (mt == 0x2 || mt == 0x3 || mt == 0x7 ||
rwx_bits == 0x2 || rwx_bits == 0x6 ||
(rwx_bits == 0x4 && !execonly))
- context->bad_mt_xwr |= (1ull << pte);
+ rsvd_check->bad_mt_xwr |= (1ull << pte);
}
}
+static void reset_rsvds_bits_mask_ept(struct kvm_vcpu *vcpu,
+ struct kvm_mmu *context, bool execonly)
+{
+ __reset_rsvds_bits_mask_ept(&context->guest_rsvd_check,
+ cpuid_maxphyaddr(vcpu), execonly);
+}
+
+/*
+ * The host page table here is the shadow page table for the guest page
+ * table (or for an AMD nested guest's page table); its MMU features
+ * completely follow the features of the guest.
+ */
+void
+reset_shadow_zero_bits_mask(struct kvm_vcpu *vcpu, struct kvm_mmu *context)
+{
+ __reset_rsvds_bits_mask(vcpu, &context->shadow_zero_check,
+ boot_cpu_data.x86_phys_bits,
+ context->shadow_root_level, context->nx,
+ guest_cpuid_has_gbpages(vcpu), is_pse(vcpu));
+}
+EXPORT_SYMBOL_GPL(reset_shadow_zero_bits_mask);
+
+/*
+ * The host page table here is a direct page table, so use as many MMU
+ * features as possible; note, however, that KVM currently does not do
+ * execution-protection.
+ */
+static void
+reset_tdp_shadow_zero_bits_mask(struct kvm_vcpu *vcpu,
+ struct kvm_mmu *context)
+{
+ if (guest_cpuid_is_amd(vcpu))
+ __reset_rsvds_bits_mask(vcpu, &context->shadow_zero_check,
+ boot_cpu_data.x86_phys_bits,
+ context->shadow_root_level, false,
+ cpu_has_gbpages, true);
+ else
+ __reset_rsvds_bits_mask_ept(&context->shadow_zero_check,
+ boot_cpu_data.x86_phys_bits,
+ false);
+
+}
+/*
+ * Same as the comment for reset_shadow_zero_bits_mask(), except that
+ * this is the shadow page table for an Intel nested guest.
+ */
+static void
+reset_ept_shadow_zero_bits_mask(struct kvm_vcpu *vcpu,
+ struct kvm_mmu *context, bool execonly)
+{
+ __reset_rsvds_bits_mask_ept(&context->shadow_zero_check,
+ boot_cpu_data.x86_phys_bits, execonly);
+}
+
static void update_permission_bitmask(struct kvm_vcpu *vcpu,
struct kvm_mmu *mmu, bool ept)
{
@@ -3889,6 +3965,7 @@
update_permission_bitmask(vcpu, context, false);
update_last_pte_bitmap(vcpu, context);
+ reset_tdp_shadow_zero_bits_mask(vcpu, context);
}
void kvm_init_shadow_mmu(struct kvm_vcpu *vcpu)
@@ -3916,6 +3993,7 @@
context->base_role.smap_andnot_wp
= smap && !is_write_protection(vcpu);
context->base_role.smm = is_smm(vcpu);
+ reset_shadow_zero_bits_mask(vcpu, context);
}
EXPORT_SYMBOL_GPL(kvm_init_shadow_mmu);
@@ -3939,6 +4017,7 @@
update_permission_bitmask(vcpu, context, true);
reset_rsvds_bits_mask_ept(vcpu, context, execonly);
+ reset_ept_shadow_zero_bits_mask(vcpu, context, execonly);
}
EXPORT_SYMBOL_GPL(kvm_init_shadow_ept_mmu);
@@ -4860,28 +4939,6 @@
return nr_mmu_pages;
}
-int kvm_mmu_get_spte_hierarchy(struct kvm_vcpu *vcpu, u64 addr, u64 sptes[4])
-{
- struct kvm_shadow_walk_iterator iterator;
- u64 spte;
- int nr_sptes = 0;
-
- if (!VALID_PAGE(vcpu->arch.mmu.root_hpa))
- return nr_sptes;
-
- walk_shadow_page_lockless_begin(vcpu);
- for_each_shadow_entry_lockless(vcpu, addr, iterator, spte) {
- sptes[iterator.level-1] = spte;
- nr_sptes++;
- if (!is_shadow_present_pte(spte))
- break;
- }
- walk_shadow_page_lockless_end(vcpu);
-
- return nr_sptes;
-}
-EXPORT_SYMBOL_GPL(kvm_mmu_get_spte_hierarchy);
-
void kvm_mmu_destroy(struct kvm_vcpu *vcpu)
{
kvm_mmu_unload(vcpu);
diff --git a/arch/x86/kvm/mmu.h b/arch/x86/kvm/mmu.h
index 398d21c..e4202e4 100644
--- a/arch/x86/kvm/mmu.h
+++ b/arch/x86/kvm/mmu.h
@@ -50,9 +50,11 @@
return ((1ULL << (e - s + 1)) - 1) << s;
}
-int kvm_mmu_get_spte_hierarchy(struct kvm_vcpu *vcpu, u64 addr, u64 sptes[4]);
void kvm_mmu_set_mmio_spte_mask(u64 mmio_mask);
+void
+reset_shadow_zero_bits_mask(struct kvm_vcpu *vcpu, struct kvm_mmu *context);
+
/*
* Return values of handle_mmio_page_fault_common:
* RET_MMIO_PF_EMULATE: it is a real mmio page fault, emulate the instruction
diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
index 0f67d7e..736e6ab 100644
--- a/arch/x86/kvm/paging_tmpl.h
+++ b/arch/x86/kvm/paging_tmpl.h
@@ -128,14 +128,6 @@
*access &= mask;
}
-static bool FNAME(is_rsvd_bits_set)(struct kvm_mmu *mmu, u64 gpte, int level)
-{
- int bit7 = (gpte >> 7) & 1, low6 = gpte & 0x3f;
-
- return (gpte & mmu->rsvd_bits_mask[bit7][level-1]) |
- ((mmu->bad_mt_xwr & (1ull << low6)) != 0);
-}
-
static inline int FNAME(is_present_gpte)(unsigned long pte)
{
#if PTTYPE != PTTYPE_EPT
@@ -172,7 +164,7 @@
struct kvm_mmu_page *sp, u64 *spte,
u64 gpte)
{
- if (FNAME(is_rsvd_bits_set)(&vcpu->arch.mmu, gpte, PT_PAGE_TABLE_LEVEL))
+ if (is_rsvd_bits_set(&vcpu->arch.mmu, gpte, PT_PAGE_TABLE_LEVEL))
goto no_present;
if (!FNAME(is_present_gpte)(gpte))
@@ -353,8 +345,7 @@
if (unlikely(!FNAME(is_present_gpte)(pte)))
goto error;
- if (unlikely(FNAME(is_rsvd_bits_set)(mmu, pte,
- walker->level))) {
+ if (unlikely(is_rsvd_bits_set(mmu, pte, walker->level))) {
errcode |= PFERR_RSVD_MASK | PFERR_PRESENT_MASK;
goto error;
}
diff --git a/arch/x86/kvm/pmu_amd.c b/arch/x86/kvm/pmu_amd.c
index 886aa25..39b9112 100644
--- a/arch/x86/kvm/pmu_amd.c
+++ b/arch/x86/kvm/pmu_amd.c
@@ -133,8 +133,6 @@
/* MSR_K7_PERFCTRn */
pmc = get_gp_pmc(pmu, msr, MSR_K7_PERFCTR0);
if (pmc) {
- if (!msr_info->host_initiated)
- data = (s64)data;
pmc->counter += data - pmc_read_counter(pmc);
return 0;
}
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index 8e0c084..74d8257 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -1173,6 +1173,10 @@
if (!is_mmio && !kvm_arch_has_assigned_device(vcpu->kvm))
return 0;
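+	/*
+	 * Without the KVM_X86_QUIRK_CD_NW_CLEARED quirk, honor CR0.CD by
+	 * mapping guest memory as uncacheable.
+	 */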
+ if (!kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_CD_NW_CLEARED) &&
+ kvm_read_cr0(vcpu) & X86_CR0_CD)
+ return _PAGE_NOCACHE;
+
mtrr = kvm_mtrr_get_guest_memory_type(vcpu, gfn);
return mtrr2protval[mtrr];
}
@@ -1667,13 +1671,10 @@
if (!vcpu->fpu_active)
cr0 |= X86_CR0_TS;
- /*
- * re-enable caching here because the QEMU bios
- * does not do it - this results in some delay at
- * reboot
- */
- if (kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_CD_NW_CLEARED))
- cr0 &= ~(X86_CR0_CD | X86_CR0_NW);
+
+ /* These are emulated via page tables. */
+ cr0 &= ~(X86_CR0_CD | X86_CR0_NW);
+
svm->vmcb->save.cr0 = cr0;
mark_dirty(svm->vmcb, VMCB_CR);
update_cr0_intercept(svm);
@@ -2106,6 +2107,7 @@
vcpu->arch.mmu.get_pdptr = nested_svm_get_tdp_pdptr;
vcpu->arch.mmu.inject_page_fault = nested_svm_inject_npf_exit;
vcpu->arch.mmu.shadow_root_level = get_npt_level();
+ reset_shadow_zero_bits_mask(vcpu, &vcpu->arch.mmu);
vcpu->arch.walk_mmu = &vcpu->arch.nested_mmu;
}
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 83b7b5c..da1590e 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -2443,10 +2443,10 @@
CPU_BASED_CR8_LOAD_EXITING | CPU_BASED_CR8_STORE_EXITING |
#endif
CPU_BASED_MOV_DR_EXITING | CPU_BASED_UNCOND_IO_EXITING |
- CPU_BASED_USE_IO_BITMAPS | CPU_BASED_MONITOR_EXITING |
- CPU_BASED_RDPMC_EXITING | CPU_BASED_RDTSC_EXITING |
- CPU_BASED_PAUSE_EXITING | CPU_BASED_TPR_SHADOW |
- CPU_BASED_ACTIVATE_SECONDARY_CONTROLS;
+ CPU_BASED_USE_IO_BITMAPS | CPU_BASED_MONITOR_TRAP_FLAG |
+ CPU_BASED_MONITOR_EXITING | CPU_BASED_RDPMC_EXITING |
+ CPU_BASED_RDTSC_EXITING | CPU_BASED_PAUSE_EXITING |
+ CPU_BASED_TPR_SHADOW | CPU_BASED_ACTIVATE_SECONDARY_CONTROLS;
/*
* We can allow some features even when not supported by the
* hardware. For example, L1 can specify an MSR bitmap - and we
@@ -3423,12 +3423,12 @@
vmx_segment_cache_clear(to_vmx(vcpu));
guest_tr_ar = vmcs_read32(GUEST_TR_AR_BYTES);
- if ((guest_tr_ar & AR_TYPE_MASK) != AR_TYPE_BUSY_64_TSS) {
+ if ((guest_tr_ar & VMX_AR_TYPE_MASK) != VMX_AR_TYPE_BUSY_64_TSS) {
pr_debug_ratelimited("%s: tss fixup for long mode. \n",
__func__);
vmcs_write32(GUEST_TR_AR_BYTES,
- (guest_tr_ar & ~AR_TYPE_MASK)
- | AR_TYPE_BUSY_64_TSS);
+ (guest_tr_ar & ~VMX_AR_TYPE_MASK)
+ | VMX_AR_TYPE_BUSY_64_TSS);
}
vmx_set_efer(vcpu, vcpu->arch.efer | EFER_LMA);
}
@@ -3719,7 +3719,7 @@
return 0;
else {
int ar = vmx_read_guest_seg_ar(vmx, VCPU_SREG_SS);
- return AR_DPL(ar);
+ return VMX_AR_DPL(ar);
}
}
@@ -3847,11 +3847,11 @@
if (cs.unusable)
return false;
- if (~cs.type & (AR_TYPE_CODE_MASK|AR_TYPE_ACCESSES_MASK))
+ if (~cs.type & (VMX_AR_TYPE_CODE_MASK|VMX_AR_TYPE_ACCESSES_MASK))
return false;
if (!cs.s)
return false;
- if (cs.type & AR_TYPE_WRITEABLE_MASK) {
+ if (cs.type & VMX_AR_TYPE_WRITEABLE_MASK) {
if (cs.dpl > cs_rpl)
return false;
} else {
@@ -3901,7 +3901,7 @@
return false;
if (!var.present)
return false;
- if (~var.type & (AR_TYPE_CODE_MASK|AR_TYPE_WRITEABLE_MASK)) {
+ if (~var.type & (VMX_AR_TYPE_CODE_MASK|VMX_AR_TYPE_WRITEABLE_MASK)) {
if (var.dpl < rpl) /* DPL < RPL */
return false;
}
@@ -5759,73 +5759,9 @@
return kvm_mmu_page_fault(vcpu, gpa, error_code, NULL, 0);
}
-static u64 ept_rsvd_mask(u64 spte, int level)
-{
- int i;
- u64 mask = 0;
-
- for (i = 51; i > boot_cpu_data.x86_phys_bits; i--)
- mask |= (1ULL << i);
-
- if (level == 4)
- /* bits 7:3 reserved */
- mask |= 0xf8;
- else if (spte & (1ULL << 7))
- /*
- * 1GB/2MB page, bits 29:12 or 20:12 reserved respectively,
- * level == 1 if the hypervisor is using the ignored bit 7.
- */
- mask |= (PAGE_SIZE << ((level - 1) * 9)) - PAGE_SIZE;
- else if (level > 1)
- /* bits 6:3 reserved */
- mask |= 0x78;
-
- return mask;
-}
-
-static void ept_misconfig_inspect_spte(struct kvm_vcpu *vcpu, u64 spte,
- int level)
-{
- printk(KERN_ERR "%s: spte 0x%llx level %d\n", __func__, spte, level);
-
- /* 010b (write-only) */
- WARN_ON((spte & 0x7) == 0x2);
-
- /* 110b (write/execute) */
- WARN_ON((spte & 0x7) == 0x6);
-
- /* 100b (execute-only) and value not supported by logical processor */
- if (!cpu_has_vmx_ept_execute_only())
- WARN_ON((spte & 0x7) == 0x4);
-
- /* not 000b */
- if ((spte & 0x7)) {
- u64 rsvd_bits = spte & ept_rsvd_mask(spte, level);
-
- if (rsvd_bits != 0) {
- printk(KERN_ERR "%s: rsvd_bits = 0x%llx\n",
- __func__, rsvd_bits);
- WARN_ON(1);
- }
-
- /* bits 5:3 are _not_ reserved for large page or leaf page */
- if ((rsvd_bits & 0x38) == 0) {
- u64 ept_mem_type = (spte & 0x38) >> 3;
-
- if (ept_mem_type == 2 || ept_mem_type == 3 ||
- ept_mem_type == 7) {
- printk(KERN_ERR "%s: ept_mem_type=0x%llx\n",
- __func__, ept_mem_type);
- WARN_ON(1);
- }
- }
- }
-}
-
static int handle_ept_misconfig(struct kvm_vcpu *vcpu)
{
- u64 sptes[4];
- int nr_sptes, i, ret;
+ int ret;
gpa_t gpa;
gpa = vmcs_read64(GUEST_PHYSICAL_ADDRESS);
@@ -5846,13 +5782,7 @@
return 1;
/* It is the real ept misconfig */
- printk(KERN_ERR "EPT: Misconfiguration.\n");
- printk(KERN_ERR "EPT: GPA: 0x%llx\n", gpa);
-
- nr_sptes = kvm_mmu_get_spte_hierarchy(vcpu, gpa, sptes);
-
- for (i = PT64_ROOT_LEVEL; i > PT64_ROOT_LEVEL - nr_sptes; --i)
- ept_misconfig_inspect_spte(vcpu, sptes[i-1], i);
+ WARN_ON(1);
vcpu->run->exit_reason = KVM_EXIT_UNKNOWN;
vcpu->run->hw.hardware_exit_reason = EXIT_REASON_EPT_MISCONFIG;
@@ -6246,6 +6176,11 @@
return handle_nop(vcpu);
}
+static int handle_monitor_trap(struct kvm_vcpu *vcpu)
+{
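+	/* Nothing to do on a monitor trap flag exit; just resume the guest. */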
+ return 1;
+}
+
static int handle_monitor(struct kvm_vcpu *vcpu)
{
printk_once(KERN_WARNING "kvm: MONITOR instruction emulated as NOP!\n");
@@ -6408,8 +6343,12 @@
*/
static int get_vmx_mem_address(struct kvm_vcpu *vcpu,
unsigned long exit_qualification,
- u32 vmx_instruction_info, gva_t *ret)
+ u32 vmx_instruction_info, bool wr, gva_t *ret)
{
+ gva_t off;
+ bool exn;
+ struct kvm_segment s;
+
/*
* According to Vol. 3B, "Information for VM Exits Due to Instruction
* Execution", on an exit, vmx_instruction_info holds most of the
@@ -6434,22 +6373,63 @@
/* Addr = segment_base + offset */
/* offset = base + [index * scale] + displacement */
- *ret = vmx_get_segment_base(vcpu, seg_reg);
+ off = exit_qualification; /* holds the displacement */
if (base_is_valid)
- *ret += kvm_register_read(vcpu, base_reg);
+ off += kvm_register_read(vcpu, base_reg);
if (index_is_valid)
- *ret += kvm_register_read(vcpu, index_reg)<<scaling;
- *ret += exit_qualification; /* holds the displacement */
+ off += kvm_register_read(vcpu, index_reg)<<scaling;
+ vmx_get_segment(vcpu, &s, seg_reg);
+ *ret = s.base + off;
if (addr_size == 1) /* 32 bit */
*ret &= 0xffffffff;
- /*
- * TODO: throw #GP (and return 1) in various cases that the VM*
- * instructions require it - e.g., offset beyond segment limit,
- * unusable or unreadable/unwritable segment, non-canonical 64-bit
- * address, and so on. Currently these are not checked.
- */
+ /* Checks for #GP/#SS exceptions. */
+ exn = false;
+ if (is_protmode(vcpu)) {
+ /* Protected mode: apply checks for segment validity in the
+ * following order:
+ * - segment type check (#GP(0) may be thrown)
+ * - usability check (#GP(0)/#SS(0))
+ * - limit check (#GP(0)/#SS(0))
+ */
+ if (wr)
+ /* #GP(0) if the destination operand is located in a
+ * read-only data segment or any code segment.
+ */
+ exn = ((s.type & 0xa) == 0 || (s.type & 8));
+ else
+ /* #GP(0) if the source operand is located in an
+ * execute-only code segment
+ */
+ exn = ((s.type & 0xa) == 8);
+ }
+ if (exn) {
+ kvm_queue_exception_e(vcpu, GP_VECTOR, 0);
+ return 1;
+ }
+ if (is_long_mode(vcpu)) {
+ /* Long mode: #GP(0)/#SS(0) if the memory address is in a
+		/* Long mode: #GP(0)/#SS(0) if the memory address is in a
+		 * non-canonical form. This is the only check for long mode.
+		 */
+ exn = is_noncanonical_address(*ret);
+ } else if (is_protmode(vcpu)) {
+ /* Protected mode: #GP(0)/#SS(0) if the segment is unusable.
+ */
+ exn = (s.unusable != 0);
+ /* Protected mode: #GP(0)/#SS(0) if the memory
+ * operand is outside the segment limit.
+ */
+ exn = exn || (off + sizeof(u64) > s.limit);
+ }
+ if (exn) {
+ kvm_queue_exception_e(vcpu,
+ seg_reg == VCPU_SREG_SS ?
+ SS_VECTOR : GP_VECTOR,
+ 0);
+ return 1;
+ }
+
return 0;
}
@@ -6471,7 +6451,7 @@
int maxphyaddr = cpuid_maxphyaddr(vcpu);
if (get_vmx_mem_address(vcpu, vmcs_readl(EXIT_QUALIFICATION),
- vmcs_read32(VMX_INSTRUCTION_INFO), &gva))
+ vmcs_read32(VMX_INSTRUCTION_INFO), false, &gva))
return 1;
if (kvm_read_guest_virt(&vcpu->arch.emulate_ctxt, gva, &vmptr,
@@ -6999,7 +6979,7 @@
field_value);
} else {
if (get_vmx_mem_address(vcpu, exit_qualification,
- vmx_instruction_info, &gva))
+ vmx_instruction_info, true, &gva))
return 1;
/* _system ok, as nested_vmx_check_permission verified cpl=0 */
kvm_write_guest_virt_system(&vcpu->arch.emulate_ctxt, gva,
@@ -7036,7 +7016,7 @@
(((vmx_instruction_info) >> 3) & 0xf));
else {
if (get_vmx_mem_address(vcpu, exit_qualification,
- vmx_instruction_info, &gva))
+ vmx_instruction_info, false, &gva))
return 1;
if (kvm_read_guest_virt(&vcpu->arch.emulate_ctxt, gva,
&field_value, (is_64_bit_mode(vcpu) ? 8 : 4), &e)) {
@@ -7128,7 +7108,7 @@
return 1;
if (get_vmx_mem_address(vcpu, exit_qualification,
- vmx_instruction_info, &vmcs_gva))
+ vmx_instruction_info, true, &vmcs_gva))
return 1;
/* ok to use *_system, as nested_vmx_check_permission verified cpl=0 */
if (kvm_write_guest_virt_system(&vcpu->arch.emulate_ctxt, vmcs_gva,
@@ -7184,7 +7164,7 @@
* operand is read even if it isn't needed (e.g., for type==global)
*/
if (get_vmx_mem_address(vcpu, vmcs_readl(EXIT_QUALIFICATION),
- vmx_instruction_info, &gva))
+ vmx_instruction_info, false, &gva))
return 1;
if (kvm_read_guest_virt(&vcpu->arch.emulate_ctxt, gva, &operand,
sizeof(operand), &e)) {
@@ -7282,6 +7262,7 @@
[EXIT_REASON_EPT_MISCONFIG] = handle_ept_misconfig,
[EXIT_REASON_PAUSE_INSTRUCTION] = handle_pause,
[EXIT_REASON_MWAIT_INSTRUCTION] = handle_mwait,
+ [EXIT_REASON_MONITOR_TRAP_FLAG] = handle_monitor_trap,
[EXIT_REASON_MONITOR_INSTRUCTION] = handle_monitor,
[EXIT_REASON_INVEPT] = handle_invept,
[EXIT_REASON_INVVPID] = handle_invvpid,
@@ -7542,6 +7523,8 @@
return true;
case EXIT_REASON_MWAIT_INSTRUCTION:
return nested_cpu_has(vmcs12, CPU_BASED_MWAIT_EXITING);
+ case EXIT_REASON_MONITOR_TRAP_FLAG:
+ return nested_cpu_has(vmcs12, CPU_BASED_MONITOR_TRAP_FLAG);
case EXIT_REASON_MONITOR_INSTRUCTION:
return nested_cpu_has(vmcs12, CPU_BASED_MONITOR_EXITING);
case EXIT_REASON_PAUSE_INSTRUCTION:
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 5ef2560..4bbc2a1 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -29,6 +29,7 @@
#include "cpuid.h"
#include "assigned-dev.h"
#include "pmu.h"
+#include "hyperv.h"
#include <linux/clocksource.h>
#include <linux/interrupt.h>
@@ -221,11 +222,9 @@
void kvm_define_shared_msr(unsigned slot, u32 msr)
{
BUG_ON(slot >= KVM_NR_SHARED_MSRS);
+ shared_msrs_global.msrs[slot] = msr;
if (slot >= shared_msrs_global.nr)
shared_msrs_global.nr = slot + 1;
- shared_msrs_global.msrs[slot] = msr;
- /* we need ensured the shared_msr_global have been updated */
- smp_wmb();
}
EXPORT_SYMBOL_GPL(kvm_define_shared_msr);
@@ -526,7 +525,8 @@
}
for (i = 0; i < ARRAY_SIZE(pdpte); ++i) {
if (is_present_gpte(pdpte[i]) &&
- (pdpte[i] & vcpu->arch.mmu.rsvd_bits_mask[0][2])) {
+ (pdpte[i] &
+ vcpu->arch.mmu.guest_rsvd_check.rsvd_bits_mask[0][2])) {
ret = 0;
goto out;
}
@@ -949,6 +949,8 @@
MSR_KVM_SYSTEM_TIME_NEW, MSR_KVM_WALL_CLOCK_NEW,
HV_X64_MSR_GUEST_OS_ID, HV_X64_MSR_HYPERCALL,
HV_X64_MSR_TIME_REF_COUNT, HV_X64_MSR_REFERENCE_TSC,
+ HV_X64_MSR_CRASH_P0, HV_X64_MSR_CRASH_P1, HV_X64_MSR_CRASH_P2,
+ HV_X64_MSR_CRASH_P3, HV_X64_MSR_CRASH_P4, HV_X64_MSR_CRASH_CTL,
HV_X64_MSR_APIC_ASSIST_PAGE, MSR_KVM_ASYNC_PF_EN, MSR_KVM_STEAL_TIME,
MSR_KVM_PV_EOI_EN,
@@ -1217,11 +1219,6 @@
__func__, base_khz, scaled_khz, shift, *pmultiplier);
}
-static inline u64 get_kernel_ns(void)
-{
- return ktime_get_boot_ns();
-}
-
#ifdef CONFIG_X86_64
static atomic_t kvm_guest_has_master_clock = ATOMIC_INIT(0);
#endif
@@ -1869,123 +1866,6 @@
return r;
}
-static bool kvm_hv_hypercall_enabled(struct kvm *kvm)
-{
- return kvm->arch.hv_hypercall & HV_X64_MSR_HYPERCALL_ENABLE;
-}
-
-static bool kvm_hv_msr_partition_wide(u32 msr)
-{
- bool r = false;
- switch (msr) {
- case HV_X64_MSR_GUEST_OS_ID:
- case HV_X64_MSR_HYPERCALL:
- case HV_X64_MSR_REFERENCE_TSC:
- case HV_X64_MSR_TIME_REF_COUNT:
- r = true;
- break;
- }
-
- return r;
-}
-
-static int set_msr_hyperv_pw(struct kvm_vcpu *vcpu, u32 msr, u64 data)
-{
- struct kvm *kvm = vcpu->kvm;
-
- switch (msr) {
- case HV_X64_MSR_GUEST_OS_ID:
- kvm->arch.hv_guest_os_id = data;
- /* setting guest os id to zero disables hypercall page */
- if (!kvm->arch.hv_guest_os_id)
- kvm->arch.hv_hypercall &= ~HV_X64_MSR_HYPERCALL_ENABLE;
- break;
- case HV_X64_MSR_HYPERCALL: {
- u64 gfn;
- unsigned long addr;
- u8 instructions[4];
-
- /* if guest os id is not set hypercall should remain disabled */
- if (!kvm->arch.hv_guest_os_id)
- break;
- if (!(data & HV_X64_MSR_HYPERCALL_ENABLE)) {
- kvm->arch.hv_hypercall = data;
- break;
- }
- gfn = data >> HV_X64_MSR_HYPERCALL_PAGE_ADDRESS_SHIFT;
- addr = gfn_to_hva(kvm, gfn);
- if (kvm_is_error_hva(addr))
- return 1;
- kvm_x86_ops->patch_hypercall(vcpu, instructions);
- ((unsigned char *)instructions)[3] = 0xc3; /* ret */
- if (__copy_to_user((void __user *)addr, instructions, 4))
- return 1;
- kvm->arch.hv_hypercall = data;
- mark_page_dirty(kvm, gfn);
- break;
- }
- case HV_X64_MSR_REFERENCE_TSC: {
- u64 gfn;
- HV_REFERENCE_TSC_PAGE tsc_ref;
- memset(&tsc_ref, 0, sizeof(tsc_ref));
- kvm->arch.hv_tsc_page = data;
- if (!(data & HV_X64_MSR_TSC_REFERENCE_ENABLE))
- break;
- gfn = data >> HV_X64_MSR_TSC_REFERENCE_ADDRESS_SHIFT;
- if (kvm_write_guest(kvm, gfn << HV_X64_MSR_TSC_REFERENCE_ADDRESS_SHIFT,
- &tsc_ref, sizeof(tsc_ref)))
- return 1;
- mark_page_dirty(kvm, gfn);
- break;
- }
- default:
- vcpu_unimpl(vcpu, "HYPER-V unimplemented wrmsr: 0x%x "
- "data 0x%llx\n", msr, data);
- return 1;
- }
- return 0;
-}
-
-static int set_msr_hyperv(struct kvm_vcpu *vcpu, u32 msr, u64 data)
-{
- switch (msr) {
- case HV_X64_MSR_APIC_ASSIST_PAGE: {
- u64 gfn;
- unsigned long addr;
-
- if (!(data & HV_X64_MSR_APIC_ASSIST_PAGE_ENABLE)) {
- vcpu->arch.hv_vapic = data;
- if (kvm_lapic_enable_pv_eoi(vcpu, 0))
- return 1;
- break;
- }
- gfn = data >> HV_X64_MSR_APIC_ASSIST_PAGE_ADDRESS_SHIFT;
- addr = kvm_vcpu_gfn_to_hva(vcpu, gfn);
- if (kvm_is_error_hva(addr))
- return 1;
- if (__clear_user((void __user *)addr, PAGE_SIZE))
- return 1;
- vcpu->arch.hv_vapic = data;
- kvm_vcpu_mark_page_dirty(vcpu, gfn);
- if (kvm_lapic_enable_pv_eoi(vcpu, gfn_to_gpa(gfn) | KVM_MSR_ENABLED))
- return 1;
- break;
- }
- case HV_X64_MSR_EOI:
- return kvm_hv_vapic_msr_write(vcpu, APIC_EOI, data);
- case HV_X64_MSR_ICR:
- return kvm_hv_vapic_msr_write(vcpu, APIC_ICR, data);
- case HV_X64_MSR_TPR:
- return kvm_hv_vapic_msr_write(vcpu, APIC_TASKPRI, data);
- default:
- vcpu_unimpl(vcpu, "HYPER-V unimplemented wrmsr: 0x%x "
- "data 0x%llx\n", msr, data);
- return 1;
- }
-
- return 0;
-}
-
static int kvm_pv_enable_async_pf(struct kvm_vcpu *vcpu, u64 data)
{
gpa_t gpa = data & ~0x3f;
@@ -2105,7 +1985,7 @@
if (guest_cpuid_has_tsc_adjust(vcpu)) {
if (!msr_info->host_initiated) {
s64 adj = data - vcpu->arch.ia32_tsc_adjust_msr;
- kvm_x86_ops->adjust_tsc_offset(vcpu, adj, true);
+ adjust_tsc_offset_guest(vcpu, adj);
}
vcpu->arch.ia32_tsc_adjust_msr = data;
}
@@ -2224,15 +2104,10 @@
*/
break;
case HV_X64_MSR_GUEST_OS_ID ... HV_X64_MSR_SINT15:
- if (kvm_hv_msr_partition_wide(msr)) {
- int r;
- mutex_lock(&vcpu->kvm->lock);
- r = set_msr_hyperv_pw(vcpu, msr, data);
- mutex_unlock(&vcpu->kvm->lock);
- return r;
- } else
- return set_msr_hyperv(vcpu, msr, data);
- break;
+ case HV_X64_MSR_CRASH_P0 ... HV_X64_MSR_CRASH_P4:
+ case HV_X64_MSR_CRASH_CTL:
+ return kvm_hv_set_msr_common(vcpu, msr, data,
+ msr_info->host_initiated);
case MSR_IA32_BBL_CR_CTL3:
/* Drop writes to this legacy MSR -- see rdmsr
* counterpart for further detail.
@@ -2315,68 +2190,6 @@
return 0;
}
-static int get_msr_hyperv_pw(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
-{
- u64 data = 0;
- struct kvm *kvm = vcpu->kvm;
-
- switch (msr) {
- case HV_X64_MSR_GUEST_OS_ID:
- data = kvm->arch.hv_guest_os_id;
- break;
- case HV_X64_MSR_HYPERCALL:
- data = kvm->arch.hv_hypercall;
- break;
- case HV_X64_MSR_TIME_REF_COUNT: {
- data =
- div_u64(get_kernel_ns() + kvm->arch.kvmclock_offset, 100);
- break;
- }
- case HV_X64_MSR_REFERENCE_TSC:
- data = kvm->arch.hv_tsc_page;
- break;
- default:
- vcpu_unimpl(vcpu, "Hyper-V unhandled rdmsr: 0x%x\n", msr);
- return 1;
- }
-
- *pdata = data;
- return 0;
-}
-
-static int get_msr_hyperv(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
-{
- u64 data = 0;
-
- switch (msr) {
- case HV_X64_MSR_VP_INDEX: {
- int r;
- struct kvm_vcpu *v;
- kvm_for_each_vcpu(r, v, vcpu->kvm) {
- if (v == vcpu) {
- data = r;
- break;
- }
- }
- break;
- }
- case HV_X64_MSR_EOI:
- return kvm_hv_vapic_msr_read(vcpu, APIC_EOI, pdata);
- case HV_X64_MSR_ICR:
- return kvm_hv_vapic_msr_read(vcpu, APIC_ICR, pdata);
- case HV_X64_MSR_TPR:
- return kvm_hv_vapic_msr_read(vcpu, APIC_TASKPRI, pdata);
- case HV_X64_MSR_APIC_ASSIST_PAGE:
- data = vcpu->arch.hv_vapic;
- break;
- default:
- vcpu_unimpl(vcpu, "Hyper-V unhandled rdmsr: 0x%x\n", msr);
- return 1;
- }
- *pdata = data;
- return 0;
-}
-
int kvm_get_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
{
switch (msr_info->index) {
@@ -2493,14 +2306,10 @@
msr_info->data = 0x20000000;
break;
case HV_X64_MSR_GUEST_OS_ID ... HV_X64_MSR_SINT15:
- if (kvm_hv_msr_partition_wide(msr_info->index)) {
- int r;
- mutex_lock(&vcpu->kvm->lock);
- r = get_msr_hyperv_pw(vcpu, msr_info->index, &msr_info->data);
- mutex_unlock(&vcpu->kvm->lock);
- return r;
- } else
- return get_msr_hyperv(vcpu, msr_info->index, &msr_info->data);
+ case HV_X64_MSR_CRASH_P0 ... HV_X64_MSR_CRASH_P4:
+ case HV_X64_MSR_CRASH_CTL:
+ return kvm_hv_get_msr_common(vcpu,
+ msr_info->index, &msr_info->data);
break;
case MSR_IA32_BBL_CR_CTL3:
/* This legacy MSR exists but isn't fully documented in current
@@ -2651,6 +2460,7 @@
case KVM_CAP_TSC_DEADLINE_TIMER:
case KVM_CAP_ENABLE_CAP_VM:
case KVM_CAP_DISABLE_QUIRKS:
+ case KVM_CAP_SET_BOOT_CPU_ID:
#ifdef CONFIG_KVM_DEVICE_ASSIGNMENT
case KVM_CAP_ASSIGN_DEV_IRQ:
case KVM_CAP_PCI_2_3:
@@ -3817,30 +3627,25 @@
r = kvm_ioapic_init(kvm);
if (r) {
mutex_lock(&kvm->slots_lock);
- kvm_io_bus_unregister_dev(kvm, KVM_PIO_BUS,
- &vpic->dev_master);
- kvm_io_bus_unregister_dev(kvm, KVM_PIO_BUS,
- &vpic->dev_slave);
- kvm_io_bus_unregister_dev(kvm, KVM_PIO_BUS,
- &vpic->dev_eclr);
+ kvm_destroy_pic(vpic);
mutex_unlock(&kvm->slots_lock);
- kfree(vpic);
goto create_irqchip_unlock;
}
} else
goto create_irqchip_unlock;
- smp_wmb();
- kvm->arch.vpic = vpic;
- smp_wmb();
r = kvm_setup_default_irq_routing(kvm);
if (r) {
mutex_lock(&kvm->slots_lock);
mutex_lock(&kvm->irq_lock);
kvm_ioapic_destroy(kvm);
- kvm_destroy_pic(kvm);
+ kvm_destroy_pic(vpic);
mutex_unlock(&kvm->irq_lock);
mutex_unlock(&kvm->slots_lock);
+ goto create_irqchip_unlock;
}
+ /* Write kvm->irq_routing before kvm->arch.vpic. */
+ smp_wmb();
+ kvm->arch.vpic = vpic;
create_irqchip_unlock:
mutex_unlock(&kvm->lock);
break;
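The tail of this hunk is the standard publication pattern: the routing table is fully set up before a single smp_wmb(), and only then is the vpic pointer stored, so a reader that observes kvm->arch.vpic non-NULL also observes valid routing. The idiom in isolation (kernel-style sketch, names hypothetical):

struct widget { int a, b; };
static struct widget *published;

static void publish_widget(struct widget *w)
{
	w->a = 1;		/* initialize the object fully... */
	w->b = 2;
	smp_wmb();		/* ...then order init before the pointer store */
	published = w;		/* readers that see a non-NULL pointer
				 * (with a matching read barrier or an
				 * address dependency) also see a and b */
}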
@@ -3967,6 +3772,15 @@
r = kvm_vm_ioctl_reinject(kvm, &control);
break;
}
+ case KVM_SET_BOOT_CPU_ID:
+ r = 0;
+ mutex_lock(&kvm->lock);
+ if (atomic_read(&kvm->online_vcpus) != 0)
+ r = -EBUSY;
+ else
+ kvm->arch.bsp_vcpu_id = arg;
+ mutex_unlock(&kvm->lock);
+ break;
case KVM_XEN_HVM_CONFIG: {
r = -EFAULT;
if (copy_from_user(&kvm->arch.xen_hvm_config, argp,
@@ -5882,66 +5696,6 @@
}
EXPORT_SYMBOL_GPL(kvm_emulate_halt);
-int kvm_hv_hypercall(struct kvm_vcpu *vcpu)
-{
- u64 param, ingpa, outgpa, ret;
- uint16_t code, rep_idx, rep_cnt, res = HV_STATUS_SUCCESS, rep_done = 0;
- bool fast, longmode;
-
- /*
- * hypercall generates UD from non zero cpl and real mode
- * per HYPER-V spec
- */
- if (kvm_x86_ops->get_cpl(vcpu) != 0 || !is_protmode(vcpu)) {
- kvm_queue_exception(vcpu, UD_VECTOR);
- return 0;
- }
-
- longmode = is_64_bit_mode(vcpu);
-
- if (!longmode) {
- param = ((u64)kvm_register_read(vcpu, VCPU_REGS_RDX) << 32) |
- (kvm_register_read(vcpu, VCPU_REGS_RAX) & 0xffffffff);
- ingpa = ((u64)kvm_register_read(vcpu, VCPU_REGS_RBX) << 32) |
- (kvm_register_read(vcpu, VCPU_REGS_RCX) & 0xffffffff);
- outgpa = ((u64)kvm_register_read(vcpu, VCPU_REGS_RDI) << 32) |
- (kvm_register_read(vcpu, VCPU_REGS_RSI) & 0xffffffff);
- }
-#ifdef CONFIG_X86_64
- else {
- param = kvm_register_read(vcpu, VCPU_REGS_RCX);
- ingpa = kvm_register_read(vcpu, VCPU_REGS_RDX);
- outgpa = kvm_register_read(vcpu, VCPU_REGS_R8);
- }
-#endif
-
- code = param & 0xffff;
- fast = (param >> 16) & 0x1;
- rep_cnt = (param >> 32) & 0xfff;
- rep_idx = (param >> 48) & 0xfff;
-
- trace_kvm_hv_hypercall(code, fast, rep_cnt, rep_idx, ingpa, outgpa);
-
- switch (code) {
- case HV_X64_HV_NOTIFY_LONG_SPIN_WAIT:
- kvm_vcpu_on_spin(vcpu);
- break;
- default:
- res = HV_STATUS_INVALID_HYPERCALL_CODE;
- break;
- }
-
- ret = res | (((u64)rep_done & 0xfff) << 32);
- if (longmode) {
- kvm_register_write(vcpu, VCPU_REGS_RAX, ret);
- } else {
- kvm_register_write(vcpu, VCPU_REGS_RDX, ret >> 32);
- kvm_register_write(vcpu, VCPU_REGS_RAX, ret & 0xffffffff);
- }
-
- return 1;
-}
-
/*
* kvm_pv_kick_cpu_op: Kick a vcpu.
*
@@ -6327,6 +6081,7 @@
static void process_smi(struct kvm_vcpu *vcpu)
{
struct kvm_segment cs, ds;
+ struct desc_ptr dt;
char buf[512];
u32 cr0;
@@ -6359,6 +6114,10 @@
kvm_x86_ops->set_cr4(vcpu, 0);
+ /* Undocumented: IDT limit is set to zero on entry to SMM. */
+ dt.address = dt.size = 0;
+ kvm_x86_ops->set_idt(vcpu, &dt);
+
__kvm_set_dr(vcpu, 7, DR7_FIXED_1);
cs.selector = (vcpu->arch.smbase >> 4) & 0xffff;
@@ -6513,6 +6272,12 @@
vcpu_scan_ioapic(vcpu);
if (kvm_check_request(KVM_REQ_APIC_PAGE_RELOAD, vcpu))
kvm_vcpu_reload_apic_access_page(vcpu);
+ if (kvm_check_request(KVM_REQ_HV_CRASH, vcpu)) {
+ vcpu->run->exit_reason = KVM_EXIT_SYSTEM_EVENT;
+ vcpu->run->system_event.type = KVM_SYSTEM_EVENT_CRASH;
+ r = 0;
+ goto out;
+ }
}
if (kvm_check_request(KVM_REQ_EVENT, vcpu) || req_int_win) {
@@ -7535,6 +7300,17 @@
kvm_x86_ops->check_processor_compatibility(rtn);
}
+bool kvm_vcpu_is_reset_bsp(struct kvm_vcpu *vcpu)
+{
+ return vcpu->kvm->arch.bsp_vcpu_id == vcpu->vcpu_id;
+}
+EXPORT_SYMBOL_GPL(kvm_vcpu_is_reset_bsp);
+
+bool kvm_vcpu_is_bsp(struct kvm_vcpu *vcpu)
+{
+ return (vcpu->arch.apic_base & MSR_IA32_APICBASE_BSP) != 0;
+}
+
bool kvm_vcpu_compatible(struct kvm_vcpu *vcpu)
{
return irqchip_in_kernel(vcpu->kvm) == (vcpu->arch.apic != NULL);
diff --git a/arch/x86/kvm/x86.h b/arch/x86/kvm/x86.h
index 0ca2f3e..2f822cd 100644
--- a/arch/x86/kvm/x86.h
+++ b/arch/x86/kvm/x86.h
@@ -147,6 +147,11 @@
return kvm_register_write(vcpu, reg, val);
}
+static inline u64 get_kernel_ns(void)
+{
+ return ktime_get_boot_ns();
+}
+
static inline bool kvm_check_has_quirk(struct kvm *kvm, u64 quirk)
{
return !(kvm->arch.disabled_quirks & quirk);
diff --git a/arch/x86/math-emu/fpu_entry.c b/arch/x86/math-emu/fpu_entry.c
index f37e84a..3d8f2e4 100644
--- a/arch/x86/math-emu/fpu_entry.c
+++ b/arch/x86/math-emu/fpu_entry.c
@@ -29,7 +29,6 @@
#include <asm/uaccess.h>
#include <asm/traps.h>
-#include <asm/desc.h>
#include <asm/user.h>
#include <asm/fpu/internal.h>
@@ -181,7 +180,7 @@
math_abort(FPU_info, SIGILL);
}
- code_descriptor = LDT_DESCRIPTOR(FPU_CS);
+ code_descriptor = FPU_get_ldt_descriptor(FPU_CS);
if (SEG_D_SIZE(code_descriptor)) {
/* The above test may be wrong, the book is not clear */
/* Segmented 32 bit protected mode */
diff --git a/arch/x86/math-emu/fpu_system.h b/arch/x86/math-emu/fpu_system.h
index 9ccecb6..5e044d5 100644
--- a/arch/x86/math-emu/fpu_system.h
+++ b/arch/x86/math-emu/fpu_system.h
@@ -16,9 +16,24 @@
#include <linux/kernel.h>
#include <linux/mm.h>
-/* s is always from a cpu register, and the cpu does bounds checking
- * during register load --> no further bounds checks needed */
-#define LDT_DESCRIPTOR(s) (((struct desc_struct *)current->mm->context.ldt)[(s) >> 3])
+#include <asm/desc.h>
+#include <asm/mmu_context.h>
+
+static inline struct desc_struct FPU_get_ldt_descriptor(unsigned seg)
+{
+ static struct desc_struct zero_desc;
+ struct desc_struct ret = zero_desc;
+
+#ifdef CONFIG_MODIFY_LDT_SYSCALL
+ seg >>= 3;
+	mutex_lock(&current->mm->context.lock);
+ if (current->mm->context.ldt && seg < current->mm->context.ldt->size)
+ ret = current->mm->context.ldt->entries[seg];
+	mutex_unlock(&current->mm->context.lock);
+#endif
+ return ret;
+}
+
#define SEG_D_SIZE(x) ((x).b & (3 << 21))
#define SEG_G_BIT(x) ((x).b & (1 << 23))
#define SEG_GRANULARITY(x) (((x).b & (1 << 23)) ? 4096 : 1)
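For reference, the call sites shown in fpu_entry.c and get_address.c consume the new helper like this (a fragment restating the pattern above, not new driver code):

	struct desc_struct code_descriptor = FPU_get_ldt_descriptor(FPU_CS);

	if (SEG_D_SIZE(code_descriptor)) {
		/* 32-bit segmented protected mode */
	}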
diff --git a/arch/x86/math-emu/get_address.c b/arch/x86/math-emu/get_address.c
index 6ef5e99..8300db7 100644
--- a/arch/x86/math-emu/get_address.c
+++ b/arch/x86/math-emu/get_address.c
@@ -20,7 +20,6 @@
#include <linux/stddef.h>
#include <asm/uaccess.h>
-#include <asm/desc.h>
#include "fpu_system.h"
#include "exception.h"
@@ -158,7 +157,7 @@
addr->selector = PM_REG_(segment);
}
- descriptor = LDT_DESCRIPTOR(PM_REG_(segment));
+ descriptor = FPU_get_ldt_descriptor(addr->selector);
base_address = SEG_BASE_ADDR(descriptor);
address = base_address + offset;
limit = base_address
diff --git a/arch/x86/xen/Kconfig b/arch/x86/xen/Kconfig
index e88fda8..4841453 100644
--- a/arch/x86/xen/Kconfig
+++ b/arch/x86/xen/Kconfig
@@ -8,7 +8,7 @@
select PARAVIRT_CLOCK
select XEN_HAVE_PVMMU
depends on X86_64 || (X86_32 && X86_PAE)
- depends on X86_TSC
+ depends on X86_LOCAL_APIC && X86_TSC
help
This is the Linux Xen port. Enabling this will allow the
kernel to boot in a paravirtualized environment under the
@@ -17,7 +17,7 @@
config XEN_DOM0
def_bool y
depends on XEN && PCI_XEN && SWIOTLB_XEN
- depends on X86_LOCAL_APIC && X86_IO_APIC && ACPI && PCI
+ depends on X86_IO_APIC && ACPI && PCI
config XEN_PVHVM
def_bool y
diff --git a/arch/x86/xen/Makefile b/arch/x86/xen/Makefile
index 7322755..4b6e29a 100644
--- a/arch/x86/xen/Makefile
+++ b/arch/x86/xen/Makefile
@@ -13,13 +13,13 @@
obj-y := enlighten.o setup.o multicalls.o mmu.o irq.o \
time.o xen-asm.o xen-asm_$(BITS).o \
grant-table.o suspend.o platform-pci-unplug.o \
- p2m.o
+ p2m.o apic.o
obj-$(CONFIG_EVENT_TRACING) += trace.o
obj-$(CONFIG_SMP) += smp.o
obj-$(CONFIG_PARAVIRT_SPINLOCKS)+= spinlock.o
obj-$(CONFIG_XEN_DEBUG_FS) += debugfs.o
-obj-$(CONFIG_XEN_DOM0) += apic.o vga.o
+obj-$(CONFIG_XEN_DOM0) += vga.o
obj-$(CONFIG_SWIOTLB_XEN) += pci-swiotlb-xen.o
obj-$(CONFIG_XEN_EFI) += efi.o
diff --git a/arch/x86/xen/xen-ops.h b/arch/x86/xen/xen-ops.h
index c20fe29..2292721 100644
--- a/arch/x86/xen/xen-ops.h
+++ b/arch/x86/xen/xen-ops.h
@@ -101,17 +101,15 @@
#ifdef CONFIG_XEN_DOM0
void __init xen_init_vga(const struct dom0_vga_console_info *, size_t size);
-void __init xen_init_apic(void);
#else
static inline void __init xen_init_vga(const struct dom0_vga_console_info *info,
size_t size)
{
}
-static inline void __init xen_init_apic(void)
-{
-}
#endif
+void __init xen_init_apic(void);
+
#ifdef CONFIG_XEN_EFI
extern void xen_efi_init(void);
#else
diff --git a/block/blk-settings.c b/block/blk-settings.c
index 12600bf..e0057d0 100644
--- a/block/blk-settings.c
+++ b/block/blk-settings.c
@@ -241,8 +241,8 @@
* Description:
* Enables a low level driver to set a hard upper limit,
* max_hw_sectors, on the size of requests. max_hw_sectors is set by
- * the device driver based upon the combined capabilities of I/O
- * controller and storage device.
+ * the device driver based upon the capabilities of the I/O
+ * controller.
*
* max_sectors is a soft limit imposed by the block layer for
* filesystem type requests. This value can be overridden on a
diff --git a/crypto/authencesn.c b/crypto/authencesn.c
index a3da677..b8efe36 100644
--- a/crypto/authencesn.c
+++ b/crypto/authencesn.c
@@ -393,8 +393,6 @@
struct scatterlist *cipher = areq_ctx->cipher;
struct scatterlist *hsg = areq_ctx->hsg;
struct scatterlist *tsg = areq_ctx->tsg;
- struct scatterlist *assoc1;
- struct scatterlist *assoc2;
unsigned int ivsize = crypto_aead_ivsize(authenc_esn);
unsigned int cryptlen = req->cryptlen;
struct page *dstp;
@@ -412,27 +410,19 @@
cryptlen += ivsize;
}
- if (sg_is_last(assoc))
- return -EINVAL;
-
- assoc1 = assoc + 1;
- if (sg_is_last(assoc1))
- return -EINVAL;
-
- assoc2 = assoc + 2;
- if (!sg_is_last(assoc2))
+ if (assoc->length < 12)
return -EINVAL;
sg_init_table(hsg, 2);
- sg_set_page(hsg, sg_page(assoc), assoc->length, assoc->offset);
- sg_set_page(hsg + 1, sg_page(assoc2), assoc2->length, assoc2->offset);
+ sg_set_page(hsg, sg_page(assoc), 4, assoc->offset);
+ sg_set_page(hsg + 1, sg_page(assoc), 4, assoc->offset + 8);
sg_init_table(tsg, 1);
- sg_set_page(tsg, sg_page(assoc1), assoc1->length, assoc1->offset);
+ sg_set_page(tsg, sg_page(assoc), 4, assoc->offset + 4);
areq_ctx->cryptlen = cryptlen;
- areq_ctx->headlen = assoc->length + assoc2->length;
- areq_ctx->trailen = assoc1->length;
+ areq_ctx->headlen = 8;
+ areq_ctx->trailen = 4;
areq_ctx->sg = dst;
areq_ctx->complete = authenc_esn_geniv_ahash_done;
@@ -563,8 +553,6 @@
struct scatterlist *cipher = areq_ctx->cipher;
struct scatterlist *hsg = areq_ctx->hsg;
struct scatterlist *tsg = areq_ctx->tsg;
- struct scatterlist *assoc1;
- struct scatterlist *assoc2;
unsigned int ivsize = crypto_aead_ivsize(authenc_esn);
struct page *srcp;
u8 *vsrc;
@@ -580,27 +568,19 @@
cryptlen += ivsize;
}
- if (sg_is_last(assoc))
- return -EINVAL;
-
- assoc1 = assoc + 1;
- if (sg_is_last(assoc1))
- return -EINVAL;
-
- assoc2 = assoc + 2;
- if (!sg_is_last(assoc2))
+ if (assoc->length < 12)
return -EINVAL;
sg_init_table(hsg, 2);
- sg_set_page(hsg, sg_page(assoc), assoc->length, assoc->offset);
- sg_set_page(hsg + 1, sg_page(assoc2), assoc2->length, assoc2->offset);
+ sg_set_page(hsg, sg_page(assoc), 4, assoc->offset);
+ sg_set_page(hsg + 1, sg_page(assoc), 4, assoc->offset + 8);
sg_init_table(tsg, 1);
- sg_set_page(tsg, sg_page(assoc1), assoc1->length, assoc1->offset);
+ sg_set_page(tsg, sg_page(assoc), 4, assoc->offset + 4);
areq_ctx->cryptlen = cryptlen;
- areq_ctx->headlen = assoc->length + assoc2->length;
- areq_ctx->trailen = assoc1->length;
+ areq_ctx->headlen = 8;
+ areq_ctx->trailen = 4;
areq_ctx->sg = src;
areq_ctx->complete = authenc_esn_verify_ahash_done;
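Both authencesn hunks replace the old three-entry scatterlist walk with fixed offsets into a single associated-data buffer that must now be at least 12 bytes. The slicing they implement, as a byte map (illustrative; the field meanings are not spelled out in the diff):

/* 12 assoc bytes, sliced as above:
 *
 *   offset:   0         4         8         12
 *             +---------+---------+---------+
 *             |  hsg[0] |  tsg[0] |  hsg[1] |
 *             +---------+---------+---------+
 *
 * headlen = 8 (hsg[0] + hsg[1]), trailen = 4 (tsg[0]).
 */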
diff --git a/drivers/acpi/nfit.c b/drivers/acpi/nfit.c
index 628a42c..cf0fd96 100644
--- a/drivers/acpi/nfit.c
+++ b/drivers/acpi/nfit.c
@@ -702,11 +702,11 @@
u16 flags = to_nfit_memdev(dev)->flags;
return sprintf(buf, "%s%s%s%s%s\n",
- flags & ACPI_NFIT_MEM_SAVE_FAILED ? "save " : "",
- flags & ACPI_NFIT_MEM_RESTORE_FAILED ? "restore " : "",
- flags & ACPI_NFIT_MEM_FLUSH_FAILED ? "flush " : "",
- flags & ACPI_NFIT_MEM_ARMED ? "arm " : "",
- flags & ACPI_NFIT_MEM_HEALTH_OBSERVED ? "smart " : "");
+ flags & ACPI_NFIT_MEM_SAVE_FAILED ? "save_fail " : "",
+ flags & ACPI_NFIT_MEM_RESTORE_FAILED ? "restore_fail " : "",
+ flags & ACPI_NFIT_MEM_FLUSH_FAILED ? "flush_fail " : "",
+ flags & ACPI_NFIT_MEM_ARMED ? "not_armed " : "",
+ flags & ACPI_NFIT_MEM_HEALTH_OBSERVED ? "smart_event " : "");
}
static DEVICE_ATTR_RO(flags);
@@ -849,12 +849,12 @@
if ((mem_flags & ACPI_NFIT_MEM_FAILED_MASK) == 0)
continue;
- dev_info(acpi_desc->dev, "%s: failed: %s%s%s%s\n",
+ dev_info(acpi_desc->dev, "%s flags:%s%s%s%s\n",
nvdimm_name(nvdimm),
- mem_flags & ACPI_NFIT_MEM_SAVE_FAILED ? "save " : "",
- mem_flags & ACPI_NFIT_MEM_RESTORE_FAILED ? "restore " : "",
- mem_flags & ACPI_NFIT_MEM_FLUSH_FAILED ? "flush " : "",
- mem_flags & ACPI_NFIT_MEM_ARMED ? "arm " : "");
+ mem_flags & ACPI_NFIT_MEM_SAVE_FAILED ? " save_fail" : "",
+ mem_flags & ACPI_NFIT_MEM_RESTORE_FAILED ? " restore_fail":"",
+ mem_flags & ACPI_NFIT_MEM_FLUSH_FAILED ? " flush_fail" : "",
+ mem_flags & ACPI_NFIT_MEM_ARMED ? " not_armed" : "");
}
@@ -1024,7 +1024,7 @@
wmb_pmem();
}
-static u64 read_blk_stat(struct nfit_blk *nfit_blk, unsigned int bw)
+static u32 read_blk_stat(struct nfit_blk *nfit_blk, unsigned int bw)
{
struct nfit_blk_mmio *mmio = &nfit_blk->mmio[DCR];
u64 offset = nfit_blk->stat_offset + mmio->size * bw;
@@ -1032,7 +1032,7 @@
if (mmio->num_lines)
offset = to_interleave_offset(offset, mmio);
- return readq(mmio->base + offset);
+ return readl(mmio->base + offset);
}
static void write_blk_ctl(struct nfit_blk *nfit_blk, unsigned int bw,
diff --git a/drivers/acpi/video_detect.c b/drivers/acpi/video_detect.c
index 815f75e..2922f1f 100644
--- a/drivers/acpi/video_detect.c
+++ b/drivers/acpi/video_detect.c
@@ -32,6 +32,7 @@
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/types.h>
+#include <linux/workqueue.h>
#include <acpi/video.h>
ACPI_MODULE_NAME("video");
@@ -41,6 +42,7 @@
static bool backlight_notifier_registered;
static struct notifier_block backlight_nb;
+static struct work_struct backlight_notify_work;
static enum acpi_backlight_type acpi_backlight_cmdline = acpi_backlight_undef;
static enum acpi_backlight_type acpi_backlight_dmi = acpi_backlight_undef;
@@ -262,6 +264,13 @@
{ },
};
+/* This uses a workqueue to avoid various locking ordering issues */
+static void acpi_video_backlight_notify_work(struct work_struct *work)
+{
+ if (acpi_video_get_backlight_type() != acpi_backlight_video)
+ acpi_video_unregister_backlight();
+}
+
static int acpi_video_backlight_notify(struct notifier_block *nb,
unsigned long val, void *bd)
{
@@ -269,9 +278,8 @@
/* A raw bl registering may change video -> native */
if (backlight->props.type == BACKLIGHT_RAW &&
- val == BACKLIGHT_REGISTERED &&
- acpi_video_get_backlight_type() != acpi_backlight_video)
- acpi_video_unregister_backlight();
+ val == BACKLIGHT_REGISTERED)
+ schedule_work(&backlight_notify_work);
return NOTIFY_OK;
}
@@ -304,6 +312,8 @@
acpi_walk_namespace(ACPI_TYPE_DEVICE, ACPI_ROOT_OBJECT,
ACPI_UINT32_MAX, find_video, NULL,
&video_caps, NULL);
+ INIT_WORK(&backlight_notify_work,
+ acpi_video_backlight_notify_work);
backlight_nb.notifier_call = acpi_video_backlight_notify;
backlight_nb.priority = 0;
if (backlight_register_notifier(&backlight_nb) == 0)
diff --git a/drivers/ata/ahci_brcmstb.c b/drivers/ata/ahci_brcmstb.c
index ce1e3a8..14b7305 100644
--- a/drivers/ata/ahci_brcmstb.c
+++ b/drivers/ata/ahci_brcmstb.c
@@ -92,7 +92,7 @@
* Other architectures (e.g., ARM) either do not support big endian, or
* else leave I/O in little endian mode.
*/
- if (IS_ENABLED(CONFIG_MIPS) && IS_ENABLED(__BIG_ENDIAN))
+ if (IS_ENABLED(CONFIG_MIPS) && IS_ENABLED(CONFIG_CPU_BIG_ENDIAN))
return __raw_readl(addr);
else
return readl_relaxed(addr);
@@ -101,7 +101,7 @@
static inline void brcm_sata_writereg(u32 val, void __iomem *addr)
{
/* See brcm_sata_readreg() comments */
- if (IS_ENABLED(CONFIG_MIPS) && IS_ENABLED(__BIG_ENDIAN))
+ if (IS_ENABLED(CONFIG_MIPS) && IS_ENABLED(CONFIG_CPU_BIG_ENDIAN))
__raw_writel(val, addr);
else
writel_relaxed(val, addr);
@@ -209,6 +209,7 @@
priv->top_ctrl + SATA_TOP_CTRL_BUS_CTRL);
}
+#ifdef CONFIG_PM_SLEEP
static int brcm_ahci_suspend(struct device *dev)
{
struct ata_host *host = dev_get_drvdata(dev);
@@ -231,6 +232,7 @@
brcm_sata_phys_enable(priv);
return ahci_platform_resume(dev);
}
+#endif
static struct scsi_host_template ahci_platform_sht = {
AHCI_SHT(DRV_NAME),
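The ahci_brcmstb endianness fix above matters because IS_ENABLED() reports true only for macros defined to 1, the way CONFIG_* symbols are; __BIG_ENDIAN comes from the byte-order headers defined to 4321, so IS_ENABLED(__BIG_ENDIAN) was always false and big-endian MIPS silently took the little-endian path. A simplified rendering of the kernel's kconfig.h machinery behind it:

#define __ARG_PLACEHOLDER_1		0,
#define __take_second_arg(__ignored, val, ...)	val
#define __is_defined(x)			___is_defined(x)
#define ___is_defined(val)		____is_defined(__ARG_PLACEHOLDER_##val)
#define ____is_defined(arg1_or_junk)	__take_second_arg(arg1_or_junk 1, 0)

/* __is_defined(CONFIG_CPU_BIG_ENDIAN) -> 1 when the option is =y,
 * __is_defined(__BIG_ENDIAN)          -> 0, since 4321 is not 1. */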
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
index db5d9f7..790e0de 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -694,11 +694,11 @@
* RETURNS:
* Block address read from @tf.
*/
-u64 ata_tf_read_block(const struct ata_taskfile *tf, struct ata_device *dev)
+u64 ata_tf_read_block(struct ata_taskfile *tf, struct ata_device *dev)
{
u64 block = 0;
- if (!dev || tf->flags & ATA_TFLAG_LBA) {
+ if (tf->flags & ATA_TFLAG_LBA) {
if (tf->flags & ATA_TFLAG_LBA48) {
block |= (u64)tf->hob_lbah << 40;
block |= (u64)tf->hob_lbam << 32;
@@ -2147,24 +2147,6 @@
return 0;
}
-static void ata_dev_config_sense_reporting(struct ata_device *dev)
-{
- unsigned int err_mask;
-
- if (!ata_id_has_sense_reporting(dev->id))
- return;
-
- if (ata_id_sense_reporting_enabled(dev->id))
- return;
-
- err_mask = ata_dev_set_feature(dev, SETFEATURE_SENSE_DATA, 0x1);
- if (err_mask) {
- ata_dev_dbg(dev,
- "failed to enable Sense Data Reporting, Emask 0x%x\n",
- err_mask);
- }
-}
-
/**
* ata_dev_configure - Configure the specified ATA/ATAPI device
* @dev: Target device to configure
@@ -2387,7 +2369,7 @@
dev->devslp_timing[i] = sata_setting[j];
}
}
- ata_dev_config_sense_reporting(dev);
+
dev->cdb_len = 16;
}
@@ -4248,6 +4230,8 @@
ATA_HORKAGE_ZERO_AFTER_TRIM, },
{ "Samsung SSD 8*", NULL, ATA_HORKAGE_NO_NCQ_TRIM |
ATA_HORKAGE_ZERO_AFTER_TRIM, },
+ { "FCCT*M500*", NULL, ATA_HORKAGE_NO_NCQ_TRIM |
+ ATA_HORKAGE_ZERO_AFTER_TRIM, },
/* devices that don't properly handle TRIM commands */
{ "SuperSSpeed S238*", NULL, ATA_HORKAGE_NOTRIM, },
diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c
index 7465031..cb0508a 100644
--- a/drivers/ata/libata-eh.c
+++ b/drivers/ata/libata-eh.c
@@ -1592,8 +1592,6 @@
tf->hob_lbah = buf[10];
tf->nsect = buf[12];
tf->hob_nsect = buf[13];
- if (ata_id_has_ncq_autosense(dev->id))
- tf->auxiliary = buf[14] << 16 | buf[15] << 8 | buf[16];
return 0;
}
@@ -1630,70 +1628,6 @@
}
/**
- * ata_eh_request_sense - perform REQUEST_SENSE_DATA_EXT
- * @dev: device to perform REQUEST_SENSE_SENSE_DATA_EXT to
- * @sense_buf: result sense data buffer (SCSI_SENSE_BUFFERSIZE bytes long)
- * @dfl_sense_key: default sense key to use
- *
- * Perform REQUEST_SENSE_DATA_EXT after the device reported CHECK
- * SENSE. This function is EH helper.
- *
- * LOCKING:
- * Kernel thread context (may sleep).
- *
- * RETURNS:
- * encoded sense data on success, 0 on failure or if sense data
- * is not available.
- */
-static u32 ata_eh_request_sense(struct ata_queued_cmd *qc,
- struct scsi_cmnd *cmd)
-{
- struct ata_device *dev = qc->dev;
- struct ata_taskfile tf;
- unsigned int err_mask;
-
- if (!cmd)
- return 0;
-
- DPRINTK("ATA request sense\n");
- ata_dev_warn(dev, "request sense\n");
- if (!ata_id_sense_reporting_enabled(dev->id)) {
- ata_dev_warn(qc->dev, "sense data reporting disabled\n");
- return 0;
- }
- ata_tf_init(dev, &tf);
-
- tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
- tf.flags |= ATA_TFLAG_LBA | ATA_TFLAG_LBA48;
- tf.command = ATA_CMD_REQ_SENSE_DATA;
- tf.protocol = ATA_PROT_NODATA;
-
- err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
- /*
- * ACS-4 states:
- * The device may set the SENSE DATA AVAILABLE bit to one in the
- * STATUS field and clear the ERROR bit to zero in the STATUS field
- * to indicate that the command returned completion without an error
- * and the sense data described in table 306 is available.
- *
- * IOW the 'ATA_SENSE' bit might not be set even though valid
- * sense data is available.
- * So check for both.
- */
- if ((tf.command & ATA_SENSE) ||
- tf.lbah != 0 || tf.lbam != 0 || tf.lbal != 0) {
- ata_scsi_set_sense(cmd, tf.lbah, tf.lbam, tf.lbal);
- qc->flags |= ATA_QCFLAG_SENSE_VALID;
- ata_dev_warn(dev, "sense data %02x/%02x/%02x\n",
- tf.lbah, tf.lbam, tf.lbal);
- } else {
- ata_dev_warn(dev, "request sense failed stat %02x emask %x\n",
- tf.command, err_mask);
- }
- return err_mask;
-}
-
-/**
* atapi_eh_request_sense - perform ATAPI REQUEST_SENSE
* @dev: device to perform REQUEST_SENSE to
* @sense_buf: result sense data buffer (SCSI_SENSE_BUFFERSIZE bytes long)
@@ -1855,19 +1789,6 @@
memcpy(&qc->result_tf, &tf, sizeof(tf));
qc->result_tf.flags = ATA_TFLAG_ISADDR | ATA_TFLAG_LBA | ATA_TFLAG_LBA48;
qc->err_mask |= AC_ERR_DEV | AC_ERR_NCQ;
- if (qc->result_tf.auxiliary) {
- char sense_key, asc, ascq;
-
- sense_key = (qc->result_tf.auxiliary >> 16) & 0xff;
- asc = (qc->result_tf.auxiliary >> 8) & 0xff;
- ascq = qc->result_tf.auxiliary & 0xff;
- ata_dev_dbg(dev, "NCQ Autosense %02x/%02x/%02x\n",
- sense_key, asc, ascq);
- ata_scsi_set_sense(qc->scsicmd, sense_key, asc, ascq);
- ata_scsi_set_sense_information(qc->scsicmd, &qc->result_tf);
- qc->flags |= ATA_QCFLAG_SENSE_VALID;
- }
-
ehc->i.err_mask &= ~AC_ERR_DEV;
}
@@ -1897,27 +1818,6 @@
return ATA_EH_RESET;
}
- /*
- * Sense data reporting does not work if the
- * device fault bit is set.
- */
- if ((stat & ATA_SENSE) && !(stat & ATA_DF) &&
- !(qc->flags & ATA_QCFLAG_SENSE_VALID)) {
- if (!(qc->ap->pflags & ATA_PFLAG_FROZEN)) {
- tmp = ata_eh_request_sense(qc, qc->scsicmd);
- if (tmp)
- qc->err_mask |= tmp;
- else
- ata_scsi_set_sense_information(qc->scsicmd, tf);
- } else {
- ata_dev_warn(qc->dev, "sense data available but port frozen\n");
- }
- }
-
- /* Set by NCQ autosense or request sense above */
- if (qc->flags & ATA_QCFLAG_SENSE_VALID)
- return 0;
-
if (stat & (ATA_ERR | ATA_DF))
qc->err_mask |= AC_ERR_DEV;
else
@@ -2661,15 +2561,14 @@
#ifdef CONFIG_ATA_VERBOSE_ERROR
if (res->command & (ATA_BUSY | ATA_DRDY | ATA_DF | ATA_DRQ |
- ATA_SENSE | ATA_ERR)) {
+ ATA_ERR)) {
if (res->command & ATA_BUSY)
ata_dev_err(qc->dev, "status: { Busy }\n");
else
- ata_dev_err(qc->dev, "status: { %s%s%s%s%s}\n",
+ ata_dev_err(qc->dev, "status: { %s%s%s%s}\n",
res->command & ATA_DRDY ? "DRDY " : "",
res->command & ATA_DF ? "DF " : "",
res->command & ATA_DRQ ? "DRQ " : "",
- res->command & ATA_SENSE ? "SENSE " : "",
res->command & ATA_ERR ? "ERR " : "");
}
diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
index 641a61a..0d7f0da 100644
--- a/drivers/ata/libata-scsi.c
+++ b/drivers/ata/libata-scsi.c
@@ -270,28 +270,13 @@
ata_scsi_park_show, ata_scsi_park_store);
EXPORT_SYMBOL_GPL(dev_attr_unload_heads);
-void ata_scsi_set_sense(struct scsi_cmnd *cmd, u8 sk, u8 asc, u8 ascq)
+static void ata_scsi_set_sense(struct scsi_cmnd *cmd, u8 sk, u8 asc, u8 ascq)
{
- if (!cmd)
- return;
-
cmd->result = (DRIVER_SENSE << 24) | SAM_STAT_CHECK_CONDITION;
scsi_build_sense_buffer(0, cmd->sense_buffer, sk, asc, ascq);
}
-void ata_scsi_set_sense_information(struct scsi_cmnd *cmd,
- const struct ata_taskfile *tf)
-{
- u64 information;
-
- if (!cmd)
- return;
-
- information = ata_tf_read_block(tf, NULL);
- scsi_set_sense_information(cmd->sense_buffer, information);
-}
-
static ssize_t
ata_scsi_em_message_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
@@ -1792,9 +1777,7 @@
((cdb[2] & 0x20) || need_sense)) {
ata_gen_passthru_sense(qc);
} else {
- if (qc->flags & ATA_QCFLAG_SENSE_VALID) {
- cmd->result = SAM_STAT_CHECK_CONDITION;
- } else if (!need_sense) {
+ if (!need_sense) {
cmd->result = SAM_STAT_GOOD;
} else {
/* TODO: decide which descriptor format to use
diff --git a/drivers/ata/libata.h b/drivers/ata/libata.h
index a998a17..f840ca1 100644
--- a/drivers/ata/libata.h
+++ b/drivers/ata/libata.h
@@ -67,8 +67,7 @@
extern int ata_build_rw_tf(struct ata_taskfile *tf, struct ata_device *dev,
u64 block, u32 n_block, unsigned int tf_flags,
unsigned int tag);
-extern u64 ata_tf_read_block(const struct ata_taskfile *tf,
- struct ata_device *dev);
+extern u64 ata_tf_read_block(struct ata_taskfile *tf, struct ata_device *dev);
extern unsigned ata_exec_internal(struct ata_device *dev,
struct ata_taskfile *tf, const u8 *cdb,
int dma_dir, void *buf, unsigned int buflen,
@@ -138,9 +137,6 @@
struct scsi_host_template *sht);
extern void ata_scsi_scan_host(struct ata_port *ap, int sync);
extern int ata_scsi_offline_dev(struct ata_device *dev);
-extern void ata_scsi_set_sense(struct scsi_cmnd *cmd, u8 sk, u8 asc, u8 ascq);
-extern void ata_scsi_set_sense_information(struct scsi_cmnd *cmd,
- const struct ata_taskfile *tf);
extern void ata_scsi_media_change_notify(struct ata_device *dev);
extern void ata_scsi_hotplug(struct work_struct *work);
extern void ata_schedule_scsi_eh(struct Scsi_Host *shost);
diff --git a/drivers/ata/sata_sx4.c b/drivers/ata/sata_sx4.c
index 3a18a8a..fab504f 100644
--- a/drivers/ata/sata_sx4.c
+++ b/drivers/ata/sata_sx4.c
@@ -1238,8 +1238,12 @@
readl(mmio + PDC_SDRAM_CONTROL);
/* Turn on for ECC */
- pdc20621_i2c_read(host, PDC_DIMM0_SPD_DEV_ADDRESS,
- PDC_DIMM_SPD_TYPE, &spd0);
+ if (!pdc20621_i2c_read(host, PDC_DIMM0_SPD_DEV_ADDRESS,
+ PDC_DIMM_SPD_TYPE, &spd0)) {
+ pr_err("Failed in i2c read: device=%#x, subaddr=%#x\n",
+ PDC_DIMM0_SPD_DEV_ADDRESS, PDC_DIMM_SPD_TYPE);
+ return 1;
+ }
if (spd0 == 0x02) {
data |= (0x01 << 16);
writel(data, mmio + PDC_SDRAM_CONTROL);
@@ -1380,8 +1384,12 @@
	/* ECC initialization. */
- pdc20621_i2c_read(host, PDC_DIMM0_SPD_DEV_ADDRESS,
- PDC_DIMM_SPD_TYPE, &spd0);
+ if (!pdc20621_i2c_read(host, PDC_DIMM0_SPD_DEV_ADDRESS,
+ PDC_DIMM_SPD_TYPE, &spd0)) {
+ pr_err("Failed in i2c read: device=%#x, subaddr=%#x\n",
+ PDC_DIMM0_SPD_DEV_ADDRESS, PDC_DIMM_SPD_TYPE);
+ return 1;
+ }
if (spd0 == 0x02) {
void *buf;
VPRINTK("Start ECC initialization\n");
diff --git a/drivers/base/regmap/regcache-rbtree.c b/drivers/base/regmap/regcache-rbtree.c
index 81751a4..56486d9 100644
--- a/drivers/base/regmap/regcache-rbtree.c
+++ b/drivers/base/regmap/regcache-rbtree.c
@@ -296,11 +296,20 @@
if (!blk)
return -ENOMEM;
- present = krealloc(rbnode->cache_present,
- BITS_TO_LONGS(blklen) * sizeof(*present), GFP_KERNEL);
- if (!present) {
- kfree(blk);
- return -ENOMEM;
+ if (BITS_TO_LONGS(blklen) > BITS_TO_LONGS(rbnode->blklen)) {
+ present = krealloc(rbnode->cache_present,
+ BITS_TO_LONGS(blklen) * sizeof(*present),
+ GFP_KERNEL);
+ if (!present) {
+ kfree(blk);
+ return -ENOMEM;
+ }
+
+ memset(present + BITS_TO_LONGS(rbnode->blklen), 0,
+ (BITS_TO_LONGS(blklen) - BITS_TO_LONGS(rbnode->blklen))
+ * sizeof(*present));
+ } else {
+ present = rbnode->cache_present;
}
/* insert the register value in the correct place in the rbnode block */
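The regcache-rbtree guard above fixes two issues with the old unconditional krealloc(): it could shrink the cache-present bitmap, and krealloc() leaves grown memory uninitialized. The sizing logic in isolation (userspace sketch; grow_bitmap is a hypothetical name):

#include <stdlib.h>
#include <string.h>

#define BITS_PER_LONG	 (8 * sizeof(long))
#define BITS_TO_LONGS(n) (((n) + BITS_PER_LONG - 1) / BITS_PER_LONG)

/* Grow a bitmap from old_bits to new_bits, zeroing only the new
 * longs and never shrinking, mirroring the fix above. */
static unsigned long *grow_bitmap(unsigned long *map,
				  size_t old_bits, size_t new_bits)
{
	size_t oldl = BITS_TO_LONGS(old_bits);
	size_t newl = BITS_TO_LONGS(new_bits);
	unsigned long *tmp;

	if (newl <= oldl)
		return map;		/* reuse the existing bitmap */

	tmp = realloc(map, newl * sizeof(*tmp));
	if (!tmp)
		return NULL;		/* caller still owns 'map' */

	memset(tmp + oldl, 0, (newl - oldl) * sizeof(*tmp));
	return tmp;
}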
diff --git a/drivers/block/mtip32xx/mtip32xx.c b/drivers/block/mtip32xx/mtip32xx.c
index 4a2ef09..f504232 100644
--- a/drivers/block/mtip32xx/mtip32xx.c
+++ b/drivers/block/mtip32xx/mtip32xx.c
@@ -3756,6 +3756,14 @@
struct mtip_cmd *cmd = blk_mq_rq_to_pdu(rq);
u32 host_cap_64 = readl(dd->mmio + HOST_CAP) & HOST_CAP_64;
+ /*
+ * For flush requests, request_idx starts at the end of the
+ * tag space. Since we don't support FLUSH/FUA, simply return
+ * 0 as there's nothing to be done.
+ */
+ if (request_idx >= MTIP_MAX_COMMAND_SLOTS)
+ return 0;
+
cmd->command = dmam_alloc_coherent(&dd->pdev->dev, CMD_DMA_ALLOC_SZ,
&cmd->command_dma, GFP_KERNEL);
if (!cmd->command)
diff --git a/drivers/block/xen-blkback/blkback.c b/drivers/block/xen-blkback/blkback.c
index ced9677..954c002 100644
--- a/drivers/block/xen-blkback/blkback.c
+++ b/drivers/block/xen-blkback/blkback.c
@@ -369,8 +369,8 @@
return;
}
- if (work_pending(&blkif->persistent_purge_work)) {
- pr_alert_ratelimited("Scheduled work from previous purge is still pending, cannot purge list\n");
+ if (work_busy(&blkif->persistent_purge_work)) {
+ pr_alert_ratelimited("Scheduled work from previous purge is still busy, cannot purge list\n");
return;
}
diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c
index 6d89ed3..7a8a73f 100644
--- a/drivers/block/xen-blkfront.c
+++ b/drivers/block/xen-blkfront.c
@@ -179,6 +179,7 @@
((_segs + SEGS_PER_INDIRECT_FRAME - 1)/SEGS_PER_INDIRECT_FRAME)
static int blkfront_setup_indirect(struct blkfront_info *info);
+static int blkfront_gather_backend_features(struct blkfront_info *info);
static int get_id_from_freelist(struct blkfront_info *info)
{
@@ -1128,8 +1129,10 @@
* Add the used indirect page back to the list of
* available pages for indirect grefs.
*/
- indirect_page = pfn_to_page(s->indirect_grants[i]->pfn);
- list_add(&indirect_page->lru, &info->indirect_pages);
+ if (!info->feature_persistent) {
+ indirect_page = pfn_to_page(s->indirect_grants[i]->pfn);
+ list_add(&indirect_page->lru, &info->indirect_pages);
+ }
s->indirect_grants[i]->gref = GRANT_INVALID_REF;
list_add_tail(&s->indirect_grants[i]->node, &info->grants);
}
@@ -1519,7 +1522,7 @@
info->shadow_free = info->ring.req_prod_pvt;
info->shadow[BLK_RING_SIZE(info)-1].req.u.rw.id = 0x0fffffff;
- rc = blkfront_setup_indirect(info);
+ rc = blkfront_gather_backend_features(info);
if (rc) {
kfree(copy);
return rc;
@@ -1720,20 +1723,13 @@
static int blkfront_setup_indirect(struct blkfront_info *info)
{
- unsigned int indirect_segments, segs;
+ unsigned int segs;
int err, i;
- err = xenbus_gather(XBT_NIL, info->xbdev->otherend,
- "feature-max-indirect-segments", "%u", &indirect_segments,
- NULL);
- if (err) {
- info->max_indirect_segments = 0;
+ if (info->max_indirect_segments == 0)
segs = BLKIF_MAX_SEGMENTS_PER_REQUEST;
- } else {
- info->max_indirect_segments = min(indirect_segments,
- xen_blkif_max_segments);
+ else
segs = info->max_indirect_segments;
- }
err = fill_grant_buffer(info, (segs + INDIRECT_GREFS(segs)) * BLK_RING_SIZE(info));
if (err)
@@ -1797,6 +1793,68 @@
}
/*
+ * Gather all backend feature-*
+ */
+static int blkfront_gather_backend_features(struct blkfront_info *info)
+{
+ int err;
+ int barrier, flush, discard, persistent;
+ unsigned int indirect_segments;
+
+ info->feature_flush = 0;
+
+ err = xenbus_gather(XBT_NIL, info->xbdev->otherend,
+ "feature-barrier", "%d", &barrier,
+ NULL);
+
+ /*
+ * If there's no "feature-barrier" defined, then it means
+ * we're dealing with a very old backend which writes
+ * synchronously; nothing to do.
+ *
+ * If there are barriers, then we use flush.
+ */
+ if (!err && barrier)
+ info->feature_flush = REQ_FLUSH | REQ_FUA;
+ /*
+	 * And if "feature-flush-cache" is advertised, prefer it
+	 * over barriers.
+ */
+ err = xenbus_gather(XBT_NIL, info->xbdev->otherend,
+ "feature-flush-cache", "%d", &flush,
+ NULL);
+
+ if (!err && flush)
+ info->feature_flush = REQ_FLUSH;
+
+ err = xenbus_gather(XBT_NIL, info->xbdev->otherend,
+ "feature-discard", "%d", &discard,
+ NULL);
+
+ if (!err && discard)
+ blkfront_setup_discard(info);
+
+ err = xenbus_gather(XBT_NIL, info->xbdev->otherend,
+ "feature-persistent", "%u", &persistent,
+ NULL);
+ if (err)
+ info->feature_persistent = 0;
+ else
+ info->feature_persistent = persistent;
+
+ err = xenbus_gather(XBT_NIL, info->xbdev->otherend,
+ "feature-max-indirect-segments", "%u", &indirect_segments,
+ NULL);
+ if (err)
+ info->max_indirect_segments = 0;
+ else
+ info->max_indirect_segments = min(indirect_segments,
+ xen_blkif_max_segments);
+
+ return blkfront_setup_indirect(info);
+}
+
+/*
 * Invoked when the backend is finally 'ready' (and has produced
* the details about the physical device - #sectors, size, etc).
*/
@@ -1807,7 +1865,6 @@
unsigned int physical_sector_size;
unsigned int binfo;
int err;
- int barrier, flush, discard, persistent;
switch (info->connected) {
case BLKIF_STATE_CONNECTED:
@@ -1864,48 +1921,7 @@
if (err != 1)
physical_sector_size = sector_size;
- info->feature_flush = 0;
-
- err = xenbus_gather(XBT_NIL, info->xbdev->otherend,
- "feature-barrier", "%d", &barrier,
- NULL);
-
- /*
- * If there's no "feature-barrier" defined, then it means
- * we're dealing with a very old backend which writes
- * synchronously; nothing to do.
- *
- * If there are barriers, then we use flush.
- */
- if (!err && barrier)
- info->feature_flush = REQ_FLUSH | REQ_FUA;
- /*
- * And if there is "feature-flush-cache" use that above
- * barriers.
- */
- err = xenbus_gather(XBT_NIL, info->xbdev->otherend,
- "feature-flush-cache", "%d", &flush,
- NULL);
-
- if (!err && flush)
- info->feature_flush = REQ_FLUSH;
-
- err = xenbus_gather(XBT_NIL, info->xbdev->otherend,
- "feature-discard", "%d", &discard,
- NULL);
-
- if (!err && discard)
- blkfront_setup_discard(info);
-
- err = xenbus_gather(XBT_NIL, info->xbdev->otherend,
- "feature-persistent", "%u", &persistent,
- NULL);
- if (err)
- info->feature_persistent = 0;
- else
- info->feature_persistent = persistent;
-
- err = blkfront_setup_indirect(info);
+ err = blkfront_gather_backend_features(info);
if (err) {
xenbus_dev_fatal(info->xbdev, err, "setup_indirect at %s",
info->xbdev->otherend);
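The relocated feature probing keeps the precedence described in its comments: no feature-barrier means a synchronous backend, feature-barrier enables flush+FUA, and feature-flush-cache overrides barriers. Condensed into one hypothetical helper (the xenbus reads are elided):

static unsigned int blkfront_flush_flags(bool have_barrier, bool have_flush)
{
	if (have_flush)
		return REQ_FLUSH;		/* flush-cache beats barrier */
	if (have_barrier)
		return REQ_FLUSH | REQ_FUA;	/* emulate barrier via flush */
	return 0;				/* very old, synchronous backend */
}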
diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c
index fb655e8..763301c 100644
--- a/drivers/block/zram/zram_drv.c
+++ b/drivers/block/zram/zram_drv.c
@@ -496,10 +496,9 @@
kfree(meta);
}
-static struct zram_meta *zram_meta_alloc(int device_id, u64 disksize)
+static struct zram_meta *zram_meta_alloc(char *pool_name, u64 disksize)
{
size_t num_pages;
- char pool_name[8];
struct zram_meta *meta = kmalloc(sizeof(*meta), GFP_KERNEL);
if (!meta)
@@ -512,7 +511,6 @@
goto out_error;
}
- snprintf(pool_name, sizeof(pool_name), "zram%d", device_id);
meta->mem_pool = zs_create_pool(pool_name, GFP_NOIO | __GFP_HIGHMEM);
if (!meta->mem_pool) {
pr_err("Error creating memory pool\n");
@@ -1031,7 +1029,7 @@
return -EINVAL;
disksize = PAGE_ALIGN(disksize);
- meta = zram_meta_alloc(zram->disk->first_minor, disksize);
+ meta = zram_meta_alloc(zram->disk->disk_name, disksize);
if (!meta)
return -ENOMEM;
diff --git a/drivers/clk/pxa/clk-pxa3xx.c b/drivers/clk/pxa/clk-pxa3xx.c
index 4b93a1e..ac03ba4 100644
--- a/drivers/clk/pxa/clk-pxa3xx.c
+++ b/drivers/clk/pxa/clk-pxa3xx.c
@@ -126,7 +126,7 @@
PARENTS(pxa3xx_sbus) = { "ring_osc_60mhz", "system_bus" };
PARENTS(pxa3xx_smemcbus) = { "ring_osc_60mhz", "smemc" };
-#define CKEN_AB(bit) ((CKEN_ ## bit > 31) ? &CKENA : &CKENB)
+#define CKEN_AB(bit) ((CKEN_ ## bit > 31) ? &CKENB : &CKENA)
#define PXA3XX_CKEN(dev_id, con_id, parents, mult_lp, div_lp, mult_hp, \
div_hp, bit, is_lp, flags) \
PXA_CKEN(dev_id, con_id, bit, parents, mult_lp, div_lp, \
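The CKEN_AB fix inverts which register bank a clock-enable bit selects: bits 0..31 live in CKENA and bits 32 and up in CKENB, so a bit number above 31 must resolve to CKENB. A worked case with a hypothetical bit value:

/* With a hypothetical CKEN_FOO defined as 34:
 *   old macro: (34 > 31) ? &CKENA : &CKENB   -> CKENA, the wrong bank
 *   fixed:     (34 > 31) ? &CKENB : &CKENA   -> CKENB (bit 34 - 32 = 2)
 */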
diff --git a/drivers/clocksource/sh_cmt.c b/drivers/clocksource/sh_cmt.c
index b8ff3c6..c96de14 100644
--- a/drivers/clocksource/sh_cmt.c
+++ b/drivers/clocksource/sh_cmt.c
@@ -661,6 +661,9 @@
{
struct sh_cmt_channel *ch = cs_to_sh_cmt(cs);
+ if (!ch->cs_enabled)
+ return;
+
sh_cmt_stop(ch, FLAG_CLOCKSOURCE);
pm_genpd_syscore_poweroff(&ch->cmt->pdev->dev);
}
@@ -669,6 +672,9 @@
{
struct sh_cmt_channel *ch = cs_to_sh_cmt(cs);
+ if (!ch->cs_enabled)
+ return;
+
pm_genpd_syscore_poweron(&ch->cmt->pdev->dev);
sh_cmt_start(ch, FLAG_CLOCKSOURCE);
}
diff --git a/drivers/clocksource/timer-imx-gpt.c b/drivers/clocksource/timer-imx-gpt.c
index 2d59038..86c7eb6 100644
--- a/drivers/clocksource/timer-imx-gpt.c
+++ b/drivers/clocksource/timer-imx-gpt.c
@@ -462,6 +462,7 @@
BUG_ON(!imxtm->base);
imxtm->type = type;
+ imxtm->irq = irq;
_mxc_timer_init(imxtm);
}
diff --git a/drivers/cpufreq/exynos-cpufreq.c b/drivers/cpufreq/exynos-cpufreq.c
index ae5b2bd..fa3dd84 100644
--- a/drivers/cpufreq/exynos-cpufreq.c
+++ b/drivers/cpufreq/exynos-cpufreq.c
@@ -180,7 +180,7 @@
ret = exynos5250_cpufreq_init(exynos_info);
} else {
pr_err("%s: Unknown SoC type\n", __func__);
- return -ENODEV;
+ ret = -ENODEV;
}
if (ret)
@@ -188,12 +188,14 @@
if (exynos_info->set_freq == NULL) {
dev_err(&pdev->dev, "No set_freq function (ERR)\n");
+ ret = -EINVAL;
goto err_vdd_arm;
}
arm_regulator = regulator_get(NULL, "vdd_arm");
if (IS_ERR(arm_regulator)) {
dev_err(&pdev->dev, "failed to get resource vdd_arm\n");
+ ret = -EINVAL;
goto err_vdd_arm;
}
@@ -225,7 +227,7 @@
regulator_put(arm_regulator);
err_vdd_arm:
kfree(exynos_info);
- return -EINVAL;
+ return ret;
}
static struct platform_driver exynos_cpufreq_platdrv = {
diff --git a/drivers/crypto/caam/caamhash.c b/drivers/crypto/caam/caamhash.c
index dae1e80..f9c7875 100644
--- a/drivers/crypto/caam/caamhash.c
+++ b/drivers/crypto/caam/caamhash.c
@@ -909,13 +909,14 @@
state->buflen_1;
u32 *sh_desc = ctx->sh_desc_fin, *desc;
dma_addr_t ptr = ctx->sh_desc_fin_dma;
- int sec4_sg_bytes;
+ int sec4_sg_bytes, sec4_sg_src_index;
int digestsize = crypto_ahash_digestsize(ahash);
struct ahash_edesc *edesc;
int ret = 0;
int sh_len;
- sec4_sg_bytes = (1 + (buflen ? 1 : 0)) * sizeof(struct sec4_sg_entry);
+ sec4_sg_src_index = 1 + (buflen ? 1 : 0);
+ sec4_sg_bytes = sec4_sg_src_index * sizeof(struct sec4_sg_entry);
/* allocate space for base edesc and hw desc commands, link tables */
edesc = kmalloc(sizeof(struct ahash_edesc) + DESC_JOB_IO_LEN +
@@ -942,7 +943,7 @@
state->buf_dma = try_buf_map_to_sec4_sg(jrdev, edesc->sec4_sg + 1,
buf, state->buf_dma, buflen,
last_buflen);
- (edesc->sec4_sg + sec4_sg_bytes - 1)->len |= SEC4_SG_LEN_FIN;
+ (edesc->sec4_sg + sec4_sg_src_index - 1)->len |= SEC4_SG_LEN_FIN;
edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
sec4_sg_bytes, DMA_TO_DEVICE);
diff --git a/drivers/crypto/nx/nx-sha256.c b/drivers/crypto/nx/nx-sha256.c
index 08f8d5c..becb738c 100644
--- a/drivers/crypto/nx/nx-sha256.c
+++ b/drivers/crypto/nx/nx-sha256.c
@@ -71,7 +71,6 @@
struct sha256_state *sctx = shash_desc_ctx(desc);
struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base);
struct nx_csbcpb *csbcpb = (struct nx_csbcpb *)nx_ctx->csbcpb;
- struct nx_sg *in_sg;
struct nx_sg *out_sg;
u64 to_process = 0, leftover, total;
unsigned long irq_flags;
@@ -97,7 +96,6 @@
NX_CPB_FDM(csbcpb) |= NX_FDM_INTERMEDIATE;
NX_CPB_FDM(csbcpb) |= NX_FDM_CONTINUATION;
- in_sg = nx_ctx->in_sg;
max_sg_len = min_t(u64, nx_ctx->ap->sglen,
nx_driver.of.max_sg_len/sizeof(struct nx_sg));
max_sg_len = min_t(u64, max_sg_len,
@@ -114,17 +112,12 @@
}
do {
- /*
- * to_process: the SHA256_BLOCK_SIZE data chunk to process in
- * this update. This value is also restricted by the sg list
- * limits.
- */
- to_process = total - to_process;
- to_process = to_process & ~(SHA256_BLOCK_SIZE - 1);
+ int used_sgs = 0;
+ struct nx_sg *in_sg = nx_ctx->in_sg;
if (buf_len) {
data_len = buf_len;
- in_sg = nx_build_sg_list(nx_ctx->in_sg,
+ in_sg = nx_build_sg_list(in_sg,
(u8 *) sctx->buf,
&data_len,
max_sg_len);
@@ -133,15 +126,27 @@
rc = -EINVAL;
goto out;
}
+ used_sgs = in_sg - nx_ctx->in_sg;
}
+ /* to_process: SHA256_BLOCK_SIZE aligned chunk to be
+ * processed in this iteration. This value is restricted
+ * by sg list limits and number of sgs we already used
+ * for leftover data. (see above)
+	 * In the ideal case, we could allow NX_PAGE_SIZE * max_sg_len,
+ * but because data may not be aligned, we need to account
+ * for that too. */
+ to_process = min_t(u64, total,
+ (max_sg_len - 1 - used_sgs) * NX_PAGE_SIZE);
+ to_process = to_process & ~(SHA256_BLOCK_SIZE - 1);
+
data_len = to_process - buf_len;
in_sg = nx_build_sg_list(in_sg, (u8 *) data,
&data_len, max_sg_len);
nx_ctx->op.inlen = (nx_ctx->in_sg - in_sg) * sizeof(struct nx_sg);
- to_process = (data_len + buf_len);
+ to_process = data_len + buf_len;
leftover = total - to_process;
/*
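A worked instance of the new to_process bound in both SHA hunks, under assumed values (4 KiB NX pages, max_sg_len = 32, two sg entries already consumed by buffered leftover data):

/* to_process = min(total, (max_sg_len - 1 - used_sgs) * NX_PAGE_SIZE),
 * then rounded down to a block multiple (64 bytes for SHA-256).
 *
 * total = 1 MiB, max_sg_len = 32, used_sgs = 2, NX_PAGE_SIZE = 4096:
 *   (32 - 1 - 2) * 4096 = 118784 bytes
 *   118784 & ~(64 - 1)  = 118784 (already block aligned)
 * so this iteration consumes 118784 bytes and the loop handles the rest.
 */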
diff --git a/drivers/crypto/nx/nx-sha512.c b/drivers/crypto/nx/nx-sha512.c
index aff0fe5..b6e183d 100644
--- a/drivers/crypto/nx/nx-sha512.c
+++ b/drivers/crypto/nx/nx-sha512.c
@@ -71,7 +71,6 @@
struct sha512_state *sctx = shash_desc_ctx(desc);
struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base);
struct nx_csbcpb *csbcpb = (struct nx_csbcpb *)nx_ctx->csbcpb;
- struct nx_sg *in_sg;
struct nx_sg *out_sg;
u64 to_process, leftover = 0, total;
unsigned long irq_flags;
@@ -97,7 +96,6 @@
NX_CPB_FDM(csbcpb) |= NX_FDM_INTERMEDIATE;
NX_CPB_FDM(csbcpb) |= NX_FDM_CONTINUATION;
- in_sg = nx_ctx->in_sg;
max_sg_len = min_t(u64, nx_ctx->ap->sglen,
nx_driver.of.max_sg_len/sizeof(struct nx_sg));
max_sg_len = min_t(u64, max_sg_len,
@@ -114,18 +112,12 @@
}
do {
- /*
- * to_process: the SHA512_BLOCK_SIZE data chunk to process in
- * this update. This value is also restricted by the sg list
- * limits.
- */
- to_process = total - leftover;
- to_process = to_process & ~(SHA512_BLOCK_SIZE - 1);
- leftover = total - to_process;
+ int used_sgs = 0;
+ struct nx_sg *in_sg = nx_ctx->in_sg;
if (buf_len) {
data_len = buf_len;
- in_sg = nx_build_sg_list(nx_ctx->in_sg,
+ in_sg = nx_build_sg_list(in_sg,
(u8 *) sctx->buf,
&data_len, max_sg_len);
@@ -133,8 +125,20 @@
rc = -EINVAL;
goto out;
}
+ used_sgs = in_sg - nx_ctx->in_sg;
}
+ /* to_process: SHA512_BLOCK_SIZE aligned chunk to be
+ * processed in this iteration. This value is restricted
+ * by sg list limits and number of sgs we already used
+ * for leftover data. (see above)
+	 * In the ideal case, we could allow NX_PAGE_SIZE * max_sg_len,
+ * but because data may not be aligned, we need to account
+ * for that too. */
+ to_process = min_t(u64, total,
+ (max_sg_len - 1 - used_sgs) * NX_PAGE_SIZE);
+ to_process = to_process & ~(SHA512_BLOCK_SIZE - 1);
+
data_len = to_process - buf_len;
in_sg = nx_build_sg_list(in_sg, (u8 *) data,
&data_len, max_sg_len);
@@ -146,7 +150,7 @@
goto out;
}
- to_process = (data_len + buf_len);
+ to_process = data_len + buf_len;
leftover = total - to_process;
/*
diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c
index 4a4cce1..3ff284c 100644
--- a/drivers/dma/dmaengine.c
+++ b/drivers/dma/dmaengine.c
@@ -689,6 +689,10 @@
struct dma_chan *ch = dma_request_slave_channel_reason(dev, name);
if (IS_ERR(ch))
return NULL;
+
+ dma_cap_set(DMA_PRIVATE, ch->device->cap_mask);
+ ch->device->privatecnt++;
+
return ch;
}
EXPORT_SYMBOL_GPL(dma_request_slave_channel);
diff --git a/drivers/edac/ppc4xx_edac.c b/drivers/edac/ppc4xx_edac.c
index 3515b381..711d8ad 100644
--- a/drivers/edac/ppc4xx_edac.c
+++ b/drivers/edac/ppc4xx_edac.c
@@ -920,7 +920,7 @@
*/
for (row = 0; row < mci->nr_csrows; row++) {
- struct csrow_info *csi = &mci->csrows[row];
+ struct csrow_info *csi = mci->csrows[row];
/*
* Get the configuration settings for this
diff --git a/drivers/firmware/broadcom/bcm47xx_nvram.c b/drivers/firmware/broadcom/bcm47xx_nvram.c
index 87add3f..e415945 100644
--- a/drivers/firmware/broadcom/bcm47xx_nvram.c
+++ b/drivers/firmware/broadcom/bcm47xx_nvram.c
@@ -245,4 +245,4 @@
}
EXPORT_SYMBOL(bcm47xx_nvram_get_contents);
-MODULE_LICENSE("GPLv2");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
index 2f7a5ef..f5c2255 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
@@ -374,7 +374,7 @@
unsigned height_in_mb = ALIGN(height / 16, 2);
unsigned fs_in_mb = width_in_mb * height_in_mb;
- unsigned image_size, tmp, min_dpb_size, num_dpb_buffer;
+ unsigned image_size, tmp, min_dpb_size, num_dpb_buffer, min_ctx_size;
image_size = width * height;
image_size += image_size / 2;
@@ -466,6 +466,8 @@
num_dpb_buffer = (le32_to_cpu(msg[59]) & 0xff) + 2;
min_dpb_size = image_size * num_dpb_buffer;
+ min_ctx_size = ((width + 255) / 16) * ((height + 255) / 16)
+ * 16 * num_dpb_buffer + 52 * 1024;
break;
default:
@@ -486,6 +488,7 @@
buf_sizes[0x1] = dpb_size;
buf_sizes[0x2] = image_size;
+ buf_sizes[0x4] = min_ctx_size;
return 0;
}
@@ -628,6 +631,13 @@
return -EINVAL;
}
+ } else if (cmd == 0x206) {
+ if ((end - start) < ctx->buf_sizes[4]) {
+			DRM_ERROR("buffer (%d) too small (%d / %d)!\n", cmd,
+ (unsigned)(end - start),
+ ctx->buf_sizes[4]);
+ return -EINVAL;
+ }
} else if ((cmd != 0x100) && (cmd != 0x204)) {
DRM_ERROR("invalid UVD command %X!\n", cmd);
return -EINVAL;
@@ -755,9 +765,10 @@
struct amdgpu_uvd_cs_ctx ctx = {};
unsigned buf_sizes[] = {
[0x00000000] = 2048,
- [0x00000001] = 32 * 1024 * 1024,
- [0x00000002] = 2048 * 1152 * 3,
+ [0x00000001] = 0xFFFFFFFF,
+ [0x00000002] = 0xFFFFFFFF,
[0x00000003] = 2048,
+ [0x00000004] = 0xFFFFFFFF,
};
struct amdgpu_ib *ib = &parser->ibs[ib_idx];
int r;
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
index f5a42ab..20e2cfd 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
@@ -3135,7 +3135,7 @@
WREG32(mmCP_MEC_DOORBELL_RANGE_LOWER,
AMDGPU_DOORBELL_KIQ << 2);
WREG32(mmCP_MEC_DOORBELL_RANGE_UPPER,
- 0x7FFFF << 2);
+ AMDGPU_DOORBELL_MEC_RING7 << 2);
}
tmp = RREG32(mmCP_HQD_PQ_DOORBELL_CONTROL);
tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
diff --git a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c
index 6fad1f9..ef6182b 100644
--- a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c
+++ b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c
@@ -559,7 +559,7 @@
return 0;
}
-#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
static int atmel_hlcdc_dc_drm_suspend(struct device *dev)
{
struct drm_device *drm_dev = dev_get_drvdata(dev);
diff --git a/drivers/gpu/drm/drm_dp_mst_topology.c b/drivers/gpu/drm/drm_dp_mst_topology.c
index b0487c9..eb603f1de 100644
--- a/drivers/gpu/drm/drm_dp_mst_topology.c
+++ b/drivers/gpu/drm/drm_dp_mst_topology.c
@@ -873,9 +873,10 @@
from an EDID retrieval */
if (port->connector) {
mutex_lock(&mgr->destroy_connector_lock);
- list_add(&port->connector->destroy_list, &mgr->destroy_connector_list);
+ list_add(&port->next, &mgr->destroy_connector_list);
mutex_unlock(&mgr->destroy_connector_lock);
schedule_work(&mgr->destroy_connector_work);
+ return;
}
drm_dp_port_teardown_pdt(port, port->pdt);
@@ -2659,7 +2660,7 @@
static void drm_dp_destroy_connector_work(struct work_struct *work)
{
struct drm_dp_mst_topology_mgr *mgr = container_of(work, struct drm_dp_mst_topology_mgr, destroy_connector_work);
- struct drm_connector *connector;
+ struct drm_dp_mst_port *port;
/*
* Not a regular list traverse as we have to drop the destroy
@@ -2668,15 +2669,21 @@
*/
for (;;) {
mutex_lock(&mgr->destroy_connector_lock);
- connector = list_first_entry_or_null(&mgr->destroy_connector_list, struct drm_connector, destroy_list);
- if (!connector) {
+ port = list_first_entry_or_null(&mgr->destroy_connector_list, struct drm_dp_mst_port, next);
+ if (!port) {
mutex_unlock(&mgr->destroy_connector_lock);
break;
}
- list_del(&connector->destroy_list);
+ list_del(&port->next);
mutex_unlock(&mgr->destroy_connector_lock);
- mgr->cbs->destroy_connector(mgr, connector);
+ mgr->cbs->destroy_connector(mgr, port->connector);
+
+ drm_dp_port_teardown_pdt(port, port->pdt);
+
+ if (!port->input && port->vcpi.vcpi > 0)
+ drm_dp_mst_put_payload_id(mgr, port->vcpi.vcpi);
+ kfree(port);
}
}
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimc.c b/drivers/gpu/drm/exynos/exynos_drm_fimc.c
index 842d6b8..2a65235 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fimc.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fimc.c
@@ -1745,7 +1745,6 @@
spin_lock_init(&ctx->lock);
platform_set_drvdata(pdev, ctx);
- pm_runtime_set_active(dev);
pm_runtime_enable(dev);
ret = exynos_drm_ippdrv_register(ippdrv);
diff --git a/drivers/gpu/drm/exynos/exynos_drm_gsc.c b/drivers/gpu/drm/exynos/exynos_drm_gsc.c
index 8040ed2..f1c6b76 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_gsc.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_gsc.c
@@ -593,8 +593,7 @@
gsc_write(cfg, GSC_IN_CON);
- ctx->rotation = cfg &
- (GSC_IN_ROT_90 | GSC_IN_ROT_270) ? 1 : 0;
+ ctx->rotation = (cfg & GSC_IN_ROT_90) ? 1 : 0;
*swap = ctx->rotation;
return 0;
@@ -857,8 +856,7 @@
gsc_write(cfg, GSC_IN_CON);
- ctx->rotation = cfg &
- (GSC_IN_ROT_90 | GSC_IN_ROT_270) ? 1 : 0;
+ ctx->rotation = (cfg & GSC_IN_ROT_90) ? 1 : 0;
*swap = ctx->rotation;
return 0;
diff --git a/drivers/gpu/drm/exynos/exynos_hdmi.c b/drivers/gpu/drm/exynos/exynos_hdmi.c
index 99e2864..4a00990 100644
--- a/drivers/gpu/drm/exynos/exynos_hdmi.c
+++ b/drivers/gpu/drm/exynos/exynos_hdmi.c
@@ -1064,6 +1064,7 @@
{
struct hdmi_context *hdata = ctx_from_connector(connector);
struct edid *edid;
+ int ret;
if (!hdata->ddc_adpt)
return -ENODEV;
@@ -1079,7 +1080,11 @@
drm_mode_connector_update_edid_property(connector, edid);
- return drm_add_edid_modes(connector, edid);
+ ret = drm_add_edid_modes(connector, edid);
+
+ kfree(edid);
+
+ return ret;
}
static int hdmi_find_phy_conf(struct hdmi_context *hdata, u32 pixel_clock)
diff --git a/drivers/gpu/drm/exynos/exynos_mixer.c b/drivers/gpu/drm/exynos/exynos_mixer.c
index cae98db..4706b56 100644
--- a/drivers/gpu/drm/exynos/exynos_mixer.c
+++ b/drivers/gpu/drm/exynos/exynos_mixer.c
@@ -718,6 +718,10 @@
/* handling VSYNC */
if (val & MXR_INT_STATUS_VSYNC) {
+ /* the vsync interrupt uses different bits for read and clear */
+ val |= MXR_INT_CLEAR_VSYNC;
+ val &= ~MXR_INT_STATUS_VSYNC;
+
/* interlace scan need to check shadow register */
if (ctx->interlace) {
base = mixer_reg_read(res, MXR_GRAPHIC_BASE(0));
@@ -743,11 +747,6 @@
out:
/* clear interrupts */
- if (~val & MXR_INT_EN_VSYNC) {
- /* vsync interrupt use different bit for read and clear */
- val &= ~MXR_INT_EN_VSYNC;
- val |= MXR_INT_CLEAR_VSYNC;
- }
mixer_reg_write(res, MXR_INT_STATUS, val);
spin_unlock(&res->reg_slock);
@@ -907,8 +906,8 @@
}
/* enable vsync interrupt */
- mixer_reg_writemask(res, MXR_INT_EN, MXR_INT_EN_VSYNC,
- MXR_INT_EN_VSYNC);
+ mixer_reg_writemask(res, MXR_INT_STATUS, ~0, MXR_INT_CLEAR_VSYNC);
+ mixer_reg_writemask(res, MXR_INT_EN, ~0, MXR_INT_EN_VSYNC);
return 0;
}
@@ -918,7 +917,13 @@
struct mixer_context *mixer_ctx = crtc->ctx;
struct mixer_resources *res = &mixer_ctx->mixer_res;
+ if (!mixer_ctx->powered) {
+ mixer_ctx->int_en &= MXR_INT_EN_VSYNC;
+ return;
+ }
+
/* disable vsync interrupt */
+ mixer_reg_writemask(res, MXR_INT_STATUS, ~0, MXR_INT_CLEAR_VSYNC);
mixer_reg_writemask(res, MXR_INT_EN, 0, MXR_INT_EN_VSYNC);
}
@@ -1047,6 +1052,8 @@
mixer_reg_writemask(res, MXR_STATUS, ~0, MXR_STATUS_SOFT_RESET);
+ if (ctx->int_en & MXR_INT_EN_VSYNC)
+ mixer_reg_writemask(res, MXR_INT_STATUS, ~0, MXR_INT_CLEAR_VSYNC);
mixer_reg_write(res, MXR_INT_EN, ctx->int_en);
mixer_win_reset(ctx);
}
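The mixer fix acknowledges vsync by writing MXR_INT_CLEAR_VSYNC into MXR_INT_STATUS rather than writing back the status bit that was read, since the register uses separate bits for reporting and for acknowledging. A generic model of such a read/clear split register (bit positions are made up):

#include <stdio.h>
#include <stdint.h>

#define INT_STATUS_VSYNC  (1u << 0)	/* set by hw when vsync fired */
#define INT_CLEAR_VSYNC   (1u << 8)	/* write 1 here to acknowledge */

static uint32_t int_status = INT_STATUS_VSYNC;	/* fake hw register */

static void reg_write_status(uint32_t val)
{
	/* hw: writing the CLEAR bit drops the STATUS bit */
	if (val & INT_CLEAR_VSYNC)
		int_status &= ~INT_STATUS_VSYNC;
}

int main(void)
{
	uint32_t val = int_status;

	if (val & INT_STATUS_VSYNC) {
		/* the ack uses the clear bit, not the status bit we read */
		val |= INT_CLEAR_VSYNC;
		val &= ~INT_STATUS_VSYNC;
		printf("handling vsync\n");
	}
	reg_write_status(val);
	printf("status after ack: %#x\n", int_status);
	return 0;
}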
diff --git a/drivers/gpu/drm/i915/intel_atomic.c b/drivers/gpu/drm/i915/intel_atomic.c
index 7ed8033..8e35e0d 100644
--- a/drivers/gpu/drm/i915/intel_atomic.c
+++ b/drivers/gpu/drm/i915/intel_atomic.c
@@ -129,8 +129,9 @@
struct drm_atomic_state *state,
bool async)
{
- int ret;
- int i;
+ struct drm_crtc_state *crtc_state;
+ struct drm_crtc *crtc;
+ int ret, i;
if (async) {
DRM_DEBUG_KMS("i915 does not yet support async commit\n");
@@ -142,48 +143,18 @@
return ret;
/* Point of no return */
-
- /*
- * FIXME: The proper sequence here will eventually be:
- *
- * drm_atomic_helper_swap_state(dev, state)
- * drm_atomic_helper_commit_modeset_disables(dev, state);
- * drm_atomic_helper_commit_planes(dev, state);
- * drm_atomic_helper_commit_modeset_enables(dev, state);
- * drm_atomic_helper_wait_for_vblanks(dev, state);
- * drm_atomic_helper_cleanup_planes(dev, state);
- * drm_atomic_state_free(state);
- *
- * once we have full atomic modeset. For now, just manually update
- * plane states to avoid clobbering good states with dummy states
- * while nuclear pageflipping.
- */
- for (i = 0; i < dev->mode_config.num_total_plane; i++) {
- struct drm_plane *plane = state->planes[i];
-
- if (!plane)
- continue;
-
- plane->state->state = state;
- swap(state->plane_states[i], plane->state);
- plane->state->state = NULL;
- }
+ drm_atomic_helper_swap_state(dev, state);
/* swap crtc_scaler_state */
- for (i = 0; i < dev->mode_config.num_crtc; i++) {
- struct drm_crtc *crtc = state->crtcs[i];
- if (!crtc) {
- continue;
- }
-
- to_intel_crtc(crtc)->config->scaler_state =
- to_intel_crtc_state(state->crtc_states[i])->scaler_state;
+ for_each_crtc_in_state(state, crtc, crtc_state, i) {
+ to_intel_crtc(crtc)->config = to_intel_crtc_state(crtc->state);
if (INTEL_INFO(dev)->gen >= 9)
skl_detach_scalers(to_intel_crtc(crtc));
+
+ drm_atomic_helper_commit_planes_on_crtc(crtc_state);
}
- drm_atomic_helper_commit_planes(dev, state);
drm_atomic_helper_wait_for_vblanks(dev, state);
drm_atomic_helper_cleanup_planes(dev, state);
drm_atomic_state_free(state);
diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c
index 3dcd59e..198fc3c 100644
--- a/drivers/gpu/drm/i915/intel_bios.c
+++ b/drivers/gpu/drm/i915/intel_bios.c
@@ -1075,34 +1075,15 @@
const union child_device_config *p_child;
union child_device_config *child_dev_ptr;
int i, child_device_num, count;
- u8 expected_size;
- u16 block_size;
+ u16 block_size;
p_defs = find_section(bdb, BDB_GENERAL_DEFINITIONS);
if (!p_defs) {
DRM_DEBUG_KMS("No general definition block is found, no devices defined.\n");
return;
}
- if (bdb->version < 195) {
- expected_size = 33;
- } else if (bdb->version == 195) {
- expected_size = 37;
- } else if (bdb->version <= 197) {
- expected_size = 38;
- } else {
- expected_size = 38;
- DRM_DEBUG_DRIVER("Expected child_device_config size for BDB version %u not known; assuming %u\n",
- expected_size, bdb->version);
- }
-
- if (expected_size > sizeof(*p_child)) {
- DRM_ERROR("child_device_config cannot fit in p_child\n");
- return;
- }
-
- if (p_defs->child_dev_size != expected_size) {
- DRM_ERROR("Size mismatch; child_device_config size=%u (expected %u); bdb->version: %u\n",
- p_defs->child_dev_size, expected_size, bdb->version);
+ if (p_defs->child_dev_size < sizeof(*p_child)) {
+ DRM_ERROR("General definition block child device size is too small.\n");
return;
}
/* get the block size of general definitions */
@@ -1149,7 +1130,7 @@
child_dev_ptr = dev_priv->vbt.child_dev + count;
count++;
- memcpy(child_dev_ptr, p_child, p_defs->child_dev_size);
+ memcpy(child_dev_ptr, p_child, sizeof(*p_child));
}
return;
}
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 30e0f54..87476ff 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -11826,7 +11826,9 @@
goto encoder_retry;
}
- pipe_config->dither = pipe_config->pipe_bpp != base_bpp;
+ /* Dithering does not seem to pass bits through correctly when it should,
+ * so only enable it on 6bpc panels. */
+ pipe_config->dither = pipe_config->pipe_bpp == 6*3;
DRM_DEBUG_KMS("plane bpp: %i, pipe bpp: %i, dithering: %i\n",
base_bpp, pipe_config->pipe_bpp, pipe_config->dither);
@@ -12624,17 +12626,17 @@
modeset_update_crtc_power_domains(state);
- drm_atomic_helper_commit_planes(dev, state);
-
/* Now enable the clocks, plane, pipe, and connectors that we set up. */
for_each_crtc_in_state(state, crtc, crtc_state, i) {
- if (!needs_modeset(crtc->state) || !crtc->state->enable)
+ if (!needs_modeset(crtc->state) || !crtc->state->enable) {
+ drm_atomic_helper_commit_planes_on_crtc(crtc_state);
continue;
+ }
update_scanline_offset(to_intel_crtc(crtc));
dev_priv->display.crtc_enable(crtc);
- intel_crtc_enable_planes(crtc);
+ drm_atomic_helper_commit_planes_on_crtc(crtc_state);
}
/* FIXME: add subpixel order */
@@ -12891,20 +12893,11 @@
return 0;
}
-static bool primary_plane_visible(struct drm_crtc *crtc)
-{
- struct intel_plane_state *plane_state =
- to_intel_plane_state(crtc->primary->state);
-
- return plane_state->visible;
-}
-
static int intel_crtc_set_config(struct drm_mode_set *set)
{
struct drm_device *dev;
struct drm_atomic_state *state = NULL;
struct intel_crtc_state *pipe_config;
- bool primary_plane_was_visible;
int ret;
BUG_ON(!set);
@@ -12943,38 +12936,8 @@
intel_update_pipe_size(to_intel_crtc(set->crtc));
- primary_plane_was_visible = primary_plane_visible(set->crtc);
-
ret = intel_set_mode_with_config(set->crtc, pipe_config, true);
- if (ret == 0 &&
- pipe_config->base.enable &&
- pipe_config->base.planes_changed &&
- !needs_modeset(&pipe_config->base)) {
- struct intel_crtc *intel_crtc = to_intel_crtc(set->crtc);
-
- /*
- * We need to make sure the primary plane is re-enabled if it
- * has previously been turned off.
- */
- if (ret == 0 && !primary_plane_was_visible &&
- primary_plane_visible(set->crtc)) {
- WARN_ON(!intel_crtc->active);
- intel_post_enable_primary(set->crtc);
- }
-
- /*
- * In the fastboot case this may be our only check of the
- * state after boot. It would be better to only do it on
- * the first update, but we don't have a nice way of doing that
- * (and really, set_config isn't used much for high freq page
- * flipping, so increasing its cost here shouldn't be a big
- * deal).
- */
- if (i915.fastboot && ret == 0)
- intel_modeset_check_state(set->crtc->dev);
- }
-
if (ret) {
DRM_DEBUG_KMS("failed to set mode on [CRTC:%d], err = %d\n",
set->crtc->base.id, ret);
@@ -13305,6 +13268,9 @@
*/
if (IS_BROADWELL(dev))
intel_crtc->atomic.wait_vblank = true;
+
+ if (crtc_state)
+ intel_crtc->atomic.post_enable_primary = true;
}
/*
@@ -13317,6 +13283,10 @@
if (!state->visible || !fb)
intel_crtc->atomic.disable_ips = true;
+ if (!state->visible && old_state->visible &&
+ crtc_state && !needs_modeset(&crtc_state->base))
+ intel_crtc->atomic.pre_disable_primary = true;
+
intel_crtc->atomic.fb_bits |=
INTEL_FRONTBUFFER_PRIMARY(intel_crtc->pipe);
@@ -15034,6 +15004,7 @@
struct intel_plane_state *plane_state;
memset(crtc->config, 0, sizeof(*crtc->config));
+ crtc->config->base.crtc = &crtc->base;
crtc->config->quirks |= PIPE_CONFIG_QUIRK_INHERITED_MODE;
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index 6e8faa2..1df0e1f 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -93,9 +93,6 @@
static const int skl_rates[] = { 162000, 216000, 270000,
324000, 432000, 540000 };
-static const int chv_rates[] = { 162000, 202500, 210000, 216000,
- 243000, 270000, 324000, 405000,
- 420000, 432000, 540000 };
static const int default_rates[] = { 162000, 270000, 540000 };
/**
@@ -1169,24 +1166,31 @@
return (intel_dp_max_link_bw(intel_dp) >> 3) + 1;
}
+static bool intel_dp_source_supports_hbr2(struct drm_device *dev)
+{
+ /* WaDisableHBR2:skl */
+ if (IS_SKYLAKE(dev) && INTEL_REVID(dev) <= SKL_REVID_B0)
+ return false;
+
+ if ((IS_HASWELL(dev) && !IS_HSW_ULX(dev)) || IS_BROADWELL(dev) ||
+ (INTEL_INFO(dev)->gen >= 9))
+ return true;
+ else
+ return false;
+}
+
static int
intel_dp_source_rates(struct drm_device *dev, const int **source_rates)
{
if (IS_SKYLAKE(dev)) {
*source_rates = skl_rates;
return ARRAY_SIZE(skl_rates);
- } else if (IS_CHERRYVIEW(dev)) {
- *source_rates = chv_rates;
- return ARRAY_SIZE(chv_rates);
}
*source_rates = default_rates;
- if (IS_SKYLAKE(dev) && INTEL_REVID(dev) <= SKL_REVID_B0)
- /* WaDisableHBR2:skl */
- return (DP_LINK_BW_2_7 >> 3) + 1;
- else if (INTEL_INFO(dev)->gen >= 8 ||
- (IS_HASWELL(dev) && !IS_HSW_ULX(dev)))
+ /* This depends on the fact that 5.4 is the last value in the array */
+ if (intel_dp_source_supports_hbr2(dev))
return (DP_LINK_BW_5_4 >> 3) + 1;
else
return (DP_LINK_BW_2_7 >> 3) + 1;
@@ -3941,10 +3945,15 @@
}
}
- /* Training Pattern 3 support, both source and sink */
+ /* Training Pattern 3 support: on Intel platforms, only sources that
+ * support HBR2 also support TP3, so that check is used together with
+ * the DPCD check before enabling TP3.
+ * SKL < B0 is the one exception: TP3 is supported there, but
+ * WaDisableHBR2 keeps it from being enabled.
+ */
if (intel_dp->dpcd[DP_DPCD_REV] >= 0x12 &&
intel_dp->dpcd[DP_MAX_LANE_COUNT] & DP_TPS3_SUPPORTED &&
- (IS_HASWELL(dev_priv) || INTEL_INFO(dev_priv)->gen >= 8)) {
+ intel_dp_source_supports_hbr2(dev)) {
intel_dp->use_tps3 = true;
DRM_DEBUG_KMS("Displayport TPS3 supported\n");
} else
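Folding the HBR2 test into intel_dp_source_supports_hbr2() means the link-rate cap and the TP3 decision can no longer drift apart. A minimal sketch of that refactor (platform names and rates simplified; not the driver's actual tables):

#include <stdbool.h>
#include <stdio.h>

enum platform { HSW, HSW_ULX, BDW, SKL_PRE_B0, SKL };

/* single source of truth for HBR2 (5.4 GHz) support */
static bool source_supports_hbr2(enum platform p)
{
	if (p == SKL_PRE_B0)	/* models WaDisableHBR2:skl */
		return false;
	return p == HSW || p == BDW || p == SKL;
}

static int max_link_rate_khz(enum platform p)
{
	return source_supports_hbr2(p) ? 540000 : 270000;
}

static bool use_tps3(enum platform p, bool sink_tps3)
{
	/* TP3 only makes sense when the source is HBR2-capable too */
	return sink_tps3 && source_supports_hbr2(p);
}

int main(void)
{
	printf("SKL<B0: rate %d, tps3 %d\n",
	       max_link_rate_khz(SKL_PRE_B0), use_tps3(SKL_PRE_B0, true));
	printf("BDW:    rate %d, tps3 %d\n",
	       max_link_rate_khz(BDW), use_tps3(BDW, true));
	return 0;
}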
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index 9b74ffa..7f2161a 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -1012,6 +1012,8 @@
ret = intel_pin_and_map_ringbuffer_obj(ring->dev, ringbuf);
if (ret)
goto unpin_ctx_obj;
+
+ ctx_obj->dirty = true;
}
return ret;
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.c
index 52c22b0..e10f964 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.c
@@ -166,30 +166,14 @@
}
static int
-gk104_fifo_chan_kick(struct gk104_fifo_chan *chan)
-{
- struct nvkm_object *obj = (void *)chan;
- struct gk104_fifo_priv *priv = (void *)obj->engine;
-
- nv_wr32(priv, 0x002634, chan->base.chid);
- if (!nv_wait(priv, 0x002634, 0x100000, 0x000000)) {
- nv_error(priv, "channel %d [%s] kick timeout\n",
- chan->base.chid, nvkm_client_name(chan));
- return -EBUSY;
- }
-
- return 0;
-}
-
-static int
gk104_fifo_context_detach(struct nvkm_object *parent, bool suspend,
struct nvkm_object *object)
{
struct nvkm_bar *bar = nvkm_bar(parent);
+ struct gk104_fifo_priv *priv = (void *)parent->engine;
struct gk104_fifo_base *base = (void *)parent->parent;
struct gk104_fifo_chan *chan = (void *)parent;
u32 addr;
- int ret;
switch (nv_engidx(object->engine)) {
case NVDEV_ENGINE_SW : return 0;
@@ -204,9 +188,13 @@
return -EINVAL;
}
- ret = gk104_fifo_chan_kick(chan);
- if (ret && suspend)
- return ret;
+ nv_wr32(priv, 0x002634, chan->base.chid);
+ if (!nv_wait(priv, 0x002634, 0xffffffff, chan->base.chid)) {
+ nv_error(priv, "channel %d [%s] kick timeout\n",
+ chan->base.chid, nvkm_client_name(chan));
+ if (suspend)
+ return -EBUSY;
+ }
if (addr) {
nv_wo32(base, addr + 0x00, 0x00000000);
@@ -331,7 +319,6 @@
gk104_fifo_runlist_update(priv, chan->engine);
}
- gk104_fifo_chan_kick(chan);
nv_wr32(priv, 0x800000 + (chid * 8), 0x00000000);
return nvkm_fifo_channel_fini(&chan->base, suspend);
}
diff --git a/drivers/gpu/drm/radeon/radeon_irq_kms.c b/drivers/gpu/drm/radeon/radeon_irq_kms.c
index 1162bfa..171d3e4 100644
--- a/drivers/gpu/drm/radeon/radeon_irq_kms.c
+++ b/drivers/gpu/drm/radeon/radeon_irq_kms.c
@@ -79,6 +79,11 @@
struct drm_mode_config *mode_config = &dev->mode_config;
struct drm_connector *connector;
+ /* we can race here at startup; some boards seem to trigger
+ * hotplug irqs when they shouldn't. */
+ if (!rdev->mode_info.mode_config_initialized)
+ return;
+
mutex_lock(&mode_config->mutex);
if (mode_config->num_connector) {
list_for_each_entry(connector, &mode_config->connector_list, head)
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
index 654c8da..97ad3bc 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
@@ -2492,7 +2492,7 @@
ret = ttm_eu_reserve_buffers(&ticket, &sw_context->validate_nodes,
true, NULL);
if (unlikely(ret != 0))
- goto out_err;
+ goto out_err_nores;
ret = vmw_validate_buffers(dev_priv, sw_context);
if (unlikely(ret != 0))
@@ -2536,6 +2536,7 @@
vmw_resource_relocations_free(&sw_context->res_relocations);
vmw_fifo_commit(dev_priv, command_size);
+ mutex_unlock(&dev_priv->binding_mutex);
vmw_query_bo_switch_commit(dev_priv, sw_context);
ret = vmw_execbuf_fence_commands(file_priv, dev_priv,
@@ -2551,7 +2552,6 @@
DRM_ERROR("Fence submission error. Syncing.\n");
vmw_resource_list_unreserve(&sw_context->resource_list, false);
- mutex_unlock(&dev_priv->binding_mutex);
ttm_eu_fence_buffer_objects(&ticket, &sw_context->validate_nodes,
(void *) fence);
diff --git a/drivers/hid/hid-input.c b/drivers/hid/hid-input.c
index 3511bbab..e3c6364 100644
--- a/drivers/hid/hid-input.c
+++ b/drivers/hid/hid-input.c
@@ -462,12 +462,15 @@
static void hidinput_cleanup_battery(struct hid_device *dev)
{
+ const struct power_supply_desc *psy_desc;
+
if (!dev->battery)
return;
+ psy_desc = dev->battery->desc;
power_supply_unregister(dev->battery);
- kfree(dev->battery->desc->name);
- kfree(dev->battery->desc);
+ kfree(psy_desc->name);
+ kfree(psy_desc);
dev->battery = NULL;
}
#else /* !CONFIG_HID_BATTERY_STRENGTH */
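The hid-input change fixes a use-after-free: power_supply_unregister() frees dev->battery, so the desc pointer has to be saved before the call and the name/desc freed afterwards from the local copy. The same ordering, modeled in plain userspace C (structure and function names are stand-ins):

#include <stdlib.h>
#include <string.h>

struct psy_desc { char *name; };
struct power_supply { struct psy_desc *desc; };

/* models power_supply_unregister(): frees the supply itself */
static void power_supply_unregister(struct power_supply *psy)
{
	free(psy);
}

static void cleanup_battery(struct power_supply *battery)
{
	const struct psy_desc *desc = battery->desc;	/* save before free */

	power_supply_unregister(battery);
	/* battery is gone; desc is still valid and still ours to free */
	free(desc->name);
	free((void *)desc);
}

int main(void)
{
	struct power_supply *psy = malloc(sizeof(*psy));
	psy->desc = malloc(sizeof(*psy->desc));
	psy->desc->name = strdup("hid-battery");
	cleanup_battery(psy);
	return 0;
}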
diff --git a/drivers/hid/hid-uclogic.c b/drivers/hid/hid-uclogic.c
index 9416731..b905d50 100644
--- a/drivers/hid/hid-uclogic.c
+++ b/drivers/hid/hid-uclogic.c
@@ -858,7 +858,7 @@
for (p = drvdata->rdesc;
p <= drvdata->rdesc + drvdata->rsize - 4;) {
if (p[0] == 0xFE && p[1] == 0xED && p[2] == 0x1D &&
- p[3] < sizeof(params)) {
+ p[3] < ARRAY_SIZE(params)) {
v = params[p[3]];
put_unaligned(cpu_to_le32(v), (s32 *)p);
p += 4;
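The uclogic fix swaps sizeof() for ARRAY_SIZE(): params is an array of 4-byte values, so sizeof() counts bytes and would accept out-of-range indexes. A small demonstration of the difference (the array contents are made up):

#include <stdio.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

int main(void)
{
	int params[4] = { 10, 20, 30, 40 };
	unsigned char idx = 5;	/* descriptor-controlled index */

	/* sizeof(params) is 16 bytes here, so "idx < sizeof(params)"
	 * would wrongly accept indexes 4..15 and read past the array */
	printf("sizeof     = %zu\n", sizeof(params));
	printf("ARRAY_SIZE = %zu\n", ARRAY_SIZE(params));

	if (idx < ARRAY_SIZE(params))
		printf("params[%u] = %d\n", idx, params[idx]);
	else
		printf("index %u rejected\n", idx);
	return 0;
}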
diff --git a/drivers/hid/wacom_sys.c b/drivers/hid/wacom_sys.c
index 44958d7..01b937e 100644
--- a/drivers/hid/wacom_sys.c
+++ b/drivers/hid/wacom_sys.c
@@ -1284,6 +1284,39 @@
return error;
}
+/*
+ * Not all devices report physical dimensions from HID.
+ * Compute the defaults from the hardcoded logical dimensions
+ * and resolution before the driver overwrites them.
+ */
+static void wacom_set_default_phy(struct wacom_features *features)
+{
+ if (features->x_resolution) {
+ features->x_phy = (features->x_max * 100) /
+ features->x_resolution;
+ features->y_phy = (features->y_max * 100) /
+ features->y_resolution;
+ }
+}
+
+static void wacom_calculate_res(struct wacom_features *features)
+{
+ /* set unit to "100th of a mm" for devices not reported by HID */
+ if (!features->unit) {
+ features->unit = 0x11;
+ features->unitExpo = -3;
+ }
+
+ features->x_resolution = wacom_calc_hid_res(features->x_max,
+ features->x_phy,
+ features->unit,
+ features->unitExpo);
+ features->y_resolution = wacom_calc_hid_res(features->y_max,
+ features->y_phy,
+ features->unit,
+ features->unitExpo);
+}
+
static void wacom_wireless_work(struct work_struct *work)
{
struct wacom *wacom = container_of(work, struct wacom, work);
@@ -1341,6 +1374,8 @@
if (wacom_wac1->features.type != INTUOSHT &&
wacom_wac1->features.type != BAMBOO_PT)
wacom_wac1->features.device_type |= WACOM_DEVICETYPE_PAD;
+ wacom_set_default_phy(&wacom_wac1->features);
+ wacom_calculate_res(&wacom_wac1->features);
snprintf(wacom_wac1->pen_name, WACOM_NAME_MAX, "%s (WL) Pen",
wacom_wac1->features.name);
snprintf(wacom_wac1->pad_name, WACOM_NAME_MAX, "%s (WL) Pad",
@@ -1359,7 +1394,9 @@
wacom_wac2->features =
*((struct wacom_features *)id->driver_data);
wacom_wac2->features.pktlen = WACOM_PKGLEN_BBTOUCH3;
+ wacom_set_default_phy(&wacom_wac2->features);
wacom_wac2->features.x_max = wacom_wac2->features.y_max = 4096;
+ wacom_calculate_res(&wacom_wac2->features);
snprintf(wacom_wac2->touch_name, WACOM_NAME_MAX,
"%s (WL) Finger",wacom_wac2->features.name);
snprintf(wacom_wac2->pad_name, WACOM_NAME_MAX,
@@ -1407,39 +1444,6 @@
}
}
-/*
- * Not all devices report physical dimensions from HID.
- * Compute the default from hardcoded logical dimension
- * and resolution before driver overwrites them.
- */
-static void wacom_set_default_phy(struct wacom_features *features)
-{
- if (features->x_resolution) {
- features->x_phy = (features->x_max * 100) /
- features->x_resolution;
- features->y_phy = (features->y_max * 100) /
- features->y_resolution;
- }
-}
-
-static void wacom_calculate_res(struct wacom_features *features)
-{
- /* set unit to "100th of a mm" for devices not reported by HID */
- if (!features->unit) {
- features->unit = 0x11;
- features->unitExpo = -3;
- }
-
- features->x_resolution = wacom_calc_hid_res(features->x_max,
- features->x_phy,
- features->unit,
- features->unitExpo);
- features->y_resolution = wacom_calc_hid_res(features->y_max,
- features->y_phy,
- features->unit,
- features->unitExpo);
-}
-
static size_t wacom_compute_pktlen(struct hid_device *hdev)
{
struct hid_report_enum *report_enum;
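wacom_set_default_phy() and wacom_calculate_res() convert between a logical extent, a physical size in hundredths of a millimetre, and a resolution in units per millimetre: phy = max * 100 / resolution, and the inverse for resolution. A quick check of that arithmetic, assuming wacom_calc_hid_res() reduces to the division shown when the unit is fixed to "100th of a mm" (the sample numbers are mine):

#include <stdio.h>

/* assumed simplification of wacom_calc_hid_res() for unit 0x11 */
static int calc_res(int logical_max, int phy_hundredths_mm)
{
	return logical_max * 100 / phy_hundredths_mm;
}

static int calc_phy(int logical_max, int res_units_per_mm)
{
	return logical_max * 100 / res_units_per_mm;
}

int main(void)
{
	int x_max = 21648;	/* sample logical extent */
	int x_res = 100;	/* 100 units per mm */

	int phy = calc_phy(x_max, x_res);
	printf("phy = %d hundredths of a mm (%.1f mm)\n", phy, phy / 100.0);
	printf("res = %d units/mm (round trip)\n", calc_res(x_max, phy));
	return 0;
}

The round trip is why the fix computes the default phy before the wireless path overwrites x_max/y_max: otherwise the later resolution calculation works from mismatched numbers.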
diff --git a/drivers/infiniband/hw/cxgb4/cq.c b/drivers/infiniband/hw/cxgb4/cq.c
index c7aab48..92d5183 100644
--- a/drivers/infiniband/hw/cxgb4/cq.c
+++ b/drivers/infiniband/hw/cxgb4/cq.c
@@ -814,7 +814,7 @@
printk(KERN_ERR MOD
"Unexpected cqe_status 0x%x for QPID=0x%0x\n",
CQE_STATUS(&cqe), CQE_QPID(&cqe));
- ret = -EINVAL;
+ wc->status = IB_WC_FATAL_ERR;
}
}
out:
diff --git a/drivers/input/keyboard/gpio_keys_polled.c b/drivers/input/keyboard/gpio_keys_polled.c
index 097d721..c6dc644 100644
--- a/drivers/input/keyboard/gpio_keys_polled.c
+++ b/drivers/input/keyboard/gpio_keys_polled.c
@@ -246,7 +246,7 @@
* convert it to descriptor.
*/
if (!button->gpiod && gpio_is_valid(button->gpio)) {
- unsigned flags = 0;
+ unsigned flags = GPIOF_IN;
if (button->active_low)
flags |= GPIOF_ACTIVE_LOW;
diff --git a/drivers/irqchip/irq-crossbar.c b/drivers/irqchip/irq-crossbar.c
index 692fe2b..c12bb93 100644
--- a/drivers/irqchip/irq-crossbar.c
+++ b/drivers/irqchip/irq-crossbar.c
@@ -68,7 +68,9 @@
.irq_mask = irq_chip_mask_parent,
.irq_unmask = irq_chip_unmask_parent,
.irq_retrigger = irq_chip_retrigger_hierarchy,
- .irq_set_wake = irq_chip_set_wake_parent,
+ .irq_set_type = irq_chip_set_type_parent,
+ .flags = IRQCHIP_MASK_ON_SUSPEND |
+ IRQCHIP_SKIP_SET_WAKE,
#ifdef CONFIG_SMP
.irq_set_affinity = irq_chip_set_affinity_parent,
#endif
diff --git a/drivers/md/dm-cache-policy-mq.c b/drivers/md/dm-cache-policy-mq.c
index 3281437..aa1b41c 100644
--- a/drivers/md/dm-cache-policy-mq.c
+++ b/drivers/md/dm-cache-policy-mq.c
@@ -1471,5 +1471,3 @@
MODULE_AUTHOR("Joe Thornber <dm-devel@redhat.com>");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("mq cache policy");
-
-MODULE_ALIAS("dm-cache-default");
diff --git a/drivers/md/dm-cache-policy-smq.c b/drivers/md/dm-cache-policy-smq.c
index 48a4a82..200366c 100644
--- a/drivers/md/dm-cache-policy-smq.c
+++ b/drivers/md/dm-cache-policy-smq.c
@@ -1789,3 +1789,5 @@
MODULE_AUTHOR("Joe Thornber <dm-devel@redhat.com>");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("smq cache policy");
+
+MODULE_ALIAS("dm-cache-default");
diff --git a/drivers/md/dm-thin-metadata.c b/drivers/md/dm-thin-metadata.c
index 48dfe3c..6ba47cf 100644
--- a/drivers/md/dm-thin-metadata.c
+++ b/drivers/md/dm-thin-metadata.c
@@ -1293,8 +1293,8 @@
return r;
disk_super = dm_block_data(copy);
- dm_sm_dec_block(pmd->metadata_sm, le64_to_cpu(disk_super->data_mapping_root));
- dm_sm_dec_block(pmd->metadata_sm, le64_to_cpu(disk_super->device_details_root));
+ dm_btree_del(&pmd->info, le64_to_cpu(disk_super->data_mapping_root));
+ dm_btree_del(&pmd->details_info, le64_to_cpu(disk_super->device_details_root));
dm_sm_dec_block(pmd->metadata_sm, held_root);
return dm_tm_unlock(pmd->tm, copy);
diff --git a/drivers/md/persistent-data/dm-btree-internal.h b/drivers/md/persistent-data/dm-btree-internal.h
index bf2b80d..8731b6e 100644
--- a/drivers/md/persistent-data/dm-btree-internal.h
+++ b/drivers/md/persistent-data/dm-btree-internal.h
@@ -138,4 +138,10 @@
extern struct dm_block_validator btree_node_validator;
+/*
+ * Value type for upper levels of multi-level btrees.
+ */
+extern void init_le64_type(struct dm_transaction_manager *tm,
+ struct dm_btree_value_type *vt);
+
#endif /* DM_BTREE_INTERNAL_H */
diff --git a/drivers/md/persistent-data/dm-btree-remove.c b/drivers/md/persistent-data/dm-btree-remove.c
index 9ca9ecc..4222f77 100644
--- a/drivers/md/persistent-data/dm-btree-remove.c
+++ b/drivers/md/persistent-data/dm-btree-remove.c
@@ -544,14 +544,6 @@
return r;
}
-static struct dm_btree_value_type le64_type = {
- .context = NULL,
- .size = sizeof(__le64),
- .inc = NULL,
- .dec = NULL,
- .equal = NULL
-};
-
int dm_btree_remove(struct dm_btree_info *info, dm_block_t root,
uint64_t *keys, dm_block_t *new_root)
{
@@ -559,12 +551,14 @@
int index = 0, r = 0;
struct shadow_spine spine;
struct btree_node *n;
+ struct dm_btree_value_type le64_vt;
+ init_le64_type(info->tm, &le64_vt);
init_shadow_spine(&spine, info);
for (level = 0; level < info->levels; level++) {
r = remove_raw(&spine, info,
(level == last_level ?
- &info->value_type : &le64_type),
+ &info->value_type : &le64_vt),
root, keys[level], (unsigned *)&index);
if (r < 0)
break;
@@ -654,11 +648,13 @@
int index = 0, r = 0;
struct shadow_spine spine;
struct btree_node *n;
+ struct dm_btree_value_type le64_vt;
uint64_t k;
+ init_le64_type(info->tm, &le64_vt);
init_shadow_spine(&spine, info);
for (level = 0; level < last_level; level++) {
- r = remove_raw(&spine, info, &le64_type,
+ r = remove_raw(&spine, info, &le64_vt,
root, keys[level], (unsigned *) &index);
if (r < 0)
goto out;
diff --git a/drivers/md/persistent-data/dm-btree-spine.c b/drivers/md/persistent-data/dm-btree-spine.c
index 1b5e13e..0dee514 100644
--- a/drivers/md/persistent-data/dm-btree-spine.c
+++ b/drivers/md/persistent-data/dm-btree-spine.c
@@ -249,3 +249,40 @@
{
return s->root;
}
+
+static void le64_inc(void *context, const void *value_le)
+{
+ struct dm_transaction_manager *tm = context;
+ __le64 v_le;
+
+ memcpy(&v_le, value_le, sizeof(v_le));
+ dm_tm_inc(tm, le64_to_cpu(v_le));
+}
+
+static void le64_dec(void *context, const void *value_le)
+{
+ struct dm_transaction_manager *tm = context;
+ __le64 v_le;
+
+ memcpy(&v_le, value_le, sizeof(v_le));
+ dm_tm_dec(tm, le64_to_cpu(v_le));
+}
+
+static int le64_equal(void *context, const void *value1_le, const void *value2_le)
+{
+ __le64 v1_le, v2_le;
+
+ memcpy(&v1_le, value1_le, sizeof(v1_le));
+ memcpy(&v2_le, value2_le, sizeof(v2_le));
+ return v1_le == v2_le;
+}
+
+void init_le64_type(struct dm_transaction_manager *tm,
+ struct dm_btree_value_type *vt)
+{
+ vt->context = tm;
+ vt->size = sizeof(__le64);
+ vt->inc = le64_inc;
+ vt->dec = le64_dec;
+ vt->equal = le64_equal;
+}
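init_le64_type() gives the upper btree levels real inc/dec/equal callbacks so shadowed internal nodes adjust block reference counts; note the memcpy into a local __le64 before converting, since the value may sit unaligned inside a node. A userspace sketch of that safe unaligned little-endian read (using glibc's <endian.h>; the buffer layout is invented):

#include <endian.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* read a little-endian u64 that may be misaligned inside a node buffer */
static uint64_t get_unaligned_le64(const void *p)
{
	uint64_t v_le;

	memcpy(&v_le, p, sizeof(v_le));	/* no alignment assumption */
	return le64toh(v_le);
}

int main(void)
{
	unsigned char node[16] = { 0 };
	uint64_t block = 0x1122334455667788ULL;
	uint64_t le = htole64(block);

	memcpy(node + 3, &le, sizeof(le));	/* deliberately misaligned */
	printf("block = %#llx\n",
	       (unsigned long long)get_unaligned_le64(node + 3));
	return 0;
}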
diff --git a/drivers/md/persistent-data/dm-btree.c b/drivers/md/persistent-data/dm-btree.c
index fdd3793..c7726ce 100644
--- a/drivers/md/persistent-data/dm-btree.c
+++ b/drivers/md/persistent-data/dm-btree.c
@@ -667,12 +667,7 @@
struct btree_node *n;
struct dm_btree_value_type le64_type;
- le64_type.context = NULL;
- le64_type.size = sizeof(__le64);
- le64_type.inc = NULL;
- le64_type.dec = NULL;
- le64_type.equal = NULL;
-
+ init_le64_type(info->tm, &le64_type);
init_shadow_spine(&spine, info);
for (level = 0; level < (info->levels - 1); level++) {
diff --git a/drivers/media/dvb-frontends/Kconfig b/drivers/media/dvb-frontends/Kconfig
index 0d35f58..5ab90f3 100644
--- a/drivers/media/dvb-frontends/Kconfig
+++ b/drivers/media/dvb-frontends/Kconfig
@@ -240,7 +240,7 @@
config DVB_TS2020
tristate "Montage Technology TS2020 based tuners"
- depends on DVB_CORE
+ depends on DVB_CORE && I2C
select REGMAP_I2C
default m if !MEDIA_SUBDRV_AUTOSELECT
help
diff --git a/drivers/media/pci/cobalt/Kconfig b/drivers/media/pci/cobalt/Kconfig
index 3be1b2c..6a1c008 100644
--- a/drivers/media/pci/cobalt/Kconfig
+++ b/drivers/media/pci/cobalt/Kconfig
@@ -2,6 +2,7 @@
tristate "Cisco Cobalt support"
depends on VIDEO_V4L2 && I2C && MEDIA_CONTROLLER
depends on PCI_MSI && MTD_COMPLEX_MAPPINGS && GPIOLIB
+ depends on SND
select I2C_ALGOBIT
select VIDEO_ADV7604
select VIDEO_ADV7511
diff --git a/drivers/media/pci/cobalt/cobalt-irq.c b/drivers/media/pci/cobalt/cobalt-irq.c
index dd4bff9..d1f5898 100644
--- a/drivers/media/pci/cobalt/cobalt-irq.c
+++ b/drivers/media/pci/cobalt/cobalt-irq.c
@@ -139,7 +139,7 @@
also know about dropped frames. */
cb->vb.v4l2_buf.sequence = s->sequence++;
vb2_buffer_done(&cb->vb, (skip || s->unstable_frame) ?
- VB2_BUF_STATE_QUEUED : VB2_BUF_STATE_DONE);
+ VB2_BUF_STATE_REQUEUEING : VB2_BUF_STATE_DONE);
}
irqreturn_t cobalt_irq_handler(int irq, void *dev_id)
diff --git a/drivers/media/pci/mantis/mantis_dma.c b/drivers/media/pci/mantis/mantis_dma.c
index 1d59c7e..87990ec 100644
--- a/drivers/media/pci/mantis/mantis_dma.c
+++ b/drivers/media/pci/mantis/mantis_dma.c
@@ -130,10 +130,11 @@
int mantis_dma_init(struct mantis_pci *mantis)
{
- int err = 0;
+ int err;
dprintk(MANTIS_DEBUG, 1, "Mantis DMA init");
- if (mantis_alloc_buffers(mantis) < 0) {
+ err = mantis_alloc_buffers(mantis);
+ if (err < 0) {
dprintk(MANTIS_ERROR, 1, "Error allocating DMA buffer");
/* Stop RISC Engine */
diff --git a/drivers/media/rc/ir-rc5-decoder.c b/drivers/media/rc/ir-rc5-decoder.c
index 8939ebd..84fa6e9 100644
--- a/drivers/media/rc/ir-rc5-decoder.c
+++ b/drivers/media/rc/ir-rc5-decoder.c
@@ -184,125 +184,9 @@
return -EINVAL;
}
-static struct ir_raw_timings_manchester ir_rc5_timings = {
- .leader = RC5_UNIT,
- .pulse_space_start = 0,
- .clock = RC5_UNIT,
- .trailer_space = RC5_UNIT * 10,
-};
-
-static struct ir_raw_timings_manchester ir_rc5x_timings[2] = {
- {
- .leader = RC5_UNIT,
- .pulse_space_start = 0,
- .clock = RC5_UNIT,
- .trailer_space = RC5X_SPACE,
- },
- {
- .clock = RC5_UNIT,
- .trailer_space = RC5_UNIT * 10,
- },
-};
-
-static struct ir_raw_timings_manchester ir_rc5_sz_timings = {
- .leader = RC5_UNIT,
- .pulse_space_start = 0,
- .clock = RC5_UNIT,
- .trailer_space = RC5_UNIT * 10,
-};
-
-static int ir_rc5_validate_filter(const struct rc_scancode_filter *scancode,
- unsigned int important_bits)
-{
- /* all important bits of scancode should be set in mask */
- if (~scancode->mask & important_bits)
- return -EINVAL;
- /* extra bits in mask should be zero in data */
- if (scancode->mask & scancode->data & ~important_bits)
- return -EINVAL;
- return 0;
-}
-
-/**
- * ir_rc5_encode() - Encode a scancode as a stream of raw events
- *
- * @protocols: allowed protocols
- * @scancode: scancode filter describing scancode (helps distinguish between
- * protocol subtypes when scancode is ambiguous)
- * @events: array of raw ir events to write into
- * @max: maximum size of @events
- *
- * Returns: The number of events written.
- * -ENOBUFS if there isn't enough space in the array to fit the
- * encoding. In this case all @max events will have been written.
- * -EINVAL if the scancode is ambiguous or invalid.
- */
-static int ir_rc5_encode(u64 protocols,
- const struct rc_scancode_filter *scancode,
- struct ir_raw_event *events, unsigned int max)
-{
- int ret;
- struct ir_raw_event *e = events;
- unsigned int data, xdata, command, commandx, system;
-
- /* Detect protocol and convert scancode to raw data */
- if (protocols & RC_BIT_RC5 &&
- !ir_rc5_validate_filter(scancode, 0x1f7f)) {
- /* decode scancode */
- command = (scancode->data & 0x003f) >> 0;
- commandx = (scancode->data & 0x0040) >> 6;
- system = (scancode->data & 0x1f00) >> 8;
- /* encode data */
- data = !commandx << 12 | system << 6 | command;
-
- /* Modulate the data */
- ret = ir_raw_gen_manchester(&e, max, &ir_rc5_timings, RC5_NBITS,
- data);
- if (ret < 0)
- return ret;
- } else if (protocols & RC_BIT_RC5X &&
- !ir_rc5_validate_filter(scancode, 0x1f7f3f)) {
- /* decode scancode */
- xdata = (scancode->data & 0x00003f) >> 0;
- command = (scancode->data & 0x003f00) >> 8;
- commandx = (scancode->data & 0x004000) >> 14;
- system = (scancode->data & 0x1f0000) >> 16;
- /* commandx and system overlap, bits must match when encoded */
- if (commandx == (system & 0x1))
- return -EINVAL;
- /* encode data */
- data = 1 << 18 | system << 12 | command << 6 | xdata;
-
- /* Modulate the data */
- ret = ir_raw_gen_manchester(&e, max, &ir_rc5x_timings[0],
- CHECK_RC5X_NBITS,
- data >> (RC5X_NBITS-CHECK_RC5X_NBITS));
- if (ret < 0)
- return ret;
- ret = ir_raw_gen_manchester(&e, max - (e - events),
- &ir_rc5x_timings[1],
- RC5X_NBITS - CHECK_RC5X_NBITS,
- data);
- if (ret < 0)
- return ret;
- } else if (protocols & RC_BIT_RC5_SZ &&
- !ir_rc5_validate_filter(scancode, 0x2fff)) {
- /* RC5-SZ scancode is raw enough for Manchester as it is */
- ret = ir_raw_gen_manchester(&e, max, &ir_rc5_sz_timings,
- RC5_SZ_NBITS, scancode->data & 0x2fff);
- if (ret < 0)
- return ret;
- } else {
- return -EINVAL;
- }
-
- return e - events;
-}
-
static struct ir_raw_handler rc5_handler = {
.protocols = RC_BIT_RC5 | RC_BIT_RC5X | RC_BIT_RC5_SZ,
.decode = ir_rc5_decode,
- .encode = ir_rc5_encode,
};
static int __init ir_rc5_decode_init(void)
diff --git a/drivers/media/rc/ir-rc6-decoder.c b/drivers/media/rc/ir-rc6-decoder.c
index f9c70ba..d16bc67 100644
--- a/drivers/media/rc/ir-rc6-decoder.c
+++ b/drivers/media/rc/ir-rc6-decoder.c
@@ -291,133 +291,11 @@
return -EINVAL;
}
-static struct ir_raw_timings_manchester ir_rc6_timings[4] = {
- {
- .leader = RC6_PREFIX_PULSE,
- .pulse_space_start = 0,
- .clock = RC6_UNIT,
- .invert = 1,
- .trailer_space = RC6_PREFIX_SPACE,
- },
- {
- .clock = RC6_UNIT,
- .invert = 1,
- },
- {
- .clock = RC6_UNIT * 2,
- .invert = 1,
- },
- {
- .clock = RC6_UNIT,
- .invert = 1,
- .trailer_space = RC6_SUFFIX_SPACE,
- },
-};
-
-static int ir_rc6_validate_filter(const struct rc_scancode_filter *scancode,
- unsigned int important_bits)
-{
- /* all important bits of scancode should be set in mask */
- if (~scancode->mask & important_bits)
- return -EINVAL;
- /* extra bits in mask should be zero in data */
- if (scancode->mask & scancode->data & ~important_bits)
- return -EINVAL;
- return 0;
-}
-
-/**
- * ir_rc6_encode() - Encode a scancode as a stream of raw events
- *
- * @protocols: allowed protocols
- * @scancode: scancode filter describing scancode (helps distinguish between
- * protocol subtypes when scancode is ambiguous)
- * @events: array of raw ir events to write into
- * @max: maximum size of @events
- *
- * Returns: The number of events written.
- * -ENOBUFS if there isn't enough space in the array to fit the
- * encoding. In this case all @max events will have been written.
- * -EINVAL if the scancode is ambiguous or invalid.
- */
-static int ir_rc6_encode(u64 protocols,
- const struct rc_scancode_filter *scancode,
- struct ir_raw_event *events, unsigned int max)
-{
- int ret;
- struct ir_raw_event *e = events;
-
- if (protocols & RC_BIT_RC6_0 &&
- !ir_rc6_validate_filter(scancode, 0xffff)) {
-
- /* Modulate the preamble */
- ret = ir_raw_gen_manchester(&e, max, &ir_rc6_timings[0], 0, 0);
- if (ret < 0)
- return ret;
-
- /* Modulate the header (Start Bit & Mode-0) */
- ret = ir_raw_gen_manchester(&e, max - (e - events),
- &ir_rc6_timings[1],
- RC6_HEADER_NBITS, (1 << 3));
- if (ret < 0)
- return ret;
-
- /* Modulate Trailer Bit */
- ret = ir_raw_gen_manchester(&e, max - (e - events),
- &ir_rc6_timings[2], 1, 0);
- if (ret < 0)
- return ret;
-
- /* Modulate rest of the data */
- ret = ir_raw_gen_manchester(&e, max - (e - events),
- &ir_rc6_timings[3], RC6_0_NBITS,
- scancode->data);
- if (ret < 0)
- return ret;
-
- } else if (protocols & (RC_BIT_RC6_6A_20 | RC_BIT_RC6_6A_24 |
- RC_BIT_RC6_6A_32 | RC_BIT_RC6_MCE) &&
- !ir_rc6_validate_filter(scancode, 0x8fffffff)) {
-
- /* Modulate the preamble */
- ret = ir_raw_gen_manchester(&e, max, &ir_rc6_timings[0], 0, 0);
- if (ret < 0)
- return ret;
-
- /* Modulate the header (Start Bit & Header-version 6 */
- ret = ir_raw_gen_manchester(&e, max - (e - events),
- &ir_rc6_timings[1],
- RC6_HEADER_NBITS, (1 << 3 | 6));
- if (ret < 0)
- return ret;
-
- /* Modulate Trailer Bit */
- ret = ir_raw_gen_manchester(&e, max - (e - events),
- &ir_rc6_timings[2], 1, 0);
- if (ret < 0)
- return ret;
-
- /* Modulate rest of the data */
- ret = ir_raw_gen_manchester(&e, max - (e - events),
- &ir_rc6_timings[3],
- fls(scancode->mask),
- scancode->data);
- if (ret < 0)
- return ret;
-
- } else {
- return -EINVAL;
- }
-
- return e - events;
-}
-
static struct ir_raw_handler rc6_handler = {
.protocols = RC_BIT_RC6_0 | RC_BIT_RC6_6A_20 |
RC_BIT_RC6_6A_24 | RC_BIT_RC6_6A_32 |
RC_BIT_RC6_MCE,
.decode = ir_rc6_decode,
- .encode = ir_rc6_encode,
};
static int __init ir_rc6_decode_init(void)
diff --git a/drivers/media/rc/nuvoton-cir.c b/drivers/media/rc/nuvoton-cir.c
index baeb597..85af7a8 100644
--- a/drivers/media/rc/nuvoton-cir.c
+++ b/drivers/media/rc/nuvoton-cir.c
@@ -526,130 +526,6 @@
return 0;
}
-static int nvt_write_wakeup_codes(struct rc_dev *dev,
- const u8 *wakeup_sample_buf, int count)
-{
- int i = 0;
- u8 reg, reg_learn_mode;
- unsigned long flags;
- struct nvt_dev *nvt = dev->priv;
-
- nvt_dbg_wake("writing wakeup samples");
-
- reg = nvt_cir_wake_reg_read(nvt, CIR_WAKE_IRCON);
- reg_learn_mode = reg & ~CIR_WAKE_IRCON_MODE0;
- reg_learn_mode |= CIR_WAKE_IRCON_MODE1;
-
- /* Lock the learn area to prevent racing with wake-isr */
- spin_lock_irqsave(&nvt->nvt_lock, flags);
-
- /* Enable fifo writes */
- nvt_cir_wake_reg_write(nvt, reg_learn_mode, CIR_WAKE_IRCON);
-
- /* Clear cir wake rx fifo */
- nvt_clear_cir_wake_fifo(nvt);
-
- if (count > WAKE_FIFO_LEN) {
- nvt_dbg_wake("HW FIFO too small for all wake samples");
- count = WAKE_FIFO_LEN;
- }
-
- if (count)
- pr_info("Wake samples (%d) =", count);
- else
- pr_info("Wake sample fifo cleared");
-
- /* Write wake samples to fifo */
- for (i = 0; i < count; i++) {
- pr_cont(" %02x", wakeup_sample_buf[i]);
- nvt_cir_wake_reg_write(nvt, wakeup_sample_buf[i],
- CIR_WAKE_WR_FIFO_DATA);
- }
- pr_cont("\n");
-
- /* Switch cir to wakeup mode and disable fifo writing */
- nvt_cir_wake_reg_write(nvt, reg, CIR_WAKE_IRCON);
-
- /* Set number of bytes needed for wake */
- nvt_cir_wake_reg_write(nvt, count ? count :
- CIR_WAKE_FIFO_CMP_BYTES,
- CIR_WAKE_FIFO_CMP_DEEP);
-
- spin_unlock_irqrestore(&nvt->nvt_lock, flags);
-
- return 0;
-}
-
-static int nvt_ir_raw_set_wakeup_filter(struct rc_dev *dev,
- struct rc_scancode_filter *sc_filter)
-{
- u8 *reg_buf;
- u8 buf_val;
- int i, ret, count;
- unsigned int val;
- struct ir_raw_event *raw;
- bool complete;
-
- /* Require both mask and data to be set before actually committing */
- if (!sc_filter->mask || !sc_filter->data)
- return 0;
-
- raw = kmalloc_array(WAKE_FIFO_LEN, sizeof(*raw), GFP_KERNEL);
- if (!raw)
- return -ENOMEM;
-
- ret = ir_raw_encode_scancode(dev->enabled_wakeup_protocols, sc_filter,
- raw, WAKE_FIFO_LEN);
- complete = (ret != -ENOBUFS);
- if (!complete)
- ret = WAKE_FIFO_LEN;
- else if (ret < 0)
- goto out_raw;
-
- reg_buf = kmalloc_array(WAKE_FIFO_LEN, sizeof(*reg_buf), GFP_KERNEL);
- if (!reg_buf) {
- ret = -ENOMEM;
- goto out_raw;
- }
-
- /* Inspect the ir samples */
- for (i = 0, count = 0; i < ret && count < WAKE_FIFO_LEN; ++i) {
- val = NS_TO_US((raw[i]).duration) / SAMPLE_PERIOD;
-
- /* Split too large values into several smaller ones */
- while (val > 0 && count < WAKE_FIFO_LEN) {
-
- /* Skip last value for better comparison tolerance */
- if (complete && i == ret - 1 && val < BUF_LEN_MASK)
- break;
-
- /* Clamp values to BUF_LEN_MASK at most */
- buf_val = (val > BUF_LEN_MASK) ? BUF_LEN_MASK : val;
-
- reg_buf[count] = buf_val;
- val -= buf_val;
- if ((raw[i]).pulse)
- reg_buf[count] |= BUF_PULSE_BIT;
- count++;
- }
- }
-
- ret = nvt_write_wakeup_codes(dev, reg_buf, count);
-
- kfree(reg_buf);
-out_raw:
- kfree(raw);
-
- return ret;
-}
-
-/* Dummy implementation. nuvoton is agnostic to the protocol used */
-static int nvt_ir_raw_change_wakeup_protocol(struct rc_dev *dev,
- u64 *rc_type)
-{
- return 0;
-}
-
/*
* nvt_tx_ir
*
@@ -1167,14 +1043,11 @@
/* Set up the rc device */
rdev->priv = nvt;
rdev->driver_type = RC_DRIVER_IR_RAW;
- rdev->encode_wakeup = true;
rdev->allowed_protocols = RC_BIT_ALL;
rdev->open = nvt_open;
rdev->close = nvt_close;
rdev->tx_ir = nvt_tx_ir;
rdev->s_tx_carrier = nvt_set_tx_carrier;
- rdev->s_wakeup_filter = nvt_ir_raw_set_wakeup_filter;
- rdev->change_wakeup_protocol = nvt_ir_raw_change_wakeup_protocol;
rdev->input_name = "Nuvoton w836x7hg Infrared Remote Transceiver";
rdev->input_phys = "nuvoton/cir0";
rdev->input_id.bustype = BUS_HOST;
diff --git a/drivers/media/rc/nuvoton-cir.h b/drivers/media/rc/nuvoton-cir.h
index 9d0e161..e1cf23c 100644
--- a/drivers/media/rc/nuvoton-cir.h
+++ b/drivers/media/rc/nuvoton-cir.h
@@ -63,7 +63,6 @@
*/
#define TX_BUF_LEN 256
#define RX_BUF_LEN 32
-#define WAKE_FIFO_LEN 67
struct nvt_dev {
struct pnp_dev *pdev;
diff --git a/drivers/media/rc/rc-core-priv.h b/drivers/media/rc/rc-core-priv.h
index 4b994aa..b68d4f76 100644
--- a/drivers/media/rc/rc-core-priv.h
+++ b/drivers/media/rc/rc-core-priv.h
@@ -25,8 +25,6 @@
u64 protocols; /* which are handled by this handler */
int (*decode)(struct rc_dev *dev, struct ir_raw_event event);
- int (*encode)(u64 protocols, const struct rc_scancode_filter *scancode,
- struct ir_raw_event *events, unsigned int max);
/* These two should only be used by the lirc decoder */
int (*raw_register)(struct rc_dev *dev);
@@ -152,44 +150,10 @@
#define TO_US(duration) DIV_ROUND_CLOSEST((duration), 1000)
#define TO_STR(is_pulse) ((is_pulse) ? "pulse" : "space")
-/* functions for IR encoders */
-
-static inline void init_ir_raw_event_duration(struct ir_raw_event *ev,
- unsigned int pulse,
- u32 duration)
-{
- init_ir_raw_event(ev);
- ev->duration = duration;
- ev->pulse = pulse;
-}
-
-/**
- * struct ir_raw_timings_manchester - Manchester coding timings
- * @leader: duration of leader pulse (if any) 0 if continuing
- * existing signal (see @pulse_space_start)
- * @pulse_space_start: 1 for starting with pulse (0 for starting with space)
- * @clock: duration of each pulse/space in ns
- * @invert: if set clock logic is inverted
- * (0 = space + pulse, 1 = pulse + space)
- * @trailer_space: duration of trailer space in ns
- */
-struct ir_raw_timings_manchester {
- unsigned int leader;
- unsigned int pulse_space_start:1;
- unsigned int clock;
- unsigned int invert:1;
- unsigned int trailer_space;
-};
-
-int ir_raw_gen_manchester(struct ir_raw_event **ev, unsigned int max,
- const struct ir_raw_timings_manchester *timings,
- unsigned int n, unsigned int data);
-
/*
* Routines from rc-raw.c to be used internally and by decoders
*/
u64 ir_raw_get_allowed_protocols(void);
-u64 ir_raw_get_encode_protocols(void);
int ir_raw_event_register(struct rc_dev *dev);
void ir_raw_event_unregister(struct rc_dev *dev);
int ir_raw_handler_register(struct ir_raw_handler *ir_raw_handler);
diff --git a/drivers/media/rc/rc-ir-raw.c b/drivers/media/rc/rc-ir-raw.c
index b9e4645..b732ac6 100644
--- a/drivers/media/rc/rc-ir-raw.c
+++ b/drivers/media/rc/rc-ir-raw.c
@@ -30,7 +30,6 @@
static DEFINE_MUTEX(ir_raw_handler_lock);
static LIST_HEAD(ir_raw_handler_list);
static u64 available_protocols;
-static u64 encode_protocols;
static int ir_raw_event_thread(void *data)
{
@@ -241,146 +240,12 @@
return protocols;
}
-/* used internally by the sysfs interface */
-u64
-ir_raw_get_encode_protocols(void)
-{
- u64 protocols;
-
- mutex_lock(&ir_raw_handler_lock);
- protocols = encode_protocols;
- mutex_unlock(&ir_raw_handler_lock);
- return protocols;
-}
-
static int change_protocol(struct rc_dev *dev, u64 *rc_type)
{
/* the caller will update dev->enabled_protocols */
return 0;
}
-/**
- * ir_raw_gen_manchester() - Encode data with Manchester (bi-phase) modulation.
- * @ev: Pointer to pointer to next free event. *@ev is incremented for
- * each raw event filled.
- * @max: Maximum number of raw events to fill.
- * @timings: Manchester modulation timings.
- * @n: Number of bits of data.
- * @data: Data bits to encode.
- *
- * Encodes the @n least significant bits of @data using Manchester (bi-phase)
- * modulation with the timing characteristics described by @timings, writing up
- * to @max raw IR events using the *@ev pointer.
- *
- * Returns: 0 on success.
- * -ENOBUFS if there isn't enough space in the array to fit the
- * full encoded data. In this case all @max events will have been
- * written.
- */
-int ir_raw_gen_manchester(struct ir_raw_event **ev, unsigned int max,
- const struct ir_raw_timings_manchester *timings,
- unsigned int n, unsigned int data)
-{
- bool need_pulse;
- unsigned int i;
- int ret = -ENOBUFS;
-
- i = 1 << (n - 1);
-
- if (timings->leader) {
- if (!max--)
- return ret;
- if (timings->pulse_space_start) {
- init_ir_raw_event_duration((*ev)++, 1, timings->leader);
-
- if (!max--)
- return ret;
- init_ir_raw_event_duration((*ev), 0, timings->leader);
- } else {
- init_ir_raw_event_duration((*ev), 1, timings->leader);
- }
- i >>= 1;
- } else {
- /* continue existing signal */
- --(*ev);
- }
- /* from here on *ev will point to the last event rather than the next */
-
- while (n && i > 0) {
- need_pulse = !(data & i);
- if (timings->invert)
- need_pulse = !need_pulse;
- if (need_pulse == !!(*ev)->pulse) {
- (*ev)->duration += timings->clock;
- } else {
- if (!max--)
- goto nobufs;
- init_ir_raw_event_duration(++(*ev), need_pulse,
- timings->clock);
- }
-
- if (!max--)
- goto nobufs;
- init_ir_raw_event_duration(++(*ev), !need_pulse,
- timings->clock);
- i >>= 1;
- }
-
- if (timings->trailer_space) {
- if (!(*ev)->pulse)
- (*ev)->duration += timings->trailer_space;
- else if (!max--)
- goto nobufs;
- else
- init_ir_raw_event_duration(++(*ev), 0,
- timings->trailer_space);
- }
-
- ret = 0;
-nobufs:
- /* point to the next event rather than last event before returning */
- ++(*ev);
- return ret;
-}
-EXPORT_SYMBOL(ir_raw_gen_manchester);
-
-/**
- * ir_raw_encode_scancode() - Encode a scancode as raw events
- *
- * @protocols: permitted protocols
- * @scancode: scancode filter describing a single scancode
- * @events: array of raw events to write into
- * @max: max number of raw events
- *
- * Attempts to encode the scancode as raw events.
- *
- * Returns: The number of events written.
- * -ENOBUFS if there isn't enough space in the array to fit the
- * encoding. In this case all @max events will have been written.
- * -EINVAL if the scancode is ambiguous or invalid, or if no
- * compatible encoder was found.
- */
-int ir_raw_encode_scancode(u64 protocols,
- const struct rc_scancode_filter *scancode,
- struct ir_raw_event *events, unsigned int max)
-{
- struct ir_raw_handler *handler;
- int ret = -EINVAL;
-
- mutex_lock(&ir_raw_handler_lock);
- list_for_each_entry(handler, &ir_raw_handler_list, list) {
- if (handler->protocols & protocols && handler->encode) {
- ret = handler->encode(protocols, scancode, events, max);
- if (ret >= 0 || ret == -ENOBUFS)
- break;
- }
- }
- mutex_unlock(&ir_raw_handler_lock);
-
- return ret;
-}
-EXPORT_SYMBOL(ir_raw_encode_scancode);
-
/*
* Used to (un)register raw event clients
*/
@@ -463,8 +328,6 @@
list_for_each_entry(raw, &ir_raw_client_list, list)
ir_raw_handler->raw_register(raw->dev);
available_protocols |= ir_raw_handler->protocols;
- if (ir_raw_handler->encode)
- encode_protocols |= ir_raw_handler->protocols;
mutex_unlock(&ir_raw_handler_lock);
return 0;
@@ -481,8 +344,6 @@
list_for_each_entry(raw, &ir_raw_client_list, list)
ir_raw_handler->raw_unregister(raw->dev);
available_protocols &= ~ir_raw_handler->protocols;
- if (ir_raw_handler->encode)
- encode_protocols &= ~ir_raw_handler->protocols;
mutex_unlock(&ir_raw_handler_lock);
}
EXPORT_SYMBOL(ir_raw_handler_unregister);
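The Manchester helper deleted above encodes each bit as a pair of half-clock periods (space+pulse or pulse+space depending on the invert flag) and merges adjacent half-periods of the same polarity into one event. A compact standalone version of that bi-phase encoding, without the leader/trailer handling (the polarity convention and durations here are my assumptions, not the removed helper's exact behavior):

#include <stdio.h>

struct ir_event {
	int pulse;		/* 1 = mark, 0 = space */
	unsigned int duration;	/* in clock units */
};

/* encode the n LSBs of data, MSB first; invert flips the half-bit order */
static int gen_manchester(struct ir_event *ev, int max,
			  unsigned int clock, int invert,
			  unsigned int n, unsigned int data)
{
	int count = 0;

	for (unsigned int i = 1u << (n - 1); i; i >>= 1) {
		int first = ((data & i) ? 1 : 0) ^ invert;

		/* merge with the previous event if polarity matches */
		if (count && ev[count - 1].pulse == first)
			ev[count - 1].duration += clock;
		else if (count < max)
			ev[count++] = (struct ir_event){ first, clock };
		else
			return -1;

		if (count < max)
			ev[count++] = (struct ir_event){ !first, clock };
		else
			return -1;
	}
	return count;
}

int main(void)
{
	struct ir_event ev[32];
	/* 889 approximates an RC-5 unit in microseconds */
	int n = gen_manchester(ev, 32, 889, 0, 4, 0xA);

	for (int i = 0; i < n; i++)
		printf("%s %u\n", ev[i].pulse ? "pulse" : "space",
		       ev[i].duration);
	return n < 0;
}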
diff --git a/drivers/media/rc/rc-loopback.c b/drivers/media/rc/rc-loopback.c
index d8bdf63..63dace8 100644
--- a/drivers/media/rc/rc-loopback.c
+++ b/drivers/media/rc/rc-loopback.c
@@ -26,7 +26,6 @@
#include <linux/device.h>
#include <linux/module.h>
#include <linux/sched.h>
-#include <linux/slab.h>
#include <media/rc-core.h>
#define DRIVER_NAME "rc-loopback"
@@ -177,39 +176,6 @@
return 0;
}
-static int loop_set_wakeup_filter(struct rc_dev *dev,
- struct rc_scancode_filter *sc_filter)
-{
- static const unsigned int max = 512;
- struct ir_raw_event *raw;
- int ret;
- int i;
-
- /* fine to disable filter */
- if (!sc_filter->mask)
- return 0;
-
- /* encode the specified filter and loop it back */
- raw = kmalloc_array(max, sizeof(*raw), GFP_KERNEL);
- ret = ir_raw_encode_scancode(dev->enabled_wakeup_protocols, sc_filter,
- raw, max);
- /* still loop back the partial raw IR even if it's incomplete */
- if (ret == -ENOBUFS)
- ret = max;
- if (ret >= 0) {
- /* do the loopback */
- for (i = 0; i < ret; ++i)
- ir_raw_event_store(dev, &raw[i]);
- ir_raw_event_handle(dev);
-
- ret = 0;
- }
-
- kfree(raw);
-
- return ret;
-}
-
static int __init loop_init(void)
{
struct rc_dev *rc;
@@ -229,7 +195,6 @@
rc->map_name = RC_MAP_EMPTY;
rc->priv = &loopdev;
rc->driver_type = RC_DRIVER_IR_RAW;
- rc->encode_wakeup = true;
rc->allowed_protocols = RC_BIT_ALL;
rc->timeout = 100 * 1000 * 1000; /* 100 ms */
rc->min_timeout = 1;
@@ -244,7 +209,6 @@
rc->s_idle = loop_set_idle;
rc->s_learning_mode = loop_set_learning_mode;
rc->s_carrier_report = loop_set_carrier_report;
- rc->s_wakeup_filter = loop_set_wakeup_filter;
loopdev.txmask = RXMASK_REGULAR;
loopdev.txcarrier = 36000;
diff --git a/drivers/media/rc/rc-main.c b/drivers/media/rc/rc-main.c
index 9d015db..0ff388a 100644
--- a/drivers/media/rc/rc-main.c
+++ b/drivers/media/rc/rc-main.c
@@ -865,8 +865,6 @@
} else {
enabled = dev->enabled_wakeup_protocols;
allowed = dev->allowed_wakeup_protocols;
- if (dev->encode_wakeup && !allowed)
- allowed = ir_raw_get_encode_protocols();
}
mutex_unlock(&dev->lock);
@@ -1408,16 +1406,13 @@
path ? path : "N/A");
kfree(path);
- if (dev->driver_type == RC_DRIVER_IR_RAW || dev->encode_wakeup) {
+ if (dev->driver_type == RC_DRIVER_IR_RAW) {
/* Load raw decoders, if they aren't already */
if (!raw_init) {
IR_dprintk(1, "Loading raw decoders\n");
ir_raw_init();
raw_init = true;
}
- }
-
- if (dev->driver_type == RC_DRIVER_IR_RAW) {
/* calls ir_register_device so unlock mutex here*/
mutex_unlock(&dev->lock);
rc = ir_raw_event_register(dev);
diff --git a/drivers/media/v4l2-core/videobuf2-core.c b/drivers/media/v4l2-core/videobuf2-core.c
index 93b3154..a14c428 100644
--- a/drivers/media/v4l2-core/videobuf2-core.c
+++ b/drivers/media/v4l2-core/videobuf2-core.c
@@ -715,6 +715,7 @@
break;
case VB2_BUF_STATE_PREPARING:
case VB2_BUF_STATE_DEQUEUED:
+ case VB2_BUF_STATE_REQUEUEING:
/* nothing */
break;
}
@@ -1182,7 +1183,8 @@
if (WARN_ON(state != VB2_BUF_STATE_DONE &&
state != VB2_BUF_STATE_ERROR &&
- state != VB2_BUF_STATE_QUEUED))
+ state != VB2_BUF_STATE_QUEUED &&
+ state != VB2_BUF_STATE_REQUEUEING))
state = VB2_BUF_STATE_ERROR;
#ifdef CONFIG_VIDEO_ADV_DEBUG
@@ -1199,22 +1201,30 @@
for (plane = 0; plane < vb->num_planes; ++plane)
call_void_memop(vb, finish, vb->planes[plane].mem_priv);
- /* Add the buffer to the done buffers list */
spin_lock_irqsave(&q->done_lock, flags);
- vb->state = state;
- if (state != VB2_BUF_STATE_QUEUED)
+ if (state == VB2_BUF_STATE_QUEUED ||
+ state == VB2_BUF_STATE_REQUEUEING) {
+ vb->state = VB2_BUF_STATE_QUEUED;
+ } else {
+ /* Add the buffer to the done buffers list */
list_add_tail(&vb->done_entry, &q->done_list);
+ vb->state = state;
+ }
atomic_dec(&q->owned_by_drv_count);
spin_unlock_irqrestore(&q->done_lock, flags);
- if (state == VB2_BUF_STATE_QUEUED) {
+ switch (state) {
+ case VB2_BUF_STATE_QUEUED:
+ return;
+ case VB2_BUF_STATE_REQUEUEING:
if (q->start_streaming_called)
__enqueue_in_driver(vb);
return;
+ default:
+ /* Inform any processes that may be waiting for buffers */
+ wake_up(&q->done_wq);
+ break;
}
-
- /* Inform any processes that may be waiting for buffers */
- wake_up(&q->done_wq);
}
EXPORT_SYMBOL_GPL(vb2_buffer_done);
@@ -1244,19 +1254,19 @@
static void vb2_warn_zero_bytesused(struct vb2_buffer *vb)
{
- static bool __check_once __read_mostly;
+ static bool check_once;
- if (__check_once)
+ if (check_once)
return;
- __check_once = true;
- __WARN();
+ check_once = true;
+ WARN_ON(1);
- pr_warn_once("use of bytesused == 0 is deprecated and will be removed in the future,\n");
+ pr_warn("use of bytesused == 0 is deprecated and will be removed in the future,\n");
if (vb->vb2_queue->allow_zero_bytesused)
- pr_warn_once("use VIDIOC_DECODER_CMD(V4L2_DEC_CMD_STOP) instead.\n");
+ pr_warn("use VIDIOC_DECODER_CMD(V4L2_DEC_CMD_STOP) instead.\n");
else
- pr_warn_once("use the actual size instead.\n");
+ pr_warn("use the actual size instead.\n");
}
/**
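vb2_buffer_done() now folds REQUEUEING into the "back to the driver" path: QUEUED and REQUEUEING both leave the buffer in the queued state, but only REQUEUEING hands it straight back via __enqueue_in_driver(), and only real completions reach the done list and wake waiters. A toy model of that dispatch (state names mirror the patch; everything else is scaffolding):

#include <stdio.h>

enum buf_state { DONE, ERROR, QUEUED, REQUEUEING };

static void add_to_done_list(int buf)  { printf("buf %d -> done list\n", buf); }
static void wake_waiters(void)         { printf("wake_up(done_wq)\n"); }
static void enqueue_in_driver(int buf) { printf("buf %d -> back to driver\n", buf); }

static void buffer_done(int buf, enum buf_state state, int streaming)
{
	switch (state) {
	case QUEUED:
		return;			/* stays queued, nothing else to do */
	case REQUEUEING:
		if (streaming)
			enqueue_in_driver(buf);
		return;
	default:			/* DONE or ERROR */
		add_to_done_list(buf);
		wake_waiters();
	}
}

int main(void)
{
	buffer_done(0, DONE, 1);
	buffer_done(1, REQUEUEING, 1);
	buffer_done(2, QUEUED, 1);
	return 0;
}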
diff --git a/drivers/memory/omap-gpmc.c b/drivers/memory/omap-gpmc.c
index 3a27a84..9426276 100644
--- a/drivers/memory/omap-gpmc.c
+++ b/drivers/memory/omap-gpmc.c
@@ -2245,6 +2245,9 @@
{
int i;
+ if (!gpmc_base)
+ return;
+
gpmc_context.sysconfig = gpmc_read_reg(GPMC_SYSCONFIG);
gpmc_context.irqenable = gpmc_read_reg(GPMC_IRQENABLE);
gpmc_context.timeout_ctrl = gpmc_read_reg(GPMC_TIMEOUT_CONTROL);
@@ -2277,6 +2280,9 @@
{
int i;
+ if (!gpmc_base)
+ return;
+
gpmc_write_reg(GPMC_SYSCONFIG, gpmc_context.sysconfig);
gpmc_write_reg(GPMC_IRQENABLE, gpmc_context.irqenable);
gpmc_write_reg(GPMC_TIMEOUT_CONTROL, gpmc_context.timeout_ctrl);
diff --git a/drivers/mfd/Kconfig b/drivers/mfd/Kconfig
index 6538159..3f68dd2 100644
--- a/drivers/mfd/Kconfig
+++ b/drivers/mfd/Kconfig
@@ -115,7 +115,7 @@
config MFD_CROS_EC_SPI
tristate "ChromeOS Embedded Controller (SPI)"
- depends on MFD_CROS_EC && CROS_EC_PROTO && SPI && OF
+ depends on MFD_CROS_EC && CROS_EC_PROTO && SPI
---help---
If you say Y here, you get support for talking to the ChromeOS EC
diff --git a/drivers/mfd/arizona-core.c b/drivers/mfd/arizona-core.c
index bebf58a..a72ddb29 100644
--- a/drivers/mfd/arizona-core.c
+++ b/drivers/mfd/arizona-core.c
@@ -651,7 +651,7 @@
arizona->has_fully_powered_off = true;
- disable_irq(arizona->irq);
+ disable_irq_nosync(arizona->irq);
arizona_enable_reset(arizona);
regulator_bulk_disable(arizona->num_core_supplies,
arizona->core_supplies);
@@ -1141,10 +1141,6 @@
arizona->pdata.gpio_defaults[i]);
}
- pm_runtime_set_autosuspend_delay(arizona->dev, 100);
- pm_runtime_use_autosuspend(arizona->dev);
- pm_runtime_enable(arizona->dev);
-
/* Chip default */
if (!arizona->pdata.clk32k_src)
arizona->pdata.clk32k_src = ARIZONA_32KZ_MCLK2;
@@ -1245,11 +1241,17 @@
arizona->pdata.spk_fmt[i]);
}
+ pm_runtime_set_active(arizona->dev);
+ pm_runtime_enable(arizona->dev);
+
/* Set up for interrupts */
ret = arizona_irq_init(arizona);
if (ret != 0)
goto err_reset;
+ pm_runtime_set_autosuspend_delay(arizona->dev, 100);
+ pm_runtime_use_autosuspend(arizona->dev);
+
arizona_request_irq(arizona, ARIZONA_IRQ_CLKGEN_ERR, "CLKGEN error",
arizona_clkgen_err, arizona);
arizona_request_irq(arizona, ARIZONA_IRQ_OVERCLOCKED, "Overclocked",
@@ -1278,10 +1280,6 @@
goto err_irq;
}
-#ifdef CONFIG_PM
- regulator_disable(arizona->dcvdd);
-#endif
-
return 0;
err_irq:
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index e1ccefc..a98dd4f 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -786,6 +786,7 @@
slave ? slave->dev->name : "NULL");
if (!slave || !bond->send_peer_notif ||
+ !netif_carrier_ok(bond->dev) ||
test_bit(__LINK_STATE_LINKWATCH_PENDING, &slave->dev->state))
return false;
diff --git a/drivers/net/can/usb/peak_usb/pcan_usb.c b/drivers/net/can/usb/peak_usb/pcan_usb.c
index 6b94007..838545c 100644
--- a/drivers/net/can/usb/peak_usb/pcan_usb.c
+++ b/drivers/net/can/usb/peak_usb/pcan_usb.c
@@ -854,6 +854,18 @@
/*
* describe the PCAN-USB adapter
*/
+static const struct can_bittiming_const pcan_usb_const = {
+ .name = "pcan_usb",
+ .tseg1_min = 1,
+ .tseg1_max = 16,
+ .tseg2_min = 1,
+ .tseg2_max = 8,
+ .sjw_max = 4,
+ .brp_min = 1,
+ .brp_max = 64,
+ .brp_inc = 1,
+};
+
const struct peak_usb_adapter pcan_usb = {
.name = "PCAN-USB",
.device_id = PCAN_USB_PRODUCT_ID,
@@ -862,17 +874,7 @@
.clock = {
.freq = PCAN_USB_CRYSTAL_HZ / 2 ,
},
- .bittiming_const = {
- .name = "pcan_usb",
- .tseg1_min = 1,
- .tseg1_max = 16,
- .tseg2_min = 1,
- .tseg2_max = 8,
- .sjw_max = 4,
- .brp_min = 1,
- .brp_max = 64,
- .brp_inc = 1,
- },
+ .bittiming_const = &pcan_usb_const,
/* size of device private data */
.sizeof_dev_private = sizeof(struct pcan_usb),
diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_core.c b/drivers/net/can/usb/peak_usb/pcan_usb_core.c
index 7921cff..5a2e341 100644
--- a/drivers/net/can/usb/peak_usb/pcan_usb_core.c
+++ b/drivers/net/can/usb/peak_usb/pcan_usb_core.c
@@ -792,9 +792,9 @@
dev->ep_msg_out = peak_usb_adapter->ep_msg_out[ctrl_idx];
dev->can.clock = peak_usb_adapter->clock;
- dev->can.bittiming_const = &peak_usb_adapter->bittiming_const;
+ dev->can.bittiming_const = peak_usb_adapter->bittiming_const;
dev->can.do_set_bittiming = peak_usb_set_bittiming;
- dev->can.data_bittiming_const = &peak_usb_adapter->data_bittiming_const;
+ dev->can.data_bittiming_const = peak_usb_adapter->data_bittiming_const;
dev->can.do_set_data_bittiming = peak_usb_set_data_bittiming;
dev->can.do_set_mode = peak_usb_set_mode;
dev->can.do_get_berr_counter = peak_usb_adapter->do_get_berr_counter;
diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_core.h b/drivers/net/can/usb/peak_usb/pcan_usb_core.h
index 9e624f0..506fe50 100644
--- a/drivers/net/can/usb/peak_usb/pcan_usb_core.h
+++ b/drivers/net/can/usb/peak_usb/pcan_usb_core.h
@@ -48,8 +48,8 @@
u32 device_id;
u32 ctrlmode_supported;
struct can_clock clock;
- const struct can_bittiming_const bittiming_const;
- const struct can_bittiming_const data_bittiming_const;
+ const struct can_bittiming_const * const bittiming_const;
+ const struct can_bittiming_const * const data_bittiming_const;
unsigned int ctrl_count;
int (*intf_probe)(struct usb_interface *intf);
diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_fd.c b/drivers/net/can/usb/peak_usb/pcan_usb_fd.c
index 09d14e7..ce44a033 100644
--- a/drivers/net/can/usb/peak_usb/pcan_usb_fd.c
+++ b/drivers/net/can/usb/peak_usb/pcan_usb_fd.c
@@ -990,6 +990,30 @@
}
/* describes the PCAN-USB FD adapter */
+static const struct can_bittiming_const pcan_usb_fd_const = {
+ .name = "pcan_usb_fd",
+ .tseg1_min = 1,
+ .tseg1_max = 64,
+ .tseg2_min = 1,
+ .tseg2_max = 16,
+ .sjw_max = 16,
+ .brp_min = 1,
+ .brp_max = 1024,
+ .brp_inc = 1,
+};
+
+static const struct can_bittiming_const pcan_usb_fd_data_const = {
+ .name = "pcan_usb_fd",
+ .tseg1_min = 1,
+ .tseg1_max = 16,
+ .tseg2_min = 1,
+ .tseg2_max = 8,
+ .sjw_max = 4,
+ .brp_min = 1,
+ .brp_max = 1024,
+ .brp_inc = 1,
+};
+
const struct peak_usb_adapter pcan_usb_fd = {
.name = "PCAN-USB FD",
.device_id = PCAN_USBFD_PRODUCT_ID,
@@ -999,28 +1023,8 @@
.clock = {
.freq = PCAN_UFD_CRYSTAL_HZ,
},
- .bittiming_const = {
- .name = "pcan_usb_fd",
- .tseg1_min = 1,
- .tseg1_max = 64,
- .tseg2_min = 1,
- .tseg2_max = 16,
- .sjw_max = 16,
- .brp_min = 1,
- .brp_max = 1024,
- .brp_inc = 1,
- },
- .data_bittiming_const = {
- .name = "pcan_usb_fd",
- .tseg1_min = 1,
- .tseg1_max = 16,
- .tseg2_min = 1,
- .tseg2_max = 8,
- .sjw_max = 4,
- .brp_min = 1,
- .brp_max = 1024,
- .brp_inc = 1,
- },
+ .bittiming_const = &pcan_usb_fd_const,
+ .data_bittiming_const = &pcan_usb_fd_data_const,
/* size of device private data */
.sizeof_dev_private = sizeof(struct pcan_usb_fd_device),
@@ -1058,6 +1062,30 @@
};
/* describes the PCAN-USB Pro FD adapter */
+static const struct can_bittiming_const pcan_usb_pro_fd_const = {
+ .name = "pcan_usb_pro_fd",
+ .tseg1_min = 1,
+ .tseg1_max = 64,
+ .tseg2_min = 1,
+ .tseg2_max = 16,
+ .sjw_max = 16,
+ .brp_min = 1,
+ .brp_max = 1024,
+ .brp_inc = 1,
+};
+
+static const struct can_bittiming_const pcan_usb_pro_fd_data_const = {
+ .name = "pcan_usb_pro_fd",
+ .tseg1_min = 1,
+ .tseg1_max = 16,
+ .tseg2_min = 1,
+ .tseg2_max = 8,
+ .sjw_max = 4,
+ .brp_min = 1,
+ .brp_max = 1024,
+ .brp_inc = 1,
+};
+
const struct peak_usb_adapter pcan_usb_pro_fd = {
.name = "PCAN-USB Pro FD",
.device_id = PCAN_USBPROFD_PRODUCT_ID,
@@ -1067,28 +1095,8 @@
.clock = {
.freq = PCAN_UFD_CRYSTAL_HZ,
},
- .bittiming_const = {
- .name = "pcan_usb_pro_fd",
- .tseg1_min = 1,
- .tseg1_max = 64,
- .tseg2_min = 1,
- .tseg2_max = 16,
- .sjw_max = 16,
- .brp_min = 1,
- .brp_max = 1024,
- .brp_inc = 1,
- },
- .data_bittiming_const = {
- .name = "pcan_usb_pro_fd",
- .tseg1_min = 1,
- .tseg1_max = 16,
- .tseg2_min = 1,
- .tseg2_max = 8,
- .sjw_max = 4,
- .brp_min = 1,
- .brp_max = 1024,
- .brp_inc = 1,
- },
+ .bittiming_const = &pcan_usb_pro_fd_const,
+ .data_bittiming_const = &pcan_usb_pro_fd_data_const,
/* size of device private data */
.sizeof_dev_private = sizeof(struct pcan_usb_fd_device),
diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_pro.c b/drivers/net/can/usb/peak_usb/pcan_usb_pro.c
index 7d61b32..bbdd605 100644
--- a/drivers/net/can/usb/peak_usb/pcan_usb_pro.c
+++ b/drivers/net/can/usb/peak_usb/pcan_usb_pro.c
@@ -1004,6 +1004,18 @@
/*
* describe the PCAN-USB Pro adapter
*/
+static const struct can_bittiming_const pcan_usb_pro_const = {
+ .name = "pcan_usb_pro",
+ .tseg1_min = 1,
+ .tseg1_max = 16,
+ .tseg2_min = 1,
+ .tseg2_max = 8,
+ .sjw_max = 4,
+ .brp_min = 1,
+ .brp_max = 1024,
+ .brp_inc = 1,
+};
+
const struct peak_usb_adapter pcan_usb_pro = {
.name = "PCAN-USB Pro",
.device_id = PCAN_USBPRO_PRODUCT_ID,
@@ -1012,17 +1024,7 @@
.clock = {
.freq = PCAN_USBPRO_CRYSTAL_HZ,
},
- .bittiming_const = {
- .name = "pcan_usb_pro",
- .tseg1_min = 1,
- .tseg1_max = 16,
- .tseg2_min = 1,
- .tseg2_max = 8,
- .sjw_max = 4,
- .brp_min = 1,
- .brp_max = 1024,
- .brp_inc = 1,
- },
+ .bittiming_const = &pcan_usb_pro_const,
/* size of device private data */
.sizeof_dev_private = sizeof(struct pcan_usb_pro_device),
diff --git a/drivers/net/ethernet/3com/3c59x.c b/drivers/net/ethernet/3com/3c59x.c
index 2d1ce3c..753887d 100644
--- a/drivers/net/ethernet/3com/3c59x.c
+++ b/drivers/net/ethernet/3com/3c59x.c
@@ -1763,16 +1763,9 @@
vp->rx_ring[i].addr = cpu_to_le32(pci_map_single(VORTEX_PCI(vp), skb->data, PKT_BUF_SZ, PCI_DMA_FROMDEVICE));
}
if (i != RX_RING_SIZE) {
- int j;
pr_emerg("%s: no memory for rx ring\n", dev->name);
- for (j = 0; j < i; j++) {
- if (vp->rx_skbuff[j]) {
- dev_kfree_skb(vp->rx_skbuff[j]);
- vp->rx_skbuff[j] = NULL;
- }
- }
retval = -ENOMEM;
- goto err_free_irq;
+ goto err_free_skb;
}
/* Wrap the ring. */
vp->rx_ring[i-1].next = cpu_to_le32(vp->rx_ring_dma);
@@ -1782,7 +1775,13 @@
if (!retval)
goto out;
-err_free_irq:
+err_free_skb:
+ for (i = 0; i < RX_RING_SIZE; i++) {
+ if (vp->rx_skbuff[i]) {
+ dev_kfree_skb(vp->rx_skbuff[i]);
+ vp->rx_skbuff[i] = NULL;
+ }
+ }
free_irq(dev->irq, dev);
err:
if (vortex_debug > 1)
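
The 3c59x fix replaces partial unwinding inside the allocation loop with a single error label that sweeps the whole ring; since unused slots stay NULL, the full sweep is safe from any failure point. A hedged sketch of the idiom, with plain malloc/free standing in for skb allocation:

    #include <stdlib.h>

    #define RING_SIZE 8

    /* All names illustrative; the buffers stand in for the rx skb ring. */
    static void *ring[RING_SIZE];

    int ring_init(void)
    {
            int i;

            for (i = 0; i < RING_SIZE; i++) {
                    ring[i] = malloc(1536);
                    if (!ring[i])
                            goto err_free_ring;
            }
            return 0;

    err_free_ring:
            /* Sweep the whole ring: unused slots are NULL, and free(NULL)
             * is a no-op, so this is correct from any failure point.
             */
            for (i = 0; i < RING_SIZE; i++) {
                    free(ring[i]);
                    ring[i] = NULL;
            }
            return -1;
    }

    int main(void)
    {
            return ring_init() ? 1 : 0;
    }
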
diff --git a/drivers/net/ethernet/Makefile b/drivers/net/ethernet/Makefile
index c51014b..b52e0f6 100644
--- a/drivers/net/ethernet/Makefile
+++ b/drivers/net/ethernet/Makefile
@@ -65,7 +65,7 @@
obj-$(CONFIG_NET_VENDOR_QLOGIC) += qlogic/
obj-$(CONFIG_NET_VENDOR_QUALCOMM) += qualcomm/
obj-$(CONFIG_NET_VENDOR_REALTEK) += realtek/
-obj-$(CONFIG_SH_ETH) += renesas/
+obj-$(CONFIG_NET_VENDOR_RENESAS) += renesas/
obj-$(CONFIG_NET_VENDOR_RDC) += rdc/
obj-$(CONFIG_NET_VENDOR_ROCKER) += rocker/
obj-$(CONFIG_NET_VENDOR_SAMSUNG) += samsung/
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c b/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c
index a626c43..cfa3704 100644
--- a/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c
@@ -801,6 +801,9 @@
void xgene_enet_mdio_remove(struct xgene_enet_pdata *pdata)
{
+ if (pdata->phy_dev)
+ phy_disconnect(pdata->phy_dev);
+
mdiobus_unregister(pdata->mdio_bus);
mdiobus_free(pdata->mdio_bus);
pdata->mdio_bus = NULL;
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c
index 299eb43..a02ea7f8 100644
--- a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c
@@ -1277,9 +1277,10 @@
mac_ops->tx_disable(pdata);
xgene_enet_napi_del(pdata);
- xgene_enet_mdio_remove(pdata);
- xgene_enet_delete_desc_rings(pdata);
+ if (pdata->phy_mode == PHY_INTERFACE_MODE_RGMII)
+ xgene_enet_mdio_remove(pdata);
unregister_netdev(ndev);
+ xgene_enet_delete_desc_rings(pdata);
pdata->port_ops->shutdown(pdata);
free_netdev(ndev);
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
index a90d736..f7fbdc9 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
@@ -262,9 +262,9 @@
if (likely(skb)) {
(*pkts_compl)++;
(*bytes_compl) += skb->len;
+ dev_kfree_skb_any(skb);
}
- dev_kfree_skb_any(skb);
tx_buf->first_bd = 0;
tx_buf->skb = NULL;
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
index 76b9052..5907c82 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
@@ -1718,6 +1718,22 @@
offset += sizeof(u32);
data_buf += sizeof(u32);
written_so_far += sizeof(u32);
+
+ /* At the end of each 4KB page, release the nvram lock to allow
+ * the MFW a chance to take it for its own use.
+ */
+ if ((cmd_flags & MCPR_NVM_COMMAND_LAST) &&
+ (written_so_far < buf_size)) {
+ DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM,
+ "Releasing NVM lock after offset 0x%x\n",
+ (u32)(offset - sizeof(u32)));
+ bnx2x_release_nvram_lock(bp);
+ usleep_range(1000, 2000);
+ rc = bnx2x_acquire_nvram_lock(bp);
+ if (rc)
+ return rc;
+ }
+
cmd_flags = 0;
}
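
The bnx2x hunk adds a lock-yield point at every 4KB boundary of a long NVRAM write so the management firmware can briefly claim the lock mid-operation. Roughly the same shape in userspace terms, using a pthread mutex as a stand-in for the driver's NVRAM lock; the names, the 4096-byte boundary, and the 1 ms pause are illustrative:

    #include <pthread.h>
    #include <stddef.h>
    #include <unistd.h>

    static pthread_mutex_t nvram_lock = PTHREAD_MUTEX_INITIALIZER;

    static void write_chunk(const char *buf, size_t len)
    {
            (void)buf;
            (void)len;      /* device access elided */
    }

    void long_write(const char *buf, size_t size)
    {
            size_t done = 0;

            pthread_mutex_lock(&nvram_lock);
            while (done < size) {
                    size_t chunk = size - done < 4096 ? size - done : 4096;

                    write_chunk(buf + done, chunk);
                    done += chunk;

                    /* Yield at each page boundary, but not after the last one */
                    if (done < size) {
                            pthread_mutex_unlock(&nvram_lock);
                            usleep(1000);   /* window for the other agent */
                            pthread_mutex_lock(&nvram_lock);
                    }
            }
            pthread_mutex_unlock(&nvram_lock);
    }

    int main(void)
    {
            char buf[10000] = { 0 };

            long_write(buf, sizeof(buf));
            return 0;
    }
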
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
index 64c1e9d..09ff09f 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
@@ -2126,6 +2126,8 @@
int ret = 0;
int timeout = 0;
u32 reg;
+ u32 dma_ctrl;
+ int i;
/* Disable TDMA to stop add more frames in TX DMA */
reg = bcmgenet_tdma_readl(priv, DMA_CTRL);
@@ -2169,6 +2171,20 @@
ret = -ETIMEDOUT;
}
+ dma_ctrl = 0;
+ for (i = 0; i < priv->hw_params->rx_queues; i++)
+ dma_ctrl |= (1 << (i + DMA_RING_BUF_EN_SHIFT));
+ reg = bcmgenet_rdma_readl(priv, DMA_CTRL);
+ reg &= ~dma_ctrl;
+ bcmgenet_rdma_writel(priv, reg, DMA_CTRL);
+
+ dma_ctrl = 0;
+ for (i = 0; i < priv->hw_params->tx_queues; i++)
+ dma_ctrl |= (1 << (i + DMA_RING_BUF_EN_SHIFT));
+ reg = bcmgenet_tdma_readl(priv, DMA_CTRL);
+ reg &= ~dma_ctrl;
+ bcmgenet_tdma_writel(priv, reg, DMA_CTRL);
+
return ret;
}
@@ -2820,8 +2836,6 @@
netif_dbg(priv, tx_err, dev, "bcmgenet_timeout\n");
- bcmgenet_disable_tx_napi(priv);
-
for (q = 0; q < priv->hw_params->tx_queues; q++)
bcmgenet_dump_tx_queue(&priv->tx_rings[q]);
bcmgenet_dump_tx_queue(&priv->tx_rings[DESC_INDEX]);
@@ -2837,8 +2851,6 @@
bcmgenet_intrl2_0_writel(priv, int0_enable, INTRL2_CPU_MASK_CLEAR);
bcmgenet_intrl2_1_writel(priv, int1_enable, INTRL2_CPU_MASK_CLEAR);
- bcmgenet_enable_tx_napi(priv);
-
dev->trans_start = jiffies;
dev->stats.tx_errors++;
diff --git a/drivers/net/ethernet/brocade/bna/bnad.c b/drivers/net/ethernet/brocade/bna/bnad.c
index 0612b19..506047c 100644
--- a/drivers/net/ethernet/brocade/bna/bnad.c
+++ b/drivers/net/ethernet/brocade/bna/bnad.c
@@ -676,6 +676,7 @@
if (!next_cmpl->valid)
break;
}
+ packets++;
/* TODO: BNA_CQ_EF_LOCAL ? */
if (unlikely(flags & (BNA_CQ_EF_MAC_ERROR |
@@ -692,7 +693,6 @@
else
bnad_cq_setup_skb_frags(rcb, skb, sop_ci, nvecs, len);
- packets++;
rcb->rxq->rx_packets++;
rcb->rxq->rx_bytes += totlen;
ccb->bytes_per_intr += totlen;
diff --git a/drivers/net/ethernet/cavium/Kconfig b/drivers/net/ethernet/cavium/Kconfig
index c4d6bbe..02e23e6 100644
--- a/drivers/net/ethernet/cavium/Kconfig
+++ b/drivers/net/ethernet/cavium/Kconfig
@@ -16,7 +16,6 @@
config THUNDER_NIC_PF
tristate "Thunder Physical function driver"
depends on 64BIT
- default ARCH_THUNDER
select THUNDER_NIC_BGX
---help---
This driver supports Thunder's NIC physical function.
@@ -29,14 +28,12 @@
config THUNDER_NIC_VF
tristate "Thunder Virtual function driver"
depends on 64BIT
- default ARCH_THUNDER
---help---
This driver supports Thunder's NIC virtual function
config THUNDER_NIC_BGX
tristate "Thunder MAC interface driver (BGX)"
depends on 64BIT
- default ARCH_THUNDER
---help---
This driver supports programming and controlling of MAC
interface from NIC physical function driver.
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c
index a11485f..c3c7db4 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c
@@ -2332,10 +2332,11 @@
EXT_MEM1_SIZE_G(size));
}
} else {
- if (i & EXT_MEM_ENABLE_F)
+ if (i & EXT_MEM_ENABLE_F) {
size = t4_read_reg(adap, MA_EXT_MEMORY_BAR_A);
add_debugfs_mem(adap, "mc", MEM_MC,
EXT_MEM_SIZE_G(size));
+ }
}
de = debugfs_create_file_size("flash", S_IRUSR, adap->debugfs_root, adap,
diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.h b/drivers/net/ethernet/emulex/benet/be_cmds.h
index 2716e6f..00e3a6b 100644
--- a/drivers/net/ethernet/emulex/benet/be_cmds.h
+++ b/drivers/net/ethernet/emulex/benet/be_cmds.h
@@ -620,6 +620,11 @@
BE_IF_FLAGS_VLAN_PROMISCUOUS |\
BE_IF_FLAGS_MCAST_PROMISCUOUS)
+#define BE_IF_EN_FLAGS (BE_IF_FLAGS_BROADCAST | BE_IF_FLAGS_PASS_L3L4_ERRORS |\
+ BE_IF_FLAGS_MULTICAST | BE_IF_FLAGS_UNTAGGED)
+
+#define BE_IF_ALL_FILT_FLAGS (BE_IF_EN_FLAGS | BE_IF_FLAGS_ALL_PROMISCUOUS)
+
/* An RX interface is an object with one or more MAC addresses and
* filtering capabilities. */
struct be_cmd_req_if_create {
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
index 6f64242..6ca693b 100644
--- a/drivers/net/ethernet/emulex/benet/be_main.c
+++ b/drivers/net/ethernet/emulex/benet/be_main.c
@@ -273,6 +273,10 @@
if (ether_addr_equal(addr->sa_data, netdev->dev_addr))
return 0;
+ /* if device is not running, copy MAC to netdev->dev_addr */
+ if (!netif_running(netdev))
+ goto done;
+
/* The PMAC_ADD cmd may fail if the VF doesn't have FILTMGMT
* privilege or if PF did not provision the new MAC address.
* On BE3, this cmd will always fail if the VF doesn't have the
@@ -307,9 +311,9 @@
status = -EPERM;
goto err;
}
-
- memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
- dev_info(dev, "MAC address changed to %pM\n", mac);
+done:
+ ether_addr_copy(netdev->dev_addr, addr->sa_data);
+ dev_info(dev, "MAC address changed to %pM\n", addr->sa_data);
return 0;
err:
dev_warn(dev, "MAC address change to %pM failed\n", addr->sa_data);
@@ -2447,10 +2451,24 @@
be_eq_notify(eqo->adapter, eqo->q.id, false, true, num, 0);
}
+/* Free posted rx buffers that were not used */
+static void be_rxq_clean(struct be_rx_obj *rxo)
+{
+ struct be_queue_info *rxq = &rxo->q;
+ struct be_rx_page_info *page_info;
+
+ while (atomic_read(&rxq->used) > 0) {
+ page_info = get_rx_page_info(rxo);
+ put_page(page_info->page);
+ memset(page_info, 0, sizeof(*page_info));
+ }
+ BUG_ON(atomic_read(&rxq->used));
+ rxq->tail = 0;
+ rxq->head = 0;
+}
+
static void be_rx_cq_clean(struct be_rx_obj *rxo)
{
- struct be_rx_page_info *page_info;
- struct be_queue_info *rxq = &rxo->q;
struct be_queue_info *rx_cq = &rxo->cq;
struct be_rx_compl_info *rxcp;
struct be_adapter *adapter = rxo->adapter;
@@ -2487,16 +2505,6 @@
/* After cleanup, leave the CQ in unarmed state */
be_cq_notify(adapter, rx_cq->id, false, 0);
-
- /* Then free posted rx buffers that were not used */
- while (atomic_read(&rxq->used) > 0) {
- page_info = get_rx_page_info(rxo);
- put_page(page_info->page);
- memset(page_info, 0, sizeof(*page_info));
- }
- BUG_ON(atomic_read(&rxq->used));
- rxq->tail = 0;
- rxq->head = 0;
}
static void be_tx_compl_clean(struct be_adapter *adapter)
@@ -2576,8 +2584,8 @@
be_cmd_q_destroy(adapter, &eqo->q, QTYPE_EQ);
napi_hash_del(&eqo->napi);
netif_napi_del(&eqo->napi);
+ free_cpumask_var(eqo->affinity_mask);
}
- free_cpumask_var(eqo->affinity_mask);
be_queue_free(adapter, &eqo->q);
}
}
@@ -2594,13 +2602,7 @@
for_all_evt_queues(adapter, eqo, i) {
int numa_node = dev_to_node(&adapter->pdev->dev);
- if (!zalloc_cpumask_var(&eqo->affinity_mask, GFP_KERNEL))
- return -ENOMEM;
- cpumask_set_cpu(cpumask_local_spread(i, numa_node),
- eqo->affinity_mask);
- netif_napi_add(adapter->netdev, &eqo->napi, be_poll,
- BE_NAPI_WEIGHT);
- napi_hash_add(&eqo->napi);
+
aic = &adapter->aic_obj[i];
eqo->adapter = adapter;
eqo->idx = i;
@@ -2616,6 +2618,14 @@
rc = be_cmd_eq_create(adapter, eqo);
if (rc)
return rc;
+
+ if (!zalloc_cpumask_var(&eqo->affinity_mask, GFP_KERNEL))
+ return -ENOMEM;
+ cpumask_set_cpu(cpumask_local_spread(i, numa_node),
+ eqo->affinity_mask);
+ netif_napi_add(adapter->netdev, &eqo->napi, be_poll,
+ BE_NAPI_WEIGHT);
+ napi_hash_add(&eqo->napi);
}
return 0;
}
@@ -3354,13 +3364,54 @@
for_all_rx_queues(adapter, rxo, i) {
q = &rxo->q;
if (q->created) {
+ /* If RXQs are destroyed while in an "out of buffer"
+ * state, there is a possibility of an HW stall on
+ * Lancer. So, post 64 buffers to each queue to relieve
+ * the "out of buffer" condition.
+ * Make sure there's space in the RXQ before posting.
+ */
+ if (lancer_chip(adapter)) {
+ be_rx_cq_clean(rxo);
+ if (atomic_read(&q->used) == 0)
+ be_post_rx_frags(rxo, GFP_KERNEL,
+ MAX_RX_POST);
+ }
+
be_cmd_rxq_destroy(adapter, q);
be_rx_cq_clean(rxo);
+ be_rxq_clean(rxo);
}
be_queue_free(adapter, q);
}
}
+static void be_disable_if_filters(struct be_adapter *adapter)
+{
+ be_cmd_pmac_del(adapter, adapter->if_handle,
+ adapter->pmac_id[0], 0);
+
+ be_clear_uc_list(adapter);
+
+ /* The IFACE flags are enabled in the open path and cleared
+ * in the close path. When a VF gets detached from the host and
+ * assigned to a VM the following happens:
+ * - VF's IFACE flags get cleared in the detach path
+ * - IFACE create is issued by the VF in the attach path
+ * Due to a bug in the BE3/Skyhawk-R FW
+ * (Lancer FW doesn't have the bug), the IFACE capability flags
+ * specified along with the IFACE create cmd issued by a VF are not
+ * honoured by FW. As a consequence, if a *new* driver
+ * (that enables/disables IFACE flags in open/close)
+ * is loaded in the host and an *old* driver is used by a VM/VF,
+ * the IFACE gets created *without* the needed flags.
+ * To avoid this, disable RX-filter flags only for Lancer.
+ */
+ if (lancer_chip(adapter)) {
+ be_cmd_rx_filter(adapter, BE_IF_ALL_FILT_FLAGS, OFF);
+ adapter->if_flags &= ~BE_IF_ALL_FILT_FLAGS;
+ }
+}
+
static int be_close(struct net_device *netdev)
{
struct be_adapter *adapter = netdev_priv(netdev);
@@ -3373,6 +3424,8 @@
if (!(adapter->flags & BE_FLAGS_SETUP_DONE))
return 0;
+ be_disable_if_filters(adapter);
+
be_roce_dev_close(adapter);
if (adapter->flags & BE_FLAGS_NAPI_ENABLED) {
@@ -3392,7 +3445,6 @@
be_tx_compl_clean(adapter);
be_rx_qs_destroy(adapter);
- be_clear_uc_list(adapter);
for_all_evt_queues(adapter, eqo, i) {
if (msix_enabled(adapter))
@@ -3477,6 +3529,31 @@
return 0;
}
+static int be_enable_if_filters(struct be_adapter *adapter)
+{
+ int status;
+
+ status = be_cmd_rx_filter(adapter, BE_IF_EN_FLAGS, ON);
+ if (status)
+ return status;
+
+ /* For BE3 VFs, the PF programs the initial MAC address */
+ if (!(BEx_chip(adapter) && be_virtfn(adapter))) {
+ status = be_cmd_pmac_add(adapter, adapter->netdev->dev_addr,
+ adapter->if_handle,
+ &adapter->pmac_id[0], 0);
+ if (status)
+ return status;
+ }
+
+ if (adapter->vlans_added)
+ be_vid_config(adapter);
+
+ be_set_rx_mode(adapter->netdev);
+
+ return 0;
+}
+
static int be_open(struct net_device *netdev)
{
struct be_adapter *adapter = netdev_priv(netdev);
@@ -3490,6 +3567,10 @@
if (status)
goto err;
+ status = be_enable_if_filters(adapter);
+ if (status)
+ goto err;
+
status = be_irq_register(adapter);
if (status)
goto err;
@@ -3686,16 +3767,6 @@
}
}
-static void be_mac_clear(struct be_adapter *adapter)
-{
- if (adapter->pmac_id) {
- be_cmd_pmac_del(adapter, adapter->if_handle,
- adapter->pmac_id[0], 0);
- kfree(adapter->pmac_id);
- adapter->pmac_id = NULL;
- }
-}
-
#ifdef CONFIG_BE2NET_VXLAN
static void be_disable_vxlan_offloads(struct be_adapter *adapter)
{
@@ -3770,8 +3841,8 @@
#ifdef CONFIG_BE2NET_VXLAN
be_disable_vxlan_offloads(adapter);
#endif
- /* delete the primary mac along with the uc-mac list */
- be_mac_clear(adapter);
+ kfree(adapter->pmac_id);
+ adapter->pmac_id = NULL;
be_cmd_if_destroy(adapter, adapter->if_handle, 0);
@@ -3782,25 +3853,11 @@
return 0;
}
-static int be_if_create(struct be_adapter *adapter, u32 *if_handle,
- u32 cap_flags, u32 vf)
-{
- u32 en_flags;
-
- en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
- BE_IF_FLAGS_MULTICAST | BE_IF_FLAGS_PASS_L3L4_ERRORS |
- BE_IF_FLAGS_RSS | BE_IF_FLAGS_DEFQ_RSS;
-
- en_flags &= cap_flags;
-
- return be_cmd_if_create(adapter, cap_flags, en_flags, if_handle, vf);
-}
-
static int be_vfs_if_create(struct be_adapter *adapter)
{
struct be_resources res = {0};
+ u32 cap_flags, en_flags, vf;
struct be_vf_cfg *vf_cfg;
- u32 cap_flags, vf;
int status;
/* If a FW profile exists, then cap_flags are updated */
@@ -3821,8 +3878,12 @@
}
}
- status = be_if_create(adapter, &vf_cfg->if_handle,
- cap_flags, vf + 1);
+ en_flags = cap_flags & (BE_IF_FLAGS_UNTAGGED |
+ BE_IF_FLAGS_BROADCAST |
+ BE_IF_FLAGS_MULTICAST |
+ BE_IF_FLAGS_PASS_L3L4_ERRORS);
+ status = be_cmd_if_create(adapter, cap_flags, en_flags,
+ &vf_cfg->if_handle, vf + 1);
if (status)
return status;
}
@@ -4194,15 +4255,8 @@
memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
- } else {
- /* Maybe the HW was reset; dev_addr must be re-programmed */
- memcpy(mac, adapter->netdev->dev_addr, ETH_ALEN);
}
- /* For BE3-R VFs, the PF programs the initial MAC address */
- if (!(BEx_chip(adapter) && be_virtfn(adapter)))
- be_cmd_pmac_add(adapter, mac, adapter->if_handle,
- &adapter->pmac_id[0], 0);
return 0;
}
@@ -4342,6 +4396,7 @@
static int be_setup(struct be_adapter *adapter)
{
struct device *dev = &adapter->pdev->dev;
+ u32 en_flags;
int status;
status = be_func_init(adapter);
@@ -4364,8 +4419,11 @@
if (status)
goto err;
- status = be_if_create(adapter, &adapter->if_handle,
- be_if_cap_flags(adapter), 0);
+ /* will enable all the needed filter flags in be_open() */
+ en_flags = BE_IF_FLAGS_RSS | BE_IF_FLAGS_DEFQ_RSS;
+ en_flags = en_flags & be_if_cap_flags(adapter);
+ status = be_cmd_if_create(adapter, be_if_cap_flags(adapter), en_flags,
+ &adapter->if_handle, 0);
if (status)
goto err;
@@ -4391,11 +4449,6 @@
dev_err(dev, "Please upgrade firmware to version >= 4.0\n");
}
- if (adapter->vlans_added)
- be_vid_config(adapter);
-
- be_set_rx_mode(adapter->netdev);
-
status = be_cmd_set_flow_control(adapter, adapter->tx_fc,
adapter->rx_fc);
if (status)
@@ -5121,7 +5174,7 @@
struct device *dev = &adapter->pdev->dev;
int status;
- if (lancer_chip(adapter) || BEx_chip(adapter))
+ if (lancer_chip(adapter) || BEx_chip(adapter) || be_is_mc(adapter))
return;
if (adapter->flags & BE_FLAGS_VXLAN_OFFLOADS) {
@@ -5168,7 +5221,7 @@
{
struct be_adapter *adapter = netdev_priv(netdev);
- if (lancer_chip(adapter) || BEx_chip(adapter))
+ if (lancer_chip(adapter) || BEx_chip(adapter) || be_is_mc(adapter))
return;
if (adapter->vxlan_port != port)
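
The be2net rework above creates the interface with only the RSS capability flags and moves the RX-filter flags into be_open()/be_close(), so enabling and disabling stay mirrored across the running state. A compact sketch of that open/close symmetry, with flag names invented for illustration:

    #include <stdio.h>

    enum {
            FLAG_BCAST    = 1 << 0,
            FLAG_MCAST    = 1 << 1,
            FLAG_UNTAGGED = 1 << 2,
            FLAG_RSS      = 1 << 3,
    };

    #define FILTER_FLAGS (FLAG_BCAST | FLAG_MCAST | FLAG_UNTAGGED)

    struct iface {
            unsigned int flags;
    };

    static void iface_create(struct iface *i)
    {
            i->flags = FLAG_RSS;            /* capability-only at create time */
    }

    static void iface_open(struct iface *i)
    {
            i->flags |= FILTER_FLAGS;       /* filters track the running state */
    }

    static void iface_close(struct iface *i)
    {
            i->flags &= ~FILTER_FLAGS;      /* mirrored teardown */
    }

    int main(void)
    {
            struct iface i;

            iface_create(&i);
            iface_open(&i);
            iface_close(&i);
            printf("flags after close: %#x\n", i.flags);    /* only RSS left */
            return 0;
    }
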
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
index 32e3807c..b349e6f 100644
--- a/drivers/net/ethernet/freescale/fec_main.c
+++ b/drivers/net/ethernet/freescale/fec_main.c
@@ -1778,7 +1778,7 @@
return ret;
fep->mii_timeout = 0;
- init_completion(&fep->mdio_done);
+ reinit_completion(&fep->mdio_done);
/* start a read op */
writel(FEC_MMFR_ST | FEC_MMFR_OP_READ |
@@ -1817,7 +1817,7 @@
return ret;
fep->mii_timeout = 0;
- init_completion(&fep->mdio_done);
+ reinit_completion(&fep->mdio_done);
/* start a write op */
writel(FEC_MMFR_ST | FEC_MMFR_OP_WRITE |
@@ -3433,6 +3433,7 @@
pm_runtime_set_autosuspend_delay(&pdev->dev, FEC_MDIO_PM_TIMEOUT);
pm_runtime_use_autosuspend(&pdev->dev);
+ pm_runtime_get_noresume(&pdev->dev);
pm_runtime_set_active(&pdev->dev);
pm_runtime_enable(&pdev->dev);
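
The FEC change swaps init_completion() for reinit_completion() on the reused mdio_done completion: re-running the full initializer can wipe wait-queue state an existing waiter depends on, while reinit only rearms the done flag. A userspace analogy built on a pthread condvar; the struct and helpers here are illustrative, not the kernel API:

    #include <pthread.h>
    #include <stdbool.h>

    struct completion {
            pthread_mutex_t lock;
            pthread_cond_t cond;
            bool done;
    };

    void completion_init(struct completion *c)      /* once, at probe time */
    {
            pthread_mutex_init(&c->lock, NULL);
            pthread_cond_init(&c->cond, NULL);
            c->done = false;
    }

    void completion_reinit(struct completion *c)    /* before each reuse */
    {
            pthread_mutex_lock(&c->lock);
            c->done = false;                        /* reset the flag only */
            pthread_mutex_unlock(&c->lock);
    }

    void completion_wait(struct completion *c)
    {
            pthread_mutex_lock(&c->lock);
            while (!c->done)
                    pthread_cond_wait(&c->cond, &c->lock);
            pthread_mutex_unlock(&c->lock);
    }

    void completion_signal(struct completion *c)
    {
            pthread_mutex_lock(&c->lock);
            c->done = true;
            pthread_cond_signal(&c->cond);
            pthread_mutex_unlock(&c->lock);
    }

    int main(void)
    {
            struct completion c;

            completion_init(&c);
            completion_signal(&c);
            completion_wait(&c);    /* returns at once: done already set */
            completion_reinit(&c);  /* ready for the next transaction */
            return 0;
    }
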
diff --git a/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c b/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c
index 56316db..cf8e546 100644
--- a/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c
+++ b/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c
@@ -586,7 +586,8 @@
frag = skb_shinfo(skb)->frags;
while (nr_frags) {
CBDC_SC(bdp,
- BD_ENET_TX_STATS | BD_ENET_TX_LAST | BD_ENET_TX_TC);
+ BD_ENET_TX_STATS | BD_ENET_TX_INTR | BD_ENET_TX_LAST |
+ BD_ENET_TX_TC);
CBDS_SC(bdp, BD_ENET_TX_READY);
if ((CBDR_SC(bdp) & BD_ENET_TX_WRAP) == 0)
diff --git a/drivers/net/ethernet/freescale/fs_enet/mac-fec.c b/drivers/net/ethernet/freescale/fs_enet/mac-fec.c
index b34214e..016743e 100644
--- a/drivers/net/ethernet/freescale/fs_enet/mac-fec.c
+++ b/drivers/net/ethernet/freescale/fs_enet/mac-fec.c
@@ -110,7 +110,7 @@
}
#define FEC_NAPI_RX_EVENT_MSK (FEC_ENET_RXF | FEC_ENET_RXB)
-#define FEC_NAPI_TX_EVENT_MSK (FEC_ENET_TXF | FEC_ENET_TXB)
+#define FEC_NAPI_TX_EVENT_MSK (FEC_ENET_TXF)
#define FEC_RX_EVENT (FEC_ENET_RXF)
#define FEC_TX_EVENT (FEC_ENET_TXF)
#define FEC_ERR_EVENT_MSK (FEC_ENET_HBERR | FEC_ENET_BABR | \
diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c
index 2b7610f..10b3bbbb 100644
--- a/drivers/net/ethernet/freescale/gianfar.c
+++ b/drivers/net/ethernet/freescale/gianfar.c
@@ -2102,6 +2102,11 @@
/* Start Rx/Tx DMA and enable the interrupts */
gfar_start(priv);
+ /* force link state update after mac reset */
+ priv->oldlink = 0;
+ priv->oldspeed = 0;
+ priv->oldduplex = -1;
+
phy_start(priv->phydev);
enable_napi(priv);
diff --git a/drivers/net/ethernet/freescale/gianfar_ethtool.c b/drivers/net/ethernet/freescale/gianfar_ethtool.c
index 3c0a8f8..5b90fcf 100644
--- a/drivers/net/ethernet/freescale/gianfar_ethtool.c
+++ b/drivers/net/ethernet/freescale/gianfar_ethtool.c
@@ -900,27 +900,6 @@
return 0;
}
-static int gfar_comp_asc(const void *a, const void *b)
-{
- return memcmp(a, b, 4);
-}
-
-static int gfar_comp_desc(const void *a, const void *b)
-{
- return -memcmp(a, b, 4);
-}
-
-static void gfar_swap(void *a, void *b, int size)
-{
- u32 *_a = a;
- u32 *_b = b;
-
- swap(_a[0], _b[0]);
- swap(_a[1], _b[1]);
- swap(_a[2], _b[2]);
- swap(_a[3], _b[3]);
-}
-
/* Write a mask to filer cache */
static void gfar_set_mask(u32 mask, struct filer_table *tab)
{
@@ -1270,310 +1249,6 @@
return 0;
}
-/* Copy size filer entries */
-static void gfar_copy_filer_entries(struct gfar_filer_entry dst[0],
- struct gfar_filer_entry src[0], s32 size)
-{
- while (size > 0) {
- size--;
- dst[size].ctrl = src[size].ctrl;
- dst[size].prop = src[size].prop;
- }
-}
-
-/* Delete the contents of the filer-table between start and end
- * and collapse them
- */
-static int gfar_trim_filer_entries(u32 begin, u32 end, struct filer_table *tab)
-{
- int length;
-
- if (end > MAX_FILER_CACHE_IDX || end < begin)
- return -EINVAL;
-
- end++;
- length = end - begin;
-
- /* Copy */
- while (end < tab->index) {
- tab->fe[begin].ctrl = tab->fe[end].ctrl;
- tab->fe[begin++].prop = tab->fe[end++].prop;
-
- }
- /* Fill up with don't cares */
- while (begin < tab->index) {
- tab->fe[begin].ctrl = 0x60;
- tab->fe[begin].prop = 0xFFFFFFFF;
- begin++;
- }
-
- tab->index -= length;
- return 0;
-}
-
-/* Make space on the wanted location */
-static int gfar_expand_filer_entries(u32 begin, u32 length,
- struct filer_table *tab)
-{
- if (length == 0 || length + tab->index > MAX_FILER_CACHE_IDX ||
- begin > MAX_FILER_CACHE_IDX)
- return -EINVAL;
-
- gfar_copy_filer_entries(&(tab->fe[begin + length]), &(tab->fe[begin]),
- tab->index - length + 1);
-
- tab->index += length;
- return 0;
-}
-
-static int gfar_get_next_cluster_start(int start, struct filer_table *tab)
-{
- for (; (start < tab->index) && (start < MAX_FILER_CACHE_IDX - 1);
- start++) {
- if ((tab->fe[start].ctrl & (RQFCR_AND | RQFCR_CLE)) ==
- (RQFCR_AND | RQFCR_CLE))
- return start;
- }
- return -1;
-}
-
-static int gfar_get_next_cluster_end(int start, struct filer_table *tab)
-{
- for (; (start < tab->index) && (start < MAX_FILER_CACHE_IDX - 1);
- start++) {
- if ((tab->fe[start].ctrl & (RQFCR_AND | RQFCR_CLE)) ==
- (RQFCR_CLE))
- return start;
- }
- return -1;
-}
-
-/* Uses the hardware's clustering option to reduce
- * the number of filer table entries
- */
-static void gfar_cluster_filer(struct filer_table *tab)
-{
- s32 i = -1, j, iend, jend;
-
- while ((i = gfar_get_next_cluster_start(++i, tab)) != -1) {
- j = i;
- while ((j = gfar_get_next_cluster_start(++j, tab)) != -1) {
- /* The cluster entry itself and the previous one
- * (a mask) must be identical!
- */
- if (tab->fe[i].ctrl != tab->fe[j].ctrl)
- break;
- if (tab->fe[i].prop != tab->fe[j].prop)
- break;
- if (tab->fe[i - 1].ctrl != tab->fe[j - 1].ctrl)
- break;
- if (tab->fe[i - 1].prop != tab->fe[j - 1].prop)
- break;
- iend = gfar_get_next_cluster_end(i, tab);
- jend = gfar_get_next_cluster_end(j, tab);
- if (jend == -1 || iend == -1)
- break;
-
- /* First we make some free space, where our cluster
- * element should be. Then we copy it there and finally
- * delete it from its old location.
- */
- if (gfar_expand_filer_entries(iend, (jend - j), tab) ==
- -EINVAL)
- break;
-
- gfar_copy_filer_entries(&(tab->fe[iend + 1]),
- &(tab->fe[jend + 1]), jend - j);
-
- if (gfar_trim_filer_entries(jend - 1,
- jend + (jend - j),
- tab) == -EINVAL)
- return;
-
- /* Mask out cluster bit */
- tab->fe[iend].ctrl &= ~(RQFCR_CLE);
- }
- }
-}
-
-/* Swaps the masked bits of a1<>a2 and b1<>b2 */
-static void gfar_swap_bits(struct gfar_filer_entry *a1,
- struct gfar_filer_entry *a2,
- struct gfar_filer_entry *b1,
- struct gfar_filer_entry *b2, u32 mask)
-{
- u32 temp[4];
- temp[0] = a1->ctrl & mask;
- temp[1] = a2->ctrl & mask;
- temp[2] = b1->ctrl & mask;
- temp[3] = b2->ctrl & mask;
-
- a1->ctrl &= ~mask;
- a2->ctrl &= ~mask;
- b1->ctrl &= ~mask;
- b2->ctrl &= ~mask;
-
- a1->ctrl |= temp[1];
- a2->ctrl |= temp[0];
- b1->ctrl |= temp[3];
- b2->ctrl |= temp[2];
-}
-
-/* Generate a list consisting of masks values with their start and
- * end of validity and block as indicator for parts belonging
- * together (glued by ANDs) in mask_table
- */
-static u32 gfar_generate_mask_table(struct gfar_mask_entry *mask_table,
- struct filer_table *tab)
-{
- u32 i, and_index = 0, block_index = 1;
-
- for (i = 0; i < tab->index; i++) {
-
- /* LSByte of control = 0 sets a mask */
- if (!(tab->fe[i].ctrl & 0xF)) {
- mask_table[and_index].mask = tab->fe[i].prop;
- mask_table[and_index].start = i;
- mask_table[and_index].block = block_index;
- if (and_index >= 1)
- mask_table[and_index - 1].end = i - 1;
- and_index++;
- }
- /* cluster starts and ends will be separated because they should
- * hold their position
- */
- if (tab->fe[i].ctrl & RQFCR_CLE)
- block_index++;
- /* An unset AND indicates the end of a dependent block */
- if (!(tab->fe[i].ctrl & RQFCR_AND))
- block_index++;
- }
-
- mask_table[and_index - 1].end = i - 1;
-
- return and_index;
-}
-
-/* Sorts the entries of mask_table by the values of the masks.
- * Important: The 0xFF80 flags of the first and last entry of a
- * block must hold their position (which queue, CLusterEnable, ReJEct,
- * AND)
- */
-static void gfar_sort_mask_table(struct gfar_mask_entry *mask_table,
- struct filer_table *temp_table, u32 and_index)
-{
- /* Pointer to compare function (_asc or _desc) */
- int (*gfar_comp)(const void *, const void *);
-
- u32 i, size = 0, start = 0, prev = 1;
- u32 old_first, old_last, new_first, new_last;
-
- gfar_comp = &gfar_comp_desc;
-
- for (i = 0; i < and_index; i++) {
- if (prev != mask_table[i].block) {
- old_first = mask_table[start].start + 1;
- old_last = mask_table[i - 1].end;
- sort(mask_table + start, size,
- sizeof(struct gfar_mask_entry),
- gfar_comp, &gfar_swap);
-
- /* Toggle order for every block. This makes the
- * thing more efficient!
- */
- if (gfar_comp == gfar_comp_desc)
- gfar_comp = &gfar_comp_asc;
- else
- gfar_comp = &gfar_comp_desc;
-
- new_first = mask_table[start].start + 1;
- new_last = mask_table[i - 1].end;
-
- gfar_swap_bits(&temp_table->fe[new_first],
- &temp_table->fe[old_first],
- &temp_table->fe[new_last],
- &temp_table->fe[old_last],
- RQFCR_QUEUE | RQFCR_CLE |
- RQFCR_RJE | RQFCR_AND);
-
- start = i;
- size = 0;
- }
- size++;
- prev = mask_table[i].block;
- }
-}
-
-/* Reduces the number of masks needed in the filer table to save entries
- * This is done by sorting the masks of a dependent block. A dependent block is
- * identified by gluing ANDs or CLE. The sorting order toggles after every
- * block. Of course entries in scope of a mask must change their location with
- * it.
- */
-static int gfar_optimize_filer_masks(struct filer_table *tab)
-{
- struct filer_table *temp_table;
- struct gfar_mask_entry *mask_table;
-
- u32 and_index = 0, previous_mask = 0, i = 0, j = 0, size = 0;
- s32 ret = 0;
-
- /* We need a copy of the filer table because
- * we want to change its order
- */
- temp_table = kmemdup(tab, sizeof(*temp_table), GFP_KERNEL);
- if (temp_table == NULL)
- return -ENOMEM;
-
- mask_table = kcalloc(MAX_FILER_CACHE_IDX / 2 + 1,
- sizeof(struct gfar_mask_entry), GFP_KERNEL);
-
- if (mask_table == NULL) {
- ret = -ENOMEM;
- goto end;
- }
-
- and_index = gfar_generate_mask_table(mask_table, tab);
-
- gfar_sort_mask_table(mask_table, temp_table, and_index);
-
- /* Now we can copy the data from our duplicated filer table to
- * the real one in the order the mask table says
- */
- for (i = 0; i < and_index; i++) {
- size = mask_table[i].end - mask_table[i].start + 1;
- gfar_copy_filer_entries(&(tab->fe[j]),
- &(temp_table->fe[mask_table[i].start]), size);
- j += size;
- }
-
- /* And finally we just have to check for duplicated masks and drop the
- * second ones
- */
- for (i = 0; i < tab->index && i < MAX_FILER_CACHE_IDX; i++) {
- if (tab->fe[i].ctrl == 0x80) {
- previous_mask = i++;
- break;
- }
- }
- for (; i < tab->index && i < MAX_FILER_CACHE_IDX; i++) {
- if (tab->fe[i].ctrl == 0x80) {
- if (tab->fe[i].prop == tab->fe[previous_mask].prop) {
- /* Two identical ones found!
- * So drop the second one!
- */
- gfar_trim_filer_entries(i, i, tab);
- } else
- /* Not identical! */
- previous_mask = i;
- }
- }
-
- kfree(mask_table);
-end: kfree(temp_table);
- return ret;
-}
-
/* Write the bit-pattern from software's buffer to hardware registers */
static int gfar_write_filer_table(struct gfar_private *priv,
struct filer_table *tab)
@@ -1583,11 +1258,10 @@
return -EBUSY;
/* Fill regular entries */
- for (; i < MAX_FILER_IDX - 1 && (tab->fe[i].ctrl | tab->fe[i].prop);
- i++)
+ for (; i < MAX_FILER_IDX && (tab->fe[i].ctrl | tab->fe[i].prop); i++)
gfar_write_filer(priv, i, tab->fe[i].ctrl, tab->fe[i].prop);
/* Fill the rest with fall-throughs */
- for (; i < MAX_FILER_IDX - 1; i++)
+ for (; i < MAX_FILER_IDX; i++)
gfar_write_filer(priv, i, 0x60, 0xFFFFFFFF);
/* Last entry must be default accept
* because that's what people expect
@@ -1621,7 +1295,6 @@
{
struct ethtool_flow_spec_container *j;
struct filer_table *tab;
- s32 i = 0;
s32 ret = 0;
/* So index is set to zero, too! */
@@ -1646,17 +1319,6 @@
}
}
- i = tab->index;
-
- /* Optimizations to save entries */
- gfar_cluster_filer(tab);
- gfar_optimize_filer_masks(tab);
-
- pr_debug("\tSummary:\n"
- "\tData on hardware: %d\n"
- "\tCompression rate: %d%%\n",
- tab->index, 100 - (100 * tab->index) / i);
-
/* Write everything to hardware */
ret = gfar_write_filer_table(priv, tab);
if (ret == -EBUSY) {
@@ -1722,13 +1384,14 @@
}
process:
+ priv->rx_list.count++;
ret = gfar_process_filer_changes(priv);
if (ret)
goto clean_list;
- priv->rx_list.count++;
return ret;
clean_list:
+ priv->rx_list.count--;
list_del(&temp->list);
clean_mem:
kfree(temp);
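
The last gianfar hunk also reorders the rx_list.count update: the new entry is counted before gfar_process_filer_changes() runs and uncounted on failure, so the processing pass never sees a list/count mismatch. The increment-then-rollback shape, reduced to a sketch with invented types:

    #include <stdio.h>

    struct rule_list {
            int count;
    };

    static int process_rules(struct rule_list *l)
    {
            return l->count > 64 ? -1 : 0;  /* stand-in for the filer rewrite */
    }

    int add_rule(struct rule_list *l)
    {
            int ret;

            l->count++;                     /* counted before processing runs */
            ret = process_rules(l);
            if (ret)
                    l->count--;             /* rollback keeps count consistent */
            return ret;
    }

    int main(void)
    {
            struct rule_list l = { .count = 0 };

            printf("add: %d, count: %d\n", add_rule(&l), l.count);
            return 0;
    }
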
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_main.c b/drivers/net/ethernet/intel/fm10k/fm10k_main.c
index 982fdcd..b5b2925 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_main.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_main.c
@@ -216,7 +216,7 @@
static inline bool fm10k_page_is_reserved(struct page *page)
{
- return (page_to_nid(page) != numa_mem_id()) || page->pfmemalloc;
+ return (page_to_nid(page) != numa_mem_id()) || page_is_pfmemalloc(page);
}
static bool fm10k_can_reuse_rx_page(struct fm10k_rx_buffer *rx_buffer,
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
index 2f70a9b..830466c 100644
--- a/drivers/net/ethernet/intel/igb/igb_main.c
+++ b/drivers/net/ethernet/intel/igb/igb_main.c
@@ -6566,7 +6566,7 @@
static inline bool igb_page_is_reserved(struct page *page)
{
- return (page_to_nid(page) != numa_mem_id()) || page->pfmemalloc;
+ return (page_to_nid(page) != numa_mem_id()) || page_is_pfmemalloc(page);
}
static bool igb_can_reuse_rx_page(struct igb_rx_buffer *rx_buffer,
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index 9aa6104..ae21e0b 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -1832,7 +1832,7 @@
static inline bool ixgbe_page_is_reserved(struct page *page)
{
- return (page_to_nid(page) != numa_mem_id()) || page->pfmemalloc;
+ return (page_to_nid(page) != numa_mem_id()) || page_is_pfmemalloc(page);
}
/**
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
index e71cdde..1d7b00b 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
@@ -765,7 +765,7 @@
static inline bool ixgbevf_page_is_reserved(struct page *page)
{
- return (page_to_nid(page) != numa_mem_id()) || page->pfmemalloc;
+ return (page_to_nid(page) != numa_mem_id()) || page_is_pfmemalloc(page);
}
/**
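
The four Intel hunks above replace direct page->pfmemalloc peeking with the page_is_pfmemalloc() helper, which insulates the drivers from changes to how the flag is stored in struct page. A sketch of the accessor-over-raw-field pattern, using demo types rather than the real struct page:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    struct page_demo {
            uintptr_t flags;        /* flag packed into a word the owner controls */
    };

    #define DEMO_PFMEMALLOC 0x1u

    static bool demo_page_is_pfmemalloc(const struct page_demo *p)
    {
            return p->flags & DEMO_PFMEMALLOC;
    }

    /* Callers mirror the drivers' page_is_reserved() checks */
    static bool demo_page_is_reserved(const struct page_demo *p,
                                      int nid, int local_nid)
    {
            return nid != local_nid || demo_page_is_pfmemalloc(p);
    }

    int main(void)
    {
            struct page_demo p = { .flags = DEMO_PFMEMALLOC };

            printf("reserved: %d\n", demo_page_is_reserved(&p, 0, 0));
            return 0;
    }
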
diff --git a/drivers/net/ethernet/marvell/mvpp2.c b/drivers/net/ethernet/marvell/mvpp2.c
index 3e8b1bf..d9884fd 100644
--- a/drivers/net/ethernet/marvell/mvpp2.c
+++ b/drivers/net/ethernet/marvell/mvpp2.c
@@ -27,6 +27,8 @@
#include <linux/of_address.h>
#include <linux/phy.h>
#include <linux/clk.h>
+#include <linux/hrtimer.h>
+#include <linux/ktime.h>
#include <uapi/linux/ppp_defs.h>
#include <net/ip.h>
#include <net/ipv6.h>
@@ -299,6 +301,7 @@
/* Coalescing */
#define MVPP2_TXDONE_COAL_PKTS_THRESH 15
+#define MVPP2_TXDONE_HRTIMER_PERIOD_NS 1000000UL
#define MVPP2_RX_COAL_PKTS 32
#define MVPP2_RX_COAL_USEC 100
@@ -660,6 +663,14 @@
u64 tx_bytes;
};
+/* Per-CPU port control */
+struct mvpp2_port_pcpu {
+ struct hrtimer tx_done_timer;
+ bool timer_scheduled;
+ /* Tasklet for egress finalization */
+ struct tasklet_struct tx_done_tasklet;
+};
+
struct mvpp2_port {
u8 id;
@@ -679,6 +690,9 @@
u32 pending_cause_rx;
struct napi_struct napi;
+ /* Per-CPU port control */
+ struct mvpp2_port_pcpu __percpu *pcpu;
+
/* Flags */
unsigned long flags;
@@ -776,6 +790,9 @@
/* Array of transmitted skb */
struct sk_buff **tx_skb;
+ /* Array of transmitted buffers' physical addresses */
+ dma_addr_t *tx_buffs;
+
/* Index of last TX DMA descriptor that was inserted */
int txq_put_index;
@@ -913,8 +930,6 @@
/* Occupied buffers indicator */
atomic_t in_use;
int in_use_thresh;
-
- spinlock_t lock;
};
struct mvpp2_buff_hdr {
@@ -963,9 +978,13 @@
}
static void mvpp2_txq_inc_put(struct mvpp2_txq_pcpu *txq_pcpu,
- struct sk_buff *skb)
+ struct sk_buff *skb,
+ struct mvpp2_tx_desc *tx_desc)
{
txq_pcpu->tx_skb[txq_pcpu->txq_put_index] = skb;
+ if (skb)
+ txq_pcpu->tx_buffs[txq_pcpu->txq_put_index] =
+ tx_desc->buf_phys_addr;
txq_pcpu->txq_put_index++;
if (txq_pcpu->txq_put_index == txq_pcpu->size)
txq_pcpu->txq_put_index = 0;
@@ -3376,7 +3395,6 @@
bm_pool->pkt_size = 0;
bm_pool->buf_num = 0;
atomic_set(&bm_pool->in_use, 0);
- spin_lock_init(&bm_pool->lock);
return 0;
}
@@ -3647,7 +3665,6 @@
mvpp2_bm_pool_use(struct mvpp2_port *port, int pool, enum mvpp2_bm_type type,
int pkt_size)
{
- unsigned long flags = 0;
struct mvpp2_bm_pool *new_pool = &port->priv->bm_pools[pool];
int num;
@@ -3656,8 +3673,6 @@
return NULL;
}
- spin_lock_irqsave(&new_pool->lock, flags);
-
if (new_pool->type == MVPP2_BM_FREE)
new_pool->type = type;
@@ -3686,8 +3701,6 @@
if (num != pkts_num) {
WARN(1, "pool %d: %d of %d allocated\n",
new_pool->id, num, pkts_num);
- /* We need to undo the bufs_add() allocations */
- spin_unlock_irqrestore(&new_pool->lock, flags);
return NULL;
}
}
@@ -3695,15 +3708,12 @@
mvpp2_bm_pool_bufsize_set(port->priv, new_pool,
MVPP2_RX_BUF_SIZE(new_pool->pkt_size));
- spin_unlock_irqrestore(&new_pool->lock, flags);
-
return new_pool;
}
/* Initialize pools for swf */
static int mvpp2_swf_bm_pool_init(struct mvpp2_port *port)
{
- unsigned long flags = 0;
int rxq;
if (!port->pool_long) {
@@ -3714,9 +3724,7 @@
if (!port->pool_long)
return -ENOMEM;
- spin_lock_irqsave(&port->pool_long->lock, flags);
port->pool_long->port_map |= (1 << port->id);
- spin_unlock_irqrestore(&port->pool_long->lock, flags);
for (rxq = 0; rxq < rxq_number; rxq++)
mvpp2_rxq_long_pool_set(port, rxq, port->pool_long->id);
@@ -3730,9 +3738,7 @@
if (!port->pool_short)
return -ENOMEM;
- spin_lock_irqsave(&port->pool_short->lock, flags);
port->pool_short->port_map |= (1 << port->id);
- spin_unlock_irqrestore(&port->pool_short->lock, flags);
for (rxq = 0; rxq < rxq_number; rxq++)
mvpp2_rxq_short_pool_set(port, rxq,
@@ -3806,7 +3812,6 @@
mvpp2_write(port->priv, MVPP2_ISR_RX_TX_MASK_REG(port->id),
(MVPP2_CAUSE_MISC_SUM_MASK |
- MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_MASK |
MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK));
}
@@ -4382,23 +4387,6 @@
rxq->time_coal = usec;
}
-/* Set threshold for TX_DONE pkts coalescing */
-static void mvpp2_tx_done_pkts_coal_set(void *arg)
-{
- struct mvpp2_port *port = arg;
- int queue;
- u32 val;
-
- for (queue = 0; queue < txq_number; queue++) {
- struct mvpp2_tx_queue *txq = port->txqs[queue];
-
- val = (txq->done_pkts_coal << MVPP2_TRANSMITTED_THRESH_OFFSET) &
- MVPP2_TRANSMITTED_THRESH_MASK;
- mvpp2_write(port->priv, MVPP2_TXQ_NUM_REG, txq->id);
- mvpp2_write(port->priv, MVPP2_TXQ_THRESH_REG, val);
- }
-}
-
/* Free Tx queue skbuffs */
static void mvpp2_txq_bufs_free(struct mvpp2_port *port,
struct mvpp2_tx_queue *txq,
@@ -4407,8 +4395,8 @@
int i;
for (i = 0; i < num; i++) {
- struct mvpp2_tx_desc *tx_desc = txq->descs +
- txq_pcpu->txq_get_index;
+ dma_addr_t buf_phys_addr =
+ txq_pcpu->tx_buffs[txq_pcpu->txq_get_index];
struct sk_buff *skb = txq_pcpu->tx_skb[txq_pcpu->txq_get_index];
mvpp2_txq_inc_get(txq_pcpu);
@@ -4416,8 +4404,8 @@
if (!skb)
continue;
- dma_unmap_single(port->dev->dev.parent, tx_desc->buf_phys_addr,
- tx_desc->data_size, DMA_TO_DEVICE);
+ dma_unmap_single(port->dev->dev.parent, buf_phys_addr,
+ skb_headlen(skb), DMA_TO_DEVICE);
dev_kfree_skb_any(skb);
}
}
@@ -4433,7 +4421,7 @@
static inline struct mvpp2_tx_queue *mvpp2_get_tx_queue(struct mvpp2_port *port,
u32 cause)
{
- int queue = fls(cause >> 16) - 1;
+ int queue = fls(cause) - 1;
return port->txqs[queue];
}
@@ -4460,6 +4448,29 @@
netif_tx_wake_queue(nq);
}
+static unsigned int mvpp2_tx_done(struct mvpp2_port *port, u32 cause)
+{
+ struct mvpp2_tx_queue *txq;
+ struct mvpp2_txq_pcpu *txq_pcpu;
+ unsigned int tx_todo = 0;
+
+ while (cause) {
+ txq = mvpp2_get_tx_queue(port, cause);
+ if (!txq)
+ break;
+
+ txq_pcpu = this_cpu_ptr(txq->pcpu);
+
+ if (txq_pcpu->count) {
+ mvpp2_txq_done(port, txq, txq_pcpu);
+ tx_todo += txq_pcpu->count;
+ }
+
+ cause &= ~(1 << txq->log_id);
+ }
+ return tx_todo;
+}
+
/* Rx/Tx queue initialization/cleanup methods */
/* Allocate and initialize descriptors for aggr TXQ */
@@ -4649,12 +4660,13 @@
txq_pcpu->tx_skb = kmalloc(txq_pcpu->size *
sizeof(*txq_pcpu->tx_skb),
GFP_KERNEL);
- if (!txq_pcpu->tx_skb) {
- dma_free_coherent(port->dev->dev.parent,
- txq->size * MVPP2_DESC_ALIGNED_SIZE,
- txq->descs, txq->descs_phys);
- return -ENOMEM;
- }
+ if (!txq_pcpu->tx_skb)
+ goto error;
+
+ txq_pcpu->tx_buffs = kmalloc(txq_pcpu->size *
+ sizeof(dma_addr_t), GFP_KERNEL);
+ if (!txq_pcpu->tx_buffs)
+ goto error;
txq_pcpu->count = 0;
txq_pcpu->reserved_num = 0;
@@ -4663,6 +4675,19 @@
}
return 0;
+
+error:
+ for_each_present_cpu(cpu) {
+ txq_pcpu = per_cpu_ptr(txq->pcpu, cpu);
+ kfree(txq_pcpu->tx_skb);
+ kfree(txq_pcpu->tx_buffs);
+ }
+
+ dma_free_coherent(port->dev->dev.parent,
+ txq->size * MVPP2_DESC_ALIGNED_SIZE,
+ txq->descs, txq->descs_phys);
+
+ return -ENOMEM;
}
/* Free allocated TXQ resources */
@@ -4675,6 +4700,7 @@
for_each_present_cpu(cpu) {
txq_pcpu = per_cpu_ptr(txq->pcpu, cpu);
kfree(txq_pcpu->tx_skb);
+ kfree(txq_pcpu->tx_buffs);
}
if (txq->descs)
@@ -4805,7 +4831,6 @@
goto err_cleanup;
}
- on_each_cpu(mvpp2_tx_done_pkts_coal_set, port, 1);
on_each_cpu(mvpp2_txq_sent_counter_clear, port, 1);
return 0;
@@ -4887,6 +4912,49 @@
}
}
+static void mvpp2_timer_set(struct mvpp2_port_pcpu *port_pcpu)
+{
+ ktime_t interval;
+
+ if (!port_pcpu->timer_scheduled) {
+ port_pcpu->timer_scheduled = true;
+ interval = ktime_set(0, MVPP2_TXDONE_HRTIMER_PERIOD_NS);
+ hrtimer_start(&port_pcpu->tx_done_timer, interval,
+ HRTIMER_MODE_REL_PINNED);
+ }
+}
+
+static void mvpp2_tx_proc_cb(unsigned long data)
+{
+ struct net_device *dev = (struct net_device *)data;
+ struct mvpp2_port *port = netdev_priv(dev);
+ struct mvpp2_port_pcpu *port_pcpu = this_cpu_ptr(port->pcpu);
+ unsigned int tx_todo, cause;
+
+ if (!netif_running(dev))
+ return;
+ port_pcpu->timer_scheduled = false;
+
+ /* Process all the Tx queues */
+ cause = (1 << txq_number) - 1;
+ tx_todo = mvpp2_tx_done(port, cause);
+
+ /* Set the timer in case not all the packets were processed */
+ if (tx_todo)
+ mvpp2_timer_set(port_pcpu);
+}
+
+static enum hrtimer_restart mvpp2_hr_timer_cb(struct hrtimer *timer)
+{
+ struct mvpp2_port_pcpu *port_pcpu = container_of(timer,
+ struct mvpp2_port_pcpu,
+ tx_done_timer);
+
+ tasklet_schedule(&port_pcpu->tx_done_tasklet);
+
+ return HRTIMER_NORESTART;
+}
+
/* Main RX/TX processing routines */
/* Display more error info */
@@ -5144,11 +5212,11 @@
if (i == (skb_shinfo(skb)->nr_frags - 1)) {
/* Last descriptor */
tx_desc->command = MVPP2_TXD_L_DESC;
- mvpp2_txq_inc_put(txq_pcpu, skb);
+ mvpp2_txq_inc_put(txq_pcpu, skb, tx_desc);
} else {
/* Descriptor in the middle: Not First, Not Last */
tx_desc->command = 0;
- mvpp2_txq_inc_put(txq_pcpu, NULL);
+ mvpp2_txq_inc_put(txq_pcpu, NULL, tx_desc);
}
}
@@ -5214,12 +5282,12 @@
/* First and Last descriptor */
tx_cmd |= MVPP2_TXD_F_DESC | MVPP2_TXD_L_DESC;
tx_desc->command = tx_cmd;
- mvpp2_txq_inc_put(txq_pcpu, skb);
+ mvpp2_txq_inc_put(txq_pcpu, skb, tx_desc);
} else {
/* First but not Last */
tx_cmd |= MVPP2_TXD_F_DESC | MVPP2_TXD_PADDING_DISABLE;
tx_desc->command = tx_cmd;
- mvpp2_txq_inc_put(txq_pcpu, NULL);
+ mvpp2_txq_inc_put(txq_pcpu, NULL, tx_desc);
/* Continue with other skb fragments */
if (mvpp2_tx_frag_process(port, skb, aggr_txq, txq)) {
@@ -5255,6 +5323,17 @@
dev_kfree_skb_any(skb);
}
+ /* Finalize TX processing */
+ if (txq_pcpu->count >= txq->done_pkts_coal)
+ mvpp2_txq_done(port, txq, txq_pcpu);
+
+ /* Set the timer in case not all frags were processed */
+ if (txq_pcpu->count <= frags && txq_pcpu->count > 0) {
+ struct mvpp2_port_pcpu *port_pcpu = this_cpu_ptr(port->pcpu);
+
+ mvpp2_timer_set(port_pcpu);
+ }
+
return NETDEV_TX_OK;
}
@@ -5268,10 +5347,11 @@
netdev_err(dev, "tx fifo underrun error\n");
}
-static void mvpp2_txq_done_percpu(void *arg)
+static int mvpp2_poll(struct napi_struct *napi, int budget)
{
- struct mvpp2_port *port = arg;
- u32 cause_rx_tx, cause_tx, cause_misc;
+ u32 cause_rx_tx, cause_rx, cause_misc;
+ int rx_done = 0;
+ struct mvpp2_port *port = netdev_priv(napi->dev);
/* Rx/Tx cause register
*
@@ -5285,7 +5365,7 @@
*/
cause_rx_tx = mvpp2_read(port->priv,
MVPP2_ISR_RX_TX_CAUSE_REG(port->id));
- cause_tx = cause_rx_tx & MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_MASK;
+ cause_rx_tx &= ~MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_MASK;
cause_misc = cause_rx_tx & MVPP2_CAUSE_MISC_SUM_MASK;
if (cause_misc) {
@@ -5297,26 +5377,6 @@
cause_rx_tx & ~MVPP2_CAUSE_MISC_SUM_MASK);
}
- /* Release TX descriptors */
- if (cause_tx) {
- struct mvpp2_tx_queue *txq = mvpp2_get_tx_queue(port, cause_tx);
- struct mvpp2_txq_pcpu *txq_pcpu = this_cpu_ptr(txq->pcpu);
-
- if (txq_pcpu->count)
- mvpp2_txq_done(port, txq, txq_pcpu);
- }
-}
-
-static int mvpp2_poll(struct napi_struct *napi, int budget)
-{
- u32 cause_rx_tx, cause_rx;
- int rx_done = 0;
- struct mvpp2_port *port = netdev_priv(napi->dev);
-
- on_each_cpu(mvpp2_txq_done_percpu, port, 1);
-
- cause_rx_tx = mvpp2_read(port->priv,
- MVPP2_ISR_RX_TX_CAUSE_REG(port->id));
cause_rx = cause_rx_tx & MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK;
/* Process RX packets */
@@ -5561,6 +5621,8 @@
static int mvpp2_stop(struct net_device *dev)
{
struct mvpp2_port *port = netdev_priv(dev);
+ struct mvpp2_port_pcpu *port_pcpu;
+ int cpu;
mvpp2_stop_dev(port);
mvpp2_phy_disconnect(port);
@@ -5569,6 +5631,13 @@
on_each_cpu(mvpp2_interrupts_mask, port, 1);
free_irq(port->irq, port);
+ for_each_present_cpu(cpu) {
+ port_pcpu = per_cpu_ptr(port->pcpu, cpu);
+
+ hrtimer_cancel(&port_pcpu->tx_done_timer);
+ port_pcpu->timer_scheduled = false;
+ tasklet_kill(&port_pcpu->tx_done_tasklet);
+ }
mvpp2_cleanup_rxqs(port);
mvpp2_cleanup_txqs(port);
@@ -5784,7 +5853,6 @@
txq->done_pkts_coal = c->tx_max_coalesced_frames;
}
- on_each_cpu(mvpp2_tx_done_pkts_coal_set, port, 1);
return 0;
}
@@ -6035,6 +6103,7 @@
{
struct device_node *phy_node;
struct mvpp2_port *port;
+ struct mvpp2_port_pcpu *port_pcpu;
struct net_device *dev;
struct resource *res;
const char *dt_mac_addr;
@@ -6044,7 +6113,7 @@
int features;
int phy_mode;
int priv_common_regs_num = 2;
- int err, i;
+ int err, i, cpu;
dev = alloc_etherdev_mqs(sizeof(struct mvpp2_port), txq_number,
rxq_number);
@@ -6135,6 +6204,24 @@
}
mvpp2_port_power_up(port);
+ port->pcpu = alloc_percpu(struct mvpp2_port_pcpu);
+ if (!port->pcpu) {
+ err = -ENOMEM;
+ goto err_free_txq_pcpu;
+ }
+
+ for_each_present_cpu(cpu) {
+ port_pcpu = per_cpu_ptr(port->pcpu, cpu);
+
+ hrtimer_init(&port_pcpu->tx_done_timer, CLOCK_MONOTONIC,
+ HRTIMER_MODE_REL_PINNED);
+ port_pcpu->tx_done_timer.function = mvpp2_hr_timer_cb;
+ port_pcpu->timer_scheduled = false;
+
+ tasklet_init(&port_pcpu->tx_done_tasklet, mvpp2_tx_proc_cb,
+ (unsigned long)dev);
+ }
+
netif_napi_add(dev, &port->napi, mvpp2_poll, NAPI_POLL_WEIGHT);
features = NETIF_F_SG | NETIF_F_IP_CSUM;
dev->features = features | NETIF_F_RXCSUM;
@@ -6144,7 +6231,7 @@
err = register_netdev(dev);
if (err < 0) {
dev_err(&pdev->dev, "failed to register netdev\n");
- goto err_free_txq_pcpu;
+ goto err_free_port_pcpu;
}
netdev_info(dev, "Using %s mac address %pM\n", mac_from, dev->dev_addr);
@@ -6153,6 +6240,8 @@
priv->port_list[id] = port;
return 0;
+err_free_port_pcpu:
+ free_percpu(port->pcpu);
err_free_txq_pcpu:
for (i = 0; i < txq_number; i++)
free_percpu(port->txqs[i]->pcpu);
@@ -6171,6 +6260,7 @@
int i;
unregister_netdev(port->dev);
+ free_percpu(port->pcpu);
free_percpu(port->stats);
for (i = 0; i < txq_number; i++)
free_percpu(port->txqs[i]->pcpu);
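
The mvpp2 rework replaces per-queue TX-done interrupt coalescing with a pinned hrtimer that kicks a tasklet; the bottom half reaps completed descriptors and re-arms the timer only while work remains, with timer_scheduled guarding against double-arming. That control flow, reduced to a compilable sketch in which the timer start itself is elided:

    #include <stdbool.h>
    #include <stdio.h>

    struct txdone_ctx {
            bool timer_scheduled;
    };

    static void timer_arm(struct txdone_ctx *c)
    {
            if (!c->timer_scheduled) {      /* never double-arm */
                    c->timer_scheduled = true;
                    /* hrtimer_start(..., HRTIMER_MODE_REL_PINNED) in the driver */
            }
    }

    static unsigned int reap_tx(void)
    {
            return 0;       /* stand-in for mvpp2_tx_done(); 0 = nothing pending */
    }

    static void txdone_bottom_half(struct txdone_ctx *c)
    {
            unsigned int todo;

            c->timer_scheduled = false;     /* timer has fired; allow re-arm */
            todo = reap_tx();
            if (todo)
                    timer_arm(c);           /* packets still pending: retry later */
    }

    int main(void)
    {
            struct txdone_ctx c = { .timer_scheduled = true };

            txdone_bottom_half(&c);
            printf("rearmed: %d\n", c.timer_scheduled);
            return 0;
    }
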
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
index afad529..06e3e1e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
@@ -391,6 +391,8 @@
/* disable cmdif checksum */
MLX5_SET(cmd_hca_cap, set_hca_cap, cmdif_checksum, 0);
+ MLX5_SET(cmd_hca_cap, set_hca_cap, log_uar_page_sz, PAGE_SHIFT - 12);
+
err = set_caps(dev, set_ctx, set_sz);
query_ex:
diff --git a/drivers/net/ethernet/micrel/ks8842.c b/drivers/net/ethernet/micrel/ks8842.c
index f78909a..09d2e16 100644
--- a/drivers/net/ethernet/micrel/ks8842.c
+++ b/drivers/net/ethernet/micrel/ks8842.c
@@ -952,9 +952,8 @@
sg_dma_address(&tx_ctl->sg) = dma_map_single(adapter->dev,
tx_ctl->buf, DMA_BUFFER_SIZE, DMA_TO_DEVICE);
- err = dma_mapping_error(adapter->dev,
- sg_dma_address(&tx_ctl->sg));
- if (err) {
+ if (dma_mapping_error(adapter->dev, sg_dma_address(&tx_ctl->sg))) {
+ err = -ENOMEM;
sg_dma_address(&tx_ctl->sg) = 0;
goto err;
}
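
The ks8842 fix (and the matching netcp_core change further down) routes the check through dma_mapping_error() and supplies its own -ENOMEM: the checker only reports whether the mapping failed, and a failed mapping is not guaranteed to be a zero cookie, so a test like !dma_addr is wrong. A sketch with trivial stand-ins for the DMA API:

    #include <errno.h>
    #include <stdio.h>

    typedef unsigned long dma_addr_t;

    /* Trivial stand-ins: the real dma_mapping_error() may flag mappings whose
     * cookie is non-zero, which is exactly why testing !dma_addr is wrong.
     */
    static dma_addr_t map_single(void *buf)
    {
            (void)buf;
            return 0x1000;          /* pretend success */
    }

    static int mapping_error(dma_addr_t addr)
    {
            return addr == 0;       /* placeholder policy for the demo only */
    }

    int map_tx_buffer(void *buf, dma_addr_t *out)
    {
            dma_addr_t dma = map_single(buf);

            if (mapping_error(dma))
                    return -ENOMEM; /* the caller chooses the errno */

            *out = dma;
            return 0;
    }

    int main(void)
    {
            dma_addr_t dma;
            char buf[64];

            printf("map: %d\n", map_tx_buffer(buf, &dma));
            return 0;
    }
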
diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
index 3df51fa..f790f61 100644
--- a/drivers/net/ethernet/realtek/r8169.c
+++ b/drivers/net/ethernet/realtek/r8169.c
@@ -4875,10 +4875,12 @@
case RTL_GIGA_MAC_VER_46:
case RTL_GIGA_MAC_VER_47:
case RTL_GIGA_MAC_VER_48:
+ RTL_W32(RxConfig, RX128_INT_EN | RX_DMA_BURST | RX_EARLY_OFF);
+ break;
case RTL_GIGA_MAC_VER_49:
case RTL_GIGA_MAC_VER_50:
case RTL_GIGA_MAC_VER_51:
- RTL_W32(RxConfig, RX128_INT_EN | RX_DMA_BURST | RX_EARLY_OFF);
+ RTL_W32(RxConfig, RX128_INT_EN | RX_MULTI_EN | RX_DMA_BURST | RX_EARLY_OFF);
break;
default:
RTL_W32(RxConfig, RX128_INT_EN | RX_DMA_BURST);
diff --git a/drivers/net/ethernet/rocker/rocker.c b/drivers/net/ethernet/rocker/rocker.c
index 2d8578cade..2e7f9a2 100644
--- a/drivers/net/ethernet/rocker/rocker.c
+++ b/drivers/net/ethernet/rocker/rocker.c
@@ -4821,6 +4821,7 @@
rocker_port_ig_tbl(rocker_port, SWITCHDEV_TRANS_NONE,
ROCKER_OP_FLAG_REMOVE);
unregister_netdev(rocker_port->dev);
+ free_netdev(rocker_port->dev);
}
kfree(rocker->ports);
}
diff --git a/drivers/net/ethernet/sfc/ef10.c b/drivers/net/ethernet/sfc/ef10.c
index 605cc89..b1a4ea2 100644
--- a/drivers/net/ethernet/sfc/ef10.c
+++ b/drivers/net/ethernet/sfc/ef10.c
@@ -1282,7 +1282,12 @@
}
}
- if (core_stats) {
+ if (!core_stats)
+ return stats_count;
+
+ if (nic_data->datapath_caps &
+ 1 << MC_CMD_GET_CAPABILITIES_OUT_EVB_LBN) {
+ /* Use vadaptor stats. */
core_stats->rx_packets = stats[EF10_STAT_rx_unicast] +
stats[EF10_STAT_rx_multicast] +
stats[EF10_STAT_rx_broadcast];
@@ -1302,6 +1307,26 @@
core_stats->rx_fifo_errors = stats[EF10_STAT_rx_overflow];
core_stats->rx_errors = core_stats->rx_crc_errors;
core_stats->tx_errors = stats[EF10_STAT_tx_bad];
+ } else {
+ /* Use port stats. */
+ core_stats->rx_packets = stats[EF10_STAT_port_rx_packets];
+ core_stats->tx_packets = stats[EF10_STAT_port_tx_packets];
+ core_stats->rx_bytes = stats[EF10_STAT_port_rx_bytes];
+ core_stats->tx_bytes = stats[EF10_STAT_port_tx_bytes];
+ core_stats->rx_dropped = stats[EF10_STAT_port_rx_nodesc_drops] +
+ stats[GENERIC_STAT_rx_nodesc_trunc] +
+ stats[GENERIC_STAT_rx_noskb_drops];
+ core_stats->multicast = stats[EF10_STAT_port_rx_multicast];
+ core_stats->rx_length_errors =
+ stats[EF10_STAT_port_rx_gtjumbo] +
+ stats[EF10_STAT_port_rx_length_error];
+ core_stats->rx_crc_errors = stats[EF10_STAT_port_rx_bad];
+ core_stats->rx_frame_errors =
+ stats[EF10_STAT_port_rx_align_error];
+ core_stats->rx_fifo_errors = stats[EF10_STAT_port_rx_overflow];
+ core_stats->rx_errors = (core_stats->rx_length_errors +
+ core_stats->rx_crc_errors +
+ core_stats->rx_frame_errors);
}
return stats_count;
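
The ef10.c change selects between vadaptor and port statistics based on the EVB capability bit, falling back to whole-port counters when the datapath exposes no vadaptor. Capability-gated source selection in sketch form, with invented names and bit positions:

    #include <stdio.h>

    #define CAP_EVB (1u << 4)       /* invented bit position */

    struct stats {
            unsigned long rx_packets;
    };

    /* Pick the counter source once, based on an advertised capability. */
    static void fill_core_stats(unsigned int caps, struct stats *out,
                                const unsigned long *vadaptor,
                                const unsigned long *port)
    {
            if (caps & CAP_EVB)
                    out->rx_packets = vadaptor[0];  /* per-function view */
            else
                    out->rx_packets = port[0];      /* whole-port fallback */
    }

    int main(void)
    {
            unsigned long v[1] = { 10 }, p[1] = { 99 };
            struct stats s;

            fill_core_stats(CAP_EVB, &s, v, p);
            printf("rx: %lu\n", s.rx_packets);
            return 0;
    }
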
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c
index 7e3129e..f0e4bb4 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c
@@ -42,7 +42,7 @@
#define NSS_COMMON_CLK_DIV_MASK 0x7f
#define NSS_COMMON_CLK_SRC_CTRL 0x14
-#define NSS_COMMON_CLK_SRC_CTRL_OFFSET(x) (1 << x)
+#define NSS_COMMON_CLK_SRC_CTRL_OFFSET(x) (x)
/* Mode is coded on 1 bit but is different depending on the MAC ID:
* MAC0: QSGMII=0 RGMII=1
* MAC1: QSGMII=0 SGMII=0 RGMII=1
@@ -291,7 +291,7 @@
/* Configure the clock src according to the mode */
regmap_read(gmac->nss_common, NSS_COMMON_CLK_SRC_CTRL, &val);
- val &= ~NSS_COMMON_CLK_SRC_CTRL_OFFSET(gmac->id);
+ val &= ~(1 << NSS_COMMON_CLK_SRC_CTRL_OFFSET(gmac->id));
switch (gmac->phy_mode) {
case PHY_INTERFACE_MODE_RGMII:
val |= NSS_COMMON_CLK_SRC_CTRL_RGMII(gmac->id) <<
diff --git a/drivers/net/ethernet/ti/netcp.h b/drivers/net/ethernet/ti/netcp.h
index a8a73064..bb1bb72 100644
--- a/drivers/net/ethernet/ti/netcp.h
+++ b/drivers/net/ethernet/ti/netcp.h
@@ -85,7 +85,6 @@
struct list_head rxhook_list_head;
unsigned int rx_queue_id;
void *rx_fdq[KNAV_DMA_FDQ_PER_CHAN];
- u32 rx_buffer_sizes[KNAV_DMA_FDQ_PER_CHAN];
struct napi_struct rx_napi;
struct napi_struct tx_napi;
diff --git a/drivers/net/ethernet/ti/netcp_core.c b/drivers/net/ethernet/ti/netcp_core.c
index 9749dfd..4755838 100644
--- a/drivers/net/ethernet/ti/netcp_core.c
+++ b/drivers/net/ethernet/ti/netcp_core.c
@@ -34,6 +34,7 @@
#define NETCP_SOP_OFFSET (NET_IP_ALIGN + NET_SKB_PAD)
#define NETCP_NAPI_WEIGHT 64
#define NETCP_TX_TIMEOUT (5 * HZ)
+#define NETCP_PACKET_SIZE (ETH_FRAME_LEN + ETH_FCS_LEN)
#define NETCP_MIN_PACKET_SIZE ETH_ZLEN
#define NETCP_MAX_MCAST_ADDR 16
@@ -804,30 +805,28 @@
if (likely(fdq == 0)) {
unsigned int primary_buf_len;
/* Allocate a primary receive queue entry */
- buf_len = netcp->rx_buffer_sizes[0] + NETCP_SOP_OFFSET;
+ buf_len = NETCP_PACKET_SIZE + NETCP_SOP_OFFSET;
primary_buf_len = SKB_DATA_ALIGN(buf_len) +
SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
- if (primary_buf_len <= PAGE_SIZE) {
- bufptr = netdev_alloc_frag(primary_buf_len);
- pad[1] = primary_buf_len;
- } else {
- bufptr = kmalloc(primary_buf_len, GFP_ATOMIC |
- GFP_DMA32 | __GFP_COLD);
- pad[1] = 0;
- }
+ bufptr = netdev_alloc_frag(primary_buf_len);
+ pad[1] = primary_buf_len;
if (unlikely(!bufptr)) {
- dev_warn_ratelimited(netcp->ndev_dev, "Primary RX buffer alloc failed\n");
+ dev_warn_ratelimited(netcp->ndev_dev,
+ "Primary RX buffer alloc failed\n");
goto fail;
}
dma = dma_map_single(netcp->dev, bufptr, buf_len,
DMA_TO_DEVICE);
+ if (unlikely(dma_mapping_error(netcp->dev, dma)))
+ goto fail;
+
pad[0] = (u32)bufptr;
} else {
/* Allocate a secondary receive queue entry */
- page = alloc_page(GFP_ATOMIC | GFP_DMA32 | __GFP_COLD);
+ page = alloc_page(GFP_ATOMIC | GFP_DMA | __GFP_COLD);
if (unlikely(!page)) {
dev_warn_ratelimited(netcp->ndev_dev, "Secondary page alloc failed\n");
goto fail;
@@ -1010,7 +1009,7 @@
/* Map the linear buffer */
dma_addr = dma_map_single(dev, skb->data, pkt_len, DMA_TO_DEVICE);
- if (unlikely(!dma_addr)) {
+ if (unlikely(dma_mapping_error(dev, dma_addr))) {
dev_err(netcp->ndev_dev, "Failed to map skb buffer\n");
return NULL;
}
@@ -1546,8 +1545,8 @@
knav_queue_disable_notify(netcp->rx_queue);
/* open Rx FDQs */
- for (i = 0; i < KNAV_DMA_FDQ_PER_CHAN &&
- netcp->rx_queue_depths[i] && netcp->rx_buffer_sizes[i]; ++i) {
+ for (i = 0; i < KNAV_DMA_FDQ_PER_CHAN && netcp->rx_queue_depths[i];
+ ++i) {
snprintf(name, sizeof(name), "rx-fdq-%s-%d", ndev->name, i);
netcp->rx_fdq[i] = knav_queue_open(name, KNAV_QUEUE_GP, 0);
if (IS_ERR_OR_NULL(netcp->rx_fdq[i])) {
@@ -1941,14 +1940,6 @@
netcp->rx_queue_depths[0] = 128;
}
- ret = of_property_read_u32_array(node_interface, "rx-buffer-size",
- netcp->rx_buffer_sizes,
- KNAV_DMA_FDQ_PER_CHAN);
- if (ret) {
- dev_err(dev, "missing \"rx-buffer-size\" parameter\n");
- netcp->rx_buffer_sizes[0] = 1536;
- }
-
ret = of_property_read_u32_array(node_interface, "rx-pool", temp, 2);
if (ret < 0) {
dev_err(dev, "missing \"rx-pool\" parameter\n");
diff --git a/drivers/net/hamradio/mkiss.c b/drivers/net/hamradio/mkiss.c
index 2ffbf13..216bfd3 100644
--- a/drivers/net/hamradio/mkiss.c
+++ b/drivers/net/hamradio/mkiss.c
@@ -728,11 +728,12 @@
dev->type = ARPHRD_AX25;
/* Perform the low-level AX25 initialization. */
- if ((err = ax_open(ax->dev))) {
+ err = ax_open(ax->dev);
+ if (err)
goto out_free_netdev;
- }
- if (register_netdev(dev))
+ err = register_netdev(dev);
+ if (err)
goto out_free_buffers;
/* after register_netdev() - because else printk smashes the kernel */
diff --git a/drivers/net/ntb_netdev.c b/drivers/net/ntb_netdev.c
index 3cc316cb..d8757bf 100644
--- a/drivers/net/ntb_netdev.c
+++ b/drivers/net/ntb_netdev.c
@@ -102,6 +102,12 @@
netdev_dbg(ndev, "%s: %d byte payload received\n", __func__, len);
+ if (len < 0) {
+ ndev->stats.rx_errors++;
+ ndev->stats.rx_length_errors++;
+ goto enqueue_again;
+ }
+
skb_put(skb, len);
skb->protocol = eth_type_trans(skb, ndev);
skb->ip_summed = CHECKSUM_NONE;
@@ -121,6 +127,7 @@
return;
}
+enqueue_again:
rc = ntb_transport_rx_enqueue(qp, skb, skb->data, ndev->mtu + ETH_HLEN);
if (rc) {
dev_kfree_skb(skb);
@@ -184,7 +191,7 @@
rc = ntb_transport_rx_enqueue(dev->qp, skb, skb->data,
ndev->mtu + ETH_HLEN);
- if (rc == -EINVAL) {
+ if (rc) {
dev_kfree_skb(skb);
goto err;
}
diff --git a/drivers/net/phy/fixed_phy.c b/drivers/net/phy/fixed_phy.c
index 1960b46..d7a6524 100644
--- a/drivers/net/phy/fixed_phy.c
+++ b/drivers/net/phy/fixed_phy.c
@@ -290,6 +290,15 @@
return ERR_PTR(-EINVAL);
}
+ /* propagate the fixed link values to struct phy_device */
+ phy->link = status->link;
+ if (status->link) {
+ phy->speed = status->speed;
+ phy->duplex = status->duplex;
+ phy->pause = status->pause;
+ phy->asym_pause = status->asym_pause;
+ }
+
of_node_get(np);
phy->dev.of_node = np;
diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c
index b2197b5..34fe339 100644
--- a/drivers/net/phy/phy.c
+++ b/drivers/net/phy/phy.c
@@ -811,6 +811,7 @@
bool needs_aneg = false, do_suspend = false;
enum phy_state old_state;
int err = 0;
+ int old_link;
mutex_lock(&phydev->lock);
@@ -896,11 +897,18 @@
phydev->adjust_link(phydev->attached_dev);
break;
case PHY_RUNNING:
- /* Only register a CHANGE if we are
- * polling or ignoring interrupts
+		/* Only register a CHANGE if we are polling or ignoring
+		 * interrupts, and the link has changed since the last check.
*/
- if (!phy_interrupt_is_valid(phydev))
- phydev->state = PHY_CHANGELINK;
+ if (!phy_interrupt_is_valid(phydev)) {
+ old_link = phydev->link;
+ err = phy_read_status(phydev);
+ if (err)
+ break;
+
+ if (old_link != phydev->link)
+ phydev->state = PHY_CHANGELINK;
+ }
break;
case PHY_CHANGELINK:
err = phy_read_status(phydev);
@@ -1030,10 +1038,14 @@
int value = -1;
if (phydrv->read_mmd_indirect == NULL) {
- mmd_phy_indirect(phydev->bus, prtad, devad, addr);
+ struct mii_bus *bus = phydev->bus;
+
+ mutex_lock(&bus->mdio_lock);
+ mmd_phy_indirect(bus, prtad, devad, addr);
/* Read the content of the MMD's selected register */
- value = phydev->bus->read(phydev->bus, addr, MII_MMD_DATA);
+ value = bus->read(bus, addr, MII_MMD_DATA);
+ mutex_unlock(&bus->mdio_lock);
} else {
value = phydrv->read_mmd_indirect(phydev, prtad, devad, addr);
}
@@ -1063,10 +1075,14 @@
struct phy_driver *phydrv = phydev->drv;
if (phydrv->write_mmd_indirect == NULL) {
- mmd_phy_indirect(phydev->bus, prtad, devad, addr);
+ struct mii_bus *bus = phydev->bus;
+
+ mutex_lock(&bus->mdio_lock);
+ mmd_phy_indirect(bus, prtad, devad, addr);
/* Write the data into MMD's selected register */
- phydev->bus->write(phydev->bus, addr, MII_MMD_DATA, data);
+ bus->write(bus, addr, MII_MMD_DATA, data);
+ mutex_unlock(&bus->mdio_lock);
} else {
phydrv->write_mmd_indirect(phydev, prtad, devad, addr, data);
}
diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
index 0302483..55f0178 100644
--- a/drivers/net/phy/phy_device.c
+++ b/drivers/net/phy/phy_device.c
@@ -176,7 +176,7 @@
if (c45_ids)
dev->c45_ids = *c45_ids;
dev->bus = bus;
- dev->dev.parent = bus->parent;
+ dev->dev.parent = &bus->dev;
dev->dev.bus = &mdio_bus_type;
dev->irq = bus->irq != NULL ? bus->irq[addr] : PHY_POLL;
dev_set_name(&dev->dev, PHY_ID_FMT, bus->id, addr);
diff --git a/drivers/net/phy/smsc.c b/drivers/net/phy/smsc.c
index c0f6479e..70b0895 100644
--- a/drivers/net/phy/smsc.c
+++ b/drivers/net/phy/smsc.c
@@ -91,19 +91,18 @@
}
/*
- * The LAN8710/LAN8720 requires a minimum of 2 link pulses within 64ms of each
- * other in order to set the ENERGYON bit and exit EDPD mode. If a link partner
- * does send the pulses within this interval, the PHY will remained powered
- * down.
- *
- * This workaround will manually toggle the PHY on/off upon calls to read_status
- * in order to generate link test pulses if the link is down. If a link partner
- * is present, it will respond to the pulses, which will cause the ENERGYON bit
- * to be set and will cause the EDPD mode to be exited.
+ * The LAN87xx can fail to set the ENERGYON bit when an Ethernet cable is
+ * plugged in while the PHY is in Energy Detect Power-Down mode, which makes
+ * cable-plug detection unreliable.
+ * This workaround disables Energy Detect Power-Down mode and waits for a
+ * response to link pulses to detect the presence of a plugged-in cable.
+ * Energy Detect Power-Down mode is re-enabled at the end of the procedure to
+ * save approximately 220 mW of power while the cable is unplugged.
*/
static int lan87xx_read_status(struct phy_device *phydev)
{
int err = genphy_read_status(phydev);
+ int i;
if (!phydev->link) {
/* Disable EDPD to wake up PHY */
@@ -116,8 +115,16 @@
if (rc < 0)
return rc;
- /* Sleep 64 ms to allow ~5 link test pulses to be sent */
- msleep(64);
+ /* Wait max 640 ms to detect energy */
+ for (i = 0; i < 64; i++) {
+ /* Sleep to allow link test pulses to be sent */
+ msleep(10);
+ rc = phy_read(phydev, MII_LAN83C185_CTRL_STATUS);
+ if (rc < 0)
+ return rc;
+ if (rc & MII_LAN83C185_ENERGYON)
+ break;
+ }
/* Re-enable EDPD */
rc = phy_read(phydev, MII_LAN83C185_CTRL_STATUS);
@@ -191,7 +198,7 @@
/* basic functions */
.config_aneg = genphy_config_aneg,
- .read_status = genphy_read_status,
+ .read_status = lan87xx_read_status,
.config_init = smsc_phy_config_init,
.soft_reset = smsc_phy_reset,
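
The lan87xx_read_status() change replaces a single 64 ms sleep with a bounded poll that exits as soon as ENERGYON is seen, up to 640 ms worst case. A runnable userspace sketch of the same poll-with-early-exit shape (read_energy() is a hypothetical stand-in for the MII register read):

    #include <stdio.h>
    #include <unistd.h>

    /* Poll in 10 ms steps, 640 ms worst case, stopping early on energy. */
    static int wait_for_energy(int (*read_energy)(void))
    {
            int i, rc;

            for (i = 0; i < 64; i++) {
                    usleep(10 * 1000);      /* let link test pulses go out */
                    rc = read_energy();
                    if (rc < 0)
                            return rc;      /* propagate read errors */
                    if (rc > 0)
                            return 1;       /* energy seen, stop early */
            }
            return 0;       /* timed out; caller re-checks link state */
    }

    static int fake_read(void) { static int n; return ++n >= 3; }

    int main(void)
    {
            printf("energy: %d\n", wait_for_energy(fake_read));
            return 0;
    }
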
diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c
index 9d15566..fa8f504 100644
--- a/drivers/net/ppp/ppp_generic.c
+++ b/drivers/net/ppp/ppp_generic.c
@@ -269,9 +269,9 @@
static void ppp_ccp_closed(struct ppp *ppp);
static struct compressor *find_compressor(int type);
static void ppp_get_stats(struct ppp *ppp, struct ppp_stats *st);
-static struct ppp *ppp_create_interface(struct net *net, int unit, int *retp);
+static struct ppp *ppp_create_interface(struct net *net, int unit,
+ struct file *file, int *retp);
static void init_ppp_file(struct ppp_file *pf, int kind);
-static void ppp_shutdown_interface(struct ppp *ppp);
static void ppp_destroy_interface(struct ppp *ppp);
static struct ppp *ppp_find_unit(struct ppp_net *pn, int unit);
static struct channel *ppp_find_channel(struct ppp_net *pn, int unit);
@@ -392,8 +392,10 @@
file->private_data = NULL;
if (pf->kind == INTERFACE) {
ppp = PF_TO_PPP(pf);
+ rtnl_lock();
if (file == ppp->owner)
- ppp_shutdown_interface(ppp);
+ unregister_netdevice(ppp->dev);
+ rtnl_unlock();
}
if (atomic_dec_and_test(&pf->refcnt)) {
switch (pf->kind) {
@@ -593,8 +595,10 @@
mutex_lock(&ppp_mutex);
if (pf->kind == INTERFACE) {
ppp = PF_TO_PPP(pf);
+ rtnl_lock();
if (file == ppp->owner)
- ppp_shutdown_interface(ppp);
+ unregister_netdevice(ppp->dev);
+ rtnl_unlock();
}
if (atomic_long_read(&file->f_count) < 2) {
ppp_release(NULL, file);
@@ -838,11 +842,10 @@
/* Create a new ppp unit */
if (get_user(unit, p))
break;
- ppp = ppp_create_interface(net, unit, &err);
+ ppp = ppp_create_interface(net, unit, file, &err);
if (!ppp)
break;
file->private_data = &ppp->file;
- ppp->owner = file;
err = -EFAULT;
if (put_user(ppp->file.index, p))
break;
@@ -916,6 +919,16 @@
static __net_exit void ppp_exit_net(struct net *net)
{
struct ppp_net *pn = net_generic(net, ppp_net_id);
+ struct ppp *ppp;
+ LIST_HEAD(list);
+ int id;
+
+ rtnl_lock();
+ idr_for_each_entry(&pn->units_idr, ppp, id)
+ unregister_netdevice_queue(ppp->dev, &list);
+
+ unregister_netdevice_many(&list);
+ rtnl_unlock();
idr_destroy(&pn->units_idr);
}
@@ -1088,8 +1101,28 @@
return 0;
}
+static void ppp_dev_uninit(struct net_device *dev)
+{
+ struct ppp *ppp = netdev_priv(dev);
+ struct ppp_net *pn = ppp_pernet(ppp->ppp_net);
+
+ ppp_lock(ppp);
+ ppp->closing = 1;
+ ppp_unlock(ppp);
+
+ mutex_lock(&pn->all_ppp_mutex);
+ unit_put(&pn->units_idr, ppp->file.index);
+ mutex_unlock(&pn->all_ppp_mutex);
+
+ ppp->owner = NULL;
+
+ ppp->file.dead = 1;
+ wake_up_interruptible(&ppp->file.rwait);
+}
+
static const struct net_device_ops ppp_netdev_ops = {
.ndo_init = ppp_dev_init,
+ .ndo_uninit = ppp_dev_uninit,
.ndo_start_xmit = ppp_start_xmit,
.ndo_do_ioctl = ppp_net_ioctl,
.ndo_get_stats64 = ppp_get_stats64,
@@ -2667,8 +2700,8 @@
* or if there is already a unit with the requested number.
* unit == -1 means allocate a new number.
*/
-static struct ppp *
-ppp_create_interface(struct net *net, int unit, int *retp)
+static struct ppp *ppp_create_interface(struct net *net, int unit,
+ struct file *file, int *retp)
{
struct ppp *ppp;
struct ppp_net *pn;
@@ -2688,6 +2721,7 @@
ppp->mru = PPP_MRU;
init_ppp_file(&ppp->file, INTERFACE);
ppp->file.hdrlen = PPP_HDRLEN - 2; /* don't count proto bytes */
+ ppp->owner = file;
for (i = 0; i < NUM_NP; ++i)
ppp->npmode[i] = NPMODE_PASS;
INIT_LIST_HEAD(&ppp->channels);
@@ -2776,34 +2810,6 @@
}
/*
- * Take down a ppp interface unit - called when the owning file
- * (the one that created the unit) is closed or detached.
- */
-static void ppp_shutdown_interface(struct ppp *ppp)
-{
- struct ppp_net *pn;
-
- pn = ppp_pernet(ppp->ppp_net);
- mutex_lock(&pn->all_ppp_mutex);
-
- /* This will call dev_close() for us. */
- ppp_lock(ppp);
- if (!ppp->closing) {
- ppp->closing = 1;
- ppp_unlock(ppp);
- unregister_netdev(ppp->dev);
- unit_put(&pn->units_idr, ppp->file.index);
- } else
- ppp_unlock(ppp);
-
- ppp->file.dead = 1;
- ppp->owner = NULL;
- wake_up_interruptible(&ppp->file.rwait);
-
- mutex_unlock(&pn->all_ppp_mutex);
-}
-
-/*
* Free the memory used by a ppp unit. This is only called once
* there are no channels connected to the unit and no file structs
* that reference the unit.
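
ppp_exit_net() above queues every unit with unregister_netdevice_queue() and flushes them with unregister_netdevice_many() under one rtnl_lock(). A minimal sketch of that batching pattern (teardown_all() is illustrative, not the driver's function):

    #include <linux/netdevice.h>
    #include <linux/rtnetlink.h>

    /* Sketch: unregister several netdevs in one rtnl section so the
     * stack can share a single RCU grace period across the batch,
     * instead of paying one per unregister_netdev() call. */
    static void teardown_all(struct net_device **devs, int n)
    {
            LIST_HEAD(list);
            int i;

            rtnl_lock();
            for (i = 0; i < n; i++)
                    unregister_netdevice_queue(devs[i], &list);
            unregister_netdevice_many(&list);
            rtnl_unlock();
    }
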
diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
index 9d43460..64a60af 100644
--- a/drivers/net/usb/qmi_wwan.c
+++ b/drivers/net/usb/qmi_wwan.c
@@ -785,6 +785,7 @@
{QMI_FIXED_INTF(0x413c, 0x81a4, 8)}, /* Dell Wireless 5570e HSPA+ (42Mbps) Mobile Broadband Card */
{QMI_FIXED_INTF(0x413c, 0x81a8, 8)}, /* Dell Wireless 5808 Gobi(TM) 4G LTE Mobile Broadband Card */
{QMI_FIXED_INTF(0x413c, 0x81a9, 8)}, /* Dell Wireless 5808e Gobi(TM) 4G LTE Mobile Broadband Card */
+ {QMI_FIXED_INTF(0x03f0, 0x4e1d, 8)}, /* HP lt4111 LTE/EV-DO/HSPA+ Gobi 4G Module */
{QMI_FIXED_INTF(0x03f0, 0x581d, 4)}, /* HP lt4112 LTE/HSPA+ Gobi 4G Module (Huawei me906e) */
/* 4. Gobi 1000 devices */
diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c
index 3c86b10..e049857 100644
--- a/drivers/net/usb/usbnet.c
+++ b/drivers/net/usb/usbnet.c
@@ -778,7 +778,7 @@
{
struct usbnet *dev = netdev_priv(net);
struct driver_info *info = dev->driver_info;
- int retval, pm;
+ int retval, pm, mpn;
clear_bit(EVENT_DEV_OPEN, &dev->flags);
netif_stop_queue (net);
@@ -809,6 +809,8 @@
usbnet_purge_paused_rxq(dev);
+ mpn = !test_and_clear_bit(EVENT_NO_RUNTIME_PM, &dev->flags);
+
/* deferred work (task, timer, softirq) must also stop.
* can't flush_scheduled_work() until we drop rtnl (later),
* else workers could deadlock; so make workers a NOP.
@@ -819,8 +821,7 @@
if (!pm)
usb_autopm_put_interface(dev->intf);
- if (info->manage_power &&
- !test_and_clear_bit(EVENT_NO_RUNTIME_PM, &dev->flags))
+ if (info->manage_power && mpn)
info->manage_power(dev, 0);
else
usb_autopm_put_interface(dev->intf);
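
The usbnet change samples EVENT_NO_RUNTIME_PM into the local mpn before the deferred work is stopped, so the later decision cannot race with contexts that touch the bit. A small sketch of that latch-then-decide pattern (the flag name and helper are illustrative):

    #include <linux/bitops.h>
    #include <linux/types.h>

    #define FLAG_NO_RUNTIME_PM 0    /* illustrative bit number */

    /* Sketch: clear-and-latch the flag early; later code acts on the
     * local copy, so concurrent changes to the bit no longer matter. */
    static bool may_manage_power(unsigned long *flags)
    {
            bool mpn = !test_and_clear_bit(FLAG_NO_RUNTIME_PM, flags);

            /* ... teardown runs here; the decision is already latched ... */

            return mpn;
    }
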
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 7fbca37..237f8e5 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -1756,9 +1756,9 @@
/* Do we support "hardware" checksums? */
if (virtio_has_feature(vdev, VIRTIO_NET_F_CSUM)) {
/* This opens up the world of extra features. */
- dev->hw_features |= NETIF_F_HW_CSUM|NETIF_F_SG|NETIF_F_FRAGLIST;
+ dev->hw_features |= NETIF_F_HW_CSUM | NETIF_F_SG;
if (csum)
- dev->features |= NETIF_F_HW_CSUM|NETIF_F_SG|NETIF_F_FRAGLIST;
+ dev->features |= NETIF_F_HW_CSUM | NETIF_F_SG;
if (virtio_has_feature(vdev, VIRTIO_NET_F_GSO)) {
dev->hw_features |= NETIF_F_TSO | NETIF_F_UFO
diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
index 34c519e..5bc4b1e 100644
--- a/drivers/net/vxlan.c
+++ b/drivers/net/vxlan.c
@@ -2216,6 +2216,8 @@
if (vxlan_addr_multicast(&vxlan->default_dst.remote_ip)) {
ret = vxlan_igmp_join(vxlan);
+ if (ret == -EADDRINUSE)
+ ret = 0;
if (ret) {
vxlan_sock_release(vs);
return ret;
diff --git a/drivers/net/wan/cosa.c b/drivers/net/wan/cosa.c
index 7193b73..848ea6a 100644
--- a/drivers/net/wan/cosa.c
+++ b/drivers/net/wan/cosa.c
@@ -589,7 +589,8 @@
chan->netdev->base_addr = chan->cosa->datareg;
chan->netdev->irq = chan->cosa->irq;
chan->netdev->dma = chan->cosa->dma;
- if (register_hdlc_device(chan->netdev)) {
+ err = register_hdlc_device(chan->netdev);
+ if (err) {
netdev_warn(chan->netdev,
"register_hdlc_device() failed\n");
free_netdev(chan->netdev);
diff --git a/drivers/net/wireless/b43/tables_nphy.c b/drivers/net/wireless/b43/tables_nphy.c
index 25d1cbd..b2f0d24 100644
--- a/drivers/net/wireless/b43/tables_nphy.c
+++ b/drivers/net/wireless/b43/tables_nphy.c
@@ -3728,7 +3728,7 @@
switch (phy->rev) {
case 6:
case 5:
- if (sprom->fem.ghz5.extpa_gain == 3)
+ if (sprom->fem.ghz2.extpa_gain == 3)
return b43_ntab_tx_gain_epa_rev3_hi_pwr_2g;
/* fall through */
case 4:
diff --git a/drivers/net/wireless/iwlwifi/mvm/scan.c b/drivers/net/wireless/iwlwifi/mvm/scan.c
index 5000bfcd..5514ad6 100644
--- a/drivers/net/wireless/iwlwifi/mvm/scan.c
+++ b/drivers/net/wireless/iwlwifi/mvm/scan.c
@@ -1023,7 +1023,7 @@
cmd->scan_priority =
iwl_mvm_scan_priority(mvm, IWL_SCAN_PRIORITY_EXT_6);
- if (iwl_mvm_scan_total_iterations(params) == 0)
+ if (iwl_mvm_scan_total_iterations(params) == 1)
cmd->ooc_priority =
iwl_mvm_scan_priority(mvm, IWL_SCAN_PRIORITY_EXT_6);
else
diff --git a/drivers/net/wireless/iwlwifi/pcie/trans.c b/drivers/net/wireless/iwlwifi/pcie/trans.c
index 6203c4a..9e144e7 100644
--- a/drivers/net/wireless/iwlwifi/pcie/trans.c
+++ b/drivers/net/wireless/iwlwifi/pcie/trans.c
@@ -478,10 +478,16 @@
if (trans->cfg->device_family == IWL_DEVICE_FAMILY_7000)
iwl_set_bits_prph(trans, APMG_PCIDEV_STT_REG,
APMG_PCIDEV_STT_VAL_WAKE_ME);
- else if (trans->cfg->device_family == IWL_DEVICE_FAMILY_8000)
+ else if (trans->cfg->device_family == IWL_DEVICE_FAMILY_8000) {
+ iwl_set_bit(trans, CSR_DBG_LINK_PWR_MGMT_REG,
+ CSR_RESET_LINK_PWR_MGMT_DISABLED);
iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
CSR_HW_IF_CONFIG_REG_PREPARE |
CSR_HW_IF_CONFIG_REG_ENABLE_PME);
+ mdelay(1);
+ iwl_clear_bit(trans, CSR_DBG_LINK_PWR_MGMT_REG,
+ CSR_RESET_LINK_PWR_MGMT_DISABLED);
+ }
mdelay(5);
}
@@ -575,6 +581,10 @@
if (ret >= 0)
return 0;
+ iwl_set_bit(trans, CSR_DBG_LINK_PWR_MGMT_REG,
+ CSR_RESET_LINK_PWR_MGMT_DISABLED);
+ msleep(1);
+
for (iter = 0; iter < 10; iter++) {
/* If HW is not ready, prepare the conditions to check again */
iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
@@ -582,8 +592,10 @@
do {
ret = iwl_pcie_set_hw_ready(trans);
- if (ret >= 0)
- return 0;
+ if (ret >= 0) {
+ ret = 0;
+ goto out;
+ }
usleep_range(200, 1000);
t += 200;
@@ -593,6 +605,10 @@
IWL_ERR(trans, "Couldn't prepare the card\n");
+out:
+ iwl_clear_bit(trans, CSR_DBG_LINK_PWR_MGMT_REG,
+ CSR_RESET_LINK_PWR_MGMT_DISABLED);
+
return ret;
}
diff --git a/drivers/net/wireless/iwlwifi/pcie/tx.c b/drivers/net/wireless/iwlwifi/pcie/tx.c
index 2b86c21..607acb5 100644
--- a/drivers/net/wireless/iwlwifi/pcie/tx.c
+++ b/drivers/net/wireless/iwlwifi/pcie/tx.c
@@ -1875,8 +1875,19 @@
/* start timer if queue currently empty */
if (q->read_ptr == q->write_ptr) {
- if (txq->wd_timeout)
- mod_timer(&txq->stuck_timer, jiffies + txq->wd_timeout);
+ if (txq->wd_timeout) {
+ /*
+			 * If the TXQ is active, arm the timer now; if not,
+			 * stash the timeout in frozen_expiry_remainder so
+			 * the timer is armed with the right value when the
+			 * station wakes up.
+ */
+ if (!txq->frozen)
+ mod_timer(&txq->stuck_timer,
+ jiffies + txq->wd_timeout);
+ else
+ txq->frozen_expiry_remainder = txq->wd_timeout;
+ }
IWL_DEBUG_RPM(trans, "Q: %d first tx - take ref\n", q->id);
iwl_trans_pcie_ref(trans);
}
diff --git a/drivers/net/wireless/rsi/rsi_91x_sdio_ops.c b/drivers/net/wireless/rsi/rsi_91x_sdio_ops.c
index b6cc9ff..1c6788a 100644
--- a/drivers/net/wireless/rsi/rsi_91x_sdio_ops.c
+++ b/drivers/net/wireless/rsi/rsi_91x_sdio_ops.c
@@ -172,6 +172,7 @@
(struct rsi_91x_sdiodev *)adapter->rsi_dev;
u32 len;
u32 num_blocks;
+ const u8 *fw;
const struct firmware *fw_entry = NULL;
u32 block_size = dev->tx_blk_size;
int status = 0;
@@ -200,6 +201,10 @@
return status;
}
+ /* Copy firmware into DMA-accessible memory */
+ fw = kmemdup(fw_entry->data, fw_entry->size, GFP_KERNEL);
+ if (!fw)
+ return -ENOMEM;
len = fw_entry->size;
if (len % 4)
@@ -210,7 +215,8 @@
rsi_dbg(INIT_ZONE, "%s: Instruction size:%d\n", __func__, len);
rsi_dbg(INIT_ZONE, "%s: num blocks: %d\n", __func__, num_blocks);
- status = rsi_copy_to_card(common, fw_entry->data, len, num_blocks);
+ status = rsi_copy_to_card(common, fw, len, num_blocks);
+ kfree(fw);
release_firmware(fw_entry);
return status;
}
diff --git a/drivers/net/wireless/rsi/rsi_91x_usb_ops.c b/drivers/net/wireless/rsi/rsi_91x_usb_ops.c
index 1106ce7..30c2cf7 100644
--- a/drivers/net/wireless/rsi/rsi_91x_usb_ops.c
+++ b/drivers/net/wireless/rsi/rsi_91x_usb_ops.c
@@ -146,7 +146,10 @@
return status;
}
+ /* Copy firmware into DMA-accessible memory */
fw = kmemdup(fw_entry->data, fw_entry->size, GFP_KERNEL);
+ if (!fw)
+ return -ENOMEM;
len = fw_entry->size;
if (len % 4)
@@ -158,6 +161,7 @@
rsi_dbg(INIT_ZONE, "%s: num blocks: %d\n", __func__, num_blocks);
status = rsi_copy_to_card(common, fw, len, num_blocks);
+ kfree(fw);
release_firmware(fw_entry);
return status;
}
diff --git a/drivers/net/wireless/rtlwifi/core.c b/drivers/net/wireless/rtlwifi/core.c
index 3b3a88b..585d088 100644
--- a/drivers/net/wireless/rtlwifi/core.c
+++ b/drivers/net/wireless/rtlwifi/core.c
@@ -1015,9 +1015,12 @@
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct sk_buff *skb = ieee80211_beacon_get(hw, vif);
+ struct rtl_tcb_desc tcb_desc;
- if (skb)
- rtlpriv->intf_ops->adapter_tx(hw, NULL, skb, NULL);
+ if (skb) {
+ memset(&tcb_desc, 0, sizeof(struct rtl_tcb_desc));
+ rtlpriv->intf_ops->adapter_tx(hw, NULL, skb, &tcb_desc);
+ }
}
static void rtl_op_bss_info_changed(struct ieee80211_hw *hw,
diff --git a/drivers/net/wireless/rtlwifi/rtl8723be/sw.c b/drivers/net/wireless/rtlwifi/rtl8723be/sw.c
index 1017f02..7bf88d9 100644
--- a/drivers/net/wireless/rtlwifi/rtl8723be/sw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8723be/sw.c
@@ -385,6 +385,7 @@
module_param_named(ips, rtl8723be_mod_params.inactiveps, bool, 0444);
module_param_named(swlps, rtl8723be_mod_params.swctrl_lps, bool, 0444);
module_param_named(fwlps, rtl8723be_mod_params.fwctrl_lps, bool, 0444);
+module_param_named(msi, rtl8723be_mod_params.msi_support, bool, 0444);
module_param_named(disable_watchdog, rtl8723be_mod_params.disable_watchdog,
bool, 0444);
MODULE_PARM_DESC(swenc, "Set to 1 for software crypto (default 0)\n");
diff --git a/drivers/net/xen-netback/interface.c b/drivers/net/xen-netback/interface.c
index 1a83e19..28577a3 100644
--- a/drivers/net/xen-netback/interface.c
+++ b/drivers/net/xen-netback/interface.c
@@ -61,6 +61,12 @@
void xenvif_skb_zerocopy_complete(struct xenvif_queue *queue)
{
atomic_dec(&queue->inflight_packets);
+
+ /* Wake the dealloc thread _after_ decrementing inflight_packets so
+ * that if kthread_stop() has already been called, the dealloc thread
+ * does not wait forever with nothing to wake it.
+ */
+ wake_up(&queue->dealloc_wq);
}
int xenvif_schedulable(struct xenvif *vif)
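
The comment added above spells out an ordering rule worth a tiny sketch: publish the decremented in-flight count first, then wake, or a waiter that re-checks the count after kthread_stop() can sleep forever:

    #include <linux/atomic.h>
    #include <linux/wait.h>

    /* Sketch: the completer side of a wait_event() on 'inflight == 0'. */
    static void complete_one(atomic_t *inflight, wait_queue_head_t *wq)
    {
            atomic_dec(inflight);   /* 1) publish the new count */
            wake_up(wq);            /* 2) then wake anyone waiting on it */
    }
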
diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c
index 7d50711..3f44b52 100644
--- a/drivers/net/xen-netback/netback.c
+++ b/drivers/net/xen-netback/netback.c
@@ -810,23 +810,17 @@
static struct gnttab_map_grant_ref *xenvif_get_requests(struct xenvif_queue *queue,
struct sk_buff *skb,
struct xen_netif_tx_request *txp,
- struct gnttab_map_grant_ref *gop)
+ struct gnttab_map_grant_ref *gop,
+ unsigned int frag_overflow,
+ struct sk_buff *nskb)
{
struct skb_shared_info *shinfo = skb_shinfo(skb);
skb_frag_t *frags = shinfo->frags;
u16 pending_idx = XENVIF_TX_CB(skb)->pending_idx;
int start;
pending_ring_idx_t index;
- unsigned int nr_slots, frag_overflow = 0;
+ unsigned int nr_slots;
- /* At this point shinfo->nr_frags is in fact the number of
- * slots, which can be as large as XEN_NETBK_LEGACY_SLOTS_MAX.
- */
- if (shinfo->nr_frags > MAX_SKB_FRAGS) {
- frag_overflow = shinfo->nr_frags - MAX_SKB_FRAGS;
- BUG_ON(frag_overflow > MAX_SKB_FRAGS);
- shinfo->nr_frags = MAX_SKB_FRAGS;
- }
nr_slots = shinfo->nr_frags;
/* Skip first skb fragment if it is on same page as header fragment. */
@@ -841,13 +835,6 @@
}
if (frag_overflow) {
- struct sk_buff *nskb = xenvif_alloc_skb(0);
- if (unlikely(nskb == NULL)) {
- if (net_ratelimit())
- netdev_err(queue->vif->dev,
- "Can't allocate the frag_list skb.\n");
- return NULL;
- }
shinfo = skb_shinfo(nskb);
frags = shinfo->frags;
@@ -1175,9 +1162,10 @@
unsigned *copy_ops,
unsigned *map_ops)
{
- struct gnttab_map_grant_ref *gop = queue->tx_map_ops, *request_gop;
- struct sk_buff *skb;
+ struct gnttab_map_grant_ref *gop = queue->tx_map_ops;
+ struct sk_buff *skb, *nskb;
int ret;
+ unsigned int frag_overflow;
while (skb_queue_len(&queue->tx_queue) < budget) {
struct xen_netif_tx_request txreq;
@@ -1265,6 +1253,29 @@
break;
}
+ skb_shinfo(skb)->nr_frags = ret;
+ if (data_len < txreq.size)
+ skb_shinfo(skb)->nr_frags++;
+ /* At this point shinfo->nr_frags is in fact the number of
+ * slots, which can be as large as XEN_NETBK_LEGACY_SLOTS_MAX.
+ */
+ frag_overflow = 0;
+ nskb = NULL;
+ if (skb_shinfo(skb)->nr_frags > MAX_SKB_FRAGS) {
+ frag_overflow = skb_shinfo(skb)->nr_frags - MAX_SKB_FRAGS;
+ BUG_ON(frag_overflow > MAX_SKB_FRAGS);
+ skb_shinfo(skb)->nr_frags = MAX_SKB_FRAGS;
+ nskb = xenvif_alloc_skb(0);
+ if (unlikely(nskb == NULL)) {
+ kfree_skb(skb);
+ xenvif_tx_err(queue, &txreq, idx);
+ if (net_ratelimit())
+ netdev_err(queue->vif->dev,
+ "Can't allocate the frag_list skb.\n");
+ break;
+ }
+ }
+
if (extras[XEN_NETIF_EXTRA_TYPE_GSO - 1].type) {
struct xen_netif_extra_info *gso;
gso = &extras[XEN_NETIF_EXTRA_TYPE_GSO - 1];
@@ -1272,6 +1283,7 @@
if (xenvif_set_skb_gso(queue->vif, skb, gso)) {
/* Failure in xenvif_set_skb_gso is fatal. */
kfree_skb(skb);
+ kfree_skb(nskb);
break;
}
}
@@ -1294,9 +1306,7 @@
(*copy_ops)++;
- skb_shinfo(skb)->nr_frags = ret;
if (data_len < txreq.size) {
- skb_shinfo(skb)->nr_frags++;
frag_set_pending_idx(&skb_shinfo(skb)->frags[0],
pending_idx);
xenvif_tx_create_map_op(queue, pending_idx, &txreq, gop);
@@ -1310,13 +1320,8 @@
queue->pending_cons++;
- request_gop = xenvif_get_requests(queue, skb, txfrags, gop);
- if (request_gop == NULL) {
- kfree_skb(skb);
- xenvif_tx_err(queue, &txreq, idx);
- break;
- }
- gop = request_gop;
+ gop = xenvif_get_requests(queue, skb, txfrags, gop,
+ frag_overflow, nskb);
__skb_queue_tail(&queue->tx_queue, skb);
@@ -1536,7 +1541,6 @@
smp_wmb();
queue->dealloc_prod++;
} while (ubuf);
- wake_up(&queue->dealloc_wq);
spin_unlock_irqrestore(&queue->callback_lock, flags);
if (likely(zerocopy_success))
diff --git a/drivers/ntb/ntb.c b/drivers/ntb/ntb.c
index 23435f2..2e25307 100644
--- a/drivers/ntb/ntb.c
+++ b/drivers/ntb/ntb.c
@@ -114,7 +114,7 @@
ntb->dev.bus = &ntb_bus;
ntb->dev.parent = &ntb->pdev->dev;
ntb->dev.release = ntb_dev_release;
- dev_set_name(&ntb->dev, pci_name(ntb->pdev));
+ dev_set_name(&ntb->dev, "%s", pci_name(ntb->pdev));
ntb->ctx = NULL;
ntb->ctx_ops = NULL;
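
The one-line ntb.c fix is a classic format-string repair: pci_name() output may contain '%', so it must be passed as an argument, never as the format. A minimal sketch:

    #include <linux/device.h>

    /* Sketch: never feed an arbitrary string in as the format itself. */
    static void name_device(struct device *dev, const char *name)
    {
            dev_set_name(dev, "%s", name);  /* safe */
            /* dev_set_name(dev, name);  unsafe: name may contain '%' */
    }
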
diff --git a/drivers/ntb/ntb_transport.c b/drivers/ntb/ntb_transport.c
index efe3ad4..1c6386d 100644
--- a/drivers/ntb/ntb_transport.c
+++ b/drivers/ntb/ntb_transport.c
@@ -142,10 +142,11 @@
void (*rx_handler)(struct ntb_transport_qp *qp, void *qp_data,
void *data, int len);
+ struct list_head rx_post_q;
struct list_head rx_pend_q;
struct list_head rx_free_q;
- spinlock_t ntb_rx_pend_q_lock;
- spinlock_t ntb_rx_free_q_lock;
+ /* ntb_rx_q_lock: synchronize access to rx_XXXX_q */
+ spinlock_t ntb_rx_q_lock;
void *rx_buff;
unsigned int rx_index;
unsigned int rx_max_entry;
@@ -211,6 +212,8 @@
bool link_is_up;
struct delayed_work link_work;
struct work_struct link_cleanup;
+
+ struct dentry *debugfs_node_dir;
};
enum {
@@ -436,13 +439,17 @@
char *buf;
ssize_t ret, out_offset, out_count;
+ qp = filp->private_data;
+
+ if (!qp || !qp->link_is_up)
+ return 0;
+
out_count = 1000;
buf = kmalloc(out_count, GFP_KERNEL);
if (!buf)
return -ENOMEM;
- qp = filp->private_data;
out_offset = 0;
out_offset += snprintf(buf + out_offset, out_count - out_offset,
"NTB QP stats\n");
@@ -534,6 +541,27 @@
return entry;
}
+static struct ntb_queue_entry *ntb_list_mv(spinlock_t *lock,
+ struct list_head *list,
+ struct list_head *to_list)
+{
+ struct ntb_queue_entry *entry;
+ unsigned long flags;
+
+ spin_lock_irqsave(lock, flags);
+
+ if (list_empty(list)) {
+ entry = NULL;
+ } else {
+ entry = list_first_entry(list, struct ntb_queue_entry, entry);
+ list_move_tail(&entry->entry, to_list);
+ }
+
+ spin_unlock_irqrestore(lock, flags);
+
+ return entry;
+}
+
static int ntb_transport_setup_qp_mw(struct ntb_transport_ctx *nt,
unsigned int qp_num)
{
@@ -601,13 +629,16 @@
}
static int ntb_set_mw(struct ntb_transport_ctx *nt, int num_mw,
- unsigned int size)
+ resource_size_t size)
{
struct ntb_transport_mw *mw = &nt->mw_vec[num_mw];
struct pci_dev *pdev = nt->ndev->pdev;
- unsigned int xlat_size, buff_size;
+ size_t xlat_size, buff_size;
int rc;
+ if (!size)
+ return -EINVAL;
+
xlat_size = round_up(size, mw->xlat_align_size);
buff_size = round_up(size, mw->xlat_align);
@@ -627,7 +658,7 @@
if (!mw->virt_addr) {
mw->xlat_size = 0;
mw->buff_size = 0;
- dev_err(&pdev->dev, "Unable to alloc MW buff of size %d\n",
+ dev_err(&pdev->dev, "Unable to alloc MW buff of size %zu\n",
buff_size);
return -ENOMEM;
}
@@ -867,6 +898,8 @@
if (qp->event_handler)
qp->event_handler(qp->cb_data, qp->link_is_up);
+
+ tasklet_schedule(&qp->rxc_db_work);
} else if (nt->link_is_up)
schedule_delayed_work(&qp->link_work,
msecs_to_jiffies(NTB_LINK_DOWN_TIMEOUT));
@@ -923,12 +956,12 @@
qp->tx_max_frame = min(transport_mtu, tx_size / 2);
qp->tx_max_entry = tx_size / qp->tx_max_frame;
- if (nt_debugfs_dir) {
+ if (nt->debugfs_node_dir) {
char debugfs_name[4];
snprintf(debugfs_name, 4, "qp%d", qp_num);
qp->debugfs_dir = debugfs_create_dir(debugfs_name,
- nt_debugfs_dir);
+ nt->debugfs_node_dir);
qp->debugfs_stats = debugfs_create_file("stats", S_IRUSR,
qp->debugfs_dir, qp,
@@ -941,10 +974,10 @@
INIT_DELAYED_WORK(&qp->link_work, ntb_qp_link_work);
INIT_WORK(&qp->link_cleanup, ntb_qp_link_cleanup_work);
- spin_lock_init(&qp->ntb_rx_pend_q_lock);
- spin_lock_init(&qp->ntb_rx_free_q_lock);
+ spin_lock_init(&qp->ntb_rx_q_lock);
spin_lock_init(&qp->ntb_tx_free_q_lock);
+ INIT_LIST_HEAD(&qp->rx_post_q);
INIT_LIST_HEAD(&qp->rx_pend_q);
INIT_LIST_HEAD(&qp->rx_free_q);
INIT_LIST_HEAD(&qp->tx_free_q);
@@ -1031,6 +1064,12 @@
goto err2;
}
+ if (nt_debugfs_dir) {
+ nt->debugfs_node_dir =
+ debugfs_create_dir(pci_name(ndev->pdev),
+ nt_debugfs_dir);
+ }
+
for (i = 0; i < qp_count; i++) {
rc = ntb_transport_init_queue(nt, i);
if (rc)
@@ -1107,22 +1146,47 @@
kfree(nt);
}
+static void ntb_complete_rxc(struct ntb_transport_qp *qp)
+{
+ struct ntb_queue_entry *entry;
+ void *cb_data;
+ unsigned int len;
+ unsigned long irqflags;
+
+ spin_lock_irqsave(&qp->ntb_rx_q_lock, irqflags);
+
+ while (!list_empty(&qp->rx_post_q)) {
+ entry = list_first_entry(&qp->rx_post_q,
+ struct ntb_queue_entry, entry);
+ if (!(entry->flags & DESC_DONE_FLAG))
+ break;
+
+ entry->rx_hdr->flags = 0;
+ iowrite32(entry->index, &qp->rx_info->entry);
+
+ cb_data = entry->cb_data;
+ len = entry->len;
+
+ list_move_tail(&entry->entry, &qp->rx_free_q);
+
+ spin_unlock_irqrestore(&qp->ntb_rx_q_lock, irqflags);
+
+ if (qp->rx_handler && qp->client_ready)
+ qp->rx_handler(qp, qp->cb_data, cb_data, len);
+
+ spin_lock_irqsave(&qp->ntb_rx_q_lock, irqflags);
+ }
+
+ spin_unlock_irqrestore(&qp->ntb_rx_q_lock, irqflags);
+}
+
static void ntb_rx_copy_callback(void *data)
{
struct ntb_queue_entry *entry = data;
- struct ntb_transport_qp *qp = entry->qp;
- void *cb_data = entry->cb_data;
- unsigned int len = entry->len;
- struct ntb_payload_header *hdr = entry->rx_hdr;
- hdr->flags = 0;
+ entry->flags |= DESC_DONE_FLAG;
- iowrite32(entry->index, &qp->rx_info->entry);
-
- ntb_list_add(&qp->ntb_rx_free_q_lock, &entry->entry, &qp->rx_free_q);
-
- if (qp->rx_handler && qp->client_ready)
- qp->rx_handler(qp, qp->cb_data, cb_data, len);
+ ntb_complete_rxc(entry->qp);
}
static void ntb_memcpy_rx(struct ntb_queue_entry *entry, void *offset)
@@ -1138,19 +1202,18 @@
ntb_rx_copy_callback(entry);
}
-static void ntb_async_rx(struct ntb_queue_entry *entry, void *offset,
- size_t len)
+static void ntb_async_rx(struct ntb_queue_entry *entry, void *offset)
{
struct dma_async_tx_descriptor *txd;
struct ntb_transport_qp *qp = entry->qp;
struct dma_chan *chan = qp->dma_chan;
struct dma_device *device;
- size_t pay_off, buff_off;
+ size_t pay_off, buff_off, len;
struct dmaengine_unmap_data *unmap;
dma_cookie_t cookie;
void *buf = entry->buf;
- entry->len = len;
+ len = entry->len;
if (!chan)
goto err;
@@ -1226,7 +1289,6 @@
struct ntb_payload_header *hdr;
struct ntb_queue_entry *entry;
void *offset;
- int rc;
offset = qp->rx_buff + qp->rx_max_frame * qp->rx_index;
hdr = offset + qp->rx_max_frame - sizeof(struct ntb_payload_header);
@@ -1255,65 +1317,43 @@
return -EIO;
}
- entry = ntb_list_rm(&qp->ntb_rx_pend_q_lock, &qp->rx_pend_q);
+ entry = ntb_list_mv(&qp->ntb_rx_q_lock, &qp->rx_pend_q, &qp->rx_post_q);
if (!entry) {
dev_dbg(&qp->ndev->pdev->dev, "no receive buffer\n");
qp->rx_err_no_buf++;
-
- rc = -ENOMEM;
- goto err;
+ return -EAGAIN;
}
+ entry->rx_hdr = hdr;
+ entry->index = qp->rx_index;
+
if (hdr->len > entry->len) {
dev_dbg(&qp->ndev->pdev->dev,
"receive buffer overflow! Wanted %d got %d\n",
hdr->len, entry->len);
qp->rx_err_oflow++;
- rc = -EIO;
- goto err;
+ entry->len = -EIO;
+ entry->flags |= DESC_DONE_FLAG;
+
+ ntb_complete_rxc(qp);
+ } else {
+ dev_dbg(&qp->ndev->pdev->dev,
+ "RX OK index %u ver %u size %d into buf size %d\n",
+ qp->rx_index, hdr->ver, hdr->len, entry->len);
+
+ qp->rx_bytes += hdr->len;
+ qp->rx_pkts++;
+
+ entry->len = hdr->len;
+
+ ntb_async_rx(entry, offset);
}
- dev_dbg(&qp->ndev->pdev->dev,
- "RX OK index %u ver %u size %d into buf size %d\n",
- qp->rx_index, hdr->ver, hdr->len, entry->len);
-
- qp->rx_bytes += hdr->len;
- qp->rx_pkts++;
-
- entry->index = qp->rx_index;
- entry->rx_hdr = hdr;
-
- ntb_async_rx(entry, offset, hdr->len);
-
qp->rx_index++;
qp->rx_index %= qp->rx_max_entry;
return 0;
-
-err:
- /* FIXME: if this syncrhonous update of the rx_index gets ahead of
- * asyncrhonous ntb_rx_copy_callback of previous entry, there are three
- * scenarios:
- *
- * 1) The peer might miss this update, but observe the update
- * from the memcpy completion callback. In this case, the buffer will
- * not be freed on the peer to be reused for a different packet. The
- * successful rx of a later packet would clear the condition, but the
- * condition could persist if several rx fail in a row.
- *
- * 2) The peer may observe this update before the asyncrhonous copy of
- * prior packets is completed. The peer may overwrite the buffers of
- * the prior packets before they are copied.
- *
- * 3) Both: the peer may observe the update, and then observe the index
- * decrement by the asynchronous completion callback. Who knows what
- * badness that will cause.
- */
- hdr->flags = 0;
- iowrite32(qp->rx_index, &qp->rx_info->entry);
-
- return rc;
}
static void ntb_transport_rxc_db(unsigned long data)
@@ -1333,7 +1373,7 @@
break;
}
- if (qp->dma_chan)
+ if (i && qp->dma_chan)
dma_async_issue_pending(qp->dma_chan);
if (i == qp->rx_max_entry) {
@@ -1609,7 +1649,7 @@
goto err1;
entry->qp = qp;
- ntb_list_add(&qp->ntb_rx_free_q_lock, &entry->entry,
+ ntb_list_add(&qp->ntb_rx_q_lock, &entry->entry,
&qp->rx_free_q);
}
@@ -1634,7 +1674,7 @@
while ((entry = ntb_list_rm(&qp->ntb_tx_free_q_lock, &qp->tx_free_q)))
kfree(entry);
err1:
- while ((entry = ntb_list_rm(&qp->ntb_rx_free_q_lock, &qp->rx_free_q)))
+ while ((entry = ntb_list_rm(&qp->ntb_rx_q_lock, &qp->rx_free_q)))
kfree(entry);
if (qp->dma_chan)
dma_release_channel(qp->dma_chan);
@@ -1652,7 +1692,6 @@
*/
void ntb_transport_free_queue(struct ntb_transport_qp *qp)
{
- struct ntb_transport_ctx *nt = qp->transport;
struct pci_dev *pdev;
struct ntb_queue_entry *entry;
u64 qp_bit;
@@ -1689,18 +1728,23 @@
qp->tx_handler = NULL;
qp->event_handler = NULL;
- while ((entry = ntb_list_rm(&qp->ntb_rx_free_q_lock, &qp->rx_free_q)))
+ while ((entry = ntb_list_rm(&qp->ntb_rx_q_lock, &qp->rx_free_q)))
kfree(entry);
- while ((entry = ntb_list_rm(&qp->ntb_rx_pend_q_lock, &qp->rx_pend_q))) {
- dev_warn(&pdev->dev, "Freeing item from a non-empty queue\n");
+ while ((entry = ntb_list_rm(&qp->ntb_rx_q_lock, &qp->rx_pend_q))) {
+ dev_warn(&pdev->dev, "Freeing item from non-empty rx_pend_q\n");
+ kfree(entry);
+ }
+
+ while ((entry = ntb_list_rm(&qp->ntb_rx_q_lock, &qp->rx_post_q))) {
+ dev_warn(&pdev->dev, "Freeing item from non-empty rx_post_q\n");
kfree(entry);
}
while ((entry = ntb_list_rm(&qp->ntb_tx_free_q_lock, &qp->tx_free_q)))
kfree(entry);
- nt->qp_bitmap_free |= qp_bit;
+ qp->transport->qp_bitmap_free |= qp_bit;
dev_info(&pdev->dev, "NTB Transport QP %d freed\n", qp->qp_num);
}
@@ -1724,14 +1768,14 @@
if (!qp || qp->client_ready)
return NULL;
- entry = ntb_list_rm(&qp->ntb_rx_pend_q_lock, &qp->rx_pend_q);
+ entry = ntb_list_rm(&qp->ntb_rx_q_lock, &qp->rx_pend_q);
if (!entry)
return NULL;
buf = entry->cb_data;
*len = entry->len;
- ntb_list_add(&qp->ntb_rx_free_q_lock, &entry->entry, &qp->rx_free_q);
+ ntb_list_add(&qp->ntb_rx_q_lock, &entry->entry, &qp->rx_free_q);
return buf;
}
@@ -1757,15 +1801,18 @@
if (!qp)
return -EINVAL;
- entry = ntb_list_rm(&qp->ntb_rx_free_q_lock, &qp->rx_free_q);
+ entry = ntb_list_rm(&qp->ntb_rx_q_lock, &qp->rx_free_q);
if (!entry)
return -ENOMEM;
entry->cb_data = cb;
entry->buf = data;
entry->len = len;
+ entry->flags = 0;
- ntb_list_add(&qp->ntb_rx_pend_q_lock, &entry->entry, &qp->rx_pend_q);
+ ntb_list_add(&qp->ntb_rx_q_lock, &entry->entry, &qp->rx_pend_q);
+
+ tasklet_schedule(&qp->rxc_db_work);
return 0;
}
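
The ntb_transport rework above funnels entries through rx_pend_q -> rx_post_q -> rx_free_q under a single ntb_rx_q_lock, with ntb_list_mv() doing an atomic pop-and-move. A condensed sketch of that helper (struct item and pop_to() are illustrative names):

    #include <linux/list.h>
    #include <linux/spinlock.h>

    struct item {
            struct list_head entry;
    };

    /* Sketch: pop the head of 'from' and park it on 'to' in one locked
     * step; list_move_tail() keeps completion order FIFO. */
    static struct item *pop_to(spinlock_t *lock, struct list_head *from,
                               struct list_head *to)
    {
            struct item *it = NULL;
            unsigned long flags;

            spin_lock_irqsave(lock, flags);
            if (!list_empty(from)) {
                    it = list_first_entry(from, struct item, entry);
                    list_move_tail(&it->entry, to);
            }
            spin_unlock_irqrestore(lock, flags);

            return it;
    }
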
diff --git a/drivers/pci/Kconfig b/drivers/pci/Kconfig
index 73de4ef..944f500 100644
--- a/drivers/pci/Kconfig
+++ b/drivers/pci/Kconfig
@@ -2,7 +2,7 @@
# PCI configuration
#
config PCI_BUS_ADDR_T_64BIT
- def_bool y if (ARCH_DMA_ADDR_T_64BIT || 64BIT)
+ def_bool y if (ARCH_DMA_ADDR_T_64BIT || (64BIT && !PARISC))
depends on PCI
config PCI_MSI
diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
index cefd636..f6ae0d0 100644
--- a/drivers/pci/probe.c
+++ b/drivers/pci/probe.c
@@ -997,7 +997,12 @@
else if (type == PCI_EXP_TYPE_UPSTREAM ||
type == PCI_EXP_TYPE_DOWNSTREAM) {
parent = pci_upstream_bridge(pdev);
- if (!parent->has_secondary_link)
+
+ /*
+ * Usually there's an upstream device (Root Port or Switch
+ * Downstream Port), but we can't assume one exists.
+ */
+ if (parent && !parent->has_secondary_link)
pdev->has_secondary_link = 1;
}
}
@@ -1103,7 +1108,7 @@
#define LEGACY_IO_RESOURCE (IORESOURCE_IO | IORESOURCE_PCI_FIXED)
-static void pci_msi_setup_pci_dev(struct pci_dev *dev)
+void pci_msi_setup_pci_dev(struct pci_dev *dev)
{
/*
* Disable the MSI hardware to avoid screaming interrupts
diff --git a/drivers/platform/chrome/Kconfig b/drivers/platform/chrome/Kconfig
index cb13299..3271cd1 100644
--- a/drivers/platform/chrome/Kconfig
+++ b/drivers/platform/chrome/Kconfig
@@ -4,7 +4,6 @@
menuconfig CHROME_PLATFORMS
bool "Platform support for Chrome hardware"
- depends on X86 || ARM
---help---
Say Y here to get to see options for platform support for
various Chromebooks and Chromeboxes. This option alone does
diff --git a/drivers/scsi/fnic/fnic.h b/drivers/scsi/fnic/fnic.h
index 26270c3..ce129e5 100644
--- a/drivers/scsi/fnic/fnic.h
+++ b/drivers/scsi/fnic/fnic.h
@@ -39,7 +39,7 @@
#define DRV_NAME "fnic"
#define DRV_DESCRIPTION "Cisco FCoE HBA Driver"
-#define DRV_VERSION "1.6.0.17"
+#define DRV_VERSION "1.6.0.17a"
#define PFX DRV_NAME ": "
#define DFX DRV_NAME "%d: "
diff --git a/drivers/scsi/fnic/fnic_scsi.c b/drivers/scsi/fnic/fnic_scsi.c
index 155b286..25436cd 100644
--- a/drivers/scsi/fnic/fnic_scsi.c
+++ b/drivers/scsi/fnic/fnic_scsi.c
@@ -425,6 +425,7 @@
unsigned long ptr;
struct fc_rport_priv *rdata;
spinlock_t *io_lock = NULL;
+ int io_lock_acquired = 0;
if (unlikely(fnic_chk_state_flags_locked(fnic, FNIC_FLAGS_IO_BLOCKED)))
return SCSI_MLQUEUE_HOST_BUSY;
@@ -518,6 +519,7 @@
spin_lock_irqsave(io_lock, flags);
/* initialize rest of io_req */
+ io_lock_acquired = 1;
io_req->port_id = rport->port_id;
io_req->start_time = jiffies;
CMD_STATE(sc) = FNIC_IOREQ_CMD_PENDING;
@@ -571,7 +573,7 @@
(((u64)CMD_FLAGS(sc) >> 32) | CMD_STATE(sc)));
/* if only we issued IO, will we have the io lock */
- if (CMD_FLAGS(sc) & FNIC_IO_INITIALIZED)
+ if (io_lock_acquired)
spin_unlock_irqrestore(io_lock, flags);
atomic_dec(&fnic->in_flight);
diff --git a/drivers/scsi/libfc/fc_exch.c b/drivers/scsi/libfc/fc_exch.c
index 1b3a094..30f9ef0 100644
--- a/drivers/scsi/libfc/fc_exch.c
+++ b/drivers/scsi/libfc/fc_exch.c
@@ -733,8 +733,6 @@
if (resp) {
resp(sp, fp, arg);
res = true;
- } else if (!IS_ERR(fp)) {
- fc_frame_free(fp);
}
spin_lock_bh(&ep->ex_lock);
@@ -1596,7 +1594,8 @@
* If new exch resp handler is valid then call that
* first.
*/
- fc_invoke_resp(ep, sp, fp);
+ if (!fc_invoke_resp(ep, sp, fp))
+ fc_frame_free(fp);
fc_exch_release(ep);
return;
@@ -1695,7 +1694,8 @@
fc_exch_hold(ep);
if (!rc)
fc_exch_delete(ep);
- fc_invoke_resp(ep, sp, fp);
+ if (!fc_invoke_resp(ep, sp, fp))
+ fc_frame_free(fp);
if (has_rec)
fc_exch_timer_set(ep, ep->r_a_tov);
fc_exch_release(ep);
diff --git a/drivers/scsi/libfc/fc_fcp.c b/drivers/scsi/libfc/fc_fcp.c
index c679594..2d5909c 100644
--- a/drivers/scsi/libfc/fc_fcp.c
+++ b/drivers/scsi/libfc/fc_fcp.c
@@ -1039,11 +1039,26 @@
fc_fcp_pkt_hold(fsp);
spin_unlock_irqrestore(&si->scsi_queue_lock, flags);
- if (!fc_fcp_lock_pkt(fsp)) {
+ spin_lock_bh(&fsp->scsi_pkt_lock);
+ if (!(fsp->state & FC_SRB_COMPL)) {
+ fsp->state |= FC_SRB_COMPL;
+ /*
+		 * TODO: dropping and reacquiring scsi_pkt_lock around
+		 * fc_fcp_cleanup_cmd() is required, since
+		 * fc_fcp_cleanup_cmd() calls into fc_seq_set_resp(),
+		 * which can sleep via schedule(). Perhaps the
+		 * schedule-based code should be removed instead of
+		 * unlocking here, to avoid a scheduling-while-atomic
+		 * bug.
+ */
+ spin_unlock_bh(&fsp->scsi_pkt_lock);
+
fc_fcp_cleanup_cmd(fsp, error);
+
+ spin_lock_bh(&fsp->scsi_pkt_lock);
fc_io_compl(fsp);
- fc_fcp_unlock_pkt(fsp);
}
+ spin_unlock_bh(&fsp->scsi_pkt_lock);
fc_fcp_pkt_release(fsp);
spin_lock_irqsave(&si->scsi_queue_lock, flags);
diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c
index 8053f24..98d9bb6 100644
--- a/drivers/scsi/libiscsi.c
+++ b/drivers/scsi/libiscsi.c
@@ -2941,10 +2941,10 @@
{
struct iscsi_conn *conn = cls_conn->dd_data;
struct iscsi_session *session = conn->session;
- unsigned long flags;
del_timer_sync(&conn->transport_timer);
+ mutex_lock(&session->eh_mutex);
spin_lock_bh(&session->frwd_lock);
conn->c_stage = ISCSI_CONN_CLEANUP_WAIT;
if (session->leadconn == conn) {
@@ -2956,28 +2956,6 @@
}
spin_unlock_bh(&session->frwd_lock);
- /*
- * Block until all in-progress commands for this connection
- * time out or fail.
- */
- for (;;) {
- spin_lock_irqsave(session->host->host_lock, flags);
- if (!atomic_read(&session->host->host_busy)) { /* OK for ERL == 0 */
- spin_unlock_irqrestore(session->host->host_lock, flags);
- break;
- }
- spin_unlock_irqrestore(session->host->host_lock, flags);
- msleep_interruptible(500);
- iscsi_conn_printk(KERN_INFO, conn, "iscsi conn_destroy(): "
- "host_busy %d host_failed %d\n",
- atomic_read(&session->host->host_busy),
- session->host->host_failed);
- /*
- * force eh_abort() to unblock
- */
- wake_up(&conn->ehwait);
- }
-
/* flush queued up work because we free the connection below */
iscsi_suspend_tx(conn);
@@ -2994,6 +2972,7 @@
if (session->leadconn == conn)
session->leadconn = NULL;
spin_unlock_bh(&session->frwd_lock);
+ mutex_unlock(&session->eh_mutex);
iscsi_destroy_conn(cls_conn);
}
diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c
index cfadcce..6457a8a 100644
--- a/drivers/scsi/scsi_error.c
+++ b/drivers/scsi/scsi_error.c
@@ -26,7 +26,6 @@
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/jiffies.h>
-#include <asm/unaligned.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
@@ -2523,33 +2522,3 @@
}
}
EXPORT_SYMBOL(scsi_build_sense_buffer);
-
-/**
- * scsi_set_sense_information - set the information field in a
- * formatted sense data buffer
- * @buf: Where to build sense data
- * @info: 64-bit information value to be set
- *
- **/
-void scsi_set_sense_information(u8 *buf, u64 info)
-{
- if ((buf[0] & 0x7f) == 0x72) {
- u8 *ucp, len;
-
- len = buf[7];
- ucp = (char *)scsi_sense_desc_find(buf, len + 8, 0);
- if (!ucp) {
- buf[7] = len + 0xa;
- ucp = buf + 8 + len;
- }
- ucp[0] = 0;
- ucp[1] = 0xa;
- ucp[2] = 0x80; /* Valid bit */
- ucp[3] = 0;
- put_unaligned_be64(info, &ucp[4]);
- } else if ((buf[0] & 0x7f) == 0x70) {
- buf[0] |= 0x80;
- put_unaligned_be64(info, &buf[3]);
- }
-}
-EXPORT_SYMBOL(scsi_set_sense_information);
diff --git a/drivers/scsi/scsi_pm.c b/drivers/scsi/scsi_pm.c
index 9e43ae1..e4b7998 100644
--- a/drivers/scsi/scsi_pm.c
+++ b/drivers/scsi/scsi_pm.c
@@ -217,15 +217,15 @@
{
const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
struct scsi_device *sdev = to_scsi_device(dev);
- int err;
+ int err = 0;
- err = blk_pre_runtime_suspend(sdev->request_queue);
- if (err)
- return err;
- if (pm && pm->runtime_suspend)
+ if (pm && pm->runtime_suspend) {
+ err = blk_pre_runtime_suspend(sdev->request_queue);
+ if (err)
+ return err;
err = pm->runtime_suspend(dev);
- blk_post_runtime_suspend(sdev->request_queue, err);
-
+ blk_post_runtime_suspend(sdev->request_queue, err);
+ }
return err;
}
@@ -248,11 +248,11 @@
const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
int err = 0;
- blk_pre_runtime_resume(sdev->request_queue);
- if (pm && pm->runtime_resume)
+ if (pm && pm->runtime_resume) {
+ blk_pre_runtime_resume(sdev->request_queue);
err = pm->runtime_resume(dev);
- blk_post_runtime_resume(sdev->request_queue, err);
-
+ blk_post_runtime_resume(sdev->request_queue, err);
+ }
return err;
}
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index 3b2fcb4..a20da8c 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -2770,9 +2770,9 @@
max_xfer = sdkp->max_xfer_blocks;
max_xfer <<= ilog2(sdp->sector_size) - 9;
- max_xfer = min_not_zero(queue_max_hw_sectors(sdkp->disk->queue),
- max_xfer);
- blk_queue_max_hw_sectors(sdkp->disk->queue, max_xfer);
+ sdkp->disk->queue->limits.max_sectors =
+ min_not_zero(queue_max_hw_sectors(sdkp->disk->queue), max_xfer);
+
set_capacity(disk, sdkp->capacity);
sd_config_write_same(sdkp);
kfree(buffer);
diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c
index cd77a06..fd09290 100644
--- a/drivers/target/iscsi/iscsi_target.c
+++ b/drivers/target/iscsi/iscsi_target.c
@@ -968,9 +968,9 @@
cmd->cmd_flags |= ICF_NON_IMMEDIATE_UNSOLICITED_DATA;
conn->sess->init_task_tag = cmd->init_task_tag = hdr->itt;
- if (hdr->flags & ISCSI_FLAG_CMD_READ) {
+ if (hdr->flags & ISCSI_FLAG_CMD_READ)
cmd->targ_xfer_tag = session_get_next_ttt(conn->sess);
- } else if (hdr->flags & ISCSI_FLAG_CMD_WRITE)
+ else
cmd->targ_xfer_tag = 0xFFFFFFFF;
cmd->cmd_sn = be32_to_cpu(hdr->cmdsn);
cmd->exp_stat_sn = be32_to_cpu(hdr->exp_statsn);
diff --git a/drivers/target/target_core_configfs.c b/drivers/target/target_core_configfs.c
index c2e9fea9..860e840 100644
--- a/drivers/target/target_core_configfs.c
+++ b/drivers/target/target_core_configfs.c
@@ -457,8 +457,15 @@
if (!strcmp(t->tf_ops->name, fo->name)) {
BUG_ON(atomic_read(&t->tf_access_cnt));
list_del(&t->tf_list);
+ mutex_unlock(&g_tf_lock);
+ /*
+ * Wait for any outstanding fabric se_deve_entry->rcu_head
+ * callbacks to complete post kfree_rcu(), before allowing
+ * fabric driver unload of TFO->module to proceed.
+ */
+ rcu_barrier();
kfree(t);
- break;
+ return;
}
}
mutex_unlock(&g_tf_lock);
diff --git a/drivers/target/target_core_hba.c b/drivers/target/target_core_hba.c
index 62ea4e8..be9cefc 100644
--- a/drivers/target/target_core_hba.c
+++ b/drivers/target/target_core_hba.c
@@ -84,8 +84,16 @@
list_for_each_entry(tb, &backend_list, list) {
if (tb->ops == ops) {
list_del(&tb->list);
+ mutex_unlock(&backend_mutex);
+ /*
+ * Wait for any outstanding backend driver ->rcu_head
+ * callbacks to complete post TBO->free_device() ->
+ * call_rcu(), before allowing backend driver module
+ * unload of target_backend_ops->owner to proceed.
+ */
+ rcu_barrier();
kfree(tb);
- break;
+ return;
}
}
mutex_unlock(&backend_mutex);
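
Both target_core hunks insert an rcu_barrier() between unlinking the entry and kfree(). The rule they encode, in sketch form (drop_registration() is an illustrative helper):

    #include <linux/list.h>
    #include <linux/rcupdate.h>
    #include <linux/slab.h>

    /* Sketch: when users of an object are freed via call_rcu() or
     * kfree_rcu(), wait for all pending callbacks before the final
     * kfree()/module unload, so no callback touches freed memory. */
    static void drop_registration(struct list_head *node, void *owner)
    {
            list_del(node);         /* no new lookups can find it */
            rcu_barrier();          /* drain outstanding ->rcu_head callbacks */
            kfree(owner);           /* now safe to free */
    }
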
diff --git a/drivers/target/target_core_spc.c b/drivers/target/target_core_spc.c
index b5ba1ec..f87d4ce 100644
--- a/drivers/target/target_core_spc.c
+++ b/drivers/target/target_core_spc.c
@@ -1203,17 +1203,13 @@
struct se_dev_entry *deve;
struct se_session *sess = cmd->se_sess;
struct se_node_acl *nacl;
+ struct scsi_lun slun;
unsigned char *buf;
u32 lun_count = 0, offset = 8;
-
- if (cmd->data_length < 16) {
- pr_warn("REPORT LUNS allocation length %u too small\n",
- cmd->data_length);
- return TCM_INVALID_CDB_FIELD;
- }
+ __be32 len;
buf = transport_kmap_data_sg(cmd);
- if (!buf)
+ if (cmd->data_length && !buf)
return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
/*
@@ -1221,11 +1217,9 @@
* coming via a target_core_mod PASSTHROUGH op, and not through
* a $FABRIC_MOD. In that case, report LUN=0 only.
*/
- if (!sess) {
- int_to_scsilun(0, (struct scsi_lun *)&buf[offset]);
- lun_count = 1;
+ if (!sess)
goto done;
- }
+
nacl = sess->se_node_acl;
rcu_read_lock();
@@ -1236,10 +1230,12 @@
* See SPC2-R20 7.19.
*/
lun_count++;
- if ((offset + 8) > cmd->data_length)
+ if (offset >= cmd->data_length)
continue;
- int_to_scsilun(deve->mapped_lun, (struct scsi_lun *)&buf[offset]);
+ int_to_scsilun(deve->mapped_lun, &slun);
+ memcpy(buf + offset, &slun,
+ min(8u, cmd->data_length - offset));
offset += 8;
}
rcu_read_unlock();
@@ -1248,12 +1244,22 @@
* See SPC3 r07, page 159.
*/
done:
- lun_count *= 8;
- buf[0] = ((lun_count >> 24) & 0xff);
- buf[1] = ((lun_count >> 16) & 0xff);
- buf[2] = ((lun_count >> 8) & 0xff);
- buf[3] = (lun_count & 0xff);
- transport_kunmap_data_sg(cmd);
+ /*
+ * If no LUNs are accessible, report virtual LUN 0.
+ */
+ if (lun_count == 0) {
+ int_to_scsilun(0, &slun);
+ if (cmd->data_length > 8)
+ memcpy(buf + offset, &slun,
+ min(8u, cmd->data_length - offset));
+ lun_count = 1;
+ }
+
+ if (buf) {
+ len = cpu_to_be32(lun_count * 8);
+ memcpy(buf, &len, min_t(int, sizeof len, cmd->data_length));
+ transport_kunmap_data_sg(cmd);
+ }
target_complete_cmd_with_length(cmd, GOOD, 8 + lun_count * 8);
return 0;
diff --git a/drivers/thermal/cpu_cooling.c b/drivers/thermal/cpu_cooling.c
index 6509c61..620dcd4 100644
--- a/drivers/thermal/cpu_cooling.c
+++ b/drivers/thermal/cpu_cooling.c
@@ -68,7 +68,7 @@
* registered cooling device.
* @cpufreq_state: integer value representing the current state of cpufreq
* cooling devices.
- * @cpufreq_val: integer value representing the absolute value of the clipped
+ * @clipped_freq: integer value representing the absolute value of the clipped
* frequency.
* @max_level: maximum cooling level. One less than total number of valid
* cpufreq frequencies.
@@ -91,7 +91,7 @@
int id;
struct thermal_cooling_device *cool_dev;
unsigned int cpufreq_state;
- unsigned int cpufreq_val;
+ unsigned int clipped_freq;
unsigned int max_level;
unsigned int *freq_table; /* In descending order */
struct cpumask allowed_cpus;
@@ -107,6 +107,9 @@
static DEFINE_IDR(cpufreq_idr);
static DEFINE_MUTEX(cooling_cpufreq_lock);
+static unsigned int cpufreq_dev_count;
+
+static DEFINE_MUTEX(cooling_list_lock);
static LIST_HEAD(cpufreq_dev_list);
/**
@@ -185,14 +188,14 @@
{
struct cpufreq_cooling_device *cpufreq_dev;
- mutex_lock(&cooling_cpufreq_lock);
+ mutex_lock(&cooling_list_lock);
list_for_each_entry(cpufreq_dev, &cpufreq_dev_list, node) {
if (cpumask_test_cpu(cpu, &cpufreq_dev->allowed_cpus)) {
- mutex_unlock(&cooling_cpufreq_lock);
+ mutex_unlock(&cooling_list_lock);
return get_level(cpufreq_dev, freq);
}
}
- mutex_unlock(&cooling_cpufreq_lock);
+ mutex_unlock(&cooling_list_lock);
pr_err("%s: cpu:%d not part of any cooling device\n", __func__, cpu);
return THERMAL_CSTATE_INVALID;
@@ -215,29 +218,35 @@
unsigned long event, void *data)
{
struct cpufreq_policy *policy = data;
- unsigned long max_freq = 0;
+ unsigned long clipped_freq;
struct cpufreq_cooling_device *cpufreq_dev;
- switch (event) {
-
- case CPUFREQ_ADJUST:
- mutex_lock(&cooling_cpufreq_lock);
- list_for_each_entry(cpufreq_dev, &cpufreq_dev_list, node) {
- if (!cpumask_test_cpu(policy->cpu,
- &cpufreq_dev->allowed_cpus))
- continue;
-
- max_freq = cpufreq_dev->cpufreq_val;
-
- if (policy->max != max_freq)
- cpufreq_verify_within_limits(policy, 0,
- max_freq);
- }
- mutex_unlock(&cooling_cpufreq_lock);
- break;
- default:
+ if (event != CPUFREQ_ADJUST)
return NOTIFY_DONE;
+
+ mutex_lock(&cooling_list_lock);
+ list_for_each_entry(cpufreq_dev, &cpufreq_dev_list, node) {
+ if (!cpumask_test_cpu(policy->cpu, &cpufreq_dev->allowed_cpus))
+ continue;
+
+ /*
+		 * policy->max is the maximum allowed frequency defined by the
+		 * user, and clipped_freq is the maximum that thermal
+		 * constraints allow.
+ *
+ * If clipped_freq is lower than policy->max, then we need to
+ * readjust policy->max.
+ *
+ * But, if clipped_freq is greater than policy->max, we don't
+ * need to do anything.
+ */
+ clipped_freq = cpufreq_dev->clipped_freq;
+
+ if (policy->max > clipped_freq)
+ cpufreq_verify_within_limits(policy, 0, clipped_freq);
+ break;
}
+ mutex_unlock(&cooling_list_lock);
return NOTIFY_OK;
}
@@ -519,7 +528,7 @@
clip_freq = cpufreq_device->freq_table[state];
cpufreq_device->cpufreq_state = state;
- cpufreq_device->cpufreq_val = clip_freq;
+ cpufreq_device->clipped_freq = clip_freq;
cpufreq_update_policy(cpu);
@@ -861,17 +870,19 @@
pr_debug("%s: freq:%u KHz\n", __func__, freq);
}
- cpufreq_dev->cpufreq_val = cpufreq_dev->freq_table[0];
+ cpufreq_dev->clipped_freq = cpufreq_dev->freq_table[0];
cpufreq_dev->cool_dev = cool_dev;
mutex_lock(&cooling_cpufreq_lock);
+ mutex_lock(&cooling_list_lock);
+ list_add(&cpufreq_dev->node, &cpufreq_dev_list);
+ mutex_unlock(&cooling_list_lock);
+
/* Register the notifier for first cpufreq cooling device */
- if (list_empty(&cpufreq_dev_list))
+ if (!cpufreq_dev_count++)
cpufreq_register_notifier(&thermal_cpufreq_notifier_block,
CPUFREQ_POLICY_NOTIFIER);
- list_add(&cpufreq_dev->node, &cpufreq_dev_list);
-
mutex_unlock(&cooling_cpufreq_lock);
return cool_dev;
@@ -1013,13 +1024,17 @@
return;
cpufreq_dev = cdev->devdata;
- mutex_lock(&cooling_cpufreq_lock);
- list_del(&cpufreq_dev->node);
/* Unregister the notifier for the last cpufreq cooling device */
- if (list_empty(&cpufreq_dev_list))
+ mutex_lock(&cooling_cpufreq_lock);
+ if (!--cpufreq_dev_count)
cpufreq_unregister_notifier(&thermal_cpufreq_notifier_block,
CPUFREQ_POLICY_NOTIFIER);
+
+ mutex_lock(&cooling_list_lock);
+ list_del(&cpufreq_dev->node);
+ mutex_unlock(&cooling_list_lock);
+
mutex_unlock(&cooling_cpufreq_lock);
thermal_cooling_device_unregister(cpufreq_dev->cool_dev);
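
The cpu_cooling change swaps the racy list_empty() test for an explicit device counter when deciding whether to (un)register the cpufreq notifier. A sketch of that first/last-device pattern (the hooks are hypothetical stand-ins for the cpufreq notifier calls):

    #include <linux/mutex.h>

    static DEFINE_MUTEX(reg_lock);
    static unsigned int dev_count;

    /* Sketch: register on the first device, unregister on the last. */
    static void notifier_get(void (*do_register)(void))
    {
            mutex_lock(&reg_lock);
            if (!dev_count++)
                    do_register();
            mutex_unlock(&reg_lock);
    }

    static void notifier_put(void (*do_unregister)(void))
    {
            mutex_lock(&reg_lock);
            if (!--dev_count)
                    do_unregister();
            mutex_unlock(&reg_lock);
    }
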
diff --git a/drivers/thermal/power_allocator.c b/drivers/thermal/power_allocator.c
index 63a448f..2516769 100644
--- a/drivers/thermal/power_allocator.c
+++ b/drivers/thermal/power_allocator.c
@@ -258,8 +258,7 @@
BUILD_BUG_ON(sizeof(*req_power) != sizeof(*granted_power));
BUILD_BUG_ON(sizeof(*req_power) != sizeof(*extra_actor_power));
BUILD_BUG_ON(sizeof(*req_power) != sizeof(*weighted_req_power));
- req_power = devm_kcalloc(&tz->device, num_actors * 5,
- sizeof(*req_power), GFP_KERNEL);
+ req_power = kcalloc(num_actors * 5, sizeof(*req_power), GFP_KERNEL);
if (!req_power) {
ret = -ENOMEM;
goto unlock;
@@ -334,7 +333,7 @@
max_allocatable_power, current_temp,
(s32)control_temp - (s32)current_temp);
- devm_kfree(&tz->device, req_power);
+ kfree(req_power);
unlock:
mutex_unlock(&tz->lock);
@@ -426,7 +425,7 @@
return -EINVAL;
}
- params = devm_kzalloc(&tz->device, sizeof(*params), GFP_KERNEL);
+ params = kzalloc(sizeof(*params), GFP_KERNEL);
if (!params)
return -ENOMEM;
@@ -468,14 +467,14 @@
return 0;
free:
- devm_kfree(&tz->device, params);
+ kfree(params);
return ret;
}
static void power_allocator_unbind(struct thermal_zone_device *tz)
{
dev_dbg(&tz->device, "Unbinding from thermal zone %d\n", tz->id);
- devm_kfree(&tz->device, tz->governor_data);
+ kfree(tz->governor_data);
tz->governor_data = NULL;
}
diff --git a/drivers/video/console/fbcon.c b/drivers/video/console/fbcon.c
index 658c34b..1aaf893 100644
--- a/drivers/video/console/fbcon.c
+++ b/drivers/video/console/fbcon.c
@@ -1306,10 +1306,11 @@
int y;
int c = scr_readw((u16 *) vc->vc_pos);
+ ops->cur_blink_jiffies = msecs_to_jiffies(vc->vc_cur_blink_ms);
+
if (fbcon_is_inactive(vc, info) || vc->vc_deccm != 1)
return;
- ops->cur_blink_jiffies = msecs_to_jiffies(vc->vc_cur_blink_ms);
if (vc->vc_cursor_type & 0x10)
fbcon_del_cursor_timer(info);
else
diff --git a/drivers/video/fbdev/Kconfig b/drivers/video/fbdev/Kconfig
index 2d98de5..f888561 100644
--- a/drivers/video/fbdev/Kconfig
+++ b/drivers/video/fbdev/Kconfig
@@ -298,7 +298,7 @@
# Helper logic selected only by the ARM Versatile platform family.
config PLAT_VERSATILE_CLCD
- def_bool ARCH_VERSATILE || ARCH_REALVIEW || ARCH_VEXPRESS
+ def_bool ARCH_VERSATILE || ARCH_REALVIEW || ARCH_VEXPRESS || ARCH_INTEGRATOR
depends on ARM
depends on FB_ARMCLCD && FB=y
diff --git a/drivers/video/fbdev/omap2/dss/dss-of.c b/drivers/video/fbdev/omap2/dss/dss-of.c
index 928ee63..bf407b6 100644
--- a/drivers/video/fbdev/omap2/dss/dss-of.c
+++ b/drivers/video/fbdev/omap2/dss/dss-of.c
@@ -60,6 +60,8 @@
}
prev = port;
} while (of_node_cmp(port->name, "port") != 0);
+
+ of_node_put(ports);
}
return port;
@@ -94,7 +96,7 @@
if (!port)
return NULL;
- np = of_get_next_parent(port);
+ np = of_get_parent(port);
for (i = 0; i < 2 && np; ++i) {
struct property *prop;
diff --git a/drivers/video/fbdev/pxa3xx-gcu.c b/drivers/video/fbdev/pxa3xx-gcu.c
index 86bd457..50bce45 100644
--- a/drivers/video/fbdev/pxa3xx-gcu.c
+++ b/drivers/video/fbdev/pxa3xx-gcu.c
@@ -653,7 +653,7 @@
goto err_free_dma;
}
- ret = clk_enable(priv->clk);
+ ret = clk_prepare_enable(priv->clk);
if (ret < 0) {
dev_err(dev, "failed to enable clock\n");
goto err_misc_deregister;
@@ -685,7 +685,7 @@
misc_deregister(&priv->misc_dev);
err_disable_clk:
- clk_disable(priv->clk);
+ clk_disable_unprepare(priv->clk);
return ret;
}
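
The pxa3xx-gcu fix pairs the sleeping prepare step with the atomic enable step. A minimal sketch of the balanced sequence (power_cycle() is illustrative):

    #include <linux/clk.h>

    /* Sketch: clk_prepare_enable() does prepare (may sleep) + enable
     * (atomic); clk_disable_unprepare() undoes both, in order. */
    static int power_cycle(struct clk *clk)
    {
            int ret = clk_prepare_enable(clk);

            if (ret < 0)
                    return ret;     /* neither step took effect */

            /* ... use the peripheral ... */

            clk_disable_unprepare(clk);
            return 0;
    }
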
diff --git a/drivers/video/of_videomode.c b/drivers/video/of_videomode.c
index 111c2d1..b5102aa 100644
--- a/drivers/video/of_videomode.c
+++ b/drivers/video/of_videomode.c
@@ -44,11 +44,9 @@
index = disp->native_mode;
ret = videomode_from_timings(disp, vm, index);
- if (ret)
- return ret;
display_timings_release(disp);
- return 0;
+ return ret;
}
EXPORT_SYMBOL_GPL(of_get_videomode);
diff --git a/drivers/xen/events/events_base.c b/drivers/xen/events/events_base.c
index 1495ecc..96093ae 100644
--- a/drivers/xen/events/events_base.c
+++ b/drivers/xen/events/events_base.c
@@ -452,12 +452,10 @@
irq_free_desc(irq);
}
-static void xen_evtchn_close(unsigned int port, unsigned int cpu)
+static void xen_evtchn_close(unsigned int port)
{
struct evtchn_close close;
- xen_evtchn_op_close(port, cpu);
-
close.port = port;
if (HYPERVISOR_event_channel_op(EVTCHNOP_close, &close) != 0)
BUG();
@@ -546,7 +544,7 @@
err:
pr_err("irq%d: Failed to set port to irq mapping (%d)\n", irq, rc);
- xen_evtchn_close(evtchn, NR_CPUS);
+ xen_evtchn_close(evtchn);
return 0;
}
@@ -567,7 +565,7 @@
return;
mask_evtchn(evtchn);
- xen_evtchn_close(evtchn, cpu_from_evtchn(evtchn));
+ xen_evtchn_close(evtchn);
xen_irq_info_cleanup(info);
}
@@ -611,7 +609,7 @@
if (VALID_EVTCHN(evtchn)) {
unsigned int cpu = cpu_from_irq(irq);
- xen_evtchn_close(evtchn, cpu);
+ xen_evtchn_close(evtchn);
switch (type_from_irq(irq)) {
case IRQT_VIRQ:
diff --git a/drivers/xen/events/events_fifo.c b/drivers/xen/events/events_fifo.c
index 6df8aac..ed673e1 100644
--- a/drivers/xen/events/events_fifo.c
+++ b/drivers/xen/events/events_fifo.c
@@ -255,12 +255,6 @@
}
}
-static bool evtchn_fifo_is_linked(unsigned port)
-{
- event_word_t *word = event_word_from_port(port);
- return sync_test_bit(EVTCHN_FIFO_BIT(LINKED, word), BM(word));
-}
-
static uint32_t clear_linked(volatile event_word_t *word)
{
event_word_t new, old, w;
@@ -287,8 +281,7 @@
static void consume_one_event(unsigned cpu,
struct evtchn_fifo_control_block *control_block,
- unsigned priority, unsigned long *ready,
- bool drop)
+ unsigned priority, unsigned long *ready)
{
struct evtchn_fifo_queue *q = &per_cpu(cpu_queue, cpu);
uint32_t head;
@@ -320,15 +313,13 @@
if (head == 0)
clear_bit(priority, ready);
- if (evtchn_fifo_is_pending(port) && !evtchn_fifo_is_masked(port)) {
- if (likely(!drop))
- handle_irq_for_port(port);
- }
+ if (evtchn_fifo_is_pending(port) && !evtchn_fifo_is_masked(port))
+ handle_irq_for_port(port);
q->head[priority] = head;
}
-static void __evtchn_fifo_handle_events(unsigned cpu, bool drop)
+static void evtchn_fifo_handle_events(unsigned cpu)
{
struct evtchn_fifo_control_block *control_block;
unsigned long ready;
@@ -340,16 +331,11 @@
while (ready) {
q = find_first_bit(&ready, EVTCHN_FIFO_MAX_QUEUES);
- consume_one_event(cpu, control_block, q, &ready, drop);
+ consume_one_event(cpu, control_block, q, &ready);
ready |= xchg(&control_block->ready, 0);
}
}
-static void evtchn_fifo_handle_events(unsigned cpu)
-{
- __evtchn_fifo_handle_events(cpu, false);
-}
-
static void evtchn_fifo_resume(void)
{
unsigned cpu;
@@ -385,26 +371,6 @@
event_array_pages = 0;
}
-static void evtchn_fifo_close(unsigned port, unsigned int cpu)
-{
- if (cpu == NR_CPUS)
- return;
-
- get_online_cpus();
- if (cpu_online(cpu)) {
- if (WARN_ON(irqs_disabled()))
- goto out;
-
- while (evtchn_fifo_is_linked(port))
- cpu_relax();
- } else {
- __evtchn_fifo_handle_events(cpu, true);
- }
-
-out:
- put_online_cpus();
-}
-
static const struct evtchn_ops evtchn_ops_fifo = {
.max_channels = evtchn_fifo_max_channels,
.nr_channels = evtchn_fifo_nr_channels,
@@ -418,7 +384,6 @@
.unmask = evtchn_fifo_unmask,
.handle_events = evtchn_fifo_handle_events,
.resume = evtchn_fifo_resume,
- .close = evtchn_fifo_close,
};
static int evtchn_fifo_alloc_control_block(unsigned cpu)
diff --git a/drivers/xen/events/events_internal.h b/drivers/xen/events/events_internal.h
index d18e123..50c2050a 100644
--- a/drivers/xen/events/events_internal.h
+++ b/drivers/xen/events/events_internal.h
@@ -68,7 +68,6 @@
bool (*test_and_set_mask)(unsigned port);
void (*mask)(unsigned port);
void (*unmask)(unsigned port);
- void (*close)(unsigned port, unsigned cpu);
void (*handle_events)(unsigned cpu);
void (*resume)(void);
@@ -146,12 +145,6 @@
evtchn_ops->resume();
}
-static inline void xen_evtchn_op_close(unsigned port, unsigned cpu)
-{
- if (evtchn_ops->close)
- return evtchn_ops->close(port, cpu);
-}
-
void xen_evtchn_2l_init(void);
int xen_evtchn_fifo_init(void);
diff --git a/drivers/xen/xenbus/xenbus_client.c b/drivers/xen/xenbus/xenbus_client.c
index 9ad3272..e303535 100644
--- a/drivers/xen/xenbus/xenbus_client.c
+++ b/drivers/xen/xenbus/xenbus_client.c
@@ -814,8 +814,10 @@
rv = xenbus_unmap_ring(dev, node->handles, node->nr_handles,
addrs);
- if (!rv)
+ if (!rv) {
vunmap(vaddr);
+ free_xenballooned_pages(node->nr_handles, node->hvm.pages);
+ }
else
WARN(1, "Leaking %p, size %u page(s)\n", vaddr,
node->nr_handles);
diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c
index 518c629..5fa588e 100644
--- a/fs/fs-writeback.c
+++ b/fs/fs-writeback.c
@@ -844,14 +844,15 @@
struct wb_iter iter;
might_sleep();
-
- if (!bdi_has_dirty_io(bdi))
- return;
restart:
rcu_read_lock();
bdi_for_each_wb(wb, bdi, &iter, next_blkcg_id) {
- if (!wb_has_dirty_io(wb) ||
- (skip_if_busy && writeback_in_progress(wb)))
+ /* SYNC_ALL writes out I_DIRTY_TIME too */
+ if (!wb_has_dirty_io(wb) &&
+ (base_work->sync_mode == WB_SYNC_NONE ||
+ list_empty(&wb->b_dirty_time)))
+ continue;
+ if (skip_if_busy && writeback_in_progress(wb))
continue;
base_work->nr_pages = wb_split_bdi_pages(wb, nr_pages);
@@ -899,8 +900,7 @@
{
might_sleep();
- if (bdi_has_dirty_io(bdi) &&
- (!skip_if_busy || !writeback_in_progress(&bdi->wb))) {
+ if (!skip_if_busy || !writeback_in_progress(&bdi->wb)) {
base_work->auto_free = 0;
base_work->single_wait = 0;
base_work->single_done = 0;
@@ -2275,8 +2275,12 @@
};
struct backing_dev_info *bdi = sb->s_bdi;
- /* Nothing to do? */
- if (!bdi_has_dirty_io(bdi) || bdi == &noop_backing_dev_info)
+ /*
+ * Can't skip on !bdi_has_dirty() because we should wait for !dirty
+ * inodes under writeback, and I_DIRTY_TIME inodes ignored by
+ * bdi_has_dirty() need to be written out too.
+ */
+ if (bdi == &noop_backing_dev_info)
return;
WARN_ON(!rwsem_is_locked(&sb->s_umount));
diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c
index 80cc1b3..ebb5e37 100644
--- a/fs/fuse/dev.c
+++ b/fs/fuse/dev.c
@@ -2246,7 +2246,15 @@
err = -EINVAL;
if (old) {
- struct fuse_dev *fud = fuse_get_dev(old);
+ struct fuse_dev *fud = NULL;
+
+ /*
+ * Check against file->f_op because CUSE
+ * uses the same ioctl handler.
+ */
+ if (old->f_op == file->f_op &&
+ old->f_cred->user_ns == file->f_cred->user_ns)
+ fud = fuse_get_dev(old);
if (fud) {
mutex_lock(&fuse_mutex);
diff --git a/fs/namei.c b/fs/namei.c
index fbbcf09..1c2105e 100644
--- a/fs/namei.c
+++ b/fs/namei.c
@@ -879,7 +879,7 @@
return 0;
/* Allowed if parent directory not sticky and world-writable. */
- parent = nd->path.dentry->d_inode;
+ parent = nd->inode;
if ((parent->i_mode & (S_ISVTX|S_IWOTH)) != (S_ISVTX|S_IWOTH))
return 0;
diff --git a/include/drm/drm_crtc.h b/include/drm/drm_crtc.h
index 57ca8cc..3b4d8a4 100644
--- a/include/drm/drm_crtc.h
+++ b/include/drm/drm_crtc.h
@@ -743,8 +743,6 @@
uint8_t num_h_tile, num_v_tile;
uint8_t tile_h_loc, tile_v_loc;
uint16_t tile_h_size, tile_v_size;
-
- struct list_head destroy_list;
};
/**
diff --git a/include/drm/drm_edid.h b/include/drm/drm_edid.h
index 7990501..53c53c4 100644
--- a/include/drm/drm_edid.h
+++ b/include/drm/drm_edid.h
@@ -348,6 +348,25 @@
}
/**
+ * drm_eld_sad - Get ELD SAD structures.
+ * @eld: pointer to an eld memory structure with sad_count set
+ */
+static inline const uint8_t *drm_eld_sad(const uint8_t *eld)
+{
+ unsigned int ver, mnl;
+
+ ver = (eld[DRM_ELD_VER] & DRM_ELD_VER_MASK) >> DRM_ELD_VER_SHIFT;
+ if (ver != 2 && ver != 31)
+ return NULL;
+
+ mnl = drm_eld_mnl(eld);
+ if (mnl > 16)
+ return NULL;
+
+ return eld + DRM_ELD_CEA_SAD(mnl, 0);
+}
+
+/**
* drm_eld_sad_count - Get ELD SAD count.
* @eld: pointer to an eld memory structure with sad_count set
*/
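
A hedged usage sketch for the new helper: each CEA Short Audio Descriptor is 3 bytes, and the count comes from drm_eld_sad_count() (documented above). The walk_eld_sads() name is hypothetical.

#include <drm/drm_edid.h>

static void walk_eld_sads(const uint8_t *eld)	/* hypothetical */
{
	const uint8_t *sad = drm_eld_sad(eld);
	int i, count = drm_eld_sad_count(eld);

	if (!sad)	/* unknown ELD version or oversized monitor name */
		return;

	for (i = 0; i < count; i++, sad += 3) {
		/* sad[0]: format code and channel count,
		 * sad[1]: supported sample rates,
		 * sad[2]: format-specific (e.g. bit depths for LPCM)
		 */
	}
}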
diff --git a/include/drm/drm_pciids.h b/include/drm/drm_pciids.h
index 45c39a3..8bc073d 100644
--- a/include/drm/drm_pciids.h
+++ b/include/drm/drm_pciids.h
@@ -172,6 +172,7 @@
{0x1002, 0x6610, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_NEW_MEMMAP}, \
{0x1002, 0x6611, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_NEW_MEMMAP}, \
{0x1002, 0x6613, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x6617, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
{0x1002, 0x6620, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
{0x1002, 0x6621, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
{0x1002, 0x6623, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
diff --git a/include/linux/ata.h b/include/linux/ata.h
index 6c78956..d2992bf 100644
--- a/include/linux/ata.h
+++ b/include/linux/ata.h
@@ -385,8 +385,6 @@
SATA_SSP = 0x06, /* Software Settings Preservation */
SATA_DEVSLP = 0x09, /* Device Sleep */
- SETFEATURE_SENSE_DATA = 0xC3, /* Sense Data Reporting feature */
-
/* feature values for SET_MAX */
ATA_SET_MAX_ADDR = 0x00,
ATA_SET_MAX_PASSWD = 0x01,
@@ -530,8 +528,6 @@
#define ata_id_cdb_intr(id) (((id)[ATA_ID_CONFIG] & 0x60) == 0x20)
#define ata_id_has_da(id) ((id)[ATA_ID_SATA_CAPABILITY_2] & (1 << 4))
#define ata_id_has_devslp(id) ((id)[ATA_ID_FEATURE_SUPP] & (1 << 8))
-#define ata_id_has_ncq_autosense(id) \
- ((id)[ATA_ID_FEATURE_SUPP] & (1 << 7))
static inline bool ata_id_has_hipm(const u16 *id)
{
@@ -720,20 +716,6 @@
return false;
}
-static inline bool ata_id_has_sense_reporting(const u16 *id)
-{
- if (!(id[ATA_ID_CFS_ENABLE_2] & (1 << 15)))
- return false;
- return id[ATA_ID_COMMAND_SET_3] & (1 << 6);
-}
-
-static inline bool ata_id_sense_reporting_enabled(const u16 *id)
-{
- if (!(id[ATA_ID_CFS_ENABLE_2] & (1 << 15)))
- return false;
- return id[ATA_ID_COMMAND_SET_4] & (1 << 6);
-}
-
/**
* ata_id_major_version - get ATA level of drive
* @id: Identify data
diff --git a/include/linux/irq.h b/include/linux/irq.h
index 92188b0..51744bc 100644
--- a/include/linux/irq.h
+++ b/include/linux/irq.h
@@ -484,6 +484,7 @@
extern int irq_chip_set_wake_parent(struct irq_data *data, unsigned int on);
extern int irq_chip_set_vcpu_affinity_parent(struct irq_data *data,
void *vcpu_info);
+extern int irq_chip_set_type_parent(struct irq_data *data, unsigned int type);
#endif
/* Handling of unhandled and spurious interrupts: */
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index 05e99b8..81089cf 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -139,6 +139,7 @@
#define KVM_REQ_DISABLE_IBS 24
#define KVM_REQ_APIC_PAGE_RELOAD 25
#define KVM_REQ_SMI 26
+#define KVM_REQ_HV_CRASH 27
#define KVM_USERSPACE_IRQ_SOURCE_ID 0
#define KVM_IRQFD_RESAMPLE_IRQ_SOURCE_ID 1
@@ -363,9 +364,6 @@
struct kvm_memslots *memslots[KVM_ADDRESS_SPACE_NUM];
struct srcu_struct srcu;
struct srcu_struct irq_srcu;
-#ifdef CONFIG_KVM_APIC_ARCHITECTURE
- u32 bsp_vcpu_id;
-#endif
struct kvm_vcpu *vcpus[KVM_MAX_VCPUS];
atomic_t online_vcpus;
int last_boosted_vcpu;
@@ -424,8 +422,15 @@
#define vcpu_unimpl(vcpu, fmt, ...) \
kvm_pr_unimpl("vcpu%i " fmt, (vcpu)->vcpu_id, ## __VA_ARGS__)
+#define vcpu_debug(vcpu, fmt, ...) \
+ kvm_debug("vcpu%i " fmt, (vcpu)->vcpu_id, ## __VA_ARGS__)
+
static inline struct kvm_vcpu *kvm_get_vcpu(struct kvm *kvm, int i)
{
+ /* Pairs with smp_wmb() in kvm_vm_ioctl_create_vcpu, in case
+ * the caller has read kvm->online_vcpus before (as is the case
+ * for kvm_for_each_vcpu, for example).
+ */
smp_rmb();
return kvm->vcpus[i];
}
@@ -1055,22 +1060,9 @@
#endif /* CONFIG_HAVE_KVM_EVENTFD */
#ifdef CONFIG_KVM_APIC_ARCHITECTURE
-static inline bool kvm_vcpu_is_reset_bsp(struct kvm_vcpu *vcpu)
-{
- return vcpu->kvm->bsp_vcpu_id == vcpu->vcpu_id;
-}
-
-static inline bool kvm_vcpu_is_bsp(struct kvm_vcpu *vcpu)
-{
- return (vcpu->arch.apic_base & MSR_IA32_APICBASE_BSP) != 0;
-}
-
bool kvm_vcpu_compatible(struct kvm_vcpu *vcpu);
-
#else
-
static inline bool kvm_vcpu_compatible(struct kvm_vcpu *vcpu) { return true; }
-
#endif
static inline void kvm_make_request(int req, struct kvm_vcpu *vcpu)
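
A userspace C11 analogue (an illustration, not kernel code) of the ordering the new comment documents: the writer publishes the array slot before bumping the count, so a reader that checks the count first needs a read barrier before dereferencing the slot.

#include <stdatomic.h>
#include <stddef.h>

#define MAX_VCPUS 8

struct vcpu { int id; };

static struct vcpu *vcpus[MAX_VCPUS];	/* plain slots, kept simple for clarity */
static atomic_int online_vcpus;

/* writer: store the element, then fence (~smp_wmb), then bump the count */
void publish_vcpu(struct vcpu *v, int i)
{
	vcpus[i] = v;
	atomic_thread_fence(memory_order_release);
	atomic_fetch_add_explicit(&online_vcpus, 1, memory_order_relaxed);
}

/* reader: load the count, then fence (~smp_rmb), then read the element */
struct vcpu *get_vcpu(int i)
{
	if (i >= atomic_load_explicit(&online_vcpus, memory_order_relaxed))
		return NULL;
	atomic_thread_fence(memory_order_acquire);
	return vcpus[i];
}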
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 2e872f9..bf6f117 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -1003,6 +1003,34 @@
}
/*
+ * Return true only if the page has been allocated with
+ * ALLOC_NO_WATERMARKS and the low watermark was not
+ * met implying that the system is under some pressure.
+ */
+static inline bool page_is_pfmemalloc(struct page *page)
+{
+ /*
+ * Page index cannot be this large, so this must be
+ * a pfmemalloc page.
+ */
+ return page->index == -1UL;
+}
+
+/*
+ * Only to be called by the page allocator on a freshly allocated
+ * page.
+ */
+static inline void set_page_pfmemalloc(struct page *page)
+{
+ page->index = -1UL;
+}
+
+static inline void clear_page_pfmemalloc(struct page *page)
+{
+ page->index = 0;
+}
+
+/*
* Different kinds of faults, as returned by handle_mm_fault().
* Used to decide whether a process gets delivered SIGBUS or
* just gets major/minor fault counters bumped up.
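
A standalone sketch of the trick the new helpers rely on, assuming only what the comments above state: page->index shares storage with the removed pfmemalloc flag, and no legitimate file offset can equal (unsigned long)-1, so that value can double as the flag without widening the struct.

#include <stdbool.h>

struct page_like {			/* hypothetical stand-in for struct page */
	unsigned long index;		/* file offset, or -1UL for pfmemalloc */
};

static inline void set_pfmemalloc(struct page_like *p)   { p->index = -1UL; }
static inline void clear_pfmemalloc(struct page_like *p) { p->index = 0; }

static inline bool is_pfmemalloc(const struct page_like *p)
{
	/* no file offset can be this large, so -1UL is unambiguous */
	return p->index == -1UL;
}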
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index 0038ac7..1554957 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -63,15 +63,6 @@
union {
pgoff_t index; /* Our offset within mapping. */
void *freelist; /* sl[aou]b first free object */
- bool pfmemalloc; /* If set by the page allocator,
- * ALLOC_NO_WATERMARKS was set
- * and the low watermark was not
- * met implying that the system
- * is under some pressure. The
- * caller should try ensure
- * this page is only used to
- * free other pages.
- */
};
union {
diff --git a/include/linux/pci.h b/include/linux/pci.h
index 8a0321a..860c751 100644
--- a/include/linux/pci.h
+++ b/include/linux/pci.h
@@ -1202,6 +1202,7 @@
u16 entry; /* driver uses to specify entry, OS writes */
};
+void pci_msi_setup_pci_dev(struct pci_dev *dev);
#ifdef CONFIG_PCI_MSI
int pci_msi_vec_count(struct pci_dev *dev);
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index d6cdd6e..9b88536 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -1602,20 +1602,16 @@
skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
/*
- * Propagate page->pfmemalloc to the skb if we can. The problem is
- * that not all callers have unique ownership of the page. If
- * pfmemalloc is set, we check the mapping as a mapping implies
- * page->index is set (index and pfmemalloc share space).
- * If it's a valid mapping, we cannot use page->pfmemalloc but we
- * do not lose pfmemalloc information as the pages would not be
- * allocated using __GFP_MEMALLOC.
+ * Propagate page pfmemalloc to the skb if we can. The problem is
+ * that not all callers have unique ownership of the page but rely
+ * on page_is_pfmemalloc doing the right thing(tm).
*/
frag->page.p = page;
frag->page_offset = off;
skb_frag_size_set(frag, size);
page = compound_head(page);
- if (page->pfmemalloc && !page->mapping)
+ if (page_is_pfmemalloc(page))
skb->pfmemalloc = true;
}
@@ -2263,7 +2259,7 @@
static inline void skb_propagate_pfmemalloc(struct page *page,
struct sk_buff *skb)
{
- if (page && page->pfmemalloc)
+ if (page_is_pfmemalloc(page))
skb->pfmemalloc = true;
}
@@ -2884,11 +2880,11 @@
*
* PHY drivers may accept clones of transmitted packets for
* timestamping via their phy_driver.txtstamp method. These drivers
- * must call this function to return the skb back to the stack, with
- * or without a timestamp.
+ * must call this function to return the skb back to the stack with a
+ * timestamp.
*
* @skb: clone of the original outgoing packet
- * @hwtstamps: hardware time stamps, may be NULL if not available
+ * @hwtstamps: hardware time stamps
*
*/
void skb_complete_tx_timestamp(struct sk_buff *skb,
diff --git a/include/media/rc-core.h b/include/media/rc-core.h
index 45534da5..644bdc6 100644
--- a/include/media/rc-core.h
+++ b/include/media/rc-core.h
@@ -74,8 +74,6 @@
* @input_dev: the input child device used to communicate events to userspace
* @driver_type: specifies if protocol decoding is done in hardware or software
* @idle: used to keep track of RX state
- * @encode_wakeup: wakeup filtering uses IR encode API, therefore the allowed
- * wakeup protocols is the set of all raw encoders
* @allowed_protocols: bitmask with the supported RC_BIT_* protocols
* @enabled_protocols: bitmask with the enabled RC_BIT_* protocols
* @allowed_wakeup_protocols: bitmask with the supported RC_BIT_* wakeup protocols
@@ -136,7 +134,6 @@
struct input_dev *input_dev;
enum rc_driver_type driver_type;
bool idle;
- bool encode_wakeup;
u64 allowed_protocols;
u64 enabled_protocols;
u64 allowed_wakeup_protocols;
@@ -246,7 +243,6 @@
#define US_TO_NS(usec) ((usec) * 1000)
#define MS_TO_US(msec) ((msec) * 1000)
#define MS_TO_NS(msec) ((msec) * 1000 * 1000)
-#define NS_TO_US(nsec) DIV_ROUND_UP(nsec, 1000L)
void ir_raw_event_handle(struct rc_dev *dev);
int ir_raw_event_store(struct rc_dev *dev, struct ir_raw_event *ev);
@@ -254,9 +250,6 @@
int ir_raw_event_store_with_filter(struct rc_dev *dev,
struct ir_raw_event *ev);
void ir_raw_event_set_idle(struct rc_dev *dev, bool idle);
-int ir_raw_encode_scancode(u64 protocols,
- const struct rc_scancode_filter *scancode,
- struct ir_raw_event *events, unsigned int max);
static inline void ir_raw_event_reset(struct rc_dev *dev)
{
diff --git a/include/media/videobuf2-core.h b/include/media/videobuf2-core.h
index 22a44c2..c192e1b 100644
--- a/include/media/videobuf2-core.h
+++ b/include/media/videobuf2-core.h
@@ -139,6 +139,7 @@
* @VB2_BUF_STATE_PREPARING: buffer is being prepared in videobuf
* @VB2_BUF_STATE_PREPARED: buffer prepared in videobuf and by the driver
* @VB2_BUF_STATE_QUEUED: buffer queued in videobuf, but not in driver
+ * @VB2_BUF_STATE_REQUEUEING: re-queue a buffer to the driver
* @VB2_BUF_STATE_ACTIVE: buffer queued in driver and possibly used
* in a hardware operation
* @VB2_BUF_STATE_DONE: buffer returned from driver to videobuf, but
@@ -152,6 +153,7 @@
VB2_BUF_STATE_PREPARING,
VB2_BUF_STATE_PREPARED,
VB2_BUF_STATE_QUEUED,
+ VB2_BUF_STATE_REQUEUEING,
VB2_BUF_STATE_ACTIVE,
VB2_BUF_STATE_DONE,
VB2_BUF_STATE_ERROR,
diff --git a/include/scsi/scsi_eh.h b/include/scsi/scsi_eh.h
index 4942710..8d1d7fa 100644
--- a/include/scsi/scsi_eh.h
+++ b/include/scsi/scsi_eh.h
@@ -28,7 +28,6 @@
u64 * info_out);
extern void scsi_build_sense_buffer(int desc, u8 *buf, u8 key, u8 asc, u8 ascq);
-extern void scsi_set_sense_information(u8 *buf, u64 info);
extern int scsi_ioctl_reset(struct scsi_device *, int __user *);
diff --git a/include/sound/soc-topology.h b/include/sound/soc-topology.h
index 865a141..427bc41 100644
--- a/include/sound/soc-topology.h
+++ b/include/sound/soc-topology.h
@@ -141,6 +141,8 @@
int io_ops_count;
};
+#ifdef CONFIG_SND_SOC_TOPOLOGY
+
/* gets a pointer to data from the firmware block header */
static inline const void *snd_soc_tplg_get_data(struct snd_soc_tplg_hdr *hdr)
{
@@ -165,4 +167,14 @@
const struct snd_soc_tplg_widget_events *events, int num_events,
u16 event_type);
+#else
+
+static inline int snd_soc_tplg_component_remove(struct snd_soc_component *comp,
+ u32 index)
+{
+ return 0;
+}
+
+#endif
+
#endif
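
This hunk applies the usual compile-out stub pattern; a generic sketch, with CONFIG_EXAMPLE_FEATURE and the function names as placeholders:

#include <linux/types.h>

struct example;	/* hypothetical */

#ifdef CONFIG_EXAMPLE_FEATURE
int example_feature_remove(struct example *ex, u32 index);
#else
/* stub keeps callers free of #ifdefs when the feature is compiled out */
static inline int example_feature_remove(struct example *ex, u32 index)
{
	return 0;
}
#endif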
diff --git a/include/uapi/linux/kvm.h b/include/uapi/linux/kvm.h
index 716ad4a..0d831f9 100644
--- a/include/uapi/linux/kvm.h
+++ b/include/uapi/linux/kvm.h
@@ -317,6 +317,7 @@
struct {
#define KVM_SYSTEM_EVENT_SHUTDOWN 1
#define KVM_SYSTEM_EVENT_RESET 2
+#define KVM_SYSTEM_EVENT_CRASH 3
__u32 type;
__u64 flags;
} system_event;
@@ -481,6 +482,7 @@
((ai) << 26))
#define KVM_S390_INT_IO_MIN 0x00000000u
#define KVM_S390_INT_IO_MAX 0xfffdffffu
+#define KVM_S390_INT_IO_AI_MASK 0x04000000u
struct kvm_s390_interrupt {
diff --git a/include/uapi/sound/asoc.h b/include/uapi/sound/asoc.h
index 51b8066..247c50b 100644
--- a/include/uapi/sound/asoc.h
+++ b/include/uapi/sound/asoc.h
@@ -18,6 +18,12 @@
#include <linux/types.h>
#include <sound/asound.h>
+#ifndef __KERNEL__
+#error This API is an early revision and not enabled in the current
+#error kernel release, it will be enabled in a future kernel version
+#error with incompatible changes to what is here.
+#endif
+
/*
* Maximum number of channels topology kcontrol can represent.
*/
diff --git a/ipc/sem.c b/ipc/sem.c
index bc3d530..b471e5a 100644
--- a/ipc/sem.c
+++ b/ipc/sem.c
@@ -253,6 +253,16 @@
}
/*
+ * spin_unlock_wait() and !spin_is_locked() are not memory barriers, they
+ * are only control barriers.
* The code must pair with spin_unlock(&sem->lock) or
* spin_unlock(&sem_perm.lock), so the control barrier alone is insufficient.
+ *
+ * smp_rmb() is sufficient, as writes cannot pass the control barrier.
+ */
+#define ipc_smp_acquire__after_spin_is_unlocked() smp_rmb()
+
+/*
* Wait until all currently ongoing simple ops have completed.
* Caller must own sem_perm.lock.
* New simple ops cannot start, because simple ops first check
@@ -275,6 +285,7 @@
sem = sma->sem_base + i;
spin_unlock_wait(&sem->lock);
}
+ ipc_smp_acquire__after_spin_is_unlocked();
}
/*
@@ -327,13 +338,12 @@
/* Then check that the global lock is free */
if (!spin_is_locked(&sma->sem_perm.lock)) {
/*
- * The ipc object lock check must be visible on all
- * cores before rechecking the complex count. Otherwise
- * we can race with another thread that does:
+ * We need a memory barrier with acquire semantics,
+ * otherwise we can race with another thread that does:
* complex_count++;
* spin_unlock(sem_perm.lock);
*/
- smp_rmb();
+ ipc_smp_acquire__after_spin_is_unlocked();
/*
* Now repeat the test of complex_count:
@@ -2074,17 +2084,28 @@
rcu_read_lock();
un = list_entry_rcu(ulp->list_proc.next,
struct sem_undo, list_proc);
- if (&un->list_proc == &ulp->list_proc)
- semid = -1;
- else
- semid = un->semid;
-
- if (semid == -1) {
+ if (&un->list_proc == &ulp->list_proc) {
+ /*
+ * We must wait for freeary() before freeing this ulp,
+ * in case we raced with last sem_undo. There is a small
+ * possibility where we exit while freeary() didn't
+ * finish unlocking sem_undo_list.
+ */
+ spin_unlock_wait(&ulp->lock);
rcu_read_unlock();
break;
}
+ spin_lock(&ulp->lock);
+ semid = un->semid;
+ spin_unlock(&ulp->lock);
- sma = sem_obtain_object_check(tsk->nsproxy->ipc_ns, un->semid);
+ /* exit_sem raced with IPC_RMID, nothing to do */
+ if (semid == -1) {
+ rcu_read_unlock();
+ continue;
+ }
+
+ sma = sem_obtain_object_check(tsk->nsproxy->ipc_ns, semid);
/* exit_sem raced with IPC_RMID, nothing to do */
if (IS_ERR(sma)) {
rcu_read_unlock();
@@ -2112,9 +2133,11 @@
ipc_assert_locked_object(&sma->sem_perm);
list_del(&un->list_id);
- spin_lock(&ulp->lock);
+ /* we are the last process using this ulp, so acquiring ulp->lock
+ * isn't required. Besides that, we are also protected against
+ * IPC_RMID as we hold sma->sem_perm lock now.
+ */
list_del_rcu(&un->list_proc);
- spin_unlock(&ulp->lock);
/* perform adjustments registered in un */
for (i = 0; i < sma->sem_nsems; i++) {
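
A userspace C11 analogue (illustrative only) of the pairing the new macro documents: spinning until a lock reads unlocked is only a control dependency, so later loads still need an acquire fence to observe everything written before the unlock.

#include <stdatomic.h>

static atomic_int lock_word;	/* 1 = locked, 0 = unlocked */
static int payload;

void holder(void)
{
	atomic_store_explicit(&lock_word, 1, memory_order_relaxed);
	payload = 42;
	/* ~spin_unlock(): a release store */
	atomic_store_explicit(&lock_word, 0, memory_order_release);
}

int waiter(void)
{
	/* ~spin_unlock_wait(): a control barrier only */
	while (atomic_load_explicit(&lock_word, memory_order_relaxed))
		;
	/* ~ipc_smp_acquire__after_spin_is_unlocked() */
	atomic_thread_fence(memory_order_acquire);
	return payload;	/* sees 42 once the release store is observed */
}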
diff --git a/kernel/cpuset.c b/kernel/cpuset.c
index ee14e3a..f0acff0 100644
--- a/kernel/cpuset.c
+++ b/kernel/cpuset.c
@@ -1223,7 +1223,7 @@
spin_unlock_irq(&callback_lock);
/* use trialcs->mems_allowed as a temp variable */
- update_nodemasks_hier(cs, &cs->mems_allowed);
+ update_nodemasks_hier(cs, &trialcs->mems_allowed);
done:
return retval;
}
diff --git a/kernel/events/core.c b/kernel/events/core.c
index d3dae34..e6feb51 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -1868,8 +1868,6 @@
perf_pmu_disable(event->pmu);
- event->tstamp_running += tstamp - event->tstamp_stopped;
-
perf_set_shadow_time(event, ctx, tstamp);
perf_log_itrace_start(event);
@@ -1881,6 +1879,8 @@
goto out;
}
+ event->tstamp_running += tstamp - event->tstamp_stopped;
+
if (!is_software_event(event))
cpuctx->active_oncpu++;
if (!ctx->nr_active++)
@@ -3958,28 +3958,21 @@
perf_event_for_each_child(sibling, func);
}
-static int perf_event_period(struct perf_event *event, u64 __user *arg)
-{
- struct perf_event_context *ctx = event->ctx;
- int ret = 0, active;
+struct period_event {
+ struct perf_event *event;
u64 value;
+};
- if (!is_sampling_event(event))
- return -EINVAL;
+static int __perf_event_period(void *info)
+{
+ struct period_event *pe = info;
+ struct perf_event *event = pe->event;
+ struct perf_event_context *ctx = event->ctx;
+ u64 value = pe->value;
+ bool active;
- if (copy_from_user(&value, arg, sizeof(value)))
- return -EFAULT;
-
- if (!value)
- return -EINVAL;
-
- raw_spin_lock_irq(&ctx->lock);
+ raw_spin_lock(&ctx->lock);
if (event->attr.freq) {
- if (value > sysctl_perf_event_sample_rate) {
- ret = -EINVAL;
- goto unlock;
- }
-
event->attr.sample_freq = value;
} else {
event->attr.sample_period = value;
@@ -3998,11 +3991,53 @@
event->pmu->start(event, PERF_EF_RELOAD);
perf_pmu_enable(ctx->pmu);
}
+ raw_spin_unlock(&ctx->lock);
-unlock:
+ return 0;
+}
+
+static int perf_event_period(struct perf_event *event, u64 __user *arg)
+{
+ struct period_event pe = { .event = event, };
+ struct perf_event_context *ctx = event->ctx;
+ struct task_struct *task;
+ u64 value;
+
+ if (!is_sampling_event(event))
+ return -EINVAL;
+
+ if (copy_from_user(&value, arg, sizeof(value)))
+ return -EFAULT;
+
+ if (!value)
+ return -EINVAL;
+
+ if (event->attr.freq && value > sysctl_perf_event_sample_rate)
+ return -EINVAL;
+
+ task = ctx->task;
+ pe.value = value;
+
+ if (!task) {
+ cpu_function_call(event->cpu, __perf_event_period, &pe);
+ return 0;
+ }
+
+retry:
+ if (!task_function_call(task, __perf_event_period, &pe))
+ return 0;
+
+ raw_spin_lock_irq(&ctx->lock);
+ if (ctx->is_active) {
+ raw_spin_unlock_irq(&ctx->lock);
+ task = ctx->task;
+ goto retry;
+ }
+
+ __perf_event_period(&pe);
raw_spin_unlock_irq(&ctx->lock);
- return ret;
+ return 0;
}
static const struct file_operations perf_fops;
@@ -4740,12 +4775,20 @@
* to user-space before waking everybody up.
*/
+static inline struct fasync_struct **perf_event_fasync(struct perf_event *event)
+{
+ /* only the parent has fasync state */
+ if (event->parent)
+ event = event->parent;
+ return &event->fasync;
+}
+
void perf_event_wakeup(struct perf_event *event)
{
ring_buffer_wakeup(event);
if (event->pending_kill) {
- kill_fasync(&event->fasync, SIGIO, event->pending_kill);
+ kill_fasync(perf_event_fasync(event), SIGIO, event->pending_kill);
event->pending_kill = 0;
}
}
@@ -6124,7 +6167,7 @@
else
perf_event_output(event, data, regs);
- if (event->fasync && event->pending_kill) {
+ if (*perf_event_fasync(event) && event->pending_kill) {
event->pending_wakeup = 1;
irq_work_queue(&event->pending);
}
diff --git a/kernel/events/ring_buffer.c b/kernel/events/ring_buffer.c
index b2be01b..c8aa3f7 100644
--- a/kernel/events/ring_buffer.c
+++ b/kernel/events/ring_buffer.c
@@ -559,11 +559,13 @@
rb->aux_priv = NULL;
}
- for (pg = 0; pg < rb->aux_nr_pages; pg++)
- rb_free_aux_page(rb, pg);
+ if (rb->aux_nr_pages) {
+ for (pg = 0; pg < rb->aux_nr_pages; pg++)
+ rb_free_aux_page(rb, pg);
- kfree(rb->aux_pages);
- rb->aux_nr_pages = 0;
+ kfree(rb->aux_pages);
+ rb->aux_nr_pages = 0;
+ }
}
void rb_free_aux(struct ring_buffer *rb)
diff --git a/kernel/irq/chip.c b/kernel/irq/chip.c
index 27f4332..ae21682 100644
--- a/kernel/irq/chip.c
+++ b/kernel/irq/chip.c
@@ -985,6 +985,23 @@
}
/**
+ * irq_chip_set_type_parent - Set IRQ type on the parent interrupt
+ * @data: Pointer to interrupt specific data
+ * @type: IRQ_TYPE_{LEVEL,EDGE}_* value - see include/linux/irq.h
+ *
+ * Conditional, as the underlying parent chip might not implement it.
+ */
+int irq_chip_set_type_parent(struct irq_data *data, unsigned int type)
+{
+ data = data->parent_data;
+
+ if (data->chip->irq_set_type)
+ return data->chip->irq_set_type(data, type);
+
+ return -ENOSYS;
+}
+
+/**
* irq_chip_retrigger_hierarchy - Retrigger an interrupt in hardware
* @data: Pointer to interrupt specific data
*
@@ -997,7 +1014,7 @@
if (data->chip && data->chip->irq_retrigger)
return data->chip->irq_retrigger(data);
- return -ENOSYS;
+ return 0;
}
/**
diff --git a/kernel/locking/qspinlock_paravirt.h b/kernel/locking/qspinlock_paravirt.h
index 04ab181..df19ae4 100644
--- a/kernel/locking/qspinlock_paravirt.h
+++ b/kernel/locking/qspinlock_paravirt.h
@@ -4,6 +4,7 @@
#include <linux/hash.h>
#include <linux/bootmem.h>
+#include <linux/debug_locks.h>
/*
* Implement paravirt qspinlocks; the general idea is to halt the vcpus instead
@@ -286,15 +287,23 @@
{
struct __qspinlock *l = (void *)lock;
struct pv_node *node;
+ u8 lockval = cmpxchg(&l->locked, _Q_LOCKED_VAL, 0);
/*
* We must not unlock if SLOW, because in that case we must first
* unhash. Otherwise it would be possible to have multiple @lock
* entries, which would be BAD.
*/
- if (likely(cmpxchg(&l->locked, _Q_LOCKED_VAL, 0) == _Q_LOCKED_VAL))
+ if (likely(lockval == _Q_LOCKED_VAL))
return;
+ if (unlikely(lockval != _Q_SLOW_VAL)) {
+ if (debug_locks_silent)
+ return;
+ WARN(1, "pvqspinlock: lock %p has corrupted value 0x%x!\n", lock, atomic_read(&lock->val));
+ return;
+ }
+
/*
* Since the above failed to release, this must be the SLOW path.
* Therefore start by looking up the blocked node and unhashing it.
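
A hedged userspace sketch of the unlock shape after this hunk: do the cmpxchg once, keep the observed old value, and branch on it, so a corrupted lock word can be diagnosed instead of being mistaken for the slow path. The constants are placeholders.

#include <stdatomic.h>
#include <stdio.h>

#define LOCKED 1	/* placeholder for _Q_LOCKED_VAL */
#define SLOW   3	/* placeholder for _Q_SLOW_VAL */

static _Atomic unsigned char locked;

void unlock(void)
{
	unsigned char old = LOCKED;

	if (atomic_compare_exchange_strong(&locked, &old, 0))
		return;			/* fast path: plain locked value */

	if (old != SLOW) {		/* neither LOCKED nor SLOW: corruption */
		fprintf(stderr, "corrupted lock value 0x%x\n", old);
		return;
	}

	/* slow path: look up the blocked node and unhash it (elided) */
}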
diff --git a/kernel/time/timer.c b/kernel/time/timer.c
index 5e097fa..84190f0 100644
--- a/kernel/time/timer.c
+++ b/kernel/time/timer.c
@@ -807,8 +807,8 @@
spin_unlock(&base->lock);
base = new_base;
spin_lock(&base->lock);
- timer->flags &= ~TIMER_BASEMASK;
- timer->flags |= base->cpu;
+ WRITE_ONCE(timer->flags,
+ (timer->flags & ~TIMER_BASEMASK) | base->cpu);
}
}
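
An illustrative analogue of the WRITE_ONCE() change, using a C11 relaxed atomic in place of the kernel macro: the new value is computed in full and published with a single store, so a lockless reader can never observe the base mask cleared but the new CPU bits not yet set. The mask value is a placeholder.

#include <stdatomic.h>

#define BASEMASK 0x3ffu		/* placeholder for TIMER_BASEMASK */

static _Atomic unsigned int flags;

void migrate(unsigned int new_cpu)
{
	unsigned int old = atomic_load_explicit(&flags, memory_order_relaxed);

	/* compute the complete new value first, then store it in one shot */
	atomic_store_explicit(&flags, (old & ~BASEMASK) | new_cpu,
			      memory_order_relaxed);
}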
diff --git a/mm/cma.h b/mm/cma.h
index 1132d73..17c75a4 100644
--- a/mm/cma.h
+++ b/mm/cma.h
@@ -16,7 +16,7 @@
extern struct cma cma_areas[MAX_CMA_AREAS];
extern unsigned cma_area_count;
-static unsigned long cma_bitmap_maxno(struct cma *cma)
+static inline unsigned long cma_bitmap_maxno(struct cma *cma)
{
return cma->count >> cma->order_per_bit;
}
diff --git a/mm/kasan/kasan.c b/mm/kasan/kasan.c
index 6c513a6..7b28e9c 100644
--- a/mm/kasan/kasan.c
+++ b/mm/kasan/kasan.c
@@ -2,7 +2,7 @@
* This file contains shadow memory manipulation code.
*
* Copyright (c) 2014 Samsung Electronics Co., Ltd.
- * Author: Andrey Ryabinin <a.ryabinin@samsung.com>
+ * Author: Andrey Ryabinin <ryabinin.a.a@gmail.com>
*
* Some of code borrowed from https://github.com/xairy/linux by
* Andrey Konovalov <adech.fo@gmail.com>
diff --git a/mm/kasan/report.c b/mm/kasan/report.c
index 680ceed..e07c94f 100644
--- a/mm/kasan/report.c
+++ b/mm/kasan/report.c
@@ -2,7 +2,7 @@
* This file contains error reporting code.
*
* Copyright (c) 2014 Samsung Electronics Co., Ltd.
- * Author: Andrey Ryabinin <a.ryabinin@samsung.com>
+ * Author: Andrey Ryabinin <ryabinin.a.a@gmail.com>
*
* Some of code borrowed from https://github.com/xairy/linux by
* Andrey Konovalov <adech.fo@gmail.com>
diff --git a/mm/memory-failure.c b/mm/memory-failure.c
index ea5a936..1f4446a9 100644
--- a/mm/memory-failure.c
+++ b/mm/memory-failure.c
@@ -1146,8 +1146,11 @@
}
if (!PageHuge(p) && PageTransHuge(hpage)) {
- if (unlikely(split_huge_page(hpage))) {
- pr_err("MCE: %#lx: thp split failed\n", pfn);
+ if (!PageAnon(hpage) || unlikely(split_huge_page(hpage))) {
+ if (!PageAnon(hpage))
+ pr_err("MCE: %#lx: non anonymous thp\n", pfn);
+ else
+ pr_err("MCE: %#lx: thp split failed\n", pfn);
if (TestClearPageHWPoison(p))
atomic_long_sub(nr_pages, &num_poisoned_pages);
put_page(p);
@@ -1538,6 +1541,8 @@
*/
ret = __get_any_page(page, pfn, 0);
if (!PageLRU(page)) {
+ /* Drop page reference which is from __get_any_page() */
+ put_page(page);
pr_info("soft_offline: %#lx: unknown non LRU page type %lx\n",
pfn, page->flags);
return -EIO;
@@ -1567,13 +1572,12 @@
unlock_page(hpage);
ret = isolate_huge_page(hpage, &pagelist);
- if (ret) {
- /*
- * get_any_page() and isolate_huge_page() takes a refcount each,
- * so need to drop one here.
- */
- put_page(hpage);
- } else {
+ /*
+ * get_any_page() and isolate_huge_page() take a refcount each,
+ * so we need to drop one here.
+ */
+ put_page(hpage);
+ if (!ret) {
pr_info("soft offline: %#lx hugepage failed to isolate\n", pfn);
return -EBUSY;
}
diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
index 003dbe4..6da82bc 100644
--- a/mm/memory_hotplug.c
+++ b/mm/memory_hotplug.c
@@ -1277,6 +1277,7 @@
/* create new memmap entry */
firmware_map_add_hotplug(start, start + size, "System RAM");
+ memblock_add_node(start, size, nid);
goto out;
@@ -2013,6 +2014,8 @@
/* remove memmap entry */
firmware_map_remove(start, start + size, "System RAM");
+ memblock_free(start, size);
+ memblock_remove(start, size);
arch_remove_memory(start, size);
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index beda417..5b5240b 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -1343,12 +1343,15 @@
set_page_owner(page, order, gfp_flags);
/*
- * page->pfmemalloc is set when ALLOC_NO_WATERMARKS was necessary to
+ * page is set pfmemalloc when ALLOC_NO_WATERMARKS was necessary to
* allocate the page. The expectation is that the caller is taking
* steps that will free more memory. The caller should avoid the page
* being used for !PFMEMALLOC purposes.
*/
- page->pfmemalloc = !!(alloc_flags & ALLOC_NO_WATERMARKS);
+ if (alloc_flags & ALLOC_NO_WATERMARKS)
+ set_page_pfmemalloc(page);
+ else
+ clear_page_pfmemalloc(page);
return 0;
}
@@ -3345,7 +3348,7 @@
atomic_add(size - 1, &page->_count);
/* reset page count bias and offset to start of new frag */
- nc->pfmemalloc = page->pfmemalloc;
+ nc->pfmemalloc = page_is_pfmemalloc(page);
nc->pagecnt_bias = size;
nc->offset = size;
}
@@ -5060,6 +5063,10 @@
{
unsigned long zone_start_pfn, zone_end_pfn;
+ /* When hot-adding a new node, the node should be empty */
+ if (!node_start_pfn && !node_end_pfn)
+ return 0;
+
/* Get the start and end of the zone */
zone_start_pfn = arch_zone_lowest_possible_pfn[zone_type];
zone_end_pfn = arch_zone_highest_possible_pfn[zone_type];
@@ -5123,6 +5130,10 @@
unsigned long zone_high = arch_zone_highest_possible_pfn[zone_type];
unsigned long zone_start_pfn, zone_end_pfn;
+ /* When hot-adding a new node, the node should be empty */
+ if (!node_start_pfn && !node_end_pfn)
+ return 0;
+
zone_start_pfn = clamp(node_start_pfn, zone_low, zone_high);
zone_end_pfn = clamp(node_end_pfn, zone_low, zone_high);
diff --git a/mm/slab.c b/mm/slab.c
index 200e224..bbd0b47 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -1603,7 +1603,7 @@
}
/* Record if ALLOC_NO_WATERMARKS was set when allocating the slab */
- if (unlikely(page->pfmemalloc))
+ if (page_is_pfmemalloc(page))
pfmemalloc_active = true;
nr_pages = (1 << cachep->gfporder);
@@ -1614,7 +1614,7 @@
add_zone_page_state(page_zone(page),
NR_SLAB_UNRECLAIMABLE, nr_pages);
__SetPageSlab(page);
- if (page->pfmemalloc)
+ if (page_is_pfmemalloc(page))
SetPageSlabPfmemalloc(page);
if (kmemcheck_enabled && !(cachep->flags & SLAB_NOTRACK)) {
diff --git a/mm/slub.c b/mm/slub.c
index 816df00..f68c0e5 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -1427,7 +1427,7 @@
inc_slabs_node(s, page_to_nid(page), page->objects);
page->slab_cache = s;
__SetPageSlab(page);
- if (page->pfmemalloc)
+ if (page_is_pfmemalloc(page))
SetPageSlabPfmemalloc(page);
start = page_address(page);
diff --git a/net/9p/client.c b/net/9p/client.c
index 498454b..ea79ee9 100644
--- a/net/9p/client.c
+++ b/net/9p/client.c
@@ -1541,6 +1541,7 @@
struct p9_client *clnt = fid->clnt;
struct p9_req_t *req;
int total = 0;
+ *err = 0;
p9_debug(P9_DEBUG_9P, ">>> TREAD fid %d offset %llu %d\n",
fid->fid, (unsigned long long) offset, (int)iov_iter_count(to));
@@ -1620,6 +1621,7 @@
struct p9_client *clnt = fid->clnt;
struct p9_req_t *req;
int total = 0;
+ *err = 0;
p9_debug(P9_DEBUG_9P, ">>> TWRITE fid %d offset %llu count %zd\n",
fid->fid, (unsigned long long) offset,
diff --git a/net/batman-adv/distributed-arp-table.c b/net/batman-adv/distributed-arp-table.c
index fb54e6a..6d0b471 100644
--- a/net/batman-adv/distributed-arp-table.c
+++ b/net/batman-adv/distributed-arp-table.c
@@ -1138,6 +1138,9 @@
* @bat_priv: the bat priv with all the soft interface information
* @skb: packet to check
* @hdr_size: size of the encapsulation header
+ *
+ * Returns true if the packet was snooped and consumed by DAT, false if
+ * the packet has to be delivered to the interface.
*/
bool batadv_dat_snoop_incoming_arp_reply(struct batadv_priv *bat_priv,
struct sk_buff *skb, int hdr_size)
@@ -1145,7 +1148,7 @@
uint16_t type;
__be32 ip_src, ip_dst;
uint8_t *hw_src, *hw_dst;
- bool ret = false;
+ bool dropped = false;
unsigned short vid;
if (!atomic_read(&bat_priv->distributed_arp_table))
@@ -1174,12 +1177,17 @@
/* if this REPLY is directed to a client of mine, let's deliver the
* packet to the interface
*/
- ret = !batadv_is_my_client(bat_priv, hw_dst, vid);
+ dropped = !batadv_is_my_client(bat_priv, hw_dst, vid);
+
+ /* if this REPLY is sent on behalf of a client of mine, let's drop the
+ * packet because the client will reply by itself
+ */
+ dropped |= batadv_is_my_client(bat_priv, hw_src, vid);
out:
- if (ret)
+ if (dropped)
kfree_skb(skb);
- /* if ret == false -> packet has to be delivered to the interface */
- return ret;
+ /* if dropped == false -> deliver to the interface */
+ return dropped;
}
/**
diff --git a/net/batman-adv/gateway_client.c b/net/batman-adv/gateway_client.c
index bb015862..cffa92d 100644
--- a/net/batman-adv/gateway_client.c
+++ b/net/batman-adv/gateway_client.c
@@ -439,6 +439,8 @@
INIT_HLIST_NODE(&gw_node->list);
gw_node->orig_node = orig_node;
+ gw_node->bandwidth_down = ntohl(gateway->bandwidth_down);
+ gw_node->bandwidth_up = ntohl(gateway->bandwidth_up);
atomic_set(&gw_node->refcount, 1);
spin_lock_bh(&bat_priv->gw.list_lock);
diff --git a/net/batman-adv/soft-interface.c b/net/batman-adv/soft-interface.c
index c002961..a2fc843 100644
--- a/net/batman-adv/soft-interface.c
+++ b/net/batman-adv/soft-interface.c
@@ -479,6 +479,9 @@
*/
void batadv_softif_vlan_free_ref(struct batadv_softif_vlan *vlan)
{
+ if (!vlan)
+ return;
+
if (atomic_dec_and_test(&vlan->refcount)) {
spin_lock_bh(&vlan->bat_priv->softif_vlan_list_lock);
hlist_del_rcu(&vlan->list);
diff --git a/net/batman-adv/translation-table.c b/net/batman-adv/translation-table.c
index b482495..5809b39 100644
--- a/net/batman-adv/translation-table.c
+++ b/net/batman-adv/translation-table.c
@@ -594,6 +594,12 @@
/* increase the refcounter of the related vlan */
vlan = batadv_softif_vlan_get(bat_priv, vid);
+ if (WARN(!vlan, "adding TT local entry %pM to non-existent VLAN %d",
+ addr, BATADV_PRINT_VID(vid))) {
+ kfree(tt_local);
+ tt_local = NULL;
+ goto out;
+ }
batadv_dbg(BATADV_DBG_TT, bat_priv,
"Creating new local tt entry: %pM (vid: %d, ttvn: %d)\n",
@@ -1034,6 +1040,7 @@
struct batadv_tt_local_entry *tt_local_entry;
uint16_t flags, curr_flags = BATADV_NO_FLAGS;
struct batadv_softif_vlan *vlan;
+ void *tt_entry_exists;
tt_local_entry = batadv_tt_local_hash_find(bat_priv, addr, vid);
if (!tt_local_entry)
@@ -1061,11 +1068,22 @@
* immediately purge it
*/
batadv_tt_local_event(bat_priv, tt_local_entry, BATADV_TT_CLIENT_DEL);
- hlist_del_rcu(&tt_local_entry->common.hash_entry);
+
+ tt_entry_exists = batadv_hash_remove(bat_priv->tt.local_hash,
+ batadv_compare_tt,
+ batadv_choose_tt,
+ &tt_local_entry->common);
+ if (!tt_entry_exists)
+ goto out;
+
+ /* extra call to free the local tt entry */
batadv_tt_local_entry_free_ref(tt_local_entry);
/* decrease the reference held for this vlan */
vlan = batadv_softif_vlan_get(bat_priv, vid);
+ if (!vlan)
+ goto out;
+
batadv_softif_vlan_free_ref(vlan);
batadv_softif_vlan_free_ref(vlan);
@@ -1166,8 +1184,10 @@
/* decrease the reference held for this vlan */
vlan = batadv_softif_vlan_get(bat_priv,
tt_common_entry->vid);
- batadv_softif_vlan_free_ref(vlan);
- batadv_softif_vlan_free_ref(vlan);
+ if (vlan) {
+ batadv_softif_vlan_free_ref(vlan);
+ batadv_softif_vlan_free_ref(vlan);
+ }
batadv_tt_local_entry_free_ref(tt_local);
}
@@ -3207,8 +3227,10 @@
/* decrease the reference held for this vlan */
vlan = batadv_softif_vlan_get(bat_priv, tt_common->vid);
- batadv_softif_vlan_free_ref(vlan);
- batadv_softif_vlan_free_ref(vlan);
+ if (vlan) {
+ batadv_softif_vlan_free_ref(vlan);
+ batadv_softif_vlan_free_ref(vlan);
+ }
batadv_tt_local_entry_free_ref(tt_local);
}
diff --git a/net/bluetooth/mgmt.c b/net/bluetooth/mgmt.c
index 7998fb2..92720f3 100644
--- a/net/bluetooth/mgmt.c
+++ b/net/bluetooth/mgmt.c
@@ -7820,7 +7820,7 @@
/* Make sure we copy only the significant bytes based on the
* encryption key size, and set the rest of the value to zeroes.
*/
- memcpy(ev.key.val, key->val, sizeof(key->enc_size));
+ memcpy(ev.key.val, key->val, key->enc_size);
memset(ev.key.val + key->enc_size, 0,
sizeof(ev.key.val) - key->enc_size);
diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c
index 0b39dcc..1285eaf 100644
--- a/net/bridge/br_multicast.c
+++ b/net/bridge/br_multicast.c
@@ -1591,7 +1591,7 @@
break;
}
- if (skb_trimmed)
+ if (skb_trimmed && skb_trimmed != skb)
kfree_skb(skb_trimmed);
return err;
@@ -1636,7 +1636,7 @@
break;
}
- if (skb_trimmed)
+ if (skb_trimmed && skb_trimmed != skb)
kfree_skb(skb_trimmed);
return err;
diff --git a/net/bridge/br_netlink.c b/net/bridge/br_netlink.c
index 3da5525..4d74a06 100644
--- a/net/bridge/br_netlink.c
+++ b/net/bridge/br_netlink.c
@@ -112,6 +112,8 @@
+ nla_total_size(1) /* IFLA_BRPORT_FAST_LEAVE */
+ nla_total_size(1) /* IFLA_BRPORT_LEARNING */
+ nla_total_size(1) /* IFLA_BRPORT_UNICAST_FLOOD */
+ + nla_total_size(1) /* IFLA_BRPORT_PROXYARP */
+ + nla_total_size(1) /* IFLA_BRPORT_PROXYARP_WIFI */
+ 0;
}
@@ -506,6 +508,8 @@
[IFLA_BRPORT_FAST_LEAVE]= { .type = NLA_U8 },
[IFLA_BRPORT_LEARNING] = { .type = NLA_U8 },
[IFLA_BRPORT_UNICAST_FLOOD] = { .type = NLA_U8 },
+ [IFLA_BRPORT_PROXYARP] = { .type = NLA_U8 },
+ [IFLA_BRPORT_PROXYARP_WIFI] = { .type = NLA_U8 },
};
/* Change the state of the port and notify spanning tree */
diff --git a/net/core/datagram.c b/net/core/datagram.c
index 4967262..617088a 100644
--- a/net/core/datagram.c
+++ b/net/core/datagram.c
@@ -131,12 +131,12 @@
goto out;
}
-static int skb_set_peeked(struct sk_buff *skb)
+static struct sk_buff *skb_set_peeked(struct sk_buff *skb)
{
struct sk_buff *nskb;
if (skb->peeked)
- return 0;
+ return skb;
/* We have to unshare an skb before modifying it. */
if (!skb_shared(skb))
@@ -144,7 +144,7 @@
nskb = skb_clone(skb, GFP_ATOMIC);
if (!nskb)
- return -ENOMEM;
+ return ERR_PTR(-ENOMEM);
skb->prev->next = nskb;
skb->next->prev = nskb;
@@ -157,7 +157,7 @@
done:
skb->peeked = 1;
- return 0;
+ return skb;
}
/**
@@ -229,8 +229,9 @@
continue;
}
- error = skb_set_peeked(skb);
- if (error)
+ skb = skb_set_peeked(skb);
+ error = PTR_ERR(skb);
+ if (IS_ERR(skb))
goto unlock_err;
atomic_inc(&skb->users);
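
For reference, a minimal sketch of the ERR_PTR idiom skb_set_peeked() adopts here, with a hypothetical make_widget()/use_widget() pair:

#include <linux/err.h>
#include <linux/slab.h>

struct widget { int id; };	/* hypothetical */

static struct widget *make_widget(void)
{
	struct widget *w = kzalloc(sizeof(*w), GFP_ATOMIC);

	if (!w)
		return ERR_PTR(-ENOMEM);	/* encode the errno in the pointer */
	return w;
}

static int use_widget(void)
{
	struct widget *w = make_widget();

	if (IS_ERR(w))
		return PTR_ERR(w);	/* decode it back into an int */
	kfree(w);
	return 0;
}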
diff --git a/net/core/pktgen.c b/net/core/pktgen.c
index 1ebdf1c0..1cbd209 100644
--- a/net/core/pktgen.c
+++ b/net/core/pktgen.c
@@ -3514,8 +3514,6 @@
set_freezable();
- __set_current_state(TASK_RUNNING);
-
while (!kthread_should_stop()) {
pkt_dev = next_to_run(t);
@@ -3560,7 +3558,6 @@
try_to_freeze();
}
- set_current_state(TASK_INTERRUPTIBLE);
pr_debug("%s stopping all device\n", t->tsk->comm);
pktgen_stop(t);
diff --git a/net/core/request_sock.c b/net/core/request_sock.c
index 87b22c0..b42f0e2 100644
--- a/net/core/request_sock.c
+++ b/net/core/request_sock.c
@@ -103,10 +103,16 @@
spin_lock_bh(&queue->syn_wait_lock);
while ((req = lopt->syn_table[i]) != NULL) {
lopt->syn_table[i] = req->dl_next;
+ /* Because of the following del_timer_sync(),
+ * we must release the spinlock here
+ * or risk a deadlock.
+ */
+ spin_unlock_bh(&queue->syn_wait_lock);
atomic_inc(&lopt->qlen_dec);
- if (del_timer(&req->rsk_timer))
+ if (del_timer_sync(&req->rsk_timer))
reqsk_put(req);
reqsk_put(req);
+ spin_lock_bh(&queue->syn_wait_lock);
}
spin_unlock_bh(&queue->syn_wait_lock);
}
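
A hedged sketch of the rule this hunk enforces: del_timer_sync() waits for a running handler to finish, so it must not be called while holding a lock the handler itself takes. The names below are hypothetical.

#include <linux/spinlock.h>
#include <linux/timer.h>

static DEFINE_SPINLOCK(table_lock);	/* also taken by the timer handler */

static void cancel_entry(struct timer_list *timer)	/* hypothetical */
{
	spin_lock_bh(&table_lock);
	/* ... unlink the entry under the lock ... */

	/* the handler takes table_lock too: calling del_timer_sync()
	 * with it held could deadlock, so drop it around the cancel
	 */
	spin_unlock_bh(&table_lock);
	del_timer_sync(timer);
	spin_lock_bh(&table_lock);

	/* ... continue walking the table ... */
	spin_unlock_bh(&table_lock);
}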
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index b6a19ca..7b84330 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -340,7 +340,7 @@
if (skb && frag_size) {
skb->head_frag = 1;
- if (virt_to_head_page(data)->pfmemalloc)
+ if (page_is_pfmemalloc(virt_to_head_page(data)))
skb->pfmemalloc = 1;
}
return skb;
@@ -4022,8 +4022,8 @@
* Otherwise returns the provided skb. Returns NULL in error cases
* (e.g. transport_len exceeds skb length or out-of-memory).
*
- * Caller needs to set the skb transport header and release the returned skb.
- * Provided skb is consumed.
+ * Caller needs to set the skb transport header and free any returned skb if it
+ * differs from the provided skb.
*/
static struct sk_buff *skb_checksum_maybe_trim(struct sk_buff *skb,
unsigned int transport_len)
@@ -4032,16 +4032,12 @@
unsigned int len = skb_transport_offset(skb) + transport_len;
int ret;
- if (skb->len < len) {
- kfree_skb(skb);
+ if (skb->len < len)
return NULL;
- } else if (skb->len == len) {
+ else if (skb->len == len)
return skb;
- }
skb_chk = skb_clone(skb, GFP_ATOMIC);
- kfree_skb(skb);
-
if (!skb_chk)
return NULL;
@@ -4066,8 +4062,8 @@
* If the skb has data beyond the given transport length, then a
* trimmed & cloned skb is checked and returned.
*
- * Caller needs to set the skb transport header and release the returned skb.
- * Provided skb is consumed.
+ * Caller needs to set the skb transport header and free any returned skb if it
+ * differs from the provided skb.
*/
struct sk_buff *skb_checksum_trimmed(struct sk_buff *skb,
unsigned int transport_len,
@@ -4079,23 +4075,26 @@
skb_chk = skb_checksum_maybe_trim(skb, transport_len);
if (!skb_chk)
- return NULL;
+ goto err;
- if (!pskb_may_pull(skb_chk, offset)) {
- kfree_skb(skb_chk);
- return NULL;
- }
+ if (!pskb_may_pull(skb_chk, offset))
+ goto err;
__skb_pull(skb_chk, offset);
ret = skb_chkf(skb_chk);
__skb_push(skb_chk, offset);
- if (ret) {
- kfree_skb(skb_chk);
- return NULL;
- }
+ if (ret)
+ goto err;
return skb_chk;
+
+err:
+ if (skb_chk && skb_chk != skb)
+ kfree_skb(skb_chk);
+
+ return NULL;
+
}
EXPORT_SYMBOL(skb_checksum_trimmed);
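
A hedged usage sketch matching the new ownership rule documented above: the provided skb is no longer consumed, so the caller frees the returned skb only when it is a distinct clone. check_packet() and its csum callback are hypothetical.

#include <linux/skbuff.h>

static int check_packet(struct sk_buff *skb, unsigned int transport_len,
			__sum16 (*csum)(struct sk_buff *))
{
	struct sk_buff *skb_chk;

	skb_chk = skb_checksum_trimmed(skb, transport_len, csum);
	if (!skb_chk)
		return -EINVAL;

	/* ... inspect skb_chk ... */

	if (skb_chk != skb)	/* only a trimmed clone belongs to us */
		kfree_skb(skb_chk);
	return 0;
}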
diff --git a/net/dsa/slave.c b/net/dsa/slave.c
index 0917123..35c47dd 100644
--- a/net/dsa/slave.c
+++ b/net/dsa/slave.c
@@ -756,7 +756,8 @@
return -ENODEV;
/* Use already configured phy mode */
- p->phy_interface = p->phy->interface;
+ if (p->phy_interface == PHY_INTERFACE_MODE_NA)
+ p->phy_interface = p->phy->interface;
phy_connect_direct(slave_dev, p->phy, dsa_slave_adjust_link,
p->phy_interface);
diff --git a/net/ipv4/fib_trie.c b/net/ipv4/fib_trie.c
index 37c4bb8..b0c6258 100644
--- a/net/ipv4/fib_trie.c
+++ b/net/ipv4/fib_trie.c
@@ -2465,7 +2465,7 @@
key = l->key + 1;
iter->pos++;
- if (pos-- <= 0)
+ if (--pos <= 0)
break;
l = NULL;
diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c
index 651cdf6..9fdfd9d 100644
--- a/net/ipv4/igmp.c
+++ b/net/ipv4/igmp.c
@@ -1435,33 +1435,35 @@
struct sk_buff *skb_chk;
unsigned int transport_len;
unsigned int len = skb_transport_offset(skb) + sizeof(struct igmphdr);
- int ret;
+ int ret = -EINVAL;
transport_len = ntohs(ip_hdr(skb)->tot_len) - ip_hdrlen(skb);
- skb_get(skb);
skb_chk = skb_checksum_trimmed(skb, transport_len,
ip_mc_validate_checksum);
if (!skb_chk)
- return -EINVAL;
+ goto err;
- if (!pskb_may_pull(skb_chk, len)) {
- kfree_skb(skb_chk);
- return -EINVAL;
- }
+ if (!pskb_may_pull(skb_chk, len))
+ goto err;
ret = ip_mc_check_igmp_msg(skb_chk);
- if (ret) {
- kfree_skb(skb_chk);
- return ret;
- }
+ if (ret)
+ goto err;
if (skb_trimmed)
*skb_trimmed = skb_chk;
- else
+ /* free the now unneeded clone */
+ else if (skb_chk != skb)
kfree_skb(skb_chk);
- return 0;
+ ret = 0;
+
+err:
+ if (ret && skb_chk && skb_chk != skb)
+ kfree_skb(skb_chk);
+
+ return ret;
}
/**
@@ -1470,7 +1472,7 @@
* @skb_trimmed: to store an skb pointer trimmed to IPv4 packet tail (optional)
*
* Checks whether an IPv4 packet is a valid IGMP packet. If so sets
- * skb network and transport headers accordingly and returns zero.
+ * skb transport header accordingly and returns zero.
*
* -EINVAL: A broken packet was detected, i.e. it violates some internet
* standard
@@ -1485,7 +1487,8 @@
* to leave the original skb and its full frame unchanged (which might be
* desirable for layer 2 frame jugglers).
*
- * The caller needs to release a reference count from any returned skb_trimmed.
+ * Caller needs to set the skb network header and free any returned skb if it
+ * differs from the provided skb.
*/
int ip_mc_check_igmp(struct sk_buff *skb, struct sk_buff **skb_trimmed)
{
diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c
index 60021d0..1349571 100644
--- a/net/ipv4/inet_connection_sock.c
+++ b/net/ipv4/inet_connection_sock.c
@@ -593,7 +593,7 @@
}
spin_unlock(&queue->syn_wait_lock);
- if (del_timer(&req->rsk_timer))
+ if (timer_pending(&req->rsk_timer) && del_timer_sync(&req->rsk_timer))
reqsk_put(req);
return found;
}
diff --git a/net/ipv4/netfilter/ipt_SYNPROXY.c b/net/ipv4/netfilter/ipt_SYNPROXY.c
index fe8cc18..95ea633e 100644
--- a/net/ipv4/netfilter/ipt_SYNPROXY.c
+++ b/net/ipv4/netfilter/ipt_SYNPROXY.c
@@ -226,7 +226,8 @@
synproxy_build_options(nth, opts);
- synproxy_send_tcp(skb, nskb, NULL, 0, niph, nth, tcp_hdr_size);
+ synproxy_send_tcp(skb, nskb, skb->nfct, IP_CT_ESTABLISHED_REPLY,
+ niph, nth, tcp_hdr_size);
}
static bool
diff --git a/net/ipv4/sysctl_net_ipv4.c b/net/ipv4/sysctl_net_ipv4.c
index 433231c..0330ab2 100644
--- a/net/ipv4/sysctl_net_ipv4.c
+++ b/net/ipv4/sysctl_net_ipv4.c
@@ -41,8 +41,6 @@
static int tcp_syn_retries_max = MAX_TCP_SYNCNT;
static int ip_ping_group_range_min[] = { 0, 0 };
static int ip_ping_group_range_max[] = { GID_T_MAX, GID_T_MAX };
-static int min_sndbuf = SOCK_MIN_SNDBUF;
-static int min_rcvbuf = SOCK_MIN_RCVBUF;
/* Update system visible IP port range */
static void set_local_port_range(struct net *net, int range[2])
@@ -530,7 +528,7 @@
.maxlen = sizeof(sysctl_tcp_wmem),
.mode = 0644,
.proc_handler = proc_dointvec_minmax,
- .extra1 = &min_sndbuf,
+ .extra1 = &one,
},
{
.procname = "tcp_notsent_lowat",
@@ -545,7 +543,7 @@
.maxlen = sizeof(sysctl_tcp_rmem),
.mode = 0644,
.proc_handler = proc_dointvec_minmax,
- .extra1 = &min_rcvbuf,
+ .extra1 = &one,
},
{
.procname = "tcp_app_win",
@@ -758,7 +756,7 @@
.maxlen = sizeof(sysctl_udp_rmem_min),
.mode = 0644,
.proc_handler = proc_dointvec_minmax,
- .extra1 = &min_rcvbuf,
+ .extra1 = &one
},
{
.procname = "udp_wmem_min",
@@ -766,7 +764,7 @@
.maxlen = sizeof(sysctl_udp_wmem_min),
.mode = 0644,
.proc_handler = proc_dointvec_minmax,
- .extra1 = &min_sndbuf,
+ .extra1 = &one
},
{ }
};
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index d7d4c2b..0ea2e1c 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -1348,7 +1348,7 @@
req = inet_csk_search_req(sk, th->source, iph->saddr, iph->daddr);
if (req) {
nsk = tcp_check_req(sk, skb, req, false);
- if (!nsk)
+ if (!nsk || nsk == sk)
reqsk_put(req);
return nsk;
}
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index 83aa604..1b8c5ba 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -1995,12 +1995,19 @@
skb->sk = sk;
skb->destructor = sock_efree;
- dst = sk->sk_rx_dst;
+ dst = READ_ONCE(sk->sk_rx_dst);
if (dst)
dst = dst_check(dst, 0);
- if (dst)
- skb_dst_set_noref(skb, dst);
+ if (dst) {
+ /* DST_NOCACHE cannot be used without taking a reference */
+ if (dst->flags & DST_NOCACHE) {
+ if (likely(atomic_inc_not_zero(&dst->__refcnt)))
+ skb_dst_set(skb, dst);
+ } else {
+ skb_dst_set_noref(skb, dst);
+ }
+ }
}
int udp_rcv(struct sk_buff *skb)
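
A short sketch of the refcount idiom the DST_NOCACHE branch uses, assuming a generic RCU-protected object: the reference is taken only if the count is still non-zero, since a plain increment could resurrect an object already on its way to being freed.

#include <linux/atomic.h>
#include <linux/types.h>

struct obj { atomic_t refcnt; };	/* hypothetical */

static bool try_get(struct obj *o)
{
	/* increments and returns true only if refcnt was not already 0 */
	return atomic_inc_not_zero(&o->refcnt);
}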
diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c
index 55d1986..548c623 100644
--- a/net/ipv6/ip6_fib.c
+++ b/net/ipv6/ip6_fib.c
@@ -172,6 +172,8 @@
*ppcpu_rt = NULL;
}
}
+
+ non_pcpu_rt->rt6i_pcpu = NULL;
}
static void rt6_release(struct rt6_info *rt)
diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c
index a38d3ac..69f4f68 100644
--- a/net/ipv6/ip6_gre.c
+++ b/net/ipv6/ip6_gre.c
@@ -361,6 +361,7 @@
struct ip6gre_net *ign = net_generic(t->net, ip6gre_net_id);
ip6gre_tunnel_unlink(ign, t);
+ ip6_tnl_dst_reset(t);
dev_put(dev);
}
diff --git a/net/ipv6/mcast_snoop.c b/net/ipv6/mcast_snoop.c
index df8afe5..9405b04 100644
--- a/net/ipv6/mcast_snoop.c
+++ b/net/ipv6/mcast_snoop.c
@@ -143,34 +143,36 @@
struct sk_buff *skb_chk = NULL;
unsigned int transport_len;
unsigned int len = skb_transport_offset(skb) + sizeof(struct mld_msg);
- int ret;
+ int ret = -EINVAL;
transport_len = ntohs(ipv6_hdr(skb)->payload_len);
transport_len -= skb_transport_offset(skb) - sizeof(struct ipv6hdr);
- skb_get(skb);
skb_chk = skb_checksum_trimmed(skb, transport_len,
ipv6_mc_validate_checksum);
if (!skb_chk)
- return -EINVAL;
+ goto err;
- if (!pskb_may_pull(skb_chk, len)) {
- kfree_skb(skb_chk);
- return -EINVAL;
- }
+ if (!pskb_may_pull(skb_chk, len))
+ goto err;
ret = ipv6_mc_check_mld_msg(skb_chk);
- if (ret) {
- kfree_skb(skb_chk);
- return ret;
- }
+ if (ret)
+ goto err;
if (skb_trimmed)
*skb_trimmed = skb_chk;
- else
+ /* free the now unneeded clone */
+ else if (skb_chk != skb)
kfree_skb(skb_chk);
- return 0;
+ ret = 0;
+
+err:
+ if (ret && skb_chk && skb_chk != skb)
+ kfree_skb(skb_chk);
+
+ return ret;
}
/**
@@ -179,7 +181,7 @@
* @skb_trimmed: to store an skb pointer trimmed to IPv6 packet tail (optional)
*
* Checks whether an IPv6 packet is a valid MLD packet. If so sets
- * skb network and transport headers accordingly and returns zero.
+ * skb transport header accordingly and returns zero.
*
* -EINVAL: A broken packet was detected, i.e. it violates some internet
* standard
@@ -194,7 +196,8 @@
* to leave the original skb and its full frame unchanged (which might be
* desirable for layer 2 frame jugglers).
*
- * The caller needs to release a reference count from any returned skb_trimmed.
+ * Caller needs to set the skb network header and free any returned skb if it
+ * differs from the provided skb.
*/
int ipv6_mc_check_mld(struct sk_buff *skb, struct sk_buff **skb_trimmed)
{
diff --git a/net/ipv6/netfilter/ip6t_SYNPROXY.c b/net/ipv6/netfilter/ip6t_SYNPROXY.c
index 6edb7b1..ebbb754 100644
--- a/net/ipv6/netfilter/ip6t_SYNPROXY.c
+++ b/net/ipv6/netfilter/ip6t_SYNPROXY.c
@@ -37,12 +37,13 @@
}
static void
-synproxy_send_tcp(const struct sk_buff *skb, struct sk_buff *nskb,
+synproxy_send_tcp(const struct synproxy_net *snet,
+ const struct sk_buff *skb, struct sk_buff *nskb,
struct nf_conntrack *nfct, enum ip_conntrack_info ctinfo,
struct ipv6hdr *niph, struct tcphdr *nth,
unsigned int tcp_hdr_size)
{
- struct net *net = nf_ct_net((struct nf_conn *)nfct);
+ struct net *net = nf_ct_net(snet->tmpl);
struct dst_entry *dst;
struct flowi6 fl6;
@@ -83,7 +84,8 @@
}
static void
-synproxy_send_client_synack(const struct sk_buff *skb, const struct tcphdr *th,
+synproxy_send_client_synack(const struct synproxy_net *snet,
+ const struct sk_buff *skb, const struct tcphdr *th,
const struct synproxy_options *opts)
{
struct sk_buff *nskb;
@@ -119,7 +121,7 @@
synproxy_build_options(nth, opts);
- synproxy_send_tcp(skb, nskb, skb->nfct, IP_CT_ESTABLISHED_REPLY,
+ synproxy_send_tcp(snet, skb, nskb, skb->nfct, IP_CT_ESTABLISHED_REPLY,
niph, nth, tcp_hdr_size);
}
@@ -163,7 +165,7 @@
synproxy_build_options(nth, opts);
- synproxy_send_tcp(skb, nskb, &snet->tmpl->ct_general, IP_CT_NEW,
+ synproxy_send_tcp(snet, skb, nskb, &snet->tmpl->ct_general, IP_CT_NEW,
niph, nth, tcp_hdr_size);
}
@@ -203,7 +205,7 @@
synproxy_build_options(nth, opts);
- synproxy_send_tcp(skb, nskb, NULL, 0, niph, nth, tcp_hdr_size);
+ synproxy_send_tcp(snet, skb, nskb, NULL, 0, niph, nth, tcp_hdr_size);
}
static void
@@ -241,7 +243,8 @@
synproxy_build_options(nth, opts);
- synproxy_send_tcp(skb, nskb, NULL, 0, niph, nth, tcp_hdr_size);
+ synproxy_send_tcp(snet, skb, nskb, skb->nfct, IP_CT_ESTABLISHED_REPLY,
+ niph, nth, tcp_hdr_size);
}
static bool
@@ -301,7 +304,7 @@
XT_SYNPROXY_OPT_SACK_PERM |
XT_SYNPROXY_OPT_ECN);
- synproxy_send_client_synack(skb, th, &opts);
+ synproxy_send_client_synack(snet, skb, th, &opts);
return NF_DROP;
} else if (th->ack && !(th->fin || th->rst || th->syn)) {
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index 6090969..d155864 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -318,8 +318,7 @@
/* allocate dst with ip6_dst_ops */
static struct rt6_info *__ip6_dst_alloc(struct net *net,
struct net_device *dev,
- int flags,
- struct fib6_table *table)
+ int flags)
{
struct rt6_info *rt = dst_alloc(&net->ipv6.ip6_dst_ops, dev,
0, DST_OBSOLETE_FORCE_CHK, flags);
@@ -336,10 +335,9 @@
static struct rt6_info *ip6_dst_alloc(struct net *net,
struct net_device *dev,
- int flags,
- struct fib6_table *table)
+ int flags)
{
- struct rt6_info *rt = __ip6_dst_alloc(net, dev, flags, table);
+ struct rt6_info *rt = __ip6_dst_alloc(net, dev, flags);
if (rt) {
rt->rt6i_pcpu = alloc_percpu_gfp(struct rt6_info *, GFP_ATOMIC);
@@ -950,8 +948,7 @@
if (ort->rt6i_flags & (RTF_CACHE | RTF_PCPU))
ort = (struct rt6_info *)ort->dst.from;
- rt = __ip6_dst_alloc(dev_net(ort->dst.dev), ort->dst.dev,
- 0, ort->rt6i_table);
+ rt = __ip6_dst_alloc(dev_net(ort->dst.dev), ort->dst.dev, 0);
if (!rt)
return NULL;
@@ -983,8 +980,7 @@
struct rt6_info *pcpu_rt;
pcpu_rt = __ip6_dst_alloc(dev_net(rt->dst.dev),
- rt->dst.dev, rt->dst.flags,
- rt->rt6i_table);
+ rt->dst.dev, rt->dst.flags);
if (!pcpu_rt)
return NULL;
@@ -997,32 +993,53 @@
/* It should be called with read_lock_bh(&tb6_lock) acquired */
static struct rt6_info *rt6_get_pcpu_route(struct rt6_info *rt)
{
- struct rt6_info *pcpu_rt, *prev, **p;
+ struct rt6_info *pcpu_rt, **p;
p = this_cpu_ptr(rt->rt6i_pcpu);
pcpu_rt = *p;
- if (pcpu_rt)
- goto done;
+ if (pcpu_rt) {
+ dst_hold(&pcpu_rt->dst);
+ rt6_dst_from_metrics_check(pcpu_rt);
+ }
+ return pcpu_rt;
+}
+
+static struct rt6_info *rt6_make_pcpu_route(struct rt6_info *rt)
+{
+ struct fib6_table *table = rt->rt6i_table;
+ struct rt6_info *pcpu_rt, *prev, **p;
pcpu_rt = ip6_rt_pcpu_alloc(rt);
if (!pcpu_rt) {
struct net *net = dev_net(rt->dst.dev);
- pcpu_rt = net->ipv6.ip6_null_entry;
- goto done;
+ dst_hold(&net->ipv6.ip6_null_entry->dst);
+ return net->ipv6.ip6_null_entry;
}
- prev = cmpxchg(p, NULL, pcpu_rt);
- if (prev) {
- /* If someone did it before us, return prev instead */
+ read_lock_bh(&table->tb6_lock);
+ if (rt->rt6i_pcpu) {
+ p = this_cpu_ptr(rt->rt6i_pcpu);
+ prev = cmpxchg(p, NULL, pcpu_rt);
+ if (prev) {
+ /* If someone did it before us, return prev instead */
+ dst_destroy(&pcpu_rt->dst);
+ pcpu_rt = prev;
+ }
+ } else {
+ /* rt has been removed from the fib6 tree
+ * before we have a chance to acquire the read_lock.
+ * In this case, don't bother to create a pcpu rt
+ * since rt is going away anyway. The next
+ * dst_check() will trigger a re-lookup.
+ */
dst_destroy(&pcpu_rt->dst);
- pcpu_rt = prev;
+ pcpu_rt = rt;
}
-
-done:
dst_hold(&pcpu_rt->dst);
rt6_dst_from_metrics_check(pcpu_rt);
+ read_unlock_bh(&table->tb6_lock);
return pcpu_rt;
}
@@ -1097,9 +1114,22 @@
rt->dst.lastuse = jiffies;
rt->dst.__use++;
pcpu_rt = rt6_get_pcpu_route(rt);
- read_unlock_bh(&table->tb6_lock);
+
+ if (pcpu_rt) {
+ read_unlock_bh(&table->tb6_lock);
+ } else {
+ /* We have to do the read_unlock first
+ * because rt6_make_pcpu_route() may trigger
+ * ip6_dst_gc() which will take the write_lock.
+ */
+ dst_hold(&rt->dst);
+ read_unlock_bh(&table->tb6_lock);
+ pcpu_rt = rt6_make_pcpu_route(rt);
+ dst_release(&rt->dst);
+ }
return pcpu_rt;
+
}
}
@@ -1555,7 +1585,7 @@
if (unlikely(!idev))
return ERR_PTR(-ENODEV);
- rt = ip6_dst_alloc(net, dev, 0, NULL);
+ rt = ip6_dst_alloc(net, dev, 0);
if (unlikely(!rt)) {
in6_dev_put(idev);
dst = ERR_PTR(-ENOMEM);
@@ -1742,7 +1772,8 @@
if (!table)
goto out;
- rt = ip6_dst_alloc(net, NULL, (cfg->fc_flags & RTF_ADDRCONF) ? 0 : DST_NOCOUNT, table);
+ rt = ip6_dst_alloc(net, NULL,
+ (cfg->fc_flags & RTF_ADDRCONF) ? 0 : DST_NOCOUNT);
if (!rt) {
err = -ENOMEM;
@@ -1831,6 +1862,7 @@
int gwa_type;
gw_addr = &cfg->fc_gateway;
+ gwa_type = ipv6_addr_type(gw_addr);
/* if gw_addr is local we will fail to detect this in case
* address is still TENTATIVE (DAD in progress). rt6_lookup()
@@ -1838,11 +1870,12 @@
* prefix route was assigned to, which might be non-loopback.
*/
err = -EINVAL;
- if (ipv6_chk_addr_and_flags(net, gw_addr, NULL, 0, 0))
+ if (ipv6_chk_addr_and_flags(net, gw_addr,
+ gwa_type & IPV6_ADDR_LINKLOCAL ?
+ dev : NULL, 0, 0))
goto out;
rt->rt6i_gateway = *gw_addr;
- gwa_type = ipv6_addr_type(gw_addr);
if (gwa_type != (IPV6_ADDR_LINKLOCAL|IPV6_ADDR_UNICAST)) {
struct rt6_info *grt;
@@ -2397,7 +2430,7 @@
{
struct net *net = dev_net(idev->dev);
struct rt6_info *rt = ip6_dst_alloc(net, net->loopback_dev,
- DST_NOCOUNT, NULL);
+ DST_NOCOUNT);
if (!rt)
return ERR_PTR(-ENOMEM);
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index 6748c42..7a6cea5 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -943,7 +943,7 @@
&ipv6_hdr(skb)->daddr, tcp_v6_iif(skb));
if (req) {
nsk = tcp_check_req(sk, skb, req, false);
- if (!nsk)
+ if (!nsk || nsk == sk)
reqsk_put(req);
return nsk;
}
diff --git a/net/key/af_key.c b/net/key/af_key.c
index b397f0a..83a7068 100644
--- a/net/key/af_key.c
+++ b/net/key/af_key.c
@@ -219,7 +219,7 @@
#define BROADCAST_ONE 1
#define BROADCAST_REGISTERED 2
#define BROADCAST_PROMISC_ONLY 4
-static int pfkey_broadcast(struct sk_buff *skb, gfp_t allocation,
+static int pfkey_broadcast(struct sk_buff *skb,
int broadcast_flags, struct sock *one_sk,
struct net *net)
{
@@ -244,7 +244,7 @@
* socket.
*/
if (pfk->promisc)
- pfkey_broadcast_one(skb, &skb2, allocation, sk);
+ pfkey_broadcast_one(skb, &skb2, GFP_ATOMIC, sk);
/* the exact target will be processed later */
if (sk == one_sk)
@@ -259,7 +259,7 @@
continue;
}
- err2 = pfkey_broadcast_one(skb, &skb2, allocation, sk);
+ err2 = pfkey_broadcast_one(skb, &skb2, GFP_ATOMIC, sk);
/* Error is cleared after successful sending to at least one
* registered KM */
@@ -269,7 +269,7 @@
rcu_read_unlock();
if (one_sk != NULL)
- err = pfkey_broadcast_one(skb, &skb2, allocation, one_sk);
+ err = pfkey_broadcast_one(skb, &skb2, GFP_KERNEL, one_sk);
kfree_skb(skb2);
kfree_skb(skb);
@@ -292,7 +292,7 @@
hdr = (struct sadb_msg *) pfk->dump.skb->data;
hdr->sadb_msg_seq = 0;
hdr->sadb_msg_errno = rc;
- pfkey_broadcast(pfk->dump.skb, GFP_ATOMIC, BROADCAST_ONE,
+ pfkey_broadcast(pfk->dump.skb, BROADCAST_ONE,
&pfk->sk, sock_net(&pfk->sk));
pfk->dump.skb = NULL;
}
@@ -333,7 +333,7 @@
hdr->sadb_msg_len = (sizeof(struct sadb_msg) /
sizeof(uint64_t));
- pfkey_broadcast(skb, GFP_KERNEL, BROADCAST_ONE, sk, sock_net(sk));
+ pfkey_broadcast(skb, BROADCAST_ONE, sk, sock_net(sk));
return 0;
}
@@ -1365,7 +1365,7 @@
xfrm_state_put(x);
- pfkey_broadcast(resp_skb, GFP_KERNEL, BROADCAST_ONE, sk, net);
+ pfkey_broadcast(resp_skb, BROADCAST_ONE, sk, net);
return 0;
}
@@ -1452,7 +1452,7 @@
hdr->sadb_msg_seq = c->seq;
hdr->sadb_msg_pid = c->portid;
- pfkey_broadcast(skb, GFP_ATOMIC, BROADCAST_ALL, NULL, xs_net(x));
+ pfkey_broadcast(skb, BROADCAST_ALL, NULL, xs_net(x));
return 0;
}
@@ -1565,7 +1565,7 @@
out_hdr->sadb_msg_reserved = 0;
out_hdr->sadb_msg_seq = hdr->sadb_msg_seq;
out_hdr->sadb_msg_pid = hdr->sadb_msg_pid;
- pfkey_broadcast(out_skb, GFP_ATOMIC, BROADCAST_ONE, sk, sock_net(sk));
+ pfkey_broadcast(out_skb, BROADCAST_ONE, sk, sock_net(sk));
return 0;
}
@@ -1670,7 +1670,7 @@
return -ENOBUFS;
}
- pfkey_broadcast(supp_skb, GFP_KERNEL, BROADCAST_REGISTERED, sk, sock_net(sk));
+ pfkey_broadcast(supp_skb, BROADCAST_REGISTERED, sk, sock_net(sk));
return 0;
}
@@ -1689,7 +1689,7 @@
hdr->sadb_msg_errno = (uint8_t) 0;
hdr->sadb_msg_len = (sizeof(struct sadb_msg) / sizeof(uint64_t));
- return pfkey_broadcast(skb, GFP_ATOMIC, BROADCAST_ONE, sk, sock_net(sk));
+ return pfkey_broadcast(skb, BROADCAST_ONE, sk, sock_net(sk));
}
static int key_notify_sa_flush(const struct km_event *c)
@@ -1710,7 +1710,7 @@
hdr->sadb_msg_len = (sizeof(struct sadb_msg) / sizeof(uint64_t));
hdr->sadb_msg_reserved = 0;
- pfkey_broadcast(skb, GFP_ATOMIC, BROADCAST_ALL, NULL, c->net);
+ pfkey_broadcast(skb, BROADCAST_ALL, NULL, c->net);
return 0;
}
@@ -1767,7 +1767,7 @@
out_hdr->sadb_msg_pid = pfk->dump.msg_portid;
if (pfk->dump.skb)
- pfkey_broadcast(pfk->dump.skb, GFP_ATOMIC, BROADCAST_ONE,
+ pfkey_broadcast(pfk->dump.skb, BROADCAST_ONE,
&pfk->sk, sock_net(&pfk->sk));
pfk->dump.skb = out_skb;
@@ -1847,7 +1847,7 @@
new_hdr->sadb_msg_errno = 0;
}
- pfkey_broadcast(skb, GFP_KERNEL, BROADCAST_ALL, NULL, sock_net(sk));
+ pfkey_broadcast(skb, BROADCAST_ALL, NULL, sock_net(sk));
return 0;
}
@@ -2181,7 +2181,7 @@
out_hdr->sadb_msg_errno = 0;
out_hdr->sadb_msg_seq = c->seq;
out_hdr->sadb_msg_pid = c->portid;
- pfkey_broadcast(out_skb, GFP_ATOMIC, BROADCAST_ALL, NULL, xp_net(xp));
+ pfkey_broadcast(out_skb, BROADCAST_ALL, NULL, xp_net(xp));
return 0;
}
@@ -2401,7 +2401,7 @@
out_hdr->sadb_msg_errno = 0;
out_hdr->sadb_msg_seq = hdr->sadb_msg_seq;
out_hdr->sadb_msg_pid = hdr->sadb_msg_pid;
- pfkey_broadcast(out_skb, GFP_ATOMIC, BROADCAST_ONE, sk, xp_net(xp));
+ pfkey_broadcast(out_skb, BROADCAST_ONE, sk, xp_net(xp));
err = 0;
out:
@@ -2655,7 +2655,7 @@
out_hdr->sadb_msg_pid = pfk->dump.msg_portid;
if (pfk->dump.skb)
- pfkey_broadcast(pfk->dump.skb, GFP_ATOMIC, BROADCAST_ONE,
+ pfkey_broadcast(pfk->dump.skb, BROADCAST_ONE,
&pfk->sk, sock_net(&pfk->sk));
pfk->dump.skb = out_skb;
@@ -2708,7 +2708,7 @@
hdr->sadb_msg_satype = SADB_SATYPE_UNSPEC;
hdr->sadb_msg_len = (sizeof(struct sadb_msg) / sizeof(uint64_t));
hdr->sadb_msg_reserved = 0;
- pfkey_broadcast(skb_out, GFP_ATOMIC, BROADCAST_ALL, NULL, c->net);
+ pfkey_broadcast(skb_out, BROADCAST_ALL, NULL, c->net);
return 0;
}
@@ -2770,7 +2770,7 @@
void *ext_hdrs[SADB_EXT_MAX];
int err;
- pfkey_broadcast(skb_clone(skb, GFP_KERNEL), GFP_KERNEL,
+ pfkey_broadcast(skb_clone(skb, GFP_KERNEL),
BROADCAST_PROMISC_ONLY, NULL, sock_net(sk));
memset(ext_hdrs, 0, sizeof(ext_hdrs));
@@ -2992,7 +2992,7 @@
out_hdr->sadb_msg_seq = 0;
out_hdr->sadb_msg_pid = 0;
- pfkey_broadcast(out_skb, GFP_ATOMIC, BROADCAST_REGISTERED, NULL, xs_net(x));
+ pfkey_broadcast(out_skb, BROADCAST_REGISTERED, NULL, xs_net(x));
return 0;
}
@@ -3182,7 +3182,7 @@
xfrm_ctx->ctx_len);
}
- return pfkey_broadcast(skb, GFP_ATOMIC, BROADCAST_REGISTERED, NULL, xs_net(x));
+ return pfkey_broadcast(skb, BROADCAST_REGISTERED, NULL, xs_net(x));
}
static struct xfrm_policy *pfkey_compile_policy(struct sock *sk, int opt,
@@ -3380,7 +3380,7 @@
n_port->sadb_x_nat_t_port_port = sport;
n_port->sadb_x_nat_t_port_reserved = 0;
- return pfkey_broadcast(skb, GFP_ATOMIC, BROADCAST_REGISTERED, NULL, xs_net(x));
+ return pfkey_broadcast(skb, BROADCAST_REGISTERED, NULL, xs_net(x));
}
#ifdef CONFIG_NET_KEY_MIGRATE
@@ -3572,7 +3572,7 @@
}
/* broadcast migrate message to sockets */
- pfkey_broadcast(skb, GFP_ATOMIC, BROADCAST_ALL, NULL, &init_net);
+ pfkey_broadcast(skb, BROADCAST_ALL, NULL, &init_net);
return 0;
diff --git a/net/mac80211/rc80211_minstrel.c b/net/mac80211/rc80211_minstrel.c
index 247552a..3ece7d1 100644
--- a/net/mac80211/rc80211_minstrel.c
+++ b/net/mac80211/rc80211_minstrel.c
@@ -92,14 +92,15 @@
static inline void
minstrel_sort_best_tp_rates(struct minstrel_sta_info *mi, int i, u8 *tp_list)
{
- int j = MAX_THR_RATES;
- struct minstrel_rate_stats *tmp_mrs = &mi->r[j - 1].stats;
+ int j;
+ struct minstrel_rate_stats *tmp_mrs;
struct minstrel_rate_stats *cur_mrs = &mi->r[i].stats;
- while (j > 0 && (minstrel_get_tp_avg(&mi->r[i], cur_mrs->prob_ewma) >
- minstrel_get_tp_avg(&mi->r[tp_list[j - 1]], tmp_mrs->prob_ewma))) {
- j--;
+ for (j = MAX_THR_RATES; j > 0; --j) {
tmp_mrs = &mi->r[tp_list[j - 1]].stats;
+ if (minstrel_get_tp_avg(&mi->r[i], cur_mrs->prob_ewma) <=
+ minstrel_get_tp_avg(&mi->r[tp_list[j - 1]], tmp_mrs->prob_ewma))
+ break;
}
if (j < MAX_THR_RATES - 1)
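
The minstrel change converts the descending-position search into a bounded for loop: scan the top-rates list from the tail and stop at the first entry whose throughput is not below the candidate's; the entries below that point are then shifted down. A compilable sketch of the same insertion step over a plain int array (MAX_THR and the array layout are assumptions of the sketch):

    #include <string.h>

    #define MAX_THR 4

    /* Insert 'cand' into the descending top-MAX_THR list, if it ranks. */
    static void insert_top(int top[MAX_THR], int cand)
    {
            int j;

            /* scan from the tail; stop at the first entry >= cand */
            for (j = MAX_THR; j > 0; j--)
                    if (top[j - 1] >= cand)
                            break;

            if (j < MAX_THR) {
                    memmove(&top[j + 1], &top[j],
                            (MAX_THR - j - 1) * sizeof(top[0]));
                    top[j] = cand;
            }
    }
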
diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
index 651039a..3c20d02 100644
--- a/net/netfilter/nf_conntrack_core.c
+++ b/net/netfilter/nf_conntrack_core.c
@@ -292,7 +292,7 @@
{
struct nf_conn *tmpl;
- tmpl = kzalloc(sizeof(struct nf_conn), GFP_KERNEL);
+ tmpl = kzalloc(sizeof(*tmpl), flags);
if (tmpl == NULL)
return NULL;
@@ -303,7 +303,7 @@
if (zone) {
struct nf_conntrack_zone *nf_ct_zone;
- nf_ct_zone = nf_ct_ext_add(tmpl, NF_CT_EXT_ZONE, GFP_ATOMIC);
+ nf_ct_zone = nf_ct_ext_add(tmpl, NF_CT_EXT_ZONE, flags);
if (!nf_ct_zone)
goto out_free;
nf_ct_zone->id = zone;
@@ -1544,10 +1544,8 @@
sz = nr_slots * sizeof(struct hlist_nulls_head);
hash = (void *)__get_free_pages(GFP_KERNEL | __GFP_NOWARN | __GFP_ZERO,
get_order(sz));
- if (!hash) {
- printk(KERN_WARNING "nf_conntrack: falling back to vmalloc.\n");
+ if (!hash)
hash = vzalloc(sz);
- }
if (hash && nulls)
for (i = 0; i < nr_slots; i++)
diff --git a/net/netfilter/nf_synproxy_core.c b/net/netfilter/nf_synproxy_core.c
index 71f1e9f..d7f1685 100644
--- a/net/netfilter/nf_synproxy_core.c
+++ b/net/netfilter/nf_synproxy_core.c
@@ -353,10 +353,8 @@
int err = -ENOMEM;
ct = nf_ct_tmpl_alloc(net, 0, GFP_KERNEL);
- if (IS_ERR(ct)) {
- err = PTR_ERR(ct);
+ if (!ct)
goto err1;
- }
if (!nfct_seqadj_ext_add(ct))
goto err2;
diff --git a/net/netfilter/xt_CT.c b/net/netfilter/xt_CT.c
index c663003..43ddeee 100644
--- a/net/netfilter/xt_CT.c
+++ b/net/netfilter/xt_CT.c
@@ -202,9 +202,10 @@
goto err1;
ct = nf_ct_tmpl_alloc(par->net, info->zone, GFP_KERNEL);
- ret = PTR_ERR(ct);
- if (IS_ERR(ct))
+ if (!ct) {
+ ret = -ENOMEM;
goto err2;
+ }
ret = 0;
if ((info->ct_events || info->exp_events) &&
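
Both the nf_synproxy and xt_CT hunks follow from a single API change: nf_ct_tmpl_alloc() now returns NULL on failure rather than an ERR_PTR(), so the old IS_ERR()/PTR_ERR() handling would silently miss the failure. A userspace sketch of why the old check no longer fires (the macro below imitates the kernel's IS_ERR(), and tmpl_alloc() is a stand-in for nf_ct_tmpl_alloc()):

    #include <errno.h>
    #include <stdlib.h>

    #define MAX_ERRNO 4095UL
    #define IS_ERR(p) ((unsigned long)(p) >= -MAX_ERRNO)

    static void *tmpl_alloc(void)
    {
            return calloc(1, 64);   /* NULL on failure, never an ERR_PTR */
    }

    static int caller(void)
    {
            void *ct = tmpl_alloc();

            if (!ct)                /* the check the fix switches to */
                    return -ENOMEM;
            /* IS_ERR(NULL) is false, so the old IS_ERR() check would
             * have let a failed allocation through.
             */
            free(ct);
            return 0;
    }
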
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
index d8e2e39..a774985 100644
--- a/net/netlink/af_netlink.c
+++ b/net/netlink/af_netlink.c
@@ -1096,6 +1096,11 @@
err = __netlink_insert(table, sk);
if (err) {
+ /* If the hashtable backend returns -EBUSY here,
+ * it must not escape to the caller.
+ */
+ if (unlikely(err == -EBUSY))
+ err = -EOVERFLOW;
if (err == -EEXIST)
err = -EADDRINUSE;
nlk_sk(sk)->portid = 0;
@@ -2396,7 +2401,7 @@
* sendmsg(), but that's what we've got...
*/
if (netlink_tx_is_mmaped(sk) &&
- msg->msg_iter.type == ITER_IOVEC &&
+ iter_is_iovec(&msg->msg_iter) &&
msg->msg_iter.nr_segs == 1 &&
msg->msg_iter.iov->iov_base == NULL) {
err = netlink_mmap_sendmsg(sk, msg, dst_portid, dst_group,
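
The __netlink_insert() fix is boundary error translation: the resizable-hashtable backend can transiently fail with -EBUSY, and that internal value must be remapped before it can reach userspace, where it would be misread. A small sketch of the remapping step (the comments on the boundary meanings are the sketch's assumptions):

    #include <errno.h>

    /* Translate backend errno values at the uapi boundary. */
    static int map_insert_err(int err)
    {
            if (err == -EBUSY)      /* backend busy, e.g. table resize */
                    return -EOVERFLOW;
            if (err == -EEXIST)     /* key already present */
                    return -EADDRINUSE;
            return err;
    }
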
diff --git a/net/openvswitch/actions.c b/net/openvswitch/actions.c
index 8a8c0b8..ee34f47 100644
--- a/net/openvswitch/actions.c
+++ b/net/openvswitch/actions.c
@@ -273,28 +273,36 @@
return 0;
}
-static void set_ip_addr(struct sk_buff *skb, struct iphdr *nh,
- __be32 *addr, __be32 new_addr)
+static void update_ip_l4_checksum(struct sk_buff *skb, struct iphdr *nh,
+ __be32 addr, __be32 new_addr)
{
int transport_len = skb->len - skb_transport_offset(skb);
+ if (nh->frag_off & htons(IP_OFFSET))
+ return;
+
if (nh->protocol == IPPROTO_TCP) {
if (likely(transport_len >= sizeof(struct tcphdr)))
inet_proto_csum_replace4(&tcp_hdr(skb)->check, skb,
- *addr, new_addr, 1);
+ addr, new_addr, 1);
} else if (nh->protocol == IPPROTO_UDP) {
if (likely(transport_len >= sizeof(struct udphdr))) {
struct udphdr *uh = udp_hdr(skb);
if (uh->check || skb->ip_summed == CHECKSUM_PARTIAL) {
inet_proto_csum_replace4(&uh->check, skb,
- *addr, new_addr, 1);
+ addr, new_addr, 1);
if (!uh->check)
uh->check = CSUM_MANGLED_0;
}
}
}
+}
+static void set_ip_addr(struct sk_buff *skb, struct iphdr *nh,
+ __be32 *addr, __be32 new_addr)
+{
+ update_ip_l4_checksum(skb, nh, *addr, new_addr);
csum_replace4(&nh->check, *addr, new_addr);
skb_clear_hash(skb);
*addr = new_addr;
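
The openvswitch refactor hinges on one IP fact: only the first fragment of a datagram (fragment offset zero) carries the L4 header, so the TCP/UDP checksum fixup must be skipped for all later fragments. A compilable sketch of the guard (the constant matches the kernel's IP_OFFSET mask):

    #include <arpa/inet.h>
    #include <stdbool.h>
    #include <stdint.h>

    #define IP_OFFSET 0x1FFF        /* fragment offset bits, host order */

    /* True only for the first fragment, which contains the L4 header. */
    static bool has_l4_header(uint16_t frag_off /* network order */)
    {
            return (ntohs(frag_off) & IP_OFFSET) == 0;
    }
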
diff --git a/net/rds/info.c b/net/rds/info.c
index 9a6b4f6..140a44a5f 100644
--- a/net/rds/info.c
+++ b/net/rds/info.c
@@ -176,7 +176,7 @@
/* check for all kinds of wrapping and the like */
start = (unsigned long)optval;
- if (len < 0 || len + PAGE_SIZE - 1 < len || start + len < start) {
+ if (len < 0 || len > INT_MAX - PAGE_SIZE + 1 || start + len < start) {
ret = -EINVAL;
goto out;
}
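
The rds check is rewritten so the arithmetic itself cannot overflow: the old form computed len + PAGE_SIZE - 1 and tested for wraparound, but signed overflow is undefined behaviour in C, so a compiler may delete such a test. Rearranged, the bound is computed from INT_MAX and never wraps. A compilable illustration (PAGE_SIZE fixed at 4096 for the sketch):

    #include <limits.h>
    #include <stdbool.h>

    #define PAGE_SIZE 4096

    /* Reject lengths for which len + PAGE_SIZE - 1 would overflow. */
    static bool page_round_up_overflows(int len)
    {
            return len < 0 || len > INT_MAX - PAGE_SIZE + 1;
    }
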
diff --git a/net/sched/act_mirred.c b/net/sched/act_mirred.c
index a42a3b2..2685450 100644
--- a/net/sched/act_mirred.c
+++ b/net/sched/act_mirred.c
@@ -98,6 +98,8 @@
return ret;
ret = ACT_P_CREATED;
} else {
+ if (bind)
+ return 0;
if (!ovr) {
tcf_hash_release(a, bind);
return -EEXIST;
diff --git a/net/sched/cls_u32.c b/net/sched/cls_u32.c
index cab9e9b..4fbb674 100644
--- a/net/sched/cls_u32.c
+++ b/net/sched/cls_u32.c
@@ -490,6 +490,19 @@
return false;
}
}
+
+ if (tp_c->refcnt > 1)
+ return false;
+
+ if (tp_c->refcnt == 1) {
+ struct tc_u_hnode *ht;
+
+ for (ht = rtnl_dereference(tp_c->hlist);
+ ht;
+ ht = rtnl_dereference(ht->next))
+ if (!ht_empty(ht))
+ return false;
+ }
}
if (root_ht && --root_ht->refcnt == 0)
diff --git a/net/sched/sch_fq_codel.c b/net/sched/sch_fq_codel.c
index 21ca33c..a9ba030 100644
--- a/net/sched/sch_fq_codel.c
+++ b/net/sched/sch_fq_codel.c
@@ -288,10 +288,26 @@
static void fq_codel_reset(struct Qdisc *sch)
{
- struct sk_buff *skb;
+ struct fq_codel_sched_data *q = qdisc_priv(sch);
+ int i;
- while ((skb = fq_codel_dequeue(sch)) != NULL)
- kfree_skb(skb);
+ INIT_LIST_HEAD(&q->new_flows);
+ INIT_LIST_HEAD(&q->old_flows);
+ for (i = 0; i < q->flows_cnt; i++) {
+ struct fq_codel_flow *flow = q->flows + i;
+
+ while (flow->head) {
+ struct sk_buff *skb = dequeue_head(flow);
+
+ qdisc_qstats_backlog_dec(sch, skb);
+ kfree_skb(skb);
+ }
+
+ INIT_LIST_HEAD(&flow->flowchain);
+ codel_vars_init(&flow->cvars);
+ }
+ memset(q->backlogs, 0, q->flows_cnt * sizeof(u32));
+ sch->q.qlen = 0;
}
static const struct nla_policy fq_codel_policy[TCA_FQ_CODEL_MAX + 1] = {
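
The new fq_codel_reset() drains each flow directly and zeroes the aggregate counters, instead of looping over the normal dequeue path, which would run the CoDel state machine and update statistics mid-reset. The shape of the fix, as a self-contained sketch over a toy flow table:

    #include <stdlib.h>

    struct pkt  { struct pkt *next; };
    struct flow { struct pkt *head; };

    /* Drain every flow and reset the shared accounting in one pass. */
    static void reset_flows(struct flow *flows, int n,
                            unsigned int *backlog, unsigned int *qlen)
    {
            int i;

            for (i = 0; i < n; i++) {
                    while (flows[i].head) {
                            struct pkt *p = flows[i].head;

                            flows[i].head = p->next;
                            free(p);
                    }
            }
            *backlog = 0;
            *qlen = 0;
    }
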
diff --git a/net/sctp/sm_make_chunk.c b/net/sctp/sm_make_chunk.c
index 06320c8..a655ddc 100644
--- a/net/sctp/sm_make_chunk.c
+++ b/net/sctp/sm_make_chunk.c
@@ -3132,11 +3132,18 @@
case SCTP_PARAM_IPV4_ADDRESS:
if (length != sizeof(sctp_ipv4addr_param_t))
return false;
+ /* ensure there is only one addr param and it's at the
+ * beginning of addip_hdr params, or we reject it.
+ */
+ if (param.v != addip->addip_hdr.params)
+ return false;
addr_param_seen = true;
break;
case SCTP_PARAM_IPV6_ADDRESS:
if (length != sizeof(sctp_ipv6addr_param_t))
return false;
+ if (param.v != addip->addip_hdr.params)
+ return false;
addr_param_seen = true;
break;
case SCTP_PARAM_ADD_IP:
diff --git a/net/sctp/sm_sideeffect.c b/net/sctp/sm_sideeffect.c
index fef2acd..85e6f03 100644
--- a/net/sctp/sm_sideeffect.c
+++ b/net/sctp/sm_sideeffect.c
@@ -702,7 +702,7 @@
* outstanding data and rely on the retransmission limit being reached
* to shut down the association.
*/
- if (t->asoc->state != SCTP_STATE_SHUTDOWN_PENDING)
+ if (t->asoc->state < SCTP_STATE_SHUTDOWN_PENDING)
t->asoc->overall_error_count = 0;
/* Clear the hb_sent flag to signal that we had a good
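
The one-character sm_sideeffect.c change relies on SCTP association states being an ordered enum: every state at or beyond SHUTDOWN_PENDING must keep accumulating transport errors, and the old != comparison excluded only one of those states. Sketched with assumed enum values; only their order matters:

    /* State values are assumptions for the sketch; the shutdown
     * states sort after ESTABLISHED.
     */
    enum assoc_state {
            ESTABLISHED       = 4,
            SHUTDOWN_PENDING  = 5,
            SHUTDOWN_SENT     = 6,
            SHUTDOWN_RECEIVED = 7,
    };

    static int may_reset_error_count(enum assoc_state s)
    {
            return s < SHUTDOWN_PENDING;    /* was: s != SHUTDOWN_PENDING */
    }
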
diff --git a/scripts/kconfig/streamline_config.pl b/scripts/kconfig/streamline_config.pl
index 9cb8522..f3d3fb4 100755
--- a/scripts/kconfig/streamline_config.pl
+++ b/scripts/kconfig/streamline_config.pl
@@ -137,7 +137,7 @@
my $kconfig = $ARGV[1];
my $lsmod_file = $ENV{'LSMOD'};
-my @makefiles = `find $ksource -name Makefile 2>/dev/null`;
+my @makefiles = `find $ksource -name Makefile -or -name Kbuild 2>/dev/null`;
chomp @makefiles;
my %depends;
diff --git a/security/security.c b/security/security.c
index 595fffa..9942836 100644
--- a/security/security.c
+++ b/security/security.c
@@ -380,8 +380,8 @@
return 0;
if (!initxattrs)
- return call_int_hook(inode_init_security, 0, inode, dir, qstr,
- NULL, NULL, NULL);
+ return call_int_hook(inode_init_security, -EOPNOTSUPP, inode,
+ dir, qstr, NULL, NULL, NULL);
memset(new_xattrs, 0, sizeof(new_xattrs));
lsm_xattr = new_xattrs;
ret = call_int_hook(inode_init_security, -EOPNOTSUPP, inode, dir, qstr,
@@ -409,8 +409,8 @@
{
if (unlikely(IS_PRIVATE(inode)))
return -EOPNOTSUPP;
- return call_int_hook(inode_init_security, 0, inode, dir, qstr,
- name, value, len);
+ return call_int_hook(inode_init_security, -EOPNOTSUPP, inode, dir,
+ qstr, name, value, len);
}
EXPORT_SYMBOL(security_old_inode_init_security);
@@ -1281,7 +1281,8 @@
int security_socket_getpeersec_dgram(struct socket *sock, struct sk_buff *skb, u32 *secid)
{
- return call_int_hook(socket_getpeersec_dgram, 0, sock, skb, secid);
+ return call_int_hook(socket_getpeersec_dgram, -ENOPROTOOPT, sock,
+ skb, secid);
}
EXPORT_SYMBOL(security_socket_getpeersec_dgram);
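
All three security.c hunks adjust the same mechanism: call_int_hook() walks the registered LSM hooks and, when none is registered, returns its default argument, so a default of 0 reported silent success for operations the caller must treat as unsupported. A userspace sketch of the default-return hook chain (the list layout is the sketch's, not the kernel's):

    struct hook {
            int (*fn)(void *arg);
            struct hook *next;
    };

    /* Return 'defval' when no hook is registered; otherwise the
     * first nonzero hook result wins.
     */
    static int call_hooks(struct hook *list, int defval, void *arg)
    {
            int rc = defval;
            struct hook *h;

            for (h = list; h; h = h->next) {
                    rc = h->fn(arg);
                    if (rc != 0)
                            break;
            }
            return rc;
    }
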
diff --git a/sound/pci/hda/hda_codec.c b/sound/pci/hda/hda_codec.c
index 5de3c5d..d1a2cb6 100644
--- a/sound/pci/hda/hda_codec.c
+++ b/sound/pci/hda/hda_codec.c
@@ -3172,7 +3172,7 @@
struct snd_pcm_chmap *chmap;
const struct snd_pcm_chmap_elem *elem;
- if (!pcm || pcm->own_chmap ||
+ if (!pcm || !pcm->pcm || pcm->own_chmap ||
!hinfo->substreams)
continue;
elem = hinfo->chmap ? hinfo->chmap : snd_pcm_std_chmaps;
diff --git a/sound/pci/hda/hda_generic.c b/sound/pci/hda/hda_generic.c
index b077bb6..24f9111 100644
--- a/sound/pci/hda/hda_generic.c
+++ b/sound/pci/hda/hda_generic.c
@@ -671,7 +671,8 @@
}
for (i = 0; i < path->depth; i++) {
if (path->path[i] == nid) {
- if (dir == HDA_OUTPUT || path->idx[i] == idx)
+ if (dir == HDA_OUTPUT || idx == -1 ||
+ path->idx[i] == idx)
return true;
break;
}
@@ -682,7 +683,7 @@
/* check whether the NID is referred by any active paths */
#define is_active_nid_for_any(codec, nid) \
- is_active_nid(codec, nid, HDA_OUTPUT, 0)
+ is_active_nid(codec, nid, HDA_OUTPUT, -1)
/* get the default amp value for the target state */
static int get_amp_val_to_activate(struct hda_codec *codec, hda_nid_t nid,
@@ -883,8 +884,7 @@
struct hda_gen_spec *spec = codec->spec;
int i;
- if (!enable)
- path->active = false;
+ path->active = enable;
/* make sure the widget is powered up */
if (enable && (spec->power_down_unused || codec->power_save_node))
@@ -902,9 +902,6 @@
if (has_amp_out(codec, path, i))
activate_amp_out(codec, path, i, enable);
}
-
- if (enable)
- path->active = true;
}
EXPORT_SYMBOL_GPL(snd_hda_activate_path);
diff --git a/sound/pci/hda/patch_conexant.c b/sound/pci/hda/patch_conexant.c
index f788a91..ca03c40 100644
--- a/sound/pci/hda/patch_conexant.c
+++ b/sound/pci/hda/patch_conexant.c
@@ -200,12 +200,33 @@
return 0;
}
-#define cx_auto_free snd_hda_gen_free
+static void cx_auto_reboot_notify(struct hda_codec *codec)
+{
+ struct conexant_spec *spec = codec->spec;
+
+ if (codec->core.vendor_id != 0x14f150f2)
+ return;
+
+ /* Turn the CX20722 codec into D3 to avoid spurious noise
+ from the internal speaker during (and after) reboot */
+ cx_auto_turn_eapd(codec, spec->num_eapds, spec->eapds, false);
+
+ snd_hda_codec_set_power_to_all(codec, codec->core.afg, AC_PWRST_D3);
+ snd_hda_codec_write(codec, codec->core.afg, 0,
+ AC_VERB_SET_POWER_STATE, AC_PWRST_D3);
+}
+
+static void cx_auto_free(struct hda_codec *codec)
+{
+ cx_auto_reboot_notify(codec);
+ snd_hda_gen_free(codec);
+}
static const struct hda_codec_ops cx_auto_patch_ops = {
.build_controls = cx_auto_build_controls,
.build_pcms = snd_hda_gen_build_pcms,
.init = cx_auto_init,
+ .reboot_notify = cx_auto_reboot_notify,
.free = cx_auto_free,
.unsol_event = snd_hda_jack_unsol_event,
#ifdef CONFIG_PM
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
index 0b9847a..374ea53 100644
--- a/sound/pci/hda/patch_realtek.c
+++ b/sound/pci/hda/patch_realtek.c
@@ -5190,6 +5190,7 @@
SND_PCI_QUIRK(0x1028, 0x06d9, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x1028, 0x06da, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x1028, 0x06de, "Dell", ALC292_FIXUP_DISABLE_AAMIX),
+ SND_PCI_QUIRK(0x1028, 0x06db, "Dell", ALC292_FIXUP_DISABLE_AAMIX),
SND_PCI_QUIRK(0x1028, 0x164a, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x1028, 0x164b, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x103c, 0x1586, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC2),
@@ -5291,6 +5292,7 @@
SND_PCI_QUIRK(0x17aa, 0x220c, "Thinkpad T440s", ALC292_FIXUP_TPT440_DOCK),
SND_PCI_QUIRK(0x17aa, 0x220e, "Thinkpad T440p", ALC292_FIXUP_TPT440_DOCK),
SND_PCI_QUIRK(0x17aa, 0x2210, "Thinkpad T540p", ALC292_FIXUP_TPT440_DOCK),
+ SND_PCI_QUIRK(0x17aa, 0x2211, "Thinkpad W541", ALC292_FIXUP_TPT440_DOCK),
SND_PCI_QUIRK(0x17aa, 0x2212, "Thinkpad T440", ALC292_FIXUP_TPT440_DOCK),
SND_PCI_QUIRK(0x17aa, 0x2214, "Thinkpad X240", ALC292_FIXUP_TPT440_DOCK),
SND_PCI_QUIRK(0x17aa, 0x2215, "Thinkpad", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
diff --git a/sound/soc/Kconfig b/sound/soc/Kconfig
index 2ae9619..1d651b8 100644
--- a/sound/soc/Kconfig
+++ b/sound/soc/Kconfig
@@ -30,6 +30,9 @@
bool
select SND_DMAENGINE_PCM
+config SND_SOC_TOPOLOGY
+ bool
+
# All the supported SoCs
source "sound/soc/adi/Kconfig"
source "sound/soc/atmel/Kconfig"
diff --git a/sound/soc/Makefile b/sound/soc/Makefile
index e189903..669648b 100644
--- a/sound/soc/Makefile
+++ b/sound/soc/Makefile
@@ -1,6 +1,9 @@
snd-soc-core-objs := soc-core.o soc-dapm.o soc-jack.o soc-cache.o soc-utils.o
snd-soc-core-objs += soc-pcm.o soc-compress.o soc-io.o soc-devres.o soc-ops.o
+
+ifneq ($(CONFIG_SND_SOC_TOPOLOGY),)
snd-soc-core-objs += soc-topology.o
+endif
ifneq ($(CONFIG_SND_SOC_GENERIC_DMAENGINE_PCM),)
snd-soc-core-objs += soc-generic-dmaengine-pcm.o
diff --git a/sound/usb/card.c b/sound/usb/card.c
index 1fab977..0450593 100644
--- a/sound/usb/card.c
+++ b/sound/usb/card.c
@@ -638,7 +638,7 @@
int err = -ENODEV;
down_read(&chip->shutdown_rwsem);
- if (chip->probing && chip->in_pm)
+ if (chip->probing || chip->in_pm)
err = 0;
else if (!chip->shutdown)
err = usb_autopm_get_interface(chip->pm_intf);
diff --git a/sound/usb/quirks.c b/sound/usb/quirks.c
index 754e689..00ebc0c 100644
--- a/sound/usb/quirks.c
+++ b/sound/usb/quirks.c
@@ -1268,6 +1268,7 @@
return SNDRV_PCM_FMTBIT_DSD_U32_BE;
break;
+ case USB_ID(0x20b1, 0x000a): /* Gustard DAC-X20U */
case USB_ID(0x20b1, 0x2009): /* DIYINHK DSD DXD 384kHz USB to I2S/DSD */
case USB_ID(0x20b1, 0x2023): /* JLsounds I2SoverUSB */
if (fp->altsetting == 3)
diff --git a/tools/perf/builtin-record.c b/tools/perf/builtin-record.c
index de165a1..20b56eb 100644
--- a/tools/perf/builtin-record.c
+++ b/tools/perf/builtin-record.c
@@ -521,6 +521,15 @@
goto out_child;
}
+ /*
+ * Normally perf_session__new would do this, but it doesn't have the
+ * evlist.
+ */
+ if (rec->tool.ordered_events && !perf_evlist__sample_id_all(rec->evlist)) {
+ pr_warning("WARNING: No sample_id_all support, falling back to unordered processing\n");
+ rec->tool.ordered_events = false;
+ }
+
if (!rec->evlist->nr_groups)
perf_header__clear_feat(&session->header, HEADER_GROUP_DESC);
@@ -965,9 +974,11 @@
.tool = {
.sample = process_sample_event,
.fork = perf_event__process_fork,
+ .exit = perf_event__process_exit,
.comm = perf_event__process_comm,
.mmap = perf_event__process_mmap,
.mmap2 = perf_event__process_mmap2,
+ .ordered_events = true,
},
};
diff --git a/tools/perf/builtin-top.c b/tools/perf/builtin-top.c
index ecf3197..6135cc0 100644
--- a/tools/perf/builtin-top.c
+++ b/tools/perf/builtin-top.c
@@ -601,8 +601,8 @@
static void display_setup_sig(void)
{
- signal(SIGSEGV, display_sig);
- signal(SIGFPE, display_sig);
+ signal(SIGSEGV, sighandler_dump_stack);
+ signal(SIGFPE, sighandler_dump_stack);
signal(SIGINT, display_sig);
signal(SIGQUIT, display_sig);
signal(SIGTERM, display_sig);
diff --git a/tools/perf/config/Makefile b/tools/perf/config/Makefile
index 094ddae..d31fac1 100644
--- a/tools/perf/config/Makefile
+++ b/tools/perf/config/Makefile
@@ -638,7 +638,7 @@
prefix ?= $(HOME)
endif
bindir_relative = bin
-bindir = $(prefix)/$(bindir_relative)
+bindir = $(abspath $(prefix)/$(bindir_relative))
mandir = share/man
infodir = share/info
perfexecdir = libexec/perf-core
diff --git a/tools/perf/util/machine.c b/tools/perf/util/machine.c
index 7ff6827..f1a4c83 100644
--- a/tools/perf/util/machine.c
+++ b/tools/perf/util/machine.c
@@ -1387,6 +1387,24 @@
event->fork.ptid);
int err = 0;
+ if (dump_trace)
+ perf_event__fprintf_task(event, stdout);
+
+ /*
+ * There may be an existing thread that is not actually the parent,
+ * either because we are processing events out of order, or because the
+ * (fork) event that would have removed the thread was lost. Assume the
+ * latter case and continue on as best we can.
+ */
+ if (parent->pid_ != (pid_t)event->fork.ppid) {
+ dump_printf("removing erroneous parent thread %d/%d\n",
+ parent->pid_, parent->tid);
+ machine__remove_thread(machine, parent);
+ thread__put(parent);
+ parent = machine__findnew_thread(machine, event->fork.ppid,
+ event->fork.ptid);
+ }
+
/* if a thread currently exists for the thread id remove it */
if (thread != NULL) {
machine__remove_thread(machine, thread);
@@ -1395,8 +1413,6 @@
thread = machine__findnew_thread(machine, event->fork.pid,
event->fork.tid);
- if (dump_trace)
- perf_event__fprintf_task(event, stdout);
if (thread == NULL || parent == NULL ||
thread__fork(thread, parent, sample->time) < 0) {
diff --git a/tools/perf/util/stat-shadow.c b/tools/perf/util/stat-shadow.c
index 53e8bb7..2a5d8d7 100644
--- a/tools/perf/util/stat-shadow.c
+++ b/tools/perf/util/stat-shadow.c
@@ -85,7 +85,7 @@
else if (perf_evsel__match(counter, HARDWARE, HW_CPU_CYCLES))
update_stats(&runtime_cycles_stats[ctx][cpu], count[0]);
else if (perf_stat_evsel__is(counter, CYCLES_IN_TX))
- update_stats(&runtime_transaction_stats[ctx][cpu], count[0]);
+ update_stats(&runtime_cycles_in_tx_stats[ctx][cpu], count[0]);
else if (perf_stat_evsel__is(counter, TRANSACTION_START))
update_stats(&runtime_transaction_stats[ctx][cpu], count[0]);
else if (perf_stat_evsel__is(counter, ELISION_START))
@@ -398,20 +398,18 @@
" # %5.2f%% aborted cycles ",
100.0 * ((total2-avg) / total));
} else if (perf_stat_evsel__is(evsel, TRANSACTION_START) &&
- avg > 0 &&
runtime_cycles_in_tx_stats[ctx][cpu].n != 0) {
total = avg_stats(&runtime_cycles_in_tx_stats[ctx][cpu]);
- if (total)
+ if (avg)
ratio = total / avg;
fprintf(out, " # %8.0f cycles / transaction ", ratio);
} else if (perf_stat_evsel__is(evsel, ELISION_START) &&
- avg > 0 &&
runtime_cycles_in_tx_stats[ctx][cpu].n != 0) {
total = avg_stats(&runtime_cycles_in_tx_stats[ctx][cpu]);
- if (total)
+ if (avg)
ratio = total / avg;
fprintf(out, " # %8.0f cycles / elision ", ratio);
diff --git a/tools/perf/util/thread.c b/tools/perf/util/thread.c
index 28c4b74..0a9ae80 100644
--- a/tools/perf/util/thread.c
+++ b/tools/perf/util/thread.c
@@ -191,6 +191,12 @@
if (thread->pid_ == parent->pid_)
return 0;
+ if (thread->mg == parent->mg) {
+ pr_debug("broken map groups on thread %d/%d parent %d/%d\n",
+ thread->pid_, thread->tid, parent->pid_, parent->tid);
+ return 0;
+ }
+
/* But this one is new process, copy maps. */
for (i = 0; i < MAP__NR_TYPES; ++i)
if (map_groups__clone(thread->mg, parent->mg, i) < 0)
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 8b8a444..d8db2f8f 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -2206,6 +2206,11 @@
}
kvm->vcpus[atomic_read(&kvm->online_vcpus)] = vcpu;
+
+ /*
+ * Pairs with smp_rmb() in kvm_get_vcpu. Write kvm->vcpus
+ * before the incremented kvm->online_vcpus value.
+ */
smp_wmb();
atomic_inc(&kvm->online_vcpus);
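
The comment added above documents a publish/consume pairing: the vcpu pointer must be globally visible before the increased count, and readers must read the count before the array slot. The same ordering in portable C11, with fences as stand-ins for smp_wmb()/smp_rmb() (the array size and names are illustrative):

    #include <stdatomic.h>
    #include <stddef.h>

    struct vcpu;

    static struct vcpu *vcpus[64];
    static atomic_int   online;

    static void publish_vcpu(int idx, struct vcpu *v)
    {
            vcpus[idx] = v;
            atomic_thread_fence(memory_order_release);      /* smp_wmb() */
            atomic_fetch_add_explicit(&online, 1, memory_order_relaxed);
    }

    static struct vcpu *get_vcpu(int idx)
    {
            if (idx >= atomic_load_explicit(&online, memory_order_relaxed))
                    return NULL;
            atomic_thread_fence(memory_order_acquire);      /* smp_rmb() */
            return vcpus[idx];
    }
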
@@ -2618,9 +2623,6 @@
case KVM_CAP_USER_MEMORY:
case KVM_CAP_DESTROY_MEMORY_REGION_WORKS:
case KVM_CAP_JOIN_MEMORY_REGIONS_WORKS:
-#ifdef CONFIG_KVM_APIC_ARCHITECTURE
- case KVM_CAP_SET_BOOT_CPU_ID:
-#endif
case KVM_CAP_INTERNAL_ERROR_DATA:
#ifdef CONFIG_HAVE_KVM_MSI
case KVM_CAP_SIGNAL_MSI:
@@ -2716,17 +2718,6 @@
r = kvm_ioeventfd(kvm, &data);
break;
}
-#ifdef CONFIG_KVM_APIC_ARCHITECTURE
- case KVM_SET_BOOT_CPU_ID:
- r = 0;
- mutex_lock(&kvm->lock);
- if (atomic_read(&kvm->online_vcpus) != 0)
- r = -EBUSY;
- else
- kvm->bsp_vcpu_id = arg;
- mutex_unlock(&kvm->lock);
- break;
-#endif
#ifdef CONFIG_HAVE_KVM_MSI
case KVM_SIGNAL_MSI: {
struct kvm_msi msi;